python-gnatpython-54.orig/scripts/opt-parser

#!/usr/bin/env python
############################################################################
#
#                                OPT-PARSER
#
#          Copyright (C) 2008 - 2010 Ada Core Technologies, Inc.
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
############################################################################

import logging
import sys

from gnatpython.optfileparser import *

if __name__ == "__main__":
    if len(sys.argv) < 3:
        # sys.argv[1] is the tag list, sys.argv[2] the opt file to parse.
        print "Usage: parser <tags> <opt_file>"
        sys.exit(1)

    # Enable debug traces when -d is passed after the two mandatory
    # arguments.
    for arg in sys.argv[3:]:
        if arg == "-d":
            logging.basicConfig(
                level=logging.DEBUG,
                format='%(asctime)s %(levelname)-8s %(message)s',
                datefmt='%a, %d %b %Y %H:%M:%S')

    filename = sys.argv[2]
    tags = sys.argv[1]

    logging.debug('opt_parse (' + tags + ', ' + filename + ')')

    try:
        parser = OptFileParse(tags, filename)
        result = '%s' % parser
        if result != '':
            print result
    except BadFormattingError, error:
        print filename + " error"
        sys.exit(error)

python-gnatpython-54.orig/scripts/mainloop

#!/usr/bin/env python
############################################################################
#
#                                 MAINLOOP
#
#          Copyright (C) 2008 - 2010 Ada Core Technologies, Inc.
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
############################################################################

"""Usage: mainloop [options] test_list_file command_pattern

Command line interface for the testsuite mainloop (in the ACATS/Fixed bugs
style).
"""

from gnatpython.main import Main
from gnatpython.mainloop import (
    MainLoop, add_mainloop_options, generate_collect_result)
from gnatpython.ex import Run
from gnatpython.fileutils import split_file

import os
import sys


def main():
    m = Main(add_targets_options=True)
    add_mainloop_options(m)
    m.add_option("-o", "--output-dir",
                 dest="output_dir",
                 metavar="DIR",
                 default="./",
                 help="select output dir")
    m.add_option('--format', dest="diffs_format", metavar="FORMAT",
                 default=os.environ.get("MAINLOOP_FORMAT", "expected+actual"),
                 help="Select diffs format. Supported values are:"
                      " expected+actual (legacy format) or"
                      " diff (using the TEST.diff output)."
                      " Note that you can set the MAINLOOP_FORMAT environment"
                      " variable to change the default (expected+actual).")
    m.parse_args()

    if len(m.args) != 2:
        print "Error: 2 arguments expected"
        sys.exit(m.usage)

    test_list_file = m.args[0]
    command_pattern = m.args[1]

    # Retrieve list of old crashes and diffs/failed
    # ??? this can be computed by generate_collect_result if old_output_dir
    # is set
    old_crashes_file = os.path.join(m.options.output_dir, 'old_crashes')
    old_diffs_file = os.path.join(m.options.output_dir, 'old_diffs')

    # Initialize some metrics.
    # First read the list of tests to be run.
    test_list = split_file(test_list_file, ' ')
    metrics = {'total': sum([len(e) for e in test_list])}

    # Retrieve list of old crashes and diffs/failed
    metrics['old_diffs'] = [
        k.split(':')[0]
        for k in split_file(old_diffs_file, ignore_errors=True)]
    metrics['old_crashes'] = [
        k.split(':')[0]
        for k in split_file(old_crashes_file, ignore_errors=True)]

    def run_test(name, job_info):
        """Run a single test.

        See the mainloop documentation.
        """
        # VxWorks tests need WORKER_ID to be set in order to have an id for
        # vxsim that will not collide with other instances.
        os.environ['WORKER_ID'] = str(job_info[0])
        cmd = command_pattern % {'name': name}
        cmd = cmd.split(";")
        return Run(cmd, bg=True, output=None, error=None)

    collect_result = generate_collect_result(
        results_file=os.path.join(m.options.output_dir, 'results'),
        metrics=metrics,
        options=m.options)

    # Launch the mainloop
    MainLoop(test_list, run_test, collect_result)


if __name__ == '__main__':
    main()

python-gnatpython-54.orig/README

INSTALLATION
============

If you want to install this module into your Python distribution, just
launch:

   $ python ./setup.py install

If you want to install it only for the current user, you can add the --user
option to the previous command:

   $ python ./setup.py install --user

Troubleshooting
===============

If you get an error when using --user saying that this switch cannot be
combined with "prefix", this is a bug in the distutils.py setup on your
machine (it happens with several Linux distributions). The workaround is to
create a file called $HOME/.pydistutils.cfg which contains:

   [install]
   prefix=

python-gnatpython-54.orig/COPYING3

                        GNU GENERAL PUBLIC LICENSE
                           Version 3, 29 June 2007

 Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
 Everyone is permitted to copy and distribute verbatim copies of this
 license document, but changing it is not allowed.

                                 Preamble

The GNU General Public License is a free, copyleft license for software and
other kinds of works.

The licenses for most software and other practical works are designed to
take away your freedom to share and change the works. By contrast, the GNU
General Public License is intended to guarantee your freedom to share and
change all versions of a program--to make sure it remains free software for
all its users. We, the Free Software Foundation, use the GNU General Public
License for most of our software; it applies also to any other work released
this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. 
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. 
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . python-gnatpython-54.orig/src/0000755000175000017500000000000011654532465016135 5ustar xavierxavierpython-gnatpython-54.orig/src/mod_term/0000755000175000017500000000000011654532465017743 5ustar xavierxavierpython-gnatpython-54.orig/src/mod_term/terminals.c0000644000175000017500000013750611654532465022121 0ustar xavierxavier/**************************************************************************** * * * T E R M I N A L S * * * * C Implementation File * * * * Copyright (C) 2008-2011, AdaCore * * * * GNAT is free software; you can redistribute it and/or modify it under * * terms of the GNU General Public License as published by the Free Soft- * * ware Foundation; either version 2, or (at your option) any later ver- * * sion. GNAT is distributed in the hope that it will be useful, but WITH- * * OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * * for more details. You should have received a copy of the GNU General * * Public License distributed with GNAT; see file COPYING. If not, write * * to the Free Software Foundation, 51 Franklin Street, Fifth Floor, * * Boston, MA 02110-1301, USA. * * * * As a special exception, if you link this file with other files to * * produce an executable, this file does not by itself cause the resulting * * executable to be covered by the GNU General Public License. 
This except- * * ion does not however invalidate any other reasons why the executable * * file might be covered by the GNU Public License. * * * * GNAT was originally developed by the GNAT team at New York University. * * Extensive contributions were provided by Ada Core Technologies Inc. * * * ****************************************************************************/ #ifndef WIN32 /* First defined some macro to identify easily some systems */ #if defined (__FreeBSD__) \ || defined (__OpenBSD__) \ || defined (__NetBSD__) \ || defined (__DragonFly__) # define FREEBSD #endif #if defined (__alpha__) && defined (__osf__) # define OSF1 #endif #if defined (__mips) && defined (__sgi) # define IRIX #endif /* Include every system header we need */ #define _GNU_SOURCE #include #include #include /* On some system termio is either absent or including it will disable termios (HP-UX) */ #if ! defined (__hpux__) && ! defined (FREEBSD) && ! defined (__APPLE__) # include #endif #include #include #include #include #include #include #include #include #if defined (sun) # include #endif #if defined (FREEBSD) || defined (sun) # include #endif #if defined (__hpux__) # include # include #endif #define CDISABLE _POSIX_VDISABLE /* On HP-UX and Sun system, there is a bzero function but with a different signature. Use memset instead */ #if defined (__hpux__) || defined (sun) || defined (_AIX) # define bzero(s,n) memset (s,0,n) #endif /* POSIX does not specify how to open the master side of a terminal.Several methods are available (system specific): 1- using a cloning device (USE_CLONE_DEVICE) 2- getpt (USE_GETPT) 3- openpty (USE_OPENPTY) 4- _getpty (USE_GETPTY) When using the cloning device method, the macro USE_CLONE_DEVICE should contains a full path to the adequate device. When a new system is about to be supported, one of the previous macro should be set otherwise allocate_pty_desc will return an error */ /* Configurable part */ #if defined (__APPLE__) || defined (FREEBSD) #define USE_OPENPTY #elif defined (IRIX) #define USE_GETPTY #elif defined (linux) #define USE_GETPT #elif defined (sun) #define USE_CLONE_DEVICE "/dev/ptmx" #elif defined (_AIX) #define USE_CLONE_DEVICE "/dev/ptc" #elif defined (OSF1) /* On Tru64, the systems offers various interfaces to open a terminal: - /dev/ptmx: this the system V driver (stream based), - /dev/ptmx_bsd: the non stream based clone device, - the openpty function which use BSD interface. Using directly /dev/ptmx_bsd on Tru64 5.1B seems to consume all the available slave ptys (why ?). When using openpty it seems that the function handles the creation of entries in /dev/pts when necessary and so avoid this starvation issue. The pty man entry suggests also to use openpty. */ #define USE_OPENPTY #elif defined (__hpux__) /* On HP-UX we use the streamed version. Using the non streamed version is not recommanded (through "/dev/ptym/clone"). Indeed it seems that there are issues to detect process terminations. 
*/ #define USE_CLONE_DEVICE "/dev/ptmx" #endif /* structure that holds information about the terminal used and the process connected on the slave side */ typedef struct pty_desc_struct { int master_fd; /* fd of the master side if the terminal */ int slave_fd; /* fd of the slave side */ char slave_name[32]; /* filename of the slave side */ int child_pid; /* PID of the child process connected to the slave side of the terminal */ } term_handler; /* relocate_fd - ensure that a file descriptor is greater than a given value * * PARAMETERS * fd file descriptor we want to relocate * min_fd min expected value after relocation * RETURN VALUE * the new fd */ static int relocate_fd (int fd, int min_fd) { int new_fd; if (fd >= min_fd) return fd; new_fd = dup (fd); if (new_fd == -1) exit (1); /* we need to hold the older fds in order to be sure that new_fd value is increasing. So only close the fd(s) once we have a candidate. */ new_fd = relocate_fd (new_fd, min_fd); close (fd); return new_fd; } /* allocate_pty_desc - allocate a pseudo terminal * * PARAMETERS * out desc returned pointer to a term_handler structure containing information * about the opened pseudo terminal * RETURN VALUE * -1 if failed * 0 if ok * COMMENTS * If the function is successful we should have at least the master side fd * and the slave side filename. On some system, the slave side will also be * opened. If this is not the case the slave side will be open once we are in * the child process (note that opening the slave side at this stage will * failed...). */ extern char* ptsname (int); static int allocate_pty_desc (term_handler **desc) { term_handler *result; int status = 0; int slave_fd = -1; int master_fd = -1; char *slave_name = NULL; #ifdef USE_GETPT master_fd = getpt (); #elif defined (USE_OPENPTY) status = openpty (&master_fd, &slave_fd, NULL, NULL, NULL); #elif defined (USE_GETPTY) slave_name = _getpty (&master_fd, O_RDWR | O_NDELAY, 0600, 0); if (slave_name == NULL) status = -1; #elif defined (USE_CLONE_DEVICE) master_fd = open (USE_CLONE_DEVICE, O_RDWR | O_NONBLOCK, 0); #else printf ("[error]: terminal support is not configured\n"); return -1; #endif /* at this stage we should have the master side fd and status should be 0 */ if (status != 0 || master_fd < 0) { /* If this is not the case close all opened files and return -1 */ printf ("[error]: cannot allocate master side of the pty\n"); if (master_fd >= 0) close (master_fd); if (slave_fd >= 0) close (slave_fd); *desc = NULL; return -1; } /* retrieve the file name of the slave side if necessary */ if (slave_name == NULL) slave_name = (char *) ptsname (master_fd); /* Now we should have slave file name */ if (slave_name == NULL) { /* If not the case close any opened file and return - 1 */ printf ("[error]: cannot allocate slave side of the pty\n"); if (master_fd >= 0) close (master_fd); if (slave_fd >= 0) close (slave_fd); *desc = NULL; return -1; } /* grant access to the slave side */ grantpt (master_fd); /* unlock the terminal */ unlockpt (master_fd); /* set desc and return 0 */ result = malloc (sizeof (term_handler)); result->master_fd = master_fd; result->slave_fd = slave_fd; /* the string returned by ptsname or _getpty is a static allocated string. 
So we should make a copy */ strncpy (result->slave_name, slave_name, sizeof (result->slave_name)); result->slave_name[sizeof (result->slave_name) - 1] = '\0'; result->child_pid = -1; *desc=result; return 0; } /* some utility macro that make the code of child_setup_tty easier to read */ #define __enable(a, b) ((a) |= (b)) #define __disable(a, b) ((a) &= ~(b)) /* some properties do not exist on all systems. Set their value to 0 in that case */ #ifndef IUCLC #define IUCLC 0 #endif #ifndef OLCUC #define OLCUC 0 #endif #ifndef NLDLY #define NLDLY 0 #define CRDLY 0 #define TABDLY 0 #define BSDLY 0 #define VTDLY 0 #define FFDLY 0 #endif /* child_setup_tty - set terminal properties * * PARAMETERS * file descriptor of the slave side of the terminal * * RETURN VALUE * 0 if success, any other value if failed. * * COMMENTS * None */ static int child_setup_tty (int fd) { struct termios s; int status; /* ensure that s is filled with 0 */ bzero (&s, sizeof (&s)); /* Get the current terminal settings */ status = tcgetattr (fd, &s); if (status != 0) return -1; /* Adjust input modes */ __disable (s.c_iflag, IUCLC); /* don't transform to lower case */ __disable (s.c_iflag, ISTRIP); /* don't delete 8th bit */ /* Adjust output modes */ __enable (s.c_oflag, OPOST); /* enable postprocessing */ __disable (s.c_oflag, ONLCR); /* don't map LF to CR-LF */ __disable (s.c_oflag, NLDLY|CRDLY|TABDLY|BSDLY|VTDLY|FFDLY); /* disable delays */ __disable (s.c_oflag, OLCUC); /* don't transform to upper case */ /* Adjust control modes */ s.c_cflag = (s.c_cflag & ~CSIZE) | CS8; /* Don't strip 8th bit */ /* Adjust local modes */ __disable (s.c_lflag, ECHO); /* disable echo */ __enable (s.c_lflag, ISIG); /* enable signals */ __enable (s.c_lflag, ICANON); /* erase/kill/eof processing */ /* Adjust control characters */ /* IMPORTANT: we need to ensure that Ctrl-C will trigger an interrupt signal otherwise send_signal_via_characters will fail */ s.c_cc[VEOF] = 04; /* insure that EOF is Control-D */ s.c_cc[VERASE] = CDISABLE; /* disable erase processing */ s.c_cc[VKILL] = CDISABLE; /* disable kill processing */ s.c_cc[VQUIT] = 28; /* Control-\ */ s.c_cc[VINTR] = 03; /* Control-C */ s.c_cc[VEOL] = CDISABLE; s.c_cc[VSUSP] = 26; /* Control-Z */ /* push our changes */ status = tcsetattr (fd, TCSADRAIN, &s); return status; } /* setup_communication - interface to the external world. Should be called * before forking. On Unixes this function only call allocate_pty_desc. * The Windows implementation (in different part of this file) is very * different. * * PARAMETERS * out desc returned pointer to a term_handler structure * RETURN VALUE * 0 if success, -1 otherwise */ int gvd_setup_communication (term_handler** desc) { int status; return allocate_pty_desc (desc); } /* gvd_setup_parent_communication - interface to the external world. 
Should * be called after forking in the parent process * * PARAMETERS * out in_fd out out_fd out err_fd fds corresponding to the parent side of the terminal in pid_out child process pid * RETRUN VALUE * 0 */ int gvd_setup_parent_communication (term_handler *desc, int* in_fd, /* input */ int* out_fd, /* output */ int* err_fd, /* error */ int* pid_out) { *in_fd = desc->master_fd; *out_fd= desc->master_fd; *err_fd= desc->master_fd; desc->child_pid = *pid_out; return 0; } /* gvd_setup_winsize - Sets up the size of the terminal * This lets the process know the size of the terminal */ void gvd_setup_winsize (term_handler *desc, int rows, int columns) { #ifdef TIOCGWINSZ struct winsize s; s.ws_row = (unsigned short)rows; s.ws_col = (unsigned short)columns; s.ws_xpixel = 0; s.ws_ypixel = 0; ioctl (desc->master_fd, TIOCSWINSZ, &s); #ifdef SIGWINCH if (desc->child_pid > 0) { /* Let the process know about the change in size */ kill (desc->child_pid, SIGWINCH); } #endif #endif } /* gvd_setup_child_communication - interface to external world. Should be * called after forking in the child process. On Unixes, this function * first adjust the line setting, set standard output, input and error and * then spawn the program. * * PARAMETERS * desc a term_handler structure containing the pty parameters * new_argv argv of the program to be spawned * RETURN VALUE * this function should not return */ int gvd_setup_child_communication (term_handler *desc, char **new_argv, int Use_Pipes) { int status; int pid = getpid (); setsid (); /* open the slave side of the terminal if necessary */ if (desc->slave_fd == -1) #if defined (_AIX) /* On AIX, if the slave process is not opened with O_NDELAY or O_NONBLOCK then we might have some processes hanging on I/O system calls. Not sure we can do that for all platforms so do it only on AIX for the moment. On AIX O_NONBLOCK and O_NDELAY have slightly different meanings. When reading on the slave fd, in case there is no data available, if O_NDELAY is set then 0 is returned. If O_NON_BLOCK is -1 is returned. It seems that interactive programs such as GDB prefer the O_NDELAY behavior. We chose O_NONBLOCK because it allows us to make the distinction between a true EOF and an EOF returned because there is no data available to be read. */ desc->slave_fd = open (desc->slave_name, O_RDWR | O_NONBLOCK, 0); #else desc->slave_fd = open (desc->slave_name, O_RDWR, 0); #endif #if defined (sun) || defined (__hpux__) /* On systems such as Solaris we are using stream. We need to push the right "modules" in order to get the expected terminal behaviors. Otherwise functionalities such as termios are not available. */ ioctl (desc->slave_fd, I_PUSH, "ptem"); ioctl (desc->slave_fd, I_PUSH, "ldterm"); ioctl (desc->slave_fd, I_PUSH, "ttcompat"); #endif #ifdef TIOCSCTTY /* make the tty the controling terminal */ status = ioctl (desc->slave_fd, TIOCSCTTY, 0); #endif /* adjust tty settings */ child_setup_tty (desc->slave_fd); gvd_setup_winsize (desc, 24, 80); /* To prevent errors in some shells */ /* stdin, stdout and stderr should be now our tty */ dup2 (desc->slave_fd, 0); dup2 (desc->slave_fd, 1); dup2 (desc->slave_fd, 2); if (desc->slave_fd > 2) close (desc->slave_fd); /* adjust process group settings */ status = setpgid (pid, pid); status = tcsetpgrp (0, pid); /* launch the program */ status = execvp (new_argv[0], new_argv); printf ("status: %d\n", status); /* return the pid */ return pid; } /* send_signal_via_characters - Send a characters that will trigger a signal * in the child process. 
* * PARAMETERS * desc a term_handler structure containing terminal information * int a signal number * RETURN VALUE * None */ static void send_signal_via_characters (term_handler *desc, int signal_number) { char ctrl_c = 03; char ctrl_backslash = 28; char ctrl_Z = 26; switch (signal_number) { case SIGINT: write (desc->master_fd, &ctrl_c, 1); return; case SIGQUIT: write (desc->master_fd, &ctrl_backslash, 1); return; case SIGTSTP: write (desc->master_fd, &ctrl_Z, 1); return; } } /* gvd_interrupt_process - interrupt the child process * * PARAMETERS * desc a term_handler structure */ int gvd_interrupt_process (term_handler *desc) { send_signal_via_characters (desc, SIGINT); return 0; } /* gvd_interrupt_pid - interrupt a process group * * PARAMETERS * pid pid of the process to interrupt */ int gvd_interrupt_pid (int pid) { kill (-pid, SIGINT); return 0; } /* gvd_terminate_process - kill a child process * * PARAMETERS * desc term_handler structure */ int gvd_terminate_process (term_handler *desc) { close(desc->master_fd); return kill (desc->child_pid, SIGKILL); } /* gvd_waitpid - wait for the child proces to die * * PARAMETERS * desc term_handler structure * RETURN VALUE * exit status of the child process */ int gvd_waitpid (term_handler *desc) { int status = 0; waitpid (desc->child_pid, &status, 0); return WEXITSTATUS (status); } /* gvd_tty_supported - Are tty supported ? * * RETURN VALUE * always 1 on Unix systems */ int gvd_tty_supported () { return 1; } /* gvd_free_process - free a term_handler structure * * PARAMETERS * in out desc: a pty desc structure */ void gvd_free_process (void* desc) { free ((term_handler *)desc); } /* gvd_send_header - dummy function. this interface is only used on Windows */ void gvd_send_header (term_handler* desc, char header[5], int size, int *ret) { *ret = 0; } /* gvd_reset_tty - reset line setting * * PARAMETERS * desc: a term_handler structure */ void gvd_reset_tty (term_handler* desc) { child_setup_tty (desc->master_fd); } /* gvd_new_tty - allocate a new terminal * * RETURN VALUE * a term_handler structure */ term_handler * gvd_new_tty () { int status; term_handler* desc; status = allocate_pty_desc (&desc); child_setup_tty (desc->master_fd); return desc; } /* gvd_close_tty - close a terminal * * PARAMETERS * desc a term_handler strucure */ void gvd_close_tty (term_handler* desc) { if (desc->master_fd >= 0) close (desc->master_fd); if (desc->slave_fd >= 0) close (desc->slave_fd); } /* gvd_tty_name - return slave side device name * * PARAMETERS * desc a term_handler strucure * RETURN VALUE * a string */ char * gvd_tty_name (term_handler* desc) { return desc->slave_name; } /* gvd_tty_name - return master side fd * * PARAMETERS * desc a term_handler strucure * RETURN VALUE * a fd */ int gvd_tty_fd (term_handler* desc) { return desc->master_fd; } #ifdef __hpux__ #include #endif #include #ifndef NO_FD_SET #define SELECT_MASK fd_set #else /* !NO_FD_SET */ #ifndef _AIX typedef long fd_mask; #endif /* _AIX */ #ifdef _IBMR2 #define SELECT_MASK void #else /* !_IBMR2 */ #define SELECT_MASK int #endif /* !_IBMR2 */ #endif /* !NO_FD_SET */ int __gnat_expect_poll (int *fd, int num_fd, int timeout, int *is_set) { struct timeval tv; SELECT_MASK rset; SELECT_MASK eset; int max_fd = 0; int ready; int i; int received; tv.tv_sec = timeout / 1000; tv.tv_usec = (timeout % 1000) * 1000; do { FD_ZERO (&rset); FD_ZERO (&eset); for (i = 0; i < num_fd; i++) { FD_SET (fd[i], &rset); FD_SET (fd[i], &eset); if (fd[i] > max_fd) max_fd = fd[i]; } ready = select (max_fd + 1, &rset, NULL, 
&eset, timeout == -1 ? NULL : &tv); if (ready > 0) { received = 0; for (i = 0; i < num_fd; i++) { if (FD_ISSET (fd[i], &rset)) { is_set[i] = 1; received = 1; } else is_set[i] = 0; } #ifdef __hpux__ for (i = 0; i < num_fd; i++) { if (FD_ISSET (fd[i], &eset)) { struct request_info ei; /* Only query and reset error state if no file descriptor is ready to be read, otherwise we will be signalling a died process too early */ if (!received) { ioctl (fd[i], TIOCREQCHECK, &ei); if (ei.request == TIOCCLOSE) { ioctl (fd[i], TIOCREQSET, &ei); return -1; } ioctl (fd[i], TIOCREQSET, &ei); } ready--; } } #endif } } while (timeout == -1 && ready == 0); return ready; } #else /* WIN32 */ #include #include #include #include #include #include #include #define MAXPATHLEN 1024 #define NILP(x) ((x) == 0) #define Qnil 0 #define report_file_error(x, y) fprintf (stderr, "Error: %s\n", x); #define INTEGERP(x) 1 #define XINT(x) x typedef struct GVD_Process { int pid; /* Number of this process */ PROCESS_INFORMATION procinfo; HANDLE w_infd, w_outfd; HANDLE w_forkin, w_forkout; BOOL usePipe; int infd, outfd; } term_handler; /* Control whether create_child cause the process to inherit GPS' error mode setting. The default is 1, to minimize the possibility of subprocesses blocking when accessing unmounted drives. */ static int Vw32_start_process_inherit_error_mode = 1; /* Control whether spawnve quotes arguments as necessary to ensure correct parsing by child process. Because not all uses of spawnve are careful about constructing argv arrays, we make this behaviour conditional (off by default, since a similar operation is already done in g-expect.adb by calling Normalize_Argument). */ static int Vw32_quote_process_args = 0; static DWORD AbsoluteSeek(HANDLE, DWORD); static VOID ReadBytes(HANDLE, LPVOID, DWORD); #define XFER_BUFFER_SIZE 2048 /* This tell if the executable we're about to launch uses a GUI interface. */ /* if we can't determine it, we will return true */ static int is_gui_app (char *exe) { HANDLE hImage; DWORD bytes; DWORD iSection; DWORD SectionOffset; DWORD CoffHeaderOffset; DWORD MoreDosHeader[16]; CHAR *file; size_t nlen; ULONG ntSignature; IMAGE_DOS_HEADER image_dos_header; IMAGE_FILE_HEADER image_file_header; IMAGE_OPTIONAL_HEADER image_optional_header; IMAGE_SECTION_HEADER image_section_header; /* * Open the reference file. */ nlen = strlen (exe); file = exe; if (nlen > 2) { if (exe[0] == '"') { /* remove quotes */ nlen -= 2; file = malloc ((nlen + 1) * sizeof (char)); memcpy (file, &exe[1], nlen); file [nlen] = '\0'; } } hImage = CreateFile(file, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL); if (file != exe) { free (file); } if (INVALID_HANDLE_VALUE == hImage) { report_file_error ("Could not open exe: ", Qnil); report_file_error (exe, Qnil); report_file_error ("\n", Qnil); CloseHandle (hImage); return -1; } /* * Read the MS-DOS image header. */ ReadBytes(hImage, &image_dos_header, sizeof(IMAGE_DOS_HEADER)); if (IMAGE_DOS_SIGNATURE != image_dos_header.e_magic) { report_file_error("Sorry, I do not understand this file.\n", Qnil); CloseHandle (hImage); return -1; } /* * Read more MS-DOS header. */ ReadBytes(hImage, MoreDosHeader, sizeof(MoreDosHeader)); /* * Get actual COFF header. */ CoffHeaderOffset = AbsoluteSeek(hImage, image_dos_header.e_lfanew) + sizeof(ULONG); if (CoffHeaderOffset < 0) { CloseHandle (hImage); return -1; } ReadBytes (hImage, &ntSignature, sizeof(ULONG)); if (IMAGE_NT_SIGNATURE != ntSignature) { report_file_error ("Missing NT signature. 
Unknown file type.\n", Qnil); CloseHandle (hImage); return -1; } SectionOffset = CoffHeaderOffset + IMAGE_SIZEOF_FILE_HEADER + IMAGE_SIZEOF_NT_OPTIONAL_HEADER; ReadBytes(hImage, &image_file_header, IMAGE_SIZEOF_FILE_HEADER); /* * Read optional header. */ ReadBytes(hImage, &image_optional_header, IMAGE_SIZEOF_NT_OPTIONAL_HEADER); CloseHandle (hImage); switch (image_optional_header.Subsystem) { case IMAGE_SUBSYSTEM_UNKNOWN: return 1; break; case IMAGE_SUBSYSTEM_NATIVE: return 1; break; case IMAGE_SUBSYSTEM_WINDOWS_GUI: return 1; break; case IMAGE_SUBSYSTEM_WINDOWS_CUI: return 0; break; case IMAGE_SUBSYSTEM_OS2_CUI: return 0; break; case IMAGE_SUBSYSTEM_POSIX_CUI: return 0; break; default: /* Unknown, return GUI app to be preservative: if yes, it will be correctly launched, if no, it will be launched, and a console will be also displayed, which is not a big deal */ return 1; break; } } static DWORD AbsoluteSeek (HANDLE hFile, DWORD offset) { DWORD newOffset; newOffset = SetFilePointer (hFile, offset, NULL, FILE_BEGIN); if (newOffset == 0xFFFFFFFF) return -1; else return newOffset; } static VOID ReadBytes (HANDLE hFile, LPVOID buffer, DWORD size) { DWORD bytes; if (!ReadFile(hFile, buffer, size, &bytes, NULL)) { size = 0; return; } else if (size != bytes) { return; } } static int nt_spawnve (char *exe, char **argv, char *env, term_handler *process) { STARTUPINFO start; SECURITY_ATTRIBUTES sec_attrs; SECURITY_DESCRIPTOR sec_desc; DWORD flags; char dir[ MAXPATHLEN ]; int pid; int is_gui, use_cmd; char *cmdline, *parg, **targ; int do_quoting = 0; char escape_char; int arglen; /* we have to do some conjuring here to put argv and envp into the form CreateProcess wants... argv needs to be a space separated/null terminated list of parameters, and envp is a null separated/double-null terminated list of parameters. Additionally, zero-length args and args containing whitespace or quote chars need to be wrapped in double quotes - for this to work, embedded quotes need to be escaped as well. The aim is to ensure the child process reconstructs the argv array we start with exactly, so we treat quotes at the beginning and end of arguments as embedded quotes. Note that using backslash to escape embedded quotes requires additional special handling if an embedded quote is already preceeded by backslash, or if an arg requiring quoting ends with backslash. In such cases, the run of escape characters needs to be doubled. For consistency, we apply this special handling as long as the escape character is not quote. Since we have no idea how large argv and envp are likely to be we figure out list lengths on the fly and allocate them. */ if (!NILP (Vw32_quote_process_args)) { do_quoting = 1; /* Override escape char by binding w32-quote-process-args to desired character, or use t for auto-selection. */ if (INTEGERP (Vw32_quote_process_args)) escape_char = XINT (Vw32_quote_process_args); else escape_char = '\\'; } /* do argv... */ arglen = 0; targ = argv; while (*targ) { char *p = *targ; int need_quotes = 0; int escape_char_run = 0; if (*p == 0) need_quotes = 1; for ( ; *p; p++) { if (*p == '"') { /* allow for embedded quotes to be escaped */ arglen++; need_quotes = 1; /* handle the case where the embedded quote is already escaped */ if (escape_char_run > 0) { /* To preserve the arg exactly, we need to double the preceding escape characters (plus adding one to escape the quote character itself). 
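         Worked illustration (not taken from the sources): with the default
         escape character '\', an argument containing the two characters \"
         gets one extra backslash for the preceding run plus one to escape
         the quote itself, so foo\"bar is later emitted inside the enclosing
         quotes as foo\\\"bar.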
*/ arglen += escape_char_run; } } else if (*p == ' ' || *p == '\t') { need_quotes = 1; } if (*p == escape_char && escape_char != '"') escape_char_run++; else escape_char_run = 0; } if (need_quotes) { arglen += 2; /* handle the case where the arg ends with an escape char - we must not let the enclosing quote be escaped. */ if (escape_char_run > 0) arglen += escape_char_run; } arglen += strlen (*targ) + 1; targ++; } is_gui = is_gui_app (argv[0]); use_cmd = FALSE; if (is_gui == -1) { /* could not determine application type. Try launching with "cmd /c" */ is_gui = FALSE; arglen += 7; use_cmd = TRUE; } cmdline = (char*)malloc (arglen + 1); targ = argv; parg = cmdline; if (use_cmd == TRUE) { strcpy (parg, "cmd /c "); parg += 7; } while (*targ) { char * p = *targ; int need_quotes = 0; if (*p == 0) need_quotes = 1; if (do_quoting) { for ( ; *p; p++) if (*p == ' ' || *p == '\t' || *p == '"') need_quotes = 1; } if (need_quotes) { int escape_char_run = 0; char * first; char * last; p = *targ; first = p; last = p + strlen (p) - 1; *parg++ = '"'; for ( ; *p; p++) { if (*p == '"') { /* double preceding escape chars if any */ while (escape_char_run > 0) { *parg++ = escape_char; escape_char_run--; } /* escape all quote chars, even at beginning or end */ *parg++ = escape_char; } *parg++ = *p; if (*p == escape_char && escape_char != '"') escape_char_run++; else escape_char_run = 0; } /* double escape chars before enclosing quote */ while (escape_char_run > 0) { *parg++ = escape_char; escape_char_run--; } *parg++ = '"'; } else { strcpy (parg, *targ); parg += strlen (*targ); } *parg++ = ' '; targ++; } *--parg = '\0'; memset (&start, 0, sizeof (start)); start.cb = sizeof (start); if (process->usePipe == TRUE) { start.dwFlags = STARTF_USESTDHANDLES; start.hStdInput = process->w_forkin; start.hStdOutput = process->w_forkout; /* child's stderr is always redirected to outfd */ start.hStdError = process->w_forkout; } else { start.dwFlags = STARTF_USESTDHANDLES; /* We only need to redirect stderr/stdout here. Stdin will be forced to the spawned process console by explaunch */ start.hStdInput = NULL; start.hStdOutput = process->w_forkout; start.hStdError = process->w_forkout; } /* Explicitly specify no security */ if (!InitializeSecurityDescriptor (&sec_desc, SECURITY_DESCRIPTOR_REVISION)) goto EH_Fail; if (!SetSecurityDescriptorDacl (&sec_desc, TRUE, NULL, FALSE)) goto EH_Fail; sec_attrs.nLength = sizeof (sec_attrs); sec_attrs.lpSecurityDescriptor = &sec_desc; sec_attrs.bInheritHandle = FALSE; /* creating a new console allow easier close. 
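     (With CREATE_NEW_CONSOLE the child receives a console of its own, so
     that console can be torn down without touching the parent's one.)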
Do not use CREATE_NEW_PROCESS_GROUP as this results in disabling Ctrl+C */ flags = CREATE_NEW_CONSOLE; if (NILP (Vw32_start_process_inherit_error_mode)) flags |= CREATE_DEFAULT_ERROR_MODE; /* if app is not a gui application, hide the console */ if (is_gui == FALSE) { start.dwFlags |= STARTF_USESHOWWINDOW; start.wShowWindow = SW_HIDE; } /* Set initial directory to null character to use current directory */ if (!CreateProcess (NULL, cmdline, &sec_attrs, NULL, TRUE, flags, env, NULL, &start, &process->procinfo)) goto EH_Fail; pid = (int) process->procinfo.hProcess; process->pid=pid; return pid; EH_Fail: return -1; } /*********************** ** gvd_send_header () ***********************/ #define EXP_SLAVE_CREATE 'c' #define EXP_SLAVE_KEY 'k' #define EXP_SLAVE_MOUSE 'm' #define EXP_SLAVE_WRITE 'w' #define EXP_SLAVE_KILL 'x' #define EXP_KILL_TERMINATE 0x1 #define EXP_KILL_CTRL_C 0x2 #define EXP_KILL_CTRL_BREAK 0x4 void gvd_send_header (term_handler* p, char header[5], int size, int *ret) { if (p->usePipe == FALSE) { header[0] = EXP_SLAVE_WRITE; header[1] = size & 0xff; header[2] = (size & 0xff00) >> 8; header[3] = (size & 0xff0000) >> 16; header[4] = (size & 0xff000000) >> 24; *ret = 1; } else { *ret = 0; } } /******************************** ** gvd_setup_communication () ********************************/ int gvd_setup_communication (term_handler** process_out) /* output param */ { term_handler* process; process = (term_handler *)malloc (sizeof (term_handler)); ZeroMemory (process, sizeof (term_handler)); *process_out = process; return 0; } #define EXP_PIPE_BASENAME "\\\\.\\pipe\\ExpectPipe" int gvd_setup_child_communication (term_handler* process, char** argv, int Use_Pipes) { int cpid; HANDLE parent; SECURITY_ATTRIBUTES sec_attrs; char slavePath [MAX_PATH]; char **nargv; int argc; int i; char pipeNameIn[100]; HANDLE hSlaveInDrv = NULL; /* Handle to communicate with slave driver */ parent = GetCurrentProcess (); /* Set inheritance for the pipe handles */ sec_attrs.nLength = sizeof (SECURITY_ATTRIBUTES); sec_attrs.bInheritHandle = TRUE; sec_attrs.lpSecurityDescriptor = NULL; if (Use_Pipes) { /* Create in and out pipes */ if (!CreatePipe (&process->w_forkin, &process->w_infd, &sec_attrs, 0)) report_file_error ("Creation of child's IN handle", Qnil); if (!CreatePipe (&process->w_outfd, &process->w_forkout, &sec_attrs, 0)) report_file_error ("Creation of child's OUT handle", Qnil); /* Do not inherit the parent's side of the pipes */ SetHandleInformation (&process->w_infd, HANDLE_FLAG_INHERIT, 0); SetHandleInformation (&process->w_outfd, HANDLE_FLAG_INHERIT, 0); /* use native argv */ nargv = argv; process->usePipe = TRUE; } else { static int pipeNameId = 0; process->w_infd = NULL; /* We create a named pipe for Input, as we handle input by sending special commands to the explaunch process, that uses it to feed the actual input of the process */ sprintf(pipeNameIn, "%sIn%08x_%08x", EXP_PIPE_BASENAME, GetCurrentProcessId(), pipeNameId); pipeNameId++; hSlaveInDrv = CreateNamedPipe(pipeNameIn, PIPE_ACCESS_OUTBOUND, PIPE_TYPE_BYTE | PIPE_WAIT, 1, 8192, 8192, 20000, NULL); if (hSlaveInDrv == NULL) goto end; if (!CreatePipe (&process->w_outfd, &process->w_forkout, &sec_attrs, 0)) report_file_error ("Creation of child's OUT handle", Qnil); if (SearchPath (NULL, "explaunch.exe", NULL, MAX_PATH, slavePath, NULL) == 0) goto end; for (argc=0; argv[argc] != NULL; argc++) ; nargv = (char **) malloc (sizeof (char*) * (argc + 3)); nargv[0] = slavePath; nargv[1] = pipeNameIn; for (i = 0; i <= argc; i++) nargv[i + 2] 
= argv[i]; process->usePipe = FALSE; } /* Spawn the child. */ cpid = nt_spawnve (nargv[0], nargv, NULL, process); /* close the duplicated handles passed to the child */ CloseHandle (process->w_forkout); if (process->usePipe == TRUE) { CloseHandle (process->w_forkin); } else { UCHAR buf[8]; /* enough space for child status info */ DWORD count; BOOL bRet; DWORD dwRet; /* * Wait for connection with the slave driver */ bRet = ConnectNamedPipe(hSlaveInDrv, NULL); if (bRet == FALSE) { dwRet = GetLastError(); if (dwRet == ERROR_PIPE_CONNECTED) { ; } else { goto end; } } process->w_infd = hSlaveInDrv; /* * wait for slave driver to initialize before allowing user to send to it */ bRet = ReadFile(process->w_outfd, buf, 8, &count, NULL); if (bRet == FALSE) { cpid = -1; } dwRet = buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24); if (dwRet != 0) { cpid = -1; } cpid = buf[4] | (buf[5] << 8) | (buf[6] << 16) | (buf[7] << 24); process->pid = cpid; } if (cpid == -1) /* An error occurred while trying to spawn the process. */ report_file_error ("Spawning child process", Qnil); return cpid; end: if (hSlaveInDrv != NULL) CloseHandle (hSlaveInDrv); return -1; } int gvd_setup_parent_communication (term_handler* process, int* in, int* out, int* err, int* pid) { process->infd = _open_osfhandle ((long) process->w_infd, 0); process->outfd = _open_osfhandle ((long) process->w_outfd, 0); *out = process->outfd; *in = process->infd; /* child's stderr is always redirected to outfd */ *err = *out; *pid = process->pid; } typedef struct _child_process { HWND hwnd; PROCESS_INFORMATION *procinfo; } child_process; /* The major and minor versions of NT. */ static int w32_major_version; static int w32_minor_version; /* Distinguish between Windows NT and Windows 95. */ static enum {OS_UNKNOWN, OS_WIN95, OS_NT} os_subtype = OS_UNKNOWN; /* Cache information describing the NT system for later use. */ static void cache_system_info (void) { union { struct info { char major; char minor; short platform; } info; DWORD data; } version; /* Cache the version of the operating system. */ version.data = GetVersion (); w32_major_version = version.info.major; w32_minor_version = version.info.minor; if (version.info.platform & 0x8000) os_subtype = OS_WIN95; else os_subtype = OS_NT; } static BOOL CALLBACK find_child_console (HWND hwnd, child_process * cp) { DWORD thread_id; DWORD process_id; thread_id = GetWindowThreadProcessId (hwnd, &process_id); if (process_id == cp->procinfo->dwProcessId) { char window_class[32]; GetClassName (hwnd, window_class, sizeof (window_class)); if (strcmp (window_class, (os_subtype == OS_WIN95) ? "tty" : "ConsoleWindowClass") == 0) { cp->hwnd = hwnd; return FALSE; } } /* keep looking */ return TRUE; } int gvd_interrupt_process (term_handler* p) { char buf[2]; DWORD written; BOOL bret; if (p->usePipe == TRUE) { bret = FALSE; } else { buf[0] = EXP_SLAVE_KILL; buf[1] = EXP_KILL_CTRL_C; bret = WriteFile (p->w_infd, buf, 2, &written, NULL); } if (bret == FALSE) { return gvd_interrupt_pid (p->procinfo.dwProcessId); } return 0; } int gvd_interrupt_pid (int pid) { volatile child_process cp; int rc = 0; cp.procinfo = (LPPROCESS_INFORMATION) malloc (sizeof (PROCESS_INFORMATION)); cp.procinfo->dwProcessId = pid; if (os_subtype == OS_UNKNOWN) cache_system_info (); /* Try to locate console window for process. 
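     (EnumWindows below invokes find_child_console for every top-level
     window; the callback keeps the window whose process id matches and
     whose class is "ConsoleWindowClass" (or "tty" on Windows 95), then
     returns FALSE to stop the enumeration.)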
*/ EnumWindows ((WNDENUMPROC) find_child_console, (LPARAM) &cp); if (cp.hwnd) { BYTE control_scan_code = (BYTE) MapVirtualKey (VK_CONTROL, 0); /* Retrieve Ctrl-C scancode */ BYTE vk_break_code = 'C'; BYTE break_scan_code = (BYTE) MapVirtualKey (vk_break_code, 0); HWND foreground_window; foreground_window = GetForegroundWindow (); if (foreground_window) { /* NT 5.0, and apparently also Windows 98, will not allow a Window to be set to foreground directly without the user's involvement. The workaround is to attach ourselves to the thread that owns the foreground window, since that is the only thread that can set the foreground window. */ DWORD foreground_thread, child_thread; foreground_thread = GetWindowThreadProcessId (foreground_window, NULL); if (foreground_thread == GetCurrentThreadId () || !AttachThreadInput (GetCurrentThreadId (), foreground_thread, TRUE)) foreground_thread = 0; child_thread = GetWindowThreadProcessId (cp.hwnd, NULL); if (child_thread == GetCurrentThreadId () || !AttachThreadInput (GetCurrentThreadId (), child_thread, TRUE)) child_thread = 0; /* Set the foreground window to the child. */ if (SetForegroundWindow (cp.hwnd)) { /* Generate keystrokes as if user had typed Ctrl-Break or Ctrl-C. */ keybd_event (VK_CONTROL, control_scan_code, 0, 0); keybd_event (vk_break_code, break_scan_code, (vk_break_code == 'C' ? 0 : KEYEVENTF_EXTENDEDKEY), 0); keybd_event (vk_break_code, break_scan_code, (vk_break_code == 'C' ? 0 : KEYEVENTF_EXTENDEDKEY) | KEYEVENTF_KEYUP, 0); keybd_event (VK_CONTROL, control_scan_code, KEYEVENTF_KEYUP, 0); /* Sleep for a bit to give time for the main frame to respond to focus change events. */ Sleep (100); SetForegroundWindow (foreground_window); } /* Detach from the foreground and child threads now that the foreground switching is over. */ if (foreground_thread) AttachThreadInput (GetCurrentThreadId (), foreground_thread, FALSE); if (child_thread) AttachThreadInput (GetCurrentThreadId (), child_thread, FALSE); } } /* Ctrl-Break is NT equivalent of SIGINT. */ else if (!GenerateConsoleCtrlEvent (CTRL_BREAK_EVENT, cp.procinfo->dwProcessId)) { errno = EINVAL; rc = -1; } free (cp.procinfo); return rc; } /* kill a process, as this implementation use CreateProcess on Win32 we need to use Win32 TerminateProcess API */ int gvd_terminate_process (term_handler* p) { char buf[2]; DWORD written; BOOL bret; close(p->infd); close(p->outfd); if (p->usePipe == TRUE) { bret = FALSE; } else { buf[0] = EXP_SLAVE_KILL; buf[1] = EXP_KILL_TERMINATE; bret = WriteFile (p->w_infd, buf, 2, &written, NULL); } if (bret == FALSE) { if (!TerminateProcess (p->procinfo.hProcess, 1)) return -1; else return 0; } else return 0; } /* wait for process pid to terminate and return the process status. This implementation is different from the adaint.c one for Windows as it uses the Win32 API instead of the C one. 
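   (Descriptive note: WaitForSingleObject is called with a zero timeout
   below, so the call never blocks; GetExitCodeProcess then reports the
   exit code, or STILL_ACTIVE if the process has not terminated yet.)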
*/ int gvd_waitpid (term_handler* p) { int status = 0; DWORD exitcode; DWORD res; HANDLE proc_hand = p->procinfo.hProcess; res = WaitForSingleObject (proc_hand, 0); GetExitCodeProcess (proc_hand, &exitcode); CloseHandle (p->procinfo.hThread); CloseHandle (p->procinfo.hProcess); /* No need to close the handles: they were closed on the ada side */ return (int) exitcode; } /******************************** ** gvd_free_process () ********************************/ void gvd_free_process (void* process) { free ((term_handler *)process); } /* TTY handling */ typedef struct { int tty_fd; /* descriptor for the tty */ char tty_name[24]; /* Name of TTY device */ } TTY_Handle; int gvd_tty_supported () { return 0; } /* Return the tty name associated with p */ char * gvd_tty_name (TTY_Handle* t) { return t->tty_name; } int gvd_tty_fd (TTY_Handle* t) { return t->tty_fd; } TTY_Handle* gvd_new_tty () { return (TTY_Handle*)0; } void gvd_reset_tty (TTY_Handle* t) { return; } void gvd_close_tty (TTY_Handle* t) { free (t); } void gvd_setup_winsize (void *desc, int rows, int columns) { } int __gnat_expect_poll (int *fd, int num_fd, int timeout, int *is_set) { #define MAX_DELAY 100 int i, delay, infinite = 0; DWORD avail; HANDLE handles[num_fd]; for (i = 0; i < num_fd; i++) is_set[i] = 0; for (i = 0; i < num_fd; i++) handles[i] = (HANDLE) _get_osfhandle (fd [i]); /* Start with small delays, and then increase them, to avoid polling too much when waiting a long time */ delay = 5; if (timeout < 0) infinite = 1; while (1) { for (i = 0; i < num_fd; i++) { if (!PeekNamedPipe (handles [i], NULL, 0, NULL, &avail, NULL)) return -1; if (avail > 0) { is_set[i] = 1; return 1; } } if (!infinite && timeout <= 0) return 0; Sleep (delay); timeout -= delay; if (delay < MAX_DELAY) delay += 10; } } #endif /* WIN32 */ #undef _GNU_SOURCE #include "Python.h" /* python signature: non_blocking_spawn() */ static PyObject * non_blocking_spawn (PyObject *self, PyObject *args) { PyObject *py_cmd_args = PyTuple_GetItem(args, 0); int py_cmd_args_n = PyTuple_Size (py_cmd_args); char *cmd_args[py_cmd_args_n + 1]; int j; int pid, in_fd, out_fd, err_fd; term_handler *desc; PyObject *result; for (j=0; j 0) { PyTuple_SetItem(result, 1, PyString_FromStringAndSize(buffer, read_status)); } else { Py_INCREF(Py_None); PyTuple_SetItem(result, 1, Py_None); } return result; } static PyObject * expect_write(PyObject *self, PyObject *args) { int fd = (int) PyInt_AsLong(PyTuple_GetItem(args, 0)); int size = (int) PyString_Size(PyTuple_GetItem(args, 1)); char *buffer = PyString_AsString(PyTuple_GetItem(args, 1)); int write_status; PyObject *result; write_status = write(fd, buffer, size); return PyInt_FromLong((long) write_status); } static PyObject * expect_terminate_process(PyObject *self, PyObject *args) { term_handler *desc = (term_handler *) PyCObject_AsVoidPtr(PyTuple_GetItem(args, 0)); gvd_terminate_process(desc); Py_INCREF(Py_None); return Py_None; } static PyObject * expect_interrupt_process(PyObject *self, PyObject *args) { term_handler *desc = (term_handler *) PyCObject_AsVoidPtr(PyTuple_GetItem(args, 0)); gvd_interrupt_process(desc); Py_INCREF(Py_None); return Py_None; } static PyObject * expect_waitpid(PyObject *self, PyObject *args) { int result; term_handler *desc = (term_handler *) PyCObject_AsVoidPtr(PyTuple_GetItem(args, 0)); result = gvd_waitpid(desc); return PyInt_FromLong((long) result); } static PyMethodDef TermMethods[] = { {"non_blocking_spawn", non_blocking_spawn, METH_VARARGS, "spawn a command"}, {"poll", poll, METH_VARARGS, "poll"}, 
{"read", expect_read, METH_VARARGS, "read"}, {"write", expect_write, METH_VARARGS, "write"}, {"waitpid", expect_waitpid, METH_VARARGS, "waitpid"}, {"interrupt", expect_interrupt_process, METH_VARARGS, "interrupt"}, {"terminate", expect_terminate_process, METH_VARARGS, "terminate"}, {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init_term(void) { PyObject *m; m = Py_InitModule("_term", TermMethods); } python-gnatpython-54.orig/src/rlimit/0000755000175000017500000000000011654532465017435 5ustar xavierxavierpython-gnatpython-54.orig/src/rlimit/rlimit.c0000644000175000017500000001277311654532465021113 0ustar xavierxavier/**************************************************************************** * * * RLIMIT.C * * * * Copyright (C) 1996 - 2010 Ada Core Technologies, Inc. * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see * * * ****************************************************************************/ /* rlimit - limit the execution time of a command Usage: rlimit seconds command [args] */ #include #include #include #include #include #include #include #include void usage (void) { printf ("Usage:\n"); printf (" rlimit seconds command [args]\n"); exit (1); } static int pid = 0; /* pid of child (controlled) process */ int status = 0; int return_status = 0; /* * Handler for SIGTERM, also used for cleanup actions after receiving * SIGCHLD and SIGALRM. */ void terminate_group(int nsig) { if (nsig != SIGCHLD) { /* Set SIGCHLD back to default */ signal (SIGCHLD, SIG_DFL); } kill (-pid, SIGTERM); sleep (1); kill (-pid, SIGKILL); } /* Handler for SIGCHLD */ void reapchild (int nsig) { int delay; if (pid > 0) { int rc; /* * Wait for the (only) child process. Since we have received SIGCHLD, * we know that this will not return ECHILD or 0. Note that waitpid(3) * won't report information for indirect descendants, but only for direct * child processes, in any case. */ rc = waitpid (pid, &status, WNOHANG); if (rc < 0) { perror ("waitpid"); return; } /* Get child process exit status */ if (WIFEXITED (status) != 0) { return_status = WEXITSTATUS (status); } else { return_status = -125; /* ??? This junk value is invalid. */ } /* * Check for remaining processes in the child group. Give them * 5 seconds to die gracefully. */ delay = 5; while (delay > 0 && !(kill (-pid, 0) == -1 && errno == ESRCH)) { sleep (1); --delay; } if (delay == 0) { terminate_group (SIGCHLD); } /* Report exit status from child process to caller. */ exit (return_status); } else { /* Never happens (the child process does an execve and does not fork). */ exit (0); } } int main (int argc, char **argv) { sigset_t block_cld; /* we need at least 3 args */ if (argc < 3) usage (); /* argv[0] = .../rlimit argv[1] = seconds argv[2] = command argv[3] = args */ signal (SIGTERM, terminate_group); /* * When the child process exits early, SIGCHLD might be emitted before the * pid variable is set in the parent process. 
On the other hand, we do want * to receive the signal so we have a chance to kill any other process it * might have spawned in the meantime. So, we establish the SIGCHLD handler * early, and block SIGCHLD until pid has been set. */ signal (SIGCHLD, reapchild); sigemptyset(&block_cld); sigaddset(&block_cld, SIGCHLD); sigprocmask(SIG_BLOCK, &block_cld, NULL); pid = fork (); switch (pid) { case -1: perror ("fork"); exit (3); case 0: /* first unblock SIGCHLD */ sigprocmask(SIG_UNBLOCK, &block_cld, NULL); /* child exec the command in a new process group */ if (setpgid (0, 0)) { perror ("setpgid"); exit (4); } execvp ((const char *) argv[2], (char *const *) &argv[2]); perror ("execvp"); exit (5); default: { /* parent sleeps wake up when the sleep call returns or when SIGCHLD is received */ int seconds = atoi (argv[1]); /* pid variable is now set correctly so unblock SIGCHLD */ sigprocmask(SIG_UNBLOCK, &block_cld, NULL); seconds = sleep (seconds); if (seconds == 0) { /* Sleep call returns, time limit elapsed, children must be slaughtered. * * Print the diagnostic first: On some systems (eg. LynxOS) the * handler for SIGCHLD may interrupt write(2) and garble the * message. */ fprintf (stderr, "rlimit: Real time limit exceeded\n"); fflush (stderr); terminate_group (SIGALRM); exit (2); } else { /* sleep(3) was interrupted, assume it was a manual action. */ exit (0); } } } return return_status; } python-gnatpython-54.orig/src/rlimit/rlimit-NT.c0000644000175000017500000002235511654532465021427 0ustar xavierxavier/**************************************************************************** * * * RLIMIT-NT.C * * * * Copyright (C) 1996-2011, AdaCore * * * * This program is free software: you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation, either version 3 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program. If not, see * * * ****************************************************************************/ /* rlimit - limit the execution time of a command Usage: rlimit seconds command [args] */ /* Define _WIN32_WINNT at least to 0x0500 in order to have visibility on Job related API. */ #define _WIN32_WINNT 0x0500 #include #include #include /* The following feature is only available starting with Windows XP. When the last handle on the job is closed the job is automatically terminated (i.e all processes that are part of the job are killed). */ #define JOB_OBJECT_KILL_ON_JOB_CLOSE 0x00002000 void error_msg (char *msg) { fprintf (stderr, "rlimit: %s (error 0x%d)\n", msg, GetLastError ()); } HANDLE create_job () { SECURITY_ATTRIBUTES JobAttributes; JOBOBJECT_BASIC_LIMIT_INFORMATION JobBasicLimits; JOBOBJECT_EXTENDED_LIMIT_INFORMATION JobExtendedLimits; HANDLE result; BOOL status; /* We need to make sure that only the rlimit process holds a handler on the Job. This way, in case the rlimit process is killed, the associated Job and its processes will also be killed (JOB_OBJECT_KILL_ON_JOB_CLOSE is also set). 
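   (This is also why bInheritHandle is set to FALSE just below: child
   processes must not inherit the Job handle, otherwise rlimit would no
   longer be its sole owner.)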
*/ JobAttributes.nLength = sizeof (SECURITY_ATTRIBUTES); JobAttributes.bInheritHandle = FALSE; JobAttributes.lpSecurityDescriptor = NULL; result = CreateJobObject (&JobAttributes, NULL); if (result == NULL) { error_msg ("cannot create new job"); return NULL; } /* Set job attributes */ JobBasicLimits.LimitFlags = JOB_OBJECT_LIMIT_BREAKAWAY_OK | JOB_OBJECT_KILL_ON_JOB_CLOSE; JobExtendedLimits.BasicLimitInformation = JobBasicLimits; status = SetInformationJobObject (result, JobObjectExtendedLimitInformation, &JobExtendedLimits, sizeof (JobExtendedLimits)); if (status == 0) error_msg ("warning cannot set job attributes"); return result; } usage () { printf ("Usage:\n"); printf (" rlimit seconds command [args]\n"); exit (1); } main (int argc, char* argv[]) { /* Hold various Win32 API return status */ BOOL result; /* Needed structures to spawn the subprocess */ STARTUPINFO StartupInfo; PROCESS_INFORMATION ProcessInfo; SECURITY_ATTRIBUTES ProcessAttr; HANDLE Job; char CommandLine[4096]; int Timeout; int child_finished; /* We need at least 3 args. */ if (argc < 3) usage (); /* Retrieve timeout and ensure that it is superior to 0 */ Timeout = atoi (argv[1]); if (Timeout <= 0) { printf ("rlimit: timeout should be > 0\n"); exit (1); } /* Compute command line string. When a parameter contains a " or a space we should quote it with doublequotes. Double quotes inside the string should be escaped by a backslash. All backslashes precedind a " should also be escaped. */ /* First copy the command name */ strcpy (CommandLine, argv[2]); strcat (CommandLine, " "); /* Now take care of the arguments */ { int k; for (k = 3; k < argc; k++) { char *ca = argv[k]; /* current arg */ int ca_index; /* index of the current character in ca */ int need_quote = 1; /* set to 1 if quotes are needed */ /* Should we quote the string ? */ if (strlen(ca) > 0) need_quote = 0; for (ca_index = 0; ca_index < strlen(ca); ca_index++) { if (ca[ca_index] == ' ' || ca[ca_index] == '"') { need_quote = 1; break; } } /* Do quoting if necessary. Note it is important not to quote arguments that do not need it as some buggy implementations such vxsim will see for example -p as "-p" :-). */ if (need_quote == 1) { int cl_index = strlen(CommandLine); /* Open the double quoted string */ CommandLine[cl_index] = '"'; cl_index++; for (ca_index = 0; ca_index < strlen(ca); ca_index++) { /* We have a double in the argument. It should be escaped along with all previous backslashes. */ if (ca[ca_index] == '"') { /* We have blackslashes before the double quote. They should be quoted. */ if (ca_index > 0 && ca[ca_index - 1] == '\\') { int j; for (j = ca_index - 1; j >= 0 && ca[j] == '\\' ;j--) { CommandLine[cl_index] = '\\'; cl_index++; } } CommandLine[cl_index] = '\\'; cl_index++; CommandLine[cl_index] = '"'; cl_index++; } else { /* This is not a double quote so just add the character */ CommandLine[cl_index] = ca[ca_index]; cl_index++; /* We have blackslashes before the ending double quote. They should be quoted. */ if (ca[ca_index] == '\\' && ca_index + 1 == strlen(ca)) { int j; for (j = ca_index; j >= 0 && ca[j] == '\\' ;j--) { CommandLine[cl_index] = '\\'; cl_index++; } } } } /* Close the doublequoted string */ CommandLine[cl_index] = '"'; cl_index++; CommandLine[cl_index] = ' '; cl_index++; CommandLine[cl_index] = '\0'; } else /* The argument does not need quoting. 
Just append it to the command line */ { strcat (CommandLine, ca); strcat (CommandLine, " "); } } } Job = create_job (); if (Job == NULL) exit (1); /* Startup info */ StartupInfo.cb = sizeof (STARTUPINFO); StartupInfo.lpReserved = NULL; StartupInfo.lpReserved2 = NULL; StartupInfo.lpDesktop = NULL; StartupInfo.cbReserved2 = 0; StartupInfo.lpTitle = NULL; StartupInfo.dwFlags = 0; StartupInfo.wShowWindow = SW_HIDE; /* Security attributes */ ProcessAttr.nLength = sizeof (SECURITY_ATTRIBUTES); ProcessAttr.bInheritHandle = TRUE; ProcessAttr.lpSecurityDescriptor = NULL; /* Spawn the process */ result = CreateProcess (NULL, (char *) CommandLine, &ProcessAttr, /* Process attributes */ NULL, /* Thread attributes */ TRUE, /* InheritHandles */ NORMAL_PRIORITY_CLASS | CREATE_NEW_PROCESS_GROUP | CREATE_SUSPENDED | CREATE_BREAKAWAY_FROM_JOB , /* Creation flags */ NULL, /* Environment */ NULL, /* Current dir */ &StartupInfo, /* Startup info */ &ProcessInfo); /* Process Information */ if (result == 0) { error_msg ("cannot spawn process"); exit (1); } /* Assign the process to the Job */ result = AssignProcessToJobObject (Job, ProcessInfo.hProcess); /* Resume the child process */ ResumeThread (ProcessInfo.hThread); /* Wait until child process terminates or until Timeout is reached. */ child_finished = WaitForSingleObject (ProcessInfo.hProcess, Timeout * 1000) == WAIT_OBJECT_0; if (child_finished) { DWORD exit_code; if (GetExitCodeProcess (ProcessInfo.hProcess, &exit_code)) { exit (exit_code); } else { exit (0); } } /* Child process is not finished and timeout has elapsed so terminate the Job. */ TerminateJobObject (Job, 1); CloseHandle (ProcessInfo.hProcess); fprintf (stderr, "rlimit: Real time limit exceeded\n"); exit (2); } python-gnatpython-54.orig/setup.py0000755000175000017500000001115611654532470017063 0ustar xavierxavier#!/usr/bin/env python ############################################################################ # # # SETUP.PY # # # # Copyright (C) 2010-2011 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ from distutils.command.build_ext import build_ext from distutils.command.build_scripts import build_scripts from distutils.core import setup from distutils.extension import Extension from distutils.sysconfig import get_python_inc, get_python_lib import glob import os import platform import sys from gnatpython import __version__ # Distutils does not support support for compiled programs. So override de # build_scripts command with ours. We first compile our program and copy # it along with the Python scripts. Then we call the regular build_scripts # command. 
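# For instance, a plain "python setup.py build" or "python setup.py install"
# is expected to end up in build_scripts_gnatpython.run() below: rlimit is
# first compiled with gcc into scripts/, the scripts list is refreshed with
# glob.glob('scripts/*'), and only then does the stock build_scripts run
# (hedged description of the intended flow, based on the code that follows).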
class build_scripts_gnatpython(build_scripts): def run(self): if 'Windows' in platform.system() or 'CYGWIN' in platform.system(): os.system('gcc -o scripts/rlimit src/rlimit/rlimit-NT.c') else: os.system('gcc -o scripts/rlimit src/rlimit/rlimit.c') # Update the scripts list self.scripts = glob.glob('scripts/*') build_scripts.run(self) # Our C module requires a mingw compiler. On windows python will use by # default the Microsoft one and even if we use the --compiler=mingw option # it does not seem to work all the time. So override the build_ext command. # If the the platform is windows use out manual procedure. Otherwise use # the regular build_ext implementation. class build_ext_gnatpython(build_ext): def build_extension(self, ext): if 'Windows' not in platform.system() and \ 'CYGWIN' not in platform.system(): return build_ext.build_extension(self, ext) else: # Get the python installation prefix python_prefix = sys.prefix # The Python version python_version = "%d.%d" % \ (sys.version_info[0], sys.version_info[1]) # The location of the static library (in fact an import library) python_lib = "%s/libs/libpython%s%s.a" % \ (sys.prefix, sys.version_info[0], sys.version_info[1]) # Find the location of Python includes in various locations. python_stdlib_dir = get_python_lib(True, False) python_include_dir = None for p in (get_python_inc(False), python_stdlib_dir + '/config', python_prefix + '/include/python/%s' % (python_version)): if os.path.isfile(p + '/Python.h'): python_include_dir = p break # Build our module with mingw GCC os.system('gcc -shared -static-libgcc -o %s/gnatpython/_term.pyd %s -I%s %s' % \ (self.build_lib, ' '.join(ext.sources), python_include_dir, python_lib)) setup(name='gnatpython', version=__version__, author="AdaCore", author_email="report@adacore.com", packages=['gnatpython'], scripts=glob.glob('scripts/*'), cmdclass={'build_scripts': build_scripts_gnatpython, 'build_ext': build_ext_gnatpython}, ext_modules=[Extension('gnatpython._term', ['src/mod_term/terminals.c'])]) python-gnatpython-54.orig/gnatpython/0000755000175000017500000000000011654532470017535 5ustar xavierxavierpython-gnatpython-54.orig/gnatpython/tree.py0000644000175000017500000000654111654532470021054 0ustar xavierxavier ############################################################################ # # # TREE.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """list contents of directories in a tree-like format. Produces a depth indented listing of files. 
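
For example (illustrative snippet, assuming some_dir exists):

    from gnatpython.tree import tree
    for line in tree('some_dir'):
        print line

This prints the directory name followed by indented "|--" / "`--" entries
for each subdirectory and file; pass stdout=True to print directly instead
of returning the list of lines.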
""" import os from os.path import walk def tree(directory, stdout=False): """Returns a depth indented listing of files If stdout is true, print directly on stdout else return the list of indented lines """ def print_line(output, line): """Print line to stdout or append in output""" if output is None: print line else: output.append(line) if not stdout: output = [] else: # Do not return a list but print all lines to stdout output = None print_line(output, directory) def print_files(output, dirname, fnames): """Add filename to the output""" dir_relative_path = os.path.normpath(dirname[len(directory):]) indent = '| ' nb_indent = 0 _, tail = os.path.split(dir_relative_path) # Count number of / in the path to compute the indent nb_indent = dir_relative_path.count(os.path.sep) if tail and tail != ".": # If not the root directory, output the directory name print_line(output, "%s|-- %s" % (indent * nb_indent, tail)) else: # Else no indent nb_indent = -1 # Print all file names in the current directory fnames.sort() for fname in fnames: if not os.path.isdir(os.path.join(dirname, fname)): if fname == fnames[-1] and nb_indent != -1: # Pretty print the last file sep = '`' else: sep = '|' print_line(output, "%s%s-- %s" % (indent * (nb_indent + 1), sep, fname)) walk(directory, print_files, output) return output python-gnatpython-54.orig/gnatpython/ex.py0000644000175000017500000004063111654532470020527 0ustar xavierxavier ############################################################################ # # # EX.PY # # # # Copyright (C) 2008 - 2011 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """Subprocesses management This package provides a single class called run which ease spawn of processes in blocking or non blocking mode and redirection of its stdout, stderr and stdin""" from subprocess import Popen, STDOUT, PIPE import errno import logging import os import sys BUF_SIZE = 128 logger = logging.getLogger('gnatpython.ex') class Run(object): """ ATTRIBUTES cmds : The `cmds' argument passed to the __init__ method (a command line passed in a list, of a list of command lines passed as a list of list). status : exit status (meaningfull only after the end of the process) out : process standard output (if instanciated with output = PIPE) err : same as out but for standard error pid : PID """ def __init__(self, cmds, cwd=None, output=PIPE, error=STDOUT, input=None, bg=False, timeout=None, env=None, set_sigpipe=True, parse_shebang=False): """Spawn a process PARAMETERS cmds: two possibilities: (1) a command line: a tool name and its arguments, passed in a list. e.g. ['ls', '-a', '.'] (2) a list of command lines (as defined in (1)): the different commands will be piped. This means that [['ps', '-a'], ['grep', 'vxsim']] will be equivalent to the system command line 'ps -a | grep vxsim'. 
cwd : directory in which the process should be executed (string or None). If None then current directory is used output: can be PIPE (default), a filename string, a fd on an already opened file, a python file object or None (for stdout). error: same as output or STDOUT, which indicates that the stderr data from the applications should be captured into the same file handle as for stdout. input: same as output bg: if True then run in background timeout: limit execution time (in seconds) env: dictionary for environment variables (e.g. os.environ) set_sigpipe: reset SIGPIPE handler to default value parse_shebang: take the #! interpreter line into account RETURN VALUE Return an object of type run. EXCEPTIONS Raise OSError when trying to execute a non-existent file. REMARKS If you specify a filename for output or stderr then file content is reseted (equiv. to > in shell). If you prepend the filename with '+' then the file will be opened in append mode (equiv. to >> in shell) If you prepend the input with '|', then the content of input string will be used for process stdin. """ def subprocess_setup(): """Reset SIGPIPE hander Python installs a SIGPIPE handler by default. This is usually not what non-Python subprocesses expect. """ if set_sigpipe: # Set sigpipe only when set_sigpipe is True # This should fix HC16-020 and could be activated by default import signal signal.signal(signal.SIGPIPE, signal.SIG_DFL) def add_interpreter_command(cmd_line): """Add the interpreter defined in the #! line to cmd_line If the #! line cannot be parsed, just return the cmd_line unchanged REMARKS if the interpreter command line contains /usr/bin/env python it will be replaced by the value of sys.executable On windows, /usr/bin/env will be ignored to avoid a dependency on cygwin """ if not parse_shebang: # nothing to do return cmd_line # Import gnatpython.fileutils just now to avoid a circular # dependency from gnatpython.fileutils import which prog = which(cmd_line[0]) if not os.path.exists(prog): return cmd_line with open(prog) as f: header = f.read()[0:2] if header != "#!": # Unknown header return cmd_line # Header found, get the interpreter command in the first line f.seek(0) line = f.readline() interpreter_cmds = [l.strip() for l in line[line.find('!') + 1:].split()] # Pass the program path to the interpreter if len(cmd_line) > 1: cmd_line = [prog] + cmd_line[1:] else: cmd_line = [prog] # If the interpreter is '/usr/bin/env python', use # sys.executable instead to keep the same python executable if interpreter_cmds[0:1] == ['/usr/bin/env', 'python']: if len(interpreter_cmds > 2): return [sys.executable] + interpreter_cmds[2:] \ + cmd_line else: return [sys.executable] + cmd_line elif sys.platform == 'win32': if interpreter_cmds[0] == '/usr/bin/env': return interpreter_cmds[1:] + cmd_line return interpreter_cmds + cmd_line # First resolve output, error and input self.input_file = File(input, 'r') self.output_file = File(output, 'w') self.error_file = File(error, 'w') self.status = None self.out = '' self.err = '' if env is None: env = os.environ rlimit_args = [] if timeout is not None: # Import gnatpython.fileutils just now to avoid a circular # dependency from gnatpython.fileutils import get_rlimit rlimit = get_rlimit() assert rlimit, 'rlimit not found' rlimit_args = [rlimit, '%d' % timeout] try: if not isinstance(cmds[0], list): self.cmds = rlimit_args + add_interpreter_command(cmds) logger.debug('Run: %s' % self.command_line_image()) popen_args = { 'stdin': self.input_file.fd, 'stdout': self.output_file.fd, 
'stderr': self.error_file.fd, 'cwd': cwd, 'env': env, 'universal_newlines': True} if sys.platform != 'win32': # preexec_fn is no supported on windows popen_args['preexec_fn'] = subprocess_setup self.internal = Popen(self.cmds, **popen_args) else: self.cmds = [add_interpreter_command(c) for c in cmds] self.cmds[0] = rlimit_args + self.cmds[0] logger.debug('Run: %s ' % " | ".join([" ".join(cmd) for cmd in self.cmds])) runs = [] for index, cmd in enumerate(self.cmds): if index == 0: stdin = self.input_file.fd else: stdin = runs[index - 1].stdout # When connecting two processes using a Pipe don't use # universal_newlines mode. Indeed commands transmitting # binary data between them will crash # (ex: gzip -dc toto.txt | tar -xf -) if index == len(self.cmds) - 1: stdout = self.output_file.fd txt_mode = True else: stdout = PIPE txt_mode = False popen_args = { 'stdin': stdin, 'stdout': stdout, 'stderr': self.error_file.fd, 'cwd': cwd, 'env': env, 'universal_newlines': txt_mode} if sys.platform != 'win32': # preexec_fn is no supported on windows popen_args['preexec_fn'] = subprocess_setup runs.append(Popen(cmd, **popen_args)) self.internal = runs[-1] except Exception, e: self.__error(e, self.cmds) raise self.pid = self.internal.pid if not bg: self.wait() def command_line_image(self): """Return a string representation of the command(s) that were run to create this object. REMARKS This method also handles quoting as defined for POSIX shells. This means that arguments containing special characters (such as a simple space, or a backslash, for instance), are properly quoted. This makes it possible to execute the same command by copy/pasting the image in a shell prompt. """ def quote_arg(arg): """Return a human-friendly representation of the given argument, but with all extra quoting done if necessary. The intent is to produce an argument image that can be copy/pasted on a POSIX shell command (at a shell prompt). """ need_quoting = ('|', '&', ';', '<', '>', '(', ')', '$', '`', '\\', '"', "'", ' ', '\t', '\n', # The POSIX spec says that the following # characters might need some extra quoting # depending on the circumstances. We just # always quote them, to be safe (and to avoid # things like file globbing which are sometimes # performed by the shell). We do leave '%' and # '=' alone, as I don't see how they could # cause problems. '*', '?', '[', '#', '~') for char in need_quoting: if char in arg: # The way we do this is by simply enclosing the argument # inside single quotes. However, we have to be careful # of single-quotes inside the argument, as they need # to be escaped (which we cannot do while still inside. # a single-quote string). arg = arg.replace("'", r"'\''") # Also, it seems to be nicer to print new-line characters # as '\n' rather than as a new-line... arg = arg.replace('\n', r"'\n'") return "'%s'" % arg # No quoting needed. Return the argument as is. return arg cmds = self.cmds if not isinstance(cmds[0], list): # Turn the simple command into a special case of # the multiple-commands case. This will allow us # to treat both cases the same way. cmds = [cmds] return ' | '.join([' '.join([quote_arg(arg) for arg in cmd]) for cmd in cmds]) def _close_files(self): """Internal procedure""" self.output_file.close() self.error_file.close() self.input_file.close() def __error(self, error, cmds): """Set pid to -1 and status to 127 before closing files""" self.pid = -1 self.status = 127 self._close_files() # Try to send an helpful message if one of the executable has not # been found. 
not_found = None # Import gnatpython.fileutils here to avoid a circular dependency from gnatpython.fileutils import which if not isinstance(cmds[0], list): if not which(cmds[0]): not_found = cmds[0] else: for cmd in cmds: if not which(cmd[0]): not_found = cmd[0] break if not_found is not None: logger.error("%s, %s not found" % (error, not_found)) raise OSError(getattr(error, 'errno', errno.ENOENT), getattr(error, 'strerror', 'No such file or directory') + " %s not found" % not_found) def wait(self): """Wait until process ends and return its status""" if self.status == 127: return self.status self.status = None # If there is no pipe in the loop then just do a wait. Otherwise # in order to avoid blocked processes due to full pipes, use # communicate. if self.output_file.fd != PIPE and self.error_file.fd != PIPE and \ self.input_file.fd != PIPE: self.status = self.internal.wait() else: tmp_input = None if self.input_file.fd == PIPE: tmp_input = self.input_file.get_command() (self.out, self.err) = self.internal.communicate(tmp_input) self.status = self.internal.returncode self._close_files() return self.status def poll(self): """Test if the process is still alive. If yes then return None, otherwise return process status""" if self.status != 127: result = self.internal.poll() if result is not None: self.status = result else: result = 127 return result class File(object): """Can be a PIPE, a file object""" def __init__(self, name, mode='r'): """Create a new File PARAMETERS name: can be PIPE, STDOUT, a filename string, an opened fd, a python file object, or a command to pipe (if starts with |) mode: can be 'r' or 'w' if name starts with + the mode will be a+ """ assert mode in 'rw', 'Mode should be r or w' self.name = name self.to_close = False if isinstance(name, str): # can be a pipe or a filename if mode == 'r' and name.startswith('|'): self.fd = PIPE else: if mode == 'w': if name.startswith('+'): open_mode = 'a+' name = name[1:] else: open_mode = 'w+' else: open_mode = 'r' self.fd = open(name, open_mode) if open_mode == 'a+': self.fd.seek(0, 2) self.to_close = True else: # this is a file descriptor self.fd = name def get_command(self): """Returns the command to run to create the pipe""" if self.fd == PIPE: return self.name[1:] def close(self): """Close the file if needed""" if self.to_close: self.fd.close() python-gnatpython-54.orig/gnatpython/vcs.py0000644000175000017500000003070611654532470020710 0ustar xavierxavier ############################################################################ # # # VCS.PY # # # # Copyright (C) 2008 - 2011 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """Version control management systems interface Currently this module provide a single class called SVN to interact with Subversion repositories. 
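
Typical use (illustrative only; the URL and paths are made up):

    from gnatpython.vcs import SVN
    wd = SVN('svn://example.org/repos', 'gnatpython', '/tmp/wd',
             branch='trunk')
    wd.update()
    print wd.info()['Revision']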
""" from gnatpython.ex import Run from xml.dom import minidom import logging import os # Set the logger for this module svnlogger = logging.getLogger('gnatpython.vcs') class SVN_Error(Exception): pass class SVN(object): """Interface to Subversion ATTRIBUTES root : the root of the subversion directory module : the module path dest : the working directory path branch : the branch rev : the revision used url : the effective Subversion url """ def __init__(self, root, module, dest, branch='trunk', rev=None, use_externals=False): """Initialize a Subversion working environment PARAMETERS root : root of the subversion repository module : module path dest : working directory branch : branch to use rev : revision to use RETURN VALUE a SVN instance REMARKS Currently if the working directory is not a current checkout of the targeted subversion repository, the initialization routine will perform a full checkout. Do not rely on this in your script as the upcoming subversion 1.5 will allow us to set a working dir without doing the checkout. If you want to perform a full checkout of a repository you must call the update method without any argument after working dir initialization. """ self.root = root self.module = module self.dest = dest self.branch = branch self.rev = rev self.cached_status = {} if not use_externals: self.externals = '--ignore-externals' else: self.externals = '' # Resolve url self.url = self.__get_url() try: # Test if the dest directory is an actual Subversion checkout info = self.info() except SVN_Error: # If not then do a checkout. Once Subversion 1.5 is out we should # do only a 'partial' checkout in order to set up the dest # directory svncheckout = Run(['svn', 'checkout', self.externals, self.url, self.dest]) if svncheckout.status: self.__error('svn checkout error:\n' + svncheckout.out) return if info['URL'] != self.url: # The dest directory is actually a checkout but not on the right # URL. So do a svn switch svnswitch = Run(['svn', 'switch', self.url, self.dest]) if svnswitch.status: self.__error('svn switch error:\n' + svnswitch.out) def __info(self, url): """Internal function""" results = {} svninfo = Run(['svn', 'info', url]) if svninfo.status: self.__error('svn info error:\n' + svninfo.out) for line in svninfo.out.splitlines(): fields = line.split(':', 1) if len(fields) > 1: results[fields[0]] = fields[1].strip() return results def info(self, path=''): """Get info on a file PARAMETERS file : a path relative to the working dir. The default '' returns the status of '.' RETURN VALUE A dictionnary containing the following keys: 'Path' 'Name' 'URL' 'Repository Root' 'Repository UUID' 'Revision' 'Node Kind' 'Schedule' 'Last Changed Author' 'Last Changed Rev' 'Last Changed Date' 'Text Last Updated' 'Checksum' key values are strings REMARKS None """ return self.__info(os.path.join(self.dest, path)) def __get_url(self): """Internal function""" return self.root + '/' + self.branch + '/' + self.module def update(self, files=None): """Update a set of files PARAMETERS files : a list of path relative to the working dir. If not set then an update of the whole working dir is done. RETURN VALUE None REMARKS None """ if files is None: files = [''] for f in files: svnupdate = Run(['svn', 'update', self.externals, f], cwd=self.dest) if svnupdate.status: self.__error('svn update error:\n' + svnupdate.out) def add(self, files): """Add a set of files PARAMETERS files : the list of files to add. 
RETURN VALUE None """ svnadd = Run(['svn', 'add'] + files, cwd=self.dest) if svnadd.status: self.__error('svn add error:\n' + svnadd.out) def commit(self, msg, files=None): """Commit a set of files PARAMETERS msg : the commit message (should be different from '') files : the list of files to commit. If not set then do a commit on working dir RETURN VALUE None REMARKS Before commit a check is done to see if the local copy of the files are up-to-date. If not the checkin is aborted and SVN_Error is raised. """ if not self.is_uptodate(files): svnlogger.error('svn commit error: files not up-to-date') if not self.has_diff(files, True): # There are no local modifications so just return return if files is None: files = [] svncommit = Run(['svn', 'commit', '-m', msg] + files, cwd=self.dest) if svncommit.status: self.__error('svn commit error:\n' + svncommit.out) def is_uptodate(self, files=None, use_cached_status=False): """Check if a set of files are up-to-date PARAMETERS files : the list of files we are interested in. Otherwise check if the overall working is up-to-date use_cached_status : if True use cached status. RETURN VALUE True if the files are up-to-date, False otherwise REMARKS None """ svnstatus = self.status(use_cached_status) if files is None: # If an empty list is passed check that all the files are # up-to-date for f in svnstatus: if not svnstatus[f]['uptodate']: return False return True else: # Otherwise check only the files pass by the caller for f in files: if f in svnstatus and not svnstatus[f]['uptodate']: return False return True def status(self, use_cached_status=False): """Get the status of the working directory PARAMETERS use_cached_status : if True return the cached status. RETURN VALUE A dictionnary containing a key for each file for which the status changed Each key contains a dictionnary with the following keys: - status: a character identifying the current file status. (see svn help status for more info) - uptodate: True if the file is up-to-date, False otherwise - rev: the current revision string REMARKS None """ if use_cached_status: return self.cached_status result = {} svnstatus = Run(['svn', 'status', '-u', self.dest]) for line in svnstatus.out.splitlines(): if line.startswith('Status'): break status = line[0] if line[7] == '*': uptodate = False else: uptodate = True if status == '?': rev = '' f = line[8:].lstrip() else: fields = line[8:].lstrip().split(None, 1) rev = fields[0] f = fields[1] result[f] = {'status': status, 'rev': rev, 'uptodate': uptodate} self.cached_status = result return result def has_diff(self, files=None, use_cached_status=False): """Check if there some local changes on a set of files PARAMETERS files : a list of files. If not set the overall working dir is taken into account. use_cached_status : if True use cached status. RETURN VALUE True if a least one file contains local changes. False otherwise. REMARKS None """ svnstatus = self.status(use_cached_status) if files is None: # If an empty list is passed check that all files local modifs for f in svnstatus: if svnstatus[f]['status'] in ('A', 'M'): return True return False else: # Otherwise check only the files pass by the caller for f in [self.dest + '/' + f for f in files]: if f in svnstatus and svnstatus[f]['status'] in ('A', 'M'): return True return False def log(self, rev=None, path=None): """Returns logs messages PARAMETERS rev : the revision range. If not set, gets all logs from the beginning path : the file or directory to get logs from. If not set, gets the overall working dir's logs. 
RETURN VALUE a list of dictionnaries containg keys : revision, author, date, msg """ cmd = ['svn', 'log', '--xml'] if rev: cmd.append('-r') cmd.append(str(rev)) if path: cmd.append(path) svnlog = Run(cmd, cwd=self.dest) if svnlog.status: self.__error('svn log error:\n' + svnlog.out) # parse log xml_log = minidom.parseString(svnlog.out) logs = [] for node in xml_log.getElementsByTagName("logentry"): entry = {} if node.getAttribute('revision'): entry['rev'] = node.getAttribute('revision') if node.getElementsByTagName('author'): entry['author'] = node.getElementsByTagName( 'author')[0].firstChild.data if node.getElementsByTagName('date'): entry['date'] = node.getElementsByTagName( 'date')[0].firstChild.data if node.getElementsByTagName('msg'): entry['msg'] = node.getElementsByTagName( 'msg')[0].firstChild.data logs.append(entry) return logs @classmethod def __error(cls, msg): """Log the message and raise SVN_Error""" svnlogger.error(msg) raise SVN_Error(msg) python-gnatpython-54.orig/gnatpython/fileutils.py0000644000175000017500000011231711654532470022114 0ustar xavierxavier ############################################################################ # # # FILEUTILS.PY # # # # Copyright (C) 2008 - 2011 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """This module provides various functions to handle files and directories All this functionalities are already present in python but they are available in different modules with different interfaces. Here the interface of each function tries to be as close as possible to the Unix shell commands. """ from gnatpython.ex import Run from gnatpython.env import Env from gnatpython.logging_util import highlight from gnatpython.logging_util import COLOR_GREEN, COLOR_RED, COLOR_CYAN from difflib import SequenceMatcher, unified_diff import logging import os import shutil import glob import re import socket import sys import fnmatch logger = logging.getLogger('gnatpython.fileutils') # Check whether ln is supported on this platform # If ln is not supported, use shutil.copy2 instead HAS_LN = hasattr(os, "link") # When diff find a difference between two lines, we'll try to highlight # the differences if diff_within_line is True. 
This is currently disabled # because the output is not always more readable (the diff is too fine # grained, we should probably do it at the word level) diff_within_line = False class FileUtilsError (Exception): """Exception raised by functions defined in this module """ def __init__(self, cmd, msg): Exception.__init__(self, cmd, msg) self.cmd = cmd self.msg = msg def __str__(self): return "%s: %s\n" % (self.cmd, self.msg) def cd(path): """Change current directory PARAMETERS path: directory name RETURN VALUE None REMARKS In case of error then FileUtilsError is raised """ try: os.chdir(path) except Exception, E: logger.error(E) raise FileUtilsError('cd', "can't chdir to %s\n" % path) def cp(source, target, copy_attrs=True, recursive=False): """Copy files PARAMETERS source: a glob pattern target: target file or directory. If the source resolves as several files then target should be a directory copy_attrs: If True, also copy all the file attributes such as mode, timestamps, ownership, etc. recursive: If True, recursive copy. This also preserves attributes; if copy_attrs is False, a warning is emitted. RETURN VALUE None REMARKS If an error occurs then FileUtilsError is raised """ switches = '' if copy_attrs: switches += ' -p' if recursive: switches += ' -r' logger.debug('cp %s %s->%s' % (switches, source, target)) if recursive and not copy_attrs: logger.warning('recursive copy always preserves file attributes') # Compute file list and number of file to copy file_list = ls(source) file_number = len(file_list) if file_number == 0: # If there is no source files raise an error raise FileUtilsError('cp', "can't find files matching '%s'" % source) elif file_number > 1: # If we have more than one file to copy then check that target is a # directory if not os.path.isdir(target): raise FileUtilsError('cp', 'target should be a directory') for f in file_list: try: if recursive: shutil.copytree(f, target) elif copy_attrs: shutil.copy2(f, target) else: shutil.copy(f, target) except Exception, E: logger.error(E) raise FileUtilsError('cp', 'error occurred while copying %s' % f) def unixpath(path): """Convert path to Unix/Cygwin format PARAMETERS path: path string to convert RETURN VALUE None REMARKS On Unix systems this function is identity. On Win32 systems it needs cygpath to do the conversion. 
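
    *EXAMPLE*

    Illustrative values only; the exact Win32 result depends on the local
    Cygwin mount table::

        unixpath('C:\\temp\\logs')   # win32: typically '/cygdrive/c/temp/logs'
        unixpath('/tmp/logs')        # unix: identity, returns '/tmp/logs'
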
""" if sys.platform == 'win32': p = Run(['cygpath', '-u', path]) if p.status != 0: raise FileUtilsError('unixpath', 'cannot transform path %s' % path) return p.out.strip() else: return path def ln(source, target): """Create a symbolic link PARAMETERS source: a filename target: the target filename RETURN VALUE None """ try: if HAS_LN: os.link(source, target) else: shutil.copy2(source, target) except Exception, E: logger.error(E) raise FileUtilsError('ln', 'can not link %s to %s' % (source, target)) def df(path): """Disk space available on the filesystem containing the given path PARAMETERS path: a path string RETURN VALUE An integer representing the space left in Mo """ if Env().host.os.name.lower() == 'windows': import ctypes free_bytes = ctypes.c_ulonglong(0) ctypes.windll.kernel32.GetDiskFreeSpaceExW( ctypes.c_wchar_p(path), None, None, ctypes.pointer(free_bytes)) value = free_bytes.value else: stats = os.statvfs(path) value = stats.f_bsize * stats.f_bavail # The final value is in Mo so it can safely be converted to an integer return int(value / (1024 * 1024)) def colored_unified_diff(a, b, fromfile='', tofile='', fromfiledate='', tofiledate='', n=3, lineterm='\n', onequal=None, onreplaceA=None, onreplaceB=None): """Same parameters as difflib.unified_diff ONEQUAL is a callback: it is passed a substring matching parts of the input that are the same in A and B. It returns the version to be displayed (by default, no change). It can be used if you want to limit the output. Likewise, ONREPLACEA and ONREPLACEB are called when a substring of A is replaced by a substring of B. They should return the actual strings that will be compared to find the diffs within a line. """ if not Env().main_options or not Env().main_options.enable_color: for line in unified_diff( a, b, fromfile, tofile, fromfiledate, tofiledate, n, lineterm): yield line else: # Code inspired from difflib.py minus = highlight('-', fg=COLOR_CYAN) plus = highlight('+', fg=COLOR_CYAN) if not onequal: onequal = lambda x: x if not onreplaceA: onreplaceA = lambda x: x if not onreplaceB: onreplaceB = lambda x: x started = False for group in SequenceMatcher(None, a, b).get_grouped_opcodes(n): if not started: yield highlight('--- %s %s%s', fg=COLOR_CYAN) \ % (fromfile, fromfiledate, lineterm) yield highlight('+++ %s %s%s', fg=COLOR_CYAN) \ % (tofile, tofiledate, lineterm) started = True i1, i2, j1, j2 = (group[0][1], group[-1][2], group[0][3], group[-1][4]) yield highlight( "@@ -%d,%d +%d,%d @@%s" % (i1 + 1, i2 - i1, j1 + 1, j2 - j1, lineterm), fg=COLOR_CYAN) for tag, i1, i2, j1, j2 in group: if tag == 'equal': for line in a[i1:i2]: yield ' ' + onequal(line) continue elif tag == 'replace': line1 = onreplaceA(("\n" + minus).join(a[i1:i2])) line2 = onreplaceB(("\n" + plus).join(b[j1:j2])) if diff_within_line: # Do a diff within the lines to highlight the difs d = list(SequenceMatcher( None, line1, line2).get_grouped_opcodes( len(line1) + len(line2))) result1 = "" result2 = "" for c in d: for t, e1, e2, f1, f2 in c: if t == 'equal': result1 += "".join(onequal(line1[e1:e2])) result2 += "".join(onequal(line2[f1:f2])) elif t == 'replace': result1 += highlight( "".join(line1[e1:e2]), COLOR_RED) result2 += highlight( "".join(line2[f1:f2]), COLOR_GREEN) elif t == 'delete': result1 += highlight( "".join(line1[e1:e2]), COLOR_RED) elif t == 'insert': result2 += highlight( "".join(line2[f1:f2]), COLOR_GREEN) yield minus + result1 yield plus + result2 else: yield minus + highlight(line1, COLOR_RED) yield plus + highlight(line2, COLOR_GREEN) elif tag == 
'delete': for line in a[i1:i2]: if diff_within_line: yield minus + line else: yield minus + highlight(line, COLOR_RED) elif tag == 'insert': for line in b[j1:j2]: if diff_within_line: yield plus + line else: yield plus + highlight(line, COLOR_GREEN) def diff(item1, item2, ignore=None, item1name="expected", item2name="output"): """Compute diff between two files or list of strings PARAMETERS item1 : a filename or a list of strings item2 : a filename or a list of strings ignore : all lines matching this pattern in both files are ignored during comparison. If set to None, all lines are considered. item1name: name to display for item1 in the diff item2name: name to display for item2 in the diff color : whether colored diff should be displayed (even if True, this will be disabled unless the user specified the --enable-color switch). RETURN VALUE A diff string. If the string is equal to '' it means that there is no difference REMARKS White character at beginning and end of lines are ignored. On error, FileUtilsError is raised """ tmp = [[], []] # Read first item if isinstance(item1, list): tmp[0] = item1 else: try: file1_fd = open(item1, 'r') tmp[0] = file1_fd.readlines() file1_fd.close() except IOError: tmp[0] = [] # Do same thing for the second one if isinstance(item2, list): tmp[1] = item2 else: try: file2_fd = open(item2, 'r') tmp[1] = file2_fd.readlines() file2_fd.close() except IOError: tmp[1] = [] def is_line_junk(line): """Skip non useful lines""" return len(line.strip()) == 0 or \ (ignore is not None and re.search(ignore, line) is not None) # Filter empty lines in both items and lines that match ignore pattern for k in [0, 1]: tmp[k] = ["%s\n" % line.strip() \ for line in tmp[k] if not is_line_junk(line)] diff_content = colored_unified_diff( tmp[0], tmp[1], n=1, fromfile=item1name, tofile=item2name) return ''.join(diff_content) def ls(path): """List files PARAMETERS path: glob pattern or glob pattern list RETURN VALUE a list of filenames REMARKS This function do not raise an error if no file matching the glob pattern is encountered. The only consequence is that an empty list is returned. """ if not isinstance(path, list): path = [path] result = [] logger.debug('ls %s' % path) for p in path: result += glob.glob(p) result.sort() return result def mkdir(path, mode=0755): """Create a directory PARAMETERS path: path to create. If intermediate directories do not exist the procedure create them mode: default is 0755 RETURN VALUE None REMARKS This function behaves quite like mkdir -p command shell. So if the directory already exist no error is raised. If the directory cannot be created then FileUtilsError is raised. """ if os.path.isdir(path): return else: logger.debug('mkdir %s %s' % (path, mode)) try: os.makedirs(path, mode) except Exception, E: logger.error(E) raise FileUtilsError('mkdir', "can't create %s" % path) def mv(source, target): """Move files PARAMETERS source: a glob pattern target: target file or directory. 
If the source resolves as several files then target should be a directory RETURN VALUE None REMARKS If an error occurs then FileUtilsError is raised """ logger.debug('mv %s->%s' % (source, target)) try: # Compute file list and number of file to copy file_list = ls(source) file_number = len(file_list) assert file_number != 0, "can't find files matching '%s'" % source if len(file_list) == 1: f = file_list[0] if os.path.isdir(f) and os.path.isdir(target): shutil.move(f, os.path.join(target, os.path.basename(f))) else: shutil.move(f, target) else: # If we have more than one file to move then check that target is a # directory assert os.path.isdir(target), 'target should be a directory' for f in file_list: shutil.move(f, os.path.join(target, os.path.basename(f))) except Exception, E: logger.error(E) raise FileUtilsError('mv', E) def rm(path, recursive=False): """Remove files PARAMETERS path: a glob pattern recursive: if True do a recursive deletion. Default is False RETURN VALUE None REMARKS If an error occurs then FileUtilsError is raised. The function will not raise an Error is there are no file to delete. """ logger.debug('rm %s' % (path)) file_list = ls(path) def onerror(func, path, exc_info): """When shutil.rmtree fail, try again to delete the file""" if func == os.remove or func == os.rmdir: # Cannot remove path, call chmod and redo an attempt os.chmod(path, 0777) func(path) for f in file_list: try: # When calling rmtree or remove, ensure that the string that is # passed to this function is unicode on Windows. Otherwise, # the non-Unicode API will be used and so we won't be # able to remove these files. On Unix don't do that as # we got some strange unicode "ascii codec" errors # (need some further investigation at some point) if Env().host.os.name == 'windows': f = unicode(f) if recursive: shutil.rmtree(f, onerror=onerror) else: os.remove(f) except Exception, E: logger.error(E) raise FileUtilsError('rm', 'error occured while removing %s' % f) def rsync(source, target, files=None, protected_files=None, delete=False, options=None): """Wrapper around rsync utility PARAMETERS source: source directory to sync. Note that it will be always considered as the 'content' of source (i.e source is passed with a trailing '/') target: target destination directory files: if None all files from source are synchronized. Otherwise it should be a list of string that are patterns (rsync format) to select which files should be transfered. protected_files: type is the same as files parameters. Files that are matching these pattern will be protected in the destination directory delete: If true, files that don't exist in source will deleted in target. RETURN VALUE None REMARKS None """ rsync_args = ['rsync', '-a'] rsync_filename = '' if delete: rsync_args.append('--delete-excluded') if files is not None or protected_files is not None: rsync_filename = os.path.join(Env().tmp_dir, 'rsync.list.%d' % os.getpid()) f = open(rsync_filename, 'w') if files is not None: for filename in files: # add filename to the list f.write('+ /' + filename + '\n') # add also all its parent directories while filename != '': (filename, _) = os.path.split(filename) if filename != '': f.write('+ /' + filename + '/\n') if protected_files is not None: for filename in protected_files: f.write('P /' + filename + '\n') # exclude files that did not match the patterns f.write('- *\n') f.close() # Update rsync arguments rsync_args.append('--filter=. 
' + rsync_filename) if options is not None: for opt in options: rsync_args.append(opt) # Note: source and target must be in Unix format. Windows style for path # will not work. rsync_args.append(unixpath(source) + '/') rsync_args.append(unixpath(target)) p = Run(rsync_args) # Clean temp file if necessary if files is not None or protected_files is not None: rm(rsync_filename) if p.status != 0: raise FileUtilsError('rsync', 'rsync failed with status %d\n%s\n%s' % (p.status, " ".join(rsync_args), p.out)) return def touch(filename): """Update file access and modification times PARAMETERS filename: file to update RETURN VALUE None REMARKS If the file does not exist it is created. """ if os.path.exists(filename): os.utime(filename, None) else: new_file = open(filename, 'w+') new_file.close() def which(prog): """Locate executable Returns the full path of prog executable that would have been executed by gnatpython.ex.Run. It does this by searching for an executable in the directories listed in the environment variable PATH PARAMETERS prog: program to find RETURN VALUE absolute path to the program on success, '' otherwise. """ def is_exe(fpath): return os.path.exists(fpath) and os.access(fpath, os.X_OK) fpath, fname = os.path.split(prog) if fpath: # Full path given, check if executable for progname in set((prog, prog + Env().host.os.exeext)): if is_exe(progname): return progname else: # Check for all directories listed in $PATH for pathdir in os.environ["PATH"].split(os.pathsep): exe_file = os.path.join(pathdir, prog) for progname in set((exe_file, exe_file + Env().host.os.exeext)): if is_exe(progname): return progname return "" def split_file(filename, split_line=None, keys=None, ignore_errors=False, host=None): """Split a file into a list or a dictionary PARAMETERS filename: file to read split_line: if None then the file is split by line. Otherwise lines are also subdivided using split_line as separator keys: this is a list of string. If split_line is None then this parameter is ignored. Otherwise, each line is subdivided using split_line parameter and each field associated with a key to compose a dictionary. If the number of keys is not sufficient additional fields are ignored. If the number of keys is superior to the number of fields then last keys will have '' as value. host: if not None, this is a remote file RETURN VALUE A list. If split_line if None then each element is a string (i.e a line of the file), otherwise each element is list of string (i.e a list split using split_line separator) or a dictionary (if keys are passed). If an I/O error occurs and ignore_errors is set to True then an empty list is returned. """ result = [] try: if host is None: fd = open(filename, 'r') else: fd = Run(['ssh', host, 'cat', filename]).out.splitlines() for line in fd: line = line.rstrip() if split_line is not None and line != '': tmp = line.split(split_line) if keys is None: line = tmp else: line = {} tmp_last = len(tmp) - 1 for index, k in enumerate(keys): if tmp_last < index: line[k] = '' else: line[k] = tmp[index] result.append(line) elif split_line is None: result.append(line) if host is None: fd.close() except IOError, E: if not ignore_errors: logger.error(E) raise FileUtilsError('split_file', 'cannot open file %s' % filename) else: result = [] return result def echo_to_file(filename, content, append=False): """Output content into a file PARAMETERS filename: file to write into content: string to be written append: if True append to the file. 
Otherwise overwrite (Default) RETURN VALUE None REMARKS This function is useful when writing few content to a file for which we don't want to keep a file descriptor opened . In other cases, it's more efficient to open a file and use the regular python I/O functions. """ if append: fd = open(filename, 'a+') fd.seek(0, 2) else: fd = open(filename, 'w+') if isinstance(content, list): for l in content: fd.write(l + '\n') else: fd.write(content) fd.close() def unpack_archive(filename, dest, selected_files=None, remove_root_dir=False, tar='tar', unpack_cmd=None, force_extension=None): """Unpack an archive file (.tgz, .tar.gz, .tar or .zip) PARAMETERS filename: archive to unpack dest: destination directory (should exist) selected_files: list of files to unpack (partial extraction). If None all files are unpacked remove_root_dir: if True then the root dir of the archive is suppressed. tar: path/to/tar binary (else use 'tar') unpack_cmd: command to run to unpack the archive, if None use default methods or raise FileUtilsError if archive format is not supported. The unpack_cmd must raise FileUtilsError in case of failure. force_extension: specify the archive extension if not in the filename. If filename has no extension and force_extension is None unpack_archive will fail. RETURN VALUE None REMARKS rsync and cygpath (win32) utilities might be needed when using remove_root_dir option In case of error then FileUtilsError is raised """ # First do some checks such as archive existence or destination directory # existence. if not os.path.isfile(filename): raise FileUtilsError('unpack_archive', 'cannot find %s' % filename) if not os.path.isdir(dest): raise FileUtilsError('unpack_archive', 'dest dir %s does not exist' % dest) if selected_files is None: selected_files = [] logger.debug('unpack %s in %s' % (filename, dest)) # We need to resolve to an absolute path as the extraction related # processes will be run in the destination directory filename = os.path.abspath(filename) # If remove_root_dir is set then extract to a temp directory first. # Otherwise extract directly to the final destination try: if remove_root_dir: tmp_dest = '%s.%d' % (os.path.abspath(dest), os.getpid()) mkdir(tmp_dest) else: tmp_dest = dest if unpack_cmd is not None: # Use user defined unpack command unpack_cmd(filename, tmp_dest, selected_files=selected_files) elif Env().host.os.name == 'windows': import tarfile import zipfile if filename.endswith('.tar.gz') or filename.endswith('.tgz') or \ filename.endswith('.tar.bz2') or filename.endswith('.tar')\ or (force_extension is not None and force_extension in \ ['.tar.gz', '.tgz', '.tar.bz2', '.tar']): try: fd = tarfile.open(filename, mode='r') # selected_files must be converted to tarfile members selected_files = [fd.getmember(f) for f in selected_files] # detect directories. 
This is not done by default # For each directory, select all the tree selected_dirnames = [ d.name for d in selected_files if d.isdir()] for dname in selected_dirnames: selected_files += [ fd.getmember(n) for n in fd.getnames() if n.startswith(dname + '/')] except tarfile.TarError, e: raise FileUtilsError( 'unpack_archive', 'Cannot untar %s (%s) % (filename, e)') elif filename.endswith('.zip') or \ (force_extension is not None and force_extension == \ '.zip'): try: fd = zipfile.ZipFile(filename, mode='r') except zipfile.BadZipfile, e: raise FileUtilsError( 'unpack_archive', 'Cannot unzip %s (%s)' % (filename, e)) else: raise FileUtilsError( 'unpack_archive', 'unknown format %s' % filename) if len(selected_files) == 0: selected_files = None fd.extractall(tmp_dest, selected_files) fd.close() else: # Handle .zip, .tar.gz and .tar archives if filename.endswith('.tar.gz') or filename.endswith('.tgz') or\ (force_extension is not None and force_extension in \ ['.tar.gz', '.tgz']): p = Run([['gzip', '-dc', filename], [tar, '-xf', '-'] + selected_files], cwd=tmp_dest) elif filename.endswith('.tar.bz2') or\ (force_extension is not None and force_extension == \ '.tar.bz2'): p = Run([['bunzip2', '-dc', filename], [tar, '-xf', '-'] + selected_files], cwd=tmp_dest) elif filename.endswith('.tar') or\ (force_extension is not None and force_extension == \ '.tar'): p = Run([tar, '-xf', filename] + selected_files, cwd=tmp_dest) elif filename.endswith('.zip') or\ (force_extension is not None and force_extension == \ '.zip'): p = Run(['unzip', '-o', filename] + selected_files, cwd=tmp_dest) else: raise FileUtilsError('unpack_archive', 'unknown format "%s"' % filename) if p.status != 0: # The extract command failed raise FileUtilsError('unpack_archive', 'extraction of %s failed' % filename) if remove_root_dir: # First check that we have only one dir in our temp destination. If # not raise an error. file_list = ls(tmp_dest + '/*') if len(file_list) == 0: # Nothing to do... return if len(file_list) != 1: raise FileUtilsError('unpack_archive', 'archive does not have a unique root dir') root_dir = file_list[0] # Now check if the destination directory is empty. If this is the # case a simple move will work, otherwise we need to do a rsync # (which cost more) if not os.listdir(dest): mv([os.path.join(root_dir, f) for f in os.listdir(root_dir)], dest) else: rsync(root_dir, dest, delete=False) finally: # Always remove the temp directory before exiting if remove_root_dir: rm(tmp_dest, True) def find(root, pattern=None, include_dirs=False, include_files=True, follow_symlinks=False): """Find files or directory recursively PARAMETERS root: directory from which the research start pattern: glob pattern that files or directories should match in order to be included in the final result include_dirs: if True include directories include_files: if True include regular files follow_symlinks: if True include symbolic links RETURN VALUE a list of files (strings) """ result = [] for root, dirs, files in os.walk(root, followlinks=follow_symlinks): root = root.replace('\\', '/') if include_files: for f in files: if pattern is None or fnmatch.fnmatch(f, pattern): result.append(root + '/' + f) if include_dirs: for d in dirs: if pattern is None or fnmatch.fnmatch(d, pattern): result.append(root + '/' + d) return result def split_mountpoint(path): """Split a given path between it's mount point and the remaining part PARAMETERS A path string RETURN VALUE A length two tuple. 
First element is the mount point and second element is the remaining part of the path or None """ # If the path is invalid raise an exception if not os.path.exists(path): raise FileUtilsError('split_mountpoint', "path does not exist: %s" % path) # First get the absolute path path = os.path.realpath(os.path.abspath(path)) queue = [] # Iterate through the path until we found the mount point while not os.path.ismount(path): queue = [os.path.basename(path)] + queue path = os.path.dirname(path) if queue: return (path, os.path.join(*queue)) else: return (path, None) def get_path_nfs_export(path): """Guess NFS related information for a given path PARAMETERS path: a string containing a valid path RETURN VALUE a length four tuple containing: (machine IP, machine name, export, path relative to the export). Note that the function is just making a guess. We cannot really ensure that the return export really exist). If the function canot guess the NFS export then None is returned. """ def add_ip_info(machine, export, path): """Add ip information""" domain = '.' + e.host.domain if e.host.domain else '' return (socket.gethostbyname(machine), machine + domain, export, path) # First find the mount point e = Env() mountfiles = [] if e.host.os.name.lower() != 'windows': # Don't try to look into unix specific files or to use 'mount' command # on Windows platform (if the later exists it will be a cygwin tool # that is not useful in our case). mountfiles = ['/etc/mtab', '/etc/mnttab', '/proc/mounts', 'mount'] mount_point, path = split_mountpoint(path) # Then read system imports for fname in mountfiles: # Extract necessary fields if fname == 'mount': # Either by parsing the output of the mount command mount_bin = which('mount') if not mount_bin: # /sbin is not always in the PATH if os.path.exists('/sbin/mount'): mount_bin = '/sbin/mount' else: # No mount program found ! raise FileUtilsError( 'get_path_nfs_export', 'Cannot find mount') p = Run([mount_bin]) if p.status != 0: raise FileUtilsError( 'get_path_nfs_export', 'Error when calling mount') lines = p.out.splitlines() mount_index = 2 elif os.path.exists(fname): # Or by reading a system file with open(fname, 'r') as f: lines = f.readlines() mount_index = 1 else: continue for line in lines: fields = line.rstrip().split() if fields[mount_index] == mount_point: # We found a file system. It can either be a local # filesystem or on a remote machine. tmp = fields[0].split(':') if len(tmp) == 1: # This is a local fs. Here the heuristic is to # consider the export return add_ip_info(e.host.machine, mount_point, path) elif len(tmp) == 2: # Looks like 'nfs' import return add_ip_info(tmp[0], tmp[1], path) else: # What's that ? return add_ip_info(e.host.machine, mount_point, path) if e.host.os.name.lower() == 'windows': tmp = path.split('\\') if len(tmp) > 1: return add_ip_info(e.host.machine, '/' + tmp[0], '/'.join(tmp[1:])) def substitute_template(template, target, variables): """Create a file using a template and and a dictionnary. 
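
    *EXAMPLE*

    A small sketch with hypothetical file names::

        # Assuming the template file 'config.in' contains the single line:
        #     host=%(host)s port=%(port)d
        substitute_template('config.in', 'config.out',
                            {'host': 'localhost', 'port': 8080})
        # 'config.out' now contains: host=localhost port=8080
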
PARAMETERS template: path to the template target: path in which to dump the result variables: dictionary that will be applied to the template content using the '%' Python operator RETURN VALUE None """ if not os.path.isfile(template): raise FileUtilsError('process_template', 'cannot find template %s' % template) with open(template) as f_template: with open(target, 'wb') as fd: fd.write(f_template.read() % variables) def get_rlimit(): """Return rlimit path""" def get_path(relative_path): """Search for binary in directory parent PARAMETERS binary: the file or directory to search for parent: the directory where binary should be located RETURN VALUE Return the path or empty string """ start_dir = os.path.join(os.path.dirname(__file__)) # if current file equals to the already tested one, we stop previous_dir = '' while os.path.realpath(start_dir) != os.path.realpath(previous_dir): previous_dir = start_dir start_dir = os.path.join(start_dir, os.pardir) if not os.path.exists(start_dir): return "" if os.path.exists(os.path.join(start_dir, relative_path)): return os.path.join(start_dir, relative_path) return "" if Env().host.os.name.lower() == 'windows': path = get_path(os.path.join('Scripts', 'rlimit')) else: path = get_path(os.path.join('bin', 'rlimit')) return path or which("rlimit" + Env().host.os.exeext) python-gnatpython-54.orig/gnatpython/main.py0000644000175000017500000003205711654532470021042 0ustar xavierxavier ############################################################################ # # # MAIN.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """Main program initialization This package provides a class called Main used to initialize a python script invoked from command line. The main goal is to ensure consistency in term of interface, documentation and logging activities for all scripts using gnatpython. When a script uses this module, it should contain a docstring formatted in the following way. Everything before the first empty line will be part of the usage. Everything after will be considered as part of the description. 
The script will support by default the following switches:: --target to set the target --host to set the host -v|--verbose to enable verbose mode (a console logger is added) -h|--help display information parsed in the docstring --log-file FILE to redirect logs to a given file (this is independant from verbose option *EXAMPLES* If you have the following script test.py:: \"\"\"test [options] [args] This is the description\"\"\" import logging from gnatpython.main import * m = Main(add_targets_options=True) m.add_option("-t", "--test", dest="test", metavar="STRING", default="default", help="option example") m.parse_args() logging.info('Test begin') logging.debug('test option value: ' + m.options.test) logging.debug('target option value: ' + m.options.target) logging.debug('host option value: ' + m.options.host) logging.info('Test end') Here are some invocation examples:: $ gnatpython test.py --help usage: test [options] [args] This is the description options: -h, --help show this help message and exit -v, --verbose add some verbosity for debugging purposes --target=TARGET set target --host=HOST set host -t STRING, --test=STRING option example $ gnatpython test.py -v root : INFO Test begin root : DEBUG test option value: default root : DEBUG target option value: root : DEBUG host option value: root : INFO Test end $ gnatpython test.py root : INFO Test begin root : INFO Test end """ from optparse import OptionGroup, OptionParser, TitledHelpFormatter import logging import os import re import sys import gnatpython.logging_util from gnatpython.logging_util import (highlight, COLOR_RED, COLOR_YELLOW, COLOR_GREEN, COLOR_CYAN) from gnatpython.env import Env class MainError (Exception): """MainError exception""" pass class MainHelpFormatter(TitledHelpFormatter): """Format help with underlined section headers. Do not modify description formatting. """ def format_description(self, description): """Do not modify description""" return description color_table = { ' (FAILED|DIFF)': COLOR_RED, ' (UOK)': COLOR_YELLOW, ' (OK|PASSED)': COLOR_GREEN, ' (XFAIL)': COLOR_RED, ' (DEAD)': COLOR_CYAN} class ConsoleColorFormatter(logging.Formatter): """Formatter with color support REMARKS If level is ERROR or CRITICAL then the output color is set to red. Futhermore if some keyword such as PASSED,FAILED are detected then they are highlighted with an adequate color """ def __init__(self, fmt=None, datefmt=None): logging.Formatter.__init__(self, fmt, datefmt) def format(self, record): output = logging.Formatter.format(self, record) if record.levelno >= logging.ERROR: output = highlight(output, fg=COLOR_RED) else: for k in color_table: output = re.sub( k, ' ' + highlight("\\1", fg=color_table[k]), output) return output class Main(object): """ ATTRIBUTES name : name of the program (default is the filename with the extension) usage : contains the usage retrived from the main docstring description: contains the description retrieved from the main docstring options : object containing the result of option parsing (see python optparse module). Note that this object is made global by putting its value in Env.main_options. 
args : list of positionnal parameters after processing options add_option : this is in fact a method that can be used to add other options (see documentation of the Python module optparse) """ def __init__(self, name=None, formatter=None, require_docstring=True, add_targets_options=False): """Init Main object PARAMETERS name: name of the program (if not specified the filename without extension is taken) formatter: override the default formatter for console output require_docstring: if True, raise MainError when the toplevel docstring is not found add_targets_options: add --target and --host options RETURN VALUE an instance of Main REMARKS None """ main = sys.modules['__main__'] if name is not None: self.name = name else: self.name = os.path.splitext(os.path.basename(main.__file__))[0] docstring = main.__doc__ if require_docstring and docstring is None: raise MainError('Doc string not found') if docstring is not None: usage_end = docstring.find('\n\n') if usage_end == -1 and require_docstring: raise MainError('Doc string must start with a usage,' 'followed by an empty line') if docstring is not None: self.usage = docstring[0:usage_end] self.description = docstring[usage_end + 2:] else: self.usage = "" self.description = "" self.add_targets_options = add_targets_options self.__option_parser = OptionParser( usage=self.usage, description=self.description, formatter=MainHelpFormatter()) # Make the add_option function directly available to Main objects self.add_option = self.__option_parser.add_option # And export add_option_group log_options = self.create_option_group("Various logging options") log_options.add_option( "-v", "--verbose", dest="verbose", action="store_true", default=False, help="add some verbosity for debugging purposes. " + "Overrides --loglevel") log_options.add_option( "--log-file", dest="logfile", metavar="FILE", default="", help="add some logs into the specified file") log_options.add_option( "--enable-color", dest="enable_color", action="store_true", default=False, help="enable colors in log outputs") log_options.add_option( "--loglevel", default="INFO", action="store", help="defines a loglevel (RAW,DEBUG,INFO,ERROR) for" + " stdout") self.add_option_group(log_options) if add_targets_options: self.add_option("--target", dest="target", metavar="TARGET[,TARGET_VERSION[,TARGET_MACHINE]]", default="", help="set target") self.add_option("--host", dest="host", metavar="HOST[,HOST_VERSION]", default="", help="set host") self.options = None self.args = None self.formatter = formatter self.__log_handlers_set = False # By default do not filter anything. What is effectively logged will # be defined by setting/unsetting handlers logging.getLogger('').setLevel(gnatpython.logging_util.RAW) def disable_interspersed_args(self): """See optparse.disable_interspersed_args in standard python library""" self.__option_parser.disable_interspersed_args() def parse_args(self, args=None): """Parse options and set console logger PARAMETERS args: the list of positional parameters. 
If None then sys.argv[1:] is used RETURN VALUE None REMARKS None """ levels = {'RAW': gnatpython.logging_util.RAW, 'DEBUG': logging.DEBUG, 'INFO': logging.INFO, 'ERROR': logging.ERROR, 'CRITICAL': logging.CRITICAL} (self.options, self.args) = self.__option_parser.parse_args(args) if not self.__log_handlers_set: # First set level of verbosity if self.options.verbose: level = gnatpython.logging_util.RAW else: level = levels.get(self.options.loglevel, logging.INFO) # Set logging handlers default_format = '%(levelname)-8s %(message)s' handler = gnatpython.logging_util.add_handlers( level=level, format=default_format)[0] if self.formatter is not None: default_format = self.formatter if self.options.enable_color: handler.setFormatter(ConsoleColorFormatter(default_format)) else: if self.formatter is not None: handler.setFormatter(logging.Formatter(self.formatter)) # Log to a file if necessary if self.options.logfile != "": handler = gnatpython.logging_util.add_handlers( level=gnatpython.logging_util.RAW, format='%(asctime)s: %(name)-24s: ' \ '%(levelname)-8s %(message)s', filename=self.options.logfile) self.__log_handlers_set = True # Export options to env e = Env() e.main_options = self.options if self.add_targets_options: # Handle --target and --host options host_name = None host_version = None target_name = None target_version = None target_machine = None if self.options.host != "": tmp = self.options.host.split(',') host_name = tmp[0] if len(tmp) > 1: host_version = tmp[1] if self.options.target != "": tmp = self.options.target.split(',') target_name = tmp[0] if len(tmp) > 1: target_version = tmp[1] if len(tmp) > 2: target_machine = tmp[2] e.set_host(host_name, host_version) e.set_target(target_name, target_version, target_machine) def error(self, msg): """Print a usage message incorporating 'msg' to stderr and exit. PARAMETERS msg: Error message to display """ self.__option_parser.error(msg) def create_option_group(self, txt): """Create a new option group You need to call add_option_group after having added the options """ return OptionGroup(self.__option_parser, txt) def add_option_group(self, group): """Add groups to parsers""" self.__option_parser.add_option_group(group) python-gnatpython-54.orig/gnatpython/__init__.py0000644000175000017500000000440211654532470021646 0ustar xavierxavier ############################################################################ # # # __INIT__.PY # # # # Copyright (C) 2008 - 2011 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """Root module of the GNATpython package.""" __version__ = "1.2" # Add a do-nothing handler to avoid "No handler could be found for logger..." # messages. import logging # The following setting allows us to prevent the logging module from # trying to use the threadding and/or multiprocessing modules. 
Depending # on how python was configured, these modules may not be available, and # without these settings, trying to use the logging module would cause # an exception. logging.logThreads = 0 logging.logMultiprocessing = 0 class NullHandler(logging.Handler): """Add a handler which does nothing""" def emit(self, _record): """emit nothing""" pass h = NullHandler() logging.getLogger("gnatpython").addHandler(h) python-gnatpython-54.orig/gnatpython/arch.py0000644000175000017500000003374511654532470021040 0ustar xavierxavier ############################################################################ # # # ARCH.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """This package contains a single class called Arch that allows the user to instantiate configuration objects containing information about the system (native or cross). """ import platform import re import os.path from gnatpython import config UNKNOWN = 'unknown' # __CPU and __OS are internal classes used only to create namespaces # and have the possibility to declare attributes such as cpu.name in # Arch class class _Arch__CPU: """CPU attributes ATTRIBUTES name: string containing the cpu name bits: int representing the number of bits for the cpu or 'unknown' endian: 'big', 'little' or 'unknown' """ def __init__(self): self.name = UNKNOWN self.bits = UNKNOWN self.endian = UNKNOWN class _Arch__OS: """OS attributes ATTRIBUTES name: os name version: string containing the os version exeext: default executable extension dllext: default shared library extension is_bareboard: True if the system is bareboard, False otherwise """ def __init__(self): self.name = UNKNOWN self.version = None self.exeext = '' self.dllext = '' self.is_bareboard = False self.kernel_version = None class Arch: """Class that allow user to retrieve os/cpu specific informations ATTRIBUTES cpu: CPU information (see _Arch__CPU) os: Operating system information (see _Arch__OS) is_hie: True if the system is a high integrity system platform: AdaCore platform product name. Ex: x86-linux triplet: GCC target machine: machine name domain: domain name is_host: True if this is not a cross context is_virtual: Set to True if the current system is a virtual one. Currently set only for Solaris containers """ def __init__(self, platform_name=None, version=None, is_host=False, machine=None): """Arch constructor PARAMETERS platform: if None then automatically detect current platform (native) Otherwise should be a valid platform string. version: if None, assume default OS version or find it automatically (native case only). Otherwise should be a valid version string. is_host: if True the system is not a cross one. Default is False except if a platform_name is specified or if the platform_name is equal to the automatically detected one. 
RETURN VALUE A Arch instance """ # Create necesarry namespaces using "dummy" classes __CPU and __OS self.cpu = __CPU() # pylint: disable-msg=E0602 self.os = __OS() # pylint: disable-msg=E0602 # Initialize attributes values self.platform = platform_name self.os.version = version self.machine = machine self.is_hie = False self.is_virtual = False if self.platform is None: self.is_host = True else: self.is_host = is_host if self.platform is None: # In this case we try to guess the host platform self.platform = self.__guess_platform() else: if self.platform == self.__guess_platform(): # This is a native platform self.is_host = True if self.is_host: # This is host so we can find the machine name using uname fields tmp = platform.uname()[1].lower().split('.', 1) self.machine = tmp[0] if len(tmp) > 1: self.domain = tmp[1] else: self.domain = "" # On solaris host detect if we are in a container context or not tmp = platform.uname() if tmp[0] == 'SunOS' and tmp[3] == 'Generic_Virtual': self.is_virtual = True else: # This is a target name. Sometimes it's suffixed by the host os # name. If the name is not a key in config.platform_info try to # to find a valid name by suppressing -linux, -solaris or -windows if self.platform not in config.platform_info: for suffix in ('-linux', '-solaris', '-windows'): if self.platform.endswith(suffix): self.platform = self.platform.replace(suffix, '') break # Fill other attributes self.__fill_info() # Find triplet self.triplet = config.build_targets[self.platform]['name'] % \ self.__get_dict() def __get_dict(self): """Export os and cpu variables as os_{var} and cpu_{var} Returns a dictionary containing os and cpu exported vars and self.__dict__ content """ str_dict = self.__dict__.copy() for (key, var) in self.os.__dict__.items(): str_dict["os_" + key] = var for (key, var) in self.cpu.__dict__.items(): str_dict["cpu_" + key] = var return str_dict def __str__(self): """Return a representation string of the object""" result = "platform: %(platform)s\n" \ "machine: %(machine)s\n" \ "is_hie: %(is_hie)s\n" \ "is_host: %(is_host)s\n" \ "triplet: %(triplet)s\n" \ "OS\n" \ " name: %(os_name)s\n" \ " version: %(os_version)s\n" \ " exeext: %(os_exeext)s\n" \ " dllext: %(os_dllext)s\n" \ " is_bareboard: %(os_is_bareboard)s\n" \ "CPU\n" \ " name: %(cpu_name)s\n" \ " bits: %(cpu_bits)s\n" \ " endian: %(cpu_endian)s" % self.__get_dict() return result def __fill_info(self): """Internal function that fill info related to the cpu, os, ... 
PARAMETERS None RETURN VALUE None REMARKS None """ self.os.name = config.platform_info[self.platform]['os'] self.cpu.name = config.platform_info[self.platform]['cpu'] self.is_hie = config.platform_info[self.platform]['is_hie'] self.cpu.bits = config.cpu_info[self.cpu.name]['bits'] self.cpu.endian = config.cpu_info[self.cpu.name]['endian'] self.os.is_bareboard = config.os_info[self.os.name]['is_bareboard'] self.os.exeext = config.os_info[self.os.name]['exeext'] self.os.dllext = config.os_info[self.os.name]['dllext'] # If version is not given by the user guess it or set it to the # default (cross case) if self.is_host and self.os.version is None: self.__guess_os_version() if self.os.version is None: self.os.version = config.os_info[self.os.name]['version'] def __guess_platform(self): """ Internal function that guess base on uname system call the current platform PARAMETERS None RETURN VALUE return a string object containing the platform name REMARKS None """ def re_contains(left, right): """Returns right in left (regexp aware)""" if re.match(left + '$', right) or \ re.match('^' + left, right): return True else: return False def re_endswith(left, right): """Returns right.endswith(left) (regexp aware)""" return re.match(left + '$', right) def guess(os_name, p_uname): """Guess based on os_name""" for p_name in config.host_guess: p_config = config.host_guess[p_name] if p_config['os'] is not None: if re_contains(p_config['os'], os_name): if p_config['cpu'] is None or \ re_endswith(p_config['cpu'], p_uname[4]) or \ re_endswith(p_config['cpu'], p_uname[5]): # The p_name config matched if p_name in config.host_aliases: return config.host_aliases[p_name] else: return p_name # wrong guess return None # First look for matching machine name for p_name in config.host_guess: if config.host_guess[p_name]['machine'] is not None: if re_endswith(config.host_guess[p_name]['machine'] + '$', self.machine): return p_name # Else we need to guess uname = platform.uname() p_name = guess(uname[0], uname) if p_name is not None: return p_name p_name = guess(uname[2], uname) if p_name is not None: return p_name # Not found ! return UNKNOWN def __guess_os_version(self): """Internal function used to guess the host OS version/dist PARAMETERS None RETURN VALUE None REMARKS Set the self.os.version attribute and on some platform the self.os.kernel_version """ if self.os.name in ('freebsd', 'tru64'): # Do not compute OS version but read config.os_info table return uname = platform.uname() if self.os.name == 'darwin': self.os.version = uname[2] elif self.os.name == 'linux': self.os.kernel_version = uname[2] if os.path.isfile('/etc/redhat-release'): # RedHat distributions with open('/etc/redhat-release') as rel_f: content = rel_f.read().strip() for sub in (('\(.*', ''), (' ', ''), ('Linux', ''), ('release', ''), ('Enterprise', ''), ('AdvancedServer', 'AS'), ('Server', 'ES'), ('RedHat', 'rh'), ('5\.[0-9]', '5')): content = re.sub(sub[0], sub[1], content) self.os.version = content elif os.path.isfile('/etc/SuSE-release'): # Suse distributions release = open('/etc/SuSE-release', 'r') for line in release: version = re.search('VERSION = ([0-9\.]+)', line) if version is not None: release.close() self.os.version = 'suse' + version.group(1) break release.close() if self.os.version is None: self.os.version = 'suse' elif os.path.isfile('/etc/lsb-release'): # /etc/lsb-release is present on the previous distrib # but is not useful. 
On ubuntu it contains the # distrib number release = open('/etc/lsb-release', 'r') distrib_name = '' distrib_version = '' for line in release: distrib_id = re.search('DISTRIB_ID=(.+)', line.rstrip()) if distrib_id is not None: distrib_name = distrib_id.group(1).lower() else: distrib_release = re.search('DISTRIB_RELEASE=(.*)', line.rstrip()) if distrib_release is not None: distrib_version = distrib_release.group(1) release.close() if distrib_name: self.os.version = distrib_name + distrib_version elif self.os.name == 'aix': self.os.version = uname[3] + '.' + uname[2] elif self.os.name == 'hp-ux': version = uname[2] if version[0:2] == 'B.': version = version[2:] self.os.version = version elif self.os.name == 'irix': self.os.version = uname[2] elif self.os.name == 'lynxos': self.os.version = '' elif self.os.name == 'solaris': self.os.version = '2' + uname[2][1:] elif self.os.name == 'windows': self.os.version = uname[2].replace('Server', '') self.os.kernel_version = uname[3] return if __name__ == "__main__": print Arch() python-gnatpython-54.orig/gnatpython/config.py0000644000175000017500000004266111654532470021365 0ustar xavierxavier ############################################################################ # # # CONFIG.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. 
If not, see # # # ############################################################################ """This file contains various configuration tables""" cpu_info = { 'alpha' : {'endian' : 'little', 'bits' : 64}, 'arm' : {'endian' : 'big', 'bits' : 32}, 'avr' : {'endian' : 'little', 'bits' : 16}, 'erc32' : {'endian' : 'big', 'bits' : 32}, 'hppa' : {'endian' : 'big', 'bits' : 32}, 'ia64' : {'endian' : 'little', 'bits' : 64}, 'leon' : {'endian' : 'big', 'bits' : 32}, 'leon3' : {'endian' : 'big', 'bits' : 32}, 'mips' : {'endian' : 'big', 'bits' : 32}, 'powerpc' : {'endian' : 'big', 'bits' : 32}, 'powerpc64' : {'endian' : 'big', 'bits' : 64}, 'sparc' : {'endian' : 'big', 'bits' : 32}, 'sparc64' : {'endian' : 'big', 'bits' : 64}, 'x86' : {'endian' : 'little', 'bits' : 32}, 'x86_64' : {'endian' : 'little', 'bits' : 64}, 'dotnetvm' : {'endian' : 'unknown', 'bits' : 0}, 'jvm' : {'endian' : 'unknown', 'bits' : 32} } os_info = { 'aix' : { 'is_bareboard' : False, 'version' : '5.2' , 'exeext' : '' , 'dllext' : '.so'}, 'darwin' : { 'is_bareboard' : False, 'version' : '9.6.0' , 'exeext' : '' , 'dllext' : '.dylib'}, 'dotnet' : { 'is_bareboard' : False, 'version' : '2.0' , 'exeext' : '.exe' , 'dllext' : '.dll'}, 'ElinOS' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'freebsd' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'openbsd' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'netbsd' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'dragonfly' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'hp-ux' : { 'is_bareboard' : False, 'version' : '11.00' , 'exeext' : '' , 'dllext' : '.sl'}, 'irix' : { 'is_bareboard' : False, 'version' : '6.5' , 'exeext' : '' , 'dllext' : '.so'}, 'jvm' : { 'is_bareboard' : False, 'version' : '1.5' , 'exeext' : '.class', 'dllext' : '' }, 'linux' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'lynxos' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'nucleus' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'solaris' : { 'is_bareboard' : False, 'version' : '2.8' , 'exeext' : '' , 'dllext' : '.so'}, 'tru64' : { 'is_bareboard' : False, 'version' : '5.1' , 'exeext' : '' , 'dllext' : '.so'}, 'vms' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : ''}, 'vxworks' : { 'is_bareboard' : False, 'version' : '5.5' , 'exeext' : '.out' , 'dllext' : ''}, 'vxworks6' : { 'is_bareboard' : False, 'version' : '6.4' , 'exeext' : '.out' , 'dllext' : ''}, 'vxworksae' : { 'is_bareboard' : False, 'version' : '1.8' , 'exeext' : '.out' , 'dllext' : ''}, 'vxworksmils' : { 'is_bareboard' : False, 'version' : '2.0.1' , 'exeext' : '.out', 'dllext' : ''}, 'windows' : { 'is_bareboard' : False, 'version' : 'XP' , 'exeext' : '.exe' , 'dllext' : '.dll'}, 'wrslinux' : { 'is_bareboard' : False, 'version' : 'unknown', 'exeext' : '' , 'dllext' : '.so'}, 'none' : { 'is_bareboard' : True, 'version' : 'unknown', 'exeext' : '' , 'dllext' : ''} } platform_info = { 'alpha-tru64' : { 'cpu' : 'alpha', 'os' : 'tru64', 'is_hie' : False}, 'avr-elf' : { 'cpu' : 'avr', 'os' : 'none', 'is_hie' : False}, 'arm-nucleus' : { 'cpu' : 'arm', 'os' : 'nucleus', 'is_hie' : False}, 'dotnet' : { 'cpu' : 'dotnetvm','os' : 'dotnet', 'is_hie' : False}, 'e500v2-vx6' : { 'cpu' : 'powerpc', 'os' : 'vxworks6', 'is_hie' : False}, 
'e500v2-vx653' : { 'cpu' : 'powerpc', 'os' : 'vxworksae', 'is_hie' : False}, 'erc32-elf' : { 'cpu' : 'erc32', 'os' : 'none', 'is_hie' : False}, 'ia64-hpux' : { 'cpu' : 'ia64', 'os' : 'hp-ux', 'is_hie' : False}, 'ia64-openvms' : { 'cpu' : 'ia64', 'os' : 'vms', 'is_hie' : False}, 'ia64-linux' : { 'cpu' : 'ia64', 'os' : 'linux', 'is_hie' : False}, 'jvm' : { 'cpu' : 'jvm', 'os' : 'jvm', 'is_hie' : False}, 'leon-elf' : { 'cpu' : 'leon', 'os' : 'none', 'is_hie' : False}, 'leon3-elf' : { 'cpu' : 'leon3', 'os' : 'none', 'is_hie' : False}, 'alpha-openvms' : { 'cpu' : 'alpha', 'os' : 'vms', 'is_hie' : False}, 'mips-irix' : { 'cpu' : 'mips', 'os' : 'irix', 'is_hie' : False}, 'p55-elf' : { 'cpu' : 'powerpc', 'os' : 'none', 'is_hie' : False}, 'pa-hpux' : { 'cpu' : 'hppa', 'os' : 'hp-ux', 'is_hie' : False}, 'ppc-aix' : { 'cpu' : 'powerpc', 'os' : 'aix', 'is_hie' : False}, 'ppc-elf' : { 'cpu' : 'powerpc', 'os' : 'none', 'is_hie' : False}, 'ppc-elinos' : { 'cpu' : 'powerpc', 'os' : 'ElinOS', 'is_hie' : False}, 'ppc-linux' : { 'cpu' : 'powerpc', 'os' : 'linux', 'is_hie' : False}, 'ppc-lynx' : { 'cpu' : 'powerpc', 'os' : 'lynxos', 'is_hie' : False}, 'ppc-lynx5' : { 'cpu' : 'powerpc', 'os' : 'lynxos', 'is_hie' : False}, 'ppc-vx178b' : { 'cpu' : 'powerpc', 'os' : 'vxworks', 'is_hie' : False}, 'ppc-vx6' : { 'cpu' : 'powerpc', 'os' : 'vxworks6', 'is_hie' : False}, 'ppc-vxmils' : { 'cpu' : 'powerpc', 'os' : 'vxworksmils','is_hie' : False}, 'ppc-vx653' : { 'cpu' : 'powerpc', 'os' : 'vxworksae','is_hie' : False}, 'ppc-vx6cert' : { 'cpu' : 'powerpc', 'os' : 'vxworks6', 'is_hie' : False}, 'ppc-vxw' : { 'cpu' : 'powerpc', 'os' : 'vxworks', 'is_hie' : False}, 'ppc-wrslinux' : { 'cpu' : 'powerpc', 'os' : 'wrslinux', 'is_hie' : False}, 'sparc64-solaris' : { 'cpu' : 'sparc64', 'os' : 'solaris', 'is_hie' : False}, 'sparc-elf' : { 'cpu' : 'sparc', 'os' : 'none', 'is_hie' : False}, 'sparc-solaris' : { 'cpu' : 'sparc', 'os' : 'solaris', 'is_hie' : False}, 'x86_64-linux' : { 'cpu' : 'x86_64', 'os' : 'linux', 'is_hie' : False}, 'x86-darwin' : { 'cpu' : 'x86', 'os' : 'darwin', 'is_hie' : False}, 'x86_64-darwin' : { 'cpu' : 'x86_64', 'os' : 'darwin', 'is_hie' : False}, 'x86-elinos' : { 'cpu' : 'x86', 'os' : 'ElinOS', 'is_hie' : False}, 'x86-freebsd' : { 'cpu' : 'x86', 'os' : 'freebsd', 'is_hie' : False}, 'x86-openbsd' : { 'cpu' : 'x86', 'os' : 'openbsd', 'is_hie' : False}, 'x86-netbsd' : { 'cpu' : 'x86', 'os' : 'netbsd', 'is_hie' : False}, 'x86-dragonfly' : { 'cpu' : 'x86', 'os' : 'dragonfly','is_hie' : False}, 'x86_64-freebsd' : { 'cpu' : 'x86_64', 'os' : 'freebsd', 'is_hie' : False}, 'x86_64-openbsd' : { 'cpu' : 'x86_64', 'os' : 'openbsd', 'is_hie' : False}, 'x86_64-netbsd' : { 'cpu' : 'x86_64', 'os' : 'netbsd', 'is_hie' : False}, 'x86_64-dragonfly' : { 'cpu' : 'x86_64', 'os' : 'dragonfly','is_hie' : False}, 'x86-linux' : { 'cpu' : 'x86', 'os' : 'linux', 'is_hie' : False}, 'x86-lynx' : { 'cpu' : 'x86', 'os' : 'lynxos', 'is_hie' : False}, 'x86-lynx5' : { 'cpu' : 'x86', 'os' : 'lynxos', 'is_hie' : False}, 'x86-rtx' : { 'cpu' : 'x86', 'os' : 'solaris', 'is_hie' : False}, 'x86-solaris' : { 'cpu' : 'x86', 'os' : 'solaris', 'is_hie' : False}, 'x86_64-solaris' : { 'cpu' : 'x86_64', 'os' : 'solaris', 'is_hie' : False}, 'x86-vx6' : { 'cpu' : 'x86', 'os' : 'vxworks6', 'is_hie' : False}, 'x86-vx653' : { 'cpu' : 'x86', 'os' : 'vxworksae','is_hie' : False}, 'x86-windows' : { 'cpu' : 'x86', 'os' : 'windows', 'is_hie' : False}, 'x86-wrslinux' : { 'cpu' : 'x86', 'os' : 'wrslinux', 'is_hie' : False}, 'x86_64-windows' : { 'cpu' : 'x86_64', 'os' 
: 'windows', 'is_hie' : False}, 'mindstorms-nxt' : { 'cpu' : 'arm', 'os' : 'none', 'is_hie' : False}, } build_targets = { 'alpha-tru64' : { 'name' : 'alphaev56-dec-osf%(os_version)s'}, 'avr-elf' : { 'name' : 'avr'}, 'arm-nucleus' : { 'name' : 'arm-mentor-nucleus'}, 'dotnet' : { 'name' : 'dotnet'}, 'e500v2-vx6' : { 'name' : 'e500v2-wrs-vxworks'}, 'e500v2-vx653' : { 'name' : 'e500v2-wrs-vxworksae'}, 'erc32-elf' : { 'name' : 'erc32-elf'}, 'ia64-hpux' : { 'name' : 'ia64-hp-hpux%(os_version)s'}, 'ia64-openvms' : { 'name' : 'ia64-openvms'}, 'ia64-linux' : { 'name' : 'ia64-generic-linux-gnu'}, 'jvm' : { 'name' : 'jvm'}, 'leon-elf' : { 'name' : 'leon-elf'}, 'leon3-elf' : { 'name' : 'leon3-elf'}, 'alpha-openvms' : { 'name' : 'alpha-openvms'}, 'mips-irix' : { 'name' : 'mips-sgi-irix%(os_version)s'}, 'p55-elf' : { 'name' : 'powerpc-eabispe'}, 'pa-hpux' : { 'name' : 'hppa1.1-hp-hpux%(os_version)s'}, 'ppc-aix' : { 'name' : 'powerpc-ibm-aix%(os_version)s'}, 'ppc-elf' : { 'name' : 'powerpc-elf'}, 'ppc-elinos' : { 'name' : 'powerpc-elinos-linux'}, 'ppc-linux' : { 'name' : 'powerpc-generic-linux-gnu'}, 'ppc-lynx' : { 'name' : 'powerpc-elf-lynxos'}, 'ppc-lynx5' : { 'name' : 'powerpc-elf-lynxos5'}, 'ppc-vx178b' : { 'name' : 'powerpc-wrs-vxworks'}, 'ppc-vx6' : { 'name' : 'powerpc-wrs-vxworks'}, 'ppc-vx653' : { 'name' : 'powerpc-wrs-vxworksae'}, 'ppc-vxmils' : { 'name' : 'powerpc-wrs-vxworksmils'}, 'ppc-vx6cert' : { 'name' : 'powerpc-wrs-vxworks'}, 'ppc-vxw' : { 'name' : 'powerpc-wrs-vxworks'}, 'ppc-wrslinux' : { 'name' : 'powerpc-wrs-linux'}, 'sparc64-solaris' : { 'name' : 'sparc64-sun-solaris%(os_version)s'}, 'sparc-jgnat' : { 'name' : None}, 'sparc-elf' : { 'name' : 'sparc-elf'}, 'sparc-solaris' : { 'name' : 'sparc-sun-solaris%(os_version)s'}, 'x86_64-linux' : { 'name' : 'x86_64-pc-linux-gnu'}, 'x86-darwin' : { 'name' : 'i386-apple-darwin%(os_version)s'}, 'x86_64-darwin' : { 'name' : 'x86_64-apple-darwin%(os_version)s'}, 'x86-elinos' : { 'name' : 'i686-elinos-linux'}, 'x86-freebsd' : { 'name' : 'i386-pc-freebsd%(os_version)s'}, 'x86-openbsd' : { 'name' : 'i386-pc-openbsd%(os_version)s'}, 'x86-netbsd' : { 'name' : 'i386-pc-netbsd%(os_version)s'}, 'x86-dragonfly' : { 'name' : 'i386-pc-dragonfly%(os_version)s'}, 'x86_64-freebsd' : { 'name' : 'x86_64-pc-freebsd%(os_version)s'}, 'x86_64-openbsd' : { 'name' : 'x86_64-pc-openbsd%(os_version)s'}, 'x86_64-netbsd' : { 'name' : 'x86_64-pc-netbsd%(os_version)s'}, 'x86_64-dragonfly' : { 'name' : 'x86_64-pc-dragonfly%(os_version)s'}, 'x86-jgnat' : { 'name' : None}, 'x86-linux' : { 'name' : 'i686-pc-linux-gnu'}, 'x86-lynx' : { 'name' : ''}, 'x86-lynx5' : { 'name' : 'i386-elf-lynxos5'}, 'x86-rtx' : { 'name' : ''}, 'x86-solaris' : { 'name' : 'i686-pc-solaris%(os_version)s'}, 'x86_64-solaris' : { 'name' : 'x86_64-sun-solaris%(os_version)s'}, 'x86-vx6' : { 'name' : 'i586-wrs-vxworks'}, 'x86-vx653' : { 'name' : 'i586-wrs-vxworksae'}, 'x86-windows' : { 'name' : 'i686-pc-mingw32'}, 'x86-wrslinux' : { 'name' : 'i586-wrs-linux'}, 'x86_64-windows' : { 'name' : 'x86_64-pc-mingw32'}, 'mindstorms-nxt' : { 'name' : 'arm-eabi'}, } # The following table is used to guess a product name from the output of # uname on the host. Users of this data are expected to match the specified # regular expressions against that output to find a matching key. Order is # not significant so if a traversal matches multiple entries the one matched # is undefined. 
It is critical therefore that the supplied expressions match # only the intended product and no other values of uname potentially output # by a different host. IMPORTANT: Systems that can be only used as target in # cross context should not be added to that table. host_guess = { # platform : OS (uname[0]), machine (uname[1]), proc (uname[4 or 5]) 'ppc-aix' : { 'os' : 'AIX', 'machine' : None, 'cpu' : None}, 'ppc-darwin' : { 'os' : 'Darwin', 'machine' : None, 'cpu' : 'powerpc'}, 'x86-darwin' : { 'os' : 'Darwin', 'machine' : None, 'cpu' : 'i386'}, 'x86-freebsd' : { 'os' : 'FreeBSD', 'machine' : None, 'cpu' : None}, 'x86-openbsd' : { 'os' : 'OpenBSD', 'machine' : None, 'cpu' : None}, 'x86-netbsd' : { 'os' : 'NetBSD', 'machine' : None, 'cpu' : None}, 'x86-dragonfly' : { 'os' : 'DragonFly', 'machine' : None, 'cpu' : None}, 'x86_64-freebsd' : { 'os' : 'FreeBSD', 'machine' : None, 'cpu' : None}, 'x86_64-openbsd' : { 'os' : 'OpenBSD', 'machine' : None, 'cpu' : None}, 'x86_64-netbsd' : { 'os' : 'NetBSD', 'machine' : None, 'cpu' : None}, 'x86_64-dragonfly' : { 'os' : 'DragonFly', 'machine' : None, 'cpu' : None}, 'ia64-hpux' : { 'os' : 'HP-UX', 'machine' : None, 'cpu' : 'ia64'}, 'pa-hpux' : { 'os' : 'HP-UX', 'machine' : None, 'cpu' : '9000/785'}, 'mips-irix' : { 'os' : 'IRIX64', 'machine' : None, 'cpu' : None}, 'x86-lynxos' : { 'os' : 'LynxOS', 'machine' : None, 'cpu' : None}, 'ia64-linux' : { 'os' : 'Linux', 'machine' : None, 'cpu' : 'ia64'}, 'ppc-linux' : { 'os' : 'Linux', 'machine' : None, 'cpu' : 'powerpc.*|ppc64'}, 'x86-linux' : { 'os' : 'Linux', 'machine' : None, 'cpu' : 'i.86|pentium'}, 'x86_64-linux' : { 'os' : 'Linux', 'machine' : None, 'cpu' : 'x86_64'}, 'sparc-solaris' : { 'os' : 'SunOS', 'machine' : None, 'cpu' : 'sparc'}, 'x86-solaris' : { 'os' : 'SunOS', 'machine' : None, 'cpu' : 'i386'}, 'alpha-tru64' : { 'os' : 'OSF1', 'machine' : None, 'cpu' : None}, 'x86-windows' : { 'os' : 'Windows', 'machine' : None, 'cpu' : None}, 'cygwin' : { 'os' : 'CYGWIN_NT', 'machine' : None, 'cpu' : None} } host_aliases = { 'cygwin' : 'x86-windows' } python-gnatpython-54.orig/gnatpython/testdriver.py0000644000175000017500000005571511654532470022317 0ustar xavierxavier ############################################################################ # # # TESTDRIVER.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. 
If not, see # # # ############################################################################ """Run a bugs test located in test_dir Define a default test driver: TestRunner """ from gnatpython.env import Env from gnatpython.fileutils import ( split_file, echo_to_file, diff, rm, mkdir, cp, get_rlimit) from gnatpython.optfileparser import OptFileParse from gnatpython.stringutils import Filter import logging import os import re import shutil import subprocess import sys IS_STATUS_FAILURE = { 'DEAD': False, 'CRASH': True, 'INVALID_TEST': True, 'INVALID_TEST_OPT': True, 'UNKNOWN': True, 'OK': False, 'DIFF': True} # Dictionary for which keys are the available test status. Associated value # is a boolean that is True if status should be considered as a failure, False # otherwise. Note that XFAIL and UOK are handled separately by the script. class TestRunner(object): """Default test driver ATTRIBUTES test: full path to test location discs: a list of discriminants (list of strings) cmd_line: the command line to be spawned (list of strings) test_name: name of the test result_prefix: prefix of files that are written in the result directory work_dir: the working directory in which the test will be executed output: name of the temporary file that hold the test output result: current state of the test. This is a dictionary with 3 keys: 'result' that contains the test status, 'msg' the associated short message and 'is_failure' a boolean that is True if the test should be considered as a failure opt_results: context dependent variable (dictionnary) bareboard_mode: True if in bareboard mode. Default is False REMARKS For code readability, methods are ordered following the invocation order used by the execute 'method' """ def __init__(self, test, discs, result_dir, temp_dir=Env().tmp_dir, enable_cleanup=True, restricted_discs=None, test_args=None, failed_only=False, default_timeout=780, use_basename=True): """TestRunner constructor PARAMETERS test: location of the test discs: list of discriminants result_dir: directory in which results will be stored temp_dir: temporary directory used during test run restricted_mode: None or a list of discriminants failed_only: run failed only use_basename: if True use the test basename to get the test name else use the relative path RETURN VALUE a TestRunner instance """ self.test = test.rstrip('/') self.discs = discs self.cmd_line = None self.test_args = test_args self.enable_cleanup = enable_cleanup self.restricted_discs = restricted_discs self.skip = False # if True, do not run execute() # Test name if use_basename: self.test_name = os.path.basename(self.test) else: self.test_name = os.path.relpath(self.test, os.getcwd()) # Prefix of files holding the test result self.result_prefix = result_dir + '/' + self.test_name mkdir(os.path.dirname(self.result_prefix)) # Temp directory in which the test will be run self.work_dir = os.path.realpath(os.path.join(temp_dir, 'tmp-test-%s-%d' % (self.test_name, os.getpid()))) self.output = self.work_dir + '/tmpout' self.diff_output = self.work_dir + '/diff' # Initial test status self.result = {'result': 'UNKNOWN', 'msg': '', 'is_failure': True} # Some tests save the pids of spawned background processes in # work_dir/.pids. The TEST_WORK_DIR environment variable is used to # pass the working directory location. 
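        # As an illustrative sketch only (not part of the driver itself), a
        # test written in Python could record such a background pid like
        # this ('my_daemon' is a hypothetical command):
        #
        #   import os, subprocess
        #   proc = subprocess.Popen(['my_daemon'])
        #   pids = os.path.join(os.environ['TEST_WORK_DIR'], '.pids')
        #   f = open(pids, 'a')
        #   f.write('%d\n' % proc.pid)
        #   f.close()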
os.environ['TEST_WORK_DIR'] = self.work_dir if failed_only: # Read old result now previous_result = self.read_result() if previous_result in IS_STATUS_FAILURE \ and not IS_STATUS_FAILURE[previous_result]: # We don't need to run this test. Return now self.skip = True return # Be sure to be a sane environment rm(self.result_prefix + '.result') rm(self.result_prefix + '.out') rm(self.result_prefix + '.expected') rm(self.result_prefix + '.diff') # Parse test.opt. By default a test is not DEAD, SKIP nor XFAIL. Its # maximum execution time is 780s. Test script is test.cmd and output is # compared against test.out. self.opt_results = {'RLIMIT': str(default_timeout), 'DEAD': None, 'XFAIL': False, 'SKIP': None, 'OUT': 'test.out', 'CMD': 'test.cmd', 'FILESIZE_LIMIT': None, 'NOTE': None} # test.cmd have priority, if not found use test.py if not os.path.isfile( self.test + '/test.cmd') and os.path.isfile( self.test + '/test.py'): self.opt_results['CMD'] = 'test.py' def cleanup(self, force=False): """Remove generated files""" rm(self.result_prefix + '.result') rm(self.result_prefix + '.out') rm(self.result_prefix + '.expected') rm(self.result_prefix + '.diff') def execute(self): """Complete test execution REMARKS Calls all the steps that are needed to run the test. """ if self.skip: logging.debug("SKIP %s - failed only mode" % self.test) return # Adjust test context self.adjust_to_context() for key in ('CMD', 'OUT'): # Read command file and expected output from working directory self.opt_results[key] = self.work_dir + '/src/' + \ self.opt_results[key] # Keep track of the discriminants that activate the test if self.opt_results['NOTE']: echo_to_file(self.result_prefix + '.note', '(' + self.opt_results['NOTE'] + ')\n') # If a test is either DEAD or SKIP then do not execute it. The only # difference is that a SKIP test will appear in the report whereas a # DEAD test won't. for opt_cmd in ('DEAD', 'SKIP'): if self.opt_results[opt_cmd] is not None: echo_to_file(self.result_prefix + '.result', opt_cmd + ':%s\n' % self.opt_results[opt_cmd]) return if self.result['result'] != 'UNKNOWN': self.write_results() return # Run the test self.prepare_working_space() self.compute_cmd_line() self.run() # Analyze the results and write them into result_dir self.set_output_filter() self.analyze() self.write_results() # Clean the working space if self.enable_cleanup: self.clean() def adjust_to_context(self): """Adjust test environment to context REMARKS At this stage we parse the test.opt and adjust the opt_results attribute value. The driver will check if the test should be run (i.e is DEAD) right after this step. 
""" if self.restricted_discs is not None: opt_file_content = ['ALL DEAD disabled by default'] if os.path.isfile(self.test + '/test.opt'): opt_file_content += split_file(self.test + '/test.opt') opt = OptFileParse(self.discs, opt_file_content) self.opt_results = opt.get_values(self.opt_results) if not self.opt_results['DEAD']: activating_tags = opt.get_note(sep='') for d in self.restricted_discs: if d not in activating_tags: self.opt_results['DEAD'] = \ '%s not in activating tags' % d else: opt = OptFileParse(self.discs, self.test + '/test.opt') self.opt_results = opt.get_values(self.opt_results) self.opt_results['NOTE'] = opt.get_note() if not os.path.isfile(self.test + '/' + self.opt_results['CMD']): self.result = { 'result': 'INVALID_TEST', 'msg': 'cannot find script file %s' % (self.opt_results['CMD']), 'is_failure': True} return if self.opt_results['OUT'][-8:] != 'test.out' and \ not os.path.isfile(self.test + '/' + self.opt_results['OUT']): tmp = os.path.basename(self.opt_results['OUT']) self.result = { 'result': 'INVALID_TEST', 'msg': 'cannot find output file %s' % (tmp), 'is_failure': True} return def prepare_working_space(self): """Prepare working space REMARKS Set the working space in self.work_dir. This resets the working directory and copies the test into /src. This directory can be used to hold temp files as it will be automatically deleted at the end of the test by the clean method """ # At this stage the test should be executed so start copying test # sources in a temporary location. rm(self.work_dir, True) mkdir(self.work_dir) try: shutil.copytree(self.test, self.work_dir + '/src') except shutil.Error: print >> sys.stderr, "Error when copying %s in %s" % ( self.test, self.work_dir + '/src') def compute_cmd_line_py(self, filesize_limit): """Compute self.cmd_line and preprocess the test script REMARKS This function is called by compute_cmd_line """ self.cmd_line += [sys.executable, self.opt_results['CMD']] if self.test_args: self.cmd_line += self.test_args def compute_cmd_line_cmd(self, filesize_limit): """Compute self.cmd_line and preprocess the test script REMARKS This function is called by compute_cmd_line """ cmd = self.opt_results['CMD'] if Env().host.os.name != 'windows': script = split_file(cmd) # The test is run on a Unix system but has a 'cmd' syntax. # Convert it to Bourne shell syntax. cmdfilter = Filter() cmdfilter.append([r'-o(.*).exe', r'-o \1']) cmdfilter.append([r'%([^ ]*)%', r'"$\1"']) cmdfilter.append([r'(\032|\015)', r'']) cmdfilter.append([r'set *([^ =]+) *= *([^ ]*)', r'\1="\2"; export \1']) script = cmdfilter.process(script) cmd = self.work_dir + '/__test.sh' echo_to_file(cmd, 'PATH=.:$PATH; export PATH\n') # Compute effective file size limit on Unix system. if filesize_limit > 0: # File size limit can be specified either by a default or by # mean of FILESIZE_LIMIT command in the test test.opt. When # both are specified use the upper limit (note that 0 means # unlimited). opt_limit = self.opt_results['FILESIZE_LIMIT'] if opt_limit is not None: try: opt_limit = int(opt_limit) except TypeError: opt_limit = filesize_limit else: opt_limit = filesize_limit if opt_limit != 0: if filesize_limit < opt_limit: filesize_limit = opt_limit # Limit filesize. Argument to ulimit is a number of blocks # (512 bytes) so multiply by two the argument given by the # user. Filesize limit is not supported on Windows. 
echo_to_file(cmd, 'ulimit -f %s\n' % (filesize_limit * 2), True) # Source support.sh in TEST_SUPPORT_DIR if set if 'TEST_SUPPORT_DIR' in os.environ and \ os.path.isfile(os.environ['TEST_SUPPORT_DIR'] + '/support.sh'): echo_to_file(cmd, '. $TEST_SUPPORT_DIR/support.sh\n', True) echo_to_file(cmd, script, True) self.cmd_line += ['bash', cmd] else: # On windows system, use cmd to run the script. if cmd[-4:] != '.cmd': # We are about to use cmd.exe to run a test. In this case, # ensure that the file extension is .cmd otherwise a dialog box # will popup asking to choose the program that should be used # to run the script. cp(cmd, self.work_dir + '/test__.cmd') cmd = self.work_dir + '/test__.cmd' self.cmd_line += ['cmd.exe', '/q', '/c', cmd] def compute_cmd_line(self, filesize_limit=36000): """Compute command line PARAMETERS filesize_limit: if set to something greater than 0 then a "ulimit -f" is inserted in the scripts. The unit of filesize_limit is Kb. REMARKS When this step is called we assume that we have all the context set and that the working space is in place. The main goal of this step is to compute self.cmd_line and do any processing on the test script file. If the script is in Windows CMD format, convert it to Bourne shell syntax on UNIX system and source TEST_SUPPORT_DIR/support.sh if exist """ # Find which script language is used. The default is to consider it # in Windows CMD format. _, ext = os.path.splitext(self.opt_results['CMD']) if ext in ['.cmd', '.py']: cmd_type = ext[1:] else: cmd_type = 'cmd' rlimit = get_rlimit() assert rlimit, 'rlimit not found' self.cmd_line = [rlimit, self.opt_results['RLIMIT']] if cmd_type == 'py': self.compute_cmd_line_py(filesize_limit) elif cmd_type == 'cmd': self.compute_cmd_line_cmd(filesize_limit) def run(self): """Run the test REMARKS This step should spawn the test using self.cmd_line and save its output in self.output. """ # Run the test # Here we are calling directly subprocess function as it is a bit # faster than using gnatpython.ex.Run logging.debug("RUN: %s" % " ".join(self.cmd_line)) fd = open(self.output, 'w') subprocess.call(self.cmd_line, cwd=self.work_dir + '/src', stdout=fd, bufsize=-1, stderr=subprocess.STDOUT) fd.close() def apply_output_filter(self, str_list): """Apply the output filters PARAMETERS str_list: a list of strings RETURN VALUE a list of string """ return self.output_filter.process(str_list) def set_output_filter(self): """Set output filters REMARKS output filters are applied both to expected output and test output before comparing them. """ self.output_filter = Filter() # General filters. Filter out CR and '.exe' and work_dir and replace # \ by / self.output_filter.append([r'\\', r'/']) self.output_filter.append([r'(.exe|\015)', r'']) self.output_filter.append([r'[^ \'"]*%s/src/' % os.path.basename(self.work_dir), r'']) def get_status_filter(self): """Get the status filters RETURN VALUE a list. Each element is a list containing two items. The first is a regexp, the second a dictionnary used to update self.result. REMARKS the return value will be used the following way. For each entry, if the test output match the regexp then we update self.result with its dictionnary. Only the first match is taken into account. 
""" result = [['Segmentation fault', {'result': 'CRASH', 'msg': 'Segmentation fault'}], ['Bus error', {'result': 'CRASH', 'msg': 'Bus error'}], ['Cputime limit exceeded', {'result': 'CRASH', 'msg': 'Cputime limit exceeded'}], ['Filesize limit exceeded', {'result': 'CRASH', 'msg': 'Filesize limit exceeded'}]] return result def analyze(self): """Compute test status REMARKS This method should set the final value of 'result' attribute """ # Retrieve the outputs and see if we match some of the CRASH or DEAD # patterns output = split_file(self.output, ignore_errors=True) if output: tmp = "\n".join(output) for pattern in self.get_status_filter(): if re.search(pattern[0], tmp): self.result.update(pattern[1]) break # If the test status has not been updated compare output with the # baseline if self.result['result'] == 'UNKNOWN': # Retrieve expected output expected = split_file(self.opt_results['OUT'], ignore_errors=True) # Process output and expected output with registered filters expected = self.apply_output_filter(expected) output = self.apply_output_filter(output) d = diff(expected, output) if d: logging.debug(d) self.result['result'] = 'DIFF' if len(expected) == 0: self.result['msg'] = 'unexpected output' else: self.result['msg'] = 'output' diff_file = open(self.diff_output, 'w') diff_file.write(d) diff_file.close() else: self.result = {'result': 'OK', 'msg': '', 'is_failure': False} self.result['is_failure'] = IS_STATUS_FAILURE[self.result['result']] # self.opt_results['XFAIL'] contains the XFAIL comment or False # The status should be set to XFAIL even if the comment is empty if self.opt_results['XFAIL'] != False: if self.result['result'] in ['DIFF', 'CRASH']: self.result.update({'result': 'XFAIL', 'msg': self.opt_results['XFAIL']}) elif self.result['result'] == 'OK': self.result.update({'result': 'UOK', 'msg': self.opt_results['XFAIL']}) def write_results(self): """Write results on disk REMARKS Write at least .result and maybe .out and .expected files in the result directory. 
""" echo_to_file(self.result_prefix + '.result', self.result['result'] + ':' + self.result['msg'] + '\n') if self.result['is_failure']: if os.path.isfile(self.opt_results['OUT']): cp(self.opt_results['OUT'], self.result_prefix + '.expected') if os.path.isfile(self.output): cp(self.output, self.result_prefix + '.out') if os.path.isfile(self.diff_output): cp(self.diff_output, self.result_prefix + '.diff') def read_result(self): """Read last result""" if os.path.exists(self.result_prefix + '.result'): with open(self.result_prefix + '.result') as f_res: return f_res.read().strip().split(':')[0] def clean(self): """Clean up working space REMARKS Clean any temporary files """ # Clean up before exiting rm(self.work_dir, True) def add_run_test_options(m): run_test_opts = m.create_option_group("Test driver options") run_test_opts.add_option( "-o", "--output-dir", dest="output_dir", metavar="DIR", default="./out", help="select output dir") run_test_opts.add_option( "--timeout", default='780', metavar="SECONDS", help="Default timeout") run_test_opts.add_option( "-d", "--discriminants", dest="discs", metavar="DISCS", default="ALL", help="set discriminants") run_test_opts.add_option( "-t", "--temp-dir", dest="tmp", metavar="DIR", default=Env().tmp_dir) run_test_opts.add_option( "-e", "--env-file", dest="env_file", metavar="FILE", default="load env file") run_test_opts.add_option( "--disable-cleanup", dest="enable_cleanup", action="store_false", default=True, help="disable cleanup of working space") run_test_opts.add_option( "-r", "--restricted-mode", dest="restricted_discs", metavar="DISCS", default=None, help="enable restricted mode") run_test_opts.add_option( '-f', '--failed-only', action="store_true", help="run failed only - skip the test is last result is OK") run_test_opts.add_option( '--use-basename', action='store_true', help="Use os.path.basename to get the real name of a test. " "Note that this will only work if you don't have two tests with " "the same name in your test directories") m.add_option_group(run_test_opts) python-gnatpython-54.orig/gnatpython/mainloop.py0000644000175000017500000006356211654532470021741 0ustar xavierxavier ############################################################################ # # # MAINLOOP.PY # # # # Copyright (C) 2008 - 2011 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """Generic loop for testsuites This package provides a class called MainLoop that provides a generic implementation of a testsuite main loop. Parallelism, abortion and time control are the key features. Each MainLoop instance controls a set of Workers whose number is set by the user. The list of tasks/tests to be achieved by the workers, is provided by a list. The mainloop distribute the elements to the the workers when they have nothing to do. Usually, an element is a string identifying the test to be run. 
An element can also be a list in that case the worker will execute sequentially each "subelement". This case is used to adress dependency between tests (occurs for example with the ACATS). When a worker is asked to run a test, the command is executed by calling run_testcase (testid). Once a test is finished the function collect_result will be called with test id, and process (a gnatpython.ex.Run object) and the job_info as parameters. Both run_testcase and collect_result are user defined functions. Note also that from the user point view there is no parallelism to handle. The two user defined function run_testcase and collect_result are called sequentially. """ from time import sleep, strftime import logging import os import re import sys from gnatpython.env import Env from gnatpython.ex import Run from gnatpython.fileutils import (echo_to_file, FileUtilsError, mkdir, mv, rm, split_file) logger = logging.getLogger('gnatpython.mainloop') SKIP_EXECUTION = -1 # Ask the mainloop to stop execution for this test # See MainLoop documentation class NeedRequeue (Exception): """Raised by collect_result if a test need to be requeued""" pass class Worker (object): """Run run_testcase and collect_result""" def __init__(self, items, run_testcase, collect_result, slot): """Worker constructor PARAMETERS items: item or list of items to be run by the worker run_testcase: command builder function (see MainLoop doc) collect_result: result processing function (see MailLoop doc) RETURN VALUE a Worker instance REMARKS None """ self.run_testcase = run_testcase self.collect_result = collect_result self.slot = slot # Count the number of retry for the current test self.nb_retry = 0 if isinstance(items, list): items.reverse() self.jobs = items else: self.jobs = [items] logger.debug('Init worker %d with %r' % (self.slot, self.jobs)) self.current_process = None self.current_job = None self.execute_next() def execute_next(self): """Execute next worker item/test PARAMETERS None RETURN VALUE return False if the worker has nothing to do. True if a test is launched. REMARKS None """ if len(self.jobs) == 0: return False else: self.current_job = self.jobs.pop() job_info = (self.slot, self.nb_retry) self.current_process = self.run_testcase(self.current_job, job_info) return True def poll(self): """Test if a test/item is still executing PARAMETERS None RETURN VALUE True if busy, False otherwise. REMARKS None """ if self.current_process == SKIP_EXECUTION: # Test not run by run_testcase # Call directly wait() self.wait() return False else: if self.current_process.poll() is not None: # Current process has finished self.wait() return False else: return True def wait(self): """Wait for a test/item to finish PARAMETERS None RETURN VALUE None REMARKS The collect_result function is called upon test/item termination """ if self.current_process != SKIP_EXECUTION: self.current_process.wait() try: job_info = (self.slot, self.nb_retry) self.collect_result(self.current_job, self.current_process, job_info) self.current_job = None self.current_process = None except NeedRequeue: # Reinsert the current job in the job list self.nb_retry += 1 self.jobs.append(self.current_job) class MainLoop (object): """Run a list of jobs""" def __init__(self, item_list, run_testcase, collect_result, parallelism=None, abort_file=None, dyn_poll_interval=True): """Launch loop PARAMETERS item_list: a list of jobs run_testcase: a function that takes a job for argument and return the spawned process (ex.Run object). 
Its prototype should be func (name, job_info) with name the job identifier and job_info the related information, passed in a tuple (slot_number, job_retry) Note that if you want to take advantage of the parallelism the spawned process should be launched in background (ie with bg=True when using ex.Run) If run_testcase returns SKIP_EXECUTION instead of an ex.Run object the mainloop with directly call collect_result without waiting. collect_result: a function called when a job is finished. The prototype should be func (name, process, job_info). If collect_result raise NeedRequeue then the test will be requeued. job_info is a tuple: (slot_number, job_nb_retry) parallelism: number of workers abort_file: If specified, the loop will abort if the file is present dyn_poll_interval: If True the interval between each polling iteration is automatically updated. Otherwise it's set to 0.1 seconds. RETURN VALUE a MainLoop instance REMARKS None """ e = Env() self.parallelism = e.get_attr("main_options.mainloop_jobs", default_value=1, forced_value=parallelism) self.abort_file = e.get_attr("main_options.mainloop_abort_file", default_value=None, forced_value=abort_file) logger.debug("start main loop with %d workers (abort on %s)" % (self.parallelism, self.abort_file)) self.workers = [None] * self.parallelism iterator = item_list.__iter__() active_workers = 0 max_active_workers = self.parallelism poll_sleep = 0.1 try: while True: # Check for abortion if self.abort_file is not None and \ os.path.isfile(self.abort_file): logger.info('Aborting: file %s has been found' % self.abort_file) self.abort() return # Exit the loop # Find free workers for slot, worker in enumerate(self.workers): if worker is None: # a worker slot is free so use it for next job next_job = iterator.next() self.workers[slot] = Worker(next_job, run_testcase, collect_result, slot) active_workers += 1 poll_counter = 0 logger.debug('Wait for free worker') while active_workers >= max_active_workers: # All worker are occupied so wait for one to finish poll_counter += 1 for slot, worker in enumerate(self.workers): # Test if the worker is still active and have more # job pending if not (worker.poll() or worker.execute_next()): # If not the case free the worker slot active_workers -= 1 self.workers[slot] = None sleep(poll_sleep) if dyn_poll_interval: poll_sleep = compute_next_dyn_poll(poll_counter, poll_sleep) except StopIteration: # All the tests are finished. 
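            # No more jobs to distribute: keep polling the workers that are
            # still busy (letting them run any queued sub-items via
            # execute_next) until every slot is free again.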
while active_workers > 0: for slot, worker in enumerate(self.workers): if not (worker is None or worker.poll() or worker.execute_next()): active_workers -= 1 self.workers[slot] = None sleep(0.1) def abort(self): """Abort the loop""" if self.abort_file is not None and os.path.isfile(self.abort_file): for worker in self.workers: if worker is not None: worker.wait() def generate_collect_result(result_dir=None, results_file=None, output_diff=False, use_basename=True, metrics=None, options=None): """Generate a collect result function The generated collect_result function is known to work with gnatpython default test driver: gnatpython.testdriver.TestRunner PARAMETERS result_dir: directory containing test results, if None use options.output_dir results_file: file containing the list of test status, if None use options.results_file output_diff: if True, output the .diff in case of failure (useful when debugging) use_basename: if True use the test basename to get the test name else use the relative path metrics: to collect metrics, just pass an empty dictionary or a dictionary containing a key named 'total' with an integer value equal to the number of test to run options: test driver and Main options NOTES When collecting metrics, a file named status will be created in result_dir and will contain some metrics """ # Set result_dir and results_file if needed if options is not None and result_dir is None: result_dir = options.output_dir if results_file is None: results_file = options.results_file # Save the startup time start_time_str = strftime('%Y-%m-%d %H:%M:%S') if metrics is not None: for m in ('run', 'failed', 'crashed', 'new_failed', 'new_crashed'): metrics[m] = 0 for m in ('old_diffs', 'old_crashes'): if m not in metrics: metrics[m] = [] if not 'total' in metrics: metrics['total'] = 0 DIFF_STATUS = ('DIFF', 'FAILED', 'PROBLEM') CRASH_STATUS = ('CRASH', ) XFAIL_STATUS = ('XFAIL', ) # Compute old metrics if needed if hasattr(options, 'old_output_dir') \ and options.old_output_dir is not None: old_results = [k.split(':') for k in split_file( os.path.join(options.old_output_dir, 'results'), ignore_errors=True)] if 'old_diffs' not in metrics: metrics['old_diffs'] = [ k[0] for k in old_results if k[1] in DIFF_STATUS] if 'old_crashes' not in metrics: metrics['old_crashes'] = [ k[0] for k in old_results if k[1] in CRASH_STATUS] def collect_result(name, process, _job_info): """Default collect result function Read .result and .note file in {result_dir}/{test_name} dir Then append result to {result_file} If output_diff is True, print the content of .diff files REMARKS: name should be the path to the test directory """ if metrics is not None: # Increment number of run tests metrics['run'] += 1 if use_basename: test_name = os.path.basename(name) else: test_name = os.path.relpath(name, os.getcwd()) try: test_result = split_file( result_dir + '/' + test_name + '.result')[0] except FileUtilsError: test_result = 'CRASH: cannot read result file' test_note = split_file(result_dir + '/' + test_name + '.note', ignore_errors=True) if not test_note: test_note = "" else: test_note = test_note[0] # Append result to results file echo_to_file(results_file, "%s:%s %s\n" % (test_name, test_result, test_note), append=True) if metrics is not None: diffs_format = options.diffs_format if hasattr( options, 'diffs_format') else None # Set last test name metrics['last'] = test_name # Update metrics and diffs or xfail_diffs file diffs_file = os.path.join(result_dir, 'diffs') xfail_diffs_file = os.path.join(result_dir, 
'xfail_diffs') test_status = test_result.split(':')[0] if test_status in DIFF_STATUS: metrics['failed'] += 1 if test_name not in metrics['old_diffs']: metrics['new_failed'] += 1 get_test_diff(result_dir, test_name, test_note, test_result, diffs_file, diffs_format) elif test_status in CRASH_STATUS: metrics['crashed'] += 1 if test_name not in metrics['old_crashes']: metrics['new_crashed'] += 1 get_test_diff(result_dir, test_name, test_note, test_result, diffs_file, diffs_format) elif test_status in XFAIL_STATUS: get_test_diff(result_dir, test_name, test_note, test_result, xfail_diffs_file, diffs_format) # Update global status s = [] if "JOB_ID" in os.environ: s.append("%s running tests since %s\n" % ( os.environ['JOB_ID'], start_time_str)) s.append("%(run)s out of %(total)s processed (now at %(last)s)" % metrics) s.append("%(new_failed)s new potential regression(s)" " among %(failed)s" % metrics) s.append("%(new_crashed)s new crash(es) among %(crashed)s" % metrics) echo_to_file(os.path.join(result_dir, 'status'), '\n'.join(s) + '\n') if process != SKIP_EXECUTION: # else the test has been skipped. No need to print its status. if test_result.startswith( 'DIFF') or test_result.startswith('CRASH'): logging_func = logging.error else: logging_func = logging.info logging_func("%-30s %s %s" % (test_name, test_result, test_note)) if output_diff: diff_filename = result_dir + '/' + test_name + '.diff' if os.path.exists(diff_filename): with open(diff_filename) as diff_file: logging_func(diff_file.read().strip()) return collect_result def generate_run_testcase(driver, discs, options, use_basename=True): """Generate a basic run_test command PARAMETERS driver: test script to run discs: list of discriminants options: test driver and Main options use_basename: if True use the test basename to get the test name else use the relative path """ def run_testcase(test, job_info): """Run the given test See mainloop documentation """ skip_if_ok = hasattr(options, 'skip_if_ok') and options.skip_if_ok skip_if_run = hasattr(options, 'skip_if_already_run') and options.skip_if_already_run skip_if_dead = hasattr( options, 'skip_if_dead') and options.skip_if_dead result_dir = options.output_dir if skip_if_ok or skip_if_run or skip_if_dead: try: if use_basename: test_name = os.path.basename(test) else: test_name = os.path.relpath(test, os.getcwd()) old_result_file = os.path.join( result_dir, test_name + '.result') if os.path.exists(old_result_file): if skip_if_run: return SKIP_EXECUTION old_result = split_file(old_result_file)[0].split(':')[0] if skip_if_ok and old_result in ('OK', 'UOK', 'PASSED'): return SKIP_EXECUTION if skip_if_dead and old_result == 'DEAD': return SKIP_EXECUTION except FileUtilsError: logging.debug("Cannot get old result for %s" % test) pass # VxWorks tests needs WORKER_ID to be set in order to have an id for # vxsim that will not collide with other instances. 
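        # job_info[0] is the worker slot number (see MainLoop), so with for
        # instance 4 parallel jobs the spawned tests see WORKER_ID values
        # "0" to "3", one per simultaneously running vxsim instance.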
os.environ['WORKER_ID'] = str(job_info[0]) cmd = [sys.executable, driver, '-d', ",".join(discs), '-o', result_dir, '-t', options.tmp, test] if options.verbose: cmd.append('-v') if hasattr(options, 'host'): if options.host: cmd.append('--host=' + options.host) if options.target: cmd.append('--target=' + options.target) if not options.enable_cleanup: cmd.append('--disable-cleanup') if options.failed_only: cmd.append('--failed-only') if options.timeout: cmd.append('--timeout=' + options.timeout) if options.use_basename: cmd.append('--use-basename') return Run(cmd, bg=True, output=None) return run_testcase def setup_result_dir(options): """Save old results and create new result dir PARAMETERS options: test driver and Main options. This dictionary will be modified in place to set: `results_file`, the path to the results file, `report_file`, the path to the report file. Note that `output_dir` and `old_output_dir` might be modified if keep_old_output_dir is True NOTES Required options are `output_dir`, `keep_old_output_dir`, `old_output_dir`, `skip_if_ok` and `skip_if_already_run`. Where: - output_dir: directory containing test result - keep_old_output_dir: if True, move last results in old_output_dir - old_output_dir:directory where the last results are kept. Note that if old_output_dir is None, and keep_old_output_dir is True, the last tests results will be moved in output_dir/old and the new ones in output_dir/new - skip_if_ok, skip_if_already_run: if one of these options is set to True, then just remove the results file. """ output_dir = options.output_dir if options.keep_old_output_dir and options.old_output_dir is None: options.old_output_dir = os.path.join(output_dir, 'old') options.output_dir = os.path.join(output_dir, 'new') options.results_file = os.path.join(options.output_dir, 'results') options.report_file = os.path.join(options.output_dir, 'report') if options.skip_if_ok or options.skip_if_already_run: # Remove only the results file rm(options.results_file) else: if not options.keep_old_output_dir: # We don't want to keep old results. Just clean the new output_dir if os.path.exists(options.output_dir): rm(options.output_dir, True) else: # Move output_dir to old_output_dir if os.path.exists(options.old_output_dir): rm(options.old_output_dir, True) if os.path.exists(options.output_dir): mv(options.output_dir, options.old_output_dir) else: mkdir(options.old_output_dir) mkdir(options.output_dir) def compute_next_dyn_poll(poll_counter, poll_sleep): """Adjust the polling delay""" # if two much polling is done, the loop might consume too # much resources. In the opposite case, we might wait too # much to launch new jobs. Adjust accordingly. 
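    # Starting from the default 0.1s interval: an iteration that needed more
    # than 8 polls multiplies it by 1.25 (0.1s -> 0.125s -> 0.156s ...) as
    # long as it is below 1.0s, while a quieter iteration multiplies it by
    # 0.75 (0.1s -> 0.075s -> 0.056s ...).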
if poll_counter > 8 and poll_sleep < 1.0: poll_sleep *= 1.25 logger.debug('Increase poll interval to %f' % poll_sleep) elif poll_sleep > 0.0001: poll_sleep *= 0.75 logger.debug('Decrease poll interval to %f' % poll_sleep) return poll_sleep def get_test_diff( result_dir, name, note, result_str, filename, diffs_format): """Update diffs and xfail_diffs files PARAMETERS name: test name note: annotation result_str: content of the test .result file filename: file to update RETURN VALUE None """ result = ["================ Bug %s %s" % (name, note)] if diffs_format == 'diff': result += split_file(result_dir + '/' + name + '.diff', ignore_errors=True)[0:2000] else: if re.match("DIFF:unexpected", result_str): result.append("---------------- unexpected output") result += split_file(result_dir + '/' + name + '.out', ignore_errors=True)[0:100] elif re.match("CRASH:", result_str): result.append("---------------- unexpected output") result += split_file(result_dir + '/' + name + '.out', ignore_errors=True)[0:30] elif re.match("DIFF:output|XFAIL:|FAILED:|PROBLEM:", result_str): result.append("---------------- expected output") result += split_file(result_dir + '/' + name + '.expected', ignore_errors=True)[0:2000] result.append("---------------- actual output") result += split_file(result_dir + '/' + name + '.out', ignore_errors=True) echo_to_file(filename, result, append=True) def add_mainloop_options(main, extended_options=False): """Add command line options to control mainloop default PARAMETERS main : a gnatpython.main.Main instance extended_options: if True, add additional options that require using the gnatpython testdriver and the generate_run_testcase, generate_collect_result functions. RETURN VALUE None """ mainloop_opts = main.create_option_group("Mainloop control""") mainloop_opts.add_option( "-j", "--jobs", dest="mainloop_jobs", type="int", metavar="N", default=1, help="Specify the number of jobs to run simultaneously") mainloop_opts.add_option( "--abort-file", dest="mainloop_abort_file", metavar="FILE", default="", help="Specify a file whose presence cause loop abortion") if extended_options: mainloop_opts.add_option( "--skip-if-ok", action="store_true", default=False, help="If the test result is found and is OK skip the test") mainloop_opts.add_option( "--skip-if-dead", action="store_true", default=False, help="If the test result is found and is DEAD skip the test") mainloop_opts.add_option( "--skip-if-already-run", action="store_true", default=False, help="If the test result is found skip the test") mainloop_opts.add_option( "--old-output-dir", dest="old_output_dir", metavar="DIR", default=None, help="Select old output dir") mainloop_opts.add_option( "--keep-old-output-dir", dest="keep_old_output_dir", action="store_true", help="Keep old output dir. Note that if --old-output-dir is not" " set, the old output dir will be stored in OUTPUT_DIR/old and" " the new test outputs in OUTPUT_DIR/new") main.add_option_group(mainloop_opts) python-gnatpython-54.orig/gnatpython/logging_util.py0000644000175000017500000001604211654532470022575 0ustar xavierxavier ############################################################################ # # # LOGGING_UTIL.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. 
# # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """Extensions to the standard python logging system """ from logging import (addLevelName, StreamHandler, FileHandler, Filter, Formatter, getLogger, DEBUG, codecs) from gnatpython.env import Env import os import types # Define a new log level for which level number is lower then DEBUG RAW = 5 # Register the new level name addLevelName(RAW, 'RAW') COLOR_UNCHANGED = -1 COLOR_BLACK = 0 COLOR_RED = 1 COLOR_GREEN = 2 COLOR_YELLOW = 3 COLOR_BLUE = 4 COLOR_MAGENTA = 5 COLOR_CYAN = 6 COLOR_WHITE = 7 def highlight(string, fg=COLOR_UNCHANGED, bg=COLOR_UNCHANGED): """Return a version of string with color highlighting applied to it. This is suitable for display on a console. Nothing is done if color has been disabled """ if not Env().main_options.enable_color: return string else: if bg == COLOR_UNCHANGED: colors = "%d" % (30 + fg,) elif fg == COLOR_UNCHANGED: colors = "%d" % (40 + fg,) else: colors = "%d;%d" % (40 + bg, 30 + fg) return "\033[%sm%s\033[m" % (colors, string) class RawFilter (Filter): """Filter in/out RAW level records""" def __init__(self, include_raw=True): """RawFilter constructor PARAMETERS include_raw: if True then keep only RAW level records. If False discard RAW level record RETURN VALUE a Filter instance REMARKS None """ Filter.__init__(self) if include_raw: self.include_raw = 1 else: self.include_raw = 0 def filter(self, record): """Filter implementation (internal) PARAMETERS record: a record to be filtered RETURN VALUE 1 if we keep the record, 0 otherwise REMARKS This function should not be called directly by the user """ if record.levelno <= RAW: return self.include_raw else: return 1 - self.include_raw class RawStreamHandler(StreamHandler): """Logging system handler for 'raw' logging on streams """ def flush(self): """Flushes the stream. """ # In some cases instances of RawStreamHandler might share the same fd # as other StreamHandler. As we don't control the order in which these # objects will be finalized, we might try to flush an already closed # stream. That's why we protect the flush call with a try/except # statement try: self.stream.flush() except ValueError: return def emit(self, record): """Emit a record. If a formatter is specified, it is used to format the record. The record is then written to the stream with a trailing newline [N.B. this may be removed depending on feedback]. If exception information is present, it is formatted using traceback.print_exception and appended to the stream. """ try: msg = self.format(record) fs = "%s" if not hasattr(types, "UnicodeType"): #if no unicode support... 
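                # Interpreter built without unicode support: write the
                # formatted record as-is.  Otherwise (below) try the plain
                # write first and fall back to an explicit UTF-8 encoding on
                # UnicodeError.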
self.stream.write(fs % msg) else: try: self.stream.write(fs % msg) except UnicodeError: self.stream.write(fs % msg.encode("UTF-8")) self.flush() except (KeyboardInterrupt, SystemExit): raise except: self.handleError(record) class RawFileHandler(RawStreamHandler): """Logging system handler for 'raw' logging on files Same as logging.FileHandler except that it inherits from RawStreamHandler instead of StreamHandler """ def __init__(self, filename, mode='a', encoding=None): """Handler constructor """ if codecs is None: encoding = None if encoding is None: stream = open(filename, mode) else: stream = codecs.open(filename, mode, encoding) RawStreamHandler.__init__(self, stream) #keep the absolute path, otherwise derived classes which use this #may come a cropper when the current directory changes self.baseFilename = os.path.abspath(filename) self.mode = mode def close(self): """Closes the file. """ self.flush() self.stream.close() StreamHandler.close(self) def add_handlers(level, format=None, filename=None): """Add handlers with support for 'RAW' logging""" # Case in which we add handler to the console handler = None raw_handler = None if filename is None: handler = StreamHandler() else: handler = FileHandler(filename) if format is not None: formatter = Formatter(format) handler.setFormatter(formatter) if level <= RAW: handler.setLevel(DEBUG) if filename is None: raw_handler = RawStreamHandler() else: raw_handler = RawStreamHandler(handler.stream) raw_handler.setLevel(RAW) raw_handler.addFilter(RawFilter()) getLogger('').addHandler(raw_handler) else: handler.setLevel(level) getLogger('').addHandler(handler) return (handler, raw_handler) def remove_handlers(handlers): """Remove handlers""" if handlers[1] is not None: getLogger('').removeHandler(handlers[1]) if handlers[0] is not None: getLogger('').removeHandler(handlers[0]) if hasattr(handlers[0], 'close'): handlers[0].close() python-gnatpython-54.orig/gnatpython/optfileparser.py0000644000175000017500000002407311654532470022774 0ustar xavierxavier ############################################################################ # # # OPTFILEPARSER.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """test.opt files processing This package provides a single class called OptFileParse which process the test.opt files as documented in AdaCore procedures. 
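A minimal, purely illustrative use (the discriminant list below is
hypothetical) looks like:

    from gnatpython.optfileparser import OptFileParse

    opt = OptFileParse('x86-linux,linux', 'test.opt')
    if not opt.is_dead:
        cmd = opt.get_value('cmd', 'test.cmd')
        out = opt.get_value('out', 'test.out')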
""" import re import logging import os.path OPTLINE_REGEXPS = re.compile("^([^\s]+)(\s+([a-zA-Z0-9_-]+)(\s+(.*))?)?$") # Regexp that matches valid lines in test.opt files class BadFormattingError(Exception): """Raised when an input line is not correctly formatted""" pass TAGS = 0 ARG = 1 OVERIDABLE = 2 logger = logging.getLogger("gnatpython.optfileparser") class OptFileParse(object): """ ATTRIBUTES system_tags: the list of tags applied to the test.opt is_dead : True if the test should be considered DEAD, False otherwise """ def __init__(self, system_tags, filename): """Parse a test.opt file PARAMETERS system_tags: either a list of tags or a string containing the list of tags separated by commas filename: the test.opt to be parsed. If this is a string then the argument is a filename otherwise if this is a list we consider it is the content of the .opt file RETURN VALUE an OptFileParse object REMARKS None """ if isinstance(system_tags, str): self.system_tags = system_tags.lower().split(',') else: self.system_tags = [] for tag in system_tags: self.system_tags.append(tag.lower()) # Append 'all' to system tags if not 'all' in system_tags: self.system_tags.append('all') self.is_dead = False self.__note = None self.__enable_note = False self.__matches = {} self.__parse_file(filename) def get_value(self, cmd, default_value=None): """Query on the parsing result PARAMETERS cmd: the command on which we do the query (ex: dead, cmd, out, ...) default_value: value returned by default RETURN VALUE a string REMARKS by default the query will return '' if there is no entry for the selected command. """ cmd = cmd.lower() if self.is_dead: if cmd == 'dead': return self.__matches[cmd][ARG] else: return default_value if cmd in self.__matches and cmd != 'dead': return self.__matches[cmd][ARG] else: return default_value def get_values(self, default_values): """Query on the parsing result PARAMETERS default_value: a dictionnary for which keys are the commands on which we do the query and the associated value the default values. RETURN VALUE a dictionnary containing the resulting value for each command REMARKS Doing get_values ({'CMD' : 'test.cmd', 'OUT' : 'test.out'}) is equivalent to do get_value ('CMD', 'test.cmd') and then get_value ('OUT', 'test.out') """ result = {} for key in default_values: result[key] = self.get_value(key, default_values[key]) return result def get_note(self, sep=None): """Get the note PARAMETERS sep: string used to join the activating tags. Default is ','. If '' is specified then a list is returned. RETURN VALUE a string (list of tags responsible for the activation of the test) is sep is not '' or a list. 
REMARKS If there is no note then '' or [] is returned depending on the sep value """ if sep is None: sep = ',' if len(sep) == 0: if self.__note is not None and not self.is_dead: return self.__note[TAGS] else: return [] else: if self.__note is not None and not self.is_dead: return ",".join(self.__note[TAGS]) else: return '' # INTERNAL FUNCTIONS def __process_opt_line(self, line): """process one line of a test.opt type file The format of each line is the following: tag1,tag2,tag3,...,tagN [COMMAND [PARAMETERS]] -if no COMMAND is given then we assume that the command is 'DEAD false' """ # Remove comments (Ada style) and trailing white characters processed_line = re.sub("--.*$", "", line).rstrip() # Line contains only comments and/or white characters so ignore it if not processed_line: return m = OPTLINE_REGEXPS.match(processed_line) if m is None: raise BadFormattingError("Can not parse line: " + line) # find command, tags and argument tags = m.group(1).split(',') cmd = "" arg = "" if m.group(3): # Check for command cmd = m.group(3).lower() if m.group(4): # Get optional argument arg = m.group(5) else: # If no command is set then the implicit command is: dead="false" cmd = "dead" arg = "false" if arg == '' and cmd == 'dead': arg = 'true' # Enable note only if a dead all is encountered if arg != 'false' and cmd == 'dead' and self.__is_all(tags): self.__enable_note = True if cmd != 'required' and self.__match(tags): logger.debug('match: ' + cmd + ', tags=' + '%s' % tags) if self.__is_overidable(cmd): self.__matches[cmd] = (tags, arg, self.__is_all(tags)) if not self.__is_dead_cmd(cmd) \ and (self.__note is None or self.__matches[cmd][OVERIDABLE]): self.__note = self.__matches[cmd] elif cmd == 'required' and not self.__match(tags): self.__matches['required'] = (tags, arg, False) def __is_overidable(self, cmd): return not cmd in self.__matches \ or self.__matches[cmd][OVERIDABLE] @classmethod def __is_all(cls, tag_list): return len(tag_list) == 1 and tag_list[0].lower() == 'all' def __is_dead_cmd(self, cmd): return cmd == 'dead' \ and 'dead' in self.__matches \ and self.__matches['dead'][ARG] != 'false' def __match(self, tag_list): """Match tags against the system tags. 
True if all non-negated tags and none of the negated tags in the given list are present in system tags.""" for tag in tag_list: if not tag.startswith("!"): # If tag is non-negated, it must be present in system tags if not (tag.lower() in self.system_tags): return False else: # If tag is negated, it must be absent from system tags if tag[1:].lower() in self.system_tags: return False return True def __parse_file(self, filename): have_opt_data = False if isinstance(filename, list): for line in filename: self.__process_opt_line(line) have_opt_data = True elif os.path.isfile(filename): optfile = open(filename, "r") for line in optfile: self.__process_opt_line(line) optfile.close() have_opt_data = True if have_opt_data: if 'required' in self.__matches: self.__matches['dead'] = self.__matches['required'] self.is_dead = True elif self.__note is not None: self.is_dead = False elif self.__is_dead_cmd('dead'): self.is_dead = True else: self.is_dead = False if (self.__note is not None and self.__note[OVERIDABLE]) \ or not self.__enable_note: self.__note = None def __str__(self): result = '' if self.is_dead: result += 'dead="' + \ re.sub('"', '\\"', self.__matches['dead'][ARG]) + '"\n' else: for k in self.__matches: if k != 'dead': result += k + '="' + \ re.sub('"', '\\"', self.__matches[k][ARG]) + '"\n' if self.__note is not None: result += 'activating_tag="%s' % \ re.sub('"', '\\"', ",".join(self.__note[TAGS])) + '"\n' result = result.rstrip() return result python-gnatpython-54.orig/gnatpython/expect.py0000644000175000017500000001651511654532470021407 0ustar xavierxavier ############################################################################ # # # EXPECT.PY # # # # Copyright (C) 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ import _term import os import re import datetime from time import sleep from gnatpython.fileutils import which EXPECT_TIMEOUT = -2 EXPECT_DIED = -3 class ExpectError (Exception): """Expect exception""" def __init__(self, cmd, msg): Exception.__init__(self, cmd, msg) self.cmd = cmd self.msg = msg def __str__(self): return "%s: %s" % (self.cmd, self.msg) class ExpectProcess(object): """Expect Main class ATTRIBUTES command_line: list of strings containg the command line used to spawn the process. status: The return code. None while the command is still running, and an integer after method "close" has been called. """ def __init__(self, command_line): """Constructor PARAMETERS command_line: A list of strings representing the command line to be spawned. 
RETURN VALUE A ExpectProcess object """ # Convert the command line to a list of string is needed command_line = [str(arg) for arg in command_line] if len(command_line) < 1: raise ExpectError('__init__', 'expect a non empty list as argument') command_line[0] = which(command_line[0]) # Store the command line used self.command_line = command_line # Spawn the process (self.input, self.output, self.error, self.pid, self.handler) = \ _term.non_blocking_spawn(tuple(command_line)) # Initialize our buffer self.buffer = "" # Keep the state of the process self.process_is_dead = False # This is where we store that last successful expect result self.last_match = None # This is where the command returned status will be stored # when the command has exited. For the moment, it is not # available. self.status = None def __poll(self, timeout): result = _term.poll((self.output, ), timeout) if result[0] > 0: read_status = _term.read(self.output, 16384) if read_status[0] > 0: self.buffer += read_status[1] elif read_status[0] < 0: self.process_is_dead = True def flush(self): """Flush all the output generated by the process up to the call to this method.""" self.__poll(0) self.buffer = "" def sendline(self, msg): return self.send(msg + '\n') def send(self, msg, add_lf=True, flush_buffer=False): """Send a msg to the program PARAMETERS msg: a string RETURN VALUE Return 1 if OK, 0 otherwise. """ if self.handler is None: raise ExpectError('send', 'process has been closed') if add_lf: msg += '\n' if flush_buffer: self.flush() write_status = _term.write(self.input, msg) if write_status < 0: return 0 else: return 1 def expect(self, patterns, timeout): if self.handler is None: raise ExpectError('expect', 'process has been closed') match = None result = 0 expect_start = datetime.datetime.utcnow() time_left = int(timeout * 1000.0) while match is None and time_left > 0: # Do we have a match with the current output for index, pattern in enumerate(patterns): match = re.search(pattern, self.buffer) if match is not None: result = index break if match is not None: break else: # We don't have a match so poll for new output self.__poll(time_left) if self.process_is_dead: return EXPECT_DIED # update time_left. # The update is done only if current time is superior to time # at which the function started. This test might seem a bit # weird but on some Linux machines on VmWare we have found # huge clock drift that the system tries to compensate. The # consequence is that we cannot assume that the clock is # monotonic. current_time = datetime.datetime.utcnow() if current_time > expect_start: time_spent = (current_time - expect_start) time_left = int(timeout * 1000.0) - \ (time_spent.seconds * 1000 + \ time_spent.microseconds / 1000) if match is not None: self.last_match = (result, self.buffer[:match.start(0)], match) self.buffer = self.buffer[match.end(0):] return result if time_left < 0: return EXPECT_TIMEOUT def out(self): if self.last_match is None: return ("", "") else: return (self.last_match[1], self.last_match[2].group(0)) def close(self): """If the underlying process is not dead yet, kill it. 
Set the status attribute to the command return code.""" if self.handler is not None: self.interrupt() sleep(0.05) _term.terminate(self.handler) self.status = _term.waitpid(self.handler) self.handler = None def interrupt(self): if not self.process_is_dead and self.handler is not None: _term.interrupt(self.handler) def set_timer(self, delay): self.timer_end = datetime.datetime.utcnow() + \ datetime.timedelta(seconds=delay) def has_timer_expired(self): if self.timer_end < datetime.datetime.utcnow(): return True else: return False python-gnatpython-54.orig/gnatpython/reports.py0000644000175000017500000003007611654532470021613 0ustar xavierxavier ############################################################################ # # # REPORTS.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """This package contains various classes to manipulate data generated by the testsuites. We assume a testsuite result is contained in a directory. The directory should have the following structure:: root_dir/ results (contains the test results) .note (a note associated with test - optional) .out (actual test output - optional) .expected (expected test output - optional) The format of the results file is the following:: TEST1_NAME:STATUS[:MESSAGE] TEST2_NAME:STATUS[:MESSAGE] ... STATUS is one of following string:: DIFF, PROBLEM, FAILED, CRASH, INVALID_TEST, XFAIL, SKIP, UOK, DEAD, PASSED, OK, NOT-APPLICABLE, TENTATIVELY_PASSED MESSAGE is an optional message """ from gnatpython.main import Main from gnatpython.fileutils import split_file, FileUtilsError import os.path # This following lists regroup the status in different categories. A given # status might appear in several categories. For convenience, the following # code is using only categories, so that adding new test status result only # in adding it in the following lists. FAIL = ['DIFF', 'PROBLEM', 'FAILED'] CRASH = ['CRASH'] INVALID = ['INVALID_TEST'] XFAIL = ['XFAIL', 'SKIP'] UPASS = ['UOK'] DEAD = ['DEAD'] PASS = ['PASSED', 'OK', 'NOT-APPLICABLE', 'TENTATIVELY_PASSED'] FAIL_OR_CRASH = CRASH + FAIL NON_DEAD = PASS + UPASS + XFAIL + INVALID + FAIL + CRASH class TestResult(object): """Class that holds test information ATTRIBUTES dir: result directory. 
We expect to find all test related file in it name: test name status: test status msg: associated message """ def __init__(self, dir, name, status, msg): """TestResult constructor PARAMETERS dir: result directory name: the test name status: test status msg: test message RETURN VALUE a TestResult object """ self.dir = dir self.name = name self.status = status self.msg = msg def __get_file(self, ext): """Internal function that retrieves a file associated with a test PARAMETERS ext: extension of the file to look at RETURN VALUE the content of the file or None """ filename = self.dir + '/' + self.name + ext if os.path.isfile(filename): fd = open(filename, 'rb') result = fd.read() fd.close() return result else: return None def get_note(self): """Retrieve the note associated with the test PARAMETERS None RETURN VALUE A string. If there is no note associated with the test, the null string is returned """ note = self.__get_file('.note') if note is None: return '' else: return note.strip() def get_expected_output(self): """Retrieve test actual output PARAMETERS None RETURN VALUE test actual output or None """ return self.__get_file('.expected') def get_actual_output(self): """Retrieve test expected output PARAMETERS None RETURN VALUE test expected output or None """ return self.__get_file('.out') def __str__(self): return '%s:%s:%s' % (self.name, self.status, self.msg) class Report(object): """Class that holds a complete testsuite result""" def __init__(self, dir): """Report constructor PARAMETERS dir: the directory that contains the testsuite results RETURN VALUE a Report Object """ self.dir = dir self.result_db = {} if self.dir is not None: assert os.path.isdir(self.dir), "invalid result directory" result_list = split_file(dir + '/results') result_list = (k.split(':', 2) for k in result_list) for item in result_list: msg = '' if len(item) > 2: msg = item[2] self.result_db[item[0]] = \ TestResult(self.dir, item[0], item[1], msg) def select(self, kind=None, return_set=False): """Retrieve the list/set of tests that match a given list of status PARAMETERS kind: None or a list of status. If None the complete list of test names will be returned return_set: if True then a set object is returned. Otherwise a sorted list RETURN VALUE a list or a set of test names """ result = None if kind is None: result = set([k for k in self.result_db]) else: result = set([k for k in self.result_db \ if self.result_db[k].status in kind]) if not return_set: result = list(result) result.sort() return result def test(self, name): """Get a test object from the testsuite PARAMETERS name: name of the test we want to retrieve RETURN VALUE a TestResult object """ return self.result_db[name] # A few constant use by ReportDiff.select method IN_ONE = 0 IN_BOTH = 1 IN_NEW_ONLY = 2 IN_OLD_ONLY = 3 class ReportDiff(object): """Class that allows comparison between two testsuite reports ATTRIBUTES new: a Report object that contains the new results old: a Report object that contains the old results """ def __init__(self, dir, old_dir=None): """ReportDiff constructor PARAMETERS dir: the directory of the new testsuite result old_dir: the directory of the old testsuite result RETURN VALUE a ReportDiff object """ self.new = Report(dir) if old_dir is not None and not ( os.path.isdir(old_dir) and os.path.isfile(old_dir + '/results')): # There is no old report. Skip it. 
old_dir = None try: self.old = Report(old_dir) except FileUtilsError: self.old = Report(None) def select(self, kind=None, status=IN_BOTH, old_kind=None): """Do a query PARAMETERS kind: a list of status or None used to filter the tests in the new report status: a selector IN_ONE, IN_BOTH, IN_NEW_ONLY or IN_OLD_ONLY. old_kind: a list of status used to filter in the old report. If None the parameter kind is reused RETURN VALUE a sorted list of test names """ if old_kind is None: old_kind = kind new = self.new.select(kind, return_set=True) old = self.old.select(old_kind, return_set=True) result = None if status == IN_BOTH: result = new & old elif status == IN_NEW_ONLY: result = new - old elif status == IN_OLD_ONLY: result = old - new else: result = old | new return result def txt_image(self, filename): if isinstance(filename, str): fd = open(filename, 'wb+') else: fd = filename def output_list(tests, tmpl_msg, output_if_null=False, from_old=False): if len(tests) > 0 or output_if_null: tmpl_msg = '---------------- %d %s\n' % (len(tests), tmpl_msg) fd.write('\n') fd.write(tmpl_msg) for test in tests: if from_old: fd.write('%s\n' % self.old.test(test)) else: fd.write('%s\n' % self.new.test(test)) def test_diff(test_name): test_obj = self.new.test(test_name) test_note = test_obj.get_note() test_out = test_obj.get_actual_output() test_exp = test_obj.get_expected_output() fd.write("================ Bug %s %s\n" % (test_name, test_note)) if test_exp is None: fd.write('---------------- unexpected output\n') if test_out is not None: fd.write(test_out) else: fd.write('---------------- expected output\n') if test_exp is not None: fd.write(test_exp) fd.write('---------------- actual output\n') if test_out is not None: fd.write(test_out) constant = self.select(FAIL, IN_BOTH) xfail_tests = self.new.select(XFAIL) uok_tests = self.new.select(UPASS) fixed_tests = self.select(PASS, IN_BOTH, FAIL_OR_CRASH) invalid_tests = self.new.select(INVALID) new_dead_tests = self.select(DEAD, IN_BOTH, NON_DEAD) removed_tests = self.select(status=IN_OLD_ONLY) crash_tests = self.new.select(CRASH) dead_tests = self.new.select(DEAD) complete_tests = self.new.select() diff_tests = self.new.select(FAIL) new_regressions = self.select(FAIL_OR_CRASH, IN_NEW_ONLY) non_dead_tests = self.new.select(NON_DEAD) fd.write('Out of %d tests:\n' % len(complete_tests)) fd.write('%5d executed test(s) (non dead)\n' % len(non_dead_tests)) fd.write('%5d crash(es) detected\n' % len(crash_tests)) fd.write('%5d other potential regression(s)\n' % len(diff_tests)) fd.write('%5d expected regression(s)\n' % len(xfail_tests)) fd.write('%5d unexpected passed test(s)\n' % len(uok_tests)) fd.write('%5d invalid test(s)\n' % len(invalid_tests)) fd.write('%5d new dead test(s)\n' % len(new_dead_tests)) fd.write('%5d test(s) removed\n' % len(removed_tests)) output_list(new_regressions, 'new regression(s)', True) output_list(constant, 'already detected regression(s)', True) output_list(xfail_tests, 'expected regression(s)') output_list(uok_tests, 'unexpected passed test(s)') output_list(fixed_tests, 'fixed regression(s)') output_list(invalid_tests, 'invalid test(s)') output_list(new_dead_tests, 'new dead test(s)') output_list(removed_tests, 'test(s) removed', from_old=True) fd.write('\n---------------- differences in output\n') for test in self.new.select(FAIL): test_diff(test) if len(xfail_tests) > 0: fd.write('\n---------------- XFAIL differences in output\n') for test in xfail_tests: test_diff(test) if isinstance(filename, str): fd.close() 
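
A minimal usage sketch for the reports module above. The directory names new_results and old_results are hypothetical; each is expected to contain a results file in the TEST_NAME:STATUS[:MESSAGE] format described in the module docstring.

from gnatpython.reports import Report, ReportDiff, FAIL_OR_CRASH

# Inspect a single testsuite run: list the failing or crashing tests.
report = Report('new_results')
for name in report.select(FAIL_OR_CRASH):
    print report.test(name)          # prints "name:status:message"

# Compare two runs and write a plain-text summary (the same call that the
# echo_testsuite example below performs via ReportDiff.txt_image).
diff = ReportDiff('new_results', old_dir='old_results')
diff.txt_image('report.txt')
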
python-gnatpython-54.orig/gnatpython/env.py0000644000175000017500000003165511654532470020711 0ustar xavierxavier ############################################################################ # # # ENV.PY # # # # Copyright (C) 2008 - 2011 Ada Core Technologies, Inc. # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """Global environment handling This package provide a class called Env used to store global information. Env is a singleton so there is in fact only one instance. Here is a description of the functionalities provided by the class: * host, target information retrieval/setting: >>> from gnatpython.env import Env >>> e = Env () >>> print e.target platform: x86-linux machine: barcelona is_hie: False is_host: True triplet: i686-pc-linux-gnu OS name: linux version: suse10.3 is_bareboard: False CPU name: x86 bits: 32 endian: little >>> e.set_target ('ppc-vxw') >>> print e.target.os.name vxworks >>> print e.target.os.version 5.5 * make some info global >>> e = Env () >>> d = Env () >>> e.example_of_global_info = 'hello' >>> print d.example_of_global_info hello * restoring/saving complete environment, including environment variables and current dir >>> e = Env () >>> e.example = 'hello' >>> e.store ('./saved_env') >>> ^D $ gnatpython >>> from gnatpython.env import Env >>> e = Env () >>> e.restore ('./saved_env') >>> print e.example hello """ from gnatpython.arch import Arch import pickle import os def putenv(key, value): """Portable version of os.putenv""" # When a variable is set by os.putenv, os.environ is not updated; # this is a problem, as Env and ex.Run use os.environ to store the # current environment or to spawn a process. This limitation is # documented in the Python Library Reference: # environ # A mapping object representing the string environment. For # example, environ['HOME'] is the pathname of your home directory # (on some platforms), and is equivalent to getenv("HOME") in C. # # This mapping is captured the first time the os module is # imported, typically during Python startup as part of # processing site.py. Changes to the environment made after # this time are not reflected in os.environ, except for # changes made by modifying os.environ directly. # # If the platform supports the putenv() function, this # mapping may be used to modify the environment as well as # query the environment. putenv() will be called # automatically when the mapping is modified. Note: Calling # putenv() directly does not change os.environ, so it's # better to modify os.environ. # ...For this reason, os.environ is prefered to os.putenv here. os.environ[key] = value def getenv(key, default=None): """Portable version of os.getenv""" # For the reason documented in putenv, and for consistency, it is better # to use os.environ instead of os.getenv here. 
if key in os.environ: return os.environ[key] else: return default class Env (object): """Environment Handling ATTRIBUTES build: default system (autodetected) host: host system target: target system is_cross: true if we are in a cross environment REMARKS build, host and target attributes are instances of Arch class. Do pydoc gnatpython.arch for more information """ # class variable that holds the current environment __instance = {} # class variable that holds the stack of saved environments state __context = [] def __init__(self): """Env constructor PARAMETERS None RETURN VALUE A Env instance REMARKS On first instantiation, build attribute will be computed and host and target set to the build attribute. """ if 'build' not in Env.__instance: self.is_cross = False self.build = Arch() self.host = self.build self.target = self.host self.environ = None self.cwd = None self.main_options = None # Command line switches def __getattr__(self, name): return Env.__instance[name] def __setattr__(self, name, value): Env.__instance[name] = value def set_host(self, host_name=None, host_version=None): """Set host platform PARAMETERS host_name: a string that identify the system to be considered as the host. If None then host is set to the build one (the autodetected platform). host_version: a string containing the system version. If set to None the version is either a default or autodetected when possible RETURN VALUE None REMARKS None """ if host_name is not None: self.host = Arch(platform_name=host_name, is_host=True, version=host_version) else: self.host = self.build if self.target != self.host: self.is_cross = True else: self.is_cross = False def set_target(self, target_name=None, target_version=None, target_machine=None): """Set target platform PARAMETERS host_name: a string that identify the system to be considered as the host. If None then host is set to the host one. host_version: a string containing the system version. If set to None the version is either a default or autodetected when possible RETURN VALUE None REMARKS None """ if target_name is not None: self.target = Arch(platform_name=target_name, version=target_version, machine=target_machine) else: self.target = self.host if self.target != self.host: self.is_cross = True else: self.is_cross = False def get_attr(self, name, default_value=None, forced_value=None): """Return an attribute value PARAMETERS name: name of the attribute to check. Name can contain '.' default_value: returned value if forced_value not set and the attribute does not exist forced_value: if not None, this is the return value RETURN VALUE the attribute value REMARKS This function is useful to get the value of optional functions parameters whose default value might depend on the environment. """ if forced_value is not None: return forced_value attributes = name.split('.') result = self for a in attributes: if not hasattr(result, a): return default_value else: result = getattr(result, a) if result is None or result == "": return default_value return result def store(self, filename=None): """Save environment into memory or file PARAMETERS filename: a string containing the path of the filename in which the environement will be saved. If set to None the environment is saved into memory in a stack like structure. 
RETURN VALUE None """ # Store environment variables self.environ = os.environ.copy() # Store cwd self.cwd = os.getcwd() if filename is None: Env.__context.append(pickle.dumps(Env.__instance)) else: fd = open(filename, 'w+') pickle.dump(Env.__instance, fd) fd.close() def restore(self, filename=None): """Restore environment from memory or a file PARAMETERS filename: a string containing the path of the filename from which the environement will be restored. If set to None the environment is pop the last saved one RETURN VALUE None """ if filename is None: # We are restoring from memory. In that case, just double-check # that we did store the Env object in memory beforehand (using # the store method). assert (self.environ is not None) if filename is None and Env.__context: Env.__instance = pickle.loads(Env.__context[-1]) Env.__context = Env.__context[:-1] elif filename is not None: fd = open(filename, 'r') Env.__instance = pickle.load(fd) fd.close() else: return # Restore environment variables value os.environ = self.environ.copy() # Restore current directory os.chdir(self.cwd) @classmethod def add_path(cls, path, append=False): """Set a path to PATH environment variable PARAMETERS path: path to add append: if True append, otherwise prepend. Default is prepend RETURN VALUE None REMARKS None """ if append: os.environ['PATH'] += os.path.pathsep + path else: os.environ['PATH'] = path + os.path.pathsep + os.environ['PATH'] @classmethod def add_search_path(cls, env_var, path, append=False): """Add a path to the env_var search paths PARAMETERS env_var: the environment variable name (e.g. PYTHONPATH, LD_LIBRARY_PATH, ...) path: path to add append: if True append, otherwise prepend. Default is prepend RETURN VALUE None REMARKS None """ if not env_var in os.environ: os.environ[env_var] = path else: if append: os.environ[env_var] += os.path.pathsep + path else: os.environ[env_var] = path + os.path.pathsep + \ os.environ[env_var] def add_dll_path(self, path, append=False): """Add a path to the dynamic libraries search paths PARAMETERS path: path to add append: if True append, otherwise prepend. Default is prepend RETURN VALUE None REMARKS None """ # On most platforms LD_LIBRARY_PATH is used. For others use: env_var_name = {'windows': 'PATH', 'hp-ux': 'SHLIB_PATH', 'darwin': 'DYLD_LIBRARY_PATH'} env_var = env_var_name.get( self.host.os.name.lower(), 'LD_LIBRARY_PATH') self.add_search_path(env_var, path, append) @property def discriminants(self): """Compute discriminants""" discs = [self.target.platform, self.target.triplet, self.target.cpu.endian + '-endian', self.target.cpu.name, self.target.os.name, self.target.os.name + '-' + self.target.os.version, self.host.os.name + '-host'] if not self.is_cross: discs.append('native') if self.target.cpu.bits == 64: discs.append('64bits') if self.target.os.name.lower() == 'windows': discs.append('NT') return discs @property def tmp_dir(self): return os.environ.get('TMPDIR', os.environ.get('TMP', '/tmp')) if __name__ == "__main__": print Env().host print "discs: " + ", ".join(Env().discriminants) python-gnatpython-54.orig/gnatpython/stringutils.py0000644000175000017500000000616311654532470022504 0ustar xavierxavier ############################################################################ # # # STRINGUTILS.PY # # # # Copyright (C) 2008 - 2010 Ada Core Technologies, Inc. 
# # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see # # # ############################################################################ """This module provides various function to process/handle strings """ import re class Filter (object): """Class that provide an easy way to apply several filters at the same time on a string or a list of strings. ??? we should use tuple instead of list ATTRIBUTES filters: list of filters to apply. each element of filters is either a function or a list of the form [ pattern, sub ] where pattern and sub string representing regexp """ def __init__(self): """Filter constructor PARAMETERS None RETURN VALUE An instanciation of Filter """ self.filters = [] def process(self, item): """Apply the filters on the item PARAMETERS item: this is either a string or a list of strings RETURN VALUE return the filtered string or list """ def run_aux(line): """Apply the filters on a string""" result = line for p in self.filters: if isinstance(p, list): result = re.sub(p[0], p[1], result) else: result = p(result) return result if isinstance(item, list): return [run_aux(k) for k in item] else: return run_aux(item) def append(self, pattern): """Add a filter PARAMETERS pattern: eitheir a function of a list containing the matching pattern and the sub pattern. RETURN VALUE None """ self.filters.append(pattern) python-gnatpython-54.orig/examples/0000755000175000017500000000000011654532470017160 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/0000755000175000017500000000000011654532470022207 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/lib/0000755000175000017500000000000011654532470022755 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/lib/python/0000755000175000017500000000000011654532470024276 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/lib/python/__init__.py0000644000175000017500000000005311654532470026405 0ustar xavierxavier # This file creates a module "lib.python" python-gnatpython-54.orig/examples/echo_testsuite/lib/python/test_support.py0000644000175000017500000000065611654532470027432 0ustar xavierxavier """ This module contains support functions for all test.py """ import os import sys from gnatpython.ex import Run # Change directory TEST = sys.modules['__main__'] TESTDIR = os.path.dirname(TEST.__file__) TEST_NAME = os.path.basename(TESTDIR) os.chdir(TESTDIR) def echo(arg): """Invoke echo PARAMETERS arg: string to echo on stdout """ cmd = ["echo", arg] process = Run(cmd) print process.out python-gnatpython-54.orig/examples/echo_testsuite/lib/__init__.py0000644000175000017500000000004411654532470025064 0ustar xavierxavier # This file creates a module "lib" python-gnatpython-54.orig/examples/echo_testsuite/testsuite.py0000755000175000017500000000372211654532470024621 0ustar xavierxavier#!/usr/bin/env python """Run the testsuite for echo This module assumes that lib/python has been added to PYTHONPATH. 
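Example invocations (a sketch; these assume the current directory is
examples/echo_testsuite and that gnatpython is importable):

    ./testsuite.py --diffs                  # run all tests under tests/, show diffs
    ./testsuite.py tests/test2__in_python   # run a single test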
""" from gnatpython.env import Env from gnatpython.main import Main from gnatpython.mainloop import (MainLoop, add_mainloop_options, generate_collect_result, generate_run_testcase, setup_result_dir) from gnatpython.testdriver import add_run_test_options from gnatpython.reports import ReportDiff from glob import glob import os def main(): """Run the testsuite""" m = Main() add_mainloop_options(m, extended_options=True) add_run_test_options(m) m.add_option("--diffs", dest="view_diffs", action="store_true", default=False, help="show diffs on stdout") m.parse_args() # Various files needed or created by the testsuite # creates : # the ouput directory (out by default) # the report file # the results file setup_result_dir(m.options) if m.args: test_list = [t.strip('/') for t in m.args] else: test_list = sorted(glob('tests/*')) env = Env() # add support module path python_lib = os.path.join(os.getcwd(), 'lib', 'python') Env().add_search_path("PYTHONPATH", python_lib) env.add_search_path('PYTHONPATH', os.getcwd()) discs = [env.target.platform] if m.options.discs: discs += m.options.discs.split(',') collect_result = generate_collect_result( m.options.output_dir, m.options.results_file, m.options.view_diffs) run_testcase = generate_run_testcase('run-test', discs, m.options) MainLoop(test_list, run_testcase, collect_result, m.options.mainloop_jobs) # Generate the report file ReportDiff(m.options.output_dir, m.options.old_output_dir).txt_image(m.options.report_file) if __name__ == "__main__": if os.path.dirname(__file__): os.chdir(os.path.dirname(__file__)) main() python-gnatpython-54.orig/examples/echo_testsuite/tests/0000755000175000017500000000000011654532470023351 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/tests/test3__in_shell/0000755000175000017500000000000011654532470026427 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/tests/test3__in_shell/test.out0000644000175000017500000000004311654532470030134 0ustar xavierxavierThis should match test.out as well python-gnatpython-54.orig/examples/echo_testsuite/tests/test3__in_shell/test.cmd0000644000175000017500000000005211654532470030070 0ustar xavierxavierecho "This should match test.out as well" python-gnatpython-54.orig/examples/echo_testsuite/tests/test1__always_fails/0000755000175000017500000000000011654532470027306 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/tests/test1__always_fails/test.out0000644000175000017500000000015711654532470031021 0ustar xavierxaviertest.out that never matches the test output; this is used to check that the testsuite actually does something. 
python-gnatpython-54.orig/examples/echo_testsuite/tests/test1__always_fails/test.opt0000644000175000017500000000001211654532470031002 0ustar xavierxavierALL XFAIL python-gnatpython-54.orig/examples/echo_testsuite/tests/test1__always_fails/test.py0000644000175000017500000000005211654532470030634 0ustar xavierxavierprint "This test will not match test.out" python-gnatpython-54.orig/examples/echo_testsuite/tests/test2__in_python/0000755000175000017500000000000011654532470026640 5ustar xavierxavierpython-gnatpython-54.orig/examples/echo_testsuite/tests/test2__in_python/test.out0000644000175000017500000000003311654532470030344 0ustar xavierxavierThis should match test.out python-gnatpython-54.orig/examples/echo_testsuite/tests/test2__in_python/test.py0000644000175000017500000000010211654532470030162 0ustar xavierxavierfrom test_support import echo echo("This should match test.out") python-gnatpython-54.orig/examples/echo_testsuite/run-test0000755000175000017500000000160511654532470023720 0ustar xavierxavier#!/usr/bin/env gnatpython """Usage: run-test [options] test_dir Run a test located in test_dir """ from gnatpython.main import Main from gnatpython.testdriver import TestRunner, add_run_test_options import sys def main(): """Run a single test""" m = Main() add_run_test_options(m) m.parse_args() if not m.args: sys.exit("Error: 1 argument expected. See -h") if m.options.restricted_discs is not None: m.options.restricted_discs = m.options.restricted_discs.split(',') t = TestRunner(m.args[0], m.options.discs, m.options.output_dir, m.options.tmp, m.options.enable_cleanup, m.options.restricted_discs, len(m.args) > 1 and m.args[1:] or None, m.options.failed_only) t.execute() if __name__ == '__main__': main()
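
A brief usage sketch for the OptFileParse class defined earlier in this archive; the tag names and opt lines are illustrative, and passing a list instead of a filename makes the parser treat the list as the content of a test.opt file.

from gnatpython.optfileparser import OptFileParse

# An "ALL XFAIL" line (as in test1__always_fails/test.opt) always matches,
# since 'all' is implicitly added to the system tags, and simply marks the
# test as expected to fail.
opt = OptFileParse('x86-linux', ['ALL XFAIL'])
print opt.is_dead            # -> False
print opt                    # -> xfail=""

# A line whose tags match the system tags can also declare the test DEAD.
dead = OptFileParse('vxworks', ['vxworks DEAD only for VxWorks'])
print dead.is_dead           # -> True
print dead                   # -> dead="only for VxWorks"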