pax_global_header00006660000000000000000000000064122163465700014520gustar00rootroot0000000000000052 comment=e47992d800fc789fc9926ee72b9c024c0ec4c431 gr-air-modes-0.0.0.e47992d/000077500000000000000000000000001221634657000150115ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/AUTHORS000066400000000000000000000002571221634657000160650ustar00rootroot00000000000000Mode S receiver: Nick Foster Parts of the ECC algorithm are from Eric Cottrell's gr-air program. gr-howto-write-a-block: Eric Blossom gr-air-modes-0.0.0.e47992d/CMakeLists.txt000066400000000000000000000116621221634657000175570ustar00rootroot00000000000000# Copyright 2011,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Project setup ######################################################################## cmake_minimum_required(VERSION 2.6) project(gr-gr-air-modes CXX) set(gr-gr-air-modes_VERSION_MAJOR 0) set(gr-gr-air-modes_VERSION_MINOR 0) enable_testing() #select the release build type by default to get optimization flags if(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE "Release") message(STATUS "Build type not specified: defaulting to release.") endif(NOT CMAKE_BUILD_TYPE) set(CMAKE_BUILD_TYPE ${CMAKE_BUILD_TYPE} CACHE STRING "") list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/Modules) ######################################################################## # Compiler specific setup ######################################################################## if(CMAKE_COMPILER_IS_GNUCXX AND NOT WIN32) #http://gcc.gnu.org/wiki/Visibility add_definitions(-fvisibility=hidden) endif() ######################################################################## # Find boost ######################################################################## include(GrBoost) ######################################################################## # Install directories ######################################################################## include(GrPlatform) #define LIB_SUFFIX set(GR_RUNTIME_DIR bin) set(GR_LIBRARY_DIR lib${LIB_SUFFIX}) set(GR_INCLUDE_DIR include) set(GR_DATA_DIR share) set(GR_PKG_DATA_DIR ${GR_DATA_DIR}/${CMAKE_PROJECT_NAME}) set(GR_DOC_DIR ${GR_DATA_DIR}/doc) set(GR_PKG_DOC_DIR ${GR_DOC_DIR}/${CMAKE_PROJECT_NAME}) set(GR_CONF_DIR etc) set(GR_PKG_CONF_DIR ${GR_CONF_DIR}/${CMAKE_PROJECT_NAME}/conf.d) set(GR_LIBEXEC_DIR libexec) set(GR_PKG_LIBEXEC_DIR ${GR_LIBEXEC_DIR}/${CMAKE_PROJECT_NAME}) set(GRC_BLOCKS_DIR ${GR_PKG_DATA_DIR}/grc/blocks) ######################################################################## # Find gnuradio build dependencies ######################################################################## find_package(GnuradioRuntime) if(NOT GNURADIO_RUNTIME_FOUND) message(FATAL_ERROR "GnuRadio Runtime required to compile gr-air-modes") endif() ######################################################################## # Find ZMQ and get version ######################################################################## include(FindZeroMQ) if(NOT ZEROMQ_FOUND) message(FATAL_ERROR "ZMQ not found.") endif() ######################################################################## # Find PyZMQ bindings ######################################################################## include(GrPython) GR_PYTHON_CHECK_MODULE("PyZMQ" "zmq" "int(zmq.__version__.split('.')[0]) >= 13" PYZMQ_FOUND) if(NOT PYZMQ_FOUND) message(FATAL_ERROR "Python ZMQ bindings not found.") endif() ######################################################################## # Setup the include and linker paths ######################################################################## include_directories( ${CMAKE_SOURCE_DIR}/include ${Boost_INCLUDE_DIRS} ${GNURADIO_RUNTIME_INCLUDE_DIRS} ) link_directories( ${Boost_LIBRARY_DIRS} ${GNURADIO_RUNTIME_LIBRARY_DIRS} ) # Set component parameters set(GR_GR-AIR-MODES_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/include CACHE INTERNAL "" FORCE) set(GR_GR-AIR-MODES_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/swig CACHE INTERNAL "" FORCE) ######################################################################## # Create uninstall target ######################################################################## configure_file( ${CMAKE_SOURCE_DIR}/cmake/cmake_uninstall.cmake.in ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake @ONLY) add_custom_target(uninstall ${CMAKE_COMMAND} -P ${CMAKE_CURRENT_BINARY_DIR}/cmake_uninstall.cmake ) ######################################################################## # Add subdirectories ######################################################################## add_subdirectory(include/gr_air_modes) add_subdirectory(lib) add_subdirectory(swig) add_subdirectory(python) add_subdirectory(grc) add_subdirectory(apps) add_subdirectory(docs) add_subdirectory(res) gr-air-modes-0.0.0.e47992d/COPYING000066400000000000000000001045131221634657000160500ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . gr-air-modes-0.0.0.e47992d/README000066400000000000000000000157421221634657000157020ustar00rootroot00000000000000======================================================================== Copyright 2010, 2011, 2012 Nick Foster Quaternion.py copyright 2009 Smithsonian Astrophysical Observatory Released under New BSD / 3-Clause BSD License All rights reserved This file is part of gr-air-modes gr-air-modes is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. gr-air-modes is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with gr-air-modes; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Boston, MA 02110-1301, USA. ======================================================================== AUTHOR Nick Foster ======================================================================== OVERVIEW gr-air-modes implements a software-defined radio receiver for Mode S transponder signals, including ADS-B reports from equipped aircraft. Mode S is the transponder protocol used in modern commercial aircraft. A Mode S-equipped aircraft replies to radar interrogation by either ground radar (secondary surveillance) or other aircraft ("Traffic Collision Avoidance System", or TCAS). The protocol is an extended version of the Mode A/C protocol used in transponders since the 1940s. Mode S reports include a unique airframe identifier (referred to as the "ICAO number") and altitude (to facilitate separation control). This receiver listens to the 1090MHz downlink channel; interrogation requests at 1030MHz are not received or decoded by this program. Automatic Dependent Surveillance-Broadcast (ADS-B) is a communication protocol using the Extended Squitter capability of the Mode S transport layer. There are other implementations (VDL Mode 2 and UAT, for instance) but Mode S remains the primary ADS-B transport for commercial use. The protocol is: * Automatic: it requires no pilot input * Dependent: it is dependent on altimeter, GPS, and other aircraft instrumentation for information * Surveillance: it provides current information about the transmitting aircraft * Broadcast: it is one-way, broadcast to all receivers within range. ADS-B-equipped aircraft broadcast ("squitter") their position, velocity, flight number, and other interesting information to any receiver within range of the aircraft. Position reports are typically generated once per second and flight indentification every five seconds. Implementation of ADS-B is mandatory in European airspace as well as in Australia. North American implementation is still voluntary, with a mandate arriving in 2020 via the FAA's "NextGen" program. The receiver modes_rx is written for use with Ettus Research USRP devices, although the "RTLSDR" receivers are also supported via the Osmocom driver. In theory, any receiver which outputs complex samples at at least 2Msps should work via the file input or UDP input options, or by means of a Gnuradio interface. Multiple output formats are supported: * Raw (or minimally processed) output of packet data * Parsed text * SQLite database * KML for use with Google Earth * SBS-1-compatible output for use with e.g. PlanePlotter or Virtual Radar Server * FlightGear multiplayer interface for real-time display of traffic within the simulator Most of the common ADS-B reports are fully decoded per specification. Those that are not are generally ones which are not commonly used. Should you receive a large number of reports which result in "not implemented" or "No handler" messages, please use the -w option to save raw data and forward it to the author. To save time, note that receiving a small number of spurious reports is expected; false reports can be excluded by looking for multiple reports from the same aircraft (i.e., the same ICAO 6-digit hexadecimal number). ======================================================================== REQUIREMENTS gr-air-modes requires: * Python >= 2.5 (written for Python 2.7, Python 3.0 might work) ** NumPy and SciPy are required for the FlightGear output plugin. * Gnuradio >= 3.5.0 * Ettus UHD >= 3.4.0 for use with USRPs * osmosdr (any version) for use with RTLSDR dongles * SQLite 3.7 or later * CMake 2.6 or later ======================================================================== BUILDING gr-air-modes uses CMake as its build system. To build, from the top level directory, type: $ mkdir build $ cd build $ cmake ../ $ make $ sudo make install $ sudo ldconfig This will build gr-air-modes out of the source tree in build/ and install it on your system, generally in /usr/local/bin. ======================================================================== USAGE The main application is modes_rx. For a complete list of options, run: $ modes_rx --help For use with Ettus UHD-compatible devices, the defaults should suffice to receive reports and print to the screen. Use the -d option to look for an RTLSDR-type dongle using the osmosdr driver. In particular, the --location option can be used to set the receiving location's GPS coordinates. This enables range and bearing calculations in the printed display as well as range rings in the Google Earth interface. ======================================================================== FILES Interesting files and libraries included with the package: * apps/modes_rx: The main application. * apps/get_correlated_records.py: Demonstration program for computing multilaterated time error for two unsynchronized receiver stations. * lib/air_modes_int_and_dump.cc: Unused integrate-and-dump filter for demodulating Mode S waveforms. * lib/air_modes_preamble.cc: Mode S preamble detector. * lib/air_modes_slicer.cc: Bit slicer (1 vs 0) and packet aggregator. * lib/modes_crc.cc: Computes parity check for Mode S packets. * python/altitude.py: Mode S altitude encoding/decoding routines * python/cpr.py: Compact Position Reporting encoder/decoder * python/modes_flightgear.py: FlightGear (open-source flight simulator) plugin which inserts live traffic into the simulator via the multiplayer interface. * python/mlat.py: Multilateration algorithms for determining position of non-ADS-B-equipped or non-cooperative aircraft using multiple receivers. * python/modes_kml.py: KML output plugin for Google Earth. * python/modes_parse.py: Mode S/ADS-B packet parsing routines. * python/modes_print.py: Human-readable printout plugin * python/modes_raw_server.py: UDP output plugin for raw data output * python/modes_sbs1.py: SBS-1-compatible output plugin for use with Virtual Radar Server, PlanePlotter, or other compatible programs. * python/modes_sql.py: SQLite interface for storing reports in a database. * python/Quaternion.py: Quaternion library used to calculate orientation of aircraft for FlightGear plugin. gr-air-modes-0.0.0.e47992d/apps/000077500000000000000000000000001221634657000157545ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/apps/CMakeLists.txt000066400000000000000000000016041221634657000205150ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. include(GrPython) GR_PYTHON_INSTALL( PROGRAMS modes_rx modes_gui uhd_modes.py DESTINATION bin ) gr-air-modes-0.0.0.e47992d/apps/airports.png000066400000000000000000000060661221634657000203350ustar00rootroot00000000000000PNG  IHDR@@iqgAMA asRGB cHRMz%u0`:o_F pHYs   IDATx[ipFڕ:],c0 ' q͏˩TG*Jqp%US.'N NN8A1Ā8MtеZ!ݝIeu-&]Uf}~oFiKr`LvvC2C7C;gJbG zxe"CCa7(fd(ch$O?`PP< <#{e1 j#|5XmϞMM-Z^^Fc>vTG4 `"]quvt|Oj3H8vY0_&Bpμ]`&1F@ Q5vhTI;U>"Z0 PD'E&o0k7. ІQ@@ S) L:Lw a(@5o _F@c5S@o [H:(XȪ 0(3zRl*TwE$=BOw0 =Zp<}bdiŞcZg一䟏V2h0& gdx3ډ[HrҊ11ԣ/HD\!mApskJ$w<qbL4oxݺ#HG7^ہjqK}:%J91WЗ82xd"%'6އ:6`/PɯOde@Fs):' 2IH} 6~ְF$vq;*.Z &OJgSHHREE1-!m7;<6!cVɃ~U|άBXc&1z\=ڿw#Bdϑ>Al)P%{G-mBɮpaf#^ іYuOW\w U^z H4|җF7 Wh>MK4V7&D\LDž/#9%9ᜳ +f=i8i6#HZUݺQ㨽1>3zPmwF^㽥N |e(;$9w\lt2Xcm!,a<mF}W\}ox= V]*+]OĚpux+QyHtd#mV19[`XȰ`4Ӂ2K]ilX^GG!FQf(po7@G J|ɒo8>S2(3qK(0%V5Qk{RG5H{)hx~PW6Z9)|%Htelޏ vHpG!ZՌ SbmL[ux9ʌnVz`a{3N@Z`eJj} @|]O$TM2N1Q oVTMOLf)’Qk^$ʟD$bQԐGߨ 8G? _kTʛ")g.U% gYN֒3֣;$C1b>nJBPKQQ_zSlJ9Z'[Xk$l8 A嗘R.ڔI=ڞ uqJSY`08-~97>־ KyH ۂ!#ʚ"Et?ݥ86o6s_f䐿s>\]2zmJgӈq(QA@mDP"հ&-Fro*Ƀ QKc0uK f`xeXjXYAaѰz>EI_3z$N-C Kb>#+Ƅ`Mڼ RV)2"0ʗܚ߄0Sӂ;Cmt ;`9=u/1bq,)] -?iP;O Wf` \jhKQXV_(eHY^IۘiBE!*}0{ NĿJP<|iI-v"|'w-D D*͋ r„oHiE[yK{{7?3JKIENDB`gr-air-modes-0.0.0.e47992d/apps/get_correlated_records.py000077500000000000000000000141251221634657000230400ustar00rootroot00000000000000#!/usr/bin/env python from air_modes import modes_parse, mlat import numpy import sys #sffile = open("27augsf3.txt") #rudifile = open("27augrudi3.txt") #sfoutfile = open("sfout.txt", "w") #rudioutfile = open("rudiout.txt", "w") sfparse = modes_parse.modes_parse([37.762236,-122.442525]) sf_station = [37.762236,-122.442525, 100] mv_station = [37.409348,-122.07732, 100] bk_station = [37.854246, -122.266701, 100] raw_stamps = [] #first iterate through both files to find the estimated time difference. doesn't have to be accurate to more than 1ms or so. #to do this, look for type 17 position packets with the same data. assume they're unique. print the tdiff. #collect a list of raw timestamps for each aircraft from each station #the raw stamps have to be processed into corrected stamps OR distance has to be included in each #then postprocess to find clock delay for each and determine drift rate for each aircraft separately #then come up with an average clock drift rate #then find an average drift-corrected clock delay #then find rms error #ok so get [ICAO, [raw stamps], [distance]] for each matched record files = [open(arg) for arg in sys.argv[1:]] #files = [sffile, rudifile] stations = [sf_station, mv_station]#, bk_station] records = [] for each_file in files: recordlist = [] for line in each_file: [msgtype, shortdata, longdata, parity, ecc, reference, timestamp] = line.split() recordlist.append({"data": {"msgtype": long(msgtype, 10),\ "shortdata": long(shortdata, 16),\ "longdata": long(longdata, 16),\ "parity": long(parity, 16),\ "ecc": long(ecc, 16)}, "time": float(timestamp)\ }) records.append(recordlist) #ok now we have records parsed into something usable that we can == with def feet_to_meters(feet): return feet * 0.3048006096012 all_heard = [] #gather list of reports which were heard by all stations for station0_report in records[0]: #iterate over list of reports from station 0 for other_reports in records[1:]: stamps = [station0_report["time"]] stamp = [report["time"] for report in other_reports if report["data"] == station0_report["data"]]# for other_reports in records[1:]] if len(stamp) > 0: stamps.append(stamp[0]) if len(stamps) == len(records): #found same report in all records all_heard.append({"data": station0_report["data"], "times": stamps}) #print all_heard #ok, now let's pull out the location-bearing packets so we can find our time offset position_reports = [x for x in all_heard if x["data"]["msgtype"] == 17 and 9 <= (x["data"]["longdata"] >> 51) & 0x1F <= 18] offset_list = [] #there's probably a way to list-comprehension-ify this but it looks hard for msg in position_reports: data = msg["data"] [alt, lat, lon, rng, bearing] = sfparse.parseBDS05(data["shortdata"], data["longdata"], data["parity"], data["ecc"]) ac_pos = [lat, lon, feet_to_meters(alt)] rel_times = [] for time, station in zip(msg["times"], stations): #here we get the estimated time at the aircraft when it transmitted range_to_ac = numpy.linalg.norm(numpy.array(mlat.llh2ecef(station))-numpy.array(mlat.llh2ecef(ac_pos))) timestamp_at_ac = time - range_to_ac / mlat.c rel_times.append(timestamp_at_ac) offset_list.append({"aircraft": data["shortdata"] & 0xffffff, "times": rel_times}) #this is a list of unique aircraft, heard by all stations, which transmitted position packets #we do drift calcs separately for each aircraft in the set because mixing them seems to screw things up #i haven't really sat down and figured out why that is yet unique_aircraft = list(set([x["aircraft"] for x in offset_list])) print "Aircraft heard for clock drift estimate: %s" % [str("%x" % ac) for ac in unique_aircraft] print "Total reports used: %d over %.2f seconds" % (len(position_reports), position_reports[-1]["times"][0]-position_reports[0]["times"][0]) #get a list of reported times gathered by the unique aircraft that transmitted them #abs_unique_times = [report["times"] for ac in unique_aircraft for report in offset_list if report["aircraft"] == ac] #print abs_unique_times #todo: the below can probably be done cleaner with nested list comprehensions clock_rate_corrections = [0] for i in range(1,len(stations)): drift_error_limited = [] for ac in unique_aircraft: times = [report["times"] for report in offset_list if report["aircraft"] == ac] s0_times = [report[0] for report in times] rel_times = [report[i]-report[0] for report in times] #find drift error rate drift_error = [(y-x)/(b-a) for x,y,a,b in zip(rel_times, rel_times[1:], s0_times[0:], s0_times[1:])] drift_error_limited.append([x for x in drift_error if abs(x) < 1e-5]) #flatten the list of lists (tacky, there's a better way) drift_error_limited = [x for sublist in drift_error_limited for x in sublist] clock_rate_corrections.append(0-numpy.mean(drift_error_limited)) for i in range(len(clock_rate_corrections)): print "drift from %d relative to station 0: %.3fppm" % (i, clock_rate_corrections[i] * 1e6) #let's get the average clock offset (based on drift-corrected, TDOA-corrected derived timestamps) clock_offsets = [[numpy.mean([x["times"][i]*(1+clock_rate_corrections[i])-x["times"][0] for x in offset_list])][0] for i in range(0,len(stations))] for i in range(len(clock_offsets)): print "mean offset from %d relative to station 0: %.3f seconds" % (i, clock_offsets[i]) #for the two-station case, let's now go back, armed with our clock drift and offset, and get the variance between expected and observed timestamps error_list = [] for i in range(1,len(stations)): for report in offset_list: error = abs(((report["times"][i]*(1+clock_rate_corrections[i]) - report["times"][0]) - clock_offsets[i]) * mlat.c) error_list.append(error) #print error rms_error = (numpy.mean([error**2 for error in error_list]))**0.5 print "RMS error in TDOA: %.1f meters" % rms_error gr-air-modes-0.0.0.e47992d/apps/get_dupes.py000077500000000000000000000012571221634657000203150ustar00rootroot00000000000000#!/usr/bin/env python import sys, re if __name__== '__main__': data = sys.stdin.readlines() icaos = [] num_icaos = 0 for line in data: match = re.match(".*Type.*from (\w+)", line) if match is not None: icao = int(match.group(1), 16) icaos.append(icao) #get dupes dupes = sorted([icao for icao in set(icaos) if icaos.count(icao) > 1]) count = sum([icaos.count(icao) for icao in dupes]) for icao in dupes: print "%x" % icao print "Found %i replies from %i non-unique aircraft, out of a total %i replies (%i likely spurious replies)." \ % (count, len(dupes), len(icaos), len(icaos)-count) gr-air-modes-0.0.0.e47992d/apps/modes_gui000077500000000000000000000530761221634657000176700ustar00rootroot00000000000000#!/usr/bin/env python # Copyright 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import os, sys, time, threading, datetime, math, csv, tempfile, ConfigParser from optparse import OptionParser from PyQt4 import QtCore,QtGui,QtWebKit from PyQt4.Qwt5 import Qwt from gnuradio import gr, eng_notation from gnuradio.eng_option import eng_option from gnuradio.gr.pubsub import pubsub import air_modes from air_modes.exceptions import * from air_modes.modes_rx_ui import Ui_MainWindow from air_modes.gui_model import * import sqlite3 import zmq class mainwindow(QtGui.QMainWindow): live_data_changed_signal = QtCore.pyqtSignal(QtCore.QString, name='liveDataChanged') def __init__(self): QtGui.QMainWindow.__init__(self) self.ui = Ui_MainWindow() self.ui.setupUi(self) #set defaults #add file, RTL, UHD sources self.ui.combo_source.addItems(["UHD", "Osmocom", "File/UDP"]) self.ui.combo_source.setCurrentIndex(0) #populate antenna, rate combo boxes based on source self.populate_source_options() defaults = self.get_defaults() #should round to actual achieved gain self.ui.line_gain.insert(defaults["gain"]) #default to 5dB self.ui.line_threshold.insert(defaults["threshold"]) if defaults["pmf"] is not None: self.ui.check_pmf.setChecked(bool(defaults["pmf"])) if defaults["dcblock"] is not None: self.ui.check_dcblock.setChecked(bool(defaults["dcblock"])) if defaults["samplerate"] is not None: if defaults["samplerate"] in self.rates: self.ui.combo_rate.setCurrentIndex(self.rates.index(int(defaults["samplerate"]))) self.ui.prog_rssi.setMinimum(-60) self.ui.prog_rssi.setMaximum(0) if defaults["antenna"] is None: self.ui.combo_ant.setCurrentIndex(self.ui.combo_ant.findText("RX2")) else: self.ui.combo_ant.setCurrentIndex(self.ui.combo_ant.findText(defaults["antenna"])) #check KML by default, leave the rest unchecked. self.ui.check_sbs1.setChecked(bool(defaults["sbs1"] == "1")) self.ui.check_raw.setChecked(bool(defaults["raw"] == "1")) self.ui.check_fgfs.setChecked(bool(defaults["fgfs"] == "1")) self.ui.check_kml.setChecked(bool(defaults["kml"] == "1")) self.ui.line_sbs1port.insert(defaults["sbs1port"])#"30003") self.ui.line_rawport.insert(defaults["rawport"])#"9988") self.ui.line_fgfsport.insert(defaults["fgfsport"])#"5500") self.ui.line_kmlfilename.insert(defaults["kmlfile"])#"modes.kml") if defaults["latitude"] is not None: self.ui.line_my_lat.insert(defaults["latitude"]) if defaults["longitude"] is not None: self.ui.line_my_lon.insert(defaults["longitude"]) #disable by default self.ui.check_adsbonly.setCheckState(QtCore.Qt.Unchecked) #set up the radio stuff self.queue = gr.msg_queue(10) self.running = False self.kmlgen = None #necessary bc we stop its thread in shutdown self.dbname = "air_modes.db" self.num_reports = 0 self.last_report = 0 self.context = zmq.Context(1) self.datamodel = dashboard_data_model(None) self.ui.list_aircraft.setModel(self.datamodel) self.ui.list_aircraft.setModelColumn(0) self.az_model = air_modes.az_map_model(None) self.ui.azimuth_map.setModel(self.az_model) #set up dashboard views self.icaodelegate = ICAOViewDelegate() self.ui.list_aircraft.setItemDelegate(self.icaodelegate) self.dashboard_mapper = QtGui.QDataWidgetMapper() self.dashboard_mapper.setModel(self.datamodel) self.dashboard_mapper.addMapping(self.ui.line_icao, 0) #self.dashboard_mapper.addMapping(self.ui.prog_rssi, 2) self.dashboard_mapper.addMapping(self.ui.line_latitude, 3) self.dashboard_mapper.addMapping(self.ui.line_longitude, 4) self.dashboard_mapper.addMapping(self.ui.line_alt, 5) self.dashboard_mapper.addMapping(self.ui.line_speed, 6) #self.dashboard_mapper.addMapping(self.ui.compass_heading, 7) self.dashboard_mapper.addMapping(self.ui.line_climb, 8) self.dashboard_mapper.addMapping(self.ui.line_ident, 9) self.dashboard_mapper.addMapping(self.ui.line_type, 10) self.dashboard_mapper.addMapping(self.ui.line_range, 11) compass_palette = QtGui.QPalette() compass_palette.setColor(QtGui.QPalette.Foreground, QtCore.Qt.white) self.ui.compass_heading.setPalette(compass_palette) self.ui.compass_bearing.setPalette(compass_palette) #TODO: change the needle to an aircraft silhouette self.ui.compass_heading.setNeedle(Qwt.QwtDialSimpleNeedle(Qwt.QwtDialSimpleNeedle.Ray, False, QtCore.Qt.black)) self.ui.compass_bearing.setNeedle(Qwt.QwtDialSimpleNeedle(Qwt.QwtDialSimpleNeedle.Ray, False, QtCore.Qt.black)) #hook up the update signal self.ui.list_aircraft.selectionModel().currentRowChanged.connect(self.dashboard_mapper.setCurrentModelIndex) self.ui.list_aircraft.selectionModel().currentRowChanged.connect(self.update_heading_widget) self.ui.list_aircraft.selectionModel().currentRowChanged.connect(self.update_bearing_widget) self.ui.list_aircraft.selectionModel().currentRowChanged.connect(self.update_rssi_widget) self.ui.list_aircraft.selectionModel().currentRowChanged.connect(self.update_map_highlight) self.datamodel.dataChanged.connect(self.unmapped_widgets_dataChanged) #hook up parameter-changed signals so we can change gain, rate, etc. while running self.ui.combo_rate.currentIndexChanged['QString'].connect(self.update_sample_rate) self.ui.line_gain.editingFinished.connect(self.update_gain) self.ui.combo_source.currentIndexChanged['QString'].connect(self.populate_source_options) #hook up live data text box update signal self.live_data_changed_signal.connect(self.on_append_live_data) self._last_live_data_update = time.time() self._pending_msgstr = "" self.prefs = None def update_sample_rate(self, rate): if self.running: self._radio.set_rate(int(float(rate)*1e6)) def update_gain(self): if self.running: self._radio.set_gain(float(self.ui.line_gain.text())) ############ widget update functions for non-mapped widgets ############ def update_heading_widget(self, index): if index.model() is not None: heading = index.model().data(index.model().index(index.row(), self.datamodel._colnames.index("heading"))).toDouble()[0] self.ui.compass_heading.setValue(heading) def update_bearing_widget(self, index): if index.model() is not None: bearing = index.model().data(index.model().index(index.row(), self.datamodel._colnames.index("bearing"))).toDouble()[0] self.ui.compass_bearing.setValue(bearing) def unmapped_widgets_dataChanged(self, startIndex, endIndex): index = self.ui.list_aircraft.selectionModel().currentIndex() if index.row() in range(startIndex.row(), endIndex.row()+1): #the current aircraft was affected if self.datamodel._colnames.index("heading") in range(startIndex.column(), endIndex.column()+1): self.update_heading_widget(index) if self.datamodel._colnames.index("bearing") in range(startIndex.column(), endIndex.column()+1): self.update_bearing_widget(index) if self.datamodel._colnames.index("rssi") in range(startIndex.column(), endIndex.column()+1): self.update_rssi_widget(index) def update_rssi_widget(self, index): if index.model() is not None: rssi = index.model().data(index.model().index(index.row(), 2)).toDouble()[0] self.ui.prog_rssi.setValue(rssi) def increment_reportspersec(self, msg): self.num_reports += 1 def update_reportspersec(self): dt = time.time() - self.last_report if dt >= 1.0: self.last_report = time.time() self.ui.line_reports.setText("%i" % self.num_reports) self.num_reports = 0 def update_map_highlight(self, index): if index.model() is not None: icaostr = index.model().data(index.model().index(index.row(), self.datamodel._colnames.index("icao"))).toString() icao = int(str(icaostr), 16) self.jsonpgen.set_highlight(icao) ##################### dynamic option population ######################## #goes and gets valid antenna, sample rate options from the device and grays out appropriate things def populate_source_options(self): sourceid = self.ui.combo_source.currentText() self.rates = [] self.ratetext = [] self.antennas = [] if sourceid == "UHD": try: from gnuradio import uhd self.src = uhd.single_usrp_source("", uhd.io_type_t.COMPLEX_FLOAT32, 1) self.rates = [rate.start() for rate in self.src.get_samp_rates() if (rate.start() % 2.e6) == 0 and rate >= 4e6] self.antennas = self.src.get_antennas() self.src = None #deconstruct UHD source for now self.ui.combo_ant.setEnabled(True) self.ui.combo_rate.setEnabled(True) self.ui.stack_source.setCurrentIndex(0) except: self.rates = [] self.antennas = [] self.ui.combo_ant.setEnabled(False) self.ui.combo_rate.setEnabled(False) self.ui.stack_source.setCurrentIndex(0) elif sourceid == "Osmocom": try: import osmosdr self.src = osmosdr.source("") self.rates = [rate.start() for rate in self.src.get_sample_rates() if (rate.start() % 2.e6) == 0] self.antennas = ["RX"] self.src = None self.ui.combo_ant.setEnabled(False) self.ui.combo_rate.setEnabled(True) self.ui.stack_source.setCurrentIndex(0) except: self.rates = [] self.antennas = [] self.ui.combo_ant.setEnabled(False) self.ui.combo_rate.setEnabled(False) self.ui.stack_source.setCurrentIndex(0) elif sourceid == "File/UDP": self.rates = [2e6*i for i in range(2,13)] self.antennas = ["None"] self.ui.combo_ant.setEnabled(False) self.ui.combo_rate.setEnabled(True) self.ui.stack_source.setCurrentIndex(1) self.ui.combo_rate.clear() self.ratetext = ["%.3f" % (rate / 1.e6) for rate in self.rates] for rate, text in zip(self.rates, self.ratetext): self.ui.combo_rate.addItem(text, rate) self.ui.combo_ant.clear() self.ui.combo_ant.addItems(self.antennas) #set up recommended sample rate if len(self.rates) > 1: recommended_rate = min(x for x in self.rates if x >= 4e6 and max(self.rates) % x == 0) if recommended_rate >= 8.e6: self.ui.check_pmf.setChecked(True) self.ui.combo_rate.setCurrentIndex(self.rates.index(recommended_rate)) ################ action handlers #################### def on_combo_source_currentIndexChanged(self, index): self.populate_source_options() def on_button_start_released(self): #if we're already running, kill it! if self.running is True: self.on_quit() self.num_reports = 0 self.ui.line_reports.setText("0") self.ui.button_start.setText("Start") self.running = False else: #we aren't already running, let's get this party started parser = OptionParser(option_class=eng_option) air_modes.modes_radio.add_radio_options(parser) (options, args) = parser.parse_args() #sets defaults nicely if str(self.ui.combo_source.currentText()) != "File/UDP": options.source = str(self.ui.combo_source.currentText()).lower() else: options.source = str(self.ui.line_inputfile.text()) options.rate = float(self.ui.combo_rate.currentText()) * 1e6 options.antenna = str(self.ui.combo_ant.currentText()) options.gain = float(self.ui.line_gain.text()) options.threshold = float(self.ui.line_threshold.text()) options.pmf = self.ui.check_pmf.isChecked() options.dcblock = self.ui.check_dcblock.isChecked() self._servers = ["inproc://modes-radio-pub"] #TODO ADD REMOTES self._relay = air_modes.zmq_pubsub_iface(self.context, subaddr=self._servers, pubaddr=None) if self.ui.check_raw.checkState(): options.tcp = int(self.ui.line_rawport.text()) self._radio = air_modes.modes_radio(options, self.context) self._publisher = pubsub() self._relay.subscribe("dl_data", air_modes.make_parser(self._publisher)) try: my_position = [float(self.ui.line_my_lat.text()), float(self.ui.line_my_lon.text())] except: my_position = None self._cpr_dec = air_modes.cpr_decoder(my_position) self.datamodelout = dashboard_output(self._cpr_dec, self.datamodel, self._publisher) self.lock = threading.Lock() #grab a lock to ensure sql and kml don't step on each other #output options to populate outputs, updates if self.ui.check_kml.checkState(): #we spawn a thread to run every 30 seconds (or whatever) to generate KML self.kmlgen = air_modes.output_kml(self.ui.line_kmlfilename.text(), self.dbname, my_position, self.lock) #create a KML generating thread if self.ui.check_sbs1.checkState(): sbs1port = int(self.ui.line_sbs1port.text()) sbs1out = air_modes.output_sbs1(self._cpr_dec, sbs1port, self._publisher) if self.ui.check_fgfs.checkState(): fghost = "127.0.0.1" #TODO FIXME fgport = self.ui.line_fgfsport.text() fgout = air_modes.output_flightgear(self._cpr_dec, fghost, int(fgport), self._publisher) #add azimuth map output and hook it up if my_position is not None: self.az_map_output = air_modes.az_map_output(self._cpr_dec, self.az_model, self._publisher) #self._relay.subscribe("dl_data", self.az_map_output.output) #set up map #NOTE this is busted on windows. WebKit requires .htm[l] extensions to render, #so using a temp file doesn't work. self._htmlfile = open("/tmp/mode_s.html", 'wb+')#tempfile.NamedTemporaryFile() self._jsonfile = tempfile.NamedTemporaryFile() self.livedata = air_modes.output_print(self._cpr_dec, self._publisher, self.live_data_changed_signal.emit) #create SQL database for KML and dashboard displays self.dbwriter = air_modes.output_sql(self._cpr_dec, self.dbname, self.lock, self._publisher) self.jsonpgen = air_modes.output_jsonp(self._jsonfile.name, self.dbname, my_position, self.lock, timeout=1) htmlstring = air_modes.html_template(my_position, self._jsonfile.name) self._htmlfile.write(htmlstring) self._htmlfile.flush() class WebPage(QtWebKit.QWebPage): def javaScriptConsoleMessage(self, msg, line, source): print '%s line %d: %s' % (source, line, msg) page = WebPage() self.ui.mapView.setPage(page) self.ui.mapView.load( QtCore.QUrl( QtCore.QUrl.fromLocalFile("/tmp/mode_s.html") ) ) self.ui.mapView.show() #output to update reports/sec widget self._relay.subscribe("dl_data", self.increment_reportspersec) self._rps_timer = QtCore.QTimer() self._rps_timer.timeout.connect(self.update_reportspersec) self._rps_timer.start(1000) #start the flowgraph self._radio.start() self.ui.button_start.setText("Stop") self.running = True #grab prefs and save them self.prefs = {} self.prefs["samplerate"] = options.rate self.prefs["antenna"] = options.antenna self.prefs["gain"] = options.gain self.prefs["pmf"] = "1" if options.pmf else "0" self.prefs["dcblock"] = "1" if options.dcblock else "0" self.prefs["source"] = self.ui.combo_source.currentText() self.prefs["threshold"] = options.threshold self.prefs["sbs1"] = "1" if self.ui.check_sbs1.isChecked() else "0" self.prefs["sbs1port"] = int(self.ui.line_sbs1port.text()) self.prefs["fgfs"] = "1" if self.ui.check_fgfs.isChecked() else "0" self.prefs["fgfsport"] = int(self.ui.line_fgfsport.text()) self.prefs["raw"] = "1" if self.ui.check_raw.isChecked() else "0" self.prefs["rawport"] = int(self.ui.line_rawport.text()) self.prefs["kml"] = "1" if self.ui.check_kml.isChecked() else "0" self.prefs["kmlfile"] = self.ui.line_kmlfilename.text() try: self.prefs["latitude"] = float(self.ui.line_my_lat.text()) self.prefs["longitude"] = float(self.ui.line_my_lon.text()) except: pass def on_quit(self): if self.running is True: self._relay.close() self._radio.close() self._relay = None self._radio = None self._rps_timer = None try: self.kmlgen.done = True #TODO FIXME need a way to kill kmlgen safely without delay #self.kmlgen.join() #self.kmlgen = None except: pass if self.prefs is not None: self.write_defaults(self.prefs) #slot to catch signal emitted by output_live_data (necessary for #thread safety since output_live_data is called by another thread) def on_append_live_data(self, msgstr): self._pending_msgstr += msgstr + "\n" if time.time() - self._last_live_data_update >= 0.1: self._last_live_data_update = time.time() self.update_live_data(self._pending_msgstr) self._pending_msgstr = "" def update_live_data(self, msgstr): #limit scrollback buffer size -- is there a faster way? if(self.ui.text_livedata.document().lineCount() > 500): cursor = self.ui.text_livedata.textCursor() cursor.movePosition(QtGui.QTextCursor.Start) cursor.select(QtGui.QTextCursor.LineUnderCursor) cursor.removeSelectedText() self.ui.text_livedata.append(msgstr) self.ui.text_livedata.verticalScrollBar().setSliderPosition(self.ui.text_livedata.verticalScrollBar().maximum()) opt_file = "~/.gr-air-modes/prefs" def get_defaults(self): defaults = {} defaults["samplerate"] = None #let app pick it defaults["pmf"] = None defaults["dcblock"] = None defaults["antenna"] = None defaults["gain"] = "25" defaults["kml"] = "1" defaults["kmlfile"] = "modes.kml" defaults["sbs1"] = "0" defaults["sbs1port"] = "30003" defaults["raw"] = "0" defaults["rawport"] = "9988" defaults["fgfs"] = "0" defaults["fgfsport"] = "5500" defaults["source"] = "UHD" defaults["threshold"] = "5" defaults["latitude"] = None defaults["longitude"] = None prefs = ConfigParser.ConfigParser(defaults) prefs.optionxform = str try: prefs.read(os.path.expanduser(self.opt_file)) for item in prefs.items("GUI"): defaults[item[0]] = item[1] except (IOError, ConfigParser.NoSectionError): print "No preferences file %s found, creating..." % os.path.expanduser(self.opt_file) self.write_defaults(defaults) return defaults def write_defaults(self, defaults): config = ConfigParser.RawConfigParser() config.add_section('GUI') for item in defaults: config.set('GUI', item, str(defaults[item])) dirname = os.path.dirname(os.path.expanduser(self.opt_file)) if not os.path.exists(dirname): os.makedirs(dirname) with open(os.path.expanduser(self.opt_file), 'wb') as prefsfile: config.write(prefsfile) if __name__ == '__main__': app = QtGui.QApplication(sys.argv) window = mainwindow() app.lastWindowClosed.connect(window.on_quit) window.setWindowTitle("Mode S/ADS-B receiver") window.show() sys.exit(app.exec_()) gr-air-modes-0.0.0.e47992d/apps/modes_rx000077500000000000000000000073331221634657000175300ustar00rootroot00000000000000#!/usr/bin/env python # Copyright 2010, 2013 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio.eng_option import eng_option from gnuradio.gr.pubsub import pubsub from optparse import OptionParser import time, os, sys, threading, math from string import split, join import air_modes from air_modes.types import * from air_modes.exceptions import * import zmq #todo: maybe move plugins to separate programs (flightgear, SBS1, etc.) def main(): my_position = None usage = "%prog: [options]" optparser = OptionParser(option_class=eng_option, usage=usage) air_modes.modes_radio.add_radio_options(optparser) optparser.add_option("-l","--location", type="string", default=None, help="GPS coordinates of receiving station in format xx.xxxxx,xx.xxxxx") #data source options optparser.add_option("-a","--remote", type="string", default=None, help="specify additional servers from which to take data in format tcp://x.x.x.x:y,tcp://....") optparser.add_option("-n","--no-print", action="store_true", default=False, help="disable printing decoded packets to stdout") #output plugins optparser.add_option("-K","--kml", type="string", default=None, help="filename for Google Earth KML output") optparser.add_option("-P","--sbs1", action="store_true", default=False, help="open an SBS-1-compatible server on port 30003") optparser.add_option("-m","--multiplayer", type="string", default=None, help="FlightGear server to send aircraft data, in format host:port") (options, args) = optparser.parse_args() #construct the radio context = zmq.Context(1) tb = air_modes.modes_radio(options, context) servers = ["inproc://modes-radio-pub"] if options.remote is not None: servers += options.remote.split(",") relay = air_modes.zmq_pubsub_iface(context, subaddr=servers, pubaddr=None) publisher = pubsub() relay.subscribe("dl_data", air_modes.make_parser(publisher)) if options.location is not None: my_position = [float(n) for n in options.location.split(",")] #CPR decoder obj to handle getting position from BDS0,5 and BDS0,6 pkts cpr_dec = air_modes.cpr_decoder(my_position) if options.kml is not None: dbname = 'adsb.db' lock = threading.Lock() sqldb = air_modes.output_sql(cpr_dec, dbname, lock, publisher) #input into the db kmlgen = air_modes.output_kml(options.kml, dbname, my_position, lock) #create a KML generating thread to read from the db if options.no_print is not True: printer = air_modes.output_print(cpr_dec, publisher) if options.multiplayer is not None: [fghost, fgport] = options.multiplayer.split(':') fgout = air_modes.output_flightgear(cpr_dec, fghost, int(fgport), publisher) if options.sbs1 is True: sbs1port = air_modes.output_sbs1(cpr_dec, 30003, publisher) tb.run() time.sleep(0.2) tb.close() time.sleep(0.2) relay.close() if options.kml is not None: kmlgen.close() if __name__ == '__main__': main() gr-air-modes-0.0.0.e47992d/apps/uhd_modes.py000077500000000000000000000002151221634657000202760ustar00rootroot00000000000000#!/usr/bin/env python if __name__ == '__main__': print "ERROR: uhd_modes.py has been deprecated. The new application name is modes_rx." gr-air-modes-0.0.0.e47992d/cmake/000077500000000000000000000000001221634657000160715ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/cmake/Modules/000077500000000000000000000000001221634657000175015ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/cmake/Modules/CMakeParseArgumentsCopy.cmake000066400000000000000000000134031221634657000252000ustar00rootroot00000000000000# CMAKE_PARSE_ARGUMENTS( args...) # # CMAKE_PARSE_ARGUMENTS() is intended to be used in macros or functions for # parsing the arguments given to that macro or function. # It processes the arguments and defines a set of variables which hold the # values of the respective options. # # The argument contains all options for the respective macro, # i.e. keywords which can be used when calling the macro without any value # following, like e.g. the OPTIONAL keyword of the install() command. # # The argument contains all keywords for this macro # which are followed by one value, like e.g. DESTINATION keyword of the # install() command. # # The argument contains all keywords for this macro # which can be followed by more than one value, like e.g. the TARGETS or # FILES keywords of the install() command. # # When done, CMAKE_PARSE_ARGUMENTS() will have defined for each of the # keywords listed in , and # a variable composed of the given # followed by "_" and the name of the respective keyword. # These variables will then hold the respective value from the argument list. # For the keywords this will be TRUE or FALSE. # # All remaining arguments are collected in a variable # _UNPARSED_ARGUMENTS, this can be checked afterwards to see whether # your macro was called with unrecognized parameters. # # As an example here a my_install() macro, which takes similar arguments as the # real install() command: # # function(MY_INSTALL) # set(options OPTIONAL FAST) # set(oneValueArgs DESTINATION RENAME) # set(multiValueArgs TARGETS CONFIGURATIONS) # cmake_parse_arguments(MY_INSTALL "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN} ) # ... # # Assume my_install() has been called like this: # my_install(TARGETS foo bar DESTINATION bin OPTIONAL blub) # # After the cmake_parse_arguments() call the macro will have set the following # variables: # MY_INSTALL_OPTIONAL = TRUE # MY_INSTALL_FAST = FALSE (this option was not used when calling my_install() # MY_INSTALL_DESTINATION = "bin" # MY_INSTALL_RENAME = "" (was not used) # MY_INSTALL_TARGETS = "foo;bar" # MY_INSTALL_CONFIGURATIONS = "" (was not used) # MY_INSTALL_UNPARSED_ARGUMENTS = "blub" (no value expected after "OPTIONAL" # # You can the continue and process these variables. # # Keywords terminate lists of values, e.g. if directly after a one_value_keyword # another recognized keyword follows, this is interpreted as the beginning of # the new option. # E.g. my_install(TARGETS foo DESTINATION OPTIONAL) would result in # MY_INSTALL_DESTINATION set to "OPTIONAL", but MY_INSTALL_DESTINATION would # be empty and MY_INSTALL_OPTIONAL would be set to TRUE therefor. #============================================================================= # Copyright 2010 Alexander Neundorf # # Distributed under the OSI-approved BSD License (the "License"); # see accompanying file Copyright.txt for details. # # This software is distributed WITHOUT ANY WARRANTY; without even the # implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the License for more information. #============================================================================= # (To distribute this file outside of CMake, substitute the full # License text for the above reference.) if(__CMAKE_PARSE_ARGUMENTS_INCLUDED) return() endif() set(__CMAKE_PARSE_ARGUMENTS_INCLUDED TRUE) function(CMAKE_PARSE_ARGUMENTS prefix _optionNames _singleArgNames _multiArgNames) # first set all result variables to empty/FALSE foreach(arg_name ${_singleArgNames} ${_multiArgNames}) set(${prefix}_${arg_name}) endforeach(arg_name) foreach(option ${_optionNames}) set(${prefix}_${option} FALSE) endforeach(option) set(${prefix}_UNPARSED_ARGUMENTS) set(insideValues FALSE) set(currentArgName) # now iterate over all arguments and fill the result variables foreach(currentArg ${ARGN}) list(FIND _optionNames "${currentArg}" optionIndex) # ... then this marks the end of the arguments belonging to this keyword list(FIND _singleArgNames "${currentArg}" singleArgIndex) # ... then this marks the end of the arguments belonging to this keyword list(FIND _multiArgNames "${currentArg}" multiArgIndex) # ... then this marks the end of the arguments belonging to this keyword if(${optionIndex} EQUAL -1 AND ${singleArgIndex} EQUAL -1 AND ${multiArgIndex} EQUAL -1) if(insideValues) if("${insideValues}" STREQUAL "SINGLE") set(${prefix}_${currentArgName} ${currentArg}) set(insideValues FALSE) elseif("${insideValues}" STREQUAL "MULTI") list(APPEND ${prefix}_${currentArgName} ${currentArg}) endif() else(insideValues) list(APPEND ${prefix}_UNPARSED_ARGUMENTS ${currentArg}) endif(insideValues) else() if(NOT ${optionIndex} EQUAL -1) set(${prefix}_${currentArg} TRUE) set(insideValues FALSE) elseif(NOT ${singleArgIndex} EQUAL -1) set(currentArgName ${currentArg}) set(${prefix}_${currentArgName}) set(insideValues "SINGLE") elseif(NOT ${multiArgIndex} EQUAL -1) set(currentArgName ${currentArg}) set(${prefix}_${currentArgName}) set(insideValues "MULTI") endif() endif() endforeach(currentArg) # propagate the result variables to the caller: foreach(arg_name ${_singleArgNames} ${_multiArgNames} ${_optionNames}) set(${prefix}_${arg_name} ${${prefix}_${arg_name}} PARENT_SCOPE) endforeach(arg_name) set(${prefix}_UNPARSED_ARGUMENTS ${${prefix}_UNPARSED_ARGUMENTS} PARENT_SCOPE) endfunction(CMAKE_PARSE_ARGUMENTS _options _singleArgs _multiArgs) gr-air-modes-0.0.0.e47992d/cmake/Modules/FindGnuradioRuntime.cmake000066400000000000000000000021751221634657000244250ustar00rootroot00000000000000INCLUDE(FindPkgConfig) PKG_CHECK_MODULES(PC_GNURADIO_RUNTIME gnuradio-runtime) if(PC_GNURADIO_RUNTIME_FOUND) # look for include files FIND_PATH( GNURADIO_RUNTIME_INCLUDE_DIRS NAMES gnuradio/top_block.h HINTS $ENV{GNURADIO_RUNTIME_DIR}/include ${PC_GNURADIO_RUNTIME_INCLUDE_DIRS} ${CMAKE_INSTALL_PREFIX}/include PATHS /usr/local/include /usr/include ) # look for libs FIND_LIBRARY( GNURADIO_RUNTIME_LIBRARIES NAMES gnuradio-runtime HINTS $ENV{GNURADIO_RUNTIME_DIR}/lib ${PC_GNURADIO_RUNTIME_LIBDIR} ${CMAKE_INSTALL_PREFIX}/lib/ ${CMAKE_INSTALL_PREFIX}/lib64/ PATHS /usr/local/lib /usr/local/lib64 /usr/lib /usr/lib64 ) set(GNURADIO_RUNTIME_FOUND ${PC_GNURADIO_RUNTIME_FOUND}) endif(PC_GNURADIO_RUNTIME_FOUND) INCLUDE(FindPackageHandleStandardArgs) # do not check GNURADIO_RUNTIME_INCLUDE_DIRS, is not set when default include path us used. FIND_PACKAGE_HANDLE_STANDARD_ARGS(GNURADIO_RUNTIME DEFAULT_MSG GNURADIO_RUNTIME_LIBRARIES) MARK_AS_ADVANCED(GNURADIO_RUNTIME_LIBRARIES GNURADIO_RUNTIME_INCLUDE_DIRS) gr-air-modes-0.0.0.e47992d/cmake/Modules/FindPyQt.py000066400000000000000000000013761221634657000215600ustar00rootroot00000000000000# Copyright (c) 2007, Simon Edwards # Redistribution and use is allowed according to the terms of the BSD license. # For details see the accompanying COPYING-CMAKE-SCRIPTS file. import PyQt4.pyqtconfig pyqtcfg = PyQt4.pyqtconfig.Configuration() print("pyqt_version:%06.0x" % pyqtcfg.pyqt_version) print("pyqt_version_str:%s" % pyqtcfg.pyqt_version_str) pyqt_version_tag = "" in_t = False for item in pyqtcfg.pyqt_sip_flags.split(' '): if item=="-t": in_t = True elif in_t: if item.startswith("Qt_4"): pyqt_version_tag = item else: in_t = False print("pyqt_version_tag:%s" % pyqt_version_tag) print("pyqt_sip_dir:%s" % pyqtcfg.pyqt_sip_dir) print("pyqt_sip_flags:%s" % pyqtcfg.pyqt_sip_flags) gr-air-modes-0.0.0.e47992d/cmake/Modules/FindPyQt4.cmake000066400000000000000000000045321221634657000222710ustar00rootroot00000000000000# Find PyQt4 # ~~~~~~~~~~ # Copyright (c) 2007-2008, Simon Edwards # Copyright (c) 2012, Nicholas Corgan # Redistribution and use is allowed according to the terms of the BSD license. # For details see the accompanying COPYING-CMAKE-SCRIPTS file. # # PyQt4 website: http://www.riverbankcomputing.co.uk/pyqt/index.php # # Find the installed version of PyQt4. FindPyQt4 should only be called after # Python has been found. # # This file defines the following variables: # # PYQT4_VERSION - The version of PyQt4 found expressed as a 6 digit hex number # suitable for comparision as a string # # PYQT4_VERSION_STR - The version of PyQt4 as a human readable string. # # PYQT4_VERSION_TAG - The PyQt version tag using by PyQt's sip files. # # PYQT4_SIP_DIR - The directory holding the PyQt4 .sip files. # # PYQT4_SIP_FLAGS - The SIP flags used to build PyQt. IF(EXISTS PYQT4_VERSION AND EXISTS PYUIC4_EXECUTABLE) # Already in cache, be silent SET(PYQT4_FOUND TRUE) SET(PYUIC4_FOUND TRUE) ELSE(EXISTS PYQT4_VERSION AND EXISTS PYUIC4_EXECUTABLE) FIND_FILE(_find_pyqt_py FindPyQt.py PATHS ${CMAKE_MODULE_PATH}) EXECUTE_PROCESS(COMMAND ${PYTHON_EXECUTABLE} ${_find_pyqt_py} OUTPUT_VARIABLE pyqt_config) IF(pyqt_config) STRING(REGEX REPLACE "^pyqt_version:([^\n]+).*$" "\\1" PYQT4_VERSION ${pyqt_config}) STRING(REGEX REPLACE ".*\npyqt_version_str:([^\n]+).*$" "\\1" PYQT4_VERSION_STR ${pyqt_config}) STRING(REGEX REPLACE ".*\npyqt_version_tag:([^\n]+).*$" "\\1" PYQT4_VERSION_TAG ${pyqt_config}) STRING(REGEX REPLACE ".*\npyqt_sip_dir:([^\n]+).*$" "\\1" PYQT4_SIP_DIR ${pyqt_config}) STRING(REGEX REPLACE ".*\npyqt_sip_flags:([^\n]+).*$" "\\1" PYQT4_SIP_FLAGS ${pyqt_config}) SET(PYQT4_FOUND TRUE) ENDIF(pyqt_config) FIND_PROGRAM(PYUIC4_EXECUTABLE NAMES pyuic4) IF(PYUIC4_EXECUTABLE) SET(PYUIC4_FOUND TRUE) ENDIF(PYUIC4_EXECUTABLE) IF(PYQT4_FOUND AND PYUIC4_FOUND) IF(NOT PYQT4_FIND_QUIETLY) MESSAGE(STATUS "Found PyQt4 version: ${PYQT4_VERSION_STR}") MESSAGE(STATUS "Found pyuic4: ${PYUIC4_EXECUTABLE}") ENDIF(NOT PYQT4_FIND_QUIETLY) ELSE(PYQT4_FOUND AND PYUIC4_FOUND) IF(PYQT4_FIND_REQUIRED) MESSAGE(FATAL_ERROR "Could not find Python") ENDIF(PYQT4_FIND_REQUIRED) ENDIF(PYQT4_FOUND AND PYUIC4_FOUND) ENDIF(EXISTS PYQT4_VERSION AND EXISTS PYUIC4_EXECUTABLE) gr-air-modes-0.0.0.e47992d/cmake/Modules/FindQwt.cmake000066400000000000000000000013711221634657000220610ustar00rootroot00000000000000# - try to find Qwt libraries and include files # QWT_INCLUDE_DIR where to find qwt_plot.h, etc. # QWT_LIBRARIES libraries to link against # QWT_FOUND If false, do not try to use Qwt find_path (QWT_INCLUDE_DIRS NAMES qwt_plot.h PATHS /usr/local/include/qwt-qt4 /usr/local/include/qwt /usr/include/qwt-qt4 /usr/include/qwt /opt/local/include/qwt /sw/include/qwt ) find_library (QWT_LIBRARIES NAMES qwt-qt4 qwt PATHS /usr/local/lib /usr/lib /opt/local/lib /sw/lib ) # handle the QUIETLY and REQUIRED arguments and set QWT_FOUND to TRUE if # all listed variables are TRUE include ( FindPackageHandleStandardArgs ) find_package_handle_standard_args( Qwt DEFAULT_MSG QWT_LIBRARIES QWT_INCLUDE_DIRS ) MARK_AS_ADVANCED(QWT_LIBRARIES QWT_INCLUDE_DIRS) gr-air-modes-0.0.0.e47992d/cmake/Modules/FindZeroMQ.cmake000066400000000000000000000032141221634657000224610ustar00rootroot00000000000000# - Find zeromq libraries # This module finds zeromq if it is installed and determines where the # include files and libraries are. It also determines what the name of # the library is. This code sets the following variables: # # ZEROMQ_FOUND - have the zeromq libs been found # ZEROMQ_LIBRARIES - path to the zeromq library # ZEROMQ_INCLUDE_DIRS - path to where zmq.h is found # ZEROMQ_DEBUG_LIBRARIES - path to the debug library #INCLUDE(CMakeFindFrameworks) # Search for the zeromq framework on Apple. #CMAKE_FIND_FRAMEWORKS(ZeroMQ) IF(WIN32) FIND_LIBRARY(ZEROMQ_DEBUG_LIBRARY NAMES libzmq_d zmq_d PATHS ${ZEROMQ_LIBRARIES} ) ENDIF(WIN32) FIND_LIBRARY(ZEROMQ_LIBRARY NAMES libzmq zmq PATHS ${ZEROMQ_LIBRARIES} ${NSCP_LIBRARYDIR} ) # IF(ZeroMQ_FRAMEWORKS AND NOT ZEROMQ_INCLUDE_DIR) # FOREACH(dir ${ZeroMQ_FRAMEWORKS}) # SET(ZEROMQ_FRAMEWORK_INCLUDES ${ZEROMQ_FRAMEWORK_INCLUDES} # ${dir}/Versions/${_CURRENT_VERSION}/include/zeromq${_CURRENT_VERSION}) # ENDFOREACH(dir) # ENDIF(ZeroMQ_FRAMEWORKS AND NOT ZEROMQ_INCLUDE_DIR) FIND_PATH(ZEROMQ_INCLUDE_DIR NAMES zmq.hpp PATHS # ${ZEROMQ_FRAMEWORK_INCLUDES} ${ZEROMQ_INCLUDE_DIRS} ${NSCP_INCLUDEDIR} ${ZEROMQ_INCLUDE_DIR} ) MARK_AS_ADVANCED( ZEROMQ_DEBUG_LIBRARY ZEROMQ_LIBRARY ZEROMQ_INCLUDE_DIR ) SET(ZEROMQ_INCLUDE_DIRS "${ZEROMQ_INCLUDE_DIR}") SET(ZEROMQ_LIBRARIES "${ZEROMQ_LIBRARY}") SET(ZEROMQ_DEBUG_LIBRARIES "${ZEROMQ_DEBUG_LIBRARY}") INCLUDE(FindPackageHandleStandardArgs) FIND_PACKAGE_HANDLE_STANDARD_ARGS(ZeroMQ DEFAULT_MSG ZEROMQ_LIBRARIES ZEROMQ_INCLUDE_DIRS) gr-air-modes-0.0.0.e47992d/cmake/Modules/GrBoost.cmake000066400000000000000000000075421221634657000220720ustar00rootroot00000000000000# Copyright 2010-2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. if(DEFINED __INCLUDED_GR_BOOST_CMAKE) return() endif() set(__INCLUDED_GR_BOOST_CMAKE TRUE) ######################################################################## # Setup Boost and handle some system specific things ######################################################################## set(BOOST_REQUIRED_COMPONENTS date_time program_options filesystem system thread ) if(UNIX AND NOT BOOST_ROOT AND EXISTS "/usr/lib64") list(APPEND BOOST_LIBRARYDIR "/usr/lib64") #fedora 64-bit fix endif(UNIX AND NOT BOOST_ROOT AND EXISTS "/usr/lib64") if(MSVC) set(BOOST_REQUIRED_COMPONENTS ${BOOST_REQUIRED_COMPONENTS} chrono) if (NOT DEFINED BOOST_ALL_DYN_LINK) set(BOOST_ALL_DYN_LINK TRUE) endif() set(BOOST_ALL_DYN_LINK "${BOOST_ALL_DYN_LINK}" CACHE BOOL "boost enable dynamic linking") if(BOOST_ALL_DYN_LINK) add_definitions(-DBOOST_ALL_DYN_LINK) #setup boost auto-linking in msvc else(BOOST_ALL_DYN_LINK) unset(BOOST_REQUIRED_COMPONENTS) #empty components list for static link endif(BOOST_ALL_DYN_LINK) endif(MSVC) find_package(Boost "1.35" COMPONENTS ${BOOST_REQUIRED_COMPONENTS}) # This does not allow us to disable specific versions. It is used # internally by cmake to know the formation newer versions. As newer # Boost version beyond what is shown here are produced, we must extend # this list. To disable Boost versions, see below. set(Boost_ADDITIONAL_VERSIONS "1.35.0" "1.35" "1.36.0" "1.36" "1.37.0" "1.37" "1.38.0" "1.38" "1.39.0" "1.39" "1.40.0" "1.40" "1.41.0" "1.41" "1.42.0" "1.42" "1.43.0" "1.43" "1.44.0" "1.44" "1.45.0" "1.45" "1.46.0" "1.46" "1.47.0" "1.47" "1.48.0" "1.48" "1.49.0" "1.49" "1.50.0" "1.50" "1.51.0" "1.51" "1.52.0" "1.52" "1.53.0" "1.53" "1.54.0" "1.54" "1.55.0" "1.55" "1.56.0" "1.56" "1.57.0" "1.57" "1.58.0" "1.58" "1.59.0" "1.59" "1.60.0" "1.60" "1.61.0" "1.61" "1.62.0" "1.62" "1.63.0" "1.63" "1.64.0" "1.64" "1.65.0" "1.65" "1.66.0" "1.66" "1.67.0" "1.67" "1.68.0" "1.68" "1.69.0" "1.69" ) # Boost 1.52 disabled, see https://svn.boost.org/trac/boost/ticket/7669 # Similar problems with Boost 1.46 and 1.47. OPTION(ENABLE_BAD_BOOST "Enable known bad versions of Boost" OFF) if(ENABLE_BAD_BOOST) MESSAGE(STATUS "Enabling use of known bad versions of Boost.") endif(ENABLE_BAD_BOOST) # For any unsuitable Boost version, add the version number below in # the following format: XXYYZZ # Where: # XX is the major version ('10' for version 1) # YY is the minor version number ('46' for 1.46) # ZZ is the patcher version number (typically just '00') set(Boost_NOGO_VERSIONS 104600 104601 104700 105200 ) foreach(ver ${Boost_NOGO_VERSIONS}) if(${Boost_VERSION} EQUAL ${ver}) if(NOT ENABLE_BAD_BOOST) MESSAGE(STATUS "WARNING: Found a known bad version of Boost (v${Boost_VERSION}). Disabling.") set(Boost_FOUND FALSE) else(NOT ENABLE_BAD_BOOST) MESSAGE(STATUS "WARNING: Found a known bad version of Boost (v${Boost_VERSION}). Continuing anyway.") set(Boost_FOUND TRUE) endif(NOT ENABLE_BAD_BOOST) endif(${Boost_VERSION} EQUAL ${ver}) endforeach(ver) gr-air-modes-0.0.0.e47992d/cmake/Modules/GrMiscUtils.cmake000066400000000000000000000207621221634657000227170ustar00rootroot00000000000000# Copyright 2010-2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. if(DEFINED __INCLUDED_GR_MISC_UTILS_CMAKE) return() endif() set(__INCLUDED_GR_MISC_UTILS_CMAKE TRUE) ######################################################################## # Set global variable macro. # Used for subdirectories to export settings. # Example: include and library paths. ######################################################################## function(GR_SET_GLOBAL var) set(${var} ${ARGN} CACHE INTERNAL "" FORCE) endfunction(GR_SET_GLOBAL) ######################################################################## # Set the pre-processor definition if the condition is true. # - def the pre-processor definition to set and condition name ######################################################################## function(GR_ADD_COND_DEF def) if(${def}) add_definitions(-D${def}) endif(${def}) endfunction(GR_ADD_COND_DEF) ######################################################################## # Check for a header and conditionally set a compile define. # - hdr the relative path to the header file # - def the pre-processor definition to set ######################################################################## function(GR_CHECK_HDR_N_DEF hdr def) include(CheckIncludeFileCXX) CHECK_INCLUDE_FILE_CXX(${hdr} ${def}) GR_ADD_COND_DEF(${def}) endfunction(GR_CHECK_HDR_N_DEF) ######################################################################## # Include subdirectory macro. # Sets the CMake directory variables, # includes the subdirectory CMakeLists.txt, # resets the CMake directory variables. # # This macro includes subdirectories rather than adding them # so that the subdirectory can affect variables in the level above. # This provides a work-around for the lack of convenience libraries. # This way a subdirectory can append to the list of library sources. ######################################################################## macro(GR_INCLUDE_SUBDIRECTORY subdir) #insert the current directories on the front of the list list(INSERT _cmake_source_dirs 0 ${CMAKE_CURRENT_SOURCE_DIR}) list(INSERT _cmake_binary_dirs 0 ${CMAKE_CURRENT_BINARY_DIR}) #set the current directories to the names of the subdirs set(CMAKE_CURRENT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}) set(CMAKE_CURRENT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/${subdir}) #include the subdirectory CMakeLists to run it file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) include(${CMAKE_CURRENT_SOURCE_DIR}/CMakeLists.txt) #reset the value of the current directories list(GET _cmake_source_dirs 0 CMAKE_CURRENT_SOURCE_DIR) list(GET _cmake_binary_dirs 0 CMAKE_CURRENT_BINARY_DIR) #pop the subdir names of the front of the list list(REMOVE_AT _cmake_source_dirs 0) list(REMOVE_AT _cmake_binary_dirs 0) endmacro(GR_INCLUDE_SUBDIRECTORY) ######################################################################## # Check if a compiler flag works and conditionally set a compile define. # - flag the compiler flag to check for # - have the variable to set with result ######################################################################## macro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE flag have) include(CheckCXXCompilerFlag) CHECK_CXX_COMPILER_FLAG(${flag} ${have}) if(${have}) add_definitions(${flag}) endif(${have}) endmacro(GR_ADD_CXX_COMPILER_FLAG_IF_AVAILABLE) ######################################################################## # Generates the .la libtool file # This appears to generate libtool files that cannot be used by auto*. # Usage GR_LIBTOOL(TARGET [target] DESTINATION [dest]) # Notice: there is not COMPONENT option, these will not get distributed. ######################################################################## function(GR_LIBTOOL) if(NOT DEFINED GENERATE_LIBTOOL) set(GENERATE_LIBTOOL OFF) #disabled by default endif() if(GENERATE_LIBTOOL) include(CMakeParseArgumentsCopy) CMAKE_PARSE_ARGUMENTS(GR_LIBTOOL "" "TARGET;DESTINATION" "" ${ARGN}) find_program(LIBTOOL libtool) if(LIBTOOL) include(CMakeMacroLibtoolFile) CREATE_LIBTOOL_FILE(${GR_LIBTOOL_TARGET} /${GR_LIBTOOL_DESTINATION}) endif(LIBTOOL) endif(GENERATE_LIBTOOL) endfunction(GR_LIBTOOL) ######################################################################## # Do standard things to the library target # - set target properties # - make install rules # Also handle gnuradio custom naming conventions w/ extras mode. ######################################################################## function(GR_LIBRARY_FOO target) #parse the arguments for component names include(CMakeParseArgumentsCopy) CMAKE_PARSE_ARGUMENTS(GR_LIBRARY "" "RUNTIME_COMPONENT;DEVEL_COMPONENT" "" ${ARGN}) #set additional target properties set_target_properties(${target} PROPERTIES SOVERSION ${LIBVER}) #install the generated files like so... install(TARGETS ${target} LIBRARY DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .so/.dylib file ARCHIVE DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_DEVEL_COMPONENT} # .lib file RUNTIME DESTINATION ${GR_RUNTIME_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} # .dll file ) #extras mode enabled automatically on linux if(NOT DEFINED LIBRARY_EXTRAS) set(LIBRARY_EXTRAS ${LINUX}) endif() #special extras mode to enable alternative naming conventions if(LIBRARY_EXTRAS) #create .la file before changing props GR_LIBTOOL(TARGET ${target} DESTINATION ${GR_LIBRARY_DIR}) #give the library a special name with ultra-zero soversion set_target_properties(${target} PROPERTIES OUTPUT_NAME ${target}-${LIBVER} SOVERSION "0.0.0") set(target_name lib${target}-${LIBVER}.so.0.0.0) #custom command to generate symlinks add_custom_command( TARGET ${target} POST_BUILD COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so COMMAND ${CMAKE_COMMAND} -E create_symlink ${target_name} ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 COMMAND ${CMAKE_COMMAND} -E touch ${target_name} #so the symlinks point to something valid so cmake 2.6 will install ) #and install the extra symlinks install( FILES ${CMAKE_CURRENT_BINARY_DIR}/lib${target}.so ${CMAKE_CURRENT_BINARY_DIR}/lib${target}-${LIBVER}.so.0 DESTINATION ${GR_LIBRARY_DIR} COMPONENT ${GR_LIBRARY_RUNTIME_COMPONENT} ) endif(LIBRARY_EXTRAS) endfunction(GR_LIBRARY_FOO) ######################################################################## # Create a dummy custom command that depends on other targets. # Usage: # GR_GEN_TARGET_DEPS(unique_name target_deps ...) # ADD_CUSTOM_COMMAND( ${target_deps}) # # Custom command cant depend on targets, but can depend on executables, # and executables can depend on targets. So this is the process: ######################################################################## function(GR_GEN_TARGET_DEPS name var) file( WRITE ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in "int main(void){return 0;}\n" ) execute_process( COMMAND ${CMAKE_COMMAND} -E copy_if_different ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp.in ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp ) add_executable(${name} ${CMAKE_CURRENT_BINARY_DIR}/${name}.cpp) if(ARGN) add_dependencies(${name} ${ARGN}) endif(ARGN) if(CMAKE_CROSSCOMPILING) set(${var} "DEPENDS;${name}" PARENT_SCOPE) #cant call command when cross else() set(${var} "DEPENDS;${name};COMMAND;${name}" PARENT_SCOPE) endif() endfunction(GR_GEN_TARGET_DEPS) gr-air-modes-0.0.0.e47992d/cmake/Modules/GrPlatform.cmake000066400000000000000000000034451221634657000225660ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. if(DEFINED __INCLUDED_GR_PLATFORM_CMAKE) return() endif() set(__INCLUDED_GR_PLATFORM_CMAKE TRUE) ######################################################################## # Setup additional defines for OS types ######################################################################## if(CMAKE_SYSTEM_NAME STREQUAL "Linux") set(LINUX TRUE) endif() if(LINUX AND EXISTS "/etc/debian_version") set(DEBIAN TRUE) endif() if(LINUX AND EXISTS "/etc/redhat-release") set(REDHAT TRUE) endif() if(LINUX AND EXISTS "/etc/slackware-version") set(SLACKWARE TRUE) endif() ######################################################################## # when the library suffix should be 64 (applies to redhat linux family) ######################################################################## if (REDHAT OR SLACKWARE) set(LIB64_CONVENTION TRUE) endif() if(NOT DEFINED LIB_SUFFIX AND LIB64_CONVENTION AND CMAKE_SYSTEM_PROCESSOR MATCHES "64$") set(LIB_SUFFIX 64) endif() set(LIB_SUFFIX ${LIB_SUFFIX} CACHE STRING "lib directory suffix") gr-air-modes-0.0.0.e47992d/cmake/Modules/GrPython.cmake000066400000000000000000000221201221634657000222520ustar00rootroot00000000000000# Copyright 2010-2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. if(DEFINED __INCLUDED_GR_PYTHON_CMAKE) return() endif() set(__INCLUDED_GR_PYTHON_CMAKE TRUE) ######################################################################## # Setup the python interpreter: # This allows the user to specify a specific interpreter, # or finds the interpreter via the built-in cmake module. ######################################################################## #this allows the user to override PYTHON_EXECUTABLE if(PYTHON_EXECUTABLE) set(PYTHONINTERP_FOUND TRUE) #otherwise if not set, try to automatically find it else(PYTHON_EXECUTABLE) #use the built-in find script find_package(PythonInterp) #and if that fails use the find program routine if(NOT PYTHONINTERP_FOUND) find_program(PYTHON_EXECUTABLE NAMES python python2.7 python2.6 python2.5) if(PYTHON_EXECUTABLE) set(PYTHONINTERP_FOUND TRUE) endif(PYTHON_EXECUTABLE) endif(NOT PYTHONINTERP_FOUND) endif(PYTHON_EXECUTABLE) #make the path to the executable appear in the cmake gui set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE} CACHE FILEPATH "python interpreter") #make sure we can use -B with python (introduced in 2.6) if(PYTHON_EXECUTABLE) execute_process( COMMAND ${PYTHON_EXECUTABLE} -B -c "" OUTPUT_QUIET ERROR_QUIET RESULT_VARIABLE PYTHON_HAS_DASH_B_RESULT ) if(PYTHON_HAS_DASH_B_RESULT EQUAL 0) set(PYTHON_DASH_B "-B") endif() endif(PYTHON_EXECUTABLE) ######################################################################## # Check for the existence of a python module: # - desc a string description of the check # - mod the name of the module to import # - cmd an additional command to run # - have the result variable to set ######################################################################## macro(GR_PYTHON_CHECK_MODULE desc mod cmd have) message(STATUS "") message(STATUS "Python checking for ${desc}") execute_process( COMMAND ${PYTHON_EXECUTABLE} -c " ######################################### try: import ${mod} assert ${cmd} except ImportError, AssertionError: exit(-1) except: pass #########################################" RESULT_VARIABLE ${have} ) if(${have} EQUAL 0) message(STATUS "Python checking for ${desc} - found") set(${have} TRUE) else(${have} EQUAL 0) message(STATUS "Python checking for ${desc} - not found") set(${have} FALSE) endif(${have} EQUAL 0) endmacro(GR_PYTHON_CHECK_MODULE) ######################################################################## # Sets the python installation directory GR_PYTHON_DIR ######################################################################## execute_process(COMMAND ${PYTHON_EXECUTABLE} -c " from distutils import sysconfig print sysconfig.get_python_lib(plat_specific=True, prefix='') " OUTPUT_VARIABLE GR_PYTHON_DIR OUTPUT_STRIP_TRAILING_WHITESPACE ) file(TO_CMAKE_PATH ${GR_PYTHON_DIR} GR_PYTHON_DIR) ######################################################################## # Create an always-built target with a unique name # Usage: GR_UNIQUE_TARGET( ) ######################################################################## function(GR_UNIQUE_TARGET desc) file(RELATIVE_PATH reldir ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_BINARY_DIR}) execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import re, hashlib unique = hashlib.md5('${reldir}${ARGN}').hexdigest()[:5] print(re.sub('\\W', '_', '${desc} ${reldir} ' + unique))" OUTPUT_VARIABLE _target OUTPUT_STRIP_TRAILING_WHITESPACE) add_custom_target(${_target} ALL DEPENDS ${ARGN}) endfunction(GR_UNIQUE_TARGET) ######################################################################## # Install python sources (also builds and installs byte-compiled python) ######################################################################## function(GR_PYTHON_INSTALL) include(CMakeParseArgumentsCopy) CMAKE_PARSE_ARGUMENTS(GR_PYTHON_INSTALL "" "DESTINATION;COMPONENT" "FILES;PROGRAMS" ${ARGN}) #################################################################### if(GR_PYTHON_INSTALL_FILES) #################################################################### install(${ARGN}) #installs regular python files #create a list of all generated files unset(pysrcfiles) unset(pycfiles) unset(pyofiles) foreach(pyfile ${GR_PYTHON_INSTALL_FILES}) get_filename_component(pyfile ${pyfile} ABSOLUTE) list(APPEND pysrcfiles ${pyfile}) #determine if this file is in the source or binary directory file(RELATIVE_PATH source_rel_path ${CMAKE_CURRENT_SOURCE_DIR} ${pyfile}) string(LENGTH "${source_rel_path}" source_rel_path_len) file(RELATIVE_PATH binary_rel_path ${CMAKE_CURRENT_BINARY_DIR} ${pyfile}) string(LENGTH "${binary_rel_path}" binary_rel_path_len) #and set the generated path appropriately if(${source_rel_path_len} GREATER ${binary_rel_path_len}) set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${binary_rel_path}) else() set(pygenfile ${CMAKE_CURRENT_BINARY_DIR}/${source_rel_path}) endif() list(APPEND pycfiles ${pygenfile}c) list(APPEND pyofiles ${pygenfile}o) #ensure generation path exists get_filename_component(pygen_path ${pygenfile} PATH) file(MAKE_DIRECTORY ${pygen_path}) endforeach(pyfile) #the command to generate the pyc files add_custom_command( DEPENDS ${pysrcfiles} OUTPUT ${pycfiles} COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pycfiles} ) #the command to generate the pyo files add_custom_command( DEPENDS ${pysrcfiles} OUTPUT ${pyofiles} COMMAND ${PYTHON_EXECUTABLE} -O ${CMAKE_BINARY_DIR}/python_compile_helper.py ${pysrcfiles} ${pyofiles} ) #create install rule and add generated files to target list set(python_install_gen_targets ${pycfiles} ${pyofiles}) install(FILES ${python_install_gen_targets} DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} ) #################################################################### elseif(GR_PYTHON_INSTALL_PROGRAMS) #################################################################### file(TO_NATIVE_PATH ${PYTHON_EXECUTABLE} pyexe_native) if (CMAKE_CROSSCOMPILING) set(pyexe_native /usr/bin/env python) endif() foreach(pyfile ${GR_PYTHON_INSTALL_PROGRAMS}) get_filename_component(pyfile_name ${pyfile} NAME) get_filename_component(pyfile ${pyfile} ABSOLUTE) string(REPLACE "${CMAKE_SOURCE_DIR}" "${CMAKE_BINARY_DIR}" pyexefile "${pyfile}.exe") list(APPEND python_install_gen_targets ${pyexefile}) get_filename_component(pyexefile_path ${pyexefile} PATH) file(MAKE_DIRECTORY ${pyexefile_path}) add_custom_command( OUTPUT ${pyexefile} DEPENDS ${pyfile} COMMAND ${PYTHON_EXECUTABLE} -c \"open('${pyexefile}', 'w').write('\#!${pyexe_native}\\n'+open('${pyfile}').read())\" COMMENT "Shebangin ${pyfile_name}" ) #on windows, python files need an extension to execute get_filename_component(pyfile_ext ${pyfile} EXT) if(WIN32 AND NOT pyfile_ext) set(pyfile_name "${pyfile_name}.py") endif() install(PROGRAMS ${pyexefile} RENAME ${pyfile_name} DESTINATION ${GR_PYTHON_INSTALL_DESTINATION} COMPONENT ${GR_PYTHON_INSTALL_COMPONENT} ) endforeach(pyfile) endif() GR_UNIQUE_TARGET("pygen" ${python_install_gen_targets}) endfunction(GR_PYTHON_INSTALL) ######################################################################## # Write the python helper script that generates byte code files ######################################################################## file(WRITE ${CMAKE_BINARY_DIR}/python_compile_helper.py " import sys, py_compile files = sys.argv[1:] srcs, gens = files[:len(files)/2], files[len(files)/2:] for src, gen in zip(srcs, gens): py_compile.compile(file=src, cfile=gen, doraise=True) ") gr-air-modes-0.0.0.e47992d/cmake/Modules/GrSwig.cmake000066400000000000000000000212331221634657000217060ustar00rootroot00000000000000# Copyright 2010-2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. if(DEFINED __INCLUDED_GR_SWIG_CMAKE) return() endif() set(__INCLUDED_GR_SWIG_CMAKE TRUE) include(GrPython) ######################################################################## # Builds a swig documentation file to be generated into python docstrings # Usage: GR_SWIG_MAKE_DOCS(output_file input_path input_path....) # # Set the following variable to specify extra dependent targets: # - GR_SWIG_DOCS_SOURCE_DEPS # - GR_SWIG_DOCS_TARGET_DEPS ######################################################################## function(GR_SWIG_MAKE_DOCS output_file) if(ENABLE_DOXYGEN) #setup the input files variable list, quote formated set(input_files) unset(INPUT_PATHS) foreach(input_path ${ARGN}) if (IS_DIRECTORY ${input_path}) #when input path is a directory file(GLOB input_path_h_files ${input_path}/*.h) else() #otherwise its just a file, no glob set(input_path_h_files ${input_path}) endif() list(APPEND input_files ${input_path_h_files}) set(INPUT_PATHS "${INPUT_PATHS} \"${input_path}\"") endforeach(input_path) #determine the output directory get_filename_component(name ${output_file} NAME_WE) get_filename_component(OUTPUT_DIRECTORY ${output_file} PATH) set(OUTPUT_DIRECTORY ${OUTPUT_DIRECTORY}/${name}_swig_docs) make_directory(${OUTPUT_DIRECTORY}) #generate the Doxyfile used by doxygen configure_file( ${CMAKE_SOURCE_DIR}/docs/doxygen/Doxyfile.swig_doc.in ${OUTPUT_DIRECTORY}/Doxyfile @ONLY) #Create a dummy custom command that depends on other targets include(GrMiscUtils) GR_GEN_TARGET_DEPS(_${name}_tag tag_deps ${GR_SWIG_DOCS_TARGET_DEPS}) #call doxygen on the Doxyfile + input headers add_custom_command( OUTPUT ${OUTPUT_DIRECTORY}/xml/index.xml DEPENDS ${input_files} ${GR_SWIG_DOCS_SOURCE_DEPS} ${tag_deps} COMMAND ${DOXYGEN_EXECUTABLE} ${OUTPUT_DIRECTORY}/Doxyfile COMMENT "Generating doxygen xml for ${name} docs" ) #call the swig_doc script on the xml files add_custom_command( OUTPUT ${output_file} DEPENDS ${input_files} ${stamp-file} ${OUTPUT_DIRECTORY}/xml/index.xml COMMAND ${PYTHON_EXECUTABLE} ${PYTHON_DASH_B} ${CMAKE_SOURCE_DIR}/docs/doxygen/swig_doc.py ${OUTPUT_DIRECTORY}/xml ${output_file} COMMENT "Generating python docstrings for ${name}" WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/docs/doxygen ) else(ENABLE_DOXYGEN) file(WRITE ${output_file} "\n") #no doxygen -> empty file endif(ENABLE_DOXYGEN) endfunction(GR_SWIG_MAKE_DOCS) ######################################################################## # Build a swig target for the common gnuradio use case. Usage: # GR_SWIG_MAKE(target ifile ifile ifile...) # # Set the following variables before calling: # - GR_SWIG_FLAGS # - GR_SWIG_INCLUDE_DIRS # - GR_SWIG_LIBRARIES # - GR_SWIG_SOURCE_DEPS # - GR_SWIG_TARGET_DEPS # - GR_SWIG_DOC_FILE # - GR_SWIG_DOC_DIRS ######################################################################## macro(GR_SWIG_MAKE name) set(ifiles ${ARGN}) list(APPEND GR_SWIG_TARGET_DEPS ${GR_SWIG_LIBRARIES}) #do swig doc generation if specified if (GR_SWIG_DOC_FILE) set(GR_SWIG_DOCS_SOURCE_DEPS ${GR_SWIG_SOURCE_DEPS}) set(GR_SWIG_DOCS_TAREGT_DEPS ${GR_SWIG_TARGET_DEPS}) GR_SWIG_MAKE_DOCS(${GR_SWIG_DOC_FILE} ${GR_SWIG_DOC_DIRS}) add_custom_target(${name}_swig_doc DEPENDS ${GR_SWIG_DOC_FILE}) list(APPEND GR_SWIG_TARGET_DEPS ${name}_swig_doc) endif() #append additional include directories find_package(PythonLibs) list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_PATH}) #deprecated name (now dirs) list(APPEND GR_SWIG_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS}) list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}) list(APPEND GR_SWIG_INCLUDE_DIRS ${CMAKE_CURRENT_BINARY_DIR}) #determine include dependencies for swig file execute_process( COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_BINARY_DIR}/get_swig_deps.py "${ifiles}" "${GR_SWIG_INCLUDE_DIRS}" OUTPUT_STRIP_TRAILING_WHITESPACE OUTPUT_VARIABLE SWIG_MODULE_${name}_EXTRA_DEPS WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} ) #Create a dummy custom command that depends on other targets include(GrMiscUtils) GR_GEN_TARGET_DEPS(_${name}_swig_tag tag_deps ${GR_SWIG_TARGET_DEPS}) set(tag_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.tag) add_custom_command( OUTPUT ${tag_file} DEPENDS ${GR_SWIG_SOURCE_DEPS} ${tag_deps} COMMAND ${CMAKE_COMMAND} -E touch ${tag_file} ) #append the specified include directories include_directories(${GR_SWIG_INCLUDE_DIRS}) list(APPEND SWIG_MODULE_${name}_EXTRA_DEPS ${tag_file}) #setup the swig flags with flags and include directories set(CMAKE_SWIG_FLAGS -fvirtual -modern -keyword -w511 -module ${name} ${GR_SWIG_FLAGS}) foreach(dir ${GR_SWIG_INCLUDE_DIRS}) list(APPEND CMAKE_SWIG_FLAGS "-I${dir}") endforeach(dir) #set the C++ property on the swig .i file so it builds set_source_files_properties(${ifiles} PROPERTIES CPLUSPLUS ON) #setup the actual swig library target to be built include(UseSWIG) SWIG_ADD_MODULE(${name} python ${ifiles}) SWIG_LINK_LIBRARIES(${name} ${PYTHON_LIBRARIES} ${GR_SWIG_LIBRARIES}) endmacro(GR_SWIG_MAKE) ######################################################################## # Install swig targets generated by GR_SWIG_MAKE. Usage: # GR_SWIG_INSTALL( # TARGETS target target target... # [DESTINATION destination] # [COMPONENT component] # ) ######################################################################## macro(GR_SWIG_INSTALL) include(CMakeParseArgumentsCopy) CMAKE_PARSE_ARGUMENTS(GR_SWIG_INSTALL "" "DESTINATION;COMPONENT" "TARGETS" ${ARGN}) foreach(name ${GR_SWIG_INSTALL_TARGETS}) install(TARGETS ${SWIG_MODULE_${name}_REAL_NAME} DESTINATION ${GR_SWIG_INSTALL_DESTINATION} COMPONENT ${GR_SWIG_INSTALL_COMPONENT} ) include(GrPython) GR_PYTHON_INSTALL(FILES ${CMAKE_CURRENT_BINARY_DIR}/${name}.py DESTINATION ${GR_SWIG_INSTALL_DESTINATION} COMPONENT ${GR_SWIG_INSTALL_COMPONENT} ) GR_LIBTOOL( TARGET ${SWIG_MODULE_${name}_REAL_NAME} DESTINATION ${GR_SWIG_INSTALL_DESTINATION} ) endforeach(name) endmacro(GR_SWIG_INSTALL) ######################################################################## # Generate a python file that can determine swig dependencies. # Used by the make macro above to determine extra dependencies. # When you build C++, CMake figures out the header dependencies. # This code essentially performs that logic for swig includes. ######################################################################## file(WRITE ${CMAKE_BINARY_DIR}/get_swig_deps.py " import os, sys, re include_matcher = re.compile('[#|%]include\\s*[<|\"](.*)[>|\"]') include_dirs = sys.argv[2].split(';') def get_swig_incs(file_path): file_contents = open(file_path, 'r').read() return include_matcher.findall(file_contents, re.MULTILINE) def get_swig_deps(file_path, level): deps = [file_path] if level == 0: return deps for inc_file in get_swig_incs(file_path): for inc_dir in include_dirs: inc_path = os.path.join(inc_dir, inc_file) if not os.path.exists(inc_path): continue deps.extend(get_swig_deps(inc_path, level-1)) return deps if __name__ == '__main__': ifiles = sys.argv[1].split(';') deps = sum([get_swig_deps(ifile, 3) for ifile in ifiles], []) #sys.stderr.write(';'.join(set(deps)) + '\\n\\n') print(';'.join(set(deps))) ") gr-air-modes-0.0.0.e47992d/cmake/Modules/GrTest.cmake000066400000000000000000000126771221634657000217300ustar00rootroot00000000000000# Copyright 2010-2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. if(DEFINED __INCLUDED_GR_TEST_CMAKE) return() endif() set(__INCLUDED_GR_TEST_CMAKE TRUE) ######################################################################## # Add a unit test and setup the environment for a unit test. # Takes the same arguments as the ADD_TEST function. # # Before calling set the following variables: # GR_TEST_TARGET_DEPS - built targets for the library path # GR_TEST_LIBRARY_DIRS - directories for the library path # GR_TEST_PYTHON_DIRS - directories for the python path ######################################################################## function(GR_ADD_TEST test_name) #Ensure that the build exe also appears in the PATH. list(APPEND GR_TEST_TARGET_DEPS ${ARGN}) #In the land of windows, all libraries must be in the PATH. #Since the dependent libraries are not yet installed, #we must manually set them in the PATH to run tests. #The following appends the path of a target dependency. foreach(target ${GR_TEST_TARGET_DEPS}) get_target_property(location ${target} LOCATION) if(location) get_filename_component(path ${location} PATH) string(REGEX REPLACE "\\$\\(.*\\)" ${CMAKE_BUILD_TYPE} path ${path}) list(APPEND GR_TEST_LIBRARY_DIRS ${path}) endif(location) endforeach(target) if(WIN32) #SWIG generates the python library files into a subdirectory. #Therefore, we must append this subdirectory into PYTHONPATH. #Only do this for the python directories matching the following: foreach(pydir ${GR_TEST_PYTHON_DIRS}) get_filename_component(name ${pydir} NAME) if(name MATCHES "^(swig|lib|src)$") list(APPEND GR_TEST_PYTHON_DIRS ${pydir}/${CMAKE_BUILD_TYPE}) endif() endforeach(pydir) endif(WIN32) file(TO_NATIVE_PATH ${CMAKE_CURRENT_SOURCE_DIR} srcdir) file(TO_NATIVE_PATH "${GR_TEST_LIBRARY_DIRS}" libpath) #ok to use on dir list? file(TO_NATIVE_PATH "${GR_TEST_PYTHON_DIRS}" pypath) #ok to use on dir list? set(environs "GR_DONT_LOAD_PREFS=1" "srcdir=${srcdir}") #http://www.cmake.org/pipermail/cmake/2009-May/029464.html #Replaced this add test + set environs code with the shell script generation. #Its nicer to be able to manually run the shell script to diagnose problems. #ADD_TEST(${ARGV}) #SET_TESTS_PROPERTIES(${test_name} PROPERTIES ENVIRONMENT "${environs}") if(UNIX) set(LD_PATH_VAR "LD_LIBRARY_PATH") if(APPLE) set(LD_PATH_VAR "DYLD_LIBRARY_PATH") endif() set(binpath "${CMAKE_CURRENT_BINARY_DIR}:$PATH") list(APPEND libpath "$${LD_PATH_VAR}") list(APPEND pypath "$PYTHONPATH") #replace list separator with the path separator string(REPLACE ";" ":" libpath "${libpath}") string(REPLACE ";" ":" pypath "${pypath}") list(APPEND environs "PATH=${binpath}" "${LD_PATH_VAR}=${libpath}" "PYTHONPATH=${pypath}") #generate a bat file that sets the environment and runs the test find_program(SHELL sh) set(sh_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.sh) file(WRITE ${sh_file} "#!${SHELL}\n") #each line sets an environment variable foreach(environ ${environs}) file(APPEND ${sh_file} "export ${environ}\n") endforeach(environ) #load the command to run with its arguments foreach(arg ${ARGN}) file(APPEND ${sh_file} "${arg} ") endforeach(arg) file(APPEND ${sh_file} "\n") #make the shell file executable execute_process(COMMAND chmod +x ${sh_file}) add_test(${test_name} ${SHELL} ${sh_file}) endif(UNIX) if(WIN32) list(APPEND libpath ${DLL_PATHS} "%PATH%") list(APPEND pypath "%PYTHONPATH%") #replace list separator with the path separator (escaped) string(REPLACE ";" "\\;" libpath "${libpath}") string(REPLACE ";" "\\;" pypath "${pypath}") list(APPEND environs "PATH=${libpath}" "PYTHONPATH=${pypath}") #generate a bat file that sets the environment and runs the test set(bat_file ${CMAKE_CURRENT_BINARY_DIR}/${test_name}_test.bat) file(WRITE ${bat_file} "@echo off\n") #each line sets an environment variable foreach(environ ${environs}) file(APPEND ${bat_file} "SET ${environ}\n") endforeach(environ) #load the command to run with its arguments foreach(arg ${ARGN}) file(APPEND ${bat_file} "${arg} ") endforeach(arg) file(APPEND ${bat_file} "\n") add_test(${test_name} ${bat_file}) endif(WIN32) endfunction(GR_ADD_TEST) gr-air-modes-0.0.0.e47992d/cmake/cmake_uninstall.cmake.in000066400000000000000000000025321221634657000226530ustar00rootroot00000000000000# http://www.vtk.org/Wiki/CMake_FAQ#Can_I_do_.22make_uninstall.22_with_CMake.3F IF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") MESSAGE(FATAL_ERROR "Cannot find install manifest: \"@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt\"") ENDIF(NOT EXISTS "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt") FILE(READ "@CMAKE_CURRENT_BINARY_DIR@/install_manifest.txt" files) STRING(REGEX REPLACE "\n" ";" files "${files}") FOREACH(file ${files}) MESSAGE(STATUS "Uninstalling \"$ENV{DESTDIR}${file}\"") IF(EXISTS "$ENV{DESTDIR}${file}") EXEC_PROGRAM( "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" OUTPUT_VARIABLE rm_out RETURN_VALUE rm_retval ) IF(NOT "${rm_retval}" STREQUAL 0) MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") ENDIF(NOT "${rm_retval}" STREQUAL 0) ELSEIF(IS_SYMLINK "$ENV{DESTDIR}${file}") EXEC_PROGRAM( "@CMAKE_COMMAND@" ARGS "-E remove \"$ENV{DESTDIR}${file}\"" OUTPUT_VARIABLE rm_out RETURN_VALUE rm_retval ) IF(NOT "${rm_retval}" STREQUAL 0) MESSAGE(FATAL_ERROR "Problem when removing \"$ENV{DESTDIR}${file}\"") ENDIF(NOT "${rm_retval}" STREQUAL 0) ELSE(EXISTS "$ENV{DESTDIR}${file}") MESSAGE(STATUS "File \"$ENV{DESTDIR}${file}\" does not exist.") ENDIF(EXISTS "$ENV{DESTDIR}${file}") ENDFOREACH(file) gr-air-modes-0.0.0.e47992d/docs/000077500000000000000000000000001221634657000157415ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/docs/CMakeLists.txt000066400000000000000000000025551221634657000205100ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Setup dependencies ######################################################################## find_package(Doxygen) ######################################################################## # Begin conditional configuration ######################################################################## if(ENABLE_DOXYGEN) ######################################################################## # Add subdirectories ######################################################################## add_subdirectory(doxygen) endif(ENABLE_DOXYGEN) gr-air-modes-0.0.0.e47992d/docs/doxygen/000077500000000000000000000000001221634657000174165ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/docs/doxygen/CMakeLists.txt000066400000000000000000000037411221634657000221630ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Create the doxygen configuration file ######################################################################## file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} top_srcdir) file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} top_builddir) file(TO_NATIVE_PATH ${CMAKE_SOURCE_DIR} abs_top_srcdir) file(TO_NATIVE_PATH ${CMAKE_BINARY_DIR} abs_top_builddir) set(HAVE_DOT ${DOXYGEN_DOT_FOUND}) set(enable_html_docs YES) set(enable_latex_docs NO) set(enable_xml_docs YES) configure_file( ${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile @ONLY) set(BUILT_DIRS ${CMAKE_CURRENT_BINARY_DIR}/xml ${CMAKE_CURRENT_BINARY_DIR}/html) ######################################################################## # Make and install doxygen docs ######################################################################## add_custom_command( OUTPUT ${BUILT_DIRS} COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_CURRENT_BINARY_DIR}/Doxyfile WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} COMMENT "Generating documentation with doxygen" ) add_custom_target(doxygen_target ALL DEPENDS ${BUILT_DIRS}) install(DIRECTORY ${BUILT_DIRS} DESTINATION ${GR_PKG_DOC_DIR}) gr-air-modes-0.0.0.e47992d/docs/doxygen/Doxyfile.in000066400000000000000000001741771221634657000215520ustar00rootroot00000000000000# Doxyfile 1.5.7.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = "GNU Radio's GR-AIR-MODES Package" # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Farsi, Finnish, French, German, Greek, # Hungarian, Italian, Japanese, Japanese-en (Japanese with English messages), # Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, Polish, # Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, Slovene, # Spanish, Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = YES # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = NO # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = YES # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 4 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = NO # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = NO # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = NO # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= NO # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = NO # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text " # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @top_srcdir@ @top_builddir@ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.h \ *.dox # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = @abs_top_builddir@/docs/doxygen/html \ @abs_top_builddir@/docs/doxygen/xml \ @abs_top_builddir@/docs/doxygen/other/doxypy.py \ @abs_top_builddir@/_CPack_Packages \ @abs_top_srcdir@/cmake # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = */.deps/* \ */.libs/* \ */.svn/* \ */CVS/* \ */__init__.py \ */qa_*.cc \ */qa_*.h \ */qa_*.py # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = ad9862 \ numpy \ *swig* \ *Swig* \ *my_top_block* \ *my_graph* \ *app_top_block* \ *am_rx_graph* \ *_queue_watcher_thread* \ *parse* \ *MyFrame* \ *MyApp* \ *PyObject* \ *wfm_rx_block* \ *_sptr* \ *debug* \ *wfm_rx_sca_block* \ *tv_rx_block* \ *wxapt_rx_block* \ *example_signal* # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = *.py=@top_srcdir@/gnuradio-core/doc/other/doxypy.py # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = NO # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. Otherwise they will link to the documentstion. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = YES # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = @enable_html_docs@ # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = YES # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # Qt Help Project / Namespace. QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # Qt Help Project / Virtual Folders. QHP_VIRTUAL_FOLDER = doc # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file . QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = YES # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to FRAME, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. Other possible values # for this tag are: HIERARCHIES, which will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list; # ALL, which combines the behavior of FRAME and HIERARCHIES; and NONE, which # disables this behavior completely. For backwards compatibility with previous # releases of Doxygen, the values YES and NO are equivalent to FRAME and NONE # respectively. GENERATE_TREEVIEW = YES # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 180 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = @enable_latex_docs@ # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = letter # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = @enable_xml_docs@ # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = NO #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = @HAVE_DOT@ # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = NO # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO gr-air-modes-0.0.0.e47992d/docs/doxygen/Doxyfile.swig_doc.in000066400000000000000000001736171221634657000233450ustar00rootroot00000000000000# Doxyfile 1.6.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = @CPACK_PACKAGE_NAME@ # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = @CPACK_PACKAGE_VERSION@ # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = @OUTPUT_DIRECTORY@ # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = YES # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = @INPUT_PATHS@ # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = *.h # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = NO # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # When the SEARCHENGINE tag is enable doxygen will generate a search box for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP) or Qt help (GENERATE_QHP) # there is already a search function so this one should typically # be disabled. SEARCHENGINE = YES #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = YES # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/000077500000000000000000000000001221634657000211225ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/.gitignore000066400000000000000000000000301221634657000231030ustar00rootroot00000000000000/Makefile /Makefile.in gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/Makefile.am000066400000000000000000000026021221634657000231560ustar00rootroot00000000000000# # Copyright 2007,2009,2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # include $(top_srcdir)/Makefile.common EXTRA_DIST = \ example/aadvark.cc \ example/aadvark.h \ example/Doxyfile \ example/xml/aadvark_8cc.xml \ example/xml/aadvark_8h.xml \ example/xml/classAadvark.xml \ example/xml/combine.xslt \ example/xml/compound.xsd \ example/xml/index.xml \ example/xml/index.xsd if PYTHON utilspythondir = $(grpythondir)/doxyxml TESTS = \ run_tests nobase_utilspython_PYTHON = \ __init__.py \ base.py \ doxyindex.py \ text.py \ generated/__init__.py \ generated/index.py \ generated/indexsuper.py \ generated/compound.py \ generated/compoundsuper.py endifgr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/__init__.py000066400000000000000000000046611221634657000232420ustar00rootroot00000000000000# # Copyright 2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ Python interface to contents of doxygen xml documentation. Example use: See the contents of the example folder for the C++ and doxygen-generated xml used in this example. >>> # Parse the doxygen docs. >>> import os >>> this_dir = os.path.dirname(globals()['__file__']) >>> xml_path = this_dir + "/example/xml/" >>> di = DoxyIndex(xml_path) Get a list of all top-level objects. >>> print([mem.name() for mem in di.members()]) [u'Aadvark', u'aadvarky_enough', u'main'] Get all functions. >>> print([mem.name() for mem in di.in_category(DoxyFunction)]) [u'aadvarky_enough', u'main'] Check if an object is present. >>> di.has_member(u'Aadvark') True >>> di.has_member(u'Fish') False Get an item by name and check its properties. >>> aad = di.get_member(u'Aadvark') >>> print(aad.brief_description) Models the mammal Aadvark. >>> print(aad.detailed_description) Sadly the model is incomplete and cannot capture all aspects of an aadvark yet. This line is uninformative and is only to test line breaks in the comments. >>> [mem.name() for mem in aad.members()] [u'aadvarkness', u'print', u'Aadvark', u'get_aadvarkness'] >>> aad.get_member(u'print').brief_description u'Outputs the vital aadvark statistics.' """ from doxyindex import DoxyIndex, DoxyFunction, DoxyParam, DoxyClass, DoxyFile, DoxyNamespace, DoxyGroup, DoxyFriend, DoxyOther def _test(): import os this_dir = os.path.dirname(globals()['__file__']) xml_path = this_dir + "/example/xml/" di = DoxyIndex(xml_path) # Get the Aadvark class aad = di.get_member('Aadvark') aad.brief_description import doctest return doctest.testmod() if __name__ == "__main__": _test() gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/base.py000066400000000000000000000152571221634657000224200ustar00rootroot00000000000000# # Copyright 2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ A base class is created. Classes based upon this are used to make more user-friendly interfaces to the doxygen xml docs than the generated classes provide. """ import os import pdb from xml.parsers.expat import ExpatError from generated import compound class Base(object): class Duplicate(StandardError): pass class NoSuchMember(StandardError): pass class ParsingError(StandardError): pass def __init__(self, parse_data, top=None): self._parsed = False self._error = False self._parse_data = parse_data self._members = [] self._dict_members = {} self._in_category = {} self._data = {} if top is not None: self._xml_path = top._xml_path # Set up holder of references else: top = self self._refs = {} self._xml_path = parse_data self.top = top @classmethod def from_refid(cls, refid, top=None): """ Instantiate class from a refid rather than parsing object. """ # First check to see if its already been instantiated. if top is not None and refid in top._refs: return top._refs[refid] # Otherwise create a new instance and set refid. inst = cls(None, top=top) inst.refid = refid inst.add_ref(inst) return inst @classmethod def from_parse_data(cls, parse_data, top=None): refid = getattr(parse_data, 'refid', None) if refid is not None and top is not None and refid in top._refs: return top._refs[refid] inst = cls(parse_data, top=top) if refid is not None: inst.refid = refid inst.add_ref(inst) return inst def add_ref(self, obj): if hasattr(obj, 'refid'): self.top._refs[obj.refid] = obj mem_classes = [] def get_cls(self, mem): for cls in self.mem_classes: if cls.can_parse(mem): return cls raise StandardError(("Did not find a class for object '%s'." \ % (mem.get_name()))) def convert_mem(self, mem): try: cls = self.get_cls(mem) converted = cls.from_parse_data(mem, self.top) if converted is None: raise StandardError('No class matched this object.') self.add_ref(converted) return converted except StandardError, e: print e @classmethod def includes(cls, inst): return isinstance(inst, cls) @classmethod def can_parse(cls, obj): return False def _parse(self): self._parsed = True def _get_dict_members(self, cat=None): """ For given category a dictionary is returned mapping member names to members of that category. For names that are duplicated the name is mapped to None. """ self.confirm_no_error() if cat not in self._dict_members: new_dict = {} for mem in self.in_category(cat): if mem.name() not in new_dict: new_dict[mem.name()] = mem else: new_dict[mem.name()] = self.Duplicate self._dict_members[cat] = new_dict return self._dict_members[cat] def in_category(self, cat): self.confirm_no_error() if cat is None: return self._members if cat not in self._in_category: self._in_category[cat] = [mem for mem in self._members if cat.includes(mem)] return self._in_category[cat] def get_member(self, name, cat=None): self.confirm_no_error() # Check if it's in a namespace or class. bits = name.split('::') first = bits[0] rest = '::'.join(bits[1:]) member = self._get_dict_members(cat).get(first, self.NoSuchMember) # Raise any errors that are returned. if member in set([self.NoSuchMember, self.Duplicate]): raise member() if rest: return member.get_member(rest, cat=cat) return member def has_member(self, name, cat=None): try: mem = self.get_member(name, cat=cat) return True except self.NoSuchMember: return False def data(self): self.confirm_no_error() return self._data def members(self): self.confirm_no_error() return self._members def process_memberdefs(self): mdtss = [] for sec in self._retrieved_data.compounddef.sectiondef: mdtss += sec.memberdef # At the moment we lose all information associated with sections. # Sometimes a memberdef is in several sectiondef. # We make sure we don't get duplicates here. uniques = set([]) for mem in mdtss: converted = self.convert_mem(mem) pair = (mem.name, mem.__class__) if pair not in uniques: uniques.add(pair) self._members.append(converted) def retrieve_data(self): filename = os.path.join(self._xml_path, self.refid + '.xml') try: self._retrieved_data = compound.parse(filename) except ExpatError: print('Error in xml in file %s' % filename) self._error = True self._retrieved_data = None def check_parsed(self): if not self._parsed: self._parse() def confirm_no_error(self): self.check_parsed() if self._error: raise self.ParsingError() def error(self): self.check_parsed() return self._error def name(self): # first see if we can do it without processing. if self._parse_data is not None: return self._parse_data.name self.check_parsed() return self._retrieved_data.compounddef.name gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/doxyindex.py000066400000000000000000000147561221634657000235240ustar00rootroot00000000000000# # Copyright 2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ Classes providing more user-friendly interfaces to the doxygen xml docs than the generated classes provide. """ import os from generated import index from base import Base from text import description class DoxyIndex(Base): """ Parses a doxygen xml directory. """ __module__ = "gnuradio.utils.doxyxml" def _parse(self): if self._parsed: return super(DoxyIndex, self)._parse() self._root = index.parse(os.path.join(self._xml_path, 'index.xml')) for mem in self._root.compound: converted = self.convert_mem(mem) # For files we want the contents to be accessible directly # from the parent rather than having to go through the file # object. if self.get_cls(mem) == DoxyFile: if mem.name.endswith('.h'): self._members += converted.members() self._members.append(converted) else: self._members.append(converted) def generate_swig_doc_i(self): """ %feature("docstring") gr_make_align_on_samplenumbers_ss::align_state " Wraps the C++: gr_align_on_samplenumbers_ss::align_state"; """ pass class DoxyCompMem(Base): kind = None def __init__(self, *args, **kwargs): super(DoxyCompMem, self).__init__(*args, **kwargs) @classmethod def can_parse(cls, obj): return obj.kind == cls.kind def set_descriptions(self, parse_data): bd = description(getattr(parse_data, 'briefdescription', None)) dd = description(getattr(parse_data, 'detaileddescription', None)) self._data['brief_description'] = bd self._data['detailed_description'] = dd class DoxyCompound(DoxyCompMem): pass class DoxyMember(DoxyCompMem): pass class DoxyFunction(DoxyMember): __module__ = "gnuradio.utils.doxyxml" kind = 'function' def _parse(self): if self._parsed: return super(DoxyFunction, self)._parse() self.set_descriptions(self._parse_data) self._data['params'] = [] prms = self._parse_data.param for prm in prms: self._data['params'].append(DoxyParam(prm)) brief_description = property(lambda self: self.data()['brief_description']) detailed_description = property(lambda self: self.data()['detailed_description']) params = property(lambda self: self.data()['params']) Base.mem_classes.append(DoxyFunction) class DoxyParam(DoxyMember): __module__ = "gnuradio.utils.doxyxml" def _parse(self): if self._parsed: return super(DoxyParam, self)._parse() self.set_descriptions(self._parse_data) self._data['declname'] = self._parse_data.declname brief_description = property(lambda self: self.data()['brief_description']) detailed_description = property(lambda self: self.data()['detailed_description']) declname = property(lambda self: self.data()['declname']) class DoxyClass(DoxyCompound): __module__ = "gnuradio.utils.doxyxml" kind = 'class' def _parse(self): if self._parsed: return super(DoxyClass, self)._parse() self.retrieve_data() if self._error: return self.set_descriptions(self._retrieved_data.compounddef) # Sectiondef.kind tells about whether private or public. # We just ignore this for now. self.process_memberdefs() brief_description = property(lambda self: self.data()['brief_description']) detailed_description = property(lambda self: self.data()['detailed_description']) Base.mem_classes.append(DoxyClass) class DoxyFile(DoxyCompound): __module__ = "gnuradio.utils.doxyxml" kind = 'file' def _parse(self): if self._parsed: return super(DoxyFile, self)._parse() self.retrieve_data() self.set_descriptions(self._retrieved_data.compounddef) if self._error: return self.process_memberdefs() brief_description = property(lambda self: self.data()['brief_description']) detailed_description = property(lambda self: self.data()['detailed_description']) Base.mem_classes.append(DoxyFile) class DoxyNamespace(DoxyCompound): __module__ = "gnuradio.utils.doxyxml" kind = 'namespace' Base.mem_classes.append(DoxyNamespace) class DoxyGroup(DoxyCompound): __module__ = "gnuradio.utils.doxyxml" kind = 'group' def _parse(self): if self._parsed: return super(DoxyGroup, self)._parse() self.retrieve_data() if self._error: return cdef = self._retrieved_data.compounddef self._data['title'] = description(cdef.title) # Process inner groups grps = cdef.innergroup for grp in grps: converted = DoxyGroup.from_refid(grp.refid, top=self.top) self._members.append(converted) # Process inner classes klasses = cdef.innerclass for kls in klasses: converted = DoxyClass.from_refid(kls.refid, top=self.top) self._members.append(converted) # Process normal members self.process_memberdefs() title = property(lambda self: self.data()['title']) Base.mem_classes.append(DoxyGroup) class DoxyFriend(DoxyMember): __module__ = "gnuradio.utils.doxyxml" kind = 'friend' Base.mem_classes.append(DoxyFriend) class DoxyOther(Base): __module__ = "gnuradio.utils.doxyxml" kinds = set(['variable', 'struct', 'union', 'define', 'typedef', 'enum', 'dir', 'page']) @classmethod def can_parse(cls, obj): return obj.kind in cls.kinds Base.mem_classes.append(DoxyOther) gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/000077500000000000000000000000001221634657000225555ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/Doxyfile000066400000000000000000001775601221634657000243030ustar00rootroot00000000000000# Doxyfile 1.6.3 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # This tag specifies the encoding used for all characters in the config file # that follow. The default is UTF-8 which is also the encoding used for all # text before the first occurrence of this tag. Doxygen uses libiconv (or the # iconv built into libc) for the transcoding. See # http://www.gnu.org/software/libiconv for the list of possible encodings. DOXYFILE_ENCODING = UTF-8 # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, # Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, # Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English # messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, # Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrilic, Slovak, # Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. OUTPUT_LANGUAGE = English # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like regular Qt-style comments # (thus requiring an explicit @brief command for a brief description.) JAVADOC_AUTOBRIEF = NO # If the QT_AUTOBRIEF tag is set to YES then Doxygen will # interpret the first line (until the first dot) of a Qt-style # comment as the brief description. If set to NO, the comments # will behave just like regular Qt-style comments (thus requiring # an explicit \brief command for a brief description.) QT_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce # a new page for each member. If set to NO, the documentation of a member will # be part of the file/class/namespace that contains it. SEPARATE_MEMBER_PAGES = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java # sources only. Doxygen will then generate output that is more tailored for # Java. For instance, namespaces will be presented as packages, qualified # scopes will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran # sources only. Doxygen will then generate output that is more tailored for # Fortran. OPTIMIZE_FOR_FORTRAN = NO # Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL # sources. Doxygen will then generate output that is tailored for # VHDL. OPTIMIZE_OUTPUT_VHDL = NO # Doxygen selects the parser to use depending on the extension of the files it parses. # With this tag you can assign which parser to use for a given extension. # Doxygen has a built-in mapping, but you can override or extend it using this tag. # The format is ext=language, where ext is a file extension, and language is one of # the parsers supported by doxygen: IDL, Java, Javascript, C#, C, C++, D, PHP, # Objective-C, Python, Fortran, VHDL, C, C++. For instance to make doxygen treat # .inc files as Fortran files (default is PHP), and .f files as C (default is Fortran), # use: inc=Fortran f=C. Note that for custom extensions you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. EXTENSION_MAPPING = # If you use STL classes (i.e. std::string, std::vector, etc.) but do not want # to include (a tag file for) the STL sources as input, then you should # set this tag to YES in order to let doxygen match functions declarations and # definitions whose arguments contain STL classes (e.g. func(std::string); v.s. # func(std::string) {}). This also make the inheritance and collaboration # diagrams that involve STL classes more complete and accurate. BUILTIN_STL_SUPPORT = NO # If you use Microsoft's C++/CLI language, you should set this option to YES to # enable parsing support. CPP_CLI_SUPPORT = NO # Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. # Doxygen will parse them like normal C++ but will assume all classes use public # instead of private inheritance when no explicit protection keyword is present. SIP_SUPPORT = NO # For Microsoft's IDL there are propget and propput attributes to indicate getter # and setter methods for a property. Setting this option to YES (the default) # will make doxygen to replace the get and set methods by a property in the # documentation. This will only work if the methods are indeed getting or # setting a simple type. If this is not the case, or you want to show the # methods anyway, you should set this option to NO. IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES # When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum # is documented as struct, union, or enum with the name of the typedef. So # typedef struct TypeS {} TypeT, will appear in the documentation as a struct # with name TypeT. When disabled the typedef will appear as a member of a file, # namespace, or class. And the struct will be named TypeS. This can typically # be useful for C code in case the coding convention dictates that all compound # types are typedef'ed and only the typedef is referenced, never the tag name. TYPEDEF_HIDES_STRUCT = NO # The SYMBOL_CACHE_SIZE determines the size of the internal cache use to # determine which symbols to keep in memory and which to flush to disk. # When the cache is full, less often used symbols will be written to disk. # For small to medium size projects (<1000 input files) the default value is # probably good enough. For larger projects a too small cache size can cause # doxygen to be busy swapping symbols to and from disk most of the time # causing a significant performance penality. # If the system has enough physical memory increasing the cache will improve the # performance by keeping more symbols in memory. Note that the value works on # a logarithmic scale so increasing the size by one will rougly double the # memory usage. The cache size is given by this formula: # 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, # corresponding to a cache size of 2^16 = 65536 symbols SYMBOL_CACHE_SIZE = 0 #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = NO # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = NO # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = NO # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If this flag is set to YES, the members of anonymous namespaces will be # extracted and appear in the documentation as a namespace called # 'anonymous_namespace{file}', where file will be replaced with the base # name of the file that contains the anonymous namespace. By default # anonymous namespace are hidden. EXTRACT_ANON_NSPACES = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen # will list include files with double quotes in the documentation # rather than with sharp brackets. FORCE_LOCAL_INCLUDES = NO # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the (brief and detailed) documentation of class members so that constructors and destructors are listed first. If set to NO (the default) the constructors will appear in the respective orders defined by SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. SORT_MEMBERS_CTORS_1ST = NO # If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the # hierarchy of group names into alphabetical order. If set to NO (the default) # the group names will appear in their defined order. SORT_GROUP_NAMES = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. The default is NO. SHOW_DIRECTORIES = NO # Set the SHOW_FILES tag to NO to disable the generation of the Files page. # This will remove the Files entry from the Quick Index and from the # Folder Tree View (if specified). The default is YES. SHOW_FILES = YES # Set the SHOW_NAMESPACES tag to NO to disable the generation of the # Namespaces page. # This will remove the Namespaces entry from the Quick Index # and from the Folder Tree View (if specified). The default is YES. SHOW_NAMESPACES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from # the version control system). Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the program writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = # The LAYOUT_FILE tag can be used to specify a layout file which will be parsed by # doxygen. The layout file controls the global structure of the generated output files # in an output format independent way. The create the layout file that represents # doxygen's defaults, run doxygen with the -l option. You can optionally specify a # file name after the option, if omitted DoxygenLayout.xml will be used as the name # of the layout file. LAYOUT_FILE = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = # This tag can be used to specify the character encoding of the source files # that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is # also the default input encoding. Doxygen uses libiconv (or the iconv built # into libc) for the transcoding. See http://www.gnu.org/software/libiconv for # the list of possible encodings. INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.py *.f90 FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = NO # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. Note that the wildcards are matched # against the file with absolute path, so to exclude all test directories # for example use the pattern */test/* EXCLUDE_PATTERNS = # The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names # (namespaces, classes, functions, etc.) that should be excluded from the # output. The symbol name can be a fully qualified name, a word, or if the # wildcard * is used, a substring. Examples: ANamespace, AClass, # AClass::ANamespace, ANamespace::*Test EXCLUDE_SYMBOLS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. # If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. # Doxygen will compare the file name with each pattern and apply the # filter if there is a match. # The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = NO # If the REFERENCES_RELATION tag is set to YES # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES (the default) # and SOURCE_BROWSER tag is set to YES, then the hyperlinks from # functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will # link to the source code. # Otherwise they will link to the documentation. REFERENCES_LINK_SOURCE = YES # If the USE_HTAGS tag is set to YES then the references to source code # will point to the HTML generated by the htags(1) tool instead of doxygen # built-in source browser. The htags tool is part of GNU's global source # tagging system (see http://www.gnu.org/software/global/global.html). You # will need version 4.8.6 or higher. USE_HTAGS = NO # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting # this to NO can help when comparing the output of multiple runs. HTML_TIMESTAMP = YES # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. HTML_ALIGN_MEMBERS = YES # If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML # documentation will contain sections that can be hidden and shown after the # page has loaded. For this to work a browser that supports # JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox # Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari). HTML_DYNAMIC_SECTIONS = NO # If the GENERATE_DOCSET tag is set to YES, additional index files # will be generated that can be used as input for Apple's Xcode 3 # integrated development environment, introduced with OSX 10.5 (Leopard). # To create a documentation set, doxygen will generate a Makefile in the # HTML output directory. Running make will produce the docset in that # directory and running "make install" will install the docset in # ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find # it at startup. # See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html for more information. GENERATE_DOCSET = NO # When GENERATE_DOCSET tag is set to YES, this tag determines the name of the # feed. A documentation feed provides an umbrella under which multiple # documentation sets from a single provider (such as a company or product suite) # can be grouped. DOCSET_FEEDNAME = "Doxygen generated docs" # When GENERATE_DOCSET tag is set to YES, this tag specifies a string that # should uniquely identify the documentation set bundle. This should be a # reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen # will append .docset to the name. DOCSET_BUNDLE_ID = org.doxygen.Project # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compiled HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING # is used to encode HtmlHelp index (hhk), content (hhc) and project file # content. CHM_INDEX_ENCODING = # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and QHP_VIRTUAL_FOLDER # are set, an additional index file will be generated that can be used as input for # Qt's qhelpgenerator to generate a Qt Compressed Help (.qch) of the generated # HTML documentation. GENERATE_QHP = NO # If the QHG_LOCATION tag is specified, the QCH_FILE tag can # be used to specify the file name of the resulting .qch file. # The path specified is relative to the HTML output folder. QCH_FILE = # The QHP_NAMESPACE tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#namespace QHP_NAMESPACE = org.doxygen.Project # The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating # Qt Help Project output. For more information please see # http://doc.trolltech.com/qthelpproject.html#virtual-folders QHP_VIRTUAL_FOLDER = doc # If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to add. # For more information please see # http://doc.trolltech.com/qthelpproject.html#custom-filters QHP_CUST_FILTER_NAME = # The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the custom filter to add.For more information please see # Qt Help Project / Custom Filters. QHP_CUST_FILTER_ATTRS = # The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this project's # filter section matches. # Qt Help Project / Filter Attributes. QHP_SECT_FILTER_ATTRS = # If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can # be used to specify the location of Qt's qhelpgenerator. # If non-empty doxygen will try to run qhelpgenerator on the generated # .qhp file. QHG_LOCATION = # If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files # will be generated, which together with the HTML files, form an Eclipse help # plugin. To install this plugin and make it available under the help contents # menu in Eclipse, the contents of the directory containing the HTML and XML # files needs to be copied into the plugins directory of eclipse. The name of # the directory within the plugins directory should be the same as # the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before the help appears. GENERATE_ECLIPSEHELP = NO # A unique identifier for the eclipse help plugin. When installing the plugin # the directory name containing the HTML and XML files should also have # this name. ECLIPSE_DOC_ID = org.doxygen.Project # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # The GENERATE_TREEVIEW tag is used to specify whether a tree-like index # structure should be generated to display hierarchical information. # If the tag value is set to YES, a side panel will be generated # containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (i.e. any modern browser). # Windows users are probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories, # and Class Hierarchy pages using a tree view instead of an ordered list. USE_INLINE_TREES = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 # Use this tag to change the font size of Latex formulas included # as images in the HTML documentation. The default is 10. Note that # when you change the font size after a successful doxygen run you need # to manually remove any form_*.png images from the HTML output directory # to force them to be regenerated. FORMULA_FONTSIZE = 10 # When the SEARCHENGINE tag is enabled doxygen will generate a search box for the HTML output. The underlying search engine uses javascript # and DHTML and should work on any modern browser. Note that when using HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET) there is already a search function so this one should # typically be disabled. For large projects the javascript based search engine # can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution. SEARCHENGINE = YES # When the SERVER_BASED_SEARCH tag is enabled the search engine will be implemented using a PHP enabled web server instead of at the web client using Javascript. Doxygen will generate the search PHP script and index # file to put on the web server. The advantage of the server based approach is that it scales better to large projects and allows full text search. The disadvances is that it is more difficult to setup # and does not have live searching capabilities. SERVER_BASED_SEARCH = NO #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = YES # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. # Note that when enabling USE_PDFLATEX this option is only used for # generating bitmaps for formulas in the HTML output, but not in the # Makefile that is written to the output directory. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = YES # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO # If LATEX_SOURCE_CODE is set to YES then doxygen will include source code with syntax highlighting in the LaTeX output. Note that which sources are shown also depends on other settings such as SOURCE_BROWSER. LATEX_SOURCE_CODE = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = YES # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. # This is useful # if you want to understand what is going on. # On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_DEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # You can define message sequence charts within doxygen comments using the \msc # command. Doxygen will then run the mscgen tool (see # http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the # documentation. The MSCGEN_PATH tag allows you to specify the directory where # the mscgen tool resides. If left empty the tool is assumed to be found in the # default search path. MSCGEN_PATH = # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = YES # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = NO # By default doxygen will write a font called FreeSans.ttf to the output # directory and reference it in all dot files that doxygen generates. This # font does not include all possible unicode characters however, so when you need # these (or just want a differently looking font) you can specify the font name # using DOT_FONTNAME. You need need to make sure dot is able to find the font, # which can be done by putting it in a standard location or by setting the # DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory # containing the font. DOT_FONTNAME = FreeSans # The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs. # The default size is 10pt. DOT_FONTSIZE = 10 # By default doxygen will tell dot to use the output directory to look for the # FreeSans.ttf font (which doxygen will put there itself). If you specify a # different font using DOT_FONTNAME you can set the path where dot # can find it using this tag. DOT_FONTPATH = # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT options are set to YES then # doxygen will generate a call dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable call graphs # for selected functions only using the \callgraph command. CALL_GRAPH = NO # If the CALLER_GRAPH and HAVE_DOT tags are set to YES then # doxygen will generate a caller dependency graph for every global function # or class method. Note that enabling this option will significantly increase # the time of a run. So in most cases it will be better to enable caller # graphs for selected functions only using the \callergraph command. CALLER_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of # nodes that will be shown in the graph. If the number of nodes in a graph # becomes larger than this value, doxygen will truncate the graph, which is # visualized by representing a node as a red box. Note that doxygen if the # number of direct children of the root node in a graph is already larger than # DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note # that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH. DOT_GRAPH_MAX_NODES = 50 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. Also note that the size of a graph can be further restricted by # DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, because dot on Windows does not # seem to support this out of the box. Warning: Depending on the platform used, # enabling this option may lead to badly anti-aliased labels on the edges of # a graph (i.e. they become hard to read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = YES # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/aadvark.cc000066400000000000000000000026021221634657000244750ustar00rootroot00000000000000/* -*- c++ -*- */ /* * Copyright 2010 Free Software Foundation, Inc. * * This file is part of GNU Radio * * GNU Radio is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3, or (at your option) * any later version. * * GNU Radio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Radio; see the file COPYING. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, * Boston, MA 02110-1301, USA. */ #include #include "aadvark.h" void Aadvark::print() { std::cout << "aadvark is " << aadvarkness << "/10 aadvarky" << std::endl; } Aadvark::Aadvark(int aaness): aadvarkness(aaness) {} bool aadvarky_enough(Aadvark aad) { if (aad.get_aadvarkness() > 6) return true; else return false; } int Aadvark::get_aadvarkness() { return aadvarkness; } int main() { Aadvark arold = Aadvark(6); arold.print(); if (aadvarky_enough(arold)) std::cout << "He is aadvarky enough" << std::endl; else std::cout << "He is not aadvarky enough" << std::endl; } gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/aadvark.h000066400000000000000000000025241221634657000243420ustar00rootroot00000000000000/* -*- c++ -*- */ /* * Copyright 2010 Free Software Foundation, Inc. * * This file is part of GNU Radio * * GNU Radio is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3, or (at your option) * any later version. * * GNU Radio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Radio; see the file COPYING. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, * Boston, MA 02110-1301, USA. */ #include /*! * \brief Models the mammal Aadvark. * * Sadly the model is incomplete and cannot capture all aspects of an aadvark yet. * * This line is uninformative and is only to test line breaks in the comments. */ class Aadvark { public: //! \brief Outputs the vital aadvark statistics. void print(); //! \param aaness The aadvarkness of an aadvark is a measure of how aadvarky it is. Aadvark(int aaness); int get_aadvarkness(); private: int aadvarkness; }; bool aadvarky_enough(Aadvark aad); int main(); gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/000077500000000000000000000000001221634657000233555ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/aadvark_8cc.xml000066400000000000000000000174611221634657000262560ustar00rootroot00000000000000 aadvark.cc iostream aadvark.h aadvark.cc bool bool aadvarky_enough (Aadvark aad) aadvarky_enough Aadvark aad int int main () main #include<iostream> #include"aadvark.h" voidAadvark::print(){ std::cout<<"aadvarkis"<<aadvarkness<<"/10aadvarky"<<std::endl; } Aadvark::Aadvark(intaaness):aadvarkness(aaness){} boolaadvarky_enough(Aadvarkaad){ if(aad.get_aadvarkness()>6) returntrue; else returnfalse; } intAadvark::get_aadvarkness(){ returnaadvarkness; } intmain(){ Aadvarkarold=Aadvark(6); arold.print(); if(aadvarky_enough(arold)) std::cout<<"Heisaadvarkyenough"<<std::endl; else std::cout<<"Heisnotaadvarkyenough"<<std::endl; } gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/aadvark_8h.xml000066400000000000000000000117011221634657000261070ustar00rootroot00000000000000 aadvark.h iostream Aadvark bool bool aadvarky_enough (Aadvark aad) aadvarky_enough Aadvark aad int int main () main #include<iostream> classAadvark{ public: voidprint(); Aadvark(intaaness); intget_aadvarkness(); private: intaadvarkness; }; boolaadvarky_enough(Aadvarkaad); intmain(); gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/classAadvark.xml000066400000000000000000000121371221634657000265020ustar00rootroot00000000000000 Aadvark aadvark.h int int Aadvark::aadvarkness aadvarkness void void Aadvark::print () print Outputs the vital aadvark statistics. Aadvark::Aadvark (int aaness) Aadvark int aaness aaness The aadvarkness of an aadvark is a measure of how aadvarky it is. int int Aadvark::get_aadvarkness () get_aadvarkness Models the mammal Aadvark. Sadly the model is incomplete and cannot capture all aspects of an aadvark yet.This line is uninformative and is only to test line breaks in the comments. AadvarkAadvark Aadvarkaadvarkness Aadvarkget_aadvarkness Aadvarkprint gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/combine.xslt000066400000000000000000000012061221634657000257040ustar00rootroot00000000000000 gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/compound.xsd000066400000000000000000001020631221634657000257230ustar00rootroot00000000000000 gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/index.xml000066400000000000000000000025271221634657000252140ustar00rootroot00000000000000 Aadvark aadvarkness print Aadvark get_aadvarkness aadvark.cc aadvarky_enough main aadvark.h aadvarky_enough main gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/example/xml/index.xsd000066400000000000000000000045231221634657000252100ustar00rootroot00000000000000 gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/generated/000077500000000000000000000000001221634657000230605ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/generated/__init__.py000066400000000000000000000003531221634657000251720ustar00rootroot00000000000000""" Contains generated files produced by generateDS.py. These do the real work of parsing the doxygen xml files but the resultant classes are not very friendly to navigate so the rest of the doxyxml module processes them further. """ gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/generated/compound.py000066400000000000000000000475101221634657000252650ustar00rootroot00000000000000#!/usr/bin/env python """ Generated Mon Feb 9 19:08:05 2009 by generateDS.py. """ from string import lower as str_lower from xml.dom import minidom from xml.dom import Node import sys import compoundsuper as supermod from compoundsuper import MixedContainer class DoxygenTypeSub(supermod.DoxygenType): def __init__(self, version=None, compounddef=None): supermod.DoxygenType.__init__(self, version, compounddef) def find(self, details): return self.compounddef.find(details) supermod.DoxygenType.subclass = DoxygenTypeSub # end class DoxygenTypeSub class compounddefTypeSub(supermod.compounddefType): def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers) def find(self, details): if self.id == details.refid: return self for sectiondef in self.sectiondef: result = sectiondef.find(details) if result: return result supermod.compounddefType.subclass = compounddefTypeSub # end class compounddefTypeSub class listofallmembersTypeSub(supermod.listofallmembersType): def __init__(self, member=None): supermod.listofallmembersType.__init__(self, member) supermod.listofallmembersType.subclass = listofallmembersTypeSub # end class listofallmembersTypeSub class memberRefTypeSub(supermod.memberRefType): def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''): supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name) supermod.memberRefType.subclass = memberRefTypeSub # end class memberRefTypeSub class compoundRefTypeSub(supermod.compoundRefType): def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): supermod.compoundRefType.__init__(self, mixedclass_, content_) supermod.compoundRefType.subclass = compoundRefTypeSub # end class compoundRefTypeSub class reimplementTypeSub(supermod.reimplementType): def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): supermod.reimplementType.__init__(self, mixedclass_, content_) supermod.reimplementType.subclass = reimplementTypeSub # end class reimplementTypeSub class incTypeSub(supermod.incType): def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): supermod.incType.__init__(self, mixedclass_, content_) supermod.incType.subclass = incTypeSub # end class incTypeSub class refTypeSub(supermod.refType): def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): supermod.refType.__init__(self, mixedclass_, content_) supermod.refType.subclass = refTypeSub # end class refTypeSub class refTextTypeSub(supermod.refTextType): def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): supermod.refTextType.__init__(self, mixedclass_, content_) supermod.refTextType.subclass = refTextTypeSub # end class refTextTypeSub class sectiondefTypeSub(supermod.sectiondefType): def __init__(self, kind=None, header='', description=None, memberdef=None): supermod.sectiondefType.__init__(self, kind, header, description, memberdef) def find(self, details): for memberdef in self.memberdef: if memberdef.id == details.refid: return memberdef return None supermod.sectiondefType.subclass = sectiondefTypeSub # end class sectiondefTypeSub class memberdefTypeSub(supermod.memberdefType): def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby) supermod.memberdefType.subclass = memberdefTypeSub # end class memberdefTypeSub class descriptionTypeSub(supermod.descriptionType): def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None): supermod.descriptionType.__init__(self, mixedclass_, content_) supermod.descriptionType.subclass = descriptionTypeSub # end class descriptionTypeSub class enumvalueTypeSub(supermod.enumvalueType): def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): supermod.enumvalueType.__init__(self, mixedclass_, content_) supermod.enumvalueType.subclass = enumvalueTypeSub # end class enumvalueTypeSub class templateparamlistTypeSub(supermod.templateparamlistType): def __init__(self, param=None): supermod.templateparamlistType.__init__(self, param) supermod.templateparamlistType.subclass = templateparamlistTypeSub # end class templateparamlistTypeSub class paramTypeSub(supermod.paramType): def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None): supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription) supermod.paramType.subclass = paramTypeSub # end class paramTypeSub class linkedTextTypeSub(supermod.linkedTextType): def __init__(self, ref=None, mixedclass_=None, content_=None): supermod.linkedTextType.__init__(self, mixedclass_, content_) supermod.linkedTextType.subclass = linkedTextTypeSub # end class linkedTextTypeSub class graphTypeSub(supermod.graphType): def __init__(self, node=None): supermod.graphType.__init__(self, node) supermod.graphType.subclass = graphTypeSub # end class graphTypeSub class nodeTypeSub(supermod.nodeType): def __init__(self, id=None, label='', link=None, childnode=None): supermod.nodeType.__init__(self, id, label, link, childnode) supermod.nodeType.subclass = nodeTypeSub # end class nodeTypeSub class childnodeTypeSub(supermod.childnodeType): def __init__(self, relation=None, refid=None, edgelabel=None): supermod.childnodeType.__init__(self, relation, refid, edgelabel) supermod.childnodeType.subclass = childnodeTypeSub # end class childnodeTypeSub class linkTypeSub(supermod.linkType): def __init__(self, refid=None, external=None, valueOf_=''): supermod.linkType.__init__(self, refid, external) supermod.linkType.subclass = linkTypeSub # end class linkTypeSub class listingTypeSub(supermod.listingType): def __init__(self, codeline=None): supermod.listingType.__init__(self, codeline) supermod.listingType.subclass = listingTypeSub # end class listingTypeSub class codelineTypeSub(supermod.codelineType): def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight) supermod.codelineType.subclass = codelineTypeSub # end class codelineTypeSub class highlightTypeSub(supermod.highlightType): def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None): supermod.highlightType.__init__(self, mixedclass_, content_) supermod.highlightType.subclass = highlightTypeSub # end class highlightTypeSub class referenceTypeSub(supermod.referenceType): def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): supermod.referenceType.__init__(self, mixedclass_, content_) supermod.referenceType.subclass = referenceTypeSub # end class referenceTypeSub class locationTypeSub(supermod.locationType): def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file) supermod.locationType.subclass = locationTypeSub # end class locationTypeSub class docSect1TypeSub(supermod.docSect1Type): def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None): supermod.docSect1Type.__init__(self, mixedclass_, content_) supermod.docSect1Type.subclass = docSect1TypeSub # end class docSect1TypeSub class docSect2TypeSub(supermod.docSect2Type): def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None): supermod.docSect2Type.__init__(self, mixedclass_, content_) supermod.docSect2Type.subclass = docSect2TypeSub # end class docSect2TypeSub class docSect3TypeSub(supermod.docSect3Type): def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None): supermod.docSect3Type.__init__(self, mixedclass_, content_) supermod.docSect3Type.subclass = docSect3TypeSub # end class docSect3TypeSub class docSect4TypeSub(supermod.docSect4Type): def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None): supermod.docSect4Type.__init__(self, mixedclass_, content_) supermod.docSect4Type.subclass = docSect4TypeSub # end class docSect4TypeSub class docInternalTypeSub(supermod.docInternalType): def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): supermod.docInternalType.__init__(self, mixedclass_, content_) supermod.docInternalType.subclass = docInternalTypeSub # end class docInternalTypeSub class docInternalS1TypeSub(supermod.docInternalS1Type): def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): supermod.docInternalS1Type.__init__(self, mixedclass_, content_) supermod.docInternalS1Type.subclass = docInternalS1TypeSub # end class docInternalS1TypeSub class docInternalS2TypeSub(supermod.docInternalS2Type): def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): supermod.docInternalS2Type.__init__(self, mixedclass_, content_) supermod.docInternalS2Type.subclass = docInternalS2TypeSub # end class docInternalS2TypeSub class docInternalS3TypeSub(supermod.docInternalS3Type): def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): supermod.docInternalS3Type.__init__(self, mixedclass_, content_) supermod.docInternalS3Type.subclass = docInternalS3TypeSub # end class docInternalS3TypeSub class docInternalS4TypeSub(supermod.docInternalS4Type): def __init__(self, para=None, mixedclass_=None, content_=None): supermod.docInternalS4Type.__init__(self, mixedclass_, content_) supermod.docInternalS4Type.subclass = docInternalS4TypeSub # end class docInternalS4TypeSub class docURLLinkSub(supermod.docURLLink): def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): supermod.docURLLink.__init__(self, mixedclass_, content_) supermod.docURLLink.subclass = docURLLinkSub # end class docURLLinkSub class docAnchorTypeSub(supermod.docAnchorType): def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): supermod.docAnchorType.__init__(self, mixedclass_, content_) supermod.docAnchorType.subclass = docAnchorTypeSub # end class docAnchorTypeSub class docFormulaTypeSub(supermod.docFormulaType): def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): supermod.docFormulaType.__init__(self, mixedclass_, content_) supermod.docFormulaType.subclass = docFormulaTypeSub # end class docFormulaTypeSub class docIndexEntryTypeSub(supermod.docIndexEntryType): def __init__(self, primaryie='', secondaryie=''): supermod.docIndexEntryType.__init__(self, primaryie, secondaryie) supermod.docIndexEntryType.subclass = docIndexEntryTypeSub # end class docIndexEntryTypeSub class docListTypeSub(supermod.docListType): def __init__(self, listitem=None): supermod.docListType.__init__(self, listitem) supermod.docListType.subclass = docListTypeSub # end class docListTypeSub class docListItemTypeSub(supermod.docListItemType): def __init__(self, para=None): supermod.docListItemType.__init__(self, para) supermod.docListItemType.subclass = docListItemTypeSub # end class docListItemTypeSub class docSimpleSectTypeSub(supermod.docSimpleSectType): def __init__(self, kind=None, title=None, para=None): supermod.docSimpleSectType.__init__(self, kind, title, para) supermod.docSimpleSectType.subclass = docSimpleSectTypeSub # end class docSimpleSectTypeSub class docVarListEntryTypeSub(supermod.docVarListEntryType): def __init__(self, term=None): supermod.docVarListEntryType.__init__(self, term) supermod.docVarListEntryType.subclass = docVarListEntryTypeSub # end class docVarListEntryTypeSub class docRefTextTypeSub(supermod.docRefTextType): def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): supermod.docRefTextType.__init__(self, mixedclass_, content_) supermod.docRefTextType.subclass = docRefTextTypeSub # end class docRefTextTypeSub class docTableTypeSub(supermod.docTableType): def __init__(self, rows=None, cols=None, row=None, caption=None): supermod.docTableType.__init__(self, rows, cols, row, caption) supermod.docTableType.subclass = docTableTypeSub # end class docTableTypeSub class docRowTypeSub(supermod.docRowType): def __init__(self, entry=None): supermod.docRowType.__init__(self, entry) supermod.docRowType.subclass = docRowTypeSub # end class docRowTypeSub class docEntryTypeSub(supermod.docEntryType): def __init__(self, thead=None, para=None): supermod.docEntryType.__init__(self, thead, para) supermod.docEntryType.subclass = docEntryTypeSub # end class docEntryTypeSub class docHeadingTypeSub(supermod.docHeadingType): def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): supermod.docHeadingType.__init__(self, mixedclass_, content_) supermod.docHeadingType.subclass = docHeadingTypeSub # end class docHeadingTypeSub class docImageTypeSub(supermod.docImageType): def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): supermod.docImageType.__init__(self, mixedclass_, content_) supermod.docImageType.subclass = docImageTypeSub # end class docImageTypeSub class docDotFileTypeSub(supermod.docDotFileType): def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): supermod.docDotFileType.__init__(self, mixedclass_, content_) supermod.docDotFileType.subclass = docDotFileTypeSub # end class docDotFileTypeSub class docTocItemTypeSub(supermod.docTocItemType): def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): supermod.docTocItemType.__init__(self, mixedclass_, content_) supermod.docTocItemType.subclass = docTocItemTypeSub # end class docTocItemTypeSub class docTocListTypeSub(supermod.docTocListType): def __init__(self, tocitem=None): supermod.docTocListType.__init__(self, tocitem) supermod.docTocListType.subclass = docTocListTypeSub # end class docTocListTypeSub class docLanguageTypeSub(supermod.docLanguageType): def __init__(self, langid=None, para=None): supermod.docLanguageType.__init__(self, langid, para) supermod.docLanguageType.subclass = docLanguageTypeSub # end class docLanguageTypeSub class docParamListTypeSub(supermod.docParamListType): def __init__(self, kind=None, parameteritem=None): supermod.docParamListType.__init__(self, kind, parameteritem) supermod.docParamListType.subclass = docParamListTypeSub # end class docParamListTypeSub class docParamListItemSub(supermod.docParamListItem): def __init__(self, parameternamelist=None, parameterdescription=None): supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription) supermod.docParamListItem.subclass = docParamListItemSub # end class docParamListItemSub class docParamNameListSub(supermod.docParamNameList): def __init__(self, parametername=None): supermod.docParamNameList.__init__(self, parametername) supermod.docParamNameList.subclass = docParamNameListSub # end class docParamNameListSub class docParamNameSub(supermod.docParamName): def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): supermod.docParamName.__init__(self, mixedclass_, content_) supermod.docParamName.subclass = docParamNameSub # end class docParamNameSub class docXRefSectTypeSub(supermod.docXRefSectType): def __init__(self, id=None, xreftitle=None, xrefdescription=None): supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription) supermod.docXRefSectType.subclass = docXRefSectTypeSub # end class docXRefSectTypeSub class docCopyTypeSub(supermod.docCopyType): def __init__(self, link=None, para=None, sect1=None, internal=None): supermod.docCopyType.__init__(self, link, para, sect1, internal) supermod.docCopyType.subclass = docCopyTypeSub # end class docCopyTypeSub class docCharTypeSub(supermod.docCharType): def __init__(self, char=None, valueOf_=''): supermod.docCharType.__init__(self, char) supermod.docCharType.subclass = docCharTypeSub # end class docCharTypeSub class docParaTypeSub(supermod.docParaType): def __init__(self, char=None, valueOf_=''): supermod.docParaType.__init__(self, char) self.parameterlist = [] self.simplesects = [] self.content = [] def buildChildren(self, child_, nodeName_): supermod.docParaType.buildChildren(self, child_, nodeName_) if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == "ref": obj_ = supermod.docRefTextType.factory() obj_.build(child_) self.content.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parameterlist': obj_ = supermod.docParamListType.factory() obj_.build(child_) self.parameterlist.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'simplesect': obj_ = supermod.docSimpleSectType.factory() obj_.build(child_) self.simplesects.append(obj_) supermod.docParaType.subclass = docParaTypeSub # end class docParaTypeSub def parse(inFilename): doc = minidom.parse(inFilename) rootNode = doc.documentElement rootObj = supermod.DoxygenType.factory() rootObj.build(rootNode) return rootObj gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/generated/compoundsuper.py000066400000000000000000012770141221634657000263510ustar00rootroot00000000000000#!/usr/bin/env python # # Generated Thu Jun 11 18:44:25 2009 by generateDS.py. # import sys import getopt from string import lower as str_lower from xml.dom import minidom from xml.dom import Node # # User methods # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class # in a module named generatedssuper.py. try: from generatedssuper import GeneratedsSuper except ImportError, exp: class GeneratedsSuper: def format_string(self, input_data, input_name=''): return input_data def format_integer(self, input_data, input_name=''): return '%d' % input_data def format_float(self, input_data, input_name=''): return '%f' % input_data def format_double(self, input_data, input_name=''): return '%e' % input_data def format_boolean(self, input_data, input_name=''): return '%s' % input_data # # If you have installed IPython you can uncomment and use the following. # IPython is available from http://ipython.scipy.org/. # ## from IPython.Shell import IPShellEmbed ## args = '' ## ipshell = IPShellEmbed(args, ## banner = 'Dropping into IPython', ## exit_msg = 'Leaving Interpreter, back to program.') # Then use the following line where and when you want to drop into the # IPython shell: # ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit') # # Globals # ExternalEncoding = 'ascii' # # Support/utility functions. # def showIndent(outfile, level): for idx in range(level): outfile.write(' ') def quote_xml(inStr): s1 = (isinstance(inStr, basestring) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 def quote_attrib(inStr): s1 = (isinstance(inStr, basestring) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') if '"' in s1: if "'" in s1: s1 = '"%s"' % s1.replace('"', """) else: s1 = "'%s'" % s1 else: s1 = '"%s"' % s1 return s1 def quote_python(inStr): s1 = inStr if s1.find("'") == -1: if s1.find('\n') == -1: return "'%s'" % s1 else: return "'''%s'''" % s1 else: if s1.find('"') != -1: s1 = s1.replace('"', '\\"') if s1.find('\n') == -1: return '"%s"' % s1 else: return '"""%s"""' % s1 class MixedContainer: # Constants for category: CategoryNone = 0 CategoryText = 1 CategorySimple = 2 CategoryComplex = 3 # Constants for content_type: TypeNone = 0 TypeText = 1 TypeString = 2 TypeInteger = 3 TypeFloat = 4 TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type self.name = name self.value = value def getCategory(self): return self.category def getContenttype(self, content_type): return self.content_type def getValue(self): return self.value def getName(self): return self.name def export(self, outfile, level, name, namespace): if self.category == MixedContainer.CategoryText: outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex self.value.export(outfile, level, namespace,name) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: outfile.write('<%s>%s' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: outfile.write('<%s>%d' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: outfile.write('<%s>%f' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeDouble: outfile.write('<%s>%g' % (self.name, self.value, self.name)) def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ (self.category, self.content_type, self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ (self.category, self.content_type, self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s",\n' % \ (self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class _MemberSpec(object): def __init__(self, name='', data_type='', container=0): self.name = name self.data_type = data_type self.container = container def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container # # Data representation classes. # class DoxygenType(GeneratedsSuper): subclass = None superclass = None def __init__(self, version=None, compounddef=None): self.version = version self.compounddef = compounddef def factory(*args_, **kwargs_): if DoxygenType.subclass: return DoxygenType.subclass(*args_, **kwargs_) else: return DoxygenType(*args_, **kwargs_) factory = staticmethod(factory) def get_compounddef(self): return self.compounddef def set_compounddef(self, compounddef): self.compounddef = compounddef def get_version(self): return self.version def set_version(self, version): self.version = version def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): outfile.write(' version=%s' % (quote_attrib(self.version), )) def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): if self.compounddef: self.compounddef.export(outfile, level, namespace_, name_='compounddef') def hasContent_(self): if ( self.compounddef is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='DoxygenType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.version is not None: showIndent(outfile, level) outfile.write('version = "%s",\n' % (self.version,)) def exportLiteralChildren(self, outfile, level, name_): if self.compounddef: showIndent(outfile, level) outfile.write('compounddef=model_.compounddefType(\n') self.compounddef.exportLiteral(outfile, level, name_='compounddef') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('version'): self.version = attrs.get('version').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'compounddef': obj_ = compounddefType.factory() obj_.build(child_) self.set_compounddef(obj_) # end class DoxygenType class compounddefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, prot=None, id=None, compoundname=None, title=None, basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None): self.kind = kind self.prot = prot self.id = id self.compoundname = compoundname self.title = title if basecompoundref is None: self.basecompoundref = [] else: self.basecompoundref = basecompoundref if derivedcompoundref is None: self.derivedcompoundref = [] else: self.derivedcompoundref = derivedcompoundref if includes is None: self.includes = [] else: self.includes = includes if includedby is None: self.includedby = [] else: self.includedby = includedby self.incdepgraph = incdepgraph self.invincdepgraph = invincdepgraph if innerdir is None: self.innerdir = [] else: self.innerdir = innerdir if innerfile is None: self.innerfile = [] else: self.innerfile = innerfile if innerclass is None: self.innerclass = [] else: self.innerclass = innerclass if innernamespace is None: self.innernamespace = [] else: self.innernamespace = innernamespace if innerpage is None: self.innerpage = [] else: self.innerpage = innerpage if innergroup is None: self.innergroup = [] else: self.innergroup = innergroup self.templateparamlist = templateparamlist if sectiondef is None: self.sectiondef = [] else: self.sectiondef = sectiondef self.briefdescription = briefdescription self.detaileddescription = detaileddescription self.inheritancegraph = inheritancegraph self.collaborationgraph = collaborationgraph self.programlisting = programlisting self.location = location self.listofallmembers = listofallmembers def factory(*args_, **kwargs_): if compounddefType.subclass: return compounddefType.subclass(*args_, **kwargs_) else: return compounddefType(*args_, **kwargs_) factory = staticmethod(factory) def get_compoundname(self): return self.compoundname def set_compoundname(self, compoundname): self.compoundname = compoundname def get_title(self): return self.title def set_title(self, title): self.title = title def get_basecompoundref(self): return self.basecompoundref def set_basecompoundref(self, basecompoundref): self.basecompoundref = basecompoundref def add_basecompoundref(self, value): self.basecompoundref.append(value) def insert_basecompoundref(self, index, value): self.basecompoundref[index] = value def get_derivedcompoundref(self): return self.derivedcompoundref def set_derivedcompoundref(self, derivedcompoundref): self.derivedcompoundref = derivedcompoundref def add_derivedcompoundref(self, value): self.derivedcompoundref.append(value) def insert_derivedcompoundref(self, index, value): self.derivedcompoundref[index] = value def get_includes(self): return self.includes def set_includes(self, includes): self.includes = includes def add_includes(self, value): self.includes.append(value) def insert_includes(self, index, value): self.includes[index] = value def get_includedby(self): return self.includedby def set_includedby(self, includedby): self.includedby = includedby def add_includedby(self, value): self.includedby.append(value) def insert_includedby(self, index, value): self.includedby[index] = value def get_incdepgraph(self): return self.incdepgraph def set_incdepgraph(self, incdepgraph): self.incdepgraph = incdepgraph def get_invincdepgraph(self): return self.invincdepgraph def set_invincdepgraph(self, invincdepgraph): self.invincdepgraph = invincdepgraph def get_innerdir(self): return self.innerdir def set_innerdir(self, innerdir): self.innerdir = innerdir def add_innerdir(self, value): self.innerdir.append(value) def insert_innerdir(self, index, value): self.innerdir[index] = value def get_innerfile(self): return self.innerfile def set_innerfile(self, innerfile): self.innerfile = innerfile def add_innerfile(self, value): self.innerfile.append(value) def insert_innerfile(self, index, value): self.innerfile[index] = value def get_innerclass(self): return self.innerclass def set_innerclass(self, innerclass): self.innerclass = innerclass def add_innerclass(self, value): self.innerclass.append(value) def insert_innerclass(self, index, value): self.innerclass[index] = value def get_innernamespace(self): return self.innernamespace def set_innernamespace(self, innernamespace): self.innernamespace = innernamespace def add_innernamespace(self, value): self.innernamespace.append(value) def insert_innernamespace(self, index, value): self.innernamespace[index] = value def get_innerpage(self): return self.innerpage def set_innerpage(self, innerpage): self.innerpage = innerpage def add_innerpage(self, value): self.innerpage.append(value) def insert_innerpage(self, index, value): self.innerpage[index] = value def get_innergroup(self): return self.innergroup def set_innergroup(self, innergroup): self.innergroup = innergroup def add_innergroup(self, value): self.innergroup.append(value) def insert_innergroup(self, index, value): self.innergroup[index] = value def get_templateparamlist(self): return self.templateparamlist def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist def get_sectiondef(self): return self.sectiondef def set_sectiondef(self, sectiondef): self.sectiondef = sectiondef def add_sectiondef(self, value): self.sectiondef.append(value) def insert_sectiondef(self, index, value): self.sectiondef[index] = value def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_inheritancegraph(self): return self.inheritancegraph def set_inheritancegraph(self, inheritancegraph): self.inheritancegraph = inheritancegraph def get_collaborationgraph(self): return self.collaborationgraph def set_collaborationgraph(self, collaborationgraph): self.collaborationgraph = collaborationgraph def get_programlisting(self): return self.programlisting def set_programlisting(self, programlisting): self.programlisting = programlisting def get_location(self): return self.location def set_location(self, location): self.location = location def get_listofallmembers(self): return self.listofallmembers def set_listofallmembers(self, listofallmembers): self.listofallmembers = listofallmembers def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='compounddefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='compounddefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='compounddefType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='compounddefType'): if self.compoundname is not None: showIndent(outfile, level) outfile.write('<%scompoundname>%s\n' % (namespace_, self.format_string(quote_xml(self.compoundname).encode(ExternalEncoding), input_name='compoundname'), namespace_)) if self.title is not None: showIndent(outfile, level) outfile.write('<%stitle>%s\n' % (namespace_, self.format_string(quote_xml(self.title).encode(ExternalEncoding), input_name='title'), namespace_)) for basecompoundref_ in self.basecompoundref: basecompoundref_.export(outfile, level, namespace_, name_='basecompoundref') for derivedcompoundref_ in self.derivedcompoundref: derivedcompoundref_.export(outfile, level, namespace_, name_='derivedcompoundref') for includes_ in self.includes: includes_.export(outfile, level, namespace_, name_='includes') for includedby_ in self.includedby: includedby_.export(outfile, level, namespace_, name_='includedby') if self.incdepgraph: self.incdepgraph.export(outfile, level, namespace_, name_='incdepgraph') if self.invincdepgraph: self.invincdepgraph.export(outfile, level, namespace_, name_='invincdepgraph') for innerdir_ in self.innerdir: innerdir_.export(outfile, level, namespace_, name_='innerdir') for innerfile_ in self.innerfile: innerfile_.export(outfile, level, namespace_, name_='innerfile') for innerclass_ in self.innerclass: innerclass_.export(outfile, level, namespace_, name_='innerclass') for innernamespace_ in self.innernamespace: innernamespace_.export(outfile, level, namespace_, name_='innernamespace') for innerpage_ in self.innerpage: innerpage_.export(outfile, level, namespace_, name_='innerpage') for innergroup_ in self.innergroup: innergroup_.export(outfile, level, namespace_, name_='innergroup') if self.templateparamlist: self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') for sectiondef_ in self.sectiondef: sectiondef_.export(outfile, level, namespace_, name_='sectiondef') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') if self.inheritancegraph: self.inheritancegraph.export(outfile, level, namespace_, name_='inheritancegraph') if self.collaborationgraph: self.collaborationgraph.export(outfile, level, namespace_, name_='collaborationgraph') if self.programlisting: self.programlisting.export(outfile, level, namespace_, name_='programlisting') if self.location: self.location.export(outfile, level, namespace_, name_='location') if self.listofallmembers: self.listofallmembers.export(outfile, level, namespace_, name_='listofallmembers') def hasContent_(self): if ( self.compoundname is not None or self.title is not None or self.basecompoundref is not None or self.derivedcompoundref is not None or self.includes is not None or self.includedby is not None or self.incdepgraph is not None or self.invincdepgraph is not None or self.innerdir is not None or self.innerfile is not None or self.innerclass is not None or self.innernamespace is not None or self.innerpage is not None or self.innergroup is not None or self.templateparamlist is not None or self.sectiondef is not None or self.briefdescription is not None or self.detaileddescription is not None or self.inheritancegraph is not None or self.collaborationgraph is not None or self.programlisting is not None or self.location is not None or self.listofallmembers is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='compounddefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('compoundname=%s,\n' % quote_python(self.compoundname).encode(ExternalEncoding)) if self.title: showIndent(outfile, level) outfile.write('title=model_.xsd_string(\n') self.title.exportLiteral(outfile, level, name_='title') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('basecompoundref=[\n') level += 1 for basecompoundref in self.basecompoundref: showIndent(outfile, level) outfile.write('model_.basecompoundref(\n') basecompoundref.exportLiteral(outfile, level, name_='basecompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('derivedcompoundref=[\n') level += 1 for derivedcompoundref in self.derivedcompoundref: showIndent(outfile, level) outfile.write('model_.derivedcompoundref(\n') derivedcompoundref.exportLiteral(outfile, level, name_='derivedcompoundref') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('includes=[\n') level += 1 for includes in self.includes: showIndent(outfile, level) outfile.write('model_.includes(\n') includes.exportLiteral(outfile, level, name_='includes') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('includedby=[\n') level += 1 for includedby in self.includedby: showIndent(outfile, level) outfile.write('model_.includedby(\n') includedby.exportLiteral(outfile, level, name_='includedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.incdepgraph: showIndent(outfile, level) outfile.write('incdepgraph=model_.graphType(\n') self.incdepgraph.exportLiteral(outfile, level, name_='incdepgraph') showIndent(outfile, level) outfile.write('),\n') if self.invincdepgraph: showIndent(outfile, level) outfile.write('invincdepgraph=model_.graphType(\n') self.invincdepgraph.exportLiteral(outfile, level, name_='invincdepgraph') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('innerdir=[\n') level += 1 for innerdir in self.innerdir: showIndent(outfile, level) outfile.write('model_.innerdir(\n') innerdir.exportLiteral(outfile, level, name_='innerdir') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerfile=[\n') level += 1 for innerfile in self.innerfile: showIndent(outfile, level) outfile.write('model_.innerfile(\n') innerfile.exportLiteral(outfile, level, name_='innerfile') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerclass=[\n') level += 1 for innerclass in self.innerclass: showIndent(outfile, level) outfile.write('model_.innerclass(\n') innerclass.exportLiteral(outfile, level, name_='innerclass') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innernamespace=[\n') level += 1 for innernamespace in self.innernamespace: showIndent(outfile, level) outfile.write('model_.innernamespace(\n') innernamespace.exportLiteral(outfile, level, name_='innernamespace') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innerpage=[\n') level += 1 for innerpage in self.innerpage: showIndent(outfile, level) outfile.write('model_.innerpage(\n') innerpage.exportLiteral(outfile, level, name_='innerpage') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('innergroup=[\n') level += 1 for innergroup in self.innergroup: showIndent(outfile, level) outfile.write('model_.innergroup(\n') innergroup.exportLiteral(outfile, level, name_='innergroup') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('sectiondef=[\n') level += 1 for sectiondef in self.sectiondef: showIndent(outfile, level) outfile.write('model_.sectiondef(\n') sectiondef.exportLiteral(outfile, level, name_='sectiondef') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) outfile.write('detaileddescription=model_.descriptionType(\n') self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') showIndent(outfile, level) outfile.write('),\n') if self.inheritancegraph: showIndent(outfile, level) outfile.write('inheritancegraph=model_.graphType(\n') self.inheritancegraph.exportLiteral(outfile, level, name_='inheritancegraph') showIndent(outfile, level) outfile.write('),\n') if self.collaborationgraph: showIndent(outfile, level) outfile.write('collaborationgraph=model_.graphType(\n') self.collaborationgraph.exportLiteral(outfile, level, name_='collaborationgraph') showIndent(outfile, level) outfile.write('),\n') if self.programlisting: showIndent(outfile, level) outfile.write('programlisting=model_.listingType(\n') self.programlisting.exportLiteral(outfile, level, name_='programlisting') showIndent(outfile, level) outfile.write('),\n') if self.location: showIndent(outfile, level) outfile.write('location=model_.locationType(\n') self.location.exportLiteral(outfile, level, name_='location') showIndent(outfile, level) outfile.write('),\n') if self.listofallmembers: showIndent(outfile, level) outfile.write('listofallmembers=model_.listofallmembersType(\n') self.listofallmembers.exportLiteral(outfile, level, name_='listofallmembers') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'compoundname': compoundname_ = '' for text__content_ in child_.childNodes: compoundname_ += text__content_.nodeValue self.compoundname = compoundname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'basecompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.basecompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'derivedcompoundref': obj_ = compoundRefType.factory() obj_.build(child_) self.derivedcompoundref.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'includes': obj_ = incType.factory() obj_.build(child_) self.includes.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'includedby': obj_ = incType.factory() obj_.build(child_) self.includedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'incdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_incdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'invincdepgraph': obj_ = graphType.factory() obj_.build(child_) self.set_invincdepgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerdir': obj_ = refType.factory() obj_.build(child_) self.innerdir.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerfile': obj_ = refType.factory() obj_.build(child_) self.innerfile.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerclass': obj_ = refType.factory() obj_.build(child_) self.innerclass.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innernamespace': obj_ = refType.factory() obj_.build(child_) self.innernamespace.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innerpage': obj_ = refType.factory() obj_.build(child_) self.innerpage.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'innergroup': obj_ = refType.factory() obj_.build(child_) self.innergroup.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sectiondef': obj_ = sectiondefType.factory() obj_.build(child_) self.sectiondef.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'inheritancegraph': obj_ = graphType.factory() obj_.build(child_) self.set_inheritancegraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'collaborationgraph': obj_ = graphType.factory() obj_.build(child_) self.set_collaborationgraph(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'programlisting': obj_ = listingType.factory() obj_.build(child_) self.set_programlisting(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'listofallmembers': obj_ = listofallmembersType.factory() obj_.build(child_) self.set_listofallmembers(obj_) # end class compounddefType class listofallmembersType(GeneratedsSuper): subclass = None superclass = None def __init__(self, member=None): if member is None: self.member = [] else: self.member = member def factory(*args_, **kwargs_): if listofallmembersType.subclass: return listofallmembersType.subclass(*args_, **kwargs_) else: return listofallmembersType(*args_, **kwargs_) factory = staticmethod(factory) def get_member(self): return self.member def set_member(self, member): self.member = member def add_member(self, value): self.member.append(value) def insert_member(self, index, value): self.member[index] = value def export(self, outfile, level, namespace_='', name_='listofallmembersType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='listofallmembersType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='listofallmembersType'): pass def exportChildren(self, outfile, level, namespace_='', name_='listofallmembersType'): for member_ in self.member: member_.export(outfile, level, namespace_, name_='member') def hasContent_(self): if ( self.member is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='listofallmembersType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('member=[\n') level += 1 for member in self.member: showIndent(outfile, level) outfile.write('model_.member(\n') member.exportLiteral(outfile, level, name_='member') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'member': obj_ = memberRefType.factory() obj_.build(child_) self.member.append(obj_) # end class listofallmembersType class memberRefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope=None, name=None): self.virt = virt self.prot = prot self.refid = refid self.ambiguityscope = ambiguityscope self.scope = scope self.name = name def factory(*args_, **kwargs_): if memberRefType.subclass: return memberRefType.subclass(*args_, **kwargs_) else: return memberRefType(*args_, **kwargs_) factory = staticmethod(factory) def get_scope(self): return self.scope def set_scope(self, scope): self.scope = scope def get_name(self): return self.name def set_name(self, name): self.name = name def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_ambiguityscope(self): return self.ambiguityscope def set_ambiguityscope(self, ambiguityscope): self.ambiguityscope = ambiguityscope def export(self, outfile, level, namespace_='', name_='memberRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='memberRefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='memberRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.ambiguityscope is not None: outfile.write(' ambiguityscope=%s' % (self.format_string(quote_attrib(self.ambiguityscope).encode(ExternalEncoding), input_name='ambiguityscope'), )) def exportChildren(self, outfile, level, namespace_='', name_='memberRefType'): if self.scope is not None: showIndent(outfile, level) outfile.write('<%sscope>%s\n' % (namespace_, self.format_string(quote_xml(self.scope).encode(ExternalEncoding), input_name='scope'), namespace_)) if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) def hasContent_(self): if ( self.scope is not None or self.name is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='memberRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.ambiguityscope is not None: showIndent(outfile, level) outfile.write('ambiguityscope = %s,\n' % (self.ambiguityscope,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('scope=%s,\n' % quote_python(self.scope).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('ambiguityscope'): self.ambiguityscope = attrs.get('ambiguityscope').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'scope': scope_ = '' for text__content_ in child_.childNodes: scope_ += text__content_.nodeValue self.scope = scope_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ # end class memberRefType class scope(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if scope.subclass: return scope.subclass(*args_, **kwargs_) else: return scope(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='scope', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='scope') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='scope'): pass def exportChildren(self, outfile, level, namespace_='', name_='scope'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='scope'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class scope class name(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if name.subclass: return name.subclass(*args_, **kwargs_) else: return name(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='name', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='name') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='name'): pass def exportChildren(self, outfile, level, namespace_='', name_='name'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='name'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class name class compoundRefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.virt = virt self.prot = prot self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if compoundRefType.subclass: return compoundRefType.subclass(*args_, **kwargs_) else: return compoundRefType(*args_, **kwargs_) factory = staticmethod(factory) def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='compoundRefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='compoundRefType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='compoundRefType'): if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='compoundRefType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='compoundRefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class compoundRefType class reimplementType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if reimplementType.subclass: return reimplementType.subclass(*args_, **kwargs_) else: return reimplementType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='reimplementType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='reimplementType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='reimplementType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='reimplementType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='reimplementType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class reimplementType class incType(GeneratedsSuper): subclass = None superclass = None def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.local = local self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if incType.subclass: return incType.subclass(*args_, **kwargs_) else: return incType(*args_, **kwargs_) factory = staticmethod(factory) def get_local(self): return self.local def set_local(self, local): self.local = local def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='incType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='incType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='incType'): if self.local is not None: outfile.write(' local=%s' % (quote_attrib(self.local), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='incType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='incType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.local is not None: showIndent(outfile, level) outfile.write('local = "%s",\n' % (self.local,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('local'): self.local = attrs.get('local').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class incType class refType(GeneratedsSuper): subclass = None superclass = None def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None): self.prot = prot self.refid = refid if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if refType.subclass: return refType.subclass(*args_, **kwargs_) else: return refType(*args_, **kwargs_) factory = staticmethod(factory) def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='refType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='refType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='refType'): if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='refType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='refType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class refType class refTextType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid self.kindref = kindref self.external = external if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if refTextType.subclass: return refTextType.subclass(*args_, **kwargs_) else: return refTextType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_kindref(self): return self.kindref def set_kindref(self, kindref): self.kindref = kindref def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='refTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='refTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='refTextType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.kindref is not None: outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) if self.external is not None: outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) def exportChildren(self, outfile, level, namespace_='', name_='refTextType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='refTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.kindref is not None: showIndent(outfile, level) outfile.write('kindref = "%s",\n' % (self.kindref,)) if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('kindref'): self.kindref = attrs.get('kindref').value if attrs.get('external'): self.external = attrs.get('external').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class refTextType class sectiondefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, header=None, description=None, memberdef=None): self.kind = kind self.header = header self.description = description if memberdef is None: self.memberdef = [] else: self.memberdef = memberdef def factory(*args_, **kwargs_): if sectiondefType.subclass: return sectiondefType.subclass(*args_, **kwargs_) else: return sectiondefType(*args_, **kwargs_) factory = staticmethod(factory) def get_header(self): return self.header def set_header(self, header): self.header = header def get_description(self): return self.description def set_description(self, description): self.description = description def get_memberdef(self): return self.memberdef def set_memberdef(self, memberdef): self.memberdef = memberdef def add_memberdef(self, value): self.memberdef.append(value) def insert_memberdef(self, index, value): self.memberdef[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def export(self, outfile, level, namespace_='', name_='sectiondefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='sectiondefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='sectiondefType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) def exportChildren(self, outfile, level, namespace_='', name_='sectiondefType'): if self.header is not None: showIndent(outfile, level) outfile.write('<%sheader>%s\n' % (namespace_, self.format_string(quote_xml(self.header).encode(ExternalEncoding), input_name='header'), namespace_)) if self.description: self.description.export(outfile, level, namespace_, name_='description') for memberdef_ in self.memberdef: memberdef_.export(outfile, level, namespace_, name_='memberdef') def hasContent_(self): if ( self.header is not None or self.description is not None or self.memberdef is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='sectiondefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('header=%s,\n' % quote_python(self.header).encode(ExternalEncoding)) if self.description: showIndent(outfile, level) outfile.write('description=model_.descriptionType(\n') self.description.exportLiteral(outfile, level, name_='description') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('memberdef=[\n') level += 1 for memberdef in self.memberdef: showIndent(outfile, level) outfile.write('model_.memberdef(\n') memberdef.exportLiteral(outfile, level, name_='memberdef') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'header': header_ = '' for text__content_ in child_.childNodes: header_ += text__content_.nodeValue self.header = header_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'description': obj_ = descriptionType.factory() obj_.build(child_) self.set_description(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'memberdef': obj_ = memberdefType.factory() obj_.build(child_) self.memberdef.append(obj_) # end class sectiondefType class memberdefType(GeneratedsSuper): subclass = None superclass = None def __init__(self, initonly=None, kind=None, volatile=None, const=None, raisexx=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition=None, argsstring=None, name=None, read=None, write=None, bitfield=None, reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None): self.initonly = initonly self.kind = kind self.volatile = volatile self.const = const self.raisexx = raisexx self.virt = virt self.readable = readable self.prot = prot self.explicit = explicit self.new = new self.final = final self.writable = writable self.add = add self.static = static self.remove = remove self.sealed = sealed self.mutable = mutable self.gettable = gettable self.inline = inline self.settable = settable self.id = id self.templateparamlist = templateparamlist self.type_ = type_ self.definition = definition self.argsstring = argsstring self.name = name self.read = read self.write = write self.bitfield = bitfield if reimplements is None: self.reimplements = [] else: self.reimplements = reimplements if reimplementedby is None: self.reimplementedby = [] else: self.reimplementedby = reimplementedby if param is None: self.param = [] else: self.param = param if enumvalue is None: self.enumvalue = [] else: self.enumvalue = enumvalue self.initializer = initializer self.exceptions = exceptions self.briefdescription = briefdescription self.detaileddescription = detaileddescription self.inbodydescription = inbodydescription self.location = location if references is None: self.references = [] else: self.references = references if referencedby is None: self.referencedby = [] else: self.referencedby = referencedby def factory(*args_, **kwargs_): if memberdefType.subclass: return memberdefType.subclass(*args_, **kwargs_) else: return memberdefType(*args_, **kwargs_) factory = staticmethod(factory) def get_templateparamlist(self): return self.templateparamlist def set_templateparamlist(self, templateparamlist): self.templateparamlist = templateparamlist def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_definition(self): return self.definition def set_definition(self, definition): self.definition = definition def get_argsstring(self): return self.argsstring def set_argsstring(self, argsstring): self.argsstring = argsstring def get_name(self): return self.name def set_name(self, name): self.name = name def get_read(self): return self.read def set_read(self, read): self.read = read def get_write(self): return self.write def set_write(self, write): self.write = write def get_bitfield(self): return self.bitfield def set_bitfield(self, bitfield): self.bitfield = bitfield def get_reimplements(self): return self.reimplements def set_reimplements(self, reimplements): self.reimplements = reimplements def add_reimplements(self, value): self.reimplements.append(value) def insert_reimplements(self, index, value): self.reimplements[index] = value def get_reimplementedby(self): return self.reimplementedby def set_reimplementedby(self, reimplementedby): self.reimplementedby = reimplementedby def add_reimplementedby(self, value): self.reimplementedby.append(value) def insert_reimplementedby(self, index, value): self.reimplementedby[index] = value def get_param(self): return self.param def set_param(self, param): self.param = param def add_param(self, value): self.param.append(value) def insert_param(self, index, value): self.param[index] = value def get_enumvalue(self): return self.enumvalue def set_enumvalue(self, enumvalue): self.enumvalue = enumvalue def add_enumvalue(self, value): self.enumvalue.append(value) def insert_enumvalue(self, index, value): self.enumvalue[index] = value def get_initializer(self): return self.initializer def set_initializer(self, initializer): self.initializer = initializer def get_exceptions(self): return self.exceptions def set_exceptions(self, exceptions): self.exceptions = exceptions def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_inbodydescription(self): return self.inbodydescription def set_inbodydescription(self, inbodydescription): self.inbodydescription = inbodydescription def get_location(self): return self.location def set_location(self, location): self.location = location def get_references(self): return self.references def set_references(self, references): self.references = references def add_references(self, value): self.references.append(value) def insert_references(self, index, value): self.references[index] = value def get_referencedby(self): return self.referencedby def set_referencedby(self, referencedby): self.referencedby = referencedby def add_referencedby(self, value): self.referencedby.append(value) def insert_referencedby(self, index, value): self.referencedby[index] = value def get_initonly(self): return self.initonly def set_initonly(self, initonly): self.initonly = initonly def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_volatile(self): return self.volatile def set_volatile(self, volatile): self.volatile = volatile def get_const(self): return self.const def set_const(self, const): self.const = const def get_raise(self): return self.raisexx def set_raise(self, raisexx): self.raisexx = raisexx def get_virt(self): return self.virt def set_virt(self, virt): self.virt = virt def get_readable(self): return self.readable def set_readable(self, readable): self.readable = readable def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_explicit(self): return self.explicit def set_explicit(self, explicit): self.explicit = explicit def get_new(self): return self.new def set_new(self, new): self.new = new def get_final(self): return self.final def set_final(self, final): self.final = final def get_writable(self): return self.writable def set_writable(self, writable): self.writable = writable def get_add(self): return self.add def set_add(self, add): self.add = add def get_static(self): return self.static def set_static(self, static): self.static = static def get_remove(self): return self.remove def set_remove(self, remove): self.remove = remove def get_sealed(self): return self.sealed def set_sealed(self, sealed): self.sealed = sealed def get_mutable(self): return self.mutable def set_mutable(self, mutable): self.mutable = mutable def get_gettable(self): return self.gettable def set_gettable(self, gettable): self.gettable = gettable def get_inline(self): return self.inline def set_inline(self, inline): self.inline = inline def get_settable(self): return self.settable def set_settable(self, settable): self.settable = settable def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='memberdefType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='memberdefType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='memberdefType'): if self.initonly is not None: outfile.write(' initonly=%s' % (quote_attrib(self.initonly), )) if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) if self.volatile is not None: outfile.write(' volatile=%s' % (quote_attrib(self.volatile), )) if self.const is not None: outfile.write(' const=%s' % (quote_attrib(self.const), )) if self.raisexx is not None: outfile.write(' raise=%s' % (quote_attrib(self.raisexx), )) if self.virt is not None: outfile.write(' virt=%s' % (quote_attrib(self.virt), )) if self.readable is not None: outfile.write(' readable=%s' % (quote_attrib(self.readable), )) if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.explicit is not None: outfile.write(' explicit=%s' % (quote_attrib(self.explicit), )) if self.new is not None: outfile.write(' new=%s' % (quote_attrib(self.new), )) if self.final is not None: outfile.write(' final=%s' % (quote_attrib(self.final), )) if self.writable is not None: outfile.write(' writable=%s' % (quote_attrib(self.writable), )) if self.add is not None: outfile.write(' add=%s' % (quote_attrib(self.add), )) if self.static is not None: outfile.write(' static=%s' % (quote_attrib(self.static), )) if self.remove is not None: outfile.write(' remove=%s' % (quote_attrib(self.remove), )) if self.sealed is not None: outfile.write(' sealed=%s' % (quote_attrib(self.sealed), )) if self.mutable is not None: outfile.write(' mutable=%s' % (quote_attrib(self.mutable), )) if self.gettable is not None: outfile.write(' gettable=%s' % (quote_attrib(self.gettable), )) if self.inline is not None: outfile.write(' inline=%s' % (quote_attrib(self.inline), )) if self.settable is not None: outfile.write(' settable=%s' % (quote_attrib(self.settable), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='memberdefType'): if self.templateparamlist: self.templateparamlist.export(outfile, level, namespace_, name_='templateparamlist') if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.definition is not None: showIndent(outfile, level) outfile.write('<%sdefinition>%s\n' % (namespace_, self.format_string(quote_xml(self.definition).encode(ExternalEncoding), input_name='definition'), namespace_)) if self.argsstring is not None: showIndent(outfile, level) outfile.write('<%sargsstring>%s\n' % (namespace_, self.format_string(quote_xml(self.argsstring).encode(ExternalEncoding), input_name='argsstring'), namespace_)) if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) if self.read is not None: showIndent(outfile, level) outfile.write('<%sread>%s\n' % (namespace_, self.format_string(quote_xml(self.read).encode(ExternalEncoding), input_name='read'), namespace_)) if self.write is not None: showIndent(outfile, level) outfile.write('<%swrite>%s\n' % (namespace_, self.format_string(quote_xml(self.write).encode(ExternalEncoding), input_name='write'), namespace_)) if self.bitfield is not None: showIndent(outfile, level) outfile.write('<%sbitfield>%s\n' % (namespace_, self.format_string(quote_xml(self.bitfield).encode(ExternalEncoding), input_name='bitfield'), namespace_)) for reimplements_ in self.reimplements: reimplements_.export(outfile, level, namespace_, name_='reimplements') for reimplementedby_ in self.reimplementedby: reimplementedby_.export(outfile, level, namespace_, name_='reimplementedby') for param_ in self.param: param_.export(outfile, level, namespace_, name_='param') for enumvalue_ in self.enumvalue: enumvalue_.export(outfile, level, namespace_, name_='enumvalue') if self.initializer: self.initializer.export(outfile, level, namespace_, name_='initializer') if self.exceptions: self.exceptions.export(outfile, level, namespace_, name_='exceptions') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') if self.detaileddescription: self.detaileddescription.export(outfile, level, namespace_, name_='detaileddescription') if self.inbodydescription: self.inbodydescription.export(outfile, level, namespace_, name_='inbodydescription') if self.location: self.location.export(outfile, level, namespace_, name_='location', ) for references_ in self.references: references_.export(outfile, level, namespace_, name_='references') for referencedby_ in self.referencedby: referencedby_.export(outfile, level, namespace_, name_='referencedby') def hasContent_(self): if ( self.templateparamlist is not None or self.type_ is not None or self.definition is not None or self.argsstring is not None or self.name is not None or self.read is not None or self.write is not None or self.bitfield is not None or self.reimplements is not None or self.reimplementedby is not None or self.param is not None or self.enumvalue is not None or self.initializer is not None or self.exceptions is not None or self.briefdescription is not None or self.detaileddescription is not None or self.inbodydescription is not None or self.location is not None or self.references is not None or self.referencedby is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='memberdefType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.initonly is not None: showIndent(outfile, level) outfile.write('initonly = "%s",\n' % (self.initonly,)) if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.volatile is not None: showIndent(outfile, level) outfile.write('volatile = "%s",\n' % (self.volatile,)) if self.const is not None: showIndent(outfile, level) outfile.write('const = "%s",\n' % (self.const,)) if self.raisexx is not None: showIndent(outfile, level) outfile.write('raisexx = "%s",\n' % (self.raisexx,)) if self.virt is not None: showIndent(outfile, level) outfile.write('virt = "%s",\n' % (self.virt,)) if self.readable is not None: showIndent(outfile, level) outfile.write('readable = "%s",\n' % (self.readable,)) if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.explicit is not None: showIndent(outfile, level) outfile.write('explicit = "%s",\n' % (self.explicit,)) if self.new is not None: showIndent(outfile, level) outfile.write('new = "%s",\n' % (self.new,)) if self.final is not None: showIndent(outfile, level) outfile.write('final = "%s",\n' % (self.final,)) if self.writable is not None: showIndent(outfile, level) outfile.write('writable = "%s",\n' % (self.writable,)) if self.add is not None: showIndent(outfile, level) outfile.write('add = "%s",\n' % (self.add,)) if self.static is not None: showIndent(outfile, level) outfile.write('static = "%s",\n' % (self.static,)) if self.remove is not None: showIndent(outfile, level) outfile.write('remove = "%s",\n' % (self.remove,)) if self.sealed is not None: showIndent(outfile, level) outfile.write('sealed = "%s",\n' % (self.sealed,)) if self.mutable is not None: showIndent(outfile, level) outfile.write('mutable = "%s",\n' % (self.mutable,)) if self.gettable is not None: showIndent(outfile, level) outfile.write('gettable = "%s",\n' % (self.gettable,)) if self.inline is not None: showIndent(outfile, level) outfile.write('inline = "%s",\n' % (self.inline,)) if self.settable is not None: showIndent(outfile, level) outfile.write('settable = "%s",\n' % (self.settable,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): if self.templateparamlist: showIndent(outfile, level) outfile.write('templateparamlist=model_.templateparamlistType(\n') self.templateparamlist.exportLiteral(outfile, level, name_='templateparamlist') showIndent(outfile, level) outfile.write('),\n') if self.type_: showIndent(outfile, level) outfile.write('type_=model_.linkedTextType(\n') self.type_.exportLiteral(outfile, level, name_='type') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('definition=%s,\n' % quote_python(self.definition).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('argsstring=%s,\n' % quote_python(self.argsstring).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('read=%s,\n' % quote_python(self.read).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('write=%s,\n' % quote_python(self.write).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('bitfield=%s,\n' % quote_python(self.bitfield).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('reimplements=[\n') level += 1 for reimplements in self.reimplements: showIndent(outfile, level) outfile.write('model_.reimplements(\n') reimplements.exportLiteral(outfile, level, name_='reimplements') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('reimplementedby=[\n') level += 1 for reimplementedby in self.reimplementedby: showIndent(outfile, level) outfile.write('model_.reimplementedby(\n') reimplementedby.exportLiteral(outfile, level, name_='reimplementedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('param=[\n') level += 1 for param in self.param: showIndent(outfile, level) outfile.write('model_.param(\n') param.exportLiteral(outfile, level, name_='param') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('enumvalue=[\n') level += 1 for enumvalue in self.enumvalue: showIndent(outfile, level) outfile.write('model_.enumvalue(\n') enumvalue.exportLiteral(outfile, level, name_='enumvalue') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.initializer: showIndent(outfile, level) outfile.write('initializer=model_.linkedTextType(\n') self.initializer.exportLiteral(outfile, level, name_='initializer') showIndent(outfile, level) outfile.write('),\n') if self.exceptions: showIndent(outfile, level) outfile.write('exceptions=model_.linkedTextType(\n') self.exceptions.exportLiteral(outfile, level, name_='exceptions') showIndent(outfile, level) outfile.write('),\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') if self.detaileddescription: showIndent(outfile, level) outfile.write('detaileddescription=model_.descriptionType(\n') self.detaileddescription.exportLiteral(outfile, level, name_='detaileddescription') showIndent(outfile, level) outfile.write('),\n') if self.inbodydescription: showIndent(outfile, level) outfile.write('inbodydescription=model_.descriptionType(\n') self.inbodydescription.exportLiteral(outfile, level, name_='inbodydescription') showIndent(outfile, level) outfile.write('),\n') if self.location: showIndent(outfile, level) outfile.write('location=model_.locationType(\n') self.location.exportLiteral(outfile, level, name_='location') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('references=[\n') level += 1 for references in self.references: showIndent(outfile, level) outfile.write('model_.references(\n') references.exportLiteral(outfile, level, name_='references') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('referencedby=[\n') level += 1 for referencedby in self.referencedby: showIndent(outfile, level) outfile.write('model_.referencedby(\n') referencedby.exportLiteral(outfile, level, name_='referencedby') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('initonly'): self.initonly = attrs.get('initonly').value if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('volatile'): self.volatile = attrs.get('volatile').value if attrs.get('const'): self.const = attrs.get('const').value if attrs.get('raise'): self.raisexx = attrs.get('raise').value if attrs.get('virt'): self.virt = attrs.get('virt').value if attrs.get('readable'): self.readable = attrs.get('readable').value if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('explicit'): self.explicit = attrs.get('explicit').value if attrs.get('new'): self.new = attrs.get('new').value if attrs.get('final'): self.final = attrs.get('final').value if attrs.get('writable'): self.writable = attrs.get('writable').value if attrs.get('add'): self.add = attrs.get('add').value if attrs.get('static'): self.static = attrs.get('static').value if attrs.get('remove'): self.remove = attrs.get('remove').value if attrs.get('sealed'): self.sealed = attrs.get('sealed').value if attrs.get('mutable'): self.mutable = attrs.get('mutable').value if attrs.get('gettable'): self.gettable = attrs.get('gettable').value if attrs.get('inline'): self.inline = attrs.get('inline').value if attrs.get('settable'): self.settable = attrs.get('settable').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'templateparamlist': obj_ = templateparamlistType.factory() obj_.build(child_) self.set_templateparamlist(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'definition': definition_ = '' for text__content_ in child_.childNodes: definition_ += text__content_.nodeValue self.definition = definition_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'argsstring': argsstring_ = '' for text__content_ in child_.childNodes: argsstring_ += text__content_.nodeValue self.argsstring = argsstring_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'read': read_ = '' for text__content_ in child_.childNodes: read_ += text__content_.nodeValue self.read = read_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'write': write_ = '' for text__content_ in child_.childNodes: write_ += text__content_.nodeValue self.write = write_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'bitfield': bitfield_ = '' for text__content_ in child_.childNodes: bitfield_ += text__content_.nodeValue self.bitfield = bitfield_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'reimplements': obj_ = reimplementType.factory() obj_.build(child_) self.reimplements.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'reimplementedby': obj_ = reimplementType.factory() obj_.build(child_) self.reimplementedby.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'param': obj_ = paramType.factory() obj_.build(child_) self.param.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'enumvalue': obj_ = enumvalueType.factory() obj_.build(child_) self.enumvalue.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'initializer': obj_ = linkedTextType.factory() obj_.build(child_) self.set_initializer(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'exceptions': obj_ = linkedTextType.factory() obj_.build(child_) self.set_exceptions(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_detaileddescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'inbodydescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_inbodydescription(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'location': obj_ = locationType.factory() obj_.build(child_) self.set_location(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'references': obj_ = referenceType.factory() obj_.build(child_) self.references.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'referencedby': obj_ = referenceType.factory() obj_.build(child_) self.referencedby.append(obj_) # end class memberdefType class definition(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if definition.subclass: return definition.subclass(*args_, **kwargs_) else: return definition(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='definition', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='definition') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='definition'): pass def exportChildren(self, outfile, level, namespace_='', name_='definition'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='definition'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class definition class argsstring(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if argsstring.subclass: return argsstring.subclass(*args_, **kwargs_) else: return argsstring(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='argsstring', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='argsstring') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='argsstring'): pass def exportChildren(self, outfile, level, namespace_='', name_='argsstring'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='argsstring'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class argsstring class read(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if read.subclass: return read.subclass(*args_, **kwargs_) else: return read(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='read', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='read') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='read'): pass def exportChildren(self, outfile, level, namespace_='', name_='read'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='read'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class read class write(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if write.subclass: return write.subclass(*args_, **kwargs_) else: return write(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='write', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='write') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='write'): pass def exportChildren(self, outfile, level, namespace_='', name_='write'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='write'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class write class bitfield(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if bitfield.subclass: return bitfield.subclass(*args_, **kwargs_) else: return bitfield(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='bitfield', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='bitfield') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='bitfield'): pass def exportChildren(self, outfile, level, namespace_='', name_='bitfield'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='bitfield'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class bitfield class descriptionType(GeneratedsSuper): subclass = None superclass = None def __init__(self, title=None, para=None, sect1=None, internal=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if descriptionType.subclass: return descriptionType.subclass(*args_, **kwargs_) else: return descriptionType(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect1(self): return self.sect1 def set_sect1(self, sect1): self.sect1 = sect1 def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def export(self, outfile, level, namespace_='', name_='descriptionType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='descriptionType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='descriptionType'): pass def exportChildren(self, outfile, level, namespace_='', name_='descriptionType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect1 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='descriptionType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect1': childobj_ = docSect1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect1', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class descriptionType class enumvalueType(GeneratedsSuper): subclass = None superclass = None def __init__(self, prot=None, id=None, name=None, initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None): self.prot = prot self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if enumvalueType.subclass: return enumvalueType.subclass(*args_, **kwargs_) else: return enumvalueType(*args_, **kwargs_) factory = staticmethod(factory) def get_name(self): return self.name def set_name(self, name): self.name = name def get_initializer(self): return self.initializer def set_initializer(self, initializer): self.initializer = initializer def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def get_detaileddescription(self): return self.detaileddescription def set_detaileddescription(self, detaileddescription): self.detaileddescription = detaileddescription def get_prot(self): return self.prot def set_prot(self, prot): self.prot = prot def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='enumvalueType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='enumvalueType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='enumvalueType'): if self.prot is not None: outfile.write(' prot=%s' % (quote_attrib(self.prot), )) if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='enumvalueType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.name is not None or self.initializer is not None or self.briefdescription is not None or self.detaileddescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='enumvalueType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.prot is not None: showIndent(outfile, level) outfile.write('prot = "%s",\n' % (self.prot,)) if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('prot'): self.prot = attrs.get('prot').value if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': value_ = [] for text_ in child_.childNodes: value_.append(text_.nodeValue) valuestr_ = ''.join(value_) obj_ = self.mixedclass_(MixedContainer.CategorySimple, MixedContainer.TypeString, 'name', valuestr_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'initializer': childobj_ = linkedTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'initializer', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': childobj_ = descriptionType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'briefdescription', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'detaileddescription': childobj_ = descriptionType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'detaileddescription', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class enumvalueType class templateparamlistType(GeneratedsSuper): subclass = None superclass = None def __init__(self, param=None): if param is None: self.param = [] else: self.param = param def factory(*args_, **kwargs_): if templateparamlistType.subclass: return templateparamlistType.subclass(*args_, **kwargs_) else: return templateparamlistType(*args_, **kwargs_) factory = staticmethod(factory) def get_param(self): return self.param def set_param(self, param): self.param = param def add_param(self, value): self.param.append(value) def insert_param(self, index, value): self.param[index] = value def export(self, outfile, level, namespace_='', name_='templateparamlistType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='templateparamlistType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='templateparamlistType'): pass def exportChildren(self, outfile, level, namespace_='', name_='templateparamlistType'): for param_ in self.param: param_.export(outfile, level, namespace_, name_='param') def hasContent_(self): if ( self.param is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='templateparamlistType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('param=[\n') level += 1 for param in self.param: showIndent(outfile, level) outfile.write('model_.param(\n') param.exportLiteral(outfile, level, name_='param') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'param': obj_ = paramType.factory() obj_.build(child_) self.param.append(obj_) # end class templateparamlistType class paramType(GeneratedsSuper): subclass = None superclass = None def __init__(self, type_=None, declname=None, defname=None, array=None, defval=None, briefdescription=None): self.type_ = type_ self.declname = declname self.defname = defname self.array = array self.defval = defval self.briefdescription = briefdescription def factory(*args_, **kwargs_): if paramType.subclass: return paramType.subclass(*args_, **kwargs_) else: return paramType(*args_, **kwargs_) factory = staticmethod(factory) def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_declname(self): return self.declname def set_declname(self, declname): self.declname = declname def get_defname(self): return self.defname def set_defname(self, defname): self.defname = defname def get_array(self): return self.array def set_array(self, array): self.array = array def get_defval(self): return self.defval def set_defval(self, defval): self.defval = defval def get_briefdescription(self): return self.briefdescription def set_briefdescription(self, briefdescription): self.briefdescription = briefdescription def export(self, outfile, level, namespace_='', name_='paramType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='paramType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='paramType'): pass def exportChildren(self, outfile, level, namespace_='', name_='paramType'): if self.type_: self.type_.export(outfile, level, namespace_, name_='type') if self.declname is not None: showIndent(outfile, level) outfile.write('<%sdeclname>%s\n' % (namespace_, self.format_string(quote_xml(self.declname).encode(ExternalEncoding), input_name='declname'), namespace_)) if self.defname is not None: showIndent(outfile, level) outfile.write('<%sdefname>%s\n' % (namespace_, self.format_string(quote_xml(self.defname).encode(ExternalEncoding), input_name='defname'), namespace_)) if self.array is not None: showIndent(outfile, level) outfile.write('<%sarray>%s\n' % (namespace_, self.format_string(quote_xml(self.array).encode(ExternalEncoding), input_name='array'), namespace_)) if self.defval: self.defval.export(outfile, level, namespace_, name_='defval') if self.briefdescription: self.briefdescription.export(outfile, level, namespace_, name_='briefdescription') def hasContent_(self): if ( self.type_ is not None or self.declname is not None or self.defname is not None or self.array is not None or self.defval is not None or self.briefdescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='paramType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.type_: showIndent(outfile, level) outfile.write('type_=model_.linkedTextType(\n') self.type_.exportLiteral(outfile, level, name_='type') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('declname=%s,\n' % quote_python(self.declname).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('defname=%s,\n' % quote_python(self.defname).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('array=%s,\n' % quote_python(self.array).encode(ExternalEncoding)) if self.defval: showIndent(outfile, level) outfile.write('defval=model_.linkedTextType(\n') self.defval.exportLiteral(outfile, level, name_='defval') showIndent(outfile, level) outfile.write('),\n') if self.briefdescription: showIndent(outfile, level) outfile.write('briefdescription=model_.descriptionType(\n') self.briefdescription.exportLiteral(outfile, level, name_='briefdescription') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'type': obj_ = linkedTextType.factory() obj_.build(child_) self.set_type(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'declname': declname_ = '' for text__content_ in child_.childNodes: declname_ += text__content_.nodeValue self.declname = declname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'defname': defname_ = '' for text__content_ in child_.childNodes: defname_ += text__content_.nodeValue self.defname = defname_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'array': array_ = '' for text__content_ in child_.childNodes: array_ += text__content_.nodeValue self.array = array_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'defval': obj_ = linkedTextType.factory() obj_.build(child_) self.set_defval(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'briefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_briefdescription(obj_) # end class paramType class declname(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if declname.subclass: return declname.subclass(*args_, **kwargs_) else: return declname(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='declname', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='declname') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='declname'): pass def exportChildren(self, outfile, level, namespace_='', name_='declname'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='declname'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class declname class defname(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if defname.subclass: return defname.subclass(*args_, **kwargs_) else: return defname(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='defname', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='defname') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='defname'): pass def exportChildren(self, outfile, level, namespace_='', name_='defname'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='defname'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class defname class array(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if array.subclass: return array.subclass(*args_, **kwargs_) else: return array(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='array', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='array') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='array'): pass def exportChildren(self, outfile, level, namespace_='', name_='array'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='array'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class array class linkedTextType(GeneratedsSuper): subclass = None superclass = None def __init__(self, ref=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if linkedTextType.subclass: return linkedTextType.subclass(*args_, **kwargs_) else: return linkedTextType(*args_, **kwargs_) factory = staticmethod(factory) def get_ref(self): return self.ref def set_ref(self, ref): self.ref = ref def add_ref(self, value): self.ref.append(value) def insert_ref(self, index, value): self.ref[index] = value def export(self, outfile, level, namespace_='', name_='linkedTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='linkedTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='linkedTextType'): pass def exportChildren(self, outfile, level, namespace_='', name_='linkedTextType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.ref is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='linkedTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class linkedTextType class graphType(GeneratedsSuper): subclass = None superclass = None def __init__(self, node=None): if node is None: self.node = [] else: self.node = node def factory(*args_, **kwargs_): if graphType.subclass: return graphType.subclass(*args_, **kwargs_) else: return graphType(*args_, **kwargs_) factory = staticmethod(factory) def get_node(self): return self.node def set_node(self, node): self.node = node def add_node(self, value): self.node.append(value) def insert_node(self, index, value): self.node[index] = value def export(self, outfile, level, namespace_='', name_='graphType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='graphType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='graphType'): pass def exportChildren(self, outfile, level, namespace_='', name_='graphType'): for node_ in self.node: node_.export(outfile, level, namespace_, name_='node') def hasContent_(self): if ( self.node is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='graphType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('node=[\n') level += 1 for node in self.node: showIndent(outfile, level) outfile.write('model_.node(\n') node.exportLiteral(outfile, level, name_='node') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'node': obj_ = nodeType.factory() obj_.build(child_) self.node.append(obj_) # end class graphType class nodeType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, label=None, link=None, childnode=None): self.id = id self.label = label self.link = link if childnode is None: self.childnode = [] else: self.childnode = childnode def factory(*args_, **kwargs_): if nodeType.subclass: return nodeType.subclass(*args_, **kwargs_) else: return nodeType(*args_, **kwargs_) factory = staticmethod(factory) def get_label(self): return self.label def set_label(self, label): self.label = label def get_link(self): return self.link def set_link(self, link): self.link = link def get_childnode(self): return self.childnode def set_childnode(self, childnode): self.childnode = childnode def add_childnode(self, value): self.childnode.append(value) def insert_childnode(self, index, value): self.childnode[index] = value def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='nodeType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='nodeType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='nodeType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='nodeType'): if self.label is not None: showIndent(outfile, level) outfile.write('<%slabel>%s\n' % (namespace_, self.format_string(quote_xml(self.label).encode(ExternalEncoding), input_name='label'), namespace_)) if self.link: self.link.export(outfile, level, namespace_, name_='link') for childnode_ in self.childnode: childnode_.export(outfile, level, namespace_, name_='childnode') def hasContent_(self): if ( self.label is not None or self.link is not None or self.childnode is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='nodeType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('label=%s,\n' % quote_python(self.label).encode(ExternalEncoding)) if self.link: showIndent(outfile, level) outfile.write('link=model_.linkType(\n') self.link.exportLiteral(outfile, level, name_='link') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('childnode=[\n') level += 1 for childnode in self.childnode: showIndent(outfile, level) outfile.write('model_.childnode(\n') childnode.exportLiteral(outfile, level, name_='childnode') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'label': label_ = '' for text__content_ in child_.childNodes: label_ += text__content_.nodeValue self.label = label_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'link': obj_ = linkType.factory() obj_.build(child_) self.set_link(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'childnode': obj_ = childnodeType.factory() obj_.build(child_) self.childnode.append(obj_) # end class nodeType class label(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if label.subclass: return label.subclass(*args_, **kwargs_) else: return label(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='label', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='label') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='label'): pass def exportChildren(self, outfile, level, namespace_='', name_='label'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='label'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class label class childnodeType(GeneratedsSuper): subclass = None superclass = None def __init__(self, relation=None, refid=None, edgelabel=None): self.relation = relation self.refid = refid if edgelabel is None: self.edgelabel = [] else: self.edgelabel = edgelabel def factory(*args_, **kwargs_): if childnodeType.subclass: return childnodeType.subclass(*args_, **kwargs_) else: return childnodeType(*args_, **kwargs_) factory = staticmethod(factory) def get_edgelabel(self): return self.edgelabel def set_edgelabel(self, edgelabel): self.edgelabel = edgelabel def add_edgelabel(self, value): self.edgelabel.append(value) def insert_edgelabel(self, index, value): self.edgelabel[index] = value def get_relation(self): return self.relation def set_relation(self, relation): self.relation = relation def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def export(self, outfile, level, namespace_='', name_='childnodeType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='childnodeType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='childnodeType'): if self.relation is not None: outfile.write(' relation=%s' % (quote_attrib(self.relation), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='childnodeType'): for edgelabel_ in self.edgelabel: showIndent(outfile, level) outfile.write('<%sedgelabel>%s\n' % (namespace_, self.format_string(quote_xml(edgelabel_).encode(ExternalEncoding), input_name='edgelabel'), namespace_)) def hasContent_(self): if ( self.edgelabel is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='childnodeType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.relation is not None: showIndent(outfile, level) outfile.write('relation = "%s",\n' % (self.relation,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('edgelabel=[\n') level += 1 for edgelabel in self.edgelabel: showIndent(outfile, level) outfile.write('%s,\n' % quote_python(edgelabel).encode(ExternalEncoding)) level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('relation'): self.relation = attrs.get('relation').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'edgelabel': edgelabel_ = '' for text__content_ in child_.childNodes: edgelabel_ += text__content_.nodeValue self.edgelabel.append(edgelabel_) # end class childnodeType class edgelabel(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if edgelabel.subclass: return edgelabel.subclass(*args_, **kwargs_) else: return edgelabel(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='edgelabel', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='edgelabel') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='edgelabel'): pass def exportChildren(self, outfile, level, namespace_='', name_='edgelabel'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='edgelabel'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class edgelabel class linkType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, external=None, valueOf_=''): self.refid = refid self.external = external self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if linkType.subclass: return linkType.subclass(*args_, **kwargs_) else: return linkType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='linkType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='linkType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='linkType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.external is not None: outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) def exportChildren(self, outfile, level, namespace_='', name_='linkType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='linkType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('external'): self.external = attrs.get('external').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class linkType class listingType(GeneratedsSuper): subclass = None superclass = None def __init__(self, codeline=None): if codeline is None: self.codeline = [] else: self.codeline = codeline def factory(*args_, **kwargs_): if listingType.subclass: return listingType.subclass(*args_, **kwargs_) else: return listingType(*args_, **kwargs_) factory = staticmethod(factory) def get_codeline(self): return self.codeline def set_codeline(self, codeline): self.codeline = codeline def add_codeline(self, value): self.codeline.append(value) def insert_codeline(self, index, value): self.codeline[index] = value def export(self, outfile, level, namespace_='', name_='listingType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='listingType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='listingType'): pass def exportChildren(self, outfile, level, namespace_='', name_='listingType'): for codeline_ in self.codeline: codeline_.export(outfile, level, namespace_, name_='codeline') def hasContent_(self): if ( self.codeline is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='listingType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('codeline=[\n') level += 1 for codeline in self.codeline: showIndent(outfile, level) outfile.write('model_.codeline(\n') codeline.exportLiteral(outfile, level, name_='codeline') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'codeline': obj_ = codelineType.factory() obj_.build(child_) self.codeline.append(obj_) # end class listingType class codelineType(GeneratedsSuper): subclass = None superclass = None def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None): self.external = external self.lineno = lineno self.refkind = refkind self.refid = refid if highlight is None: self.highlight = [] else: self.highlight = highlight def factory(*args_, **kwargs_): if codelineType.subclass: return codelineType.subclass(*args_, **kwargs_) else: return codelineType(*args_, **kwargs_) factory = staticmethod(factory) def get_highlight(self): return self.highlight def set_highlight(self, highlight): self.highlight = highlight def add_highlight(self, value): self.highlight.append(value) def insert_highlight(self, index, value): self.highlight[index] = value def get_external(self): return self.external def set_external(self, external): self.external = external def get_lineno(self): return self.lineno def set_lineno(self, lineno): self.lineno = lineno def get_refkind(self): return self.refkind def set_refkind(self, refkind): self.refkind = refkind def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def export(self, outfile, level, namespace_='', name_='codelineType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='codelineType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='codelineType'): if self.external is not None: outfile.write(' external=%s' % (quote_attrib(self.external), )) if self.lineno is not None: outfile.write(' lineno="%s"' % self.format_integer(self.lineno, input_name='lineno')) if self.refkind is not None: outfile.write(' refkind=%s' % (quote_attrib(self.refkind), )) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='codelineType'): for highlight_ in self.highlight: highlight_.export(outfile, level, namespace_, name_='highlight') def hasContent_(self): if ( self.highlight is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='codelineType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.external is not None: showIndent(outfile, level) outfile.write('external = "%s",\n' % (self.external,)) if self.lineno is not None: showIndent(outfile, level) outfile.write('lineno = %s,\n' % (self.lineno,)) if self.refkind is not None: showIndent(outfile, level) outfile.write('refkind = "%s",\n' % (self.refkind,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('highlight=[\n') level += 1 for highlight in self.highlight: showIndent(outfile, level) outfile.write('model_.highlight(\n') highlight.exportLiteral(outfile, level, name_='highlight') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('external'): self.external = attrs.get('external').value if attrs.get('lineno'): try: self.lineno = int(attrs.get('lineno').value) except ValueError, exp: raise ValueError('Bad integer attribute (lineno): %s' % exp) if attrs.get('refkind'): self.refkind = attrs.get('refkind').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'highlight': obj_ = highlightType.factory() obj_.build(child_) self.highlight.append(obj_) # end class codelineType class highlightType(GeneratedsSuper): subclass = None superclass = None def __init__(self, classxx=None, sp=None, ref=None, mixedclass_=None, content_=None): self.classxx = classxx if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if highlightType.subclass: return highlightType.subclass(*args_, **kwargs_) else: return highlightType(*args_, **kwargs_) factory = staticmethod(factory) def get_sp(self): return self.sp def set_sp(self, sp): self.sp = sp def add_sp(self, value): self.sp.append(value) def insert_sp(self, index, value): self.sp[index] = value def get_ref(self): return self.ref def set_ref(self, ref): self.ref = ref def add_ref(self, value): self.ref.append(value) def insert_ref(self, index, value): self.ref[index] = value def get_class(self): return self.classxx def set_class(self, classxx): self.classxx = classxx def export(self, outfile, level, namespace_='', name_='highlightType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='highlightType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='highlightType'): if self.classxx is not None: outfile.write(' class=%s' % (quote_attrib(self.classxx), )) def exportChildren(self, outfile, level, namespace_='', name_='highlightType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.sp is not None or self.ref is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='highlightType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.classxx is not None: showIndent(outfile, level) outfile.write('classxx = "%s",\n' % (self.classxx,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('class'): self.classxx = attrs.get('class').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sp': value_ = [] for text_ in child_.childNodes: value_.append(text_.nodeValue) valuestr_ = ''.join(value_) obj_ = self.mixedclass_(MixedContainer.CategorySimple, MixedContainer.TypeString, 'sp', valuestr_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class highlightType class sp(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if sp.subclass: return sp.subclass(*args_, **kwargs_) else: return sp(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='sp', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='sp') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='sp'): pass def exportChildren(self, outfile, level, namespace_='', name_='sp'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='sp'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class sp class referenceType(GeneratedsSuper): subclass = None superclass = None def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None): self.endline = endline self.startline = startline self.refid = refid self.compoundref = compoundref if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if referenceType.subclass: return referenceType.subclass(*args_, **kwargs_) else: return referenceType(*args_, **kwargs_) factory = staticmethod(factory) def get_endline(self): return self.endline def set_endline(self, endline): self.endline = endline def get_startline(self): return self.startline def set_startline(self, startline): self.startline = startline def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_compoundref(self): return self.compoundref def set_compoundref(self, compoundref): self.compoundref = compoundref def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='referenceType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='referenceType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='referenceType'): if self.endline is not None: outfile.write(' endline="%s"' % self.format_integer(self.endline, input_name='endline')) if self.startline is not None: outfile.write(' startline="%s"' % self.format_integer(self.startline, input_name='startline')) if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.compoundref is not None: outfile.write(' compoundref=%s' % (self.format_string(quote_attrib(self.compoundref).encode(ExternalEncoding), input_name='compoundref'), )) def exportChildren(self, outfile, level, namespace_='', name_='referenceType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='referenceType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.endline is not None: showIndent(outfile, level) outfile.write('endline = %s,\n' % (self.endline,)) if self.startline is not None: showIndent(outfile, level) outfile.write('startline = %s,\n' % (self.startline,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.compoundref is not None: showIndent(outfile, level) outfile.write('compoundref = %s,\n' % (self.compoundref,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('endline'): try: self.endline = int(attrs.get('endline').value) except ValueError, exp: raise ValueError('Bad integer attribute (endline): %s' % exp) if attrs.get('startline'): try: self.startline = int(attrs.get('startline').value) except ValueError, exp: raise ValueError('Bad integer attribute (startline): %s' % exp) if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('compoundref'): self.compoundref = attrs.get('compoundref').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class referenceType class locationType(GeneratedsSuper): subclass = None superclass = None def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''): self.bodystart = bodystart self.line = line self.bodyend = bodyend self.bodyfile = bodyfile self.file = file self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if locationType.subclass: return locationType.subclass(*args_, **kwargs_) else: return locationType(*args_, **kwargs_) factory = staticmethod(factory) def get_bodystart(self): return self.bodystart def set_bodystart(self, bodystart): self.bodystart = bodystart def get_line(self): return self.line def set_line(self, line): self.line = line def get_bodyend(self): return self.bodyend def set_bodyend(self, bodyend): self.bodyend = bodyend def get_bodyfile(self): return self.bodyfile def set_bodyfile(self, bodyfile): self.bodyfile = bodyfile def get_file(self): return self.file def set_file(self, file): self.file = file def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='locationType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='locationType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='locationType'): if self.bodystart is not None: outfile.write(' bodystart="%s"' % self.format_integer(self.bodystart, input_name='bodystart')) if self.line is not None: outfile.write(' line="%s"' % self.format_integer(self.line, input_name='line')) if self.bodyend is not None: outfile.write(' bodyend="%s"' % self.format_integer(self.bodyend, input_name='bodyend')) if self.bodyfile is not None: outfile.write(' bodyfile=%s' % (self.format_string(quote_attrib(self.bodyfile).encode(ExternalEncoding), input_name='bodyfile'), )) if self.file is not None: outfile.write(' file=%s' % (self.format_string(quote_attrib(self.file).encode(ExternalEncoding), input_name='file'), )) def exportChildren(self, outfile, level, namespace_='', name_='locationType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='locationType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.bodystart is not None: showIndent(outfile, level) outfile.write('bodystart = %s,\n' % (self.bodystart,)) if self.line is not None: showIndent(outfile, level) outfile.write('line = %s,\n' % (self.line,)) if self.bodyend is not None: showIndent(outfile, level) outfile.write('bodyend = %s,\n' % (self.bodyend,)) if self.bodyfile is not None: showIndent(outfile, level) outfile.write('bodyfile = %s,\n' % (self.bodyfile,)) if self.file is not None: showIndent(outfile, level) outfile.write('file = %s,\n' % (self.file,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('bodystart'): try: self.bodystart = int(attrs.get('bodystart').value) except ValueError, exp: raise ValueError('Bad integer attribute (bodystart): %s' % exp) if attrs.get('line'): try: self.line = int(attrs.get('line').value) except ValueError, exp: raise ValueError('Bad integer attribute (line): %s' % exp) if attrs.get('bodyend'): try: self.bodyend = int(attrs.get('bodyend').value) except ValueError, exp: raise ValueError('Bad integer attribute (bodyend): %s' % exp) if attrs.get('bodyfile'): self.bodyfile = attrs.get('bodyfile').value if attrs.get('file'): self.file = attrs.get('file').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class locationType class docSect1Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, sect2=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect1Type.subclass: return docSect1Type.subclass(*args_, **kwargs_) else: return docSect1Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect2(self): return self.sect2 def set_sect2(self, sect2): self.sect2 = sect2 def add_sect2(self, value): self.sect2.append(value) def insert_sect2(self, index, value): self.sect2[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect1Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect1Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect1Type'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect1Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect2 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect1Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect2': childobj_ = docSect2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect2', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect1Type class docSect2Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, sect3=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect2Type.subclass: return docSect2Type.subclass(*args_, **kwargs_) else: return docSect2Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect2Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect2Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect2Type'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect2Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect3 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect2Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect3': childobj_ = docSect3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect2Type class docSect3Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, sect4=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect3Type.subclass: return docSect3Type.subclass(*args_, **kwargs_) else: return docSect3Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect4(self): return self.sect4 def set_sect4(self, sect4): self.sect4 = sect4 def add_sect4(self, value): self.sect4.append(value) def insert_sect4(self, index, value): self.sect4[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect3Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect3Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect3Type'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect3Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.sect4 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect3Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect4': childobj_ = docSect4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect4', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect3Type class docSect4Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, title=None, para=None, internal=None, mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docSect4Type.subclass: return docSect4Type.subclass(*args_, **kwargs_) else: return docSect4Type(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docSect4Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSect4Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docSect4Type'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docSect4Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.title is not None or self.para is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSect4Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': childobj_ = docTitleType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'title', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': childobj_ = docInternalS4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'internal', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docSect4Type class docInternalType(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalType.subclass: return docInternalType.subclass(*args_, **kwargs_) else: return docInternalType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect1(self): return self.sect1 def set_sect1(self, sect1): self.sect1 = sect1 def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value def export(self, outfile, level, namespace_='', name_='docInternalType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalType'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect1 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect1': childobj_ = docSect1Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect1', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalType class docInternalS1Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS1Type.subclass: return docInternalS1Type.subclass(*args_, **kwargs_) else: return docInternalS1Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect2(self): return self.sect2 def set_sect2(self, sect2): self.sect2 = sect2 def add_sect2(self, value): self.sect2.append(value) def insert_sect2(self, index, value): self.sect2[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS1Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS1Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS1Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS1Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect2 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS1Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect2': childobj_ = docSect2Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect2', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS1Type class docInternalS2Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS2Type.subclass: return docInternalS2Type.subclass(*args_, **kwargs_) else: return docInternalS2Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS2Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS2Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS2Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS2Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect3 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS2Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect3': childobj_ = docSect3Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS2Type class docInternalS3Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS3Type.subclass: return docInternalS3Type.subclass(*args_, **kwargs_) else: return docInternalS3Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect3(self): return self.sect3 def set_sect3(self, sect3): self.sect3 = sect3 def add_sect3(self, value): self.sect3.append(value) def insert_sect3(self, index, value): self.sect3[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS3Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS3Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS3Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS3Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None or self.sect3 is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS3Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect3': childobj_ = docSect4Type.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'sect3', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS3Type class docInternalS4Type(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None, mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docInternalS4Type.subclass: return docInternalS4Type.subclass(*args_, **kwargs_) else: return docInternalS4Type(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def export(self, outfile, level, namespace_='', name_='docInternalS4Type', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docInternalS4Type') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docInternalS4Type'): pass def exportChildren(self, outfile, level, namespace_='', name_='docInternalS4Type'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docInternalS4Type'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': childobj_ = docParaType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'para', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docInternalS4Type class docTitleType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docTitleType.subclass: return docTitleType.subclass(*args_, **kwargs_) else: return docTitleType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docTitleType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docTitleType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docTitleType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docTitleType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTitleType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docTitleType class docParaType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docParaType.subclass: return docParaType.subclass(*args_, **kwargs_) else: return docParaType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docParaType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParaType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docParaType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docParaType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParaType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docParaType class docMarkupType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docMarkupType.subclass: return docMarkupType.subclass(*args_, **kwargs_) else: return docMarkupType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docMarkupType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docMarkupType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docMarkupType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docMarkupType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docMarkupType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docMarkupType class docURLLink(GeneratedsSuper): subclass = None superclass = None def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None): self.url = url if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docURLLink.subclass: return docURLLink.subclass(*args_, **kwargs_) else: return docURLLink(*args_, **kwargs_) factory = staticmethod(factory) def get_url(self): return self.url def set_url(self, url): self.url = url def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docURLLink', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docURLLink') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docURLLink'): if self.url is not None: outfile.write(' url=%s' % (self.format_string(quote_attrib(self.url).encode(ExternalEncoding), input_name='url'), )) def exportChildren(self, outfile, level, namespace_='', name_='docURLLink'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docURLLink'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.url is not None: showIndent(outfile, level) outfile.write('url = %s,\n' % (self.url,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('url'): self.url = attrs.get('url').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docURLLink class docAnchorType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docAnchorType.subclass: return docAnchorType.subclass(*args_, **kwargs_) else: return docAnchorType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docAnchorType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docAnchorType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docAnchorType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docAnchorType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docAnchorType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docAnchorType class docFormulaType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docFormulaType.subclass: return docFormulaType.subclass(*args_, **kwargs_) else: return docFormulaType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docFormulaType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docFormulaType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docFormulaType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docFormulaType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docFormulaType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docFormulaType class docIndexEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, primaryie=None, secondaryie=None): self.primaryie = primaryie self.secondaryie = secondaryie def factory(*args_, **kwargs_): if docIndexEntryType.subclass: return docIndexEntryType.subclass(*args_, **kwargs_) else: return docIndexEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_primaryie(self): return self.primaryie def set_primaryie(self, primaryie): self.primaryie = primaryie def get_secondaryie(self): return self.secondaryie def set_secondaryie(self, secondaryie): self.secondaryie = secondaryie def export(self, outfile, level, namespace_='', name_='docIndexEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docIndexEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docIndexEntryType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docIndexEntryType'): if self.primaryie is not None: showIndent(outfile, level) outfile.write('<%sprimaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.primaryie).encode(ExternalEncoding), input_name='primaryie'), namespace_)) if self.secondaryie is not None: showIndent(outfile, level) outfile.write('<%ssecondaryie>%s\n' % (namespace_, self.format_string(quote_xml(self.secondaryie).encode(ExternalEncoding), input_name='secondaryie'), namespace_)) def hasContent_(self): if ( self.primaryie is not None or self.secondaryie is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docIndexEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('primaryie=%s,\n' % quote_python(self.primaryie).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('secondaryie=%s,\n' % quote_python(self.secondaryie).encode(ExternalEncoding)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'primaryie': primaryie_ = '' for text__content_ in child_.childNodes: primaryie_ += text__content_.nodeValue self.primaryie = primaryie_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'secondaryie': secondaryie_ = '' for text__content_ in child_.childNodes: secondaryie_ += text__content_.nodeValue self.secondaryie = secondaryie_ # end class docIndexEntryType class docListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, listitem=None): if listitem is None: self.listitem = [] else: self.listitem = listitem def factory(*args_, **kwargs_): if docListType.subclass: return docListType.subclass(*args_, **kwargs_) else: return docListType(*args_, **kwargs_) factory = staticmethod(factory) def get_listitem(self): return self.listitem def set_listitem(self, listitem): self.listitem = listitem def add_listitem(self, value): self.listitem.append(value) def insert_listitem(self, index, value): self.listitem[index] = value def export(self, outfile, level, namespace_='', name_='docListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docListType'): for listitem_ in self.listitem: listitem_.export(outfile, level, namespace_, name_='listitem') def hasContent_(self): if ( self.listitem is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('listitem=[\n') level += 1 for listitem in self.listitem: showIndent(outfile, level) outfile.write('model_.listitem(\n') listitem.exportLiteral(outfile, level, name_='listitem') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'listitem': obj_ = docListItemType.factory() obj_.build(child_) self.listitem.append(obj_) # end class docListType class docListItemType(GeneratedsSuper): subclass = None superclass = None def __init__(self, para=None): if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docListItemType.subclass: return docListItemType.subclass(*args_, **kwargs_) else: return docListItemType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def export(self, outfile, level, namespace_='', name_='docListItemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docListItemType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docListItemType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docListItemType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docListItemType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docListItemType class docSimpleSectType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, title=None, para=None): self.kind = kind self.title = title if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docSimpleSectType.subclass: return docSimpleSectType.subclass(*args_, **kwargs_) else: return docSimpleSectType(*args_, **kwargs_) factory = staticmethod(factory) def get_title(self): return self.title def set_title(self, title): self.title = title def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def export(self, outfile, level, namespace_='', name_='docSimpleSectType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docSimpleSectType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) def exportChildren(self, outfile, level, namespace_='', name_='docSimpleSectType'): if self.title: self.title.export(outfile, level, namespace_, name_='title') for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.title is not None or self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docSimpleSectType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) def exportLiteralChildren(self, outfile, level, name_): if self.title: showIndent(outfile, level) outfile.write('title=model_.docTitleType(\n') self.title.exportLiteral(outfile, level, name_='title') showIndent(outfile, level) outfile.write('),\n') showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'title': obj_ = docTitleType.factory() obj_.build(child_) self.set_title(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docSimpleSectType class docVarListEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, term=None): self.term = term def factory(*args_, **kwargs_): if docVarListEntryType.subclass: return docVarListEntryType.subclass(*args_, **kwargs_) else: return docVarListEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_term(self): return self.term def set_term(self, term): self.term = term def export(self, outfile, level, namespace_='', name_='docVarListEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docVarListEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docVarListEntryType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docVarListEntryType'): if self.term: self.term.export(outfile, level, namespace_, name_='term', ) def hasContent_(self): if ( self.term is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docVarListEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): if self.term: showIndent(outfile, level) outfile.write('term=model_.docTitleType(\n') self.term.exportLiteral(outfile, level, name_='term') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'term': obj_ = docTitleType.factory() obj_.build(child_) self.set_term(obj_) # end class docVarListEntryType class docVariableListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docVariableListType.subclass: return docVariableListType.subclass(*args_, **kwargs_) else: return docVariableListType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docVariableListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docVariableListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docVariableListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docVariableListType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docVariableListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docVariableListType class docRefTextType(GeneratedsSuper): subclass = None superclass = None def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None): self.refid = refid self.kindref = kindref self.external = external if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docRefTextType.subclass: return docRefTextType.subclass(*args_, **kwargs_) else: return docRefTextType(*args_, **kwargs_) factory = staticmethod(factory) def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def get_kindref(self): return self.kindref def set_kindref(self, kindref): self.kindref = kindref def get_external(self): return self.external def set_external(self, external): self.external = external def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docRefTextType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docRefTextType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docRefTextType'): if self.refid is not None: outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) if self.kindref is not None: outfile.write(' kindref=%s' % (quote_attrib(self.kindref), )) if self.external is not None: outfile.write(' external=%s' % (self.format_string(quote_attrib(self.external).encode(ExternalEncoding), input_name='external'), )) def exportChildren(self, outfile, level, namespace_='', name_='docRefTextType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docRefTextType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) if self.kindref is not None: showIndent(outfile, level) outfile.write('kindref = "%s",\n' % (self.kindref,)) if self.external is not None: showIndent(outfile, level) outfile.write('external = %s,\n' % (self.external,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('refid'): self.refid = attrs.get('refid').value if attrs.get('kindref'): self.kindref = attrs.get('kindref').value if attrs.get('external'): self.external = attrs.get('external').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docRefTextType class docTableType(GeneratedsSuper): subclass = None superclass = None def __init__(self, rows=None, cols=None, row=None, caption=None): self.rows = rows self.cols = cols if row is None: self.row = [] else: self.row = row self.caption = caption def factory(*args_, **kwargs_): if docTableType.subclass: return docTableType.subclass(*args_, **kwargs_) else: return docTableType(*args_, **kwargs_) factory = staticmethod(factory) def get_row(self): return self.row def set_row(self, row): self.row = row def add_row(self, value): self.row.append(value) def insert_row(self, index, value): self.row[index] = value def get_caption(self): return self.caption def set_caption(self, caption): self.caption = caption def get_rows(self): return self.rows def set_rows(self, rows): self.rows = rows def get_cols(self): return self.cols def set_cols(self, cols): self.cols = cols def export(self, outfile, level, namespace_='', name_='docTableType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docTableType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docTableType'): if self.rows is not None: outfile.write(' rows="%s"' % self.format_integer(self.rows, input_name='rows')) if self.cols is not None: outfile.write(' cols="%s"' % self.format_integer(self.cols, input_name='cols')) def exportChildren(self, outfile, level, namespace_='', name_='docTableType'): for row_ in self.row: row_.export(outfile, level, namespace_, name_='row') if self.caption: self.caption.export(outfile, level, namespace_, name_='caption') def hasContent_(self): if ( self.row is not None or self.caption is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTableType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.rows is not None: showIndent(outfile, level) outfile.write('rows = %s,\n' % (self.rows,)) if self.cols is not None: showIndent(outfile, level) outfile.write('cols = %s,\n' % (self.cols,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('row=[\n') level += 1 for row in self.row: showIndent(outfile, level) outfile.write('model_.row(\n') row.exportLiteral(outfile, level, name_='row') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.caption: showIndent(outfile, level) outfile.write('caption=model_.docCaptionType(\n') self.caption.exportLiteral(outfile, level, name_='caption') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('rows'): try: self.rows = int(attrs.get('rows').value) except ValueError, exp: raise ValueError('Bad integer attribute (rows): %s' % exp) if attrs.get('cols'): try: self.cols = int(attrs.get('cols').value) except ValueError, exp: raise ValueError('Bad integer attribute (cols): %s' % exp) def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'row': obj_ = docRowType.factory() obj_.build(child_) self.row.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'caption': obj_ = docCaptionType.factory() obj_.build(child_) self.set_caption(obj_) # end class docTableType class docRowType(GeneratedsSuper): subclass = None superclass = None def __init__(self, entry=None): if entry is None: self.entry = [] else: self.entry = entry def factory(*args_, **kwargs_): if docRowType.subclass: return docRowType.subclass(*args_, **kwargs_) else: return docRowType(*args_, **kwargs_) factory = staticmethod(factory) def get_entry(self): return self.entry def set_entry(self, entry): self.entry = entry def add_entry(self, value): self.entry.append(value) def insert_entry(self, index, value): self.entry[index] = value def export(self, outfile, level, namespace_='', name_='docRowType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docRowType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docRowType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docRowType'): for entry_ in self.entry: entry_.export(outfile, level, namespace_, name_='entry') def hasContent_(self): if ( self.entry is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docRowType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('entry=[\n') level += 1 for entry in self.entry: showIndent(outfile, level) outfile.write('model_.entry(\n') entry.exportLiteral(outfile, level, name_='entry') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'entry': obj_ = docEntryType.factory() obj_.build(child_) self.entry.append(obj_) # end class docRowType class docEntryType(GeneratedsSuper): subclass = None superclass = None def __init__(self, thead=None, para=None): self.thead = thead if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docEntryType.subclass: return docEntryType.subclass(*args_, **kwargs_) else: return docEntryType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_thead(self): return self.thead def set_thead(self, thead): self.thead = thead def export(self, outfile, level, namespace_='', name_='docEntryType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docEntryType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docEntryType'): if self.thead is not None: outfile.write(' thead=%s' % (quote_attrib(self.thead), )) def exportChildren(self, outfile, level, namespace_='', name_='docEntryType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docEntryType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.thead is not None: showIndent(outfile, level) outfile.write('thead = "%s",\n' % (self.thead,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('thead'): self.thead = attrs.get('thead').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docEntryType class docCaptionType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_='', mixedclass_=None, content_=None): if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docCaptionType.subclass: return docCaptionType.subclass(*args_, **kwargs_) else: return docCaptionType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docCaptionType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCaptionType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docCaptionType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docCaptionType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docCaptionType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docCaptionType class docHeadingType(GeneratedsSuper): subclass = None superclass = None def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None): self.level = level if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docHeadingType.subclass: return docHeadingType.subclass(*args_, **kwargs_) else: return docHeadingType(*args_, **kwargs_) factory = staticmethod(factory) def get_level(self): return self.level def set_level(self, level): self.level = level def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docHeadingType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docHeadingType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docHeadingType'): if self.level is not None: outfile.write(' level="%s"' % self.format_integer(self.level, input_name='level')) def exportChildren(self, outfile, level, namespace_='', name_='docHeadingType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docHeadingType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.level is not None: showIndent(outfile, level) outfile.write('level = %s,\n' % (self.level,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('level'): try: self.level = int(attrs.get('level').value) except ValueError, exp: raise ValueError('Bad integer attribute (level): %s' % exp) def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docHeadingType class docImageType(GeneratedsSuper): subclass = None superclass = None def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None): self.width = width self.type_ = type_ self.name = name self.height = height if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docImageType.subclass: return docImageType.subclass(*args_, **kwargs_) else: return docImageType(*args_, **kwargs_) factory = staticmethod(factory) def get_width(self): return self.width def set_width(self, width): self.width = width def get_type(self): return self.type_ def set_type(self, type_): self.type_ = type_ def get_name(self): return self.name def set_name(self, name): self.name = name def get_height(self): return self.height def set_height(self, height): self.height = height def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docImageType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docImageType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docImageType'): if self.width is not None: outfile.write(' width=%s' % (self.format_string(quote_attrib(self.width).encode(ExternalEncoding), input_name='width'), )) if self.type_ is not None: outfile.write(' type=%s' % (quote_attrib(self.type_), )) if self.name is not None: outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) if self.height is not None: outfile.write(' height=%s' % (self.format_string(quote_attrib(self.height).encode(ExternalEncoding), input_name='height'), )) def exportChildren(self, outfile, level, namespace_='', name_='docImageType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docImageType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.width is not None: showIndent(outfile, level) outfile.write('width = %s,\n' % (self.width,)) if self.type_ is not None: showIndent(outfile, level) outfile.write('type_ = "%s",\n' % (self.type_,)) if self.name is not None: showIndent(outfile, level) outfile.write('name = %s,\n' % (self.name,)) if self.height is not None: showIndent(outfile, level) outfile.write('height = %s,\n' % (self.height,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('width'): self.width = attrs.get('width').value if attrs.get('type'): self.type_ = attrs.get('type').value if attrs.get('name'): self.name = attrs.get('name').value if attrs.get('height'): self.height = attrs.get('height').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docImageType class docDotFileType(GeneratedsSuper): subclass = None superclass = None def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None): self.name = name if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docDotFileType.subclass: return docDotFileType.subclass(*args_, **kwargs_) else: return docDotFileType(*args_, **kwargs_) factory = staticmethod(factory) def get_name(self): return self.name def set_name(self, name): self.name = name def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docDotFileType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docDotFileType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docDotFileType'): if self.name is not None: outfile.write(' name=%s' % (self.format_string(quote_attrib(self.name).encode(ExternalEncoding), input_name='name'), )) def exportChildren(self, outfile, level, namespace_='', name_='docDotFileType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docDotFileType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.name is not None: showIndent(outfile, level) outfile.write('name = %s,\n' % (self.name,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('name'): self.name = attrs.get('name').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docDotFileType class docTocItemType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None): self.id = id if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docTocItemType.subclass: return docTocItemType.subclass(*args_, **kwargs_) else: return docTocItemType(*args_, **kwargs_) factory = staticmethod(factory) def get_id(self): return self.id def set_id(self, id): self.id = id def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docTocItemType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docTocItemType') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docTocItemType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docTocItemType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTocItemType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docTocItemType class docTocListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, tocitem=None): if tocitem is None: self.tocitem = [] else: self.tocitem = tocitem def factory(*args_, **kwargs_): if docTocListType.subclass: return docTocListType.subclass(*args_, **kwargs_) else: return docTocListType(*args_, **kwargs_) factory = staticmethod(factory) def get_tocitem(self): return self.tocitem def set_tocitem(self, tocitem): self.tocitem = tocitem def add_tocitem(self, value): self.tocitem.append(value) def insert_tocitem(self, index, value): self.tocitem[index] = value def export(self, outfile, level, namespace_='', name_='docTocListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docTocListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docTocListType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docTocListType'): for tocitem_ in self.tocitem: tocitem_.export(outfile, level, namespace_, name_='tocitem') def hasContent_(self): if ( self.tocitem is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docTocListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('tocitem=[\n') level += 1 for tocitem in self.tocitem: showIndent(outfile, level) outfile.write('model_.tocitem(\n') tocitem.exportLiteral(outfile, level, name_='tocitem') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'tocitem': obj_ = docTocItemType.factory() obj_.build(child_) self.tocitem.append(obj_) # end class docTocListType class docLanguageType(GeneratedsSuper): subclass = None superclass = None def __init__(self, langid=None, para=None): self.langid = langid if para is None: self.para = [] else: self.para = para def factory(*args_, **kwargs_): if docLanguageType.subclass: return docLanguageType.subclass(*args_, **kwargs_) else: return docLanguageType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_langid(self): return self.langid def set_langid(self, langid): self.langid = langid def export(self, outfile, level, namespace_='', name_='docLanguageType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docLanguageType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docLanguageType'): if self.langid is not None: outfile.write(' langid=%s' % (self.format_string(quote_attrib(self.langid).encode(ExternalEncoding), input_name='langid'), )) def exportChildren(self, outfile, level, namespace_='', name_='docLanguageType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') def hasContent_(self): if ( self.para is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docLanguageType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.langid is not None: showIndent(outfile, level) outfile.write('langid = %s,\n' % (self.langid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('langid'): self.langid = attrs.get('langid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) # end class docLanguageType class docParamListType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, parameteritem=None): self.kind = kind if parameteritem is None: self.parameteritem = [] else: self.parameteritem = parameteritem def factory(*args_, **kwargs_): if docParamListType.subclass: return docParamListType.subclass(*args_, **kwargs_) else: return docParamListType(*args_, **kwargs_) factory = staticmethod(factory) def get_parameteritem(self): return self.parameteritem def set_parameteritem(self, parameteritem): self.parameteritem = parameteritem def add_parameteritem(self, value): self.parameteritem.append(value) def insert_parameteritem(self, index, value): self.parameteritem[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def export(self, outfile, level, namespace_='', name_='docParamListType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamListType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docParamListType'): if self.kind is not None: outfile.write(' kind=%s' % (quote_attrib(self.kind), )) def exportChildren(self, outfile, level, namespace_='', name_='docParamListType'): for parameteritem_ in self.parameteritem: parameteritem_.export(outfile, level, namespace_, name_='parameteritem') def hasContent_(self): if ( self.parameteritem is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamListType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parameteritem=[\n') level += 1 for parameteritem in self.parameteritem: showIndent(outfile, level) outfile.write('model_.parameteritem(\n') parameteritem.exportLiteral(outfile, level, name_='parameteritem') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parameteritem': obj_ = docParamListItem.factory() obj_.build(child_) self.parameteritem.append(obj_) # end class docParamListType class docParamListItem(GeneratedsSuper): subclass = None superclass = None def __init__(self, parameternamelist=None, parameterdescription=None): if parameternamelist is None: self.parameternamelist = [] else: self.parameternamelist = parameternamelist self.parameterdescription = parameterdescription def factory(*args_, **kwargs_): if docParamListItem.subclass: return docParamListItem.subclass(*args_, **kwargs_) else: return docParamListItem(*args_, **kwargs_) factory = staticmethod(factory) def get_parameternamelist(self): return self.parameternamelist def set_parameternamelist(self, parameternamelist): self.parameternamelist = parameternamelist def add_parameternamelist(self, value): self.parameternamelist.append(value) def insert_parameternamelist(self, index, value): self.parameternamelist[index] = value def get_parameterdescription(self): return self.parameterdescription def set_parameterdescription(self, parameterdescription): self.parameterdescription = parameterdescription def export(self, outfile, level, namespace_='', name_='docParamListItem', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamListItem') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docParamListItem'): pass def exportChildren(self, outfile, level, namespace_='', name_='docParamListItem'): for parameternamelist_ in self.parameternamelist: parameternamelist_.export(outfile, level, namespace_, name_='parameternamelist') if self.parameterdescription: self.parameterdescription.export(outfile, level, namespace_, name_='parameterdescription', ) def hasContent_(self): if ( self.parameternamelist is not None or self.parameterdescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamListItem'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parameternamelist=[\n') level += 1 for parameternamelist in self.parameternamelist: showIndent(outfile, level) outfile.write('model_.parameternamelist(\n') parameternamelist.exportLiteral(outfile, level, name_='parameternamelist') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.parameterdescription: showIndent(outfile, level) outfile.write('parameterdescription=model_.descriptionType(\n') self.parameterdescription.exportLiteral(outfile, level, name_='parameterdescription') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parameternamelist': obj_ = docParamNameList.factory() obj_.build(child_) self.parameternamelist.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parameterdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_parameterdescription(obj_) # end class docParamListItem class docParamNameList(GeneratedsSuper): subclass = None superclass = None def __init__(self, parametername=None): if parametername is None: self.parametername = [] else: self.parametername = parametername def factory(*args_, **kwargs_): if docParamNameList.subclass: return docParamNameList.subclass(*args_, **kwargs_) else: return docParamNameList(*args_, **kwargs_) factory = staticmethod(factory) def get_parametername(self): return self.parametername def set_parametername(self, parametername): self.parametername = parametername def add_parametername(self, value): self.parametername.append(value) def insert_parametername(self, index, value): self.parametername[index] = value def export(self, outfile, level, namespace_='', name_='docParamNameList', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamNameList') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docParamNameList'): pass def exportChildren(self, outfile, level, namespace_='', name_='docParamNameList'): for parametername_ in self.parametername: parametername_.export(outfile, level, namespace_, name_='parametername') def hasContent_(self): if ( self.parametername is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamNameList'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('parametername=[\n') level += 1 for parametername in self.parametername: showIndent(outfile, level) outfile.write('model_.parametername(\n') parametername.exportLiteral(outfile, level, name_='parametername') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'parametername': obj_ = docParamName.factory() obj_.build(child_) self.parametername.append(obj_) # end class docParamNameList class docParamName(GeneratedsSuper): subclass = None superclass = None def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None): self.direction = direction if mixedclass_ is None: self.mixedclass_ = MixedContainer else: self.mixedclass_ = mixedclass_ if content_ is None: self.content_ = [] else: self.content_ = content_ def factory(*args_, **kwargs_): if docParamName.subclass: return docParamName.subclass(*args_, **kwargs_) else: return docParamName(*args_, **kwargs_) factory = staticmethod(factory) def get_ref(self): return self.ref def set_ref(self, ref): self.ref = ref def get_direction(self): return self.direction def set_direction(self, direction): self.direction = direction def export(self, outfile, level, namespace_='', name_='docParamName', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docParamName') outfile.write('>') self.exportChildren(outfile, level + 1, namespace_, name_) outfile.write('\n' % (namespace_, name_)) def exportAttributes(self, outfile, level, namespace_='', name_='docParamName'): if self.direction is not None: outfile.write(' direction=%s' % (quote_attrib(self.direction), )) def exportChildren(self, outfile, level, namespace_='', name_='docParamName'): for item_ in self.content_: item_.export(outfile, level, item_.name, namespace_) def hasContent_(self): if ( self.ref is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docParamName'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.direction is not None: showIndent(outfile, level) outfile.write('direction = "%s",\n' % (self.direction,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('content_ = [\n') for item_ in self.content_: item_.exportLiteral(outfile, level, name_) showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('direction'): self.direction = attrs.get('direction').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'ref': childobj_ = docRefTextType.factory() childobj_.build(child_) obj_ = self.mixedclass_(MixedContainer.CategoryComplex, MixedContainer.TypeNone, 'ref', childobj_) self.content_.append(obj_) elif child_.nodeType == Node.TEXT_NODE: obj_ = self.mixedclass_(MixedContainer.CategoryText, MixedContainer.TypeNone, '', child_.nodeValue) self.content_.append(obj_) # end class docParamName class docXRefSectType(GeneratedsSuper): subclass = None superclass = None def __init__(self, id=None, xreftitle=None, xrefdescription=None): self.id = id if xreftitle is None: self.xreftitle = [] else: self.xreftitle = xreftitle self.xrefdescription = xrefdescription def factory(*args_, **kwargs_): if docXRefSectType.subclass: return docXRefSectType.subclass(*args_, **kwargs_) else: return docXRefSectType(*args_, **kwargs_) factory = staticmethod(factory) def get_xreftitle(self): return self.xreftitle def set_xreftitle(self, xreftitle): self.xreftitle = xreftitle def add_xreftitle(self, value): self.xreftitle.append(value) def insert_xreftitle(self, index, value): self.xreftitle[index] = value def get_xrefdescription(self): return self.xrefdescription def set_xrefdescription(self, xrefdescription): self.xrefdescription = xrefdescription def get_id(self): return self.id def set_id(self, id): self.id = id def export(self, outfile, level, namespace_='', name_='docXRefSectType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docXRefSectType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docXRefSectType'): if self.id is not None: outfile.write(' id=%s' % (self.format_string(quote_attrib(self.id).encode(ExternalEncoding), input_name='id'), )) def exportChildren(self, outfile, level, namespace_='', name_='docXRefSectType'): for xreftitle_ in self.xreftitle: showIndent(outfile, level) outfile.write('<%sxreftitle>%s\n' % (namespace_, self.format_string(quote_xml(xreftitle_).encode(ExternalEncoding), input_name='xreftitle'), namespace_)) if self.xrefdescription: self.xrefdescription.export(outfile, level, namespace_, name_='xrefdescription', ) def hasContent_(self): if ( self.xreftitle is not None or self.xrefdescription is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docXRefSectType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.id is not None: showIndent(outfile, level) outfile.write('id = %s,\n' % (self.id,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('xreftitle=[\n') level += 1 for xreftitle in self.xreftitle: showIndent(outfile, level) outfile.write('%s,\n' % quote_python(xreftitle).encode(ExternalEncoding)) level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.xrefdescription: showIndent(outfile, level) outfile.write('xrefdescription=model_.descriptionType(\n') self.xrefdescription.exportLiteral(outfile, level, name_='xrefdescription') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('id'): self.id = attrs.get('id').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'xreftitle': xreftitle_ = '' for text__content_ in child_.childNodes: xreftitle_ += text__content_.nodeValue self.xreftitle.append(xreftitle_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'xrefdescription': obj_ = descriptionType.factory() obj_.build(child_) self.set_xrefdescription(obj_) # end class docXRefSectType class docCopyType(GeneratedsSuper): subclass = None superclass = None def __init__(self, link=None, para=None, sect1=None, internal=None): self.link = link if para is None: self.para = [] else: self.para = para if sect1 is None: self.sect1 = [] else: self.sect1 = sect1 self.internal = internal def factory(*args_, **kwargs_): if docCopyType.subclass: return docCopyType.subclass(*args_, **kwargs_) else: return docCopyType(*args_, **kwargs_) factory = staticmethod(factory) def get_para(self): return self.para def set_para(self, para): self.para = para def add_para(self, value): self.para.append(value) def insert_para(self, index, value): self.para[index] = value def get_sect1(self): return self.sect1 def set_sect1(self, sect1): self.sect1 = sect1 def add_sect1(self, value): self.sect1.append(value) def insert_sect1(self, index, value): self.sect1[index] = value def get_internal(self): return self.internal def set_internal(self, internal): self.internal = internal def get_link(self): return self.link def set_link(self, link): self.link = link def export(self, outfile, level, namespace_='', name_='docCopyType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCopyType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docCopyType'): if self.link is not None: outfile.write(' link=%s' % (self.format_string(quote_attrib(self.link).encode(ExternalEncoding), input_name='link'), )) def exportChildren(self, outfile, level, namespace_='', name_='docCopyType'): for para_ in self.para: para_.export(outfile, level, namespace_, name_='para') for sect1_ in self.sect1: sect1_.export(outfile, level, namespace_, name_='sect1') if self.internal: self.internal.export(outfile, level, namespace_, name_='internal') def hasContent_(self): if ( self.para is not None or self.sect1 is not None or self.internal is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docCopyType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.link is not None: showIndent(outfile, level) outfile.write('link = %s,\n' % (self.link,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('para=[\n') level += 1 for para in self.para: showIndent(outfile, level) outfile.write('model_.para(\n') para.exportLiteral(outfile, level, name_='para') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') showIndent(outfile, level) outfile.write('sect1=[\n') level += 1 for sect1 in self.sect1: showIndent(outfile, level) outfile.write('model_.sect1(\n') sect1.exportLiteral(outfile, level, name_='sect1') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') if self.internal: showIndent(outfile, level) outfile.write('internal=model_.docInternalType(\n') self.internal.exportLiteral(outfile, level, name_='internal') showIndent(outfile, level) outfile.write('),\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('link'): self.link = attrs.get('link').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'para': obj_ = docParaType.factory() obj_.build(child_) self.para.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'sect1': obj_ = docSect1Type.factory() obj_.build(child_) self.sect1.append(obj_) elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'internal': obj_ = docInternalType.factory() obj_.build(child_) self.set_internal(obj_) # end class docCopyType class docCharType(GeneratedsSuper): subclass = None superclass = None def __init__(self, char=None, valueOf_=''): self.char = char self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docCharType.subclass: return docCharType.subclass(*args_, **kwargs_) else: return docCharType(*args_, **kwargs_) factory = staticmethod(factory) def get_char(self): return self.char def set_char(self, char): self.char = char def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docCharType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docCharType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docCharType'): if self.char is not None: outfile.write(' char=%s' % (quote_attrib(self.char), )) def exportChildren(self, outfile, level, namespace_='', name_='docCharType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docCharType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.char is not None: showIndent(outfile, level) outfile.write('char = "%s",\n' % (self.char,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('char'): self.char = attrs.get('char').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docCharType class docEmptyType(GeneratedsSuper): subclass = None superclass = None def __init__(self, valueOf_=''): self.valueOf_ = valueOf_ def factory(*args_, **kwargs_): if docEmptyType.subclass: return docEmptyType.subclass(*args_, **kwargs_) else: return docEmptyType(*args_, **kwargs_) factory = staticmethod(factory) def getValueOf_(self): return self.valueOf_ def setValueOf_(self, valueOf_): self.valueOf_ = valueOf_ def export(self, outfile, level, namespace_='', name_='docEmptyType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='docEmptyType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='docEmptyType'): pass def exportChildren(self, outfile, level, namespace_='', name_='docEmptyType'): if self.valueOf_.find('![CDATA')>-1: value=quote_xml('%s' % self.valueOf_) value=value.replace('![CDATA','') outfile.write(value) else: outfile.write(quote_xml('%s' % self.valueOf_)) def hasContent_(self): if ( self.valueOf_ is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='docEmptyType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): pass def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('valueOf_ = "%s",\n' % (self.valueOf_,)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) self.valueOf_ = '' for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): pass def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.TEXT_NODE: self.valueOf_ += child_.nodeValue elif child_.nodeType == Node.CDATA_SECTION_NODE: self.valueOf_ += '![CDATA['+child_.nodeValue+']]' # end class docEmptyType USAGE_TEXT = """ Usage: python .py [ -s ] Options: -s Use the SAX parser, not the minidom parser. """ def usage(): print USAGE_TEXT sys.exit(1) def parse(inFileName): doc = minidom.parse(inFileName) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygen", namespacedef_='') return rootObj def parseString(inString): doc = minidom.parseString(inString) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygen", namespacedef_='') return rootObj def parseLiteral(inFileName): doc = minidom.parse(inFileName) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('from compound import *\n\n') sys.stdout.write('rootObj = doxygen(\n') rootObj.exportLiteral(sys.stdout, 0, name_="doxygen") sys.stdout.write(')\n') return rootObj def main(): args = sys.argv[1:] if len(args) == 1: parse(args[0]) else: usage() if __name__ == '__main__': main() #import pdb #pdb.run('main()') gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/generated/index.py000066400000000000000000000035171221634657000245470ustar00rootroot00000000000000#!/usr/bin/env python """ Generated Mon Feb 9 19:08:05 2009 by generateDS.py. """ from xml.dom import minidom import os import sys import compound import indexsuper as supermod class DoxygenTypeSub(supermod.DoxygenType): def __init__(self, version=None, compound=None): supermod.DoxygenType.__init__(self, version, compound) def find_compounds_and_members(self, details): """ Returns a list of all compounds and their members which match details """ results = [] for compound in self.compound: members = compound.find_members(details) if members: results.append([compound, members]) else: if details.match(compound): results.append([compound, []]) return results supermod.DoxygenType.subclass = DoxygenTypeSub # end class DoxygenTypeSub class CompoundTypeSub(supermod.CompoundType): def __init__(self, kind=None, refid=None, name='', member=None): supermod.CompoundType.__init__(self, kind, refid, name, member) def find_members(self, details): """ Returns a list of all members which match details """ results = [] for member in self.member: if details.match(member): results.append(member) return results supermod.CompoundType.subclass = CompoundTypeSub # end class CompoundTypeSub class MemberTypeSub(supermod.MemberType): def __init__(self, kind=None, refid=None, name=''): supermod.MemberType.__init__(self, kind, refid, name) supermod.MemberType.subclass = MemberTypeSub # end class MemberTypeSub def parse(inFilename): doc = minidom.parse(inFilename) rootNode = doc.documentElement rootObj = supermod.DoxygenType.factory() rootObj.build(rootNode) return rootObj gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/generated/indexsuper.py000066400000000000000000000455261221634657000256340ustar00rootroot00000000000000#!/usr/bin/env python # # Generated Thu Jun 11 18:43:54 2009 by generateDS.py. # import sys import getopt from string import lower as str_lower from xml.dom import minidom from xml.dom import Node # # User methods # # Calls to the methods in these classes are generated by generateDS.py. # You can replace these methods by re-implementing the following class # in a module named generatedssuper.py. try: from generatedssuper import GeneratedsSuper except ImportError, exp: class GeneratedsSuper: def format_string(self, input_data, input_name=''): return input_data def format_integer(self, input_data, input_name=''): return '%d' % input_data def format_float(self, input_data, input_name=''): return '%f' % input_data def format_double(self, input_data, input_name=''): return '%e' % input_data def format_boolean(self, input_data, input_name=''): return '%s' % input_data # # If you have installed IPython you can uncomment and use the following. # IPython is available from http://ipython.scipy.org/. # ## from IPython.Shell import IPShellEmbed ## args = '' ## ipshell = IPShellEmbed(args, ## banner = 'Dropping into IPython', ## exit_msg = 'Leaving Interpreter, back to program.') # Then use the following line where and when you want to drop into the # IPython shell: # ipshell(' -- Entering ipshell.\nHit Ctrl-D to exit') # # Globals # ExternalEncoding = 'ascii' # # Support/utility functions. # def showIndent(outfile, level): for idx in range(level): outfile.write(' ') def quote_xml(inStr): s1 = (isinstance(inStr, basestring) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') return s1 def quote_attrib(inStr): s1 = (isinstance(inStr, basestring) and inStr or '%s' % inStr) s1 = s1.replace('&', '&') s1 = s1.replace('<', '<') s1 = s1.replace('>', '>') if '"' in s1: if "'" in s1: s1 = '"%s"' % s1.replace('"', """) else: s1 = "'%s'" % s1 else: s1 = '"%s"' % s1 return s1 def quote_python(inStr): s1 = inStr if s1.find("'") == -1: if s1.find('\n') == -1: return "'%s'" % s1 else: return "'''%s'''" % s1 else: if s1.find('"') != -1: s1 = s1.replace('"', '\\"') if s1.find('\n') == -1: return '"%s"' % s1 else: return '"""%s"""' % s1 class MixedContainer: # Constants for category: CategoryNone = 0 CategoryText = 1 CategorySimple = 2 CategoryComplex = 3 # Constants for content_type: TypeNone = 0 TypeText = 1 TypeString = 2 TypeInteger = 3 TypeFloat = 4 TypeDecimal = 5 TypeDouble = 6 TypeBoolean = 7 def __init__(self, category, content_type, name, value): self.category = category self.content_type = content_type self.name = name self.value = value def getCategory(self): return self.category def getContenttype(self, content_type): return self.content_type def getValue(self): return self.value def getName(self): return self.name def export(self, outfile, level, name, namespace): if self.category == MixedContainer.CategoryText: outfile.write(self.value) elif self.category == MixedContainer.CategorySimple: self.exportSimple(outfile, level, name) else: # category == MixedContainer.CategoryComplex self.value.export(outfile, level, namespace,name) def exportSimple(self, outfile, level, name): if self.content_type == MixedContainer.TypeString: outfile.write('<%s>%s' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeInteger or \ self.content_type == MixedContainer.TypeBoolean: outfile.write('<%s>%d' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeFloat or \ self.content_type == MixedContainer.TypeDecimal: outfile.write('<%s>%f' % (self.name, self.value, self.name)) elif self.content_type == MixedContainer.TypeDouble: outfile.write('<%s>%g' % (self.name, self.value, self.name)) def exportLiteral(self, outfile, level, name): if self.category == MixedContainer.CategoryText: showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ (self.category, self.content_type, self.name, self.value)) elif self.category == MixedContainer.CategorySimple: showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s", "%s"),\n' % \ (self.category, self.content_type, self.name, self.value)) else: # category == MixedContainer.CategoryComplex showIndent(outfile, level) outfile.write('MixedContainer(%d, %d, "%s",\n' % \ (self.category, self.content_type, self.name,)) self.value.exportLiteral(outfile, level + 1) showIndent(outfile, level) outfile.write(')\n') class _MemberSpec(object): def __init__(self, name='', data_type='', container=0): self.name = name self.data_type = data_type self.container = container def set_name(self, name): self.name = name def get_name(self): return self.name def set_data_type(self, data_type): self.data_type = data_type def get_data_type(self): return self.data_type def set_container(self, container): self.container = container def get_container(self): return self.container # # Data representation classes. # class DoxygenType(GeneratedsSuper): subclass = None superclass = None def __init__(self, version=None, compound=None): self.version = version if compound is None: self.compound = [] else: self.compound = compound def factory(*args_, **kwargs_): if DoxygenType.subclass: return DoxygenType.subclass(*args_, **kwargs_) else: return DoxygenType(*args_, **kwargs_) factory = staticmethod(factory) def get_compound(self): return self.compound def set_compound(self, compound): self.compound = compound def add_compound(self, value): self.compound.append(value) def insert_compound(self, index, value): self.compound[index] = value def get_version(self): return self.version def set_version(self, version): self.version = version def export(self, outfile, level, namespace_='', name_='DoxygenType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='DoxygenType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='DoxygenType'): outfile.write(' version=%s' % (self.format_string(quote_attrib(self.version).encode(ExternalEncoding), input_name='version'), )) def exportChildren(self, outfile, level, namespace_='', name_='DoxygenType'): for compound_ in self.compound: compound_.export(outfile, level, namespace_, name_='compound') def hasContent_(self): if ( self.compound is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='DoxygenType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.version is not None: showIndent(outfile, level) outfile.write('version = %s,\n' % (self.version,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('compound=[\n') level += 1 for compound in self.compound: showIndent(outfile, level) outfile.write('model_.compound(\n') compound.exportLiteral(outfile, level, name_='compound') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('version'): self.version = attrs.get('version').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'compound': obj_ = CompoundType.factory() obj_.build(child_) self.compound.append(obj_) # end class DoxygenType class CompoundType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, refid=None, name=None, member=None): self.kind = kind self.refid = refid self.name = name if member is None: self.member = [] else: self.member = member def factory(*args_, **kwargs_): if CompoundType.subclass: return CompoundType.subclass(*args_, **kwargs_) else: return CompoundType(*args_, **kwargs_) factory = staticmethod(factory) def get_name(self): return self.name def set_name(self, name): self.name = name def get_member(self): return self.member def set_member(self, member): self.member = member def add_member(self, value): self.member.append(value) def insert_member(self, index, value): self.member[index] = value def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def export(self, outfile, level, namespace_='', name_='CompoundType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='CompoundType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='CompoundType'): outfile.write(' kind=%s' % (quote_attrib(self.kind), )) outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='CompoundType'): if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) for member_ in self.member: member_.export(outfile, level, namespace_, name_='member') def hasContent_(self): if ( self.name is not None or self.member is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='CompoundType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) showIndent(outfile, level) outfile.write('member=[\n') level += 1 for member in self.member: showIndent(outfile, level) outfile.write('model_.member(\n') member.exportLiteral(outfile, level, name_='member') showIndent(outfile, level) outfile.write('),\n') level -= 1 showIndent(outfile, level) outfile.write('],\n') def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ elif child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'member': obj_ = MemberType.factory() obj_.build(child_) self.member.append(obj_) # end class CompoundType class MemberType(GeneratedsSuper): subclass = None superclass = None def __init__(self, kind=None, refid=None, name=None): self.kind = kind self.refid = refid self.name = name def factory(*args_, **kwargs_): if MemberType.subclass: return MemberType.subclass(*args_, **kwargs_) else: return MemberType(*args_, **kwargs_) factory = staticmethod(factory) def get_name(self): return self.name def set_name(self, name): self.name = name def get_kind(self): return self.kind def set_kind(self, kind): self.kind = kind def get_refid(self): return self.refid def set_refid(self, refid): self.refid = refid def export(self, outfile, level, namespace_='', name_='MemberType', namespacedef_=''): showIndent(outfile, level) outfile.write('<%s%s %s' % (namespace_, name_, namespacedef_, )) self.exportAttributes(outfile, level, namespace_, name_='MemberType') if self.hasContent_(): outfile.write('>\n') self.exportChildren(outfile, level + 1, namespace_, name_) showIndent(outfile, level) outfile.write('\n' % (namespace_, name_)) else: outfile.write(' />\n') def exportAttributes(self, outfile, level, namespace_='', name_='MemberType'): outfile.write(' kind=%s' % (quote_attrib(self.kind), )) outfile.write(' refid=%s' % (self.format_string(quote_attrib(self.refid).encode(ExternalEncoding), input_name='refid'), )) def exportChildren(self, outfile, level, namespace_='', name_='MemberType'): if self.name is not None: showIndent(outfile, level) outfile.write('<%sname>%s\n' % (namespace_, self.format_string(quote_xml(self.name).encode(ExternalEncoding), input_name='name'), namespace_)) def hasContent_(self): if ( self.name is not None ): return True else: return False def exportLiteral(self, outfile, level, name_='MemberType'): level += 1 self.exportLiteralAttributes(outfile, level, name_) if self.hasContent_(): self.exportLiteralChildren(outfile, level, name_) def exportLiteralAttributes(self, outfile, level, name_): if self.kind is not None: showIndent(outfile, level) outfile.write('kind = "%s",\n' % (self.kind,)) if self.refid is not None: showIndent(outfile, level) outfile.write('refid = %s,\n' % (self.refid,)) def exportLiteralChildren(self, outfile, level, name_): showIndent(outfile, level) outfile.write('name=%s,\n' % quote_python(self.name).encode(ExternalEncoding)) def build(self, node_): attrs = node_.attributes self.buildAttributes(attrs) for child_ in node_.childNodes: nodeName_ = child_.nodeName.split(':')[-1] self.buildChildren(child_, nodeName_) def buildAttributes(self, attrs): if attrs.get('kind'): self.kind = attrs.get('kind').value if attrs.get('refid'): self.refid = attrs.get('refid').value def buildChildren(self, child_, nodeName_): if child_.nodeType == Node.ELEMENT_NODE and \ nodeName_ == 'name': name_ = '' for text__content_ in child_.childNodes: name_ += text__content_.nodeValue self.name = name_ # end class MemberType USAGE_TEXT = """ Usage: python .py [ -s ] Options: -s Use the SAX parser, not the minidom parser. """ def usage(): print USAGE_TEXT sys.exit(1) def parse(inFileName): doc = minidom.parse(inFileName) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygenindex", namespacedef_='') return rootObj def parseString(inString): doc = minidom.parseString(inString) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('\n') rootObj.export(sys.stdout, 0, name_="doxygenindex", namespacedef_='') return rootObj def parseLiteral(inFileName): doc = minidom.parse(inFileName) rootNode = doc.documentElement rootObj = DoxygenType.factory() rootObj.build(rootNode) # Enable Python to collect the space used by the DOM. doc = None sys.stdout.write('from index import *\n\n') sys.stdout.write('rootObj = doxygenindex(\n') rootObj.exportLiteral(sys.stdout, 0, name_="doxygenindex") sys.stdout.write(')\n') return rootObj def main(): args = sys.argv[1:] if len(args) == 1: parse(args[0]) else: usage() if __name__ == '__main__': main() #import pdb #pdb.run('main()') gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/run_tests.in000066400000000000000000000010371221634657000235010ustar00rootroot00000000000000#!/bin/sh # 1st parameter is absolute path to component source directory # 2nd parameter is absolute path to component build directory # 3rd parameter is path to Python QA directory # Note: calling master run_tests.sh in gnuradio core is not strictly # correct, as it will result in a partially bogus PYTHONPATH, but it # does make the correct paths in the second half so all is well. @PYTHON@ @srcdir@/__init__.py # @top_builddir@/run_tests.sh \ # @abs_top_srcdir@/gnuradio-core \ # @abs_top_builddir@/gnuradio-core \ # @srcdir@ gr-air-modes-0.0.0.e47992d/docs/doxygen/doxyxml/text.py000066400000000000000000000034551221634657000224670ustar00rootroot00000000000000# # Copyright 2010 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ Utilities for extracting text from generated classes. """ def is_string(txt): if isinstance(txt, str): return True try: if isinstance(txt, unicode): return True except NameError: pass return False def description(obj): if obj is None: return None return description_bit(obj).strip() def description_bit(obj): if hasattr(obj, 'content'): contents = [description_bit(item) for item in obj.content] result = ''.join(contents) elif hasattr(obj, 'content_'): contents = [description_bit(item) for item in obj.content_] result = ''.join(contents) elif hasattr(obj, 'value'): result = description_bit(obj.value) elif is_string(obj): return obj else: raise StandardError('Expecting a string or something with content, content_ or value attribute') # If this bit is a paragraph then add one some line breaks. if hasattr(obj, 'name') and obj.name == 'para': result += "\n\n" return result gr-air-modes-0.0.0.e47992d/docs/doxygen/other/000077500000000000000000000000001221634657000205375ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/docs/doxygen/other/group_defs.dox000066400000000000000000000003401221634657000234050ustar00rootroot00000000000000/*! * \defgroup block GNU Radio GR-AIR-MODES C++ Signal Processing Blocks * \brief All C++ blocks that can be used from the GR-AIR-MODES GNU Radio * module are listed here or in the subcategories below. * */ gr-air-modes-0.0.0.e47992d/docs/doxygen/other/main_page.dox000066400000000000000000000004351221634657000231750ustar00rootroot00000000000000/*! \mainpage Welcome to the GNU Radio GR-AIR-MODES Block This is the intro page for the Doxygen manual generated for the GR-AIR-MODES block (docs/doxygen/other/main_page.dox). Edit it to add more detailed documentation about the new GNU Radio modules contained in this project. */ gr-air-modes-0.0.0.e47992d/docs/doxygen/swig_doc.py000066400000000000000000000206651221634657000215770ustar00rootroot00000000000000# # Copyright 2010,2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # """ Creates the swig_doc.i SWIG interface file. Execute using: python swig_doc.py xml_path outputfilename The file instructs SWIG to transfer the doxygen comments into the python docstrings. """ import sys try: from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base except ImportError: from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base def py_name(name): bits = name.split('_') return '_'.join(bits[1:]) def make_name(name): bits = name.split('_') return bits[0] + '_make_' + '_'.join(bits[1:]) class Block(object): """ Checks if doxyxml produced objects correspond to a gnuradio block. """ @classmethod def includes(cls, item): if not isinstance(item, DoxyClass): return False # Check for a parsing error. if item.error(): return False return item.has_member(make_name(item.name()), DoxyFriend) def utoascii(text): """ Convert unicode text into ascii and escape quotes. """ if text is None: return '' out = text.encode('ascii', 'replace') out = out.replace('"', '\\"') return out def combine_descriptions(obj): """ Combines the brief and detailed descriptions of an object together. """ description = [] bd = obj.brief_description.strip() dd = obj.detailed_description.strip() if bd: description.append(bd) if dd: description.append(dd) return utoascii('\n\n'.join(description)).strip() entry_templ = '%feature("docstring") {name} "{docstring}"' def make_entry(obj, name=None, templ="{description}", description=None): """ Create a docstring entry for a swig interface file. obj - a doxyxml object from which documentation will be extracted. name - the name of the C object (defaults to obj.name()) templ - an optional template for the docstring containing only one variable named 'description'. description - if this optional variable is set then it's value is used as the description instead of extracting it from obj. """ if name is None: name=obj.name() if description is None: description = combine_descriptions(obj) docstring = templ.format(description=description) if not docstring: return '' return entry_templ.format( name=name, docstring=docstring, ) def make_func_entry(func, name=None, description=None, params=None): """ Create a function docstring entry for a swig interface file. func - a doxyxml object from which documentation will be extracted. name - the name of the C object (defaults to func.name()) description - if this optional variable is set then it's value is used as the description instead of extracting it from func. params - a parameter list that overrides using func.params. """ if params is None: params = func.params params = [prm.declname for prm in params] if params: sig = "Params: (%s)" % ", ".join(params) else: sig = "Params: (NONE)" templ = "{description}\n\n" + sig return make_entry(func, name=name, templ=utoascii(templ), description=description) def make_class_entry(klass, description=None): """ Create a class docstring for a swig interface file. """ output = [] output.append(make_entry(klass, description=description)) for func in klass.in_category(DoxyFunction): name = klass.name() + '::' + func.name() output.append(make_func_entry(func, name=name)) return "\n\n".join(output) def make_block_entry(di, block): """ Create class and function docstrings of a gnuradio block for a swig interface file. """ descriptions = [] # Get the documentation associated with the class. class_desc = combine_descriptions(block) if class_desc: descriptions.append(class_desc) # Get the documentation associated with the make function make_func = di.get_member(make_name(block.name()), DoxyFunction) make_func_desc = combine_descriptions(make_func) if make_func_desc: descriptions.append(make_func_desc) # Get the documentation associated with the file try: block_file = di.get_member(block.name() + ".h", DoxyFile) file_desc = combine_descriptions(block_file) if file_desc: descriptions.append(file_desc) except base.Base.NoSuchMember: # Don't worry if we can't find a matching file. pass # And join them all together to make a super duper description. super_description = "\n\n".join(descriptions) # Associate the combined description with the class and # the make function. output = [] output.append(make_class_entry(block, description=super_description)) creator = block.get_member(block.name(), DoxyFunction) output.append(make_func_entry(make_func, description=super_description, params=creator.params)) return "\n\n".join(output) def make_swig_interface_file(di, swigdocfilename, custom_output=None): output = [""" /* * This file was automatically generated using swig_doc.py. * * Any changes to it will be lost next time it is regenerated. */ """] if custom_output is not None: output.append(custom_output) # Create docstrings for the blocks. blocks = di.in_category(Block) make_funcs = set([]) for block in blocks: try: make_func = di.get_member(make_name(block.name()), DoxyFunction) make_funcs.add(make_func.name()) output.append(make_block_entry(di, block)) except block.ParsingError: print('Parsing error for block %s' % block.name()) # Create docstrings for functions # Don't include the make functions since they have already been dealt with. funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs] for f in funcs: try: output.append(make_func_entry(f)) except f.ParsingError: print('Parsing error for function %s' % f.name()) # Create docstrings for classes block_names = [block.name() for block in blocks] klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names] for k in klasses: try: output.append(make_class_entry(k)) except k.ParsingError: print('Parsing error for class %s' % k.name()) # Docstrings are not created for anything that is not a function or a class. # If this excludes anything important please add it here. output = "\n\n".join(output) swig_doc = file(swigdocfilename, 'w') swig_doc.write(output) swig_doc.close() if __name__ == "__main__": # Parse command line options and set up doxyxml. err_msg = "Execute using: python swig_doc.py xml_path outputfilename" if len(sys.argv) != 3: raise StandardError(err_msg) xml_path = sys.argv[1] swigdocfilename = sys.argv[2] di = DoxyIndex(xml_path) # gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined! # This is presumably a bug in SWIG. #msg_q = di.get_member(u'gr_msg_queue', DoxyClass) #insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction) #delete_head = msg_q.get_member(u'delete_head', DoxyFunction) output = [] #output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail')) #output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head')) custom_output = "\n\n".join(output) # Generate the docstrings interface file. make_swig_interface_file(di, swigdocfilename, custom_output=custom_output) gr-air-modes-0.0.0.e47992d/grc/000077500000000000000000000000001221634657000155645ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/grc/CMakeLists.txt000066400000000000000000000016151221634657000203270ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. #install(FILES # gr-air-modes_square_ff.xml # gr-air-modes_square2_ff.xml # DESTINATION share/gnuradio/grc/blocks #) gr-air-modes-0.0.0.e47992d/include/000077500000000000000000000000001221634657000164345ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/include/gr_air_modes/000077500000000000000000000000001221634657000210665ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/include/gr_air_modes/CMakeLists.txt000066400000000000000000000020421221634657000236240ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Install public header files ######################################################################## install(FILES preamble.h slicer.h types.h api.h DESTINATION include/gr_air_modes ) gr-air-modes-0.0.0.e47992d/include/gr_air_modes/api.h000066400000000000000000000020501221634657000220050ustar00rootroot00000000000000/* * Copyright 2011 Free Software Foundation, Inc. * * This file is part of GNU Radio * * GNU Radio is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 3, or (at your option) * any later version. * * GNU Radio is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Radio; see the file COPYING. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, * Boston, MA 02110-1301, USA. */ #ifndef INCLUDED_AIR_MODES_API_H #define INCLUDED_AIR_MODES_API_H #include #ifdef AIR_MODES_EXPORTS # define AIR_MODES_API __GR_ATTR_EXPORT #else # define AIR_MODES_API __GR_ATTR_IMPORT #endif #endif /* INCLUDED_AIR_MODES_API_H */ gr-air-modes-0.0.0.e47992d/include/gr_air_modes/modes_crc.h000066400000000000000000000020551221634657000231770ustar00rootroot00000000000000/* # Copyright 2010 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # */ #ifndef INCLUDED_MODES_CRC_H #define INCLUDED_MODES_CRC_H extern const unsigned int modes_crc_table[112]; unsigned int modes_check_crc(unsigned char data[], int length); bruteResultTypeDef modes_ec_brute(modes_packet &err_packet); unsigned next_set_of_n_elements(unsigned x); #endif gr-air-modes-0.0.0.e47992d/include/gr_air_modes/preamble.h000066400000000000000000000027021221634657000230270ustar00rootroot00000000000000/* # Copyright 2013 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # */ #ifndef INCLUDED_AIR_MODES_PREAMBLE_H #define INCLUDED_AIR_MODES_PREAMBLE_H #include #include namespace gr { namespace air_modes { /*! * \brief mode select preamble detection * \ingroup block */ class AIR_MODES_API preamble : virtual public gr::block { public: typedef boost::shared_ptr sptr; static sptr make(int channel_rate, float threshold_db); virtual void set_rate(int channel_rate) = 0; virtual void set_threshold(float threshold_db) = 0; virtual int get_rate(void) = 0; virtual float get_threshold(void) = 0; }; } // namespace air_modes } // namespace gr #endif /* INCLUDED_AIR_MODES_PREAMBLE_H */ gr-air-modes-0.0.0.e47992d/include/gr_air_modes/slicer.h000066400000000000000000000024131221634657000225200ustar00rootroot00000000000000/* # Copyright 2013 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # */ #ifndef INCLUDED_AIR_MODES_SLICER_H #define INCLUDED_AIR_MODES_SLICER_H #include #include #include namespace gr { namespace air_modes { /*! * \brief mode select slicer * \ingroup block */ class AIR_MODES_API slicer : virtual public gr::sync_block { public: typedef boost::shared_ptr sptr; static sptr make(gr::msg_queue::sptr queue); }; } //namespace air_modes } //namespace gr #endif /* INCLUDED_AIR_MODES_SLICER_H */ gr-air-modes-0.0.0.e47992d/include/gr_air_modes/types.h000066400000000000000000000027711221634657000224120ustar00rootroot00000000000000/* # Copyright 2010 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # */ #ifndef AIR_MODES_TYPES_H #define AIR_MODES_TYPES_H typedef enum { No_Packet = 0, Short_Packet = 1, Fruited_Packet = 2, Long_Packet = 3 } framer_packet_type; typedef enum { No_Error = 0, Solution_Found, Too_Many_LCBs, No_Solution, Multiple_Solutions } bruteResultTypeDef; struct modes_packet { unsigned char data[14]; // unsigned char confidence[14]; //112 bits of boolean high/low confidence data for each bit unsigned char lowconfbits[24]; //positions of low confidence bits within the packet unsigned long crc; unsigned int numlowconf; framer_packet_type type; //what length packet are we unsigned int message_type; float reference_level; double timestamp; }; struct slice_result_t { bool decision; bool confidence; }; #endif gr-air-modes-0.0.0.e47992d/lib/000077500000000000000000000000001221634657000155575ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/lib/CMakeLists.txt000066400000000000000000000052131221634657000203200ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Setup library ######################################################################## include(GrPlatform) #define LIB_SUFFIX add_library(air_modes SHARED preamble_impl.cc slicer_impl.cc modes_crc.cc ) target_link_libraries(air_modes ${Boost_LIBRARIES} ${GNURADIO_RUNTIME_LIBRARIES}) set_target_properties(air_modes PROPERTIES DEFINE_SYMBOL "AIR_MODES_EXPORTS") set_target_properties(air_modes PROPERTIES SOVERSION "${gr-gr-air-modes_VERSION_MAJOR}") set_target_properties(air_modes PROPERTIES VERSION "${gr-gr-air-modes_VERSION_MAJOR}.${gr-gr-air-modes_VERSION_MINOR}") ######################################################################## # Install built library files ######################################################################## install(TARGETS air_modes LIBRARY DESTINATION lib${LIB_SUFFIX} # .so/.dylib file ARCHIVE DESTINATION lib${LIB_SUFFIX} # .lib file RUNTIME DESTINATION bin # .dll file ) ######################################################################## # Build and register unit test ######################################################################## #find_package(Boost COMPONENTS unit_test_framework) #include(GrTest) #set(GR_TEST_TARGET_DEPS gnuradio-gr-air-modes) #turn each test cpp file into an executable with an int main() function #add_definitions(-DBOOST_TEST_DYN_LINK -DBOOST_TEST_MAIN) #add_executable(qa_gr-air-modes_square_ff qa_gr-air-modes_square_ff.cc) #target_link_libraries(qa_gr-air-modes_square_ff gnuradio-gr-air-modes ${Boost_LIBRARIES}) #GR_ADD_TEST(qa_gr-air-modes_square_ff qa_gr-air-modes_square_ff) #add_executable(qa_gr-air-modes_square2_ff qa_gr-air-modes_square2_ff.cc) #target_link_libraries(qa_gr-air-modes_square2_ff gnuradio-gr-air-modes ${Boost_LIBRARIES}) #GR_ADD_TEST(qa_gr-air-modes_square2_ff qa_gr-air-modes_square2_ff) gr-air-modes-0.0.0.e47992d/lib/modes_crc.cc000066400000000000000000000033251221634657000200270ustar00rootroot00000000000000/* * Copyright 2013 Nick Foster * * This file is part of gr-air-modes * * gr-air-modes is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * gr-air-modes is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with gr-air-modes; see the file COPYING. If not, write to * the Free Software Foundation, Inc., 51 Franklin Street, * Boston, MA 02110-1301, USA. */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include #include #include #include #include unsigned int crc_table[256]; const unsigned int POLY=0xFFF409; //generate a bytewise lookup CRC table void generate_crc_table(void) { unsigned int crc = 0; for(int n=0; n<256; n++) { crc = n<<16; for(int k=0; k<8; k++) { if(crc & 0x800000) { crc = ((crc<<1) ^ POLY) & 0xFFFFFF; } else { crc = (crc<<1) & 0xFFFFFF; } } crc_table[n] = crc & 0xFFFFFF; } } //Perform a bytewise CRC check unsigned int modes_check_crc(unsigned char data[], int length) { if(crc_table[1] != POLY) generate_crc_table(); unsigned int crc=0; for(int i=0; i>16) ^ data[i]) & 0xff] ^ (crc << 8); } return crc & 0xFFFFFF; } gr-air-modes-0.0.0.e47992d/lib/preamble_impl.cc000066400000000000000000000220201221634657000206720ustar00rootroot00000000000000/* # Copyright 2010 Nick Foster # Copyright 2013 Nicholas Corgan # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include #include "preamble_impl.h" #include #include #include #include namespace gr { air_modes::preamble::sptr air_modes::preamble::make(int channel_rate, float threshold_db) { return gnuradio::get_initial_sptr(new air_modes::preamble_impl(channel_rate, threshold_db)); } air_modes::preamble_impl::preamble_impl(int channel_rate, float threshold_db) : gr::block ("preamble", gr::io_signature::make2 (2, 2, sizeof(float), sizeof(float)), //stream 0 is received data, stream 1 is moving average for reference gr::io_signature::make (1, 1, sizeof(float))) //the output soft symbols { d_chip_rate = 2000000; //2Mchips per second set_rate(channel_rate); set_threshold(threshold_db); std::stringstream str; str << name() << unique_id(); d_me = pmt::string_to_symbol(str.str()); d_key = pmt::string_to_symbol("preamble_found"); } void air_modes::preamble_impl::set_rate(int channel_rate) { d_samples_per_chip = channel_rate / d_chip_rate; d_samples_per_symbol = d_samples_per_chip * 2; d_check_width = 120 * d_samples_per_symbol; d_secs_per_sample = 1.0/channel_rate; set_output_multiple(1+d_check_width*2); set_history(d_samples_per_symbol); } void air_modes::preamble_impl::set_threshold(float threshold_db) { d_threshold_db = threshold_db; d_threshold = powf(10., threshold_db/20.); //the level that the sample must be above the moving average in order to qualify as a pulse } float air_modes::preamble_impl::get_threshold(void) { return d_threshold_db; } int air_modes::preamble_impl::get_rate(void) { return d_samples_per_chip * d_chip_rate; } static void integrate_and_dump(float *out, const float *in, int chips, int samps_per_chip) { for(int i=0; i tstamp_tags; get_tags_in_range(tstamp_tags, 0, abs_sample_cnt, abs_sample_cnt + ninputs, pmt::string_to_symbol("rx_time")); //tags.back() is the most recent timestamp, then. if(tstamp_tags.size() > 0) { d_timestamp = tstamp_tags.back(); } for(int i=0; i < ninputs; i++) { float pulse_threshold = inavg[i] * d_threshold; if(in[i] > pulse_threshold) { //hey we got a candidate if(in[i+1] > in[i]) continue; //wait for the peak //check to see the rest of the pulses are there if( in[i+pulse_offsets[1]] < pulse_threshold ) continue; if( in[i+pulse_offsets[2]] < pulse_threshold ) continue; if( in[i+pulse_offsets[3]] < pulse_threshold ) continue; //get a more accurate bit center by finding the correlation peak across all four preamble bits bool late, early; int how_late = 0; do { double now_corr = correlate_preamble(in+i, d_samples_per_chip); double late_corr = correlate_preamble(in+i+1, d_samples_per_chip); double early_corr = correlate_preamble(in+i-1, d_samples_per_chip); late = (late_corr > now_corr); //early = (early_corr > now_corr); if(late) { i++; how_late++; } //if(early && i>0) { std::cout << "EARLY " << i << std::endl; i--; } } while(late and how_late < d_samples_per_chip);// xor early); if(0) std::cout << "We were " << how_late << " samples late" << std::endl; //now check to see that the non-peak symbols in the preamble //are below the peaks by threshold dB float avgpeak = ( in[i+pulse_offsets[0]] + in[i+pulse_offsets[1]] + in[i+pulse_offsets[2]] + in[i+pulse_offsets[3]]) / 4.0; float space_threshold = inavg[i] + (avgpeak - inavg[i])/d_threshold; bool valid_preamble = true; //f'in c++ for( int j=1.5*d_samples_per_symbol; j<=3*d_samples_per_symbol; j++) if(in[i+j] > space_threshold) valid_preamble = false; for( int j=5*d_samples_per_symbol; j<=7.5*d_samples_per_symbol; j++) if(in[i+j] > space_threshold) valid_preamble = false; if(!valid_preamble) continue; //be sure we've got enough room in the input buffer to copy out a whole packet if(ninputs-i < 240*d_samples_per_chip) { consume_each(std::max(i-1,0)); if(0) std::cout << "Preamble consumed " << std::max(i-1,0) << ", returned 0 (no room)" << std::endl; return 0; } //all right i'm prepared to call this a preamble for(int j=0; j<240; j++) { out[j] = in[i+j*d_samples_per_chip] - inavg[i]; } //get the timestamp of the preamble double tstamp = tag_to_timestamp(d_timestamp, abs_sample_cnt + i, d_secs_per_sample); //now tag the preamble add_item_tag(0, //stream ID nitems_written(0), //sample d_key, //frame_info pmt::from_double(tstamp), d_me //block src id ); //std::cout << "PREAMBLE" << std::endl; //produce only one output per work call -- TODO this should probably change if(0) std::cout << "Preamble consumed " << i+240*d_samples_per_chip << "with i=" << i << ", returned 240" << std::endl; consume_each(i+240*d_samples_per_chip); return 240; } } //didn't get anything this time if(0) std::cout << "Preamble consumed " << ninputs << ", returned 0" << std::endl; consume_each(ninputs); return 0; } } //namespace gr gr-air-modes-0.0.0.e47992d/lib/preamble_impl.h000066400000000000000000000017641221634657000205500ustar00rootroot00000000000000 #ifndef _AIR_MODES_PREAMBLE_IMPL_H_ #define _AIR_MODES_PREAMBLE_IMPL_H_ #include #include #include namespace gr { namespace air_modes { class AIR_MODES_API preamble_impl : public preamble { private: int d_check_width; int d_chip_rate; float d_preamble_length_us; int d_samples_per_chip; int d_samples_per_symbol; float d_threshold_db; float d_threshold; pmt::pmt_t d_me, d_key; gr::tag_t d_timestamp; double d_secs_per_sample; public: preamble_impl(int channel_rate, float threshold_db); int general_work (int noutput_items, gr_vector_int &ninput_items, gr_vector_const_void_star &input_items, gr_vector_void_star &output_items); void set_rate(int channel_rate); void set_threshold(float threshold_db); float get_threshold(void); int get_rate(void); }; } //namespace air_modes } //namespace gr #endif //_AIR_MODES_PREAMBLE_IMPL_H_ gr-air-modes-0.0.0.e47992d/lib/slicer_impl.cc000066400000000000000000000176731221634657000204060ustar00rootroot00000000000000/* # Copyright 2010 Nick Foster # Copyright 2013 Nicholas Corgan # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include #include "slicer_impl.h" #include #include #include #include #include #include #include extern "C" { #include #include } namespace gr { air_modes::slicer::sptr air_modes::slicer::make(gr::msg_queue::sptr queue) { return gnuradio::get_initial_sptr(new air_modes::slicer_impl(queue)); } air_modes::slicer_impl::slicer_impl(gr::msg_queue::sptr queue) : gr::sync_block ("slicer", gr::io_signature::make (1, 1, sizeof(float)), gr::io_signature::make (0, 0, 0) ) { //initialize private data here d_chip_rate = 2000000; //2Mchips per second d_samples_per_chip = 2;//FIXME this is constant now channel_rate / d_chip_rate; d_samples_per_symbol = d_samples_per_chip * 2; d_check_width = 120 * d_samples_per_symbol; //how far you will have to look ahead d_queue = queue; set_output_multiple(d_check_width*2); //how do you specify buffer size for sinks? } //this slicer is courtesy of Lincoln Labs. supposedly it is more resistant to mode A/C FRUIT. //see http://adsb.tc.faa.gov/WG3_Meetings/Meeting8/Squitter-Lon.pdf static slice_result_t llslicer(const float bit0, const float bit1, const float ref) { slice_result_t result; //3dB limits for bit slicing and confidence measurement float highlimit=ref*1.414; float lowlimit=ref*0.707; bool firstchip_inref = ((bit0 > lowlimit) && (bit0 < highlimit)); bool secondchip_inref = ((bit1 > lowlimit) && (bit1 < highlimit)); if(firstchip_inref && !secondchip_inref) { result.decision = 1; result.confidence = 1; } else if(secondchip_inref && !firstchip_inref) { result.decision = 0; result.confidence = 1; } else if(firstchip_inref && secondchip_inref) { result.decision = bit0 > bit1; result.confidence = 0; } else {//if(!firstchip_inref && !secondchip_inref) { result.decision = bit0 > bit1; if(result.decision) { if(bit1 < lowlimit * 0.5) result.confidence = 1; else result.confidence = 0; } else { if(bit0 < lowlimit * 0.5) result.confidence = 1; else result.confidence = 0; } } return result; } int air_modes::slicer_impl::work(int noutput_items, gr_vector_const_void_star &input_items, gr_vector_void_star &output_items) { const float *in = (const float *) input_items[0]; int size = noutput_items - d_check_width; //since it's a sync block, i assume that it runs with ninput_items = noutput_items if(0) std::cout << "Slicer called with " << size << " samples" << std::endl; std::vector tags; uint64_t abs_sample_cnt = nitems_read(0); get_tags_in_range(tags, 0, abs_sample_cnt, abs_sample_cnt + size, pmt::string_to_symbol("preamble_found")); std::vector::iterator tag_iter; for(tag_iter = tags.begin(); tag_iter != tags.end(); tag_iter++) { uint64_t i = tag_iter->offset - abs_sample_cnt; modes_packet rx_packet; memset(&rx_packet.data, 0x00, 14 * sizeof(unsigned char)); memset(&rx_packet.lowconfbits, 0x00, 24 * sizeof(unsigned char)); rx_packet.numlowconf = 0; //let's use the preamble to get a reference level for the packet //fixme: a better thing to do is create a bi-level avg 1 and avg 0 //through simple statistics, then take the median for your slice level //this won't improve decoding but will improve confidence rx_packet.reference_level = (in[i] + in[i+2] + in[i+7] + in[i+9]) / 4.0; i += 16; //move on up to the first bit of the packet data //now let's slice the header so we can determine if it's a short pkt or a long pkt unsigned char pkt_hdr = 0; for(int j=0; j < 5; j++) { slice_result_t slice_result = llslicer(in[i+j*2], in[i+j*2+1], rx_packet.reference_level); if(slice_result.decision) pkt_hdr += 1 << (4-j); } if(pkt_hdr == 16 or pkt_hdr == 17 or pkt_hdr == 20 or pkt_hdr == 21) rx_packet.type = Long_Packet; else rx_packet.type = Short_Packet; int packet_length = (rx_packet.type == framer_packet_type(Short_Packet)) ? 56 : 112; //it's slice time! //TODO: don't repeat your work here, you already have the first 5 bits for(int j = 0; j < packet_length; j++) { slice_result_t slice_result = llslicer(in[i+j*2], in[i+j*2+1], rx_packet.reference_level); //put the data into the packet if(slice_result.decision) { rx_packet.data[j/8] += 1 << (7-(j%8)); } //put the confidence decision into the packet if(slice_result.confidence) { //rx_packet.confidence[j/8] += 1 << (7-(j%8)); } else { if(rx_packet.numlowconf < 24) rx_packet.lowconfbits[rx_packet.numlowconf++] = j; } } rx_packet.timestamp = pmt::to_double(tag_iter->value); //here you might want to traverse the whole packet and if you find all 0's, just toss it. don't know why these packets turn up, but they pass ECC. bool zeroes = 1; for(int m = 0; m < 14; m++) { if(rx_packet.data[m]) zeroes = 0; } if(zeroes) {continue;} //toss it rx_packet.message_type = (rx_packet.data[0] >> 3) & 0x1F; //get the message type to make decisions on ECC methods if(rx_packet.type == Short_Packet && rx_packet.message_type != 11 && rx_packet.numlowconf > 0) {continue;} if(rx_packet.message_type == 11 && rx_packet.numlowconf >= 10) {continue;} rx_packet.crc = modes_check_crc(rx_packet.data, (packet_length/8)-3); unsigned int ap = rx_packet.data[packet_length/8-3] << 16 | rx_packet.data[packet_length/8-2] << 8 | rx_packet.data[packet_length/8-1] << 0; rx_packet.crc ^= ap; //crc for packets that aren't type 11 or type 17 is encoded with the transponder ID, which we don't know //therefore we toss 'em if there's syndrome //crc for the other short packets is usually nonzero, so they can't really be trusted that far if(rx_packet.crc && (rx_packet.message_type == 11 || rx_packet.message_type == 17)) {continue;} d_payload.str(""); for(int m = 0; m < packet_length/8; m++) { d_payload << std::hex << std::setw(2) << std::setfill('0') << unsigned(rx_packet.data[m]); } d_payload << " " << std::setw(6) << rx_packet.crc << " " << std::dec << rx_packet.reference_level << " " << std::setprecision(10) << std::setw(10) << rx_packet.timestamp; gr::message::sptr msg = gr::message::make_from_string(std::string(d_payload.str())); d_queue->handle(msg); } if(0) std::cout << "Slicer consumed " << size << ", returned " << size << std::endl; return size; } } //namespace gr gr-air-modes-0.0.0.e47992d/lib/slicer_impl.h000066400000000000000000000030121221634657000202260ustar00rootroot00000000000000/* # Copyright 2013 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # */ #ifndef INCLUDED_AIR_MODES_SLICER_IMPL_H #define INCLUDED_AIR_MODES_SLICER_IMPL_H #include #include #include #include namespace gr { namespace air_modes { class AIR_MODES_API slicer_impl : public slicer { private: int d_check_width; int d_chip_rate; int d_samples_per_chip; int d_samples_per_symbol; gr::msg_queue::sptr d_queue; std::ostringstream d_payload; public: slicer_impl(gr::msg_queue::sptr queue); int work (int noutput_items, gr_vector_const_void_star &input_items, gr_vector_void_star &output_items); }; } //namespace air_modes } //namespace gr #endif /* INCLUDED_AIR_MODES_SLICER_IMPL_H */ gr-air-modes-0.0.0.e47992d/python/000077500000000000000000000000001221634657000163325ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/python/CMakeLists.txt000066400000000000000000000036471221634657000211040ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Include python install macros ######################################################################## include(GrPython) if(NOT PYTHONINTERP_FOUND) return() endif() ######################################################################## # Install python sources ######################################################################## GR_PYTHON_INSTALL( FILES __init__.py altitude.py az_map.py cpr.py html_template.py mlat.py exceptions.py flightgear.py gui_model.py kml.py parse.py msprint.py radio.py raw_server.py rx_path.py sbs1.py sql.py types.py zmq_socket.py Quaternion.py DESTINATION ${GR_PYTHON_DIR}/air_modes ) ######################################################################## # Handle the unit tests ######################################################################## #include(GrTest) #set(GR_TEST_TARGET_DEPS gnuradio-gr-air-modes) #set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig) #GR_ADD_TEST(qa_gr-air-modes ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_gr-air-modes.py) gr-air-modes-0.0.0.e47992d/python/Quaternion.py000066400000000000000000000275441221634657000210450ustar00rootroot00000000000000""" Quaternion provides a class for manipulating quaternion objects. This class provides: - a convenient constructor to convert to/from Euler Angles (RA,Dec,Roll) to/from quaternions - class methods to multiply and divide quaternions """ """Copyright 2009 Smithsonian Astrophysical Observatory Released under New BSD / 3-Clause BSD License All rights reserved """ """ Modified 2012 by Nick Foster Modified from version 0.3.1 http://pypi.python.org/pypi/Quaternion/0.03 Added _get_angle_axis to get the angle-axis representation Added _latlontoquat to get a rotation quat to ECEF from lat/lon """ import numpy as np from math import cos, sin, radians, degrees, atan2, sqrt, acos, pi class Quat(object): """ Quaternion class Example usage:: >>> from Quaternion import Quat >>> quat = Quat((12,45,45)) >>> quat.ra, quat.dec, quat.roll (12, 45, 45) >>> quat.q array([ 0.38857298, -0.3146602 , 0.23486498, 0.8335697 ]) >>> q2 = Quat([ 0.38857298, -0.3146602 , 0.23486498, 0.8335697]) >>> q2.ra 11.999999315925008 Multiplication and division operators are overloaded for the class to perform appropriate quaternion multiplication and division. Example usage:: >>> q1 = Quat((20,30,40)) >>> q2 = Quat((30,40,50)) >>> q = q1 / q2 Performs the operation as q1 * inverse q2 Example usage:: >>> q1 = Quat((20,30,40)) >>> q2 = Quat((30,40,50)) >>> q = q1 * q2 :param attitude: initialization attitude for quat ``attitude`` may be: * another Quat * a 4 element array (expects x,y,z,w quat form) * a 3 element array (expects ra,dec,roll in degrees) * a 3x3 transform/rotation matrix """ def __init__(self, attitude): self._q = None self._equatorial = None self._T = None # checks to see if we've been passed a Quat if isinstance(attitude, Quat): self._set_q(attitude.q) else: # make it an array and check to see if it is a supported shape attitude = np.array(attitude) if len(attitude) == 4: self._set_q(attitude) elif attitude.shape == (3,3): self._set_transform(attitude) elif attitude.shape == (3,): self._set_equatorial(attitude) elif attitude.shape == (2,): self._set_latlon(attitude) else: raise TypeError("attitude is not one of possible types (2, 3 or 4 elements, Quat, or 3x3 matrix)") def _set_q(self, q): """ Set the value of the 4 element quaternion vector :param q: list or array of normalized quaternion elements """ q = np.array(q) if abs(np.sum(q**2) - 1.0) > 1e-6: raise ValueError('Quaternion must be normalized so sum(q**2) == 1; use Quaternion.normalize') self._q = (q if q[3] > 0 else -q) # Erase internal values of other representations self._equatorial = None self._T = None def _get_q(self): """ Retrieve 4-vector of quaternion elements in [x, y, z, w] form :rtype: numpy array """ if self._q is None: # Figure out q from available values, doing nothing others are not defined if self._equatorial is not None: self._q = self._equatorial2quat() elif self._T is not None: self._q = self._transform2quat() return self._q # use property to make this get/set automatic q = property(_get_q, _set_q) def _set_equatorial(self, equatorial): """Set the value of the 3 element equatorial coordinate list [RA,Dec,Roll] expects values in degrees bounds are not checked :param equatorial: list or array [ RA, Dec, Roll] in degrees """ att = np.array(equatorial) ra, dec, roll = att self._ra0 = ra if ( ra > 180 ): self._ra0 = ra - 360 self._roll0 = roll if ( roll > 180): self._roll0 = roll - 360 self._equatorial = att def _set_latlon(self, latlon): self._q = self._latlontoquat(latlon) def _get_equatorial(self): """Retrieve [RA, Dec, Roll] :rtype: numpy array """ if self._equatorial is None: if self._q is not None: self._equatorial = self._quat2equatorial() elif self._T is not None: self._q = self._transform2quat() self._equatorial = self._quat2equatorial() return self._equatorial equatorial = property(_get_equatorial,_set_equatorial) def _get_ra(self): """Retrieve RA term from equatorial system in degrees""" return self.equatorial[0] def _get_dec(self): """Retrieve Dec term from equatorial system in degrees""" return self.equatorial[1] def _get_roll(self): """Retrieve Roll term from equatorial system in degrees""" return self.equatorial[2] ra = property(_get_ra) dec = property(_get_dec) roll = property(_get_roll) def _set_transform(self, T): """ Set the value of the 3x3 rotation/transform matrix :param T: 3x3 array/numpy array """ transform = np.array(T) self._T = transform def _get_transform(self): """ Retrieve the value of the 3x3 rotation/transform matrix :returns: 3x3 rotation/transform matrix :rtype: numpy array """ if self._T is None: if self._q is not None: self._T = self._quat2transform() elif self._equatorial is not None: self._T = self._equatorial2transform() return self._T transform = property(_get_transform, _set_transform) def _quat2equatorial(self): """ Determine Right Ascension, Declination, and Roll for the object quaternion :returns: RA, Dec, Roll :rtype: numpy array [ra,dec,roll] """ q = self.q q2 = self.q**2 ## calculate direction cosine matrix elements from $quaternions xa = q2[0] - q2[1] - q2[2] + q2[3] xb = 2 * (q[0] * q[1] + q[2] * q[3]) xn = 2 * (q[0] * q[2] - q[1] * q[3]) yn = 2 * (q[1] * q[2] + q[0] * q[3]) zn = q2[3] + q2[2] - q2[0] - q2[1] ##; calculate RA, Dec, Roll from cosine matrix elements ra = degrees(atan2(xb , xa)) ; dec = degrees(atan2(xn , sqrt(1 - xn**2))); roll = degrees(atan2(yn , zn)) ; if ( ra < 0 ): ra += 360 if ( roll < 0 ): roll += 360 return np.array([ra, dec, roll]) def _quat2transform(self): """ Transform a unit quaternion into its corresponding rotation matrix (to be applied on the right side). :returns: transform matrix :rtype: numpy array """ x, y, z, w = self.q xx2 = 2 * x * x yy2 = 2 * y * y zz2 = 2 * z * z xy2 = 2 * x * y wz2 = 2 * w * z zx2 = 2 * z * x wy2 = 2 * w * y yz2 = 2 * y * z wx2 = 2 * w * x rmat = np.empty((3, 3), float) rmat[0,0] = 1. - yy2 - zz2 rmat[0,1] = xy2 - wz2 rmat[0,2] = zx2 + wy2 rmat[1,0] = xy2 + wz2 rmat[1,1] = 1. - xx2 - zz2 rmat[1,2] = yz2 - wx2 rmat[2,0] = zx2 - wy2 rmat[2,1] = yz2 + wx2 rmat[2,2] = 1. - xx2 - yy2 return rmat def _equatorial2quat( self ): """Dummy method to return return quat. :returns: quaternion :rtype: Quat """ return self._transform2quat() def _equatorial2transform( self ): """Construct the transform/rotation matrix from RA,Dec,Roll :returns: transform matrix :rtype: 3x3 numpy array """ ra = radians(self._get_ra()) dec = radians(self._get_dec()) roll = radians(self._get_roll()) ca = cos(ra) sa = sin(ra) cd = cos(dec) sd = sin(dec) cr = cos(roll) sr = sin(roll) # This is the transpose of the transformation matrix (related to translation # of original perl code rmat = np.array([[ca * cd, sa * cd, sd ], [-ca * sd * sr - sa * cr, -sa * sd * sr + ca * cr, cd * sr], [-ca * sd * cr + sa * sr, -sa * sd * cr - ca * sr, cd * cr]]) return rmat.transpose() def _transform2quat( self ): """Construct quaternion from the transform/rotation matrix :returns: quaternion formed from transform matrix :rtype: numpy array """ # Code was copied from perl PDL code that uses backwards index ordering T = self.transform.transpose() den = np.array([ 1.0 + T[0,0] - T[1,1] - T[2,2], 1.0 - T[0,0] + T[1,1] - T[2,2], 1.0 - T[0,0] - T[1,1] + T[2,2], 1.0 + T[0,0] + T[1,1] + T[2,2]]) max_idx = np.flatnonzero(den == max(den))[0] q = np.zeros(4) q[max_idx] = 0.5 * sqrt(max(den)) denom = 4.0 * q[max_idx] if (max_idx == 0): q[1] = (T[1,0] + T[0,1]) / denom q[2] = (T[2,0] + T[0,2]) / denom q[3] = -(T[2,1] - T[1,2]) / denom if (max_idx == 1): q[0] = (T[1,0] + T[0,1]) / denom q[2] = (T[2,1] + T[1,2]) / denom q[3] = -(T[0,2] - T[2,0]) / denom if (max_idx == 2): q[0] = (T[2,0] + T[0,2]) / denom q[1] = (T[2,1] + T[1,2]) / denom q[3] = -(T[1,0] - T[0,1]) / denom if (max_idx == 3): q[0] = -(T[2,1] - T[1,2]) / denom q[1] = -(T[0,2] - T[2,0]) / denom q[2] = -(T[1,0] - T[0,1]) / denom return q def _get_angle_axis(self): lim = 1e-12 norm = np.linalg.norm(self.q) if norm < lim: angle = 0 axis = [0,0,0] else: rnorm = 1.0 / norm angle = acos(max(-1, min(1, rnorm*self.q[3]))); sangle = sin(angle) if sangle < lim: axis = [0,0,0] else: axis = (rnorm / sangle) * np.array(self.q[0:3]) angle *= 2 return (angle, axis) def _latlontoquat ( self, latlon ): q = np.zeros(4) lon = latlon[1]*(pi/180.) lat = latlon[0]*(pi/180.) zd2 = 0.5*lon yd2 = -0.25*pi - 0.5*lat Szd2 = sin(zd2) Syd2 = sin(yd2) Czd2 = cos(zd2) Cyd2 = cos(yd2) q[0] = -Szd2*Syd2 q[1] = Czd2*Syd2 q[2] = Szd2*Cyd2 q[3] = Czd2*Cyd2 return q def __div__(self, quat2): """ Divide one quaternion by another. Example usage:: >>> q1 = Quat((20,30,40)) >>> q2 = Quat((30,40,50)) >>> q = q1 / q2 Performs the operation as q1 * inverse q2 :returns: product q1 * inverse q2 :rtype: Quat """ return self * quat2.inv() def __mul__(self, quat2): """ Multiply quaternion by another. Example usage:: >>> q1 = Quat((20,30,40)) >>> q2 = Quat((30,40,50)) >>> (q1 * q2).equatorial array([ 349.73395729, 76.25393056, 127.61636727]) :returns: product q1 * q2 :rtype: Quat """ q1 = self.q q2 = quat2.q mult = np.zeros(4) mult[0] = q1[3]*q2[0] - q1[2]*q2[1] + q1[1]*q2[2] + q1[0]*q2[3] mult[1] = q1[2]*q2[0] + q1[3]*q2[1] - q1[0]*q2[2] + q1[1]*q2[3] mult[2] = -q1[1]*q2[0] + q1[0]*q2[1] + q1[3]*q2[2] + q1[2]*q2[3] mult[3] = -q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] + q1[3]*q2[3] return Quat(mult) def inv(self): """ Invert the quaternion :returns: inverted quaternion :rtype: Quat """ return Quat([self.q[0], self.q[1], self.q[2], -self.q[3]]) def normalize(array): """ Normalize a 4 element array/list/numpy.array for use as a quaternion :param quat_array: 4 element list/array :returns: normalized array :rtype: numpy array """ quat = np.array(array) return quat / np.sqrt(np.dot(quat, quat)) gr-air-modes-0.0.0.e47992d/python/__init__.py000066400000000000000000000055261221634657000204530ustar00rootroot00000000000000# # Copyright 2008,2009 Free Software Foundation, Inc. # # This application is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # This application is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # The presence of this file turns this directory into a Python package ''' This is the GNU Radio gr-air-modes package. It provides a library and application for receiving Mode S / ADS-B signals from aircraft. Use uhd_modes.py as the main application for receiving signals. cpr.py provides an implementation of Compact Position Reporting. altitude.py implements Gray-coded altitude decoding. Various plugins exist for SQL, KML, and PlanePlotter-compliant SBS-1 emulation output. mlat.py provides an experimental implementation of a multilateration solver. ''' # ---------------------------------------------------------------- # Temporary workaround for ticket:181 (swig+python problem) import sys _RTLD_GLOBAL = 0 try: from dl import RTLD_GLOBAL as _RTLD_GLOBAL except ImportError: try: from DLFCN import RTLD_GLOBAL as _RTLD_GLOBAL except ImportError: pass if _RTLD_GLOBAL != 0: _dlopenflags = sys.getdlopenflags() sys.setdlopenflags(_dlopenflags|_RTLD_GLOBAL) # ---------------------------------------------------------------- # import swig generated symbols into the gr-air-modes namespace from air_modes_swig import * # import any pure python here # from rx_path import rx_path from zmq_socket import zmq_pubsub_iface from parse import * from msprint import output_print from sql import output_sql from sbs1 import output_sbs1 from kml import output_kml, output_jsonp from raw_server import raw_server from radio import modes_radio from exceptions import * from az_map import * from types import * from altitude import * from cpr import cpr_decoder from html_template import html_template #this is try/excepted in case the user doesn't have numpy installed try: from flightgear import output_flightgear from Quaternion import * except ImportError: print "gr-air-modes warning: numpy+scipy not installed, FlightGear interface not supported" pass # ---------------------------------------------------------------- # Tail of workaround if _RTLD_GLOBAL != 0: sys.setdlopenflags(_dlopenflags) # Restore original flags # ---------------------------------------------------------------- gr-air-modes-0.0.0.e47992d/python/altitude.py000077500000000000000000000101361221634657000205230ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2010, 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # # For reference into the methodology used to decode altitude, # see RTCA DO-181D p.42 from air_modes.exceptions import * def decode_alt(alt, bit13): mbit = alt & 0x0040 qbit = alt & 0x0010 if mbit and bit13: #nobody uses metric altitude: AFAIK, it's an orphaned part of #the spec. haven't seen it in three years. as a result, replies #with mbit set can be considered spurious, and so we discard them here. #bits 20-25, 27-31 encode alt in meters #remember that bits are LSB (bit 20 is MSB) #meters_alt = 0 #for (shift, bit) in enumerate(range(31,26,-1)+range(25,19,-1)): # meters_alt += ((alt & (1<> 2 tmp2 = (alt & 0x0020) >> 1 else: tmp1 = (alt & 0x1FE0) >> 1 tmp2 = 0 decoded_alt = ((alt & 0x0F) | tmp1 | tmp2) * 25 - 1000 else: #a mode C-style reply #okay, the order they come in is: #C1 A1 C2 A2 C4 A4 X B1 D1 B2 D2 B4 D4 #the order we want them in is: #D2 D4 A1 A2 A4 B1 B2 B4 #so we'll reassemble into a Gray-coded representation if bit13 is False: alt = (alt & 0x003F) | (alt & 0x0FC0 << 1) C1 = 0x1000 A1 = 0x0800 C2 = 0x0400 A2 = 0x0200 #this represents the order in which the bits come C4 = 0x0100 A4 = 0x0080 B1 = 0x0020 D1 = 0x0010 B2 = 0x0008 D2 = 0x0004 B4 = 0x0002 D4 = 0x0001 bigpart = ((alt & B4) >> 1) \ + ((alt & B2) >> 2) \ + ((alt & B1) >> 3) \ + ((alt & A4) >> 4) \ + ((alt & A2) >> 5) \ + ((alt & A1) >> 6) \ + ((alt & D4) << 6) \ + ((alt & D2) << 5) #bigpart is now the 500-foot-resolution Gray-coded binary part decoded_alt = gray2bin(bigpart) #real_alt is now the 500-foot-per-tick altitude cbits = ((alt & C4) >> 8) + ((alt & C2) >> 9) + ((alt & C1) >> 10) cval = gray2bin(cbits) #turn them into a real number if cval == 7: cval = 5 #not a real gray code after all if decoded_alt % 2: cval = 6 - cval #since the code is symmetric this unwraps it to see whether to subtract the C bits or add them decoded_alt *= 500 #take care of the A,B,D data decoded_alt += cval * 100 #factor in the C data decoded_alt -= 1300 #subtract the offset return decoded_alt def gray2bin(gray): i = gray >> 1 while i != 0: gray ^= i i >>= 1 return gray def encode_alt_modes(alt, bit13): mbit = False qbit = True encalt = (int(alt) + 1000) / 25 if bit13 is True: tmp1 = (encalt & 0xfe0) << 2 tmp2 = (encalt & 0x010) << 1 else: tmp1 = (encalt & 0xff8) << 1 tmp2 = 0 return (encalt & 0x0F) | tmp1 | tmp2 | (mbit << 6) | (qbit << 4) if __name__ == "__main__": try: for alt in range(-1000, 101400, 25): dec = decode_alt(encode_alt_modes(alt, False), False) if dec != alt: print "Failure at %i with bit13 clear (got %s)" % (alt, dec) for alt in range(-1000, 101400, 25): dec = decode_alt(encode_alt_modes(alt, True), True) if dec != alt: print "Failure at %i with bit13 set (got %s)" % (alt, dec) except MetricAltError: print "Failure at %i due to metric alt bit" % alt gr-air-modes-0.0.0.e47992d/python/az_map.py000077500000000000000000000212721221634657000201620ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # # azimuthal projection widget to plot reception range vs. azimuth from PyQt4 import QtCore, QtGui import threading import math import air_modes from air_modes.exceptions import * import numpy as np # model has max range vs. azimuth in n-degree increments # contains separate max range for a variety of altitudes so # you can determine your altitude dropouts by bearing # assumes that if you can hear ac at 1000', you can hear at 5000'+. class az_map_model(QtCore.QObject): dataChanged = QtCore.pyqtSignal(name='dataChanged') npoints = 360/5 def __init__(self, parent=None): super(az_map_model, self).__init__(parent) self._data = [] self.lock = threading.Lock() self._altitudes = [0, 1000, 2000, 5000, 10000, 15000, 20000, 25000, 30000] #initialize everything to 0 for i in range(0,az_map_model.npoints): self._data.append([0] * len(self._altitudes)) def rowCount(self): return len(self._data) def columnCount(self): return len(self._altitudes) def data(self, row, col): return self._data[row][col] def addRecord(self, bearing, altitude, distance): with self.lock: #round up to nearest altitude in altitudes list #there's probably another way to do it if altitude >= max(self._altitudes): col = self.columnCount()-1 else: col = self._altitudes.index(min([alt for alt in self._altitudes if alt >= altitude])) #find which bearing row we sit in row = int(int(bearing+(180./az_map_model.npoints)) / (360./az_map_model.npoints)) % az_map_model.npoints #set max range for all alts >= the ac alt #this expresses the assumption that higher ac can be heard further update = False for i in range(col, len(self._altitudes)): if distance > self._data[row][i]: self._data[row][i] = distance update = True if update: self.dataChanged.emit() def reset(self): with self.lock: self._data = [] for i in range(0,az_map_model.npoints): self._data.append([0] * len(self._altitudes)) self.dataChanged.emit() # the azimuth map widget class az_map(QtGui.QWidget): maxrange = 200 bgcolor = QtCore.Qt.black ringpen = QtGui.QPen(QtGui.QColor(0, 96, 127, 255), 1.3) def __init__(self, parent=None): super(az_map, self).__init__(parent) self._model = None self._paths = [] self.maxrange = az_map.maxrange def minimumSizeHint(self): return QtCore.QSize(50, 50) def sizeHint(self): return QtCore.QSize(300, 300) def setModel(self, model): self._model = model self._model.dataChanged.connect(self.repaint) def paintEvent(self, event): painter = QtGui.QPainter(self) painter.setRenderHint(QtGui.QPainter.Antialiasing) #TODO: make it not have to redraw paths EVERY repaint #drawing paths is VERY SLOW #maybe use a QTimer to limit repaints self.drawPaths() #set background painter.fillRect(event.rect(), QtGui.QBrush(az_map.bgcolor)) #draw the range rings self.drawRangeRings(painter) for i in range(len(self._paths)): alpha = 230 * (i+1) / (len(self._paths)) + 25 painter.setPen(QtGui.QPen(QtGui.QColor(alpha,alpha,0,255), 1.0)) painter.drawPath(self._paths[i]) def drawPaths(self): self._paths = [] if(self._model): for alt in range(0, self._model.columnCount()): path = QtGui.QPainterPath() for i in range(az_map_model.npoints-1,-1,-1): #bearing is to start point of arc (clockwise) bearing = (i+0.5) * 360./az_map_model.npoints distance = self._model._data[i][alt] radius = min(self.width(), self.height()) / 2.0 scale = radius * distance / self.get_range() #convert bearing,distance to x,y xpts = scale * math.sin(bearing * math.pi / 180) ypts = scale * math.cos(bearing * math.pi / 180) #get the bounding rectangle of the arc arcrect = QtCore.QRectF(QtCore.QPointF(0-scale, 0-scale), QtCore.QPointF(scale, scale)) if path.isEmpty(): path.moveTo(xpts, 0-ypts) #so we don't get a line from 0,0 to the first point else: path.lineTo(xpts, 0-ypts) path.arcTo(arcrect, 90-bearing, 360./az_map_model.npoints) self._paths.append(path) #this is just to add a little buffer space for showing the ring & range def get_range(self): return int(self.maxrange * 1.1) def drawRangeRings(self, painter): painter.translate(self.width()/2, self.height()/2) #choose intelligent range step -- keep it between 3-5 rings rangestep = 100 while self.get_range() / rangestep < 3: rangestep /= 2.0 for i in np.arange(rangestep, self.get_range(), rangestep): diameter = (float(i) / self.get_range()) * min(self.width(), self.height()) painter.setPen(az_map.ringpen) painter.drawEllipse(QtCore.QRectF(-diameter / 2.0, -diameter / 2.0, diameter, diameter)) painter.setPen(QtGui.QColor(255,127,0,255)) painter.drawText(0-70/2.0, diameter/2.0, 70, 30, QtCore.Qt.AlignHCenter, "%.1fnm" % i) def setMaxRange(self, maxrange): maxrange = max(3.25, maxrange) maxrange = min(500., maxrange) self.maxrange = maxrange self.repaint() def wheelEvent(self, event): self.setMaxRange(self.maxrange + (event.delta()/120.)*self.maxrange/4.) class az_map_output: def __init__(self, cprdec, model, pub): self._cpr = cprdec self.model = model pub.subscribe("type17_dl", self.output) def output(self, msg): try: now = time.time() icao = msg.data["aa"] subtype = msg.data["ftc"] distance, altitude, bearing = [0,0,0] if 5 <= subtype <= 8: (ground_track, decoded_lat, decoded_lon, distance, bearing) = air_modes.parseBDS06(msg.data, self._cpr) altitude = 0 elif 9 <= subtype <= 18: (altitude, decoded_lat, decoded_lon, distance, bearing) = air_modes.parseBDS05(msg.data, self._cpr) self.model.addRecord(bearing, altitude, distance) except ADSBError: pass ############################## # Test stuff ############################## import random, time class model_updater(threading.Thread): def __init__(self, model): super(model_updater, self).__init__() self.model = model self.setDaemon(1) self.done = False self.start() def run(self): for i in range(az_map_model.npoints): time.sleep(0.005) if(self.model): for alt in self.model._altitudes: self.model.addRecord(i*360./az_map_model.npoints, alt, random.randint(0,az_map.maxrange)*alt / max(self.model._altitudes)) self.done = True class Window(QtGui.QWidget): def __init__(self): super(Window, self).__init__() layout = QtGui.QGridLayout() self.model = az_map_model() mymap = az_map(None) mymap.setModel(self.model) self.updater = model_updater(self.model) layout.addWidget(mymap, 0, 1) self.setLayout(layout) if __name__ == '__main__': import sys app = QtGui.QApplication(sys.argv) window = Window() window.show() window.update() sys.exit(app.exec_()) gr-air-modes-0.0.0.e47992d/python/cpr.py000077500000000000000000000246041221634657000175010ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2010, 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import math, time from air_modes.exceptions import * #this implements CPR position decoding and encoding. #the decoder is implemented as a class, cpr_decoder, which keeps state for local decoding. #the encoder is cpr_encode([lat, lon], type (even=0, odd=1), and surface (0 for surface, 1 for airborne)) #TODO: remove range/bearing calc from CPR decoder class. you can do this outside of the decoder. latz = 15 def nz(ctype): return 4 * latz - ctype def dlat(ctype, surface): if surface == 1: tmp = 90.0 else: tmp = 360.0 nzcalc = nz(ctype) if nzcalc == 0: return tmp else: return tmp / nzcalc def nl(declat_in): if abs(declat_in) >= 87.0: return 1.0 return math.floor( (2.0*math.pi) * math.acos(1.0- (1.0-math.cos(math.pi/(2.0*latz))) / math.cos( (math.pi/180.0)*abs(declat_in) )**2 )**-1) def dlon(declat_in, ctype, surface): if surface: tmp = 90.0 else: tmp = 360.0 nlcalc = max(nl(declat_in)-ctype, 1) return tmp / nlcalc def decode_lat(enclat, ctype, my_lat, surface): tmp1 = dlat(ctype, surface) tmp2 = float(enclat) / (2**17) j = math.floor(my_lat/tmp1) + math.floor(0.5 + ((my_lat % tmp1) / tmp1) - tmp2) return tmp1 * (j + tmp2) def decode_lon(declat, enclon, ctype, my_lon, surface): tmp1 = dlon(declat, ctype, surface) tmp2 = float(enclon) / (2**17) m = math.floor(my_lon / tmp1) + math.floor(0.5 + ((my_lon % tmp1) / tmp1) - tmp2) return tmp1 * (m + tmp2) def cpr_resolve_local(my_location, encoded_location, ctype, surface): [my_lat, my_lon] = my_location [enclat, enclon] = encoded_location decoded_lat = decode_lat(enclat, ctype, my_lat, surface) decoded_lon = decode_lon(decoded_lat, enclon, ctype, my_lon, surface) return [decoded_lat, decoded_lon] def cpr_resolve_global(evenpos, oddpos, mypos, mostrecent, surface): #cannot resolve surface positions unambiguously without knowing receiver position if surface and mypos is None: raise CPRNoPositionError dlateven = dlat(0, surface) dlatodd = dlat(1, surface) evenpos = [float(evenpos[0]), float(evenpos[1])] oddpos = [float(oddpos[0]), float(oddpos[1])] j = math.floor(((nz(1)*evenpos[0] - nz(0)*oddpos[0])/2**17) + 0.5) #latitude index rlateven = dlateven * ((j % nz(0))+evenpos[0]/2**17) rlatodd = dlatodd * ((j % nz(1))+ oddpos[0]/2**17) #limit to -90, 90 if rlateven > 270.0: rlateven -= 360.0 if rlatodd > 270.0: rlatodd -= 360.0 #This checks to see if the latitudes of the reports straddle a transition boundary #If so, you can't get a globally-resolvable location. if nl(rlateven) != nl(rlatodd): raise CPRBoundaryStraddleError if mostrecent == 0: rlat = rlateven else: rlat = rlatodd #disambiguate latitude if surface: if mypos[0] < 0: rlat -= 90 dl = dlon(rlat, mostrecent, surface) nl_rlat = nl(rlat) m = math.floor(((evenpos[1]*(nl_rlat-1)-oddpos[1]*nl_rlat)/2**17)+0.5) #longitude index #when surface positions straddle a disambiguation boundary (90 degrees), #surface decoding will fail. this might never be a problem in real life, but it'll fail in the #test case. the documentation doesn't mention it. if mostrecent == 0: enclon = evenpos[1] else: enclon = oddpos[1] rlon = dl * ((m % max(nl_rlat-mostrecent,1)) + enclon/2.**17) #print "DL: %f nl: %f m: %f rlon: %f" % (dl, nl_rlat, m, rlon) #print "evenpos: %x, oddpos: %x, mostrecent: %i" % (evenpos[1], oddpos[1], mostrecent) if surface: #longitudes need to be resolved to the nearest 90 degree segment to the receiver. wat = mypos[1] if wat < 0: wat += 360 zone = lambda lon: 90 * (int(lon) / 90) rlon += (zone(wat) - zone(rlon)) #limit to (-180, 180) if rlon > 180: rlon -= 360.0 return [rlat, rlon] #calculate range and bearing between two lat/lon points #should probably throw this in the mlat py somewhere or make another lib def range_bearing(loc_a, loc_b): [a_lat, a_lon] = loc_a [b_lat, b_lon] = loc_b esquared = (1/298.257223563)*(2-(1/298.257223563)) earth_radius_mi = 3963.19059 * (math.pi / 180) delta_lat = b_lat - a_lat delta_lon = b_lon - a_lon avg_lat = ((a_lat + b_lat) / 2.0) * math.pi / 180 R1 = earth_radius_mi*(1.0-esquared)/pow((1.0-esquared*pow(math.sin(avg_lat),2)),1.5) R2 = earth_radius_mi/math.sqrt(1.0-esquared*pow(math.sin(avg_lat),2)) distance_North = R1*delta_lat distance_East = R2*math.cos(avg_lat)*delta_lon bearing = math.atan2(distance_East,distance_North) * (180.0 / math.pi) if bearing < 0.0: bearing += 360.0 rnge = math.hypot(distance_East,distance_North) return [rnge, bearing] class cpr_decoder: def __init__(self, my_location): self.my_location = my_location self.evenlist = {} self.oddlist = {} self.evenlist_sfc = {} self.oddlist_sfc = {} def set_location(self, new_location): self.my_location = new_location def weed_poslists(self): for poslist in [self.evenlist, self.oddlist]: for key, item in poslist.items(): if time.time() - item[2] > 10: del poslist[key] for poslist in [self.evenlist_sfc, self.oddlist_sfc]: for key, item in poslist.items(): if time.time() - item[2] > 25: del poslist[key] def decode(self, icao24, encoded_lat, encoded_lon, cpr_format, surface): if surface: oddlist = self.oddlist_sfc evenlist = self.evenlist_sfc else: oddlist = self.oddlist evenlist = self.evenlist #add the info to the position reports list for global decoding if cpr_format==1: oddlist[icao24] = [encoded_lat, encoded_lon, time.time()] else: evenlist[icao24] = [encoded_lat, encoded_lon, time.time()] [decoded_lat, decoded_lon] = [None, None] #okay, let's traverse the lists and weed out those entries that are older than 10 seconds self.weed_poslists() if (icao24 in evenlist) \ and (icao24 in oddlist): newer = (oddlist[icao24][2] - evenlist[icao24][2]) > 0 #figure out which report is newer [decoded_lat, decoded_lon] = cpr_resolve_global(evenlist[icao24][0:2], oddlist[icao24][0:2], self.my_location, newer, surface) #do a global decode else: raise CPRNoPositionError if self.my_location is not None: [rnge, bearing] = range_bearing(self.my_location, [decoded_lat, decoded_lon]) else: rnge = None bearing = None return [decoded_lat, decoded_lon, rnge, bearing] #encode CPR position def cpr_encode(lat, lon, ctype, surface): if surface is True: scalar = 2.**19 else: scalar = 2.**17 #encode using 360 constant for segment size. dlati = dlat(ctype, False) yz = math.floor(scalar * ((lat % dlati)/dlati) + 0.5) rlat = dlati * ((yz / scalar) + math.floor(lat / dlati)) #encode using 360 constant for segment size. dloni = dlon(lat, ctype, False) xz = math.floor(scalar * ((lon % dloni)/dloni) + 0.5) yz = int(yz) & (2**17-1) xz = int(xz) & (2**17-1) return (yz, xz) #lat, lon if __name__ == '__main__': import sys, random rounds = 10001 threshold = 1e-3 #0.001 deg lat/lon #this accuracy is highly dependent on latitude, since at high #latitudes the corresponding error in longitude is greater bs = 0 surface = False lats = [i/(rounds/170.)-85 for i in range(0,rounds)] lons = [i/(rounds/360.)-180 for i in range(0,rounds)] for i in range(0, rounds): even_lat = lats[i] #even_lat = random.uniform(-85, 85) even_lon = lons[i] #even_lon = random.uniform(-180, 180) odd_lat = even_lat + 1e-3 odd_lon = min(even_lon + 1e-3, 180) decoder = cpr_decoder([odd_lat, odd_lon]) #encode that position (evenenclat, evenenclon) = cpr_encode(even_lat, even_lon, False, surface) (oddenclat, oddenclon) = cpr_encode(odd_lat, odd_lon, True, surface) #try to perform a global decode -- this should fail since the decoder #only has heard one position. need two for global decoding. icao = random.randint(0, 0xffffff) try: evenpos = decoder.decode(icao, evenenclat, evenenclon, False, surface) raise Exception("CPR test failure: global decode with only one report") except CPRNoPositionError: pass #now try to do a real decode with the last packet's odd complement #watch for a boundary straddle -- this isn't fatal, it just indicates #that the even and odd reports lie on either side of a longitudinal boundary #and so you can't get a position try: (odddeclat, odddeclon, rng, brg) = decoder.decode(icao, oddenclat, oddenclon, True, surface) except CPRBoundaryStraddleError: bs += 1 continue except CPRNoPositionError: raise Exception("CPR test failure: no decode after even/odd inputs") if abs(odddeclat - odd_lat) > threshold or abs(odddeclon - odd_lon) > threshold: print "F odddeclat: %f odd_lat: %f" % (odddeclat, odd_lat) print "F odddeclon: %f odd_lon: %f" % (odddeclon, odd_lon) raise Exception("CPR test failure: global decode error greater than threshold") # else: # print "S odddeclat: %f odd_lat: %f" % (odddeclat, odd_lat) # print "S odddeclon: %f odd_lon: %f" % (odddeclon, odd_lon) nexteven_lat = odd_lat + 1e-3 nexteven_lon = min(odd_lon + 1e-3, 180) (nexteven_enclat, nexteven_enclon) = cpr_encode(nexteven_lat, nexteven_lon, False, surface) #try a locally-referenced decode try: (evendeclat, evendeclon) = cpr_resolve_local([even_lat, even_lon], [nexteven_enclat, nexteven_enclon], False, surface) except CPRNoPositionError: raise Exception("CPR test failure: local decode failure to resolve") #check to see if the positions were valid if abs(evendeclat - nexteven_lat) > threshold or abs(evendeclon - nexteven_lon) > threshold: print "F evendeclat: %f nexteven_lat: %f evenlat: %f" % (evendeclat, nexteven_lat, even_lat) print "F evendeclon: %f nexteven_lon: %f evenlon: %f" % (evendeclon, nexteven_lon, even_lon) raise Exception("CPR test failure: local decode error greater than threshold") print "CPR test successful. There were %i boundary straddles over %i rounds." % (bs, rounds) gr-air-modes-0.0.0.e47992d/python/exceptions.py000066400000000000000000000023501221634657000210650ustar00rootroot00000000000000# # Copyright 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # class ADSBError(Exception): pass class MetricAltError(ADSBError): pass class ParserError(ADSBError): pass class NoHandlerError(ADSBError): def __init__(self, msgtype=None): self.msgtype = msgtype class MlatNonConvergeError(ADSBError): pass class CPRNoPositionError(ADSBError): pass class CPRBoundaryStraddleError(CPRNoPositionError): pass class FieldNotInPacket(ParserError): def __init__(self, item): self.item = item gr-air-modes-0.0.0.e47992d/python/flightgear.py000077500000000000000000000216231221634657000210270ustar00rootroot00000000000000#!/usr/bin/env python #flightgear interface to uhd_modes.py #outputs UDP data to add traffic to FGFS import struct import socket import air_modes from air_modes import mlat import sqlite3 import string, threading, math, time from air_modes.sql import output_sql from Quaternion import Quat import numpy from air_modes.exceptions import * class output_flightgear: def __init__(self, cprdec, hostname, port, pub): self.hostname = hostname self.port = port self.localpos = localpos self.positions = {} self.velocities = {} self.callsigns = {} self._cpr = cprdec self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.connect((self.hostname, self.port)) pub.subscribe("type17_dl", output) def output(self, msg): try: msgtype = msg.data["df"] if msgtype == 17: #ADS-B report icao24 = msg.data["aa"] bdsreg = msg.data["me"].get_type() if bdsreg == 0x08: #ident packet (ident, actype) = air_modes.parseBDS08(data) #select model based on actype self.callsigns[icao24] = [ident, actype] elif bdsreg == 0x06: #BDS0,6 pos [ground_track, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS06(data, self._cpr) self.positions[icao24] = [decoded_lat, decoded_lon, 0] self.update(icao24) elif bdsreg == 0x05: #BDS0,5 pos [altitude, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS05(data, self._cpr) self.positions[icao24] = [decoded_lat, decoded_lon, altitude] self.update(icao24) elif bdsreg == 0x09: #velocity subtype = data["bds09"].get_type() if subtype == 0: [velocity, heading, vert_spd, turnrate] = air_modes.parseBDS09_0(data) elif subtype == 1: [velocity, heading, vert_spd] = air_modes.parseBDS09_1(data) turnrate = 0 else: return self.velocities[icao24] = [velocity, heading, vert_spd, turnrate] except ADSBError: pass def update(self, icao24): #check to see that ICAO24 appears in all three records and that the data looks valid complete = (icao24 in self.positions)\ and (icao24 in self.velocities)\ and (icao24 in self.callsigns) if complete: print "FG update: %s" % (self.callsigns[icao24][0]) msg = fg_posmsg(self.callsigns[icao24][0], self.callsigns[icao24][1], self.positions[icao24][0], self.positions[icao24][1], self.positions[icao24][2], self.velocities[icao24][1], self.velocities[icao24][0], self.velocities[icao24][2], self.velocities[icao24][3]).pack() self.sock.send(msg) class fg_header: def __init__(self): self.magic = "FGFS" self.proto = 0x00010001 self.msgid = 0 self.msglen = 0 #in bytes, though they swear it isn't self.replyaddr = 0 #unused self.replyport = 0 #unused self.callsign = "UNKNOWN" self.data = None hdrfmt = '!4sLLLLL8s0L' def pack(self): self.msglen = 32 + len(self.data) packed = struct.pack(self.hdrfmt, self.magic, self.proto, self.msgid, self.msglen, self.replyaddr, self.replyport, self.callsign) return packed #so this appears to work, but FGFS doesn't display it in flight for some reason. not in the chat window either. oh well. class fg_chatmsg(fg_header): def __init__(self, msg): fg_header.__init__(self) self.chatmsg = msg self.msgid = 1 def pack(self): self.chatfmt = '!' + str(len(self.chatmsg)) + 's' #print "Packing with strlen %i " % len(self.chatmsg) self.data = struct.pack(self.chatfmt, self.chatmsg) return fg_header.pack(self) + self.data modelmap = { None: 'Aircraft/777-200/Models/777-200ER.xml', "NO INFO": 'Aircraft/777-200/Models/777-200ER.xml', "LIGHT": 'Aircraft/c172p/Models/c172p.xml', "SMALL": 'Aircraft/CitationX/Models/Citation-X.xml', "LARGE": 'Aircraft/CRJ700-family/Models/CRJ700.xml', "LARGE HIGH VORTEX": 'Aircraft/757-200/Models/757-200.xml', "HEAVY": 'Aircraft/747-200/Models/boeing747-200.xml', "HIGH PERFORMANCE": 'Aircraft/SR71-BlackBird/Models/Blackbird-SR71B.xml', #yeah i know "ROTORCRAFT": 'Aircraft/ec130/Models/ec130b4.xml', "GLIDER": 'Aircraft/ASK21-MI/Models/ask21mi.xml', "BALLOON/BLIMP": 'Aircraft/ZLT-NT/Models/ZLT-NT.xml', "ULTRALIGHT": 'Aircraft/cri-cri/Models/MC-15.xml', "UAV": 'Aircraft/YardStik/Models/yardstik.xml', #hahahaha "SPACECRAFT": 'Aircraft/SpaceShip-One/Models/spaceshipone.xml', "SURFACE EMERGENCY VEHICLE": 'Aircraft/followme/Models/follow_me.xml', #not the best "SURFACE SERVICE VEHICLE": 'Aircraft/pushback/Models/Pushback.xml' } class fg_posmsg(fg_header): def __init__(self, callsign, modelname, lat, lon, alt, hdg, vel, vs, turnrate): #from the above, calculate valid FGFS mp vals #this is the translation layer between ADS-B and FGFS fg_header.__init__(self) self.callsign = callsign if self.callsign is None: self.callsign = "UNKNOWN" self.modelname = modelname if self.modelname not in modelmap: #this should keep people on their toes when strange aircraft types are seen self.model = 'Aircraft/santa/Models/santa.xml' else: self.model = modelmap[self.modelname] self.lat = lat self.lon = lon self.alt = alt self.hdg = hdg self.vel = vel self.vs = vs self.turnrate = turnrate self.msgid = 7 self.time = time.time() self.lag = 0 def pack(self): #this is, in order: #model, time (time.time() is fine), lag, position, orientation, linear vel, angular vel, linear accel, angular accel (accels unused), 0 #position is in ECEF format -- same as mlat uses. what luck! pos = mlat.llh2ecef([self.lat, self.lon, self.alt * 0.3048]) #alt is in meters! #get the rotation quaternion to rotate to local reference frame from lat/lon rotquat = Quat([self.lat, self.lon]) #get the quaternion corresponding to aircraft orientation acquat = Quat([self.hdg, 0, 0]) #rotate aircraft into ECEF frame ecefquat = rotquat * acquat #get it in angle/axis representation (angle, axis) = ecefquat._get_angle_axis() orientation = angle * axis kts_to_ms = 0.514444444 #convert kts to m/s vel_ms = self.vel * kts_to_ms velvec = (vel_ms,0,0) #velocity vector in m/s -- is this in the local frame? looks like [0] is fwd vel, #we'll pretend the a/c is always moving the dir it's pointing turnvec = (0,0,self.turnrate * (math.pi / 180.) ) #turn rates in rad/s [roll, pitch, yaw] accelvec = (0,0,0) turnaccelvec = (0,0,0) self.posfmt = '!96s' + 'd' + 'd' + '3d' + '3f' + '3f' + '3f' + '3f' + '3f' + 'I' self.data = struct.pack(self.posfmt, self.model, self.time, self.lag, pos[0], pos[1], pos[2], orientation[0], orientation[1], orientation[2], velvec[0], velvec[1], velvec[2], turnvec[0], turnvec[1], turnvec[2], accelvec[0], accelvec[1], accelvec[2], turnaccelvec[0], turnaccelvec[1], turnaccelvec[2], 0) return fg_header.pack(self) + self.data if __name__ == '__main__': timeoffset = time.time() iof = open('27augrudi3.txt') localpos = [37.409066,-122.077836] hostname = "localhost" port = 5000 fgout = output_flightgear(localpos, hostname, port) for line in iof: timetosend = float(line.split()[6]) while (time.time() - timeoffset) < timetosend: time.sleep(0.02) fgout.output(line) gr-air-modes-0.0.0.e47992d/python/get_uniq.py000077500000000000000000000010111221634657000205130ustar00rootroot00000000000000#!/usr/bin/env python import sys, re if __name__== '__main__': data = sys.stdin.readlines() icaos = [] num_icaos = 0 for line in data: match = re.match(".*from (\w+)", line) if match is not None: icao = int(match.group(1), 16) icaos.append(icao) #get dupes dupes = sorted([icao for icao in set(icaos) if icaos.count(icao) > 1]) for icao in dupes: print "%x" % icao print "Found non-unique replies from %i aircraft" % len(dupes) gr-air-modes-0.0.0.e47992d/python/gui_model.py000066400000000000000000000222311221634657000206500ustar00rootroot00000000000000#!/usr/bin/env python # Copyright 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # # This file contains data models, view delegates, and associated classes # for handling the GUI back end data model. from PyQt4 import QtCore, QtGui import air_modes import threading, math, time from air_modes.exceptions import * #fades the ICAOs out as their last report gets older, #and display ident if available, ICAO otherwise class ICAOViewDelegate(QtGui.QStyledItemDelegate): def paint(self, painter, option, index): #draw selection rectangle if option.state & QtGui.QStyle.State_Selected: painter.setBrush(QtGui.QPalette().highlight()) painter.drawRect(option.rect) #if there's an ident available, use it. otherwise print the ICAO if index.model().data(index.model().index(index.row(), 9)) != QtCore.QVariant(): paintstr = index.model().data(index.model().index(index.row(), 9)).toString() else: paintstr = index.model().data(index.model().index(index.row(), 0)).toString() last_report = index.model().data(index.model().index(index.row(), 1)).toDouble()[0] age = (time.time() - last_report) max_age = 60. #age at which it grays out #minimum alpha is 0x40 (oldest), max is 0xFF (newest) age = min(age, max_age) alpha = int(0xFF - (0xBF / max_age) * age) painter.setPen(QtGui.QColor(0, 0, 0, alpha)) painter.drawText(option.rect.left()+3, option.rect.top(), option.rect.width(), option.rect.height(), option.displayAlignment, paintstr) #the data model used to display dashboard data. class dashboard_data_model(QtCore.QAbstractTableModel): def __init__(self, parent): QtCore.QAbstractTableModel.__init__(self, parent) self._data = [] self.lock = threading.Lock() self._colnames = ["icao", "seen", "rssi", "latitude", "longitude", "altitude", "speed", "heading", "vertical", "ident", "type", "range", "bearing"] #custom precision limits for display self._precisions = [None, None, None, 6, 6, 0, 0, 0, 0, None, None, 2, 0] for field in self._colnames: self.setHeaderData(self._colnames.index(field), QtCore.Qt.Horizontal, field) def rowCount(self, parent=QtCore.QVariant()): return len(self._data) def columnCount(self, parent=QtCore.QVariant()): return len(self._colnames) def data(self, index, role=QtCore.Qt.DisplayRole): if not index.isValid(): return QtCore.QVariant() if index.row() >= self.rowCount(): return QtCore.QVariant() if index.column() >= self.columnCount(): return QtCore.QVariant() if (role != QtCore.Qt.DisplayRole) and (role != QtCore.Qt.EditRole): return QtCore.QVariant() if self._data[index.row()][index.column()] is None: return QtCore.QVariant() else: #if there's a dedicated precision for that column, print it out with the specified precision. #this only works well if you DON'T have other views/widgets that depend on numeric data coming out. #i don't like this, but it works for now. unfortunately it seems like Qt doesn't give you a #good alternative. if self._precisions[index.column()] is not None: return QtCore.QVariant("%.*f" % (self._precisions[index.column()], self._data[index.row()][index.column()])) else: if self._colnames[index.column()] == "icao": return QtCore.QVariant("%06x" % self._data[index.row()][index.column()]) #return as hex string else: return QtCore.QVariant(self._data[index.row()][index.column()]) def setData(self, index, value, role=QtCore.Qt.EditRole): self.lock.acquire() if not index.isValid(): return False if index.row() >= self.rowCount(): return False if index.column >= self.columnCount(): return False if role != QtCore.Qt.EditRole: return False self._data[index.row()][index.column()] = value self.lock.release() #addRecord implements an upsert on self._data; that is, #it updates the row if the ICAO exists, or else it creates a new row. def addRecord(self, record): self.lock.acquire() icaos = [x[0] for x in self._data] if record["icao"] in icaos: row = icaos.index(record["icao"]) for column in record: self._data[row][self._colnames.index(column)] = record[column] #create index to existing row and tell the model everything's changed in this row #or inside the for loop, use dataChanged on each changed field (might be better) self.dataChanged.emit(self.createIndex(row, 0), self.createIndex(row, len(self._colnames)-1)) #only create records for ICAOs with ADS-B reports elif ("latitude" or "speed" or "ident") in record: #find new inserted row number icaos.append(record["icao"]) newrowoffset = sorted(icaos).index(record["icao"]) self.beginInsertRows(QtCore.QModelIndex(), newrowoffset, newrowoffset) newrecord = [None for x in xrange(len(self._colnames))] for col in xrange(0, len(self._colnames)): if self._colnames[col] in record: newrecord[col] = record[self._colnames[col]] self._data.append(newrecord) self._data = sorted(self._data, key = lambda x: x[0]) #sort by icao self.endInsertRows() self.lock.release() self.prune() #weeds out ICAOs older than 1 minute def prune(self): self.lock.acquire() for (index,row) in enumerate(self._data): if time.time() - row[1] >= 60: self.beginRemoveRows(QtCore.QModelIndex(), index, index) self._data.pop(index) self.endRemoveRows() self.lock.release() class dashboard_output: def __init__(self, cprdec, model, pub): self.model = model self._cpr = cprdec pub.subscribe("modes_dl", self.output) def output(self, msg): try: msgtype = msg.data["df"] now = time.time() newrow = {"rssi": msg.rssi, "seen": now} if msgtype in [0, 4, 20]: newrow["altitude"] = air_modes.altitude.decode_alt(msg.data["ac"], True) newrow["icao"] = msg.ecc self.model.addRecord(newrow) elif msgtype == 17: icao = msg.data["aa"] newrow["icao"] = icao subtype = msg.data["ftc"] if subtype == 4: (ident, actype) = air_modes.parseBDS08(msg.data) newrow["ident"] = ident newrow["type"] = actype elif 5 <= subtype <= 8: (ground_track, decoded_lat, decoded_lon, rnge, bearing) = air_modes.parseBDS06(msg.data, self._cpr) newrow["heading"] = ground_track newrow["latitude"] = decoded_lat newrow["longitude"] = decoded_lon newrow["altitude"] = 0 if rnge is not None: newrow["range"] = rnge newrow["bearing"] = bearing elif 9 <= subtype <= 18: (altitude, decoded_lat, decoded_lon, rnge, bearing) = air_modes.parseBDS05(msg.data, self._cpr) newrow["altitude"] = altitude newrow["latitude"] = decoded_lat newrow["longitude"] = decoded_lon if rnge is not None: newrow["range"] = rnge newrow["bearing"] = bearing elif subtype == 19: subsubtype = msg.data["sub"] velocity = None heading = None vert_spd = None if subsubtype == 0: (velocity, heading, vert_spd) = air_modes.parseBDS09_0(msg.data) elif 1 <= subsubtype <= 2: (velocity, heading, vert_spd) = air_modes.parseBDS09_1(msg.data) newrow["speed"] = velocity newrow["heading"] = heading newrow["vertical"] = vert_spd self.model.addRecord(newrow) except ADSBError: return gr-air-modes-0.0.0.e47992d/python/html_template.py000066400000000000000000000115351221634657000215500ustar00rootroot00000000000000#!/usr/bin/env python #HTML template for Mode S map display #Nick Foster, 2013 def html_template(my_position, json_file): if my_position is None: my_position = [37, -122] return """ ADS-B Aircraft Map
""" % (my_position[0], my_position[1], json_file) gr-air-modes-0.0.0.e47992d/python/kml.py000066400000000000000000000245071221634657000174770ustar00rootroot00000000000000# # Copyright 2010 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import sqlite3 import string, math, threading, time class output_kml(threading.Thread): def __init__(self, filename, dbname, localpos, lock, timeout=5): threading.Thread.__init__(self) self._dbname = dbname self._filename = filename self.my_coords = localpos self._timeout = timeout self._lock = lock self.shutdown = threading.Event() self.finished = threading.Event() self.setDaemon(1) self.start() def run(self): self._db = sqlite3.connect(self._dbname) #read from the db while self.shutdown.is_set() is False: time.sleep(self._timeout) self.writekml() self._db.close() self._db = None self.finished.set() def close(self): self.shutdown.set() self.finished.wait(0.2) #there's a bug here where self._timeout is long and close() has #to wait for the sleep to expire before closing. we just bail #instead with the 0.2 param above. def writekml(self): kmlstr = self.genkml() if kmlstr is not None: f = open(self._filename, 'w') f.write(kmlstr) f.close() def locked_execute(self, c, query): with self._lock: c.execute(query) def draw_circle(self, center, rng): retstr = "" steps=30 #so we're going to do this by computing a bearing angle based on the steps, and then compute the coordinate of a line extended from the center point to that range. [center_lat, center_lon] = center esquared = (1/298.257223563)*(2-(1/298.257223563)) earth_radius_mi = 3963.19059 #here we figure out the circumference of the latitude ring #which tells us how wide one line of longitude is at our latitude lat_circ = earth_radius_mi * math.cos(center_lat) #the circumference of the longitude ring will be equal to the circumference of the earth lat_rad = math.radians(center_lat) lon_rad = math.radians(center_lon) tmp0 = rng / earth_radius_mi for i in range(0, steps+1): bearing = i*(2*math.pi/steps) #in radians lat_out = math.degrees(math.asin(math.sin(lat_rad)*math.cos(tmp0) + math.cos(lat_rad)*math.sin(tmp0)*math.cos(bearing))) lon_out = center_lon + math.degrees(math.atan2(math.sin(bearing)*math.sin(tmp0)*math.cos(lat_rad), math.cos(tmp0)-math.sin(lat_rad)*math.sin(math.radians(lat_out)))) retstr += " %.8f,%.8f, 0" % (lon_out, lat_out,) retstr = string.lstrip(retstr) return retstr def genkml(self): #first let's draw the static content retstr="""\n\n\n\t\n\t\n\t""" if self.my_coords is not None: retstr += """\n\t\n\t\tRange rings\n\t\t0""" for rng in [100, 200, 300]: retstr += """\n\t\t\n\t\t\t%inm\n\t\t\t#rangering\n\t\t\t\n\t\t\t\t%s\n\t\t\t\n\t\t""" % (rng, self.draw_circle(self.my_coords, rng),) retstr += """\t\n""" retstr += """\t\n\t\tAircraft locations\n\t\t0""" #read the database and add KML q = "select distinct icao from positions where seen > datetime('now', '-5 minute')" c = self._db.cursor() self.locked_execute(c, q) icaolist = c.fetchall() #now we have a list icaolist of all ICAOs seen in the last 5 minutes for icao in icaolist: #print "ICAO: %x" % icao q = "select * from positions where icao=%i and seen > datetime('now', '-2 hour') ORDER BY seen DESC" % icao self.locked_execute(c, q) track = c.fetchall() #print "Track length: %i" % len(track) if len(track) != 0: lat = track[0][3] if lat is None: lat = 0 lon = track[0][4] if lon is None: lon = 0 alt = track[0][2] if alt is None: alt = 0 metric_alt = alt * 0.3048 #google earth takes meters, the commie bastards trackstr = "" for pos in track: trackstr += " %f,%f,%f" % (pos[4], pos[3], pos[2]*0.3048) trackstr = string.lstrip(trackstr) else: alt = 0 metric_alt = 0 lat = 0 lon = 0 trackstr = str("") #now get metadata q = "select ident from ident where icao=%i" % icao self.locked_execute(c, q) r = c.fetchall() if len(r) != 0: ident = r[0][0] else: ident="" #if ident is None: ident = "" #get most recent speed/heading/vertical q = "select seen, speed, heading, vertical from vectors where icao=%i order by seen desc limit 1" % icao self.locked_execute(c, q) r = c.fetchall() if len(r) != 0: seen = r[0][0] speed = r[0][1] heading = r[0][2] vertical = r[0][3] else: seen = 0 speed = 0 heading = 0 vertical = 0 #now generate some KML retstr+= "\n\t\t\n\t\t\t%s\n\t\t\t\n\t\t\t#airplane\n\t\t\t\n\t\t\t\tHeading: %i
Speed: %i
Vertical speed: %i
ICAO: %x
Last seen: %s]]>\n\t\t\t
\n\t\t\t\n\t\t\t\tabsolute\n\t\t\t\t1\n\t\t\t\t%s,%s,%i\n\t\t\t\n\t\t
" % (ident, heading, alt, heading, speed, vertical, icao[0], seen, lon, lat, metric_alt, ) retstr+= "\n\t\t\n\t\t\t#track\n\t\t\t\n\t\t\t\t0\n\t\t\t\tabsolute\n\t\t\t\t%s\n\t\t\t\n\t\t" % (trackstr,) retstr+= '\n\t
\n
\n
' return retstr #we just inherit from output_kml because we're doing the same thing, only in a different format. class output_jsonp(output_kml): def set_highlight(self, icao): self.highlight = icao def genkml(self): retstr="""jsonp_callback([""" # if self.my_coords is not None: # retstr += """\n\t\n\t\tRange rings\n\t\t0""" # for rng in [100, 200, 300]: # retstr += """\n\t\t\n\t\t\t%inm\n\t\t\t#rangering\n\t\t\t\n\t\t\t\t%s\n\t\t\t\n\t\t""" % (rng, self.draw_circle(self.my_coords, rng),) # retstr += """\t\n""" # retstr += """\t\n\t\tAircraft locations\n\t\t0""" #read the database and add KML q = "select distinct icao from positions where seen > datetime('now', '-1 minute')" c = self._db.cursor() self.locked_execute(c, q) icaolist = c.fetchall() #now we have a list icaolist of all ICAOs seen in the last 5 minutes for icao in icaolist: icao = icao[0] #now get metadata q = "select ident, type from ident where icao=%i" % icao self.locked_execute(c, q) r = c.fetchall() if len(r) != 0: ident = r[0][0] actype = r[0][1] else: ident="" actype = "" if ident is None: ident = "" #get most recent speed/heading/vertical q = "select seen, speed, heading, vertical from vectors where icao=%i order by seen desc limit 1" % icao self.locked_execute(c, q) r = c.fetchall() if len(r) != 0: seen = r[0][0] speed = r[0][1] heading = r[0][2] vertical = r[0][3] else: seen = 0 speed = 0 heading = 0 vertical = 0 q = "select lat, lon, alt from positions where icao=%i order by seen desc limit 1" % icao self.locked_execute(c, q) r = c.fetchall() if len(r) != 0: lat = r[0][0] lon = r[0][1] alt = r[0][2] else: lat = 0 lon = 0 alt = 0 highlight = 0 if hasattr(self, 'highlight'): if self.highlight == icao: highlight = 1 #now generate some JSONP retstr+= """{"icao": "%.6x", "lat": %f, "lon": %f, "alt": %i, "hdg": %i, "speed": %i, "vertical": %i, "ident": "%s", "type": "%s", "highlight": %i},""" % (icao, lat, lon, alt, heading, speed, vertical, ident, actype, highlight) retstr+= """]);""" return retstr gr-air-modes-0.0.0.e47992d/python/mlat.py000077500000000000000000000262411221634657000176510ustar00rootroot00000000000000#!/usr/bin/python import math import numpy from scipy.ndimage import map_coordinates #functions for multilateration. #this library is more or less based around the so-called "GPS equation", the canonical #iterative method for getting position from GPS satellite time difference of arrival data. #here, instead of multiple orbiting satellites with known time reference and position, #we have multiple fixed stations with known time references (GPSDO, hopefully) and known #locations (again, GPSDO). #NB: because of the way this solver works, at least 3 stations and timestamps #are required. this function will not return hyperbolae for underconstrained systems. #TODO: get HDOP out of this so we can draw circles of likely position and indicate constraint ########################END NOTES####################################### #this is a 10x10-degree WGS84 geoid datum, in meters relative to the WGS84 reference ellipsoid. given the maximum slope, you should probably interpolate. #NIMA suggests a 2x2 interpolation using four neighbors. we'll go cubic spline JUST BECAUSE WE CAN wgs84_geoid = numpy.array([[13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13], #90N [3,1,-2,-3,-3,-3,-1,3,1,5,9,11,19,27,31,34,33,34,33,34,28,23,17,13,9,4,4,1,-2,-2,0,2,3,2,1,1], #80N [2,2,1,-1,-3,-7,-14,-24,-27,-25,-19,3,24,37,47,60,61,58,51,43,29,20,12,5,-2,-10,-14,-12,-10,-14,-12,-6,-2,3,6,4], #70N [2,9,17,10,13,1,-14,-30,-39,-46,-42,-21,6,29,49,65,60,57,47,41,21,18,14,7,-3,-22,-29,-32,-32,-26,-15,-2,13,17,19,6], #60N [-8,8,8,1,-11,-19,-16,-18,-22,-35,-40,-26,-12,24,45,63,62,59,47,48,42,28,12,-10,-19,-33,-43,-42,-43,-29,-2,17,23,22,6,2], #50N [-12,-10,-13,-20,-31,-34,-21,-16,-26,-34,-33,-35,-26,2,33,59,52,51,52,48,35,40,33,-9,-28,-39,-48,-59,-50,-28,3,23,37,18,-1,-11], #40N [-7,-5,-8,-15,-28,-40,-42,-29,-22,-26,-32,-51,-40,-17,17,31,34,44,36,28,29,17,12,-20,-15,-40,-33,-34,-34,-28,7,29,43,20,4,-6], #30N [5,10,7,-7,-23,-39,-47,-34,-9,-10,-20,-45,-48,-32,-9,17,25,31,31,26,15,6,1,-29,-44,-61,-67,-59,-36,-11,21,39,49,39,22,10], #20N [13,12,11,2,-11,-28,-38,-29,-10,3,1,-11,-41,-42,-16,3,17,33,22,23,2,-3,-7,-36,-59,-90,-95,-63,-24,12,53,60,58,46,36,26], #10N [22,16,17,13,1,-12,-23,-20,-14,-3,14,10,-15,-27,-18,3,12,20,18,12,-13,-9,-28,-49,-62,-89,-102,-63,-9,33,58,73,74,63,50,32], #0 [36,22,11,6,-1,-8,-10,-8,-11,-9,1,32,4,-18,-13,-9,4,14,12,13,-2,-14,-25,-32,-38,-60,-75,-63,-26,0,35,52,68,76,64,52], #10S [51,27,10,0,-9,-11,-5,-2,-3,-1,9,35,20,-5,-6,-5,0,13,17,23,21,8,-9,-10,-11,-20,-40,-47,-45,-25,5,23,45,58,57,63], #20S [46,22,5,-2,-8,-13,-10,-7,-4,1,9,32,16,4,-8,4,12,15,22,27,34,29,14,15,15,7,-9,-25,-37,-39,-23,-14,15,33,34,45], #30S [21,6,1,-7,-12,-12,-12,-10,-7,-1,8,23,15,-2,-6,6,21,24,18,26,31,33,39,41,30,24,13,-2,-20,-32,-33,-27,-14,-2,5,20], #40S [-15,-18,-18,-16,-17,-15,-10,-10,-8,-2,6,14,13,3,3,10,20,27,25,26,34,39,45,45,38,39,28,13,-1,-15,-22,-22,-18,-15,-14,-10], #50S [-45,-43,-37,-32,-30,-26,-23,-22,-16,-10,-2,10,20,20,21,24,22,17,16,19,25,30,35,35,33,30,27,10,-2,-14,-23,-30,-33,-29,-35,-43], #60S [-61,-60,-61,-55,-49,-44,-38,-31,-25,-16,-6,1,4,5,4,2,6,12,16,16,17,21,20,26,26,22,16,10,-1,-16,-29,-36,-46,-55,-54,-59], #70S [-53,-54,-55,-52,-48,-42,-38,-38,-29,-26,-26,-24,-23,-21,-19,-16,-12,-8,-4,-1,1,4,4,6,5,4,2,-6,-15,-24,-33,-40,-48,-50,-53,-52], #80S [-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30,-30]], #90S dtype=numpy.float) #ok this calculates the geoid offset from the reference ellipsoid #combined with LLH->ECEF this gets you XYZ for a ground-referenced point def wgs84_height(lat, lon): yi = numpy.array([9-lat/10.0]) xi = numpy.array([18+lon/10.0]) return float(map_coordinates(wgs84_geoid, [yi, xi])) #WGS84 reference ellipsoid constants wgs84_a = 6378137.0 wgs84_b = 6356752.314245 wgs84_e2 = 0.0066943799901975848 wgs84_a2 = wgs84_a**2 #to speed things up a bit wgs84_b2 = wgs84_b**2 #convert ECEF to lat/lon/alt without geoid correction #returns alt in meters def ecef2llh((x,y,z)): ep = math.sqrt((wgs84_a2 - wgs84_b2) / wgs84_b2) p = math.sqrt(x**2+y**2) th = math.atan2(wgs84_a*z, wgs84_b*p) lon = math.atan2(y, x) lat = math.atan2(z+ep**2*wgs84_b*math.sin(th)**3, p-wgs84_e2*wgs84_a*math.cos(th)**3) N = wgs84_a / math.sqrt(1-wgs84_e2*math.sin(lat)**2) alt = p / math.cos(lat) - N lon *= (180. / math.pi) lat *= (180. / math.pi) return [lat, lon, alt] #convert lat/lon/alt coords to ECEF without geoid correction, WGS84 model #remember that alt is in meters def llh2ecef((lat, lon, alt)): lat *= (math.pi / 180.0) lon *= (math.pi / 180.0) n = lambda x: wgs84_a / math.sqrt(1 - wgs84_e2*(math.sin(x)**2)) x = (n(lat) + alt)*math.cos(lat)*math.cos(lon) y = (n(lat) + alt)*math.cos(lat)*math.sin(lon) z = (n(lat)*(1-wgs84_e2)+alt)*math.sin(lat) return [x,y,z] #do both of the above to get a geoid-corrected x,y,z position def llh2geoid((lat, lon, alt)): (x,y,z) = llh2ecef((lat, lon, alt + wgs84_height(lat, lon))) return [x,y,z] c = 299792458 / 1.0003 #modified for refractive index of air, why not #this function is the iterative solver core of the mlat function below #we use limit as a goal to stop solving when we get "close enough" (error magnitude in meters for that iteration) #basically 20 meters is way less than the anticipated error of the system so it doesn't make sense to continue #it's possible this could fail in situations where the solution converges slowly #TODO: this fails to converge for some seriously advantageous geometry def mlat_iter(rel_stations, prange_obs, xguess = [0,0,0], limit = 20, maxrounds = 100): xerr = [1e9, 1e9, 1e9] rounds = 0 while numpy.linalg.norm(xerr) > limit: prange_est = [[numpy.linalg.norm(station - xguess)] for station in rel_stations] dphat = prange_obs - prange_est H = numpy.array([(numpy.array(-rel_stations[row,:])+xguess) / prange_est[row] for row in range(0,len(rel_stations))]) #now we have H, the Jacobian, and can solve for residual error xerr = numpy.linalg.lstsq(H, dphat)[0].flatten() xguess += xerr #print xguess, xerr rounds += 1 if rounds > maxrounds: raise Exception("Failed to converge!") break return xguess #func mlat: #uses a modified GPS pseudorange solver to locate aircraft by multilateration. #replies is a list of reports, in ([lat, lon, alt], timestamp) format #altitude is the barometric altitude of the aircraft as returned by the aircraft #returns the estimated position of the aircraft in (lat, lon, alt) geoid-corrected WGS84. #let's make it take a list of tuples so we can sort by them def mlat(replies, altitude): sorted_replies = sorted(replies, key=lambda time: time[1]) stations = [sorted_reply[0] for sorted_reply in sorted_replies] timestamps = [sorted_reply[1] for sorted_reply in sorted_replies] me_llh = stations[0] me = llh2geoid(stations[0]) #list of stations in XYZ relative to me rel_stations = [numpy.array(llh2geoid(station)) - numpy.array(me) for station in stations[1:]] rel_stations.append([0,0,0] - numpy.array(me)) rel_stations = numpy.array(rel_stations) #convert list of arrays to 2d array #differentiate the timestamps to get TDOA, multiply by c to get pseudorange prange_obs = [[c*(stamp-timestamps[0])] for stamp in timestamps[1:]] #so here we calc the estimated pseudorange to the center of the earth, using station[0] as a reference point for the geoid #in other words, we say "if the aircraft were directly overhead of station[0], this is the prange to the center of the earth" #this is a necessary approximation since we don't know the location of the aircraft yet #if the dang earth were actually round this wouldn't be an issue prange_obs.append( [numpy.linalg.norm(llh2ecef((me_llh[0], me_llh[1], altitude)))] ) #use ECEF not geoid since alt is MSL not GPS prange_obs = numpy.array(prange_obs) #xguess = llh2ecef([37.617175,-122.400843, 8000])-numpy.array(me) #xguess = [0,0,0] #start our guess directly overhead, who cares xguess = numpy.array(llh2ecef([me_llh[0], me_llh[1], altitude])) - numpy.array(me) xyzpos = mlat_iter(rel_stations, prange_obs, xguess) llhpos = ecef2llh(xyzpos+me) #now, we could return llhpos right now and be done with it. #but the assumption we made above, namely that the aircraft is directly above the #nearest station, results in significant error due to the oblateness of the Earth's geometry. #so now we solve AGAIN, but this time with the corrected pseudorange of the aircraft altitude #this might not be really useful in practice but the sim shows >50m errors without it #and <4cm errors with it, not that we'll get that close in reality but hey let's do it right prange_obs[-1] = [numpy.linalg.norm(llh2ecef((llhpos[0], llhpos[1], altitude)))] xyzpos_corr = mlat_iter(rel_stations, prange_obs, xyzpos) #start off with a really close guess llhpos = ecef2llh(xyzpos_corr+me) #and now, what the hell, let's try to get dilution of precision data #avec is the unit vector of relative ranges to the aircraft from each of the stations # for i in range(len(avec)): # avec[i] = numpy.array(avec[i]) / numpy.linalg.norm(numpy.array(avec[i])) # numpy.append(avec, [[-1],[-1],[-1],[-1]], 1) #must be # of stations # doparray = numpy.linalg.inv(avec.T*avec) #the diagonal elements of doparray will be the x, y, z DOPs. return llhpos if __name__ == '__main__': #here's some test data to validate the algorithm teststations = [[37.76225, -122.44254, 100], [37.680016,-121.772461, 100], [37.385844,-122.083082, 100], [37.701207,-122.309418, 100]] testalt = 8000 testplane = numpy.array(llh2ecef([37.617175,-122.400843, testalt])) testme = llh2geoid(teststations[0]) teststamps = [10, 10 + numpy.linalg.norm(testplane-numpy.array(llh2geoid(teststations[1]))) / c, 10 + numpy.linalg.norm(testplane-numpy.array(llh2geoid(teststations[2]))) / c, 10 + numpy.linalg.norm(testplane-numpy.array(llh2geoid(teststations[3]))) / c, ] print teststamps replies = [] for i in range(0, len(teststations)): replies.append((teststations[i], teststamps[i])) ans = mlat(replies, testalt) error = numpy.linalg.norm(numpy.array(llh2ecef(ans))-numpy.array(testplane)) range = numpy.linalg.norm(llh2geoid(ans)-numpy.array(testme)) print testplane-testme print ans print "Error: %.2fm" % (error) print "Range: %.2fkm (from first station in list)" % (range/1000) gr-air-modes-0.0.0.e47992d/python/mlat_client.py000066400000000000000000000055241221634657000212050ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # #multilateration client #outputs stamps to server, receives multilaterated outputs back import socket, pickle, time, sys import air_modes from gnuradio import gr pickle_prot = 0 #pickle_prot = pickle.HIGHEST_PROTOCOL class client_info: def __init__(self): self.name = "" self.position = [] self.offset_secs = 0 self.offset_frac_secs = 0.0 self.time_source = None class mlat_client: def __init__(self, queue, position, server_addr, time_source): self._queue = queue self._pos = position self._name = socket.gethostname() #connect to server self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._sock.setblocking(1) self._sock.connect((server_addr, 19005)) info = client_info() info.name = self._name info.position = self._pos info.time_source = time_source #"gpsdo" or None self._sock.send(pickle.dumps(info)) reply = self._sock.recv(1024) if reply != "HELO": #i know, shut up raise Exception("Invalid reply from server: %s" % reply) self._sock.setblocking(0) self._remnant = None def __del__(self): self._sock.close() #send a stamped report to the server def output(self, message): self._sock.send(message+"\n") #this is called from the update() method list of the main app thread def get_mlat_positions(self): msg = None try: msg = self._sock.recv(1024) except socket.error: pass if msg: for line in msg.splitlines(True): if line.endswith("\n"): if self._remnant: line = self._remnant + line self._remnant = None self._queue.insert_tail(gr.message_from_string(line)) else: if self._remnant is not None: raise Exception("Malformed data: " + line) else: self._remnant = line gr-air-modes-0.0.0.e47992d/python/mlat_types.py000066400000000000000000000000011221634657000210540ustar00rootroot00000000000000 gr-air-modes-0.0.0.e47992d/python/msprint.py000066400000000000000000000213651221634657000204070ustar00rootroot00000000000000# # Copyright 2010, 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import time, os, sys from string import split, join import air_modes from air_modes.exceptions import * import math #TODO get rid of class and convert to functions #no need for class here class output_print: def __init__(self, cpr, publisher, callback=None): self._cpr = cpr self._callback = callback #sub to every function that starts with "handle" self._fns = [int(l[6:]) for l in dir(self) if l.startswith("handle")] for i in self._fns: publisher.subscribe("type%i_dl" % i, getattr(self, "handle%i" % i)) publisher.subscribe("modes_dl", self.catch_nohandler) @staticmethod def prefix(msg): return "(%i %.8f) " % (msg.rssi, msg.timestamp) def _print(self, msg): if self._callback is None: print msg else: self._callback(msg) def catch_nohandler(self, msg): if msg.data.get_type() not in self._fns: retstr = output_print.prefix(msg) retstr += "No handler for message type %i" % msg.data.get_type() if "aa" not in msg.data.fields: retstr += " from %.6x" % msg.ecc else: retstr += " from %.6x" % msg.data["aa"] self._print(retstr) def handle0(self, msg): try: retstr = output_print.prefix(msg) retstr += "Type 0 (short A-A surveillance) from %x at %ift" % (msg.ecc, air_modes.decode_alt(msg.data["ac"], True)) ri = msg.data["ri"] if ri == 0: retstr += " (No TCAS)" elif ri == 2: retstr += " (TCAS resolution inhibited)" elif ri == 3: retstr += " (Vertical TCAS resolution only)" elif ri == 4: retstr += " (Full TCAS resolution)" elif ri == 9: retstr += " (speed <75kt)" elif ri > 9: retstr += " (speed %i-%ikt)" % (75 * (1 << (ri-10)), 75 * (1 << (ri-9))) else: raise ADSBError except ADSBError: return if msg.data["vs"] is 1: retstr += " (aircraft is on the ground)" self._print(retstr) @staticmethod def fs_text(fs): if fs == 1: return " (aircraft is on the ground)" elif fs == 2: return " (AIRBORNE ALERT)" elif fs == 3: return " (GROUND ALERT)" elif fs == 4: return " (SPI ALERT)" elif fs == 5: return " (SPI)" else: raise ADSBError def handle4(self, msg): try: retstr = output_print.prefix(msg) retstr += "Type 4 (short surveillance altitude reply) from %x at %ift" % (msg.ecc, air_modes.decode_alt(msg.data["ac"], True)) retstr += output_print.fs_text(msg.data["fs"]) except ADSBError: return self._print(retstr) def handle5(self, msg): try: retstr = output_print.prefix(msg) retstr += "Type 5 (short surveillance ident reply) from %x with ident %i" % (msg.ecc, air_modes.decode_id(msg.data["id"])) retstr += output_print.fs_text(msg.data["fs"]) except ADSBError: return self._print(retstr) def handle11(self, msg): try: retstr = output_print.prefix(msg) retstr += "Type 11 (all call reply) from %x in reply to interrogator %i with capability level %i" % (msg.data["aa"], msg.ecc & 0xF, msg.data["ca"]+1) except ADSBError: return self._print(retstr) #the only one which requires state def handle17(self, msg): icao24 = msg.data["aa"] bdsreg = msg.data["me"].get_type() retstr = output_print.prefix(msg) try: if bdsreg == 0x08: (ident, typestring) = air_modes.parseBDS08(msg.data) retstr += "Type 17 BDS0,8 (ident) from %x type %s ident %s" % (icao24, typestring, ident) elif bdsreg == 0x06: [ground_track, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS06(msg.data, self._cpr) retstr += "Type 17 BDS0,6 (surface report) from %x at (%.6f, %.6f) ground track %i" % (icao24, decoded_lat, decoded_lon, ground_track) if rnge is not None and bearing is not None: retstr += " (%.2f @ %.0f)" % (rnge, bearing) elif bdsreg == 0x05: [altitude, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS05(msg.data, self._cpr) retstr += "Type 17 BDS0,5 (position report) from %x at (%.6f, %.6f)" % (icao24, decoded_lat, decoded_lon) if rnge is not None and bearing is not None: retstr += " (" + "%.2f" % rnge + " @ " + "%.0f" % bearing + ")" retstr += " at " + str(altitude) + "ft" elif bdsreg == 0x09: subtype = msg.data["bds09"].get_type() if subtype == 0: [velocity, heading, vert_spd, turnrate] = air_modes.parseBDS09_0(msg.data) retstr += "Type 17 BDS0,9-%i (track report) from %x with velocity %.0fkt heading %.0f VS %.0f turn rate %.0f" \ % (subtype, icao24, velocity, heading, vert_spd, turnrate) elif subtype == 1: [velocity, heading, vert_spd] = air_modes.parseBDS09_1(msg.data) retstr += "Type 17 BDS0,9-%i (track report) from %x with velocity %.0fkt heading %.0f VS %.0f" % (subtype, icao24, velocity, heading, vert_spd) elif subtype == 3: [mag_hdg, vel_src, vel, vert_spd, geo_diff] = air_modes.parseBDS09_3(msg.data) retstr += "Type 17 BDS0,9-%i (air course report) from %x with %s %.0fkt magnetic heading %.0f VS %.0f geo. diff. from baro. alt. %.0fft" \ % (subtype, icao24, vel_src, vel, mag_hdg, vert_spd, geo_diff) else: retstr += "Type 17 BDS0,9-%i from %x not implemented" % (subtype, icao24) elif bdsreg == 0x62: emerg_str = air_modes.parseBDS62(data) retstr += "Type 17 BDS6,2 (emergency) from %x type %s" % (icao24, emerg_str) else: retstr += "Type 17 with FTC=%i from %x not implemented" % (msg.data["ftc"], icao24) except ADSBError: return self._print(retstr) def printTCAS(self, msg): msgtype = msg.data["df"] if msgtype == 16: bds1 = msg.data["vds1"] bds2 = msg.data["vds2"] else: bds1 = msg.data["bds1"] bds2 = msg.data["bds2"] retstr = output_print.prefix(msg) if bds2 != 0: retstr += "No handler in type %i for BDS2 == %i from %x" % (msgtype, bds2, msg.ecc) elif bds1 == 0: retstr += "No handler in type %i for BDS1 == 0 from %x" % (msgtype, msg.ecc) elif bds1 == 1: retstr += "Type %i link capability report from %x: ACS: 0x%x, BCS: 0x%x, ECS: 0x%x, continues %i" \ % (msgtype, msg.ecc, msg.data["acs"], msg.data["bcs"], msg.data["ecs"], msg.data["cfs"]) elif bds1 == 2: retstr += "Type %i identification from %x with text %s" % (msgtype, msg.ecc, air_modes.parseMB_id(msg.data)) elif bds1 == 3: retstr += "Type %i TCAS report from %x: " % (msgtype, msg.ecc) tti = msg.data["tti"] if msgtype == 16: (resolutions, complements, rat, mte) = air_modes.parse_TCAS_CRM(msg.data) retstr += "advised: %s complement: %s" % (resolutions, complements) else: if tti == 1: (resolutions, complements, rat, mte, threat_id) = air_modes.parseMB_TCAS_threatid(msg.data) retstr += "threat ID: %x advised: %s complement: %s" % (threat_id, resolutions, complements) elif tti == 2: (resolutions, complements, rat, mte, threat_alt, threat_range, threat_bearing) = air_modes.parseMB_TCAS_threatloc(msg.data) retstr += "range: %i bearing: %i alt: %i advised: %s complement: %s" % (threat_range, threat_bearing, threat_alt, resolutions, complements) else: rat = 0 mte = 0 retstr += " (no handler for TTI=%i)" % tti if mte == 1: retstr += " (multiple threats)" if rat == 1: retstr += " (resolved)" else: retstr += "No handler for type %i, BDS1 == %i from %x" % (msgtype, bds1, msg.ecc) if(msgtype == 20 or msgtype == 16): retstr += " at %ift" % air_modes.decode_alt(msg.data["ac"], True) else: retstr += " ident %x" % air_modes.decode_id(msg.data["id"]) self._print(retstr) handle16 = printTCAS handle20 = printTCAS handle21 = printTCAS gr-air-modes-0.0.0.e47992d/python/parse.py000066400000000000000000000364051221634657000200260ustar00rootroot00000000000000# # Copyright 2010, 2012 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import time, os, sys from string import split, join from altitude import decode_alt import math import air_modes from air_modes.exceptions import * #this implements a packet class which can retrieve its own fields. class data_field: def __init__(self, data): self.data = data self.fields = self.parse() types = { } offset = 1 #field offset applied to all fields. used for offsetting #subtypes to reconcile with the spec. Really just for readability. #get a particular field from the data def __getitem__(self, fieldname): mytype = self.get_type() if mytype in self.types: if fieldname in self.fields: #verify it exists in this packet type return self.fields[fieldname] else: raise FieldNotInPacket(fieldname) else: raise NoHandlerError(mytype) #grab all the fields in the packet as a dict #done once on init so you don't have to iterate down every time you grab a field def parse(self): fields = {} mytype = self.get_type() if mytype in self.types: for field in self.types[mytype]: bits = self.types[self.get_type()][field] if len(bits) == 3: obj = bits[2](self.get_bits(bits[0], bits[1])) fields.update(obj.parse()) fields.update({field: obj}) else: fields.update({field: self.get_bits(bits[0], bits[1])}) else: raise NoHandlerError(mytype) return fields def get_type(self): raise NotImplementedError def get_numbits(self): raise NotImplementedError #retrieve bits from data given the offset and number of bits. #the offset is both left-justified (LSB) and starts at 1, to #correspond to the Mode S spec. Blame them. def get_bits(self, *args): startbit = args[0] num = args[1] bits = 0 try: bits = (self.data \ >> (self.get_numbits() - startbit - num + self.offset)) \ & ((1 << num) - 1) #the exception handler catches instances when you try to shift more than #the number of bits. this can happen when a garbage packet gets through #which reports itself as a short packet but of type long. #TODO: should find more productive way to throw this out except ValueError: pass #print "Short packet received for long packet type: %x" % self.data return bits class bds09_reply(data_field): offset = 6 types = { #BDS0,9 subtype 0 0: {"sub": (6,3), "dew": (10,1), "vew": (11,11), "dns": (22,1), "vns": (23,11), "str": (34,1), "tr": (35,6), "dvr": (41,1), "vr": (42,9)}, #BDS0,9 subtypes 1-2 (differ only in velocity encoding) 1: {"sub": (6,3), "icf": (9,1), "ifr": (10,1), "nuc": (11,3), "dew": (14,1), "vew": (15,10), "dns": (25,1), "vns": (26,10), "vrsrc": (36,1), "dvr": (37,1), "vr": (38,9), "dhd": (49,1), "hd": (50,6)}, #BDS0,9 subtype 3-4 (airspeed and heading) 3: {"sub": (6,3), "icf": (9,1), "ifr": (10,1), "nuc": (11,3), "mhs": (14,1), "hdg": (15,10), "ast": (25,1), "spd": (26,10), "vrsrc": (36,1), "dvr": (37,1), "vr": (38,9), "dhd": (49,1), "hd": (50,6)} } def get_type(self): sub = self.get_bits(6,3) if sub == 0: return 0 if 1 <= sub <= 2: return 1 if 3 <= sub <= 4: return 3 def get_numbits(self): return 51 #type 17 extended squitter data class me_reply(data_field): #types in this format are listed by BDS register #TODO: add comments explaining these fields types = { 0x05: {"ftc": (1,5), "ss": (6,2), "saf": (8,1), "alt": (9,12), "time": (21,1), "cpr": (22,1), "lat": (23,17), "lon": (40,17)}, #airborne position 0x06: {"ftc": (1,5), "mvt": (6,7), "gts": (13,1), "gtk": (14,7), "time": (21,1), "cpr": (22,1), "lat": (23,17), "lon": (40,17)}, #surface position 0x07: {"ftc": (1,5),}, #TODO extended squitter status 0x08: {"ftc": (1,5), "cat": (6,3), "ident": (9,48)}, #extended squitter identification and type 0x09: {"ftc": (1,5), "bds09": (6,51, bds09_reply)}, #0x0A: data link capability report #0x17: common usage capability report #0x18-0x1F: Mode S specific services capability report #0x20: aircraft identification 0x61: {"ftc": (1,5), "eps": (9,3)} } #maps ftc to BDS register def get_type(self): ftc = self.get_bits(1,5) if 1 <= ftc <= 4: return 0x08 elif 5 <= ftc <= 8: return 0x06 elif 9 <= ftc <= 18 and ftc != 15: #FTC 15 does not appear to be valid return 0x05 elif ftc == 19: return 0x09 elif ftc == 28: return 0x61 else: return NoHandlerError(ftc) def get_numbits(self): return 56 #resolves the TCAS reply types from TTI info class tcas_reply(data_field): offset = 61 types = { 0: {"tti": (61,2)}, #UNKNOWN 1: {"tti": (61,2), "tid": (63,26)}, 2: {"tti": (61,2), "tida": (63,13), "tidr": (76,7), "tidb": (83,6)} } def get_type(self): return self.get_bits(61,2) def get_numbits(self): return 28 #extended squitter types 20,21 MB subfield class mb_reply(data_field): offset = 33 #fields offset by 33 to match documentation #types are based on bds1 subfield types = { 0: {"bds1": (33,4), "bds2": (37,4)}, #TODO 1: {"bds1": (33,4), "bds2": (37,4), "cfs": (41,4), "acs": (45,20), "bcs": (65,16), "ecs": (81,8)}, 2: {"bds1": (33,4), "bds2": (37,4), "ais": (41,48)}, 3: {"bds1": (33,4), "bds2": (37,4), "ara": (41,14), "rac": (55,4), "rat": (59,1), "mte": (60,1), "tcas": (61, 28, tcas_reply)} } def get_type(self): bds1 = self.get_bits(33,4) bds2 = self.get_bits(37,4) if bds1 not in (0,1,2,3) or bds2 not in (0,): raise NoHandlerError(bds1) return int(bds1) def get_numbits(self): return 56 # #type MV (extended squitter type 16) subfields # mv_fields = { "ara": (41,14), "mte": (60,1), "rac": (55,4), "rat": (59,1), # "vds": (33,8), "vds1": (33,4), "vds2": (37,4) # } class mv_reply(data_field): offset = 33 types = { "ara": (41,14), "mte": (60,1), "rac": (55,4), "rat": (59,1), "vds": (33,8), "vds1": (33,4), "vds2": (37,4) } def get_type(self): vds1 = self.get_bits(33,4) vds2 = self.get_bits(37,4) if vds1 not in (3,) or vds2 not in (0,): raise NoHandlerError(bds1) return int(vds1) def get_numbits(self): return 56 #the whole Mode S packet type class modes_reply(data_field): types = { 0: {"df": (1,5), "vs": (6,1), "cc": (7,1), "sl": (9,3), "ri": (14,4), "ac": (20,13), "ap": (33,24)}, 4: {"df": (1,5), "fs": (6,3), "dr": (9,5), "um": (14,6), "ac": (20,13), "ap": (33,24)}, 5: {"df": (1,5), "fs": (6,3), "dr": (9,5), "um": (14,6), "id": (20,13), "ap": (33,24)}, 11: {"df": (1,5), "ca": (6,3), "aa": (9,24), "pi": (33,24)}, 16: {"df": (1,5), "vs": (6,1), "sl": (9,3), "ri": (14,4), "ac": (20,13), "mv": (33,56), "ap": (88,24)}, 17: {"df": (1,5), "ca": (6,3), "aa": (9,24), "me": (33,56, me_reply), "pi": (88,24)}, 20: {"df": (1,5), "fs": (6,3), "dr": (9,5), "um": (14,6), "ac": (20,13), "mb": (33,56, mb_reply), "ap": (88,24)}, 21: {"df": (1,5), "fs": (6,3), "dr": (9,5), "um": (14,6), "id": (20,13), "mb": (33,56, mb_reply), "ap": (88,24)}, 24: {"df": (1,5), "ke": (6,1), "nd": (7,4), "md": (11,80), "ap": (88,24)} } def is_long(self): return self.data > (1 << 56) def get_numbits(self): return 112 if self.is_long() else 56 def get_type(self): return self.get_bits(1,5) #unscramble mode A/C-style squawk codes for type 5 replies below def decode_id(id): C1 = 0x1000 A1 = 0x0800 C2 = 0x0400 A2 = 0x0200 #this represents the order in which the bits come C4 = 0x0100 A4 = 0x0080 B1 = 0x0020 D1 = 0x0010 B2 = 0x0008 D2 = 0x0004 B4 = 0x0002 D4 = 0x0001 a = ((id & A1) >> 11) + ((id & A2) >> 8) + ((id & A4) >> 5) b = ((id & B1) >> 5) + ((id & B2) >> 2) + ((id & B4) << 1) c = ((id & C1) >> 12) + ((id & C2) >> 9) + ((id & C4) >> 6) d = ((id & D1) >> 2) + ((id & D2) >> 1) + ((id & D4) << 2) return (a * 1000) + (b * 100) + (c * 10) + d #decode ident squawks def charmap(d): if d > 0 and d < 27: retval = chr(ord("A")+d-1) elif d == 32: retval = " " elif d > 47 and d < 58: retval = chr(ord("0")+d-48) else: retval = " " return retval def parseBDS08(data): categories = [["NO INFO", "RESERVED", "RESERVED", "RESERVED", "RESERVED", "RESERVED", "RESERVED", "RESERVED"],\ ["NO INFO", "SURFACE EMERGENCY VEHICLE", "SURFACE SERVICE VEHICLE", "FIXED OBSTRUCTION", "CLUSTER OBSTRUCTION", "LINE OBSTRUCTION", "RESERVED"],\ ["NO INFO", "GLIDER", "BALLOON/BLIMP", "PARACHUTE", "ULTRALIGHT", "RESERVED", "UAV", "SPACECRAFT"],\ ["NO INFO", "LIGHT", "SMALL", "LARGE", "LARGE HIGH VORTEX", "HEAVY", "HIGH PERFORMANCE", "ROTORCRAFT"]] catstring = categories[data["ftc"]-1][data["cat"]] msg = "" for i in range(0, 8): msg += charmap(data["ident"] >> (42-6*i) & 0x3F) return (msg, catstring) #NOTE: this is stateful -- requires CPR decoder def parseBDS05(data, cprdec): altitude = decode_alt(data["alt"], False) [decoded_lat, decoded_lon, rnge, bearing] = cprdec.decode(data["aa"], data["lat"], data["lon"], data["cpr"], 0) return [altitude, decoded_lat, decoded_lon, rnge, bearing] #NOTE: this is stateful -- requires CPR decoder def parseBDS06(data, cprdec): ground_track = data["gtk"] * 360. / 128 [decoded_lat, decoded_lon, rnge, bearing] = cprdec.decode(data["aa"], data["lat"], data["lon"], data["cpr"], 1) return [ground_track, decoded_lat, decoded_lon, rnge, bearing] def parseBDS09_0(data): #0: ["sub", "dew", "vew", "dns", "vns", "str", "tr", "svr", "vr"], vert_spd = data["vr"] * 32 ud = bool(data["dvr"]) if ud: vert_spd = 0 - vert_spd turn_rate = data["tr"] * 15/62 rl = data["str"] if rl: turn_rate = 0 - turn_rate ns_vel = data["vns"] - 1 ns = bool(data["dns"]) ew_vel = data["vew"] - 1 ew = bool(data["dew"]) velocity = math.hypot(ns_vel, ew_vel) if ew: ew_vel = 0 - ew_vel if ns: ns_vel = 0 - ns_vel heading = math.atan2(ew_vel, ns_vel) * (180.0 / math.pi) if heading < 0: heading += 360 return [velocity, heading, vert_spd, turn_rate] def parseBDS09_1(data): #1: ["sub", "icf", "ifr", "nuc", "dew", "vew", "dns", "vns", "vrsrc", "dvr", "vr", "dhd", "hd"], alt_geo_diff = data["hd"] * 25 above_below = bool(data["dhd"]) if above_below: alt_geo_diff = 0 - alt_geo_diff; vert_spd = float(data["vr"] - 1) * 64 ud = bool(data["dvr"]) if ud: vert_spd = 0 - vert_spd vert_src = bool(data["vrsrc"]) ns_vel = float(data["vns"]) ns = bool(data["dns"]) ew_vel = float(data["vew"]) ew = bool(data["dew"]) subtype = data["sub"] if subtype == 0x02: ns_vel <<= 2 ew_vel <<= 2 velocity = math.hypot(ns_vel, ew_vel) if ew: ew_vel = 0 - ew_vel if ns_vel == 0: heading = 0 else: heading = math.atan(float(ew_vel) / float(ns_vel)) * (180.0 / math.pi) if ns: heading = 180 - heading if heading < 0: heading += 360 return [velocity, heading, vert_spd] def parseBDS09_3(data): #3: {"sub", "icf", "ifr", "nuc", "mhs", "hdg", "ast", "spd", "vrsrc", # "dvr", "vr", "dhd", "hd"} mag_hdg = data["mhs"] * 360. / 1024 vel_src = "TAS" if data["ast"] == 1 else "IAS" vel = data["spd"] if data["sub"] == 4: vel *= 4 vert_spd = float(data["vr"] - 1) * 64 if data["dvr"] == 1: vert_spd = 0 - vert_spd geo_diff = float(data["hd"] - 1) * 25 return [mag_hdg, vel_src, vel, vert_spd, geo_diff] def parseBDS62(data): eps_strings = ["NO EMERGENCY", "GENERAL EMERGENCY", "LIFEGUARD/MEDICAL", "FUEL EMERGENCY", "NO COMMUNICATIONS", "UNLAWFUL INTERFERENCE", "RESERVED", "RESERVED"] return eps_strings[data["eps"]] def parseMB_id(data): #bds1 == 2, bds2 == 0 msg = "" for i in range(0, 8): msg += charmap( data["ais"] >> (42-6*i) & 0x3F) return (msg) def parseMB_TCAS_resolutions(data): #these are LSB because the ICAO are asshats ara_bits = {41: "CLIMB", 42: "DON'T DESCEND", 43: "DON'T DESCEND >500FPM", 44: "DON'T DESCEND >1000FPM", 45: "DON'T DESCEND >2000FPM", 46: "DESCEND", 47: "DON'T CLIMB", 48: "DON'T CLIMB >500FPM", 49: "DON'T CLIMB >1000FPM", 50: "DON'T CLIMB >2000FPM", 51: "TURN LEFT", 52: "TURN RIGHT", 53: "DON'T TURN LEFT", 54: "DON'T TURN RIGHT"} rac_bits = {55: "DON'T DESCEND", 56: "DON'T CLIMB", 57: "DON'T TURN LEFT", 58: "DON'T TURN RIGHT"} ara = data["ara"] rac = data["rac"] #check to see which bits are set resolutions = "" for bit in ara_bits: if ara & (1 << (54-bit)): resolutions += " " + ara_bits[bit] complements = "" for bit in rac_bits: if rac & (1 << (58-bit)): complements += " " + rac_bits[bit] return (resolutions, complements) #rat is 1 if resolution advisory terminated <18s ago #mte is 1 if multiple threats indicated #tti is threat type: 1 if ID, 2 if range/brg/alt #tida is threat altitude in Mode C format def parseMB_TCAS_threatid(data): #bds1==3, bds2==0, TTI==1 #3: {"bds1": (33,4), "bds2": (37,4), "ara": (41,14), "rac": (55,4), "rat": (59,1), # "mte": (60,1), "tti": (61,2), "tida": (63,13), "tidr": (76,7), "tidb": (83,6)} (resolutions, complements) = parseMB_TCAS_resolutions(data) return (resolutions, complements, data["rat"], data["mte"], data["tid"]) def parseMB_TCAS_threatloc(data): #bds1==3, bds2==0, TTI==2 (resolutions, complements) = parseMB_TCAS_resolutions(data) threat_alt = decode_alt(data["tida"], True) return (resolutions, complements, data["rat"], data["mte"], threat_alt, data["tidr"], data["tidb"]) #type 16 Coordination Reply Message def parse_TCAS_CRM(data): (resolutions, complements) = parseMB_TCAS_resolutions(data) return (resolutions, complements, data["rat"], data["mte"]) #this decorator takes a pubsub and returns a function which parses and publishes messages def make_parser(pub): publisher = pub def publish(message): [data, ecc, reference, timestamp] = message.split() try: ret = air_modes.modes_report(modes_reply(int(data, 16)), int(ecc, 16), 10.0*math.log10(max(1e-8,float(reference))), air_modes.stamp(0, float(timestamp))) pub["modes_dl"] = ret pub["type%i_dl" % ret.data.get_type()] = ret except ADSBError: pass return publish gr-air-modes-0.0.0.e47992d/python/qa_gr-air-modes.py000077500000000000000000000037041221634657000216620ustar00rootroot00000000000000#!/usr/bin/env python # # Copyright 2004,2007 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, gr_unittest import gr-air-modes_swig class qa_gr-air-modes (gr_unittest.TestCase): def setUp (self): self.tb = gr.top_block () def tearDown (self): self.tb = None def test_001_square_ff (self): src_data = (-3, 4, -5.5, 2, 3) expected_result = (9, 16, 30.25, 4, 9) src = gr.vector_source_f (src_data) sqr = gr-air-modes_swig.square_ff () dst = gr.vector_sink_f () self.tb.connect (src, sqr) self.tb.connect (sqr, dst) self.tb.run () result_data = dst.data () self.assertFloatTuplesAlmostEqual (expected_result, result_data, 6) def test_002_square2_ff (self): src_data = (-3, 4, -5.5, 2, 3) expected_result = (9, 16, 30.25, 4, 9) src = gr.vector_source_f (src_data) sqr = gr-air-modes_swig.square2_ff () dst = gr.vector_sink_f () self.tb.connect (src, sqr) self.tb.connect (sqr, dst) self.tb.run () result_data = dst.data () self.assertFloatTuplesAlmostEqual (expected_result, result_data, 6) if __name__ == '__main__': gr_unittest.main () gr-air-modes-0.0.0.e47992d/python/radio.py000066400000000000000000000200451221634657000200030ustar00rootroot00000000000000# Copyright 2013 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # # Radio interface for Mode S RX. # Handles all hardware- and source-related functionality # You pass it options, it gives you data. # It uses the pubsub interface to allow clients to subscribe to its data feeds. from gnuradio import gr, gru, eng_notation, filter, blocks from gnuradio.filter import optfir from gnuradio.eng_option import eng_option from gnuradio.gr.pubsub import pubsub from optparse import OptionParser, OptionGroup import air_modes import zmq import threading import time import re class modes_radio (gr.top_block, pubsub): def __init__(self, options, context): gr.top_block.__init__(self) pubsub.__init__(self) self._options = options self._queue = gr.msg_queue() self._rate = int(options.rate) self._resample = None self._setup_source(options) self._rx_path = air_modes.rx_path(self._rate, options.threshold, self._queue, options.pmf, options.dcblock) #now subscribe to set various options via pubsub self.subscribe("freq", self.set_freq) self.subscribe("gain", self.set_gain) self.subscribe("rate", self.set_rate) self.subscribe("rate", self._rx_path.set_rate) self.subscribe("threshold", self._rx_path.set_threshold) self.subscribe("pmf", self._rx_path.set_pmf) self.publish("freq", self.get_freq) self.publish("gain", self.get_gain) self.publish("rate", self.get_rate) self.publish("threshold", self._rx_path.get_threshold) self.publish("pmf", self._rx_path.get_pmf) if self._resample is not None: self.connect(self._u, self._resample, self._rx_path) else: self.connect(self._u, self._rx_path) #Publish messages when they come back off the queue server_addr = ["inproc://modes-radio-pub"] if options.tcp is not None: server_addr += ["tcp://*:%i" % options.tcp] self._sender = air_modes.zmq_pubsub_iface(context, subaddr=None, pubaddr=server_addr) self._async_sender = gru.msgq_runner(self._queue, self.send) def send(self, msg): self._sender["dl_data"] = msg.to_string() @staticmethod def add_radio_options(parser): group = OptionGroup(parser, "Receiver setup options") #Choose source group.add_option("-s","--source", type="string", default="uhd", help="Choose source: uhd, osmocom, , or [default=%default]") group.add_option("-t","--tcp", type="int", default=None, metavar="PORT", help="Open a TCP server on this port to publish reports") #UHD/Osmocom args group.add_option("-R", "--subdev", type="string", help="select USRP Rx side A or B", metavar="SUBDEV") group.add_option("-A", "--antenna", type="string", help="select which antenna to use on daughterboard") group.add_option("-D", "--args", type="string", help="arguments to pass to radio constructor", default="") group.add_option("-f", "--freq", type="eng_float", default=1090e6, help="set receive frequency in Hz [default=%default]", metavar="FREQ") group.add_option("-g", "--gain", type="int", default=None, help="set RF gain", metavar="dB") #RX path args group.add_option("-r", "--rate", type="eng_float", default=4e6, help="set sample rate [default=%default]") group.add_option("-T", "--threshold", type="eng_float", default=7.0, help="set pulse detection threshold above noise in dB [default=%default]") group.add_option("-p","--pmf", action="store_true", default=False, help="Use pulse matched filtering [default=%default]") group.add_option("-d","--dcblock", action="store_true", default=False, help="Use a DC blocking filter (best for HackRF Jawbreaker) [default=%default]") parser.add_option_group(group) def live_source(self): return self._options.source=="uhd" or self._options.source=="osmocom" def set_freq(self, freq): return self._u.set_center_freq(freq, 0) if self.live_source() else 0 def set_gain(self, gain): if self.live_source(): self._u.set_gain(gain) print "Gain is %f" % self.get_gain() return self.get_gain() def set_rate(self, rate): self._rx_path.set_rate(rate) return self._u.set_rate(rate) if self.live_source() else 0 def set_threshold(self, threshold): self._rx_path.set_threshold(threshold) def get_freq(self, freq): return self._u.get_center_freq(freq, 0) if self.live_source() else 1090e6 def get_gain(self): return self._u.get_gain() if self.live_source() else 0 def get_rate(self): return self._u.get_rate() if self.live_source() else self._rate def _setup_source(self, options): if options.source == "uhd": #UHD source by default from gnuradio import uhd self._u = uhd.single_usrp_source(options.args, uhd.io_type_t.COMPLEX_FLOAT32, 1) if(options.subdev): self._u.set_subdev_spec(options.subdev, 0) if not self._u.set_center_freq(options.freq): print "Failed to set initial frequency" #check for GPSDO #if you have a GPSDO, UHD will automatically set the timestamp to UTC time #as well as automatically set the clock to lock to GPSDO. if self._u.get_time_source(0) != 'gpsdo': self._u.set_time_now(uhd.time_spec(0.0)) if options.antenna is not None: self._u.set_antenna(options.antenna) self._u.set_samp_rate(options.rate) options.rate = int(self._u.get_samp_rate()) #retrieve actual if options.gain is None: #set to halfway g = self._u.get_gain_range() options.gain = (g.start()+g.stop()) / 2.0 print "Setting gain to %i" % options.gain self._u.set_gain(options.gain) print "Gain is %i" % self._u.get_gain() #TODO: detect if you're using an RTLSDR or Jawbreaker #and set up accordingly. elif options.source == "osmocom": #RTLSDR dongle or HackRF Jawbreaker import osmosdr self._u = osmosdr.source(options.args) # self._u.set_sample_rate(3.2e6) #fixed for RTL dongles self._u.set_sample_rate(options.rate) if not self._u.set_center_freq(options.freq): print "Failed to set initial frequency" # self._u.set_gain_mode(0) #manual gain mode if options.gain is None: options.gain = 34 self._u.set_gain(options.gain) print "Gain is %i" % self._u.get_gain() #Note: this should only come into play if using an RTLSDR. # lpfiltcoeffs = gr.firdes.low_pass(1, 5*3.2e6, 1.6e6, 300e3) # self._resample = filter.rational_resampler_ccf(interpolation=5, decimation=4, taps=lpfiltcoeffs) else: #semantically detect whether it's ip.ip.ip.ip:port or filename if ':' in options.source: try: ip, port = re.search("(.*)\:(\d{1,5})", options.source).groups() except: raise Exception("Please input UDP source e.g. 192.168.10.1:12345") self._u = gr.udp_source(gr.sizeof_gr_complex, ip, int(port)) print "Using UDP source %s:%s" % (ip, port) else: self._u = blocks.file_source(gr.sizeof_gr_complex, options.source) print "Using file source %s" % options.source print "Rate is %i" % (options.rate,) def close(self): self._sender.close() self._u = None gr-air-modes-0.0.0.e47992d/python/raw_server.py000066400000000000000000000030551221634657000210660ustar00rootroot00000000000000# # Copyright 2010 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import time, os, sys, socket from string import split, join from datetime import * class raw_server: def __init__(self, port): self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._s.bind(('', port)) self._s.listen(1) self._s.setblocking(0) #nonblocking self._conns = [] #list of active connections def __del__(self): self._s.close() def output(self, msg): for conn in self._conns[:]: #iterate over a copy of the list try: conn.send(msg) except socket.error: self._conns.remove(conn) print "Connections: ", len(self._conns) def add_pending_conns(self): try: conn, addr = self._s.accept() self._conns.append(conn) print "Connections: ", len(self._conns) except socket.error: pass gr-air-modes-0.0.0.e47992d/python/rx_path.py000066400000000000000000000062071221634657000203560ustar00rootroot00000000000000# # Copyright 2012, 2013 Corgan Labs, Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, blocks, filter import air_modes_swig class rx_path(gr.hier_block2): def __init__(self, rate, threshold, queue, use_pmf=False, use_dcblock=False): gr.hier_block2.__init__(self, "modes_rx_path", gr.io_signature(1, 1, gr.sizeof_gr_complex), gr.io_signature(0,0,0)) self._rate = int(rate) self._threshold = threshold self._queue = queue self._spc = int(rate/2e6) # Convert incoming I/Q baseband to amplitude self._demod = blocks.complex_to_mag_squared() if use_dcblock: self._dcblock = filter.dc_blocker_cc(100*self._spc,True) self.connect(self, self._dcblock, self._demod) else: self.connect(self, self._demod) self._dcblock = None self._bb = self._demod # Pulse matched filter for 0.5us pulses if use_pmf: self._pmf = blocks.moving_average_ff(self._spc, 1.0/self._spc)#, self._rate) self.connect(self._demod, self._pmf) self._bb = self._pmf # Establish baseline amplitude (noise, interference) self._avg = blocks.moving_average_ff(48*self._spc, 1.0/(48*self._spc))#, self._rate) # 3 preambles # Synchronize to Mode-S preamble self._sync = air_modes_swig.preamble(self._rate, self._threshold) # Slice Mode-S bits and send to message queue self._slicer = air_modes_swig.slicer(self._queue) # Wire up the flowgraph self.connect(self._bb, (self._sync, 0)) self.connect(self._bb, self._avg, (self._sync, 1)) self.connect(self._sync, self._slicer) def set_rate(self, rate): self._sync.set_rate(rate) self._spc = int(rate/2e6) self._avg.set_length_and_scale(48*self._spc, 1.0/(48*self._spc)) if self._bb != self._demod: self._pmf.set_length_and_scale(self._spc, 1.0/self._spc) if self._dcblock is not None: self._dcblock.set_length(100*self._spc) def set_threshold(self, threshold): self._sync.set_threshold(threshold) def set_pmf(self, pmf): #TODO must be done when top block is stopped pass def get_pmf(self, pmf): return not (self._bb == self._demod) def get_threshold(self, threshold): return self._sync.get_threshold() gr-air-modes-0.0.0.e47992d/python/sbs1.py000066400000000000000000000173661221634657000175710ustar00rootroot00000000000000# # Copyright 2010 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import time, os, sys, socket from string import split, join import air_modes from datetime import * from air_modes.exceptions import * import threading class dumb_task_runner(threading.Thread): def __init__(self, task, interval): threading.Thread.__init__(self) self._task = task self._interval = interval self.shutdown = threading.Event() self.finished = threading.Event() self.setDaemon(True) self.start() def run(self): while not self.shutdown.is_set(): self._task() time.sleep(self._interval) self.finished.set() def close(self): self.shutdown.set() self.finished.wait(self._interval) class output_sbs1: def __init__(self, cprdec, port, pub): self._s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._s.bind(('', port)) self._s.listen(1) self._s.setblocking(0) #nonblocking self._conns = [] #list of active connections self._aircraft_id_map = {} # dictionary of icao24 to aircraft IDs self._aircraft_id_count = 0 # Current Aircraft ID count self._cpr = cprdec #it could be cleaner if there were separate output_* fns #but this works for i in (0, 4, 5, 11, 17): pub.subscribe("type%i_dl" % i, output) #spawn thread to add new connections as they come in self._runner = dumb_task_runner(self.add_pending_conns, 0.1) def __del__(self): self._s.close() def get_aircraft_id(self, icao24): if icao24 in self._aircraft_id_map: return self._aircraft_id_map[icao24] # Adding this new ID to the dictionary self._aircraft_id_count += 1 self._aircraft_id_map[icao24] = self._aircraft_id_count # Checking to see if we need to clean up in the event that the # dictionary is getting too large. if len(self._aircraft_id_map) > 1e4: minimum = min(self._aircraft_id_map.values()) + (len(self._aircraft_id_map) - 1e4) for icao, _id in self._aircraft_id_map: if _id < minimum: del self._aircraft_id_map[icao] # Finally return the new pair return self._aircraft_id_count def output(self, msg): try: sbs1_msg = self.parse(msg) if sbs1_msg is not None: for conn in self._conns[:]: #iterate over a copy of the list conn.send(sbs1_msg) except socket.error: self._conns.remove(conn) print "Connections: ", len(self._conns) except ADSBError: pass def add_pending_conns(self): try: conn, addr = self._s.accept() self._conns.append(conn) print "Connections: ", len(self._conns) except socket.error: pass def current_time(self): timenow = datetime.now() return [timenow.strftime("%Y/%m/%d"), timenow.strftime("%H:%M:%S.%f")[0:-3]] def decode_fs(self, fs): if fs == 0: return "0,0,0,0" elif fs == 1: return "0,0,0,1" elif fs == 2: return "1,0,0,0" elif fs == 3: return "1,0,0,1" elif fs == 4: return "1,0,1," elif fs == 5: return "0,0,1," else: return ",,," def parse(self, msg): #assembles a SBS-1-style output string from the received message msgtype = msg.data["df"] outmsg = None if msgtype == 0: outmsg = self.pp0(msg.data, msg.ecc) elif msgtype == 4: outmsg = self.pp4(msg.data, msg.ecc) elif msgtype == 5: outmsg = self.pp5(msg.data, msg.ecc) elif msgtype == 11: outmsg = self.pp11(msg.data, msg.ecc) elif msgtype == 17: outmsg = self.pp17(msg.data) else: raise NoHandlerError(msgtype) return outmsg def pp0(self, shortdata, ecc): [datestr, timestr] = self.current_time() aircraft_id = self.get_aircraft_id(ecc) retstr = "MSG,7,0,%i,%06X,%i,%s,%s,%s,%s,,%s,,,,,,,,,," % (aircraft_id, ecc, aircraft_id+100, datestr, timestr, datestr, timestr, air_modes.decode_alt(shortdata["ac"], True)) if vs: retstr += "1\r\n" else: retstr += "0\r\n" return retstr def pp4(self, shortdata, ecc): [datestr, timestr] = self.current_time() aircraft_id = self.get_aircraft_id(ecc) retstr = "MSG,5,0,%i,%06X,%i,%s,%s,%s,%s,,%s,,,,,,," % (aircraft_id, ecc, aircraft_id+100, datestr, timestr, datestr, timestr, air_modes.decode_alt(shortdata["ac"], True)) return retstr + self.decode_fs(shortdata["fs"]) + "\r\n" def pp5(self, shortdata, ecc): [datestr, timestr] = self.current_time() aircraft_id = self.get_aircraft_id(ecc) retstr = "MSG,6,0,%i,%06X,%i,%s,%s,%s,%s,,,,,,,,%04i," % (aircraft_id, ecc, aircraft_id+100, datestr, timestr, datestr, timestr, air_modes.decode_id(shortdata["id"])) return retstr + self.decode_fs(shortdata["fs"]) + "\r\n" def pp11(self, shortdata, ecc): [datestr, timestr] = self.current_time() aircraft_id = self.get_aircraft_id(icao24) return "MSG,8,0,%i,%06X,%i,%s,%s,%s,%s,,,,,,,,,,,,\r\n" % (aircraft_id, shortdata["aa"], aircraft_id+100, datestr, timestr, datestr, timestr) def pp17(self, data): icao24 = data["aa"] aircraft_id = self.get_aircraft_id(icao24) bdsreg = data["me"].get_type() retstr = None #we'll get better timestamps later, hopefully with actual VRT time #in them [datestr, timestr] = self.current_time() if bdsreg == 0x08: # Aircraft Identification (msg, typestring) = air_modes.parseBDS08(data) retstr = "MSG,1,0,%i,%06X,%i,%s,%s,%s,%s,%s,,,,,,,,,,,\r\n" % (aircraft_id, icao24, aircraft_id+100, datestr, timestr, datestr, timestr, msg) elif bdsreg == 0x06: # Surface position measurement [ground_track, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS06(data, self._cpr) altitude = 0 if decoded_lat is None: #no unambiguously valid position available retstr = None else: retstr = "MSG,2,0,%i,%06X,%i,%s,%s,%s,%s,,%i,,,%.5f,%.5f,,,,0,0,0\r\n" % (aircraft_id, icao24, aircraft_id+100, datestr, timestr, datestr, timestr, altitude, decoded_lat, decoded_lon) elif bdsreg == 0x05: # Airborne position measurements # WRONG (rnge, bearing), is this still true? [altitude, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS05(data, self._cpr) if decoded_lat is None: #no unambiguously valid position available retstr = None else: retstr = "MSG,3,0,%i,%06X,%i,%s,%s,%s,%s,,%i,,,%.5f,%.5f,,,,0,0,0\r\n" % (aircraft_id, icao24, aircraft_id+100, datestr, timestr, datestr, timestr, altitude, decoded_lat, decoded_lon) elif bdsreg == 0x09: # Airborne velocity measurements # WRONG (heading, vert_spd), Is this still true? subtype = data["bds09"].get_type() if subtype == 0 or subtype == 1: parser = air_modes.parseBDS09_0 if subtype == 0 else air_modes.parseBDS09_1 [velocity, heading, vert_spd] = parser(data) retstr = "MSG,4,0,%i,%06X,%i,%s,%s,%s,%s,,,%.1f,%.1f,,,%i,,,,,\r\n" % (aircraft_id, icao24, aircraft_id+100, datestr, timestr, datestr, timestr, velocity, heading, vert_spd) return retstr gr-air-modes-0.0.0.e47992d/python/sql.py000066400000000000000000000131371221634657000175100ustar00rootroot00000000000000# # Copyright 2010 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import time, os, sys, threading from string import split, join import air_modes import sqlite3 from air_modes.exceptions import * from gnuradio.gr.pubsub import pubsub class output_sql: def __init__(self, cpr, filename, lock, publisher): #pubsub.__init__(self) self._cpr = cpr self._lock = lock; #create the database self.filename = filename self._db = sqlite3.connect(filename) #now execute a schema to create the tables you need c = self._db.cursor() query = """CREATE TABLE IF NOT EXISTS "positions" ( "icao" INTEGER KEY NOT NULL, "seen" TEXT NOT NULL, "alt" INTEGER, "lat" REAL, "lon" REAL );""" c.execute(query) query = """CREATE TABLE IF NOT EXISTS "vectors" ( "icao" INTEGER KEY NOT NULL, "seen" TEXT NOT NULL, "speed" REAL, "heading" REAL, "vertical" REAL );""" c.execute(query) query = """CREATE TABLE IF NOT EXISTS "ident" ( "icao" INTEGER PRIMARY KEY NOT NULL, "ident" TEXT NOT NULL, "type" TEXT NOT NULL );""" c.execute(query) c.close() self._db.commit() #we close the db conn now to reopen it in the output() thread context. self._db.close() self._db = None publisher.subscribe("type17_dl", self.insert) def insert(self, message): with self._lock: try: #we're checking to see if the db is empty, and creating the db object #if it is. the reason for this is so that the db writing is done within #the thread context of output(), rather than the thread context of the #constructor. if self._db is None: self._db = sqlite3.connect(self.filename) query = self.make_insert_query(message) if query is not None: c = self._db.cursor() c.execute(query) c.close() self._db.commit() except ADSBError: pass def make_insert_query(self, msg): #assembles a SQL query tailored to our database #this version ignores anything that isn't Type 17 for now, because we just don't care query = None msgtype = msg.data["df"] if msgtype == 17: query = self.sql17(msg.data) #self["new_adsb"] = data["aa"] #publish change notification return query #TODO: if there's a way to publish selective reports on upsert to distinguish, #for instance, between a new ICAO that's just been heard, and a refresh of an #existing ICAO, both of those would be useful publishers for the GUI model. #otherwise, worst-case you can just refresh everything every time a report #comes in, but it's going to use more CPU. Not likely a problem if you're only #looking at ADS-B (no mode S) data. #It's probably time to look back at the Qt SQL table model and see if it can be #bent into shape for you. def sql17(self, data): icao24 = data["aa"] bdsreg = data["me"].get_type() #self["bds%.2i" % bdsreg] = icao24 #publish under "bds08", "bds06", etc. if bdsreg == 0x08: (msg, typename) = air_modes.parseBDS08(data) return "INSERT OR REPLACE INTO ident (icao, ident, type) VALUES (" + "%i" % icao24 + ", '" + msg + "', '" + typename + "')" elif bdsreg == 0x06: [ground_track, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS06(data, self._cpr) altitude = 0 if decoded_lat is None: #no unambiguously valid position available raise CPRNoPositionError else: return "INSERT INTO positions (icao, seen, alt, lat, lon) VALUES (" + "%i" % icao24 + ", datetime('now'), " + str(altitude) + ", " + "%.6f" % decoded_lat + ", " + "%.6f" % decoded_lon + ")" elif bdsreg == 0x05: [altitude, decoded_lat, decoded_lon, rnge, bearing] = air_modes.parseBDS05(data, self._cpr) if decoded_lat is None: #no unambiguously valid position available raise CPRNoPositionError else: return "INSERT INTO positions (icao, seen, alt, lat, lon) VALUES (" + "%i" % icao24 + ", datetime('now'), " + str(altitude) + ", " + "%.6f" % decoded_lat + ", " + "%.6f" % decoded_lon + ")" elif bdsreg == 0x09: subtype = data["bds09"].get_type() if subtype == 0: [velocity, heading, vert_spd, turnrate] = air_modes.parseBDS09_0(data) return "INSERT INTO vectors (icao, seen, speed, heading, vertical) VALUES (" + "%i" % icao24 + ", datetime('now'), " + "%.0f" % velocity + ", " + "%.0f" % heading + ", " + "%.0f" % vert_spd + ")" elif subtype == 1: [velocity, heading, vert_spd] = air_modes.parseBDS09_1(data) return "INSERT INTO vectors (icao, seen, speed, heading, vertical) VALUES (" + "%i" % icao24 + ", datetime('now'), " + "%.0f" % velocity + ", " + "%.0f" % heading + ", " + "%.0f" % vert_spd + ")" else: raise NoHandlerError gr-air-modes-0.0.0.e47992d/python/types.py000066400000000000000000000076461221634657000200650ustar00rootroot00000000000000# # Copyright 2013 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from collections import namedtuple #this is a timestamp that preserves precision when used with UTC timestamps. #ordinary double-precision timestamps lose significant fractional precision #when the exponent is as large as necessary for UTC. class stamp: def __init__(self, secs, frac_secs): self.secs = secs self.frac_secs = frac_secs self.secs += int(self.frac_secs) self.frac_secs -= int(self.frac_secs) def __lt__(self, other): if isinstance(other, self.__class__): if self.secs == other.secs: return self.frac_secs < other.frac_secs else: return self.secs < other.secs elif isinstance(other, float): return float(self) > other else: raise TypeError def __gt__(self, other): if type(other) is type(self): if self.secs == other.secs: return self.frac_secs > other.frac_secs else: return self.secs > other.secs elif type(other) is type(float): return float(self) > other else: raise TypeError def __eq__(self, other): if isinstance(other, self.__class__): return self.secs == other.secs and self.frac_secs == other.frac_secs elif isinstance(other, float): return float(self) == other else: raise TypeError def __ne__(self, other): return not (self == other) def __le__(self, other): return (self == other) or (self < other) def __ge__(self, other): return (self == other) or (self > other) def __add__(self, other): if isinstance(other, self.__class__): ipart = self.secs + other.secs fpart = self.frac_secs + other.frac_secs return stamp(ipart, fpart) elif isinstance(other, float): return self + stamp(0, other) elif isinstance(other, int): return self + stamp(other, 0) else: raise TypeError def __sub__(self, other): if isinstance(other, self.__class__): ipart = self.secs - other.secs fpart = self.frac_secs - other.frac_secs return stamp(ipart, fpart) elif isinstance(other, float): return self - stamp(0, other) elif isinstance(other, int): return self - stamp(other, 0) else: raise TypeError #to ensure we don't hash by stamp #TODO fixme with a reasonable hash in case you feel like you'd hash by stamp __hash__ = None #good to within ms for comparison def __float__(self): return self.secs + self.frac_secs def __str__(self): return "%f" % float(self) #a Mode S report including the modes_reply data object modes_report = namedtuple('modes_report', ['data', 'ecc', 'rssi', 'timestamp']) #lat, lon, alt #TODO: a position class internally represented as ECEF XYZ which can easily be used for multilateration and distance calculation llh = namedtuple('llh', ['lat', 'lon', 'alt']) mlat_report = namedtuple('mlat_report', ['data', 'nreps', 'timestamp', 'llh', 'hdop', 'vdop']) gr-air-modes-0.0.0.e47992d/python/zmq_socket.py000066400000000000000000000115101221634657000210610ustar00rootroot00000000000000# Copyright 2013 Nick Foster # # This file is part of gr-air-modes # # gr-air-modes is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # gr-air-modes is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with gr-air-modes; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. #this serves as a bridge between ZMQ subscriber and the GR pubsub callbacks interface #creates a thread, publishes socket data to pubsub subscribers #just a convenient way to create an aggregating socket with callbacks on receive #can use this for inproc:// signalling with minimal overhead #not sure if it's a good idea to use this yet import time import threading import zmq from gnuradio.gr.pubsub import pubsub import Queue class zmq_pubsub_iface(threading.Thread): def __init__(self, context, subaddr=None, pubaddr=None): threading.Thread.__init__(self) #private data self._queue = Queue.Queue() self._subsocket = context.socket(zmq.SUB) self._pubsocket = context.socket(zmq.PUB) self._subaddr = subaddr self._pubaddr = pubaddr if type(self._subaddr) is str: self._subaddr = [self._subaddr] if type(self._pubaddr) is str: self._pubaddr = [self._pubaddr] self._sub_connected = False self._pubsub = pubsub() if self._pubaddr is not None: for addr in self._pubaddr: self._pubsocket.bind(addr) self._poller = zmq.Poller() self._poller.register(self._subsocket, zmq.POLLIN) #public data self.shutdown = threading.Event() self.finished = threading.Event() #init self.setDaemon(True) self.start() def subscribe(self, key, subscriber): if not self._sub_connected: if not self._subaddr: raise Exception("No subscriber address set") for addr in self._subaddr: self._subsocket.connect(addr) self._sub_connected = True self._subsocket.setsockopt(zmq.SUBSCRIBE, key) self._pubsub.subscribe(key, subscriber) def unsubscribe(self, key, subscriber): self._subsocket.setsockopt(zmq.UNSUBSCRIBE, key) self._pubsub.unsubscribe(key, subscriber) #executed from the thread context(s) of the caller(s) #so we use a queue to push sending into the run loop #since sockets must be used in the thread they were created in def __setitem__(self, key, val): if not self._pubaddr: raise Exception("No publisher address set") if not self.shutdown.is_set(): self._queue.put([key, val]) def __getitem__(self, key): return self._pubsub[key] def run(self): done = False while not self.shutdown.is_set() and not done: if self.shutdown.is_set(): done = True #send while not self._queue.empty(): self._pubsocket.send_multipart(self._queue.get()) #receive if self._sub_connected: socks = dict(self._poller.poll(timeout=0)) while self._subsocket in socks \ and socks[self._subsocket] == zmq.POLLIN: [address, msg] = self._subsocket.recv_multipart() self._pubsub[address] = msg socks = dict(self._poller.poll(timeout=0)) #snooze if not done: time.sleep(0.1) self._subsocket.close() self._pubsocket.close() self.finished.set() def close(self): self.shutdown.set() #self._queue.join() #why does this block forever self.finished.wait(0.2) def pr(x): print x if __name__ == "__main__": #create socket pair context = zmq.Context(1) sock1 = zmq_pubsub_iface(context, subaddr="inproc://sock2-pub", pubaddr="inproc://sock1-pub") sock2 = zmq_pubsub_iface(context, subaddr="inproc://sock1-pub", pubaddr=["inproc://sock2-pub", "tcp://*:5433"]) sock3 = zmq_pubsub_iface(context, subaddr="tcp://localhost:5433", pubaddr=None) sock1.subscribe("data1", pr) sock2.subscribe("data2", pr) sock3.subscribe("data3", pr) for i in range(10): sock1["data2"] = "HOWDY" sock2["data3"] = "DRAW" sock2["data1"] = "PARDNER" time.sleep(0.1) time.sleep(0.1) sock1.close() sock2.close() sock3.close() gr-air-modes-0.0.0.e47992d/res/000077500000000000000000000000001221634657000156025ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/res/CMakeLists.txt000066400000000000000000000051711221634657000203460ustar00rootroot00000000000000# Copyright 2011,2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Include python install macros ######################################################################## include(GrPython) if(NOT PYTHONINTERP_FOUND) return() endif() find_package(PyQt4) if(NOT PYQT4_FOUND) message(STATUS "PyQt4 not found, not installing GUI application") return() endif() if(NOT PYUIC4_FOUND) message(STATUS "pyuic4 not found, not installing GUI application") return() endif() find_package(Qwt) if(NOT QWT_FOUND) message(STATUS "Qwt not found, not installing GUI application") return() endif() set(RX_UI_SRC ${CMAKE_CURRENT_SOURCE_DIR}/modes_rx.ui) set(RX_UI_PY_PRE_MASSAGE ${CMAKE_CURRENT_BINARY_DIR}/modes_rx_ui_borked.py) set(RX_UI_PY ${CMAKE_CURRENT_BINARY_DIR}/modes_rx_ui.py) set(PYUIC_MASSAGER sed "s/from qwt.* import/from PyQt4.Qwt5.Qwt import/") add_custom_target(rx_ui ALL DEPENDS ${RX_UI_PY} ) add_custom_command(OUTPUT ${RX_UI_PY_PRE_MASSAGE} COMMAND ${PYUIC4_EXECUTABLE} ${RX_UI_SRC} > ${RX_UI_PY_PRE_MASSAGE} MAIN_DEPENDENCY ${RX_UI_SRC} ) add_custom_command(OUTPUT ${RX_UI_PY} COMMAND ${PYUIC_MASSAGER} ${RX_UI_PY_PRE_MASSAGE} > ${RX_UI_PY} MAIN_DEPENDENCY ${RX_UI_PY_PRE_MASSAGE} ) ######################################################################## # Install python sources ######################################################################## GR_PYTHON_INSTALL( FILES ${RX_UI_PY} DESTINATION ${GR_PYTHON_DIR}/air_modes ) ######################################################################## # Handle the unit tests ######################################################################## #include(GrTest) #set(GR_TEST_TARGET_DEPS gnuradio-gr-air-modes) #set(GR_TEST_PYTHON_DIRS ${CMAKE_BINARY_DIR}/swig) #GR_ADD_TEST(qa_gr-air-modes ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/qa_gr-air-modes.py) gr-air-modes-0.0.0.e47992d/res/modes_rx.ui000066400000000000000000001002151221634657000177600ustar00rootroot00000000000000 MainWindow 0 0 687 422 0 0 MainWindow 0 0 QLayout::SetDefaultConstraint Visible aircraft 0 0 100 100 100 16777215 QAbstractItemView::NoEditTriggers QListView::SinglePass true 1 1 0 0 0 Setup 10 20 236 251 Input 11 66 66 17 Rate 11 36 66 17 Source 90 30 121 27 10 95 66 17 Threshold 90 60 91 27 90 90 71 27 164 96 31 17 dB 184 66 41 17 Msps -1 117 221 71 0 90 40 121 27 90 10 71 27 10 10 66 17 Gain 10 40 66 17 Antenna 160 10 31 17 dB 91 2 113 27 11 7 66 17 Filename 10 200 221 22 Use Pulse Matched Filtering 10 220 221 22 Use DC blocking filter 270 20 281 151 Output 19 124 61 22 KML 19 34 71 22 SBS-1 19 64 61 22 TCP 160 120 111 27 160 30 71 27 160 60 71 27 160 90 71 27 19 94 101 22 FlightGear 128 66 31 17 Port 128 96 31 17 Port 128 36 31 17 Port 98 126 66 17 Filename 280 170 211 111 RX position Qt::AlignLeading|Qt::AlignLeft|Qt::AlignVCenter 16 64 71 20 Longitude 90 60 113 27 90 30 113 27 28 34 61 20 Latitude Dashboard 193 35 21 18 nm 483 66 21 17 kts 483 96 21 17 ft 250 10 61 20 Heading 30 10 61 20 Bearing 120 240 118 23 24 false 17 243 101 17 Signal strength 120 10 61 20 Range 365 64 51 20 Speed 352 93 61 20 Altitude 370 154 51 20 Ident 120 30 71 27 true 410 60 71 27 true 410 90 71 27 true 410 150 71 27 true 410 180 121 27 true 373 184 51 20 Type 120 180 121 27 true 120 210 121 27 true 62 183 61 21 Latitude 50 213 71 21 Longitude 410 120 71 27 true 483 125 51 17 ft/min 368 123 41 20 Climb 200 30 161 91 true 4 372 34 51 20 ICAO 410 30 71 27 true -20 30 161 91 true 4 Azimuth map Map 0 0 Live data false Show ADS-B-equipped aircraft only Qt::Horizontal 40 20 Reports/second 50 16777215 Start 0 0 687 25 QwtDial QWidget
qwt_dial.h
az_map QWidget
air_modes/az_map
1
QwtCompass QwtDial
qwt_compass.h
QWebView QWidget
QtWebKit/qwebview.h
1
gr-air-modes-0.0.0.e47992d/swig/000077500000000000000000000000001221634657000157625ustar00rootroot00000000000000gr-air-modes-0.0.0.e47992d/swig/CMakeLists.txt000066400000000000000000000042301221634657000205210ustar00rootroot00000000000000# Copyright 2011 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. ######################################################################## # Include swig generation macros ######################################################################## find_package(SWIG) find_package(PythonLibs) if(NOT SWIG_FOUND OR NOT PYTHONLIBS_FOUND) return() endif() include(GrSwig) include(GrPython) ######################################################################## # Setup swig generation ######################################################################## foreach(incdir ${GNURADIO_RUNTIME_INCLUDE_DIRS}) list(APPEND GR_SWIG_INCLUDE_DIRS ${incdir}/gnuradio/swig) endforeach(incdir) set(GR_SWIG_LIBRARIES air_modes) #set(GR_SWIG_DOC_FILE ${CMAKE_CURRENT_BINARY_DIR}/gr-air-modes_swig_doc.i) #set(GR_SWIG_DOC_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../include) GR_SWIG_MAKE(air_modes_swig air_modes.i) ######################################################################## # Install the build swig module ######################################################################## GR_SWIG_INSTALL(TARGETS air_modes_swig DESTINATION ${GR_PYTHON_DIR}/air_modes) ######################################################################## # Install swig .i files for development ######################################################################## #install( # FILES # air_modes.i #${CMAKE_CURRENT_BINARY_DIR}/air_modes_swig_doc.i # DESTINATION $(GR_INCLUDE_DIR)/air_modes/swig #) gr-air-modes-0.0.0.e47992d/swig/air_modes.i000066400000000000000000000004441221634657000201000ustar00rootroot00000000000000/* -*- c++ -*- */ #define AIR_MODES_API %include "gnuradio.i" %{ #include "gr_air_modes/preamble.h" #include "gr_air_modes/slicer.h" %} %include "gr_air_modes/preamble.h" %include "gr_air_modes/slicer.h" GR_SWIG_BLOCK_MAGIC2(air_modes,preamble); GR_SWIG_BLOCK_MAGIC2(air_modes,slicer);