pax_global_header00006660000000000000000000000064136552060520014516gustar00rootroot0000000000000052 comment=e0ec5528d79a1119717b74489a7d83bfd1349a31 xapers-0.9.0/000077500000000000000000000000001365520605200130265ustar00rootroot00000000000000xapers-0.9.0/.gitignore000066400000000000000000000000441365520605200150140ustar00rootroot00000000000000*~ *.pyc dist build xapers.egg-info xapers-0.9.0/COPYING000066400000000000000000000012131365520605200140560ustar00rootroot00000000000000Xapers is free software. You can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program, (in the COPYING-GPL-3 file in this directory). If not, see http://www.gnu.org/licenses/ xapers-0.9.0/COPYING-GPL-3000066400000000000000000001043741365520605200146520ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. 
We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. 
The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. 
The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. 
Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. 
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. 
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. 
Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. 
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. 
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. 
If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. 
If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. 
For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. 
You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. 
SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . 
xapers-0.9.0/MANIFEST.in000066400000000000000000000000601365520605200145600ustar00rootroot00000000000000include COPYING COPYING-GPL-3 README.md NEWS.md xapers-0.9.0/Makefile000066400000000000000000000022551365520605200144720ustar00rootroot00000000000000# -*- makefile -*- VERSION:=$(shell git describe --tags | sed -e s/_/~/ -e s/-/+/ -e s/-/~/) PV_FILE=lib/xapers/version.py .PHONY: all all: .PHONY: test test: ./test/xapers-test .PHONY: update-version update-version: echo "__version__ = '$(VERSION)'" >$(PV_FILE) .PHONY: release ifdef V update-version: VERSION:=$(V) release: VERSION:=$(V) release: update-version head -1 NEWS.md | grep "^Xapers $(VERSION) " make test git commit -m "Update version for release $(VERSION)." $(PV_FILE) || true git tag --sign -m "Xapers $(VERSION) release." $(VERSION) else release: git tag -l | grep -v debian/ endif .PHONY: deb-snapshot debian-snapshot debian-snapshot: deb-snapshot deb-snapshot: rm -rf build/snapshot mkdir -p build/snapshot/debian git archive HEAD | tar -x -C build/snapshot/ git archive debian:debian | tar -x -C build/snapshot/debian/ cd build/snapshot; make update-version cd build/snapshot; dch -b -v $(VERSION) -D UNRELEASED 'test build, not for upload' cd build/snapshot; echo '3.0 (native)' > debian/source/format cd build/snapshot; debuild -us -uc .PHONY: clean clean: rm -rf build rm -rf test/test-results rm -rf test/tmp.* debuild clean 2>/dev/null || true xapers-0.9.0/NEWS.md000066400000000000000000000034301365520605200141240ustar00rootroot00000000000000Xapers 0.8.2 (2017-08-08) ======================= * Minor bug fixes Xapers 0.8 (2017-08-02) ======================= * Move to python 3 * lib: add missing get_authors method (thanks William Pettersson) Xapers 0.7.1 (2017-01-04) ======================= * Add ability to search sort results by year. 
Works in both cli (--sort option) and curses interface ('o' to toggle sort order) * fix to charset decoding in cryptoeprint source (thanks dkg) Xapers 0.7 (2016-12-31) ======================= * nci: various improvements: * better buffer handling/redraw * more commands * improved prompts, with navigation, history, tab completion, etc. * lib: fix query matching for tag terms: default AND instead of OR * bibtex: updates for pybibtex 0.20 Xapers 0.6 (2015-01-10) ======================= * lib: Document year is now indexed as a proper value, supporting both 'year:' binary and 'year:..' range searches. Will require running 'restore' from the cli to re-index previously added articles. Thanks to Rafael Laboissière for the contribution. * lib: Much improved performance of Document prefixed term retrieval. * cli: Much improved search output performance. Default output limit of 20 has been removed. * nci: Curses interface now loads entries dynamically, so search results are no longer limited. * nci: Cleaner results display. Document 'summary' has been removed, but other information has been added, such as journal article, year, file info, and document match information. * nci: Extended navigation commands. * sources: Now support PDF download from sources, if the source supports it. * sources: New IACR 'crytoeprint' source. Thanks to Daniel Kahn Gillmor for the contribution. * Other cleanup, bug fixes, and performance improvements. xapers-0.9.0/README.md000066400000000000000000000203301365520605200143030ustar00rootroot00000000000000Xapers - personal journal article management system =================================================== Xapers is a personal document indexing system, geared towards academic journal articles. Think of it as your own personal document search engine, or a local cache of online libraries. It provides fast search of document text and bibliographic data and simple document and bibtex retrieval. Document files (in PDF format) and source identifiers (e.g. 
DOI) are parsed and indexed into a Xapian search engine [0]. Document text is extracted from the PDF and fully indexed. Bibliographic information downloaded from online libraries is indexed as prefixed search terms. Existing bibtex databases can be easily imported as well, including import of pdf files specified in Jabref/Mendeley format. Documents can be arbitrarily tagged. Original document files are easily retrievable from a simple curses search UI. The command line interface allows for exporting bibtex [1] from arbitrary searches, allowing seamless integration into LaTeX work flows. Xapers provides source modules for some common online resources: * doi: Digital Object Identifier (https://dx.doi.org/) * arxiv: Open access e-print service (http://arxiv.org/) * dcc: LIGO Document Control Center (https://dcc.ligo.org/) * cryptoeprint: Cryptology ePrint Archive (https://eprint.iacr.org/) Contributions of additional source interface modules is highly encouraged. See the "Document Sources" section below for info on creating new sources. Xapers is heavily inspired by the notmuch mail indexing system [2]. 
* [0] http://www.xapian.org/ * [1] http://www.bibtex.org/ * [2] http://notmuchmail.org/ ![xapers ncurses UI]](screenshot.png "xapers ncurses UI") Contact ======= Xapers was written by: Jameson Graef Rollins Xapers has a mailing list: xapers@lists.mayfirst.org https://lists.mayfirst.org/mailman/listinfo/xapers We also hang out on IRC: channel: #xapers server: irc.freenode.net Please submit all bug reports to the Debian bug tracking system (BTS): https://bugs.debian.org/xapers https://www.debian.org/Bugs/Reporting Getting Xapers ============== Source ------ Clone the repo: $ git clone git://finestructure.net/xapers $ cd xapers Dependencies : * python3 * python3-xapian - Python Xapian search engine bindings * python3-pybtex - Python bibtex parser * poppler-utils - PDF processing tools * python3-pycurl - Python bindings to libcurl Recommends (for curses UI) : * python3-urwid - Python Urwid curses library * xdg-utils - Desktop tools for opening files and URLs * xclip - X clipboard support for copying document fields On Debian: $ sudo apt-get install python3-xapian python3-pybtex python3-pycurl poppler-utils python3-urwid xdg-utils xclip Run the tests: $ make test Debian ------ Xapers is a part of Debian: $ apt install xapers Debian/Ubuntu snapshot packages can be easily made from the git source. You can build the package from any branch but it requires an up-to-date local branch of origin/debian, e.g.: $ git branch debian origin/debian Then: $ sudo apt-get install build-essential devscripts pkg-config python-all-dev python-setuptools debhelper dpkg-dev fakeroot $ make debian-snapshot $ sudo dpkg -i build/xapers_0.8_amd64.deb Using Xapers ============ See the included xapers(1) man page for detailed usage and information on source modules and searching. Command line interface ---------------------- The main interface to Xapers is the xapers command line utility. From this interface you can import documents, search, tag, etc. 
The "add" command allows importing or updating single documents. The "import" command allows importing an entire bibtex databases (.bib file). If the bibtex entries include "file" fields (ala. Mendeley or Jabref), then those files are retrieved, indexed, and imported as well. Curses interface ---------------- The curses interface ("xapers show ...") provides a simple way to display search results and retrieve files. Documents matching searches are displayed with their bibliographic information. Document tags can be manipulated, files and bibtex can be viewed, and source URLs can be opened in a browser. xapers-adder ------------ xapers-adder is a simple script that helps the adding of individual documents to your Xapers database. It can be used e.g. as a PDF handler in your favorite browser. It displays the PDF then presents the user with the option to import the document into Xapers. The user is prompted for any sources to retrieve and any initial tags to add. If the source is known, bibtex is retrieved and indexed. The resulting xapers entry for the document is displayed. Development of more clever import methods is highly encouraged. Python library -------------- Xapers is a python library under the hood: >>> import xapers >>> db = xapers.Database('~/.xapers/docs') >>> docs = db.search('tag:new') >>> for doc in docs: doc.add_tags(['foo']) ... >>> Development of new interfaces to the underlying library is highly encouraged. Document Sources ================ A Xapers "source" is a python module that describes how to interact with a single online journal database, from which document files and bibliographic data can be retrieved. Sources are assigned unique prefixes (e.g. "doi"). Online libraries associate unique document identifiers to individual documents (e.g. "10.1364/JOSAA.29.002092"). 
A particular online document is therefore described by a unique "source identifier", or "sid", which can take two equivalent forms: full URL http://dx.doi.org/10.1364/JOSAA.29.002092 : doi:10.1364/JOSAA.29.002092 CUSTOM SOURCE MODULES --------------------- Custom source modules may be written to extend the base functionality of Xapers. A source module is described by a single python module (although it may import arbitrary other modules). The base name of the module file is interpreted as the nickname or 'prefix' for the source (e.g. if the module is named "doi.py" the source nickname will be "doi"). The module should include the following properties and functions. If any are missing, some xapers functionality may be undefined. description: a brief string description of the source, e.g.: description = "Digital Object Identifier" url: base URL of source, e.g.: url = 'http://dx.doi.org/' url_format: a printf format string that produces a valid source URL for a specified source identifier string, e.g.: url_format = 'http://dx.doi.org/%s' url_regex: a regular expression string that will match the source identifier string from a given full URL, e.g.: url_regex = 'http://dx.doi.org/(10\.\d{4,}[\w\d\:\.\-\/]+)' scan_regex: a regular expression string that will match the source identifier string in a scan of a documents plain text, e.g.: scan_regex = '(?:doi|DOI)[\s\.\:]{0,2}' + id_regex fetch_bibtex(id): a function that will return a bibtex string for a source document specified by id. fetch_file(id): a function that will return a (file_name, file_data) tuple for a source document specified by id. File should be in PDF format. If your source does not provide bibliographic data directly in bibtex format, the xapers.bibtex module has several helper functions for creating bibtex strings from python dictionaries (data2bib) or json objects (json2bib). See existing source module contributed with the xapers source as examples (lib/xapers/sources/). 
Source module path ------------------ Once a custom source module has been created, place it ~/.xapers/sources. The module path can be overridden with the XAPERS_SOURCE_PATH environment variable, which can be a colon-separated list of directories to search for modules. Testing ------- Once a module is in place, use the xapers source* commands (sources, source2url, source2bib, source2file) to test it's functionality. Your new module should show up in the source listing with the "sources" command, and should be able to print the relevant data with the other commands. Contributing ------------ If you think your module is stable and of general usefulness to the community, please consider contributing it upstream. Thanks! xapers-0.9.0/TODO000066400000000000000000000066231365520605200135250ustar00rootroot00000000000000* unique bibtex keys (handle conflicting) (use id?) * make 'date' alias of 'year' * nci: translate tex in fields * translate journal abbreviations in bibtex * need *something* for docs without sources * store at least URL * "notes" for documents (editable txt file) * add only opens writable db on doc.sync() * db version * rename files when being added? * separate add and update commands * set/get title, author, tags as data or values (is this faster?) * gtk gui, with pdf thumbs: "convert -thumbnail 500x -background white -alpha remove file.pdf[0] thumb.png" ([0] == pdf page) * rework db/doc interface * doc is just directory and xapian_doc * db does write/index on sync * db: can we force source prefixes to be OP_OR? doc: * add ability to remove source sid * add ability to replace/remove file * what to do with doc 'data' field: * snippet/summary (current) * data for fast retrieval? * bib abstract * custom annotations/notes * nothing cli: * utilize meta-data pulled from parser * update should re-pull from existing source if available * export should produce full mirror of xapers document structure, with index nci: * add update/add commands * how to test?? 
* meta for other fields (title, authors, year, etc) * custom keybindings * customizable palette sources: * add 'hdl': http://handle.net/proxy.html parser: * extract metadata from pdfs * better handle parse errors * better pdf parser (native python: https://gist.github.com/pazz/5455090) * parsers for other document types ? * emacs UI (need json/sexp output) * store bib data in different format (json instead of bibtex)? * clear old indexed terms when importing new file/bib? * vcs integration (git of root)? BUGS ==== * capitalized prefixed terms are not searchable - dcc:T00000 - key:HaEA2009a If the term is capitalized, a colon gets automatically added: key:foo -> XBIB|foo key:Foo -> XBIB|:Foo * can not add --file when source contains '/' * "xapers add --source=arXiv:hep-th/9806203 --file" * nci choaks on unknown sources: File "/usr/lib/python2.7/dist-packages/xapers/source.py", line 181, in get_source raise SourceError("unknown source: %s" % name) xapers.source.SourceError: unknown source: url * uncaught QueryParserError: servo:~ 1$ xapers show tag:lsc date:2016... 
Traceback (most recent call last): File "/usr/lib/python2.7/runpy.py", line 174, in _run_module_as_main "__main__", fname, loader, pkg_name) File "/usr/lib/python2.7/runpy.py", line 72, in _run_code exec code in run_globals File "/usr/lib/python2.7/dist-packages/xapers/__main__.py", line 330, in nci.UI(cmd=args) File "/usr/lib/python2.7/dist-packages/xapers/nci/ui.py", line 75, in __init__ self.newbuffer(cmd) File "/usr/lib/python2.7/dist-packages/xapers/nci/ui.py", line 96, in newbuffer buf = search.Search(self, query) File "/usr/lib/python2.7/dist-packages/xapers/nci/search.py", line 370, in __init__ self.__set_search() File "/usr/lib/python2.7/dist-packages/xapers/nci/search.py", line 377, in __set_search count = self.ui.db.count(self.query) File "/usr/lib/python2.7/dist-packages/xapers/database.py", line 318, in count return self._search(query_string).get_matches_estimated() File "/usr/lib/python2.7/dist-packages/xapers/database.py", line 285, in _search query = self.query_parser.parse_query(query_string) xapian.QueryParserError: Unknown range operation servo:~ 1$ xapers-0.9.0/bin/000077500000000000000000000000001365520605200135765ustar00rootroot00000000000000xapers-0.9.0/bin/xapers-adder000077500000000000000000000014061365520605200161040ustar00rootroot00000000000000#!/bin/bash -e if [ -z "$1" ] || [[ "$1" == '--help' ]] || [[ "$1" == '-h' ]]; then echo "usage: $(basename $0) [--noterm] " >&2 exit 1 fi if [[ "$1" == '--noterm' ]]; then term=false shift else term=true fi infile="$1" if [ ! -e "$infile" ] ;then echo "File not found: $infile" >&2 exit 1 fi # open the file with preferred application nohup xdg-open "$infile" &>/dev/null & cmd=" echo 'Xapers-adder' echo '============' echo 'Type C-c at any time to cancel...' echo while ! 
xapers add --file=\"$infile\" --tags=new --prompt --view; do read -N1 -p 'ENTER to try again, or C-c to quit:' OK done " if [[ "$term" == 'true' ]] ; then exec x-terminal-emulator \ -T "xapers-adder" \ -e bash -c "$cmd" else eval "$cmd" fi xapers-0.9.0/man/000077500000000000000000000000001365520605200136015ustar00rootroot00000000000000xapers-0.9.0/man/man1/000077500000000000000000000000001365520605200144355ustar00rootroot00000000000000xapers-0.9.0/man/man1/xapers-adder.1000066400000000000000000000025421365520605200171010ustar00rootroot00000000000000.\" xapers - journal article indexing system .\" .\" Copyright © 2013 Jameson Rollins .\" .\" Xapers is free software: you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation, either version 3 of the License, or .\" (at your option) any later version. .\" .\" Xapers is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License .\" along with this program. If not, see http://www.gnu.org/licenses/ . .\" .\" Author: Jameson Rollins .TH XAPERS 1 .SH NAME xapers-adder \- "gui" to import individual documents into Xapers database .SH SYNOPSIS .B xapers-adder .IR file.pdf .SH DESCRIPTION The specified PDF file is displayed (using \fBxdg-open\fR(1)), then a terminal is opened (\fBx-terminal-emulator\fR(1)) executing the following command: xapers add \-\-file= \-\-tags=new \-\-prompt \-\-view This program is useful to use as your PDF handler in your browser. See \fBxapers\fR(1) for more information. 
.SH CONTACT Feel free to email the author: Jameson Rollins xapers-0.9.0/man/man1/xapers.1000066400000000000000000000215711365520605200160270ustar00rootroot00000000000000.\" xapers - journal article indexing system .\" .\" Copyright © 2013 Jameson Rollins .\" .\" Xapers is free software: you can redistribute it and/or modify .\" it under the terms of the GNU General Public License as published by .\" the Free Software Foundation, either version 3 of the License, or .\" (at your option) any later version. .\" .\" Xapers is distributed in the hope that it will be useful, .\" but WITHOUT ANY WARRANTY; without even the implied warranty of .\" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the .\" GNU General Public License for more details. .\" .\" You should have received a copy of the GNU General Public License .\" along with this program. If not, see http://www.gnu.org/licenses/ . .\" .\" Author: Jameson Rollins .TH XAPERS 1 .SH NAME xapers \- personal journal article indexing system . .SH SYNOPSIS .B xapers .IR command " [" args " ...]" . .SH DESCRIPTION Xapers is a personal document indexing system, geared towards academic journal articles. It provides fast search of document text and bibliographic data (synced from online libraries) and simple document and bibtex retrieval. Xapers takes as input document files (as PDF) and source identifiers. Documents are copied into a local document store (~/.xapers/docs by default) and text is extracted from the PDF and fully indexed into a Xapian database. Source identifiers are used to download document bibliographic data from online digital libraries (see \fBSOURCES\fR below), which are then parsed and indexed to prefixed terms in the database. The bibliographic data is also stored as bibtex in the document store for easy retrieval. Documents can be arbitrarily tagged. A curses UI is provided for simple access to documents (see the \fBview\fR command below). 
Xapers is ultimately a document indexing library, though, so development of alternate user interfaces is encouraged. Underlying Xapers is the wonderful Xapian database/search engine. See http://xapian.org/ for more information. . .SH MAIN COMMANDS The following are the main xapers commands. See \fBSEARCH TERMS\fR below for details of the supported syntax for . . .SS add [options] [] Add a document, or update an existing document. Must specify at least one of \-\-file or \-\-source. If search terms are provided they must match exactly one document and the matching document is updated with the newly provided information. Available options: .RS 4 .TP 4 .BR \-\-source=[|] Source identifier for document. See \fBSOURCES\fR below. This may also be a path to a file that contains a single bibtex entry. .RE .RS 4 .TP 4 .BR \-\-file[=] Document file (as PDF) to add. Text of document will be extracted and indexed. A copy of the file will be placed in the Xapers document store. If provided without path, xapers will attempt to download file from source, assuming source supports file downloads. .RE .RS 4 .TP 4 .BR \-\-tags=[,...] Initial tags to apply to document. Multiple tags can be specified, comma separated. .RE .RS 4 .TP 4 .BR \-\-prompt Prompt user for source/file/tags, if not specified. When prompting for source information input files are automatically scanned for source IDs and found ids are displayed. .RE .RS 4 .TP 4 .BR \-\-view View resulting entry in curses UI when done. See the \fBviewP\fR command below for more info. .RE . .SS import [options] Import an existing bibtex database. Each bibtex entry will be added as a new document. If bibtex key, or any sources found in bibtex, match an existing document, that document is instead updated (this makes the command effectively idempotent). Any "file" fields will be parsed for document files to add. Files can be specified as a single path, or in Mendeley/Jabref format. Available options: .RS 4 .TP 4 .BR \-\-tags=[,...] 
Tags to apply to all imported documents. Multiple tags can be specified, comma separated. .RE . .SS tag +|- [...] [--] Add/remove tags from documents. '--' can be used to separate tagging operations from search terms. . .SS search [options] Search for documents in the database. Document information is printed to stdout. .RS 4 .TP 4 .BR \-\-output=[summary|bibtex|tags|sources|keys|files] Specify document information to be output: .B summary outputs a single-line summary of the documents (default). .B bibtex outputs bibtex for all documents (if available). .B tags outputs all tags associated with documents. .B sources outputs all sources associated with documents. .B keys outputs all bibtex citation keys associated with documents. .B files outputs the full paths to all files associated with documents. Default is .B summary. .RE .RS 4 .TP 4 .BR \-\-limit=N Limit number of results returned to N. .RE . .SS bibtex Short for "search \-\-output=bibtex ". . .SS count Return a simple count of search results. . .SS view [] .SS show [] View search results in curses search UI. Documents matching search are displayed with their bibliographic information and a short text summary. It allows for manipulating document tags and for retrieved for document files and source URLs for viewing (see .B xdg-open(1) for more info). Initial search terms can be provided, but further searches can be performed from within the UI. While in the UI type "?" for available commands. NOTE: At the moment only the top 20 search results are displayed, due to synchronous loading restrictions. This obviously needs to be fixed. . .SS export Copy PDF files of resulting documents into , named with document titles when available. . .SS delete Delete documents from the database. All document files will purged from the document store. .RS 4 .TP 4 .BR \-\-noprompt Do not prompt to confirm deletion of documents. .RE . .SS restore Restore a database from existing xapers root. . 
.SH SOURCE COMMANDS These commands provide access to some of the source module methods. See \fBSOURCES\fR below. . .SS sources List available sources. . .SS source2url [...] Parse a source identifier string and print the corresponding source URL. . .SS source2bib [...] Retrieve bibtex from source for a specified URL or source id, and write to stdout. . .SS source2file Retrieve file from source for a specified URL or source id, and write to stdout. . .SS scandoc Scan a document file (PDF) for source IDs, and print and recognized source ids to stdout. . .SH SOURCES Sources are online databases from which document bibliographic data can be retrieved. In Xapers, online libraries are assigned unique prefixes. The online libraries associate unique document identifiers to individual documents. See 'xapers sources' for a list of available online sources. Xapers recognizes document a source identifier, or \fBsid\fR, in two forms: full URL http://dx.doi.org/10.1364/JOSAA.29.002092 sid of form : doi:10.1364/JOSAA.29.002092 URLs are parsed into sources and source ids when recognized, and this information is used to retrieve bibtex from the online library databases. The sources and sids for a given document are stored as prefixed terms in the Xapers database (see below). . .SH SEARCH TERMS Xapers supports a common syntax for search terms. Search can consist of free-form text and quoted phrases. Terms can be combined with standard Boolean operators. All terms are combined with a logical OR by default. Parentheses can be used to group operators, but must be protect from shell interpretation. The string '*' will match all documents in the database. 
Additionally, the following prefixed terms are understood (where indicate user-supplied values): id: Xapers document ID author: string in authors (also a:) title: string in title (also t:) tag: specific user tag : specific source id (sid) source: specific source key: specific bibtex citation key year: specific publication year (also y:) year:.. publication year range (also y:) year:.. year:.. Publication years must be four-digit integers. See the following for more information on search terms: http://xapian.org/docs/queryparser.html . .SH ENVIRONMENT The following environment variables can be used to control the behavior of xapers: . .SS XAPERS_ROOT Location of the Xapers document store. Defaults to "~/.xapers/docs" if not specified. . .SS XAPERS_SOURCE_PATH Path specification for location of additional custom Xapers source modules. Defaults to "~/.xapers/sources" if not specified. . .SH CONTACT Feel free to email the author: Jameson Rollins xapers-0.9.0/screenshot.png000066400000000000000000000670061365520605200157220ustar00rootroot00000000000000PNG  IHDR;C5G-sBITO IDATx^3GbߡX|^`h&# ԫ9RǏ_[ @UW. @$(^@ p5_{lWS&? 
@8KcK+;[Kڽ z-?‰ @mcu[R)6]c!_ CƋ޶WR.TXwEœR&؁&kl-&B @G`R9jjٓ*p磙> Tly-RHeYckz $0mH䍆4J;˲f}<:,L]ɒa30&ɽej@'U+4ij!:+^JI %uX@}%_n89J=j5V!@oMkA G$!@7&t,oe˥ w!|Ye-S} otavڭ^ޥYl͔* kgV!@x#Jc~(im2LT{lʽ’FsT5>y M);Hm~Uyl# @K`R y _jVU n@ E]?toӟA !@]Ou @*(k @ PlO8 @*(k @ PlO8 @*(k @ PlO8 @*(k @ PlO8 @*(k @ PlO8 @*(k @ 'H @ "OF @?}.GnL} zvu˫'8?鏟V Ojg@Tr^xghofj?TOPg%IR_2۴ZO6@B }yIXiQ}?Ɗm [\\ݵ ŒA 0 UA1w@x8(yͲ5\V6x6 >e΃V䪲䍹F;ތ^iyJ+jh=|3[Bf԰VDa | &͔MHE/]|$Yrsh[Rz&2l#5UY0 >31 @  ,o>CH!K83`W^J,칿l)Wy5[rٲ?@X WTwܳ=Ԙd\Tlp-$ldެN&$ R BDvv<"7-ҧɊ 0QZvT!@%`;[&%hyɡM4Ř6=nqsm%߰Yr a&z[UeTۘ%0jXd}]b |)L`uѫW cuhTۄGOOt_5,oYVl vSlY9yX @[fJ9=N.Wa h  w!W͜b]x[<_Cߒ MC ~y7.C %P-?mF!zT<3=(9{]G;ϟ0>Χ=y-\Lso6g|d)8L8D`Ǧ+)+Dzc߇pw/jj~>dl[Z-h3M̜gP's'?/vu84<WI4K~hĿb'U}rT VJˢO,֙sK_L-H{|<8{ Ssb3y< vO2Yy"%m=W̓ELۏ9ٜ"SʙY΅r\w]|r-igY4`>,Zmb{l>ukWu9lx j]_Yn' ͜'36̞-ZX|~ O>GI2lҟ^$fzQ~ a?_,d rxbÜ$M{\$Cf!Pd^9h|f2{ZWxҫL&v8W"{EҮ3 u*ҟg6 Wp4oy*J|NzV[I<;CbZ'A-=~OQ? g 4&:T+'iVkoEfWY,TM7?E}sWOGաxv:K &TWO o }LyC/пddׯv˯/B|]駉y|?Ee4lptWO]4QzF$ù4~fE'ZE<̵1, $PѼنC0lKZ996OfsՋEg'}kI'{Ȝ%eV{srWX @`/: kfK|@~3r{k>*=4?K^3y\\=[Ef3P ]GgGTlME}a&4Q򡡚1Ͼ6iQ%@_E+֤!C9\ɚIuynF5T/~3*XPY|gTyh!jx4v>8m@0Sr-[$c8bmMM^ja?:k[S3, 37cZlMڳe(se㰟IBbZgKxI-a *Ŧei=5s֦ʵ-M.v.$Zeפ|!]|fP\& e5,9ØiE%2-Ej~4LԌOL@'aot]³I1B"\Wя} a/"N@X#2 "m@ /' &oY!?q @/!0Rk,w#5F @`#)B-uX3)FoDL*@ pXЄWTzl?O9OthE鷮8|z"pVi Ra6 Rq0GK᳭2Jzt-lʬIj.O3F@ #0Vln+Xyd\,L1Tj5' @hD5+ycث;=ipW @; 6er,dF z64i/6D lf@w;{~Ye3JY~mMUcmYKܛ-!2, f:&k:?;@>WX{iͺpX @@ TwEiOQ33 @ɃK$9 @fbk~4 uHf@ p"ӣv-}|.덴PՈp@ Zɲ4 wrM2SHu@>b,^H'z @`#}l"Df:l*˵lN@ p[QS }YRnTZOe3{G${*}Oͯb7Ug!g  %0yu>@O_an|3C@ pcwE耜 @5u$ @4?vp\%[+s瞽WXy>v'] ْ a <)&žu17yse ŝɝHO"~g,@3 >%Tgm)|\aٳ9kZtr344<ճ%@x&/o+r~ gr^s4sWs @8J`f<75e13}wN@@[[`}m9$ o1MB 'P)Onfll 2jux܌\kJ>9J-eȖ^,Rg` (6Peb>W K/aq+[{}h5>ٲ64u3oyrlό' @!0Plk]z&w5)gŚ7 pg+&õ9UȓgKN DݲTrNiiaW]y@c[nEE[W[q9m6Z9ŲdϬzQ&Y4@[a7Q-~,96m*Bҧ$ZǎM0mD)TQK5p3ꡟ<͹ п+Z)]MQ:u3 @"ʿwEQ;v_uI6@'p'fx8@ p>ypC딀 @_B`}f!\m kh>36ə%Ktk}l '}cQoJ{¨An8զOEݲsAp~5BB @x!6w$H @gͯZfSRgoPOQWjC =ܨ or ֍sɖ O7ۚokiGYu6lp`(%.& |DY.4z>"IG4gy&k0}͎FZ@ KMqÜgJ<bgN. 
ڻ;ʋ~N  P}@5 #ӟw @ܷ 4 \AvUrB Pl;i  \AvUrB Pl;i  \AvUrB Pl;i  \AvUrB Pl;i  \AvUrB Pl;i  \AvUrB Pl;i  \AvUrB %C~5rDٳc^ ?OSe  7|Z!{e1RH{ymX}bT,K cd.o=Og  @jS9xrg97gMQ}?f0Ky_Y @E>ln_m/ŚV >qKkxW԰\U1(ˠnh6d k  a` >C(>p|X By/C9hJᕙ5y*3c x^f 5gQ&Y'd* IDATYx8/ƜoMj!rrxv>axWUT:ے E}NɃ  \CK"%AKHlC^h0AXi Wmy~fFӡm*,zǾjH3vV޾ΎH@^.zd\ Rӊĭ4-[GP=Ig5T@zYoGqM(?U@,IC  @h~-ҕ7R&*ԹZ44d VȣC1 {e<V3W6aX@87 5s;W=V9W/褾,VF#rz1T{j~*lϳ9@r ^E`Ǧ+4%Z7)?`ſO{Q/g ^hB-{:T+0Ggjod+<u԰B,9*[jӃ'7{&>jXN2P]ݴ,)r=9*7}+h͝4\+d00YWuc>O3רj0vŰg; rϓ  [],KfیϻJ^t%}({G{u쥚9~\E-3z!L]QUTg63S{|{&߽]2boٴ9ע*˒vK۩>Lz)Xu\Ԃ[TKO/jEi[ȶ @q-kBC5Y.Q3^u^^ﳋ2@;?zz|fګk쳋|<ӳxD BrkDe ekMR-z"DN=*[Π1ys>ky ]=]5$ 5"fsB,ʿB^'q@{tҜ3jnl&92?Sk&O M:q6I_k]%K4kZiSY@4wgxf8o0yOyђg_c83fgTwE}&Y(|gl!6]K+duA2J`'su = WbFt8)Ŏ3"EמkbCf&]cGە'ghf:UouhL= u;kPc6'>khγr]_`dK!I"t}W7󱨣mޒ~֨.D~XI'z지{&d$a'r(ta' ɭdZҋ5G<93,tjN0iNlYv$kߖ3' @I`t^+"' @Nm0$ @I @(͟gCDߎ{'VcҋOhߛo.WiC.n&QLԌO(@>Yr_[}13&It^~i$i~yj*$plQ@z >Z xUQi.oT=[}T}]~L1glf6,I§|pk(6S 7]c>zL|Ϥwh׃#gufb4K4{O3ehVoƵ+\¤!gVK7Gt{&bҞ+3C~2Uv fuBHU@ T-]MCYo3oWP 'oE^MU_;YK'GF|da,,WO={{`>}l:uil'YmJ hx%|!Iq+МG֝V{L3.VY!+כg;VW4ɲz>NzS3k>5P7* Y=6RU ol"wEuB^!]kbnz>cmHbQMjit|Æ~fNzzo˞uR^CZ\?zmu l͢*ҏc bۥF=*UIѶ! @~cnVCR^Q >u>aahw?n3 @Vn[PC߶Tw֧ @`X-6,{i5RV!@x;(!W7?w @I`b3 QŽyb{ @ *ŖՒOޯ4_֌Rr! )(I!@x /T:g;_eB  {U ˩x[f Xv,e}zf$a={cAGm!p @O#i7_|mQ"dt@7T{9*k~[rlihSW݂̓&T%'@!𬻢ζ3庛óȖ2 g1SRޭ4S3ȝ7-Zs^=όLTn  L`f>,wvIzQ˼嗀uZՌXi cYËT!Jb{Ԣ-DiE5ga]~3z y"d?~XyiyyOZGg@A}/=|TN.v3PEcCkK3O.QǧQb1cn#ن=h`ϐuf CfkϷ{n30{Էi>,2[8 G38k>9G]7#/: a$^' gh#h-}C-s 6&iA~FKR[&;,Z9,zԞXQH&g` Wj˝0qv)5vry$ZP0 fy-jW?MA "P}m:hTvwZMX6/خafq\O%ͣJLaX>Ӊo7h93yrkIsdXra@O&P]Q@ _ QYMo۷kŷmb(Kfq}up]!scSnÀzM€C%ךXK'jN| y֌f䑴y m홐` -^quf Cya+7@xb{rYG]@<Y @@:s'[ru}õǡ,׵gr?!n@ Tk5YC|6%$~[Q5\QF %خ|h5 -ͦF@>&LŋtĮdFqK(~UO먃4*j'LC@O`ؼcv²dhf˶N&")D].;3>>p gB @3 ڐy,^ړrִNaҞ^Μ}ɏ5I2ԝB!@([֓bJ v&Ug!pAX @+LDHB |(6Ay쫞7 @3P  |6}b |}f9 !|T&Z'վǧE3 "0PlvQWwzx~MtO1zY]6rxm|ou I`hh -T ! 
R>KM.-{'b @$[Ξ7xz>b7QE.jZ+z\Z7kQQٽ9F@FRl&irP_򽪰c:#VY_]}oABEPNtv:|a@#kiY@n#P)6=uR" 8,)JKR:qB*2V +;YZ9 @X P)tS3 |m rۮ<!@xOu췻.5J̌kmdVS_+c cSA@]6#"A7XfUBsp>! =厗Er^3-KMLfqof  L>v`V`Wux]/:d <}4@W@}ifH@ޚOC Pl_q &b{G |Wf @ < @%Ǐ?&*h?%Ϟ]j4y @C C6h|~e㵓yӌe_vIR_27OôZsDC pNMIr6 TUcڦ0?S(vz5jek6A @VKY'd˿v"<,dlɥmisxW԰\U1M9K_O.6oCM  pC:71 4QQ8xMIY&5CM3|߾ %xֵ3D5y*-@x6C"Eu02tx( /J#/|Ͻ㹆{l͆t0MϭTE{l䈃 Xִ:<$W2ڶym#uau6N4tH3m %pHrX_?ˊ3;6S' HqW #uBѓFbiʹ֤6RS,eOt@Jʷ6ڢe[ xL ȜQߵN(<ac p/tBV`jDUX]3jBFX<|Zj8Pbزl2_VBƮ}m@=кTڮ҅g@$4$l5ń ='o_QCJ&@:807r~bh \!@x˟|w83?!mbϹ0G+jY:ua&{fQ3^ۓy|o&^3i!\GcS-%? qIgn!EsWs)a${q Qڛx<4Oڬ>Dы {NkLWkf..f2j+9[$J6,9xՐg7sw4{6hZb T{l5s#R_![rXZhQM]rb}Hp2ZT; sya`j5+l)g<Sϓ3gbghia=MY>SX@IJ EEgK3VBe٨ؕwL2xyuL}kO<>yF sՂ?5a@ͭFn=7cA{l5P 'mf",0a |cv "ڞv6\ O @uգ<ϟf \M5x##@ocl  ^ +TJN@<<ə  Uqy}A} K=7gz=R}љ* &7v9|LVuG—.lۂSL?nf-5}vw̒\zNlf|4C &g 5c&*z` g p%b9PBg#>|&U착$S"vkj#GY>P3'c5}fTb%gh/鱆 fc9EHfj@xJ;Wqw쮮{,J5z5[y'w;ciffTIlI'զ:\;_0B^G`>6m,ex6YmJe{oY+WcZ]>ecŖnČEېBH\@JUT"zcs|/zw9>ZخknO//WL^sf?d& lf\PL'paUcYTP[EȫC}ƒg7}f@+̤Lƽ>7We'ْNB T{lOh'̶Sfxu[[H}@ۥÓlQ~Ea @sL}se@ fn8?~T>ҊqoM  $0vvS5U"kF2 ,Rl[ZgbVuK @hYiJezl?]SiΦ!DmhIr^9  @X 0Vlwf},inŁLKU)3g*={MdfZT!@*'7T$~Sm8  @`%mJث?!9s  @O&0Ƀ&Ba!$7άZU,V@fT{lf=mz,~iX,o6^Nf Wj-E]jfλn q @T ޤ!ʹߝ[g_B @Fc;aɃVrx]] '}`z2Pfp @m<8Tg@ y=DwK?~+,F`9gN @؂SB T @x,jPMoymW-@ +Tk,;nE=[+&ќ3[e=Qr*k @`"ӣv-}|BGCn@8${'pBޮQC +L)6ɂ nWx4Qv4? 
@fD [ky-L2t2yk38k[  Žj8mEr*z/x;U٭Os!=X@ 0yu> <@ǖwΤ" |J dW= g>y̎  |OSlg;cy<3Q||I'䔇ϙG-!g[:hCju5|4[emdQOX'rB3Q[#  @@ mF LcЯ0w|a-[ Dr34>s 乆i -KqGJE9k]r%Ky4Q &JIJ-w*C-9.^.:<3ggFleV  @V3?A[̿(,r&Sp4^B3G:°z{XT8;=hc楙{uCvMK,z ~zQ~Wp,+鳠.ն,ͫ{GhvXԲ ^yܳ?S}:rinPdρFy&O+[y<4{ 1 d"ȵ̿yofl]Gc83䜻,Om׌ O%0xcD1o /we>GEhcGc83X @xb{r%ɘGD]@@ɃK  @`@6-rxof1_@>@z\{PB*H;ԬBIҸA V^% [2^Ҁ$1&[v  @O`DB闁Ş}˯_B_tI%KhF1[C3*Sgނ$ )]QmXG6Ȫ= '7l+K6R%gچ!IenVEL٦&WÏkb=eK3&csl{-j@$uB96Ř6=N!SirPbIkE-ڇg}h>os KeSۘZo(vy &uBVi 弮P0^xȱЗX^ g6P>YRƐ9a7jsZ} (?ۛ"ڿl 'QX-KFKhQbΓzQylPM!?N1ڰ),Ivmh(Ry})rHqIO{Y fz@{T{lUA`5}>gD~.3rWa<4]̨ }UzQWϵh܋ [ˣ~\՛\=[ArXy!MlWz#W+}}5ᨧϕ'uz/ 8gT==39Ì9m&_637Y.G  #P67>3W_.*r5RLݬUB9' ~hd$1 @Ƀ~P2B+[^>[z9C .!<+ @ Wl?3y{]lO-Jo,w2UѤD\DNgox>h\dl9): n_'\yr!Uۋ%%ޫ^=Oq1_~.2liv?y}ϋjmKizb pv*[jw[*n; }gsl1Y|'y <5>=V?,<K~Sz{tYs%W_1]rl9b2 ~^9S%[ >35ӛ)w5L󨃍T3Ie4ΟYFܦlqZ,z’>䣉Z/JW3UOxeM4`s YmFseuz- 5l$߳k5.9*Z?!OmjF{VOɖ9^4E.$fiH64zO_p֥f?ϠXaΟk?حV"[|?r4 p;b]]5]Hb>zL|ϤjgGf/g Lz_ K)[ji9}XXՇX3CB͐C36=pY<l󞖡\?[z-y{L"Û=ɛ yfyrS7\iWpε![ggynO'(2ju{b`iFJ۫_s2}pu)li-j&;ȵE>P9p~7 0ɃӖ-RiO6 [гhVu:ip&!-O[F&345, K3k35}0:<=@PN#hҟk9!@V޴ۓ|(V+K(R,q.f(]ߢIvޮ+ZJOWle,>Wځ !0ɃA @;xbп*珞!@S!򷬡޾ό w$0^Eřc;m%=C Ɗ-ܬԇ&c|aӡiߺjB!蝋Z3F@ PVn[PC[*fh;|U&RIn B5 ZZY%if@zƊYMc",k) 漜ۘ_͵c @M(f%o {uCy5ypi$ @L]Lyf7?3 }N4ư6&+@&poV;Ԙ}볜e&X @x-Jyer_wgKr3zk @8vWT$}zԎŖ=s@^Kb^>Y_rT\&b_{ @Rl^vݞ @ l$PMę̰wBC-Se6-I@ 0c ;j]2_ʍ\뉿bH$zSEFD IDATS,LB 4*ݦOn<'[r!, gx]{~gDʖBѐ-=KQS'rMSxHh }Y# @9{l&#gO%K[~ ^M%HkѫI QBu_7y ֲ堗27}-د{8sjf| ~M_xrŵ mj#XN[R;Ξ=,j\7K(l=d|5 -كE٢\  @=6 sQy ͤB*Q3PBANurid7o#V"Ifg|f2<ْ` J*ܩ 8!"f(U!dϪ~fT_G p'[HCnNk3cVQf>[\Osޣ!t.MֳՄJƃjv~5a'w LuK0je}.&2ж^ ϨOS P\˯jcdjÛ'5L- lͨ0WgH#GXX @BKpי)LE}&>GGc>{u;? 
5Z3Qw4#t2䝧\褗6Cr@ ْ~%J5/ͭ$ ;hP%j\  @'P)^DRXT<ʶKZ'[z; @O&rWv';ozuD+}-UBWfѐ'dKo@ <@/k=<{Qg/OX~ipU*?+lu'jN9>Ǐb @V65u9;/פ\VKT7"Of,9@ގ%MWm=)6z|C^^!@!0+ܨMWrl嚩X[9V؇gQ3ug|rX @x> 0$3Zav<CUd^<ٱ(E >A!iB-3WoFaC$q @!*v]MU'g2_'|f^b!@xK63oqm6uI6@'&s>_2@AF@ =6@%6tXhdHx?\s3><>?!v6{,^^ <3`3[^846gQy)Xr9jmm-L-| @]QC[sb*E{yKp-sj yO\z{s]Z l!0+ʨ/&M-7h-2azpÄ҃A1 @ Tdg@Vȭgjy6ZB QLƟQz^rQY,fsa @Q*D|ߦ~| CP6Af|r`b'>7[SS+;}ȗl9by$GPyP9>6{2)52_gfo>O3|+E~*( niB hv0Z?¤9QyޤҤl`hc9  ұrໟC{Cg(-Z<-oA+~^gvUw'~OIT< UCqpIENDB`xapers-0.9.0/setup.py000077500000000000000000000031221365520605200145410ustar00rootroot00000000000000#!/usr/bin/env python3 # much of the structure here was cribbed from # https://github.com/pypa/sampleproject from setuptools import setup, find_packages from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() version = {} with open("xapers/version.py") as f: exec(f.read(), version) setup( name='xapers', version=version['__version__'], description='Personal journal article management system', long_description=long_description, author='Jameson Graef Rollins', author_email='jrollins@finestructure.net', url='https://finestructure.net/xapers', license='GPLv3+', keywords=[], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: End Users/Desktop', 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)', 'Environment :: Console', 'Environment :: Console :: Curses', 'Operating System :: POSIX', 'Programming Language :: Python :: 3 :: Only'], # install_requires = [ # 'xapian', # 'pybtex', # 'urwid', # ], packages=[ 'xapers', 'xapers.parsers', 'xapers.sources', 'xapers.nci', ], # https://chriswarrick.com/blog/2014/09/15/python-apps-the-right-way-entry_points-and-scripts/ # should we have a 'gui_scripts' as well? 
entry_points={ 'console_scripts': [ 'xapers = xapers.__main__:main', ], }, ) xapers-0.9.0/test/000077500000000000000000000000001365520605200140055ustar00rootroot00000000000000xapers-0.9.0/test/all000077500000000000000000000400571365520605200145110ustar00rootroot00000000000000#!/usr/bin/env bash test_description='basic command line usage.' . ./test-lib.sh ################################################################ # FIXME: update with source already in db # FIXME: add with prompting ################################################################ test_expect_code 1 'fail search without database' \ 'xapers search tag:foo' test_expect_code 1 'fail to add without file or source' \ 'xapers add --tags=new' test_begin_subtest 'add file without source' xapers add \ --file=$DOC_DIR/1.pdf \ --tags=new,foo >OUTPUT cat <EXPECTED id:1 [] {} (foo new) "" EOF test_expect_equal_file OUTPUT EXPECTED test_expect_success 'new docdir exists' \ 'test -d $XAPERS_ROOT/0000000001' test_begin_subtest 'tag file exists' cat <EXPECTED foo new EOF test_expect_equal_file "$XAPERS_ROOT"/0000000001/tags EXPECTED test_expect_code 1 'fail to add non-bibtex file as source' \ 'xapers add \ --source=$DOC_DIR/1.pdf' test_begin_subtest 'add bib without file' xapers add \ --source=$DOC_DIR/2.bib \ --tags=new,bar >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'bib file exists and is correct' cat <EXPECTED @article{Good_Bad_Up_Down_Left_Right_et_al._2012, author = "Good, Bob and Bad, Sam and Up, Steve and Down, Joseph and Left, Aidan and Right, Kate and et al.", title = "Multicolor cavity sadness", volume = "29", url = "http://dx.doi.org/10.9999/FOO.1", DOI = "10.9999/FOO.1", number = "10", journal = "Journal of the Color Feelings", publisher = "Optical Society of America", year = "2012", month = "Sep", pages = "2092" } EOF test_expect_equal_file 
"$XAPERS_ROOT"/0000000002/bibtex EXPECTED test_begin_subtest 'add with file and bib' xapers add \ --file=$DOC_DIR/3.pdf \ --source=$DOC_DIR/3.bib \ --tags=qux >OUTPUT cat <EXPECTED id:3 [] {fake:1234} (qux) "When the liver meats the pavement" EOF test_expect_equal_file OUTPUT EXPECTED test_expect_code 1 'fail to add non-existant file' \ 'xapers add --file=foo.pdf' test_expect_code 1 'fail to add non-existant source' \ 'xapers add --source=foo.bib' test_expect_code 1 'fail to add non-bibtex file as source' \ 'xapers add --source=$DOC_DIR/3.pdf' test_expect_code 1 'fail to add source doc already associated with different doc' \ 'xapers add --source=doi:10.9999/FOO.1 id:1' test_begin_subtest 'update doc with bib' xapers add --source=$DOC_DIR/1.bib id:1 xapers search id:1 >OUTPUT cat <EXPECTED id:1 [arxiv:1234] {arxiv:1234} (foo new) "Creation of the Universe" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'update with different bib overwrites previous' xapers add --source=$DOC_DIR/1a.bib id:1 xapers search id:1 >OUTPUT cat <EXPECTED id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'previous source no longer in db' xapers search arxiv:1234 >OUTPUT cat <EXPECTED EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'update doc with file' xapers add \ --file=$DOC_DIR/2\ file.pdf \ doi:10.9999/FOO.1 >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'add bib without file' xapers add \ --source=$DOC_DIR/4.bib \ --tags=new cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'add file without source' xapers add \ --file=$DOC_DIR/5.pdf \ --tags=new cat <EXPECTED id:2 [doi:10.9999/FOO.1] 
{Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" EOF test_expect_equal_file OUTPUT EXPECTED ################################################################ test_begin_subtest 'count all' output=`xapers count` test_expect_equal "$output" 5 test_begin_subtest 'count all (*)' output=`xapers count '*'` test_expect_equal "$output" 5 test_begin_subtest 'count search' output=`xapers count tag:new` test_expect_equal "$output" 4 test_expect_code 1 'fail search without query' \ 'xapers search' test_begin_subtest 'search all' xapers search '*' >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:5 [] {} (new) "" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search all pipe' xapers search '*' | cat >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:5 [] {} (new) "" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search all --limit' xapers search --limit=3 '*' >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search text' xapers search --output=summary lorem >OUTPUT cat <EXPECTED id:5 [] {} (new) "" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" EOF 
test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search sort year' xapers search --output=summary --sort=year lorem >OUTPUT cat <EXPECTED id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:5 [] {} (new) "" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search prefix title:' xapers search title:cavity >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search prefix author:' xapers search author:cruise >OUTPUT cat <EXPECTED id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search year' xapers search y:1869 >OUTPUT cat <EXPECTED id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search year (none)' xapers search year:1868 >OUTPUT cat <EXPECTED EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search year range' xapers search y:1980..2011 >OUTPUT cat <EXPECTED id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search year open start' xapers search year:..1990 >OUTPUT cat <EXPECTED id:3 [] {fake:1234} (qux) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search year open end' xapers search year:1900.. 
>OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search prefix id:' xapers search id:3 >OUTPUT cat <EXPECTED id:3 [] {fake:1234} (qux) "When the liver meats the pavement" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search prefix :' xapers search doi:10.9999/FOO.1 >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search prefix bib:' test_subtest_known_broken xapers search key:Good_Bad_Up_Down_Left_Right_et_al._2012 >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search different prefix bib:' xapers search key:fake:1234 >OUTPUT cat <EXPECTED id:3 [] {fake:1234} (qux) "When the liver meats the pavement" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search prefix tag:' xapers search tag:new >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:5 [] {} (new) "" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=tags' xapers search --output=tags tag:foo | sort >OUTPUT cat <EXPECTED foo new EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=tags all' xapers search --output=tags '*' | sort >OUTPUT cat <EXPECTED bar foo new qux EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=sources' 
xapers search --output=sources tag:bar >OUTPUT cat <EXPECTED doi:10.9999/FOO.1 EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=sources all' xapers search --output=sources '*' >OUTPUT cat <EXPECTED arxiv:1235 doi:10.9999/FOO.1 doi:10.9999/FOO.2 EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=keys' xapers search --output=keys tag:bar >OUTPUT cat <EXPECTED Good_Bad_Up_Down_Left_Right_et_al._2012 EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=keys all' xapers search --output=keys '*' >OUTPUT cat <EXPECTED 30929234 Good_Bad_Up_Down_Left_Right_et_al._2012 arxiv:1235 fake:1234 EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=files' xapers search --output=files '*' | sed "s|$XAPERS_ROOT|XAPERS_ROOT|" >OUTPUT cat <EXPECTED XAPERS_ROOT/0000000002/2 file.pdf XAPERS_ROOT/0000000001/1.pdf XAPERS_ROOT/0000000003/3.pdf XAPERS_ROOT/0000000005/5.pdf EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search --output=bibtex single' xapers search --output=bibtex tag:foo | sed 's|\\_|_|g' | sed "s|$XAPERS_ROOT|XAPERS_ROOT|" >OUTPUT cat <EXPECTED @article{arxiv:1235, author = "Dole, Bob and Cruise, Tim", title = "Creation of the γ-verses", year = "2011", eprint = "1235", file = ":XAPERS_ROOT/0000000001/1.pdf:pdf" } EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'bibtex multiple' xapers bibtex tag:new | sed 's|\\_|_|g' | sed "s|$XAPERS_ROOT|XAPERS_ROOT|" >OUTPUT cat <EXPECTED @article{Good_Bad_Up_Down_Left_Right_et_al._2012, author = "Good, Bob and Bad, Sam and Up, Steve and Down, Joseph and Left, Aidan and Right, Kate and et al.", title = "Multicolor cavity sadness", volume = "29", url = "http://dx.doi.org/10.9999/FOO.1", DOI = "10.9999/FOO.1", number = "10", journal = "Journal of the Color Feelings", publisher = "Optical Society of America", year = "2012", month = "Sep", pages = "2092", file = ":XAPERS_ROOT/0000000002/2 file.pdf:pdf" } 
@article{arxiv:1235, author = "Dole, Bob and Cruise, Tim", title = "Creation of the γ-verses", year = "2011", eprint = "1235", file = ":XAPERS_ROOT/0000000001/1.pdf:pdf" } @article{30929234, author = "Me and You and We Know, Everyone", title = "The Circle and the Square: Forbidden Love", url = "http://dx.doi.org/10.9999/FOO.2", DOI = "10.9999/FOO.2", journal = "Shaply Letters", year = "1869" } EOF test_expect_equal_file OUTPUT EXPECTED ################################################################ test_expect_code 1 'fail tag without operation' \ 'xapers tag tag:foo' test_expect_code 1 'fail tag without search' \ 'xapers tag +baz' test_begin_subtest 'add tag' xapers tag +baz -- tag:foo xapers search tag:baz >OUTPUT cat <EXPECTED id:1 [arxiv:1235] {arxiv:1235} (baz foo new) "Creation of the γ-verses" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'check tags added to tag file' cat <EXPECTED baz foo new EOF test_expect_equal_file "$XAPERS_ROOT"/0000000001/tags EXPECTED test_begin_subtest 'remove tag' xapers tag -baz -- tag:baz xapers search tag:baz >OUTPUT cat <EXPECTED EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'check tags removed from tag file' cat <EXPECTED foo new EOF test_expect_equal_file "$XAPERS_ROOT"/0000000001/tags EXPECTED test_begin_subtest 'add and remove tags' xapers tag -foo +zzz -- tag:foo and tag:zzz xapers search tag:foo and tag:zzz >OUTPUT cat <EXPECTED EOF test_expect_equal_file OUTPUT EXPECTED ################################################################ rm -rf "$TMP_DIRECTORY"/export test_expect_code 1 'fail export no query' \ 'xapers export $TMP_DIRECTORY/export' test_begin_subtest 'export all' xapers export "$TMP_DIRECTORY"/export '*' find "$TMP_DIRECTORY"/export -mindepth 1 | sed "s|$TMP_DIRECTORY|TMP_DIRECTORY|" | sort >OUTPUT cat <EXPECTED TMP_DIRECTORY/export/5.pdf TMP_DIRECTORY/export/When_the_liver_meats_the_pavement.pdf TMP_DIRECTORY/export/Multicolor_cavity_sadness.pdf 
TMP_DIRECTORY/export/Creation_of_the_γ-verses.pdf EOF test_expect_equal_file OUTPUT EXPECTED rm -rf "$TMP_DIRECTORY"/export test_begin_subtest 'export query' xapers export "$TMP_DIRECTORY"/export lorem find "$TMP_DIRECTORY"/export -mindepth 1 | sed "s|$TMP_DIRECTORY|TMP_DIRECTORY|" | sort >OUTPUT cat <EXPECTED TMP_DIRECTORY/export/5.pdf TMP_DIRECTORY/export/Creation_of_the_γ-verses.pdf EOF test_expect_equal_file OUTPUT EXPECTED test_expect_success 'restore to existing db' \ "xapers restore" test_begin_subtest 'database intact after restore' xapers search '*' >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:5 [] {} (new) "" EOF test_expect_equal_file OUTPUT EXPECTED test_expect_code 1 'fail delete with no query' \ "xapers delete" # purge the db from the root rm -rf $XAPERS_ROOT/.xapers test_expect_success 'restore purged db' \ "xapers restore" test_begin_subtest 'database intact after restore' xapers search '*' >OUTPUT cat <EXPECTED id:2 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (bar new) "Multicolor cavity sadness" id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:5 [] {} (new) "" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'delete single document noprompt' echo 'yes' | xapers delete id:2 xapers search '*' >OUTPUT cat <EXPECTED id:1 [arxiv:1235] {arxiv:1235} (foo new) "Creation of the γ-verses" id:3 [] {fake:1234} (qux) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:5 [] {} (new) "" EOF 
test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'delete document search w/ prompt' xapers delete --noprompt lorem xapers search lorem >OUTPUT cat <EXPECTED EOF test_expect_equal_file OUTPUT EXPECTED test_expect_code 1 'check for deleted docdirs' " test -d $XAPERS_ROOT/0000000001 \ || test -d $XAPERS_ROOT/0000000002 \ || test -d $XAPERS_ROOT/0000000005 " ################################################################ test_done xapers-0.9.0/test/basic000077500000000000000000000045201365520605200150150ustar00rootroot00000000000000#!/usr/bin/env bash # # Copyright (c) 2005 Junio C Hamano # test_description='the test framework itself.' . ./test-lib.sh ################################################################ test_expect_success 'success is reported like this' ' : ' test_set_prereq HAVEIT haveit=no test_expect_success HAVEIT 'test runs if prerequisite is satisfied' ' test_have_prereq HAVEIT && haveit=yes ' clean=no test_expect_success 'tests clean up after themselves' ' test_when_finished clean=yes ' cleaner=no test_expect_code 1 'tests clean up even after a failure' ' test_when_finished cleaner=yes && (exit 1) ' if test $clean$cleaner != yesyes then say "bug in test framework: cleanup commands do not work reliably" exit 1 fi test_expect_code 2 'failure to clean up causes the test to fail' ' test_when_finished "(exit 2)" ' # Ensure that all tests are being run test_begin_subtest 'Ensure that all available tests will be run by xapers-test' eval $(sed -n -e '/^TESTS="$/,/^"$/p' $TEST_DIRECTORY/xapers-test) eval $(sed -n -e '/^TESTS_NET="$/,/^"$/p' $TEST_DIRECTORY/xapers-test) tests_in_suite=$(for i in $TESTS $TESTS_NET; do echo $i; done | sort) available=$(find "$TEST_DIRECTORY" -maxdepth 1 -type f -perm /111 \ ! -name '*~' \ ! -name test-aggregate-results \ ! -name test-verbose \ ! 
-name xapers-test \ | sed 's,.*/,,' | sort) test_expect_equal "$tests_in_suite" "$available" EXPECTED=$TEST_DIRECTORY/test.expected-output suppress_diff_date() { sed -e 's/\(.*\-\-\- test-verbose\.4\.\expected\).*/\1/' \ -e 's/\(.*\+\+\+ test-verbose\.4\.\output\).*/\1/' } test_begin_subtest "Ensure that test output is suppressed unless the test fails" output=$(cd $TEST_DIRECTORY; ./test-verbose 2>&1 | suppress_diff_date) expected=$(cat $EXPECTED/test-verbose-no | suppress_diff_date) test_expect_equal "$output" "$expected" test_begin_subtest "Ensure that -v does not suppress test output" output=$(cd $TEST_DIRECTORY; ./test-verbose -v 2>&1 | suppress_diff_date) expected=$(cat $EXPECTED/test-verbose-yes | suppress_diff_date) # Do not include the results of test-verbose in totals rm $TEST_DIRECTORY/test-results/test-verbose-* rm -r $TEST_DIRECTORY/tmp.test-verbose test_expect_equal "$output" "$expected" ################################################################ test_done xapers-0.9.0/test/docs/000077500000000000000000000000001365520605200147355ustar00rootroot00000000000000xapers-0.9.0/test/docs/1.bib000066400000000000000000000002261365520605200155530ustar00rootroot00000000000000@article{ arxiv:1234, author = "Dole, Bob and Cruise, Toom", title = "Creation of the Universe", year = "2012", eprint = "1234" } xapers-0.9.0/test/docs/1.pdf000066400000000000000000000342671365520605200156040ustar00rootroot00000000000000%PDF-1.5 % 3 0 obj << /Length 596 /Filter /FlateDecode >> stream xmS=! 
+dgOȤH&2)0+Ȱ>@;W>Ӌ )}TzW)v(cK|[L妩a.9.vq ocjfS/ o| &TmC`\iMȡ=mն=S k-oB T024)'GzI  Cjwmk\;A6au|HI/啤 !=#!k]I>'Ct]/~H4O !fK,R4=d(!aVgH#ZZ"k6ޤan:u4ykaQi)+R-& .E挱uL:ziwXG lt Kv0IgJ=ot &7tڴG_L5>> stream xڍP\Cpw% 68=@pwww A{Gɹޫ_w*re5&[#-ؑ `e`feeGRp-F;X؂e f2t|:)؂N67++C[{> @ k 9 Rڹ[;#֘0 b06 A6 j GrAohlhlko&Hpp4@  m@UƌHP7pKfkbh -A`N`=58@MFde,#18f6ZXJ̎C - ^  )0|--,(7,6Odn,7ZML(ɎEl $#ɫ beer@ Wcs?ܫفT!~`Z` 8;GJqfȮʜ90*Z(Yq"wiqjӇ$ԑ{BH QCv#tWGV$-[f.~+C23gS·اi{c&6 T խ YH Dq2H-D - Hjw"ѥU ކ.(}M# % *ds W&fȦlh3mpVE׋<&V~R35Q4dExJ'9Q\D^g"VzX_/a9 u̇OPxI?kUi[ #p0| +RbO槦Y ek„ռoLJnx4+>'Ȍk?q.(o=q|1Cϡ6dY% 1'OU'Q6 [߅s^cD&z:T_ɯX\ q dtM})d(>: Iޘ9UnmX.38߾>d(ѹof033G ûxDy2;#Q(mS/@=#L-sJ{eLdgQ8"ܝvij#L o.90F Ƚ kȻK#5=kHsÏ U=D)X"CIR/W@T.CU|y{+'^f@8z4!BBd뷞ѳדR°m`4oRxҵ`ڶlfDH]xrYNrT^'XH|+f,s,AbhDZ^IGWtB-bىF<[Vi%WʍQ/P# 7=bgzIC5_*sPg|6㸧Sauް/7|ϊ&%qeݮIE9،js:_A)@n_{)$o66-gC?7>s$I4 !^P2{S!vcK0' NXvf_12)sm^BG180I4LgESp-oF>nrI蛣Y }%ׇZjdn\i{.n wQ3+d[WK(fkLR]oZ1 7р3(I9FUnHk]+xsƆ JPcªz)Z[X4e@%DKr"aFɤyza rŕSVg*~m+ˈAB),96R8>Q]a2YD@QϣCeچ8oDb|XqeT .%=8{sH-;F~z!9Ns[=ҺLJn!1Q/!~:#7M"+Vȍ%zh\ L:ݴG"| z+yȰK$'#lꍿ13/ "N tP%Vmq'v[W~bZLx f Ol|mfi!۞ܑ˸{٢5d '6IfiTw"{J!>,mq٤*~nlʣh~$7vh7`€Kjf$2ݙ, )P`[};_U|,h_]$ Mk~ 7YؼGrrU*\C-pv`)OȊ ֵ aidl+L)N姉 U]{ɃM;T |BSϲ:w{xj&XV Fg^\5iP> F&xn :4`EKoF34whe>|u{\}~Exѷ8 bK|=GEÊOfEU,EfcUTq]&?:\gD'N>jeF(+F3cBab( ͨAIhF\&XJoQj4 V;mD fF)R~36+Dm_R~d32h,f:ŚW[U-BOs&DLX?-04GE^r9U &sk0Y%> 82=acЕ[\C|JduY\`B" ^Qg$ p0Y`k6[jHe]*skEQ"m׻Y2ؐKE >gb_Qh7%|ǰ.r9dU=Ҩ; f[:$"%^&/Z]6I!S pu{LbNJ3$#A^a^¥ [1Ќc/0/ mLTN<Hw AH2 CNm565s@hOUs׽W+9L@=HJ󽢝HX"';HH&yQ;,ÅD4lwC;1p] gPY~BsbI)<۲n󯶕 !DF" qUG츬С4Wayײi?Sy_b/ 9:TOސnv ?]w3'Gҭ$Ƅ(R)38wu{Yʔ!^rCR̺ M,K- $Z}((>djǞug?9Vl[Z:&D%Rԍ-cD?],:"98Ţs? tz}I/B2 !def_&DB^oXIgbY3:.v@,H9=L 5|zG?ꚩ=fJ7 {̢A﫤*{ܕ@*pTVhTS 18<#|h0!S/y-%a1LFʣ;obf[!B;2֯OY uoU(5 pdUGWFCmvz۲ꦰżW> X=]acQB:|♏-I`U1fi~@F~ "Wd.r.w%erCm:@#hYUPu]A^/D^ԩ%y0+%Kv2U-@uB1WNXQ89'viU]Zu~Ŝ}$l0q.kgcoMǁ5!C\V޳|.voYUW7PSmԆAarQ{u*^s8EP8";cU0%X} y" a=yvch[.!$qŬ% kR*ט3K%—лD6Bd\YQӠl$P;HADf툼_ua z?ᪧé#)$*Ẅ́Gx<Ӥ"0"~8 TL]]&^u8qUn]ɎGWw4E:] /xX: b>. 
̶k*coLW/ͷBi5ާS~1Zұ 8rKbequ;OkZArRF9;*ʼn =J}vp6zLd ;`qXL탻q ۻۯv̓Bq ;Ȧ^Q"OՅ>zc'V'_Y +zuq}g. ⍳T2>Z%G?/-֦"V[9sYA d^¡'j_`K.`)}S#r(,>f;Ü3MF5T* =-_D]شj"0_-G-}]l`f>t0H[أLt(Ӟ~cS+n2XÃ)x_4Li);%@ivI2l8!!to+ )&&*w{ Yf7RsZ?3k #@K_;afJ!+@*PvA]y[D$j 89pnIӛh~i=)=D@6"K=uϾI' -p}8 0*aU]s_$h& oL$oN2cHjYffo :Gs "RfŃխCb`cAPLadr5%repа buߤTJFN]5jL uwud0%z }XW5һ#n)t ∬$q;DK[)WK lRKY7Ү[hcSeÐLvjpebuٟ@W1괇X tHRÆ:ߛt+v4]jUISD[3K8#Qe(ir]$YzZ(Q{eU{vo!$H\0OPT'"'3lj$<E+y3-6+\]t6Ys{՛r&G"&ϒ+UM{7&7e. ¬֊cZ*fsǮ8D!̃/`]s}rK^Sz^ wqQrWEp5Hy̛K|fΟ8??M `SKcF0Z!U\+[1k*mQB<@̞cwHF o"{k4C׃4FLc]3vNeRL ˁzOmhN=1~_x;Ase0# I-)'mAvAsd/0ڃ$$PAߡ3L\ c -%j2JڃC4 +[~;zZ7&_^o5ϿHN-"i3XPN@h4-YyxSqPJ%ZFF'}{w)^ ߓxFf\j02rMX;c̙֩$< 58Z'ލ^7I ʏDɫ%ҒՄSt!QSN߯9t/ӑW5^9Y`İww(9FWͿdSد#a/CQ7^mݙ|7T`o*[.w."g ~T\ˣg{4=d$fєO*(uZc${h8# ]RI4)Q2P*UUtC@oITn!ԑ*M.ȱm/^Jxo0ýѤ0/nAʾyCx+P+ G;ZCd 9*g$`<&@R<||A>}&4X#&΁q푚𰗋S cd_`US~}bUbmsE/ .P"wM< 8IpvOڭv#- _6}~2QE;ox ׶"Nmy0h ?Tٷ\$|O `v 15hdag fׁOo݉e+ֳY6938v2|WxGDZi.HT Նd߾q)x˟ۢ~L5;gw71:? JFUe-SS;Ȅgc’eV"},X!u nl!\)ONt g<Z]#kJV:YޡdVzs'N!ǀ?a]Z'E7#-agK夵!xs԰J49 x{` YB{lf(W?C4zo~cw`2A*h-;k+' ෥ CTs[H٪yVV(Dr%ՌOPM춃&GV\Gd"vy( ɌT%MjբtAo8@<=L }1Ig%4sDF㰥 {㔛miުCKAk:h54C0njtͽj]c;KBOk2@)0N ]ے @|- 'zFcv0e2iuvzN+qvz{xٝ?tva]2ӻ2߷>PvM)s3_=*ֈƣM0DݙelLr d$vF 7u)m b_KϺF] U#u?D;Q5:ՁPͧ)EDMA]yč_!i?NQ4c0 b0R84oЪ JE0ov/Cwd/JhtZ (Yf\Eϯ_-ӢDV^m](@f}JWYF_?mz=,1#HG; ێs ,wJCܬr w4tq)/]J2A!\B˶4>doͪzU,%ٰdDINM%3`z/r'rW~m,~8{fZGO )\fE;]H.:t)Ґ@ő99FcO%_A?p.#BZ-%ws}Ohnt1YMkk c \ȱAB1" j^A3'Br7᥃-fj1P=_A|k{]xd Kqb|* DEuEr}<Qw ICl,fDC`D"h!?]]our#b-Q&b*Vλ7[2Id̜6#-X'v{ Zh]Z.`^'`F<簍'-r 2./6utZ"Ōzõ.]4~"nypb@߃97A4 W\@ch}a gl~BV_[aJ_>;nZxlB`\q`up~D&DC8kPiy@^zC2Kf4~a )ayGX]6SG?7, 9)7u7/ak`-~oGe6x:i:md+koJQTTgךG=>G 0NE":kDڸrO*eۿ(Yu^WЪ+D 1}7/A3 4Yi0hɅ0Hs7K~Rn$'.TeO7 ϗf ?=ҕNY.qpxhntIҐi$6g$b3TwI;Mdr--=;MTlvjrbK_ܛWI/b/g # ɩJfmX=?n*2O߱(AdB"9^6A8j} ??yR!$!ra/I 'nj7YRX8+}=D<#eeHjL+ɉ\Q-sh_٢`xMDk!48 %lTb ɷ6]S< Eqj~ e4Bˍ/v3o<ip=|r=gJzQJړ6{ ᗟP> T}U641 Gݥ,y7z}e.=ٟQ߻վV!pe˷J86zO"l-"D3]^LDw/ie͢Sg#||/K^2TC7/~OF8.m0442aQ;rm+'73/ A?r﯅kWmz`bq;AK-֞`S+ endstream endobj 11 0 obj << /Producer 
(pdfTeX-1.40.13) /Creator (TeX) /CreationDate (D:20130413212405-07'00') /ModDate (D:20130413212405-07'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/Debian) kpathsea version 6.1.0) >> endobj 5 0 obj << /Type /ObjStm /N 7 /First 40 /Length 557 /Filter /FlateDecode >> stream xڥSn0+آDQ +hv0|`dVRaK$wH9^c$8{pF:x( H $Rgh>6pnrമz[H"X]o3WMi& )'b%;kOǀ)(8Y,8oli{XQY `_zX;JP+HXN+A=hbJY<"0iPJ[I#d$I1dS[(SE|&~nEi2e[s8(7}mjVuOq(rJ8P< ]X endstream endobj 12 0 obj << /Type /XRef /Index [0 13] /Size 13 /W [1 2 1] /Root 10 0 R /Info 11 0 R /ID [<3332CC463BB586FDA86A16786A1D4E0B> <3332CC463BB586FDA86A16786A1D4E0B>] /Length 50 /Filter /FlateDecode >> stream xc`` @fb&FM 631``4fr endstream endobj startxref 14219 %%EOF xapers-0.9.0/test/docs/1a.bib000066400000000000000000000002261365520605200157140ustar00rootroot00000000000000@article{ arxiv:1235, author = "Dole, Bob and Cruise, Tim", title = "Creation of the γ-verses", year = "2011", eprint = "1235" } xapers-0.9.0/test/docs/2 file.pdf000066400000000000000000000340331365520605200164740ustar00rootroot00000000000000%PDF-1.5 % 3 0 obj << /Length 645 /Filter /FlateDecode >> stream xڅTKo0 W(9~$ס+vvvPm5% ˮ#E9K;o}HgRVk!}]ѵMU7}q_҃ʗ] M݋fV%jڰRӒge=Pf׈h\6rMg,Ub#mx6Ut iuP|XWح <, M-A3l=blN6_1jR^!:g;¤&aޢ qv>4#%.W$t=*N{cn5\|.ѥ4s0~ia$ΐNO>*9N*&SHչ}8 Nl\1>Oٳc҉>2bcb{!WfD@Ղ7l PV~w&^bh\Gzi(ku ى/-oj7Ay.FX|IU5ΖqvMm.ҮF&۶"|sY9U5oܛ9Hyo>H؁KqU`/f55ߩdG̤=i!KD+&2n|].N?BA&Kd6lf_eb%>Ӈ endstream endobj 8 0 obj << /Length1 1837 /Length2 11129 /Length3 0 /Length 12278 /Filter /FlateDecode >> stream xڍP w0݂CpA`pwww n5 !Xpww 8r9*ղv{ j e5f1[#3  9X@vdjjuo32&bkq0&rzS9[8ll<@ O?@1(lmv3sc@gL`a3 f vl 's +1?tNNv, kG[3az&+ v;M()cACٚ:WllcvPP&߽? 
6&m@63) P٘r} V ׀?+TWs4v998Bk%mLm6N'qݝuBlLLalǪawJjBfvp@>Nv36g^oO;[;7898=_0;f_`ӿ;@c'2r'eԒ`K}ۺ<98\l>^^,ZAkC(kcj Kk#褐{c{(tL' 36 )g+?t k aCmlN1o!R72-[Alʶ?3|fl8՟.:6ƶ&;7rG;uAMnN65*`jǍrX%0x8 ?5R`UqX5AV"W?`5/|E{Oa5d՛ ҙCZOī /Zտkq@R٬ƾ>rf#vn/-&: /J/S]3o:8>`y~X Ȣ6Fĕy{\hz[3=C:b2}ufÍXpҦ$ݵaK=bhkJoGx6_?&!2x={i[¶@wQ;+ܹI-nTsˣ _Kvo4kcV@&@9O__Akvќ`3؀S(@R>s(izAIB`r^Yo";I7]ir/%\ 13Yl>|I͒pEN(8u{$}j,-1?AgT:nc15gQN(Nv!aOsZn.c!"XҳXd yS9q 4-v6ܢ׹osc12,E FcŭFD['W/+H_۞s"\"qV趲-yxeX 8SUd۸UN!nk41Iz1,FTDȷ PRTđnSzZSwLX֬wpUhr2!P%r0#{hG1)Dh^E#naBMXv,>IMܯ~Mz5Hl*vLs,#Y"shTT60gsL}2z{!rL X_V%RpE!17SvHp,A%u [+oz>hrj*?hrB7))zݘk_ .iSp,Z26Uebpsb<p3,|.tW7Qv²x/6u)JB2V}8a3,R^y>)>#ˍ\N$"N4$Ri75mR, S螄b@*>qFԁj~n\q(v56āW5,[(Pס{4X9_mFTb~q HߏU4gp%3ըJ刖8V~cb`Zz]~16KT; "ST"ij@A҇)[n/E E* tRoXzy.N.ZU ^#2}Nos.pbCLl!R"7Z!U~yHrK iDI=6s]\CoګH%zsKs,~x\P̱ e+Y:`U+S'F*L:SrŞ?N?,1gF)<(E0|ݯvKУl( 2VbqL2(I"qš}P7 Zrk#1&H8$dkVAi_Pd72d,n:̈OcKGG%IYcG|?n9O[^'dVXxE3{(qi r (UNp`@xs=3 twKP~nS^/7wPtʄ-)ת\Ez_{֝( S}&& L?E/5wD/bx\`yU4:ϣHhfIk>T-mg|Rn̎7ƊA tGTd©v.J6[:/gj;b(3` c^m .-wz6m^opEX`iNmE;N G=#xwnY*Fp@R`o`sL2GJ>i4$DE_LLe6TpYE< ; qG֦31Mt 2I%-6>JeG\n+C J:֍6M!NSRDB,=^Dˮ$Lc{Dztv3/%kcF[.㵈}ޭh珞5@q˓u+3+ q k黍,dK^p%V̹ JJY^HIG8 NQ +~^R}Z"TS5H܃oPˤ$BX4xA+C_+n`MM2FGfKŕ[GŠ:ٺY]|5_{ɠ/MLN)LnjxBlcqFʅTs@.N@ǀhYl>̎~SF?v&fpcVRB?%Ugp1tJCj& ]$|Յ*gz{; k+䐈t3">Ch>s/E-q1\ɽFڣ҈b F3֝pPє{4roV.O9XuT=J4USoFFC]W{r~U){=./ N[>] J%ʦEuULY[o"~fd)6c~Dq_K[ąX&+ЊE/ *{?腘|xLy0#-ǀq:M-腻'6`YQUuִok>}Kd!g>4-br'{qС|.K, Ww5bpHjuпtU\#xoS,CONj>SpXe%xb&T0%[ )1:PU[+z d%Я$SCD&E3=:**񄆗E cUä|hx\MS.hr:1ȯ3 aL9x;!ϐ)4̘0zgB MMYjSՂ 2Pi~33|8>1To+OՇ&f(jR^3 [sW :i34+I#Ħ F(_+:m+X mgrliF 0Jp*DuQۍEl/UF ;*^B{:FlZO ģ:rbCuWߤ30cMK!BmW&.҆z8e}s_t4^tXT/zVOכ{.˷;0M(y*+ؗY_.[9 7瀢Kk|,B1?Oen5-ǗtG;_CtŒ ~3N jFuCpW7nOL(`8ҨTw[~$͸ ́e2i D l 92n:c?ˇ{Ԣʻjs`zS_aC[EܕZfYv)  # XgZeoӐ.zL bߙY-'ҺW6,4Eћ2m E"+`WVZ!Q>om2EU1vF@w.PRH ,yS$H6{v ȱw~Fpɯ?M 3"2rψgx?sCܞtTAl>OQXUzdBe}[Z&i>f1d@sJD6$< ޻a6`xL.U(mpWj`C]{\-܌#^4 *dzyk؇L`q_\d\#0`tD2ZͣW\)Lcۭbmڦ%%Qk.ɰT0 p7FH`y=[kjhRqz&ҭZe1^I>#. I G҃0r&%'2'a(6r: \-HʍH`ߛ?oGA(lFb<~ B tw$2TGc<:DQbѻ#@M|? 
WY평;rØGdn/xЃFZCΕnrta`v2@&\΁ehw{+r+ېQcmy"i*X\Kj  Լ 75缑=zo:˞dzv/=epm +i,e~V9P여␀$: avdw|tcSbʖ,xlR4esgz^n>i'dYڮNI|>f] ט͌G"mo7EpqTv%v$,0+rDM_.'OBڻgl*c)#%c. 7J{mg V;W {e᷒({4Nw=BYz=#lBSX#g[ͭw 꼰PYղe$F-iy粠dj.iҥ:|r 8Js?i$y2k.*:6!;Smq3>WCEL5 MvN]/> %@G̔ha^PnKJ( q5X ʦOb؝\tҩ Ra)T.wL^{E;N\.ӛv)a&l|.&4"2 f{.'dZ 2{=yQU K)s\Tl".M< ^|+?L9j5&U>7L. { lwA]!5=:g7UE@T}]^(V=6x[s<ۜ¨rWr'3]Yw[ˉ%$WwU''"0W C%z 7xx \,$FTāe'ejC]HpE<տ+0'rA"lZ0vͱ0or nsZ'h5+ VUmӸ\k*` S7@) Y' 0ڋWLa"ܓ壺iYj`N+zՐ0;e͸MK<檈YY~ 2g}}V $#ե\鐂:aاБovxBi[nC*VUWѓ^J>K'UEZHEʬ:]|U2CadNꆶIfxjO2Wt3l@{&ūf:o뉌4dq-Z _ ҁ+Hr3V)A@q5]ۣ'p-=0sUBd䄲s4%,LI}(xswbɷkT%wL!*m6nvu)O,}SgKD&W{pS壪8&S@Pً+^ޯ-;yJ^+S/mr {jzLk؊s6 ÈoG/2}&FfdQē] S/ ;87Wt27Be{6,kmU: Y˦LoA+"Kp:{k.h<;بq,ܿ{Xb,TR(րۭTq@hG}~sG^_ ]~0uÿƃ13 B(O_ɖ.٭a99~NDL3yt$#&F㉁*X<߷?ڰOF]NdYmv]ގ jy<ƄAbހtIٺ;$'3?rЎ+Z4_$N߂g).z meƷc;Fd./ >VˣċԻ*5 F_+d>3q YP씾 b{n4=.~6yYTP(B- QfC2sah=g?bt}`B=r,CA.!"-GÌmSg@+g]Xr =׷!yLЬKp? ozK\(uD{:a)_?k=)|:F ^G]h=4z*FbQ%K*U>e\ɵrl(ބ(Ñ3¢=p~AtߌK!UeWOxkl3a q`wXU}cp|Iipawև! t85%1:u-; 9{jn` e#W}SH&+?TK"\cj-~yP0wc%h+6E3ziH%diP8⎧ y؂R~&8w` qVg.Yo*Gr$PJ*1y?,`aoywoeUwv?e l[$yɠwRl+2pSv-"xDY= E|֕d&5>b;pPg;P*m"s" IJ haw?ẃV8(=Џ<;Ւk3YD~J`T7ޡA'!*%0~:uZ;T-uU_4[rj*8(6@W>(+!zI[ŏht|ʎDX$[$6ޭZ6$ϊe;(5p<R+ [| LĜB??zLr2eǸ˔IQh,9:Pl|io{L׸9LgLӠ׸1˔n؃|M,9bl)&wo)/Eݭ'Jr`o9!)> ۶Lhk&}mQ. 9nGTAʪس[/bi+%Ar$8+/i%G`ѮǕVm>Da>Qc`UKm{'\>? YUr t~Vj!EQĵ/{cxq)̔vڻ8 IxW~[4_QNɦ0xZF g$g8=Cj4SU-[4gX? p&͑gŸqvC)vE{z9E` ?52.D&)!ƣ- D ,Lt?6U٦?[ZVؚ;~Cm`Q<,:g(9E㰇^}8wαΒw;ED%ٯh ̑\d"(=+&vG u4*`w c0ހg^ld/EKbW!6SE $cʻY6T R<9i#_fVE11.fC]$[_ƌ- 9.8207<Wq9vdbokIe#'iN7<4[\Mb`q\Pmk &$M ̅o~w/.P-;u,aUVg:nC,$ /%\X[3ϣ'ͿT2>W\9DU~$߄y\ IИm?hb{y.$ǽ`98Dc`$tSl`0;ʾg0V5ȾG[t>y:[h?7ԾoOkV! E.>\<$a|݉Cb;.K&7-|Ձ'O|"o|4sQ\uUO6_Eg}dhaR`951O=/}lH =[d Ni9UG-N}wkЄ2g3G9࠼ ln0G f:N雹*-&*84N(pkv0_KJPF#h,>/j?=+M(_5,|m^B!FUl=U;s6kDs+&Kfy#5c+"&ܘwZGc>Ņߺ_~? 
endstream endobj 11 0 obj << /Producer (pdfTeX-1.40.13) /Creator (TeX) /CreationDate (D:20130413212529-07'00') /ModDate (D:20130413212529-07'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/Debian) kpathsea version 6.1.0) >> endobj 5 0 obj << /Type /ObjStm /N 7 /First 40 /Length 553 /Filter /FlateDecode >> stream xڥSMo0 WaHhI,A&Ykm2 u4@b{3~c;`A${hC XAC,@1F>7p&I]u,X0>f[ !9$%o#q3pbJN:{2)tl:8Қ'-Rz(t*_"KN{a2$\ H).uP( šL2FH=K|̣6錣CdZ]=vL$bWƵ^=b&8g+vP}LП~m.ũY[6]1S~z2yX0N][z؏Ȁ^71$qHsrk3?'fʼ S]0]V@SgKq?H0('>#+f}[4s,xo Sa;c5֕Ӗa;<+f%%W#}n txT8Əߍj _YΕ{z4SmWX2-nMrޗ[ kk$yS΋ endstream endobj 12 0 obj << /Type /XRef /Index [0 13] /Size 13 /W [1 2 1] /Root 10 0 R /Info 11 0 R /ID [<0C860A0B723005BB60821FB8FF0F2E89> <0C860A0B723005BB60821FB8FF0F2E89>] /Length 50 /Filter /FlateDecode >> stream xc`` @$ D1#c3a}u endstream endobj startxref 14063 %%EOF xapers-0.9.0/test/docs/2.bib000066400000000000000000000006171365520605200155600ustar00rootroot00000000000000@article{Good_Bad_Up_Down_Left_Right_et_al._2012, title={Multicolor cavity sadness}, volume={29}, url={http://dx.doi.org/10.9999/FOO.1}, DOI={10.9999/FOO.1}, number={10}, journal={Journal of the Color Feelings}, publisher={Optical Society of America}, author={Good, Bob and Bad, Sam and Up, Steve and Down, Joseph and Left, Aidan and Right, Kate and et al.}, year={2012}, month={Sep}, pages={2092}} xapers-0.9.0/test/docs/3.bib000066400000000000000000000002311365520605200155510ustar00rootroot00000000000000@article{ fake:1234, author = "Reed, Lou and Björk", title = "When the liver meats the pavement", year = "1980", journal = "fake" } xapers-0.9.0/test/docs/3.pdf000066400000000000000000000342101365520605200155720ustar00rootroot00000000000000%PDF-1.5 % 3 0 obj << /Length 547 /Filter /FlateDecode >> stream xmS=0 +<@Zx-zC]KTr%ED=x,Lr:;fV6Yߵeq~oa]U^4謅ѧ #ޑz6F7u6OɰĎ~f@!6i''^Վf}fw Dkw˫/%,8ot ~v΀VX|Cx.:~8tVzCncɅۗ,/a(j:|/_r1 endstream endobj 8 0 obj << /Length1 1852 /Length2 11327 /Length3 0 
/Length 12485 /Filter /FlateDecode >> stream xڍT-S܋C(.Cqm݊w+P"E ŝBqwx㽑1%sZKhT5XL@2vgVN6A:' VlیJ rtA t;?ۤ '7O_O @lPb(A@Nv` Ke`YLۂ%yESc)_ BnnnlƶNlv",7%@rt P6 i vˮagf<l s x^!PA V+88;"0dcSS;[{c;b0ۀ*2lPg1@c'|cWcsddwvbs!]Iڂ N'v>o;_'k sx 3?DسkA. yCM,@^~ AM-t`, 6=z:Ύ. oP99f`Sg  A 2 ?# x=Nn/3;/[㓐/s: q6۸.Sd< 5J 3z坍Cba6dP*)<ۀ! U;'w |fj|8=՟.8S;?掋`h^\O5Al;9,`nlj0 ~̿Hx">?i/zf1 gWa|Yoij |7\oV!seQ=:ݿ=>?~s?Wjy!޷MÅ7\oR3oy]~υ UL]?A (unuUmPu8֘N*#c-&rcUfx i yϽzD;{x6^jm{=8xi[÷v*:`]BFBfԶޠݗMFkEOd}'zJ 5}q93Dό]䩻sc&S1 .9NgIъHk8mv<կwO h#VZlxV-&N,* ':^u4VmN2A[PuK:JHo(9lNfX}4~C3Ho"wIjh:(?}1溻d|na߹=dfKlD Sݚ#L.BHMݠ6`vk1j :~VٓxUE@&N!rlM\ڨKokR˸k a1 !g:_F?/`b+АBWx4Kj,~ʄu(+fY!ξsh#0H%ת=ҿ= o&vRpnFȇ{)Tc'v5wǮ233YO__![\n#9ɷ!>ɃG07d@K(iF!n "`r8BE ȋNZ !7z\'3e)ׂƨlL6z H;T0T[Fd9%cȂPSԡꔥMpY,k'6'2IXM67X.PxU\,Y90'ʕ̡iߵDIt>D=o9qZ6^͘ǭY丸tƨ??LN?߿3[bb̾63 WU5 aƇV9#$n( ﷱT`$M0HahiZ/ٸvH aoGh%3ȱGbPϫiG& 8J~]ȈN~7!S! Ô0DT>&f'?B=WC9oU17͈.kGrH; >3~_ƒ|*uS9` G*C[NJI5D/o 2F{W&BpoMZ&m߳V4ܐ|ʐ>kXYto8)vGc|GGaC}_I w]Rj}o ]g='| 8ou^$osN[9 3Dg## 7E b2Ֆ:^3? V;n#iw%IK6ba\K. 
ײ TU( "ҫ.Ω7j -U5lXOՐeq W8sW0jlJrC‰+j6䄳'[Ex0ЂW@::€}ϖw V9Y^DIG47l ~g_W&נ:XGn{}T ;mt:ڤ30H Oe )i:[?xsH[g[7[zN dI(EUIcX|Il{|?d&q%P9O.?\7 "/<uٔőPW:BڋIU% .v_jdg䓯:8̻[/Q"(W2AT^\12^GA/dB}G#E$ZW o߇)J43*0aMֈߑuވ)F^Wn.r-W~8O ВA#&Fy#6ՄZ||ȒrUZeHC]f ۤ*dM;^ϲ.2k]N"9珄;nr@ 79T ޹Ŧya_I8e :v&  鸖i s9M\hx\%)$'HRk^Qc,LYY]0r৕x{aϐ)C4iQ.:SE^#JUL+A|_GLVd󎺲U2m)a 9K P0%#W ܔDc%iG:3+c_{?>|f&&RJfjEMёtXG#}Ff=9=+СlP9}{dF^@x[ȩ0VV=[Ydjq!ܵDR`ix/(i+k8_ o*jL PS,-hzi&Q}輞m(> m[Σ4i>lq aNA[iP۶3v_okѬo gAGצ\jX/ҕ b9jY 0BBS\F.[pZRܑNt}o#r q$Z8..q.2}n l^T3Volb՚Uqm 1oF^ ^kn 3oxB,=YˆH}36W5sǜGH3ǵ 6L+pE82"{/;ޔy"5)prK=*oVtɮr,̶UHS `n8舾1f\Ә0s.ۮa$ /gߊpxtgirȴ8TNJV2ypH AV&IB.-+t%R|)]w ]8l vT@4m T_,ëe}ġnIl`W~́eri  ќ9WrЈwXCNq=QHݒu@ d^}C>U]UcmyU?Ju|5xYADŽ9Nn2㵅R"=4DBLj`2e '4&vZ/pJ#@z k\ʁf{]adP/ذƦtH7$"*=~Sw2-3rH 3OweĬoQ6@Djt!d4իB][ɑŌ*ܭDZ}@fvyܕjDt2sY ܲ`A<&@Y~ԐMejǹ"/n*.Lט0Pӈ'n'/\C8'c35ґ :"{'weQ+/r$Ƕ.AڦPw$Oqհ 6awGtH_ xRJjhzݤHWRc՟^^rp[R@4nsV*lFv\6kaX\.e8'7AQ[Wpq0d{0^4&:_os7""V2CM5))eN`OQ"?kE&u}-}59ըt='>5fBEȐŎq‚k WHS$`,mUj{AK&[_,C[k|֭0&&Ф:6 ܼ ݐ 9"=\$e!.dUOٰVoA8Dr=n3*nZ)RBdLbɣWuDy y"VRfz+f|mnCdF.=% tbORY8SI~y\G?3q4kS [~cGV[DdWcu@gM쥏m|+mrŴ(szkjΏIXSK @,*jשtD"8SF瀢.퟿zL:F3xaLRNn=_NY4$ ̹sŰh>Z/ީPiըT%Z"".78d?ܩHךDIG&9[(/4 xSƛ|dru) %Y<e)kO`#x)gC}:%s'GK2R㫎\xXXz%;X&<!׃3ǐ!˦!p9 DH\_R2S[a$y /Cbġ820M3E!$U2=&O5)(3KR.','!%8,\j ^0j\ij!~TVp8ed%[CMc-[8@,?# Ⓩ9%ؽs H#CGj &/v@BoOgiJ.˴hװLzZtLc嫛)e\fj=DG ]xNU 13Kk)2ul[q7MR'|GDcD"m"pv}u>e4#1 |JRM!ɯ55:/;Z1auXŬWb{\N7lOԈfA};29}#+n:lN@Dȟ|vjdGCR>DRT|wxW-*5c}/ *6so_`P3vʐ1M~01r5{An;3改` X\"  ێ[4ʌ0y?痺Z!td\XZ-wsHSE͆ ;RF:h0 j!TFC>c>*x^/:6Fte#Y߳ҷF)cLRh )c{.ƞEieu<}q OЋ0>)^֗!JݭWq(,q~.ub4}.ڌzݹȷBwVp -_(}hფu2K/|F1 V)Յn~tnmvlryV9m3q PQR. M΃1?ZRՎL#7 +zAG$[MGplgᑆ_aRG 5;П65zetGdtR偝54՚J ')+>>&"L4i:U8x:A8S7?<"Y Wׁ=GdvY6zpO`Jӑ%I/x'xmV:&8dkP-$c9?+Wk@~,CVRpJMtz!~6b7E |UflK* w>jN!-(2Rwfwl*/5W* ŵM Iuc7M tH;3M Yb j5㤉/ uZ0J}ޅsgx:#Zd N֒69=*( b")HiE=u=ȚCZV/)ts5\P&[yó8t,5L;+RW,pn)qg7䙧I,;6*rCXbU)!:?: q&POa! 
?2d.)z!/?23V4C_-jb0S5ذ~.(W|3 hʜ!՝5%\J(EA+-ylՇS ,i/gļk]~WS)x;| ɮoԎ 5h9egf%Ofn-VЦh 4^>..3@zqZ@R:'}Tp)ZaҧD ^sfEs{4xNS$'-|"~*,KmuɍOV).!s?3<նܓU.Z춶4mkK0PBPZ[%4dO RYU\%G#V zf06E1 5Iƿ&̾`vOZ"Aaq eQ-#VD aA- '[!)*@ŕG~{N}}M .muf_9ם>If:jEl8!+#3%RV;v.^x Fnk3r>Wo .(i2rze+*'^v-Tn MJ~eTxJQ:%z !;z,$u;nrg'fsģ&#YU;kfgQ޵ۛ==aaFݛ-sxwhG5>:x'a%L0鼮q$,Lxzln_ -MNM2`bXU3+AZט}~p 쾻6D2߃ũ9Mq|Wo!W+S軫jOo84~ҧU1 (48ڏìw{E\|Mw7Gsz©5?'%&?-̉FWU[Z6@E y4=Ȩ2* o#DRoARdM^N6~8bBa^-n NYi)s\#]T} xuzS ۗsM{j~CTmCy+B,<hD 3ZEqP<e~P,T="PPRk8琹iWT ꔆ[TS?8(ٮd}W vVEj }fȼ4Qi#-|LO. CjtH:.$> endobj 5 0 obj << /Type /ObjStm /N 7 /First 40 /Length 553 /Filter /FlateDecode >> stream xڥSKo@+ت2²+YlGU˲$[=j^7f8@i4D" AX:Vx{cin|4o=l-hcK?+J^ŒY>%4->Ņ@JI4A|D$!E*Po@y@om},YiVh&-,qkC8[$guNs*p-wϾbؿxo-nHy^/ɗÊqӼpQw#1JA'.s{)p6_M*Mb}#ft_f*<\2 #"m.} 0.qO+fՇC;4s,xo Sa?qƺ2`cڲ/lGRRzuԗPQީHLθ{47#ktm ~IaLy\'P它 YJ#so\N endstream endobj 12 0 obj << /Type /XRef /Index [0 13] /Size 13 /W [1 2 1] /Root 10 0 R /Info 11 0 R /ID [ ] /Length 50 /Filter /FlateDecode >> stream xc`` @8fb&FF200h[ endstream endobj startxref 14172 %%EOF xapers-0.9.0/test/docs/4.bib000066400000000000000000000005721365520605200155620ustar00rootroot00000000000000@article{30929234, title={The Circle and the Square: Forbidden Love}, url={http://dx.doi.org/10.9999/FOO.2}, DOI={10.9999/FOO.2}, journal={Shaply Letters}, author={Me and You and We Know, Everyone}, year={1869}} @article{30929, title={Circle are Squares}, url={http://dx.doi.org/10.9999/FOO.3}, DOI={10.9999/FOO.3}, journal={Sharp Letters}, author={Me and You}, year={1869}} xapers-0.9.0/test/docs/4.pdf000066400000000000000000000326451365520605200156050ustar00rootroot00000000000000%PDF-1.5 % 3 0 obj << /Length 498 /Filter /FlateDecode >> stream xmS0 eFkwC[،Brt_Jtz  )||d/\UR.V:( yT7'G{2Mm\NNi!ϵn0!P D  Dۺݞ'R~D>?oUÏR=~e+偘OqN>OߑY= $%F&lBј֫eeH)D_(-zvY<ѳ5i!᥼;2#Ta)loy=9nVnЄH[TA`.{B=77 IX;=Uk\) Dagο($M~Y|Y "&3Jܩ4uc[kFl7+B|.N ,Fc;aZ6r;r,:>"\\q@fs9jXU"C(\4F $s 6]@2FyIU5-W۽ ?B endstream endobj 8 0 obj << /Length1 1791 /Length2 10683 /Length3 
0 /Length 11804 /Filter /FlateDecode >> stream xڍTj-L#]Cwwww 1 Ctw4ҝ"H 9~uO #>H#s32ݝ@@07 DydS?i@jN^~G?.01l@@n.P~XYdA05ۃ:Z.``wwqs{yyqݸ\`vR/=@ylhA1Bg؃غ{a l =ex@l@0Ss@ WrS? !&]@b;J\ @S>vZ=99$ >5 uwr;A2OwqvAO Y?_7q l?Hx@ !`W!O&mv w  y[sQ A }" ۂ܀ ;://lف!V2lO{Lx /'yٸ@| ~tet7_'  ?Ol<TغDtv*XVwry2Myy?ϔ?QHO7˟ v;IO[ }k5A6`Cb1ݔ _-- Oe .O)'z[7*$ 'p+DZ ~?H)/zAOi{'o mO}~OnoP|74i oi ~}zs?$_x<v >M >M|*|:Omks yї]_Rxq|ayFMg}Mr!LG{jdGnEN'gTh2\ ]=Du +V#wtwk1+9 LgHP9{^\L>Ҩ%o%V1R!^O1e,-[gwMl::h]O͙'XH Uc;TRiSP򎿨{ [qh0Dwzo }PY^E7@# JvILWV D-ڻڽn _5=:{;广Dx|n2Խ;fLf"Wב^NKܘGq)NLO:Yc¬akn^u- ÊGBxh* 7ʟ7#7բX"ϞWM|8;2+રӗrJ#0N8)J0m'|+W`C X WX'˿s}@"c#AwEa~x{08΅{V1c7j`}:a7=k3@ g9>p;W!#Gp7!Aq&fY%Hm+I4]b/)·$y ̵݂^W^bms`DΣja%15Y՛v+Oj;oMr5yFewBY9iکZu}B]/mv{:5fL7g?SKnq€,kC);y3梪0J1PdY5 6frX)v@#7 Sۭ(9\tиj޵b%_&^}}Uy}ȗŒ9&}'fK~4w9Rx;-܉q}:\,Ѿ_Rˏk-`Ui2YECe/ (TvܦW`xvJ4-ARsF)wGYoM/v4z*ApzFpAҴNo5V SIo[&SY䟭fY uw&^eՓvLtc?7e:ʰzfx{lfд} z$j ONuY۰;*%ѿU%eYP7d#$. 4ee& ;^DdejFߗ3b>f `<}`I|% 0Ac?m bNx ;<Ϗ@%bޒ\4S.p]}!R&:ʜ[';,ptLd;ޠxiR EuX%E:Qk+USKZ2UH;?T6-p(QO} C$@IC1?2U#$Q 2]93"HtM:m>ۇy׽фĴz|zn(M+BuoG æ Ә y;/Tͳ Y/ . \y4+ pǩbW2f fvF*б0cG*_TdxUu$Q2<%Pge]_DIm&BUIX_{ 6;U֪/=0KeϹݨwJ>":c #f K;~!AEN^Y5Z~71dl/%#nMZSqM֟+NVn0#fN_KvG2Sd {odawGE8ʿu}ι+T5- uid\wH$/WtDM%qJs}Xשĥ9GnS;/B2*%"(MX=S {C|` ހt*!(ށaMI%\>M)Puҗżruj =g'mTP !ܧjx ts]U޷ҳ̣"b,qE˨(pcB1HdYXE `%k;Ew|U?"jၔ_hAe3P ̬95ľ]qU-׌C pĶ>z l[&0)*"KXb_bʯdg 7pZ/1 Vkz~N6 F=f2$ o;,f2BsUn~‡%YUO9Ջ@魙mu^m``xsau#$MQJmP7is>g9tP-ʄ5,t%99/v,:E o3|@8$9XW[XKݦ(ssqdঁ|@ewyʜ2n$Q5 :%%Eg=ڛ>?g -ה=Lω(jPV,*7ʛ3[c. 
$4RpcṿulՖ/Კ+KcP&q wp:噒'&Zx ՔeP¹qHLA0MB JC?*θS!rgrbtVMr'}ręgӚzqąv5o ^]xT;_zD #vR9Jmmc ,7p"LTxACXNmŧډf-,\V^xe*BMP }2g6qsB)& DyItRa-KfS<پ5cI%"A(WCsor⟁f9idKL/s2o֐>4yI>/.=:-d"݅&L͆!ҫD^QeuyޓnIz2wٔݱ@|V@zyAm5:R>/m6klG8o똼G^*>W:I#J}W;&F[33+~o v2()V0c8dŠT#gǤԒ`8K7N(8ZGX$[k]fTzB_6# ;BpVVk\&ES[ap3w(<5i"u@4uRB` ǦR#䘓 j ؉$.~U#k- S/1 RHYޫi VˑklM=9v:n3LiOJv~9K`ݭU=A4JMo5S}:ԍYC`$}>/BY7mɖKG>3rl|H\ +ۄX~rmjU&[c9WKz Nhe.tcG s0<= |& b-IY\pfs:~hcdKudyGpDӏS3ޅfV.^}zPr:!#b$'`vg1v\(kxC^bk/b_0levCr3kW":ĐbC?|q gXt 2t{;3#J7X ׶Ӊ'-ymbQ%S4Jn~I\z'bcՔh]HwѵOvp30D1g,[cwn]"/qR:aowER޿O06`)>4EAցhc=`Ws kv"T .?gʁ6^1ԇ!)sq$$V96vOjr([2I׏M굮NnX[@4n:ܩ]'+0Mvt(*!(!vKozǶY=tD#kI<5T1Y~rE*,z#pʰuN_!yE`y\VZbh#QP;_ts8J|[,W{,,&&K /r@y!o(?Ô-FfG5C+P1A,DpEW`p ~ ^ T tFR@h.k42ҷ@9&%=ưc*> h}"giP E'P夎B7f뺗X53Tm:Y]3=iryrEZ6JGcvi6#c_`QG?p*vm'#]7j"!Gx$Ɂ+%Hl,҃>=.:S$rnY67,E[Eǻ:bL"}fC6oE J6+.P2U3uOؓ[M(6.h=TΦYٛ()ṬN8y)]3jf7(zn&'`QfIllD˷'S! ~I3حnYy:͇Ђf+Wwr$,MlipSClI. bϺ+:W`ၗzl.<Υ4zMJ{gŠ1Rkg F4\18Cu(R\eed ʡG T穏:\ŧM)ʳ_px3|aBS>RaHSK;-sśUVG|PG~ux^^륯^I|+S1{ץb4!l71%^AUz8i<$RQo) 38LX.vpY*ż+tdUމMY/cO>mI󹵖_e9‹Uasl?&L!P9rH#QA?տtIі|hR)Jz532$3m5 : =j/GHfN֪k\/ h)*n;y5_K!$&ðw_7kSd  "jQN:={Eku$queHI\°b&V<8Z'5~@]mַ4KU?d#r \HeA$E M˔ۛyFF\sАJq"Z̆Emy$D tYZiegJKʜec&H Ev2;d׌xt{XĝD3Jg>;VET3mIG^ztE&ceeiݚ2(VU--/6EߕU,;ؐu({^"Tq}XGxɇ H~mk6z7k*_4晫42\.ihm$$5򣕨UǕY_d0Cif? Z*{ʶ`H2 =<ҼV)+|oLu]]LXlBiNU&E{> VZ_&o_9֬qotw%5ɬOoͻ2+19uyъ @ ,$xoQ]O0RnW 8-/DŀZ*M$tk]G+N&Mr 1D?C|ՙū0r~trO`v۾M߫}Beޒ-,:E0;l=.a$78}%d`s!LNNM LsͲ.i[^?2GrpY"-ux'8.vU l>ztN?앿qE;F(T/g6٭7 4xKC#rGRz$ "EǴaYΊꮙDl(4y7 Qn?0I\>ӲPY/O> Jrዸ85 ϹzecPhdZF=V'T'_'z =<@ߗɛ]~d"ꖔ ;7=KO ^ 2B:)\*B-YZjf{`<&ޡ޾WRN/?&6ß|8O v^s%t9i{2Nw?K1zdLVY ?չF]a"УVU[R,D}mwg P}*Xe&^'ݗLTr]=é)ފs0Y sc3 .tR`r65XmSw=굛lLilabYj\^x;fǁO?d|Y}UXW8)ݖ˂"gψTB'ơo-w؛?8 [`K|XG#KXvAbUn6V).7߿}T7v@EՈ>!Uof"ٟ{4)l^oy@t8F}Gjne5@M/E+qM=S6؎|'˦:XO%".q~p詳KϏBqgVj%(PأP6֩Uӎ e39K86~W9o kbTNMM_>\2Oi߀\qEݖM %pc]A; ԢM#-t^[.WwFkEJ$smlyC t>W*5L\JuEyWr Уc8$^VUK=/m#LYHв ΄d%B*/âł!ď%41L[%o*"Å_X*٫f*\Lz5wm5Xݹ ǻlbKwh@.;M- 1r'{GD6A2AozBosJ8pghBd4I;Į8e'\ӣ[#y UŁTnh8dޞ?=d(,75Kc)HZukp^ڭ>˝9)pKvFc. 
n( !} X`n˯[KnX#My߳Mx`= r%wV&hzm=oGW)q(Xit53A^﹪6"[*KՁ WpK=z>+"N[X(܄t,՛ endstream endobj 11 0 obj << /Producer (pdfTeX-1.40.13) /Creator (TeX) /CreationDate (D:20130413212533-07'00') /ModDate (D:20130413212533-07'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/Debian) kpathsea version 6.1.0) >> endobj 5 0 obj << /Type /ObjStm /N 7 /First 40 /Length 544 /Filter /FlateDecode >> stream xڥSn0 }WqP[@حE 98Qr "y!Epr!8(0Bg`T >9{['Ͷs΃$iS}[:Ojq떵70gP\@n"!zK<(\ (9E"x6u07{`Hs B RT3}ge `|@4hPJ]IQ2˘LrNHm"K̃`tfQ;㌩} 2B Ǜ0fp˸₭{D1YSKֻ8wkq{]RzW{08 >niaVӇS^z\\"'TIO9mV5 endstream endobj 12 0 obj << /Type /XRef /Index [0 13] /Size 13 /W [1 2 1] /Root 10 0 R /Info 11 0 R /ID [ ] /Length 50 /Filter /FlateDecode >> stream xc`` @-fb&F 30T2jo endstream endobj startxref 13433 %%EOF xapers-0.9.0/test/docs/5.pdf000066400000000000000000000345401365520605200156020ustar00rootroot00000000000000%PDF-1.5 % 3 0 obj << /Length 727 /Filter /FlateDecode >> stream xmTM0Qčm븥]hBJv@BNҌiޓ>>(+.Ekjݝ+ &;ȣjMk]nrŴp7ZRD<;4M'~9N f=-+lk}w(ڼ{\e.ehQU:^9fӦZWD~7lIɃ̈c+L{6׼ee+ 會 #t;DhǏVs5 F7X5`J+܂[j3Ǭi+BMQPF<`U&T9V0Wݬ$kwe M3I)m@ 9 Z'CYo4G^Ia$Ywn ҉AЬ\ԚO#nHqQA#>U0oVp)AOyL4 `m/* ȲB5: xHՌ=sT/Yz6 p[5C ֪QO˜Ik |H>5ܸH_a|8[(;^|WDX^<[Y-!=k > stream xڍP\-ݚww!hq -H@ CpǑ{rs751HH(@ Zd یL rpB 2rzI9)B67??`h4rX#2ЙxLـ&FD#k _%-YY]]]YlYl̅`' *2Ahd 25@]x1XM@Ǘ g)r8@M@+_Lg`caCL6213!35$͉ `1#%lmdgFi1 98YPd˔ 66 #I@&/cwgf Ͽbj Sg;V $'wȋ <|=fbGyuw;ПN?/ =lf/$@`3  2l0!ȿAfw/cËLm!_V wbڌ1'.nd0sxy<\@WiU6wA9-/ / UAE)t4yb)Mېn? %;;ln@7T &+L6s2z1?c;J@`'$[x)o m/f/ ]wċ /қKs6!K+}sV_b_^rf2˻_[_ 8 _q?) 
.?52A^5 l#qeJg\phwCGL_qK\ڐ]xs[*UoQJ#5"PgFpb&{ŀ}1sy5=L!ˈ}Q詻u;\EH {tS|7I`޳pa4y}l&zcKQaLzWSJlMegɒcG&85O* V]v."^6C^b+ :=4|!Ae܆f>tC)/~{KnCr3S.{ ָ'&c6DЉ B-ْ($2ɜGR%f|hPF4PuB^|ڿ[Q,VUÛU$u6/m_'vȚ|h]p޹VEX|f K8unR̻(.}^'- ^={!sRL+DYm0@dZ'Z޽#60S3F s%r6?5jhA]?KM}>>6GIwxA\aoHId-|#>N rMvB#gtIM!J˖w("Wis=U1~ϒ)^ႂ>pS/Gi _*A69*%-־!ksލ#|UN7q'h6enw4ܞvfsa`GS25"iSnSڠ_&$v#oQS1yzDTRh9atf蓟 lThq׾ nJfL柎42O.KB3/2ro<\;Tj}ɇŲNd 萇wB^8Vݙ0XmiZσmȽʍW$]ԅL@(|+**.h1?_J|VDX6}xJY^Bk9nFX#q$!%]Iju 'Ɵ6y5'qIY̍a49*,ij&ԑƪHIvs,A}"_` %^$i[g\ !+v%0-Dʐv}.X]J%(x6\E.Y瞲HCb_XiCNqnݦEM9،ns2_I-DfS!h@?Z @W5,c)wMG;žLXgPD"É" IG" 0Fa5eAkW>2\+iKfK?_1!w:; wuȇ5.,yNF詚h>¨b#=h 'hMgĜ&pWpx\e<,A+V*RAY&dEYVi_-y&iR1@Xp#-0tE5oz481[o|fma'^98U*@›ǵ E1Tx.\e8|PL Glƈ?k_:wqH}&QU$ʶc#S(x{f:Wzj`5 ؙΞƒl/GFG4:LhZI,/@t.S!KqFfaS)^;8Bxm8N\%qdǛںLb/4w!֫ݘiw5ֱxID8 Q*K>dbps~g릓8p1/叢>I9+NFԛ|+=eaq+:}!B$iЭr`$34J^y!1>!ŎZ7ǁ‰$z\itD:UʞUEaݓdM M 3qQgOبys~#⎽Oeq=Q !fQLD !ܥ) ݚϔ yOPq뫊YKߏ*e4oV3k lIj\|HKzGV;^_ń`jZ]~6Kt; "St%"q~»im#U^ʠSvUtдM~ISqo4OK8-@?eCd+ iW &(ӑ ܌FԳ^in15Q74vk S~_gi-C: `|We Wao_TJS䏹k>VL Pnߙ3#hrП22cLucij'pl ) ٫r@P6`mN3< *NM22řdPBAP#Zvk%1!}c'6p@8J׬4۶|)N.wdD°e6gW[U#FO&BJ\0-4Z3 _uOKՀsNnc霰OO@)'G\"r,DQW1LzbLv[RtKH*? ]8o,t YQVt?>95d4`M:z>m0O}`.%nzQ4foq=@EWc5~qDzʆ4v\bcƈ&)_~j^h獞5@rAbޒu+I0+ s i黍=/fIWw/:`z+f_EI&ĥuRtR}Q+vAJ}F,ǁ8xOIn7 AI2U}59Um-;W{,o +jfFUBї3?LL}iob *Tef_fI6-O2^Ij(ޞ|gD|Ow5uI:絍Gޏ́cW#+L{U/P/WITD %7RuWeRk/_uҙ98<#r鄈!.8iKYVC#AiDbfS!W#Ih-.ٌXuU=k@A; 6s$_/ܬ#mͮP.,d48cxCs0'ᙏ/Khu^!Sg(ֽأ⥿ D@|9/e,cB?Z%.(Њ}**,Aڲ}!s3kP? `VFxC-腻+6`YV}+(qrmVշ|<3 LM>9YkQoMPYLIWebpwHjukX\W %jbq>tڣEP` ̈=Ks!GB%kGwFfb"RWQ,+[~&^JgŌ_4;L -T@bw GVXgNDF`d(W3b͓< 0R3ct_띵ڧ =G746V{GMv*3T\]+0Gޞ'k}Flˋe~A{nRsZA琘lC`j>L'|W@PGQHT7< 5|MuC ;cLCJ Ki>O1m{tZN,g1WQ=> GpS&?^Z9_YN//ґRG+3b A`44>u;]h}آ0]S{cʻ:.&|Sz"z6Av"Tn@&8m[WrΊu'QZx34sVo̴#|laGMDž>kg j|4gP9~4+]6 a$p9i:I+.ºLWxo-U䢼N#SLHQk:Z'Yz.\vZwhDS3|ɹ.N1=ה]ajjWv3ks^m3Œ9qơk|Ss\ o݅b5n3n^ rcvPVh~fCۍnD ;*^.B{_zljOă:r|CUuIg`ǪBڎPuL<9z8`*6/h&i7oB,N:U=ˍZa&\В53V7IyiAF%t-+1dMfl6~[-pi'~9C}g^ 럨륷 k]ݸSPApɌ7}IqJ5eS."deu0kǦʆc{"ʺ%jsa@&T==$j`CZDܕZXWթ  _! 
XfX oѐ.zL a%ߘ[-Ӻ+ VmOE1MSbn|GaaUR&ʱv'Ψ`zzJI2N#w|ZWQ_lg‚/"ߋ0'.kwxŒx?3ӡhnKD*'c6|H K=,,b*TTpF2}ey]Z "ny,~jiC4&?tekR),[33n^N#KIF!J8Vam` oB⮏fj5zHh|ohRqz"ԫ ])ƲHҁUږM|E`u'䯚;}V\QI&pJb@FElŅ$jrPA.Ƥٶ:66z|KM Êl]z h_#,ֹs Vqg3B{&q69$FqF(ѵPqQsMbw!>-BRFi 1phՁA X& Mќ+2Imc슙HEDrҏIAsf!tBe==Cg2=CቺK%A}ϧET= {fBGԩ"`wu˖gcV7;R\[H2VFkߥ@7s{H6.ޑiEA}[z8.K[>sS)#GNJ%W~n|&鑎Pl{ PGV-wI zyu"^ij~pNSsBDkx@CsB=kڷ",%؇NՋK$Xkݽle8O|d!V>kp`]5EH*ws=P7U)ךV MRkf'69E3AXB0Vl1_eOV(s_u=ց]H ~Wy8tyRB8 BqQ*U:)0~Wm Mj$"Y{00A:VCAŒ(._F8G+;DU#XaB-[9P)za-&Ts%FHqt7x<1@R8?Angiޱ _nw8N_>B)S|[E#Rb}+:pKnS{jn?VHM}KY2g^&qY8*v@vwcJQb(&/Z$p08K)Y8j&C+HC>C%c|҈ģSO" ,ujGߌSƶ%%$lFč֌ck')7VgJJd앝D3XKJcW5C±ܥc[&s א@p D$'(!]\s㯬x(Q'P^xVt(뺌>(6]Tce%g<.La@>) j'v^Uڿ8D [[Yxwӌd g5ݓ6:݈QGa+7 ',M f jAm\ڍȫ,byb9e0B]=bcBqr6&IOlUY rYbsm1? w7cȍT"[!,\8f蹕6j~9Jwg*,gJ Kg>y&2fR>.n3|?7A8HҔ{^>Ajt/_֧5aȂ=MW@:f#7`i\ »k|=>cohD03T~i8R!)%žAHK!1:5B?E4fAWj*G뭀 y2ޢg ?JBem1_^>D/ -JS%A;N.7"E_c. P" ua ߿eHr NJ)$pnW!\6)iK/4gb 緧O#":5"H (yhc$84] ZHbZ.9Yq:O:6CGa'w ֳUMЇS(xal(ZVtFJp'9jh{t$M]m˝GŭjHc$/Ҙn8hۚRsJڟ캒Ϋ|F4nX7`ヨ*Yr/l4Z6[pZa  [QTp|5)|+HJC(0'>妀(^]^ ϵxu;a!7ۧϣb7:K(EͶ"(eүޝ!tyZ5L<@D L +p/ϭvV -%:K[&cYE6bM ^zo^ə=-sB~S1Mб kʛ(Z) ljv&O_DgJ4jT? Tn d?_7R |yLh:BnH`TF~ l!c9 06IrIbQjb{9;^yJ«P !9A|/0[0!R]- m-,VߧYAS^1YaleγýbfaY)-rHHu`AX hM;ѥ_HSP9F(1ɗ[Nse&Ec#D>Dp"~\ì*J>,Pn{y#g6 cE:qt8k97ޠK1 ~WJ~H7r=fhzs)|Ҍ(.ّaQ3EC6 8Vk`b,&#Gj_% <-hZ$ؘ$.x}Ouv,$X[;ڎ.>S} 2k9v(dA211-GDbW6* 0̡x'1X bHy 9|?bxR'Vz7[ ['N`Wqtwe;q / jwH#*]2,_Q<ʸ]HԶy8u u7r;:oK V]d}@k VOnv % PR}DY~U MqI\u՚(`0Ѭ@ ѡ ߃YX'̛@ J)ߝ=?;]W¥9 #fKx\oVO#ifJntK'ln[ X$"3f& i~q43!ӯ #0tu),|OB80@QfޡP5J@^8 ׎ѿrc8)G'esɂ$2cP,z|{ݡȱ` ħۧ1{㤰muy{ *܈{AF% 7h7Y:_Q08G N+]˚-L*axm22eƬ4x{%&v^z\8zԂOxPCLIx`]]яy^,^H)tf^pms6r}Z9+ J3NgXuOb`-/[OPB$KpCx%?~K..N~3z 2p&/A`-<4UAA0&~> wY3ѱo6as_+NH1V$mޙoI2n[:R#BL`Ӳ~eRp0y#T"2 o~Q^E a THD pp K-0ٸ6SEqOy#B! 
BloО04~:ޜP-fog=`.A ./]UdJ,}mt9Kji!1xrYXb)vOl{yR WDg!p-lQ] %Ӿ-O'OG&iK-3e1B?)H jj>*%Ȏ)|틍Wܘ1Nf;7ƨ_ЫX$b Y[lJ^ $ k}$ԻK)[riR v3~;u<$>*];m+|~ !3pK1ך[2ݼ/jHu T7;(HckF m e[1,Xh~}8LEF_;N3s7i+Z4Զ?&R{3k^$9+YFoE+o﬛wԎ0ùp*1(&g6 zbz5 vԒ@B;2՜Ex/J 2^$X[ endstream endobj 11 0 obj << /Producer (pdfTeX-1.40.13) /Creator (TeX) /CreationDate (D:20130413212535-07'00') /ModDate (D:20130413212535-07'00') /Trapped /False /PTEX.Fullbanner (This is pdfTeX, Version 3.1415926-2.4-1.40.13 (TeX Live 2012/Debian) kpathsea version 6.1.0) >> endobj 5 0 obj << /Type /ObjStm /N 7 /First 40 /Length 553 /Filter /FlateDecode >> stream xڥSKo@+ت2²+Yl,ԨI:Q- @Wɿ{`53`4B"C A ,+o:5Ya%4->Ņ@JI4A|D$!E*PoDy@om},QhR:i&-(qsC8[$guc*p-wbؿ0}Ut_vI~̒/qop Pw1JA'>w{)0گ*JR& !V.{y|Euv> 5~'s̛. X-`fciERBE_Z#U3z\ѝRZu` cf({2;)VmzXqL2aSx#ty0 endstream endobj 12 0 obj << /Type /XRef /Index [0 13] /Size 13 /W [1 2 1] /Root 10 0 R /Info 11 0 R /ID [<0D826DDE839BE08F0848B1953C6480A7> <0D826DDE839BE08F0848B1953C6480A7>] /Length 48 /Filter /FlateDecode >> stream x "Sg7-⿥њ<^  endstream endobj startxref 14390 %%EOF xapers-0.9.0/test/docs/all.bib000066400000000000000000000022601365520605200161630ustar00rootroot00000000000000@article{ arxiv:1235, author = "Dole, Bob and Cruise, Tim", title = "Creation of the γ-verses", year = "2012", eprint = "1235", file = {:__DOC_DIR__/1.pdf:pdf} } @article{Good_Bad_Up_Down_Left_Right_et_al._2012, title={Multicolor cavity sadness}, volume={29}, url={http://dx.doi.org/10.9999/FOO.1}, DOI={10.9999/FOO.1}, number={10}, journal={Journal of the Color Feelings}, publisher={Optical Society of America}, author={Good, Bob and Bad, Sam and Up, Steve and Down, Joseph and Left, Aidan and Right, Kate and et al.}, year={2012}, month={Sep}, pages={2092}, file={:__DOC_DIR__/2 file.pdf:pdf}} @article{ fake:1234, author = "Reed, Lou and Björk", title = "When the liver meats the pavement", year = "1980", journal = "fake", file = {:__DOC_DIR__/5.pdf:} } @article{30929234, title={The Circle and the Square: Forbidden Love}, 
url={http://dx.doi.org/10.9999/FOO.2}, DOI={10.9999/FOO.2}, file={:}, journal={Shaply Letters}, author={Me and You and We Know, Everyone}, year={1869}} @article{30929, title={Circle are Squares}, url={http://dx.doi.org/10.9999/FOO.3}, DOI={10.9999/FOO.3}, journal={Sharp Letters}, author={Me and You}, year={1869}} xapers-0.9.0/test/import000077500000000000000000000051271365520605200152520ustar00rootroot00000000000000#!/usr/bin/env bash test_description='bibtex database importing.' . ./test-lib.sh ################################################################ test_expect_code 1 'fail import without bibtex' \ 'xapers import' sed "s|__DOC_DIR__|$DOC_DIR|g" <"$DOC_DIR"/all.bib >all.bib # the following two tests provides entries so we can test that import # updates existing entries test_begin_subtest 'add initial documents' xapers add --tags=foo --source="$DOC_DIR"/2.bib xapers add --tags=bar --source="$DOC_DIR"/3.bib xapers search '*' >OUTPUT cat <EXPECTED id:1 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (foo) "Multicolor cavity sadness" id:2 [] {fake:1234} (bar) "When the liver meats the pavement" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'import full bibtex with files' xapers import --tags=new all.bib xapers search '*' >OUTPUT cat <EXPECTED id:5 [arxiv:1235] {arxiv:1235} (new) "Creation of the γ-verses" id:1 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (foo new) "Multicolor cavity sadness" id:2 [] {fake:1234} (bar new) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:3 [doi:10.9999/FOO.3] {30929} (new) "Circle are Squares" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search id:' xapers search id:5 >OUTPUT cat <EXPECTED id:5 [arxiv:1235] {arxiv:1235} (new) "Creation of the γ-verses" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search bib:' xapers search key:30929234 >OUTPUT cat <EXPECTED id:4 
[doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 'search text' xapers search --output=summary lorem >OUTPUT cat <EXPECTED id:2 [] {fake:1234} (bar new) "When the liver meats the pavement" id:5 [arxiv:1235] {arxiv:1235} (new) "Creation of the γ-verses" EOF test_expect_equal_file OUTPUT EXPECTED test_begin_subtest 're-import produces identical results' xapers import --tags=new all.bib xapers search '*' >OUTPUT cat <EXPECTED id:5 [arxiv:1235] {arxiv:1235} (new) "Creation of the γ-verses" id:1 [doi:10.9999/FOO.1] {Good_Bad_Up_Down_Left_Right_et_al._2012} (foo new) "Multicolor cavity sadness" id:2 [] {fake:1234} (bar new) "When the liver meats the pavement" id:4 [doi:10.9999/FOO.2] {30929234} (new) "The Circle and the Square: Forbidden Love" id:3 [doi:10.9999/FOO.3] {30929} (new) "Circle are Squares" EOF test_expect_equal_file OUTPUT EXPECTED ################################################################ test_done xapers-0.9.0/test/sources000077500000000000000000000027541365520605200154260ustar00rootroot00000000000000#!/usr/bin/env bash test_description='Sources interface' . 
./test-lib.sh export XAPERS_SOURCE_PATH=':' ################################################################ # FIXME: add test for source2bib # FIXME: add test for scandoc test_begin_subtest 'list sources' xapers sources | sort >OUTPUT cat <EXPECTED dcc: LIGO Document Control Center (https://dcc.ligo.org/) [builtin] doi: Digital Object Identifier (https://dx.doi.org/) [builtin] arxiv: Open access e-print service (http://arxiv.org/) [builtin] cryptoeprint: Cryptology ePrint Archive (https://eprint.iacr.org/) [builtin] EOF test_expect_equal_file OUTPUT EXPECTED # test_begin_subtest 'source2bib doi' # xapers source2bib 'doi:10.1364/JOSAA.29.002092' >OUTPUT # cat <EXPECTED # @article{Izumi_2012, # author = "Izumi, Kiwamu and Arai, Koji and Barr, Bryan and Betzwieser, Joseph and Brooks, Aidan and Dahl, Katrin and Doravari, Suresh and Driggers, Jennifer C. and Korth, W. Zach and Miao, Haixing and et al.", # title = "Multicolor cavity metrology", # volume = "29", # ISSN = "1520-8532", # url = "http://dx.doi.org/10.1364/JOSAA.29.002092", # DOI = "10.1364/josaa.29.002092", # number = "10", # journal = "Journal of the Optical Society of America A", # publisher = "Optical Society of America (OSA)", # year = "2012", # pages = "2092" # } # EOF # test_expect_equal_file OUTPUT EXPECTED ################################################################ test_done xapers-0.9.0/test/test-aggregate-results000077500000000000000000000027561365520605200203470ustar00rootroot00000000000000#!/usr/bin/env bash fixed=0 success=0 failed=0 broken=0 total=0 for file do while read type value do case $type in '') continue ;; fixed) fixed=$(($fixed + $value)) ;; success) success=$(($success + $value)) ;; failed) failed=$(($failed + $value)) ;; broken) broken=$(($broken + $value)) ;; total) total=$(($total + $value)) ;; esac done <"$file" done pluralize () { case $2 in 1) case $1 in test) echo test ;; failure) echo failure ;; esac ;; *) case $1 in test) echo tests ;; failure) echo failures ;; esac ;; 
esac } echo "Xapers test suite complete." if [ "$fixed" = "0" ] && [ "$failed" = "0" ]; then tests=$(pluralize "test" $total) printf "All $total $tests " if [ "$broken" = "0" ]; then echo "passed." else failures=$(pluralize "failure" $broken) echo "behaved as expected ($broken expected $failures)." fi; else echo "$success/$total tests passed." if [ "$broken" != "0" ]; then tests=$(pluralize "test" $broken) echo "$broken broken $tests failed as expected." fi if [ "$fixed" != "0" ]; then tests=$(pluralize "test" $fixed) echo "$fixed broken $tests now fixed." fi if [ "$failed" != "0" ]; then tests=$(pluralize "test" $failed) echo "$failed $tests failed." fi fi skipped=$(($total - $fixed - $success - $failed - $broken)) if [ "$skipped" != "0" ]; then tests=$(pluralize "test" $skipped) echo "$skipped $tests skipped." fi xapers-0.9.0/test/test-lib.sh000066400000000000000000000444761365520605200161030ustar00rootroot00000000000000# # Copyright (c) 2005 Junio C Hamano # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/ . if [ ${BASH_VERSINFO[0]} -lt 4 ]; then echo "Error: The notmuch test suite requires a bash version >= 4.0" echo "due to use of associative arrays within the test suite." echo "Please try again with a newer bash (or help us fix the" echo "test suite to be more portable). Thanks." 
exit 1 fi # if --tee was passed, write the output not only to the terminal, but # additionally to the file test-results/$BASENAME.out, too. case "$GIT_TEST_TEE_STARTED, $* " in done,*) # do not redirect again ;; *' --tee '*|*' --va'*) mkdir -p test-results BASE=test-results/$(basename "$0" .sh) (GIT_TEST_TEE_STARTED=done ${SHELL-sh} "$0" "$@" 2>&1; echo $? > $BASE.exit) | tee $BASE.out test "$(cat $BASE.exit)" = 0 exit ;; esac # Keep the original TERM for say_color and test_emacs ORIGINAL_TERM=$TERM # For repeatability, reset the environment to known value. LANG=C.UTF-8 LC_ALL=C.UTF-8 PAGER=cat TZ=UTC TERM=dumb export LANG LC_ALL PAGER TERM TZ GIT_TEST_CMP=${GIT_TEST_CMP:-diff -u} TEST_EMACS=${TEST_EMACS:-${EMACS:-emacs}} # Protect ourselves from common misconfiguration to export # CDPATH into the environment unset CDPATH unset GREP_OPTIONS # Convenience # # A regexp to match 5 and 40 hexdigits _x05='[0-9a-f][0-9a-f][0-9a-f][0-9a-f][0-9a-f]' _x40="$_x05$_x05$_x05$_x05$_x05$_x05$_x05$_x05" _x04='[0-9a-f][0-9a-f][0-9a-f][0-9a-f]' _x32="$_x04$_x04$_x04$_x04$_x04$_x04$_x04$_x04" # Each test should start with something like this, after copyright notices: # # test_description='Description of this test... # This test checks if command xyzzy does the right thing... # ' # . 
./test-lib.sh [ "x$ORIGINAL_TERM" != "xdumb" ] && ( TERM=$ORIGINAL_TERM && export TERM && [ -t 1 ] && tput bold >/dev/null 2>&1 && tput setaf 1 >/dev/null 2>&1 && tput sgr0 >/dev/null 2>&1 ) && color=t while test "$#" -ne 0 do case "$1" in -d|--d|--de|--deb|--debu|--debug) debug=t; shift ;; -i|--i|--im|--imm|--imme|--immed|--immedi|--immedia|--immediat|--immediate) immediate=t; shift ;; -l|--l|--lo|--lon|--long|--long-|--long-t|--long-te|--long-tes|--long-test|--long-tests) GIT_TEST_LONG=t; export GIT_TEST_LONG; shift ;; -h|--h|--he|--hel|--help) help=t; shift ;; -v|--v|--ve|--ver|--verb|--verbo|--verbos|--verbose) verbose=t; shift ;; -q|--q|--qu|--qui|--quie|--quiet) quiet=t; shift ;; --with-dashes) with_dashes=t; shift ;; --no-color) color=; shift ;; --no-python) # noop now... shift ;; --va|--val|--valg|--valgr|--valgri|--valgrin|--valgrind) valgrind=t; verbose=t; shift ;; --tee) shift ;; # was handled already --root=*) root=$(expr "z$1" : 'z[^=]*=\(.*\)') shift ;; *) echo "error: unknown test option '$1'" >&2; exit 1 ;; esac done if test -n "$debug"; then print_subtest () { printf " %-4s" "[$((test_count - 1))]" } else print_subtest () { true } fi if test -n "$color"; then say_color () { ( TERM=$ORIGINAL_TERM export TERM case "$1" in error) tput bold; tput setaf 1;; # bold red skip) tput bold; tput setaf 2;; # bold green pass) tput setaf 2;; # green info) tput setaf 3;; # brown *) test -n "$quiet" && return;; esac shift printf " " printf "$@" tput sgr0 print_subtest ) } else say_color() { test -z "$1" && test -n "$quiet" && return shift printf " " printf "$@" print_subtest } fi error () { say_color error "error: $*\n" GIT_EXIT_OK=t exit 1 } say () { say_color info "$*" } test "${test_description}" != "" || error "Test script did not set test_description." 
if test "$help" = "t" then echo "Tests ${test_description}" exit 0 fi echo $(basename "$0"): "Testing ${test_description}" exec 5>&1 test_failure=0 test_count=0 test_fixed=0 test_broken=0 test_success=0 die () { code=$? rm -rf "$TEST_TMPDIR" if test -n "$GIT_EXIT_OK" then exit $code else echo >&5 "FATAL: Unexpected exit with code $code" exit 1 fi } GIT_EXIT_OK= # Note: TEST_TMPDIR *NOT* exported! TEST_TMPDIR=$(mktemp -d "${TMPDIR:-/tmp}/test-$$.XXXXXX") trap 'die' EXIT test_decode_color () { sed -e 's/.\[1m//g' \ -e 's/.\[31m//g' \ -e 's/.\[32m//g' \ -e 's/.\[33m//g' \ -e 's/.\[34m//g' \ -e 's/.\[35m//g' \ -e 's/.\[36m//g' \ -e 's/.\[m//g' } q_to_nul () { perl -pe 'y/Q/\000/' } q_to_cr () { tr Q '\015' } append_cr () { sed -e 's/$/Q/' | tr Q '\015' } remove_cr () { tr '\015' Q | sed -e 's/Q$//' } test_begin_subtest () { if [ -n "$inside_subtest" ]; then exec 1>&6 2>&7 # Restore stdout and stderr error "bug in test script: Missing test_expect_equal in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}" fi test_subtest_name="$1" test_reset_state_ # Remember stdout and stderr file descriptors and redirect test # output to the previously prepared file descriptors 3 and 4 (see # below) if test "$verbose" != "t"; then exec 4>test.output 3>&4; fi exec 6>&1 7>&2 >&3 2>&4 inside_subtest=t } # Pass test if two arguments match # # Note: Unlike all other test_expect_* functions, this function does # not accept a test name. Instead, the caller should call # test_begin_subtest before calling this function in order to set the # name. test_expect_equal () { exec 1>&6 2>&7 # Restore stdout and stderr inside_subtest= test "$#" = 3 && { prereq=$1; shift; } || prereq= test "$#" = 2 || error "bug in the test script: not 2 or 3 parameters to test_expect_equal" output="$1" expected="$2" if ! 
test_skip "$test_subtest_name" then if [ "$output" = "$expected" ]; then test_ok_ "$test_subtest_name" else testname=$this_test.$test_count echo "$expected" > $testname.expected echo "$output" > $testname.output test_failure_ "$test_subtest_name" "$(diff -u $testname.expected $testname.output)" fi fi } # Like test_expect_equal, but takes two filenames. test_expect_equal_file () { exec 1>&6 2>&7 # Restore stdout and stderr inside_subtest= test "$#" = 3 && { prereq=$1; shift; } || prereq= test "$#" = 2 || error "bug in the test script: not 2 or 3 parameters to test_expect_equal" output="$1" expected="$2" if ! test_skip "$test_subtest_name" then if diff -q "$expected" "$output" >/dev/null ; then test_ok_ "$test_subtest_name" else testname=$this_test.$test_count cp "$output" $testname.output cp "$expected" $testname.expected test_failure_ "$test_subtest_name" "$(diff -u $testname.expected $testname.output)" fi fi } # Like test_expect_equal, but arguments are JSON expressions to be # canonicalized before diff'ing. If an argument cannot be parsed, it # is used unchanged so that there's something to diff against. test_expect_equal_json () { output=$(echo "$1" | python -mjson.tool || echo "$1") expected=$(echo "$2" | python -mjson.tool || echo "$2") shift 2 test_expect_equal "$output" "$expected" "$@" } # Use test_set_prereq to tell that a particular prerequisite is available. # The prerequisite can later be checked for in two ways: # # - Explicitly using test_have_prereq. # # - Implicitly by specifying the prerequisite tag in the calls to # test_expect_{success,failure,code}. # # The single parameter is the prerequisite tag (a simple word, in all # capital letters by convention). test_set_prereq () { satisfied="$satisfied$1 " } satisfied=" " test_have_prereq () { case $satisfied in *" $1 "*) : yes, have it ;; *) ! 
: nope ;; esac } # declare prerequisite for the given external binary test_declare_external_prereq () { binary="$1" test "$#" = 2 && name=$2 || name="$binary(1)" hash $binary 2>/dev/null || eval " test_missing_external_prereq_${binary}_=t $binary () { echo -n \"\$test_subtest_missing_external_prereqs_ \" | grep -qe \" $name \" || test_subtest_missing_external_prereqs_=\"\$test_subtest_missing_external_prereqs_ $name\" false }" } # Explicitly require external prerequisite. Useful when binary is # called indirectly (e.g. from emacs). # Returns success if dependency is available, failure otherwise. test_require_external_prereq () { binary="$1" if [ "$(eval echo -n \$test_missing_external_prereq_${binary}_)" = t ]; then # dependency is missing, call the replacement function to note it eval "$binary" else true fi } # You are not expected to call test_ok_ and test_failure_ directly, use # the text_expect_* functions instead. test_ok_ () { if test "$test_subtest_known_broken_" = "t"; then test_known_broken_ok_ "$@" return fi test_success=$(($test_success + 1)) say_color pass "%-6s" "PASS" echo " $@" } test_failure_ () { if test "$test_subtest_known_broken_" = "t"; then test_known_broken_failure_ "$@" return fi test_failure=$(($test_failure + 1)) test_failure_message_ "FAIL" "$@" test "$immediate" = "" || { GIT_EXIT_OK=t; exit 1; } return 1 } test_failure_message_ () { say_color error "%-6s" "$1" echo " $2" shift 2 echo "$@" | sed -e 's/^/ /' if test "$verbose" != "t"; then cat test.output; fi } test_known_broken_ok_ () { test_reset_state_ test_fixed=$(($test_fixed+1)) say_color pass "%-6s" "FIXED" echo " $@" } test_known_broken_failure_ () { test_reset_state_ test_broken=$(($test_broken+1)) test_failure_message_ "BROKEN" "$@" return 1 } test_debug () { test "$debug" = "" || eval "$1" } test_run_ () { test_cleanup=: if test "$verbose" != "t"; then exec 4>test.output 3>&4; fi eval >&3 2>&4 "$1" eval_ret=$? 
eval >&3 2>&4 "$test_cleanup" return 0 } test_skip () { test_count=$(($test_count+1)) to_skip= for skp in $XAPERS_SKIP_TESTS do case $this_test.$test_count in $skp) to_skip=t esac done if test -z "$to_skip" && test -n "$prereq" && ! test_have_prereq "$prereq" then to_skip=t fi case "$to_skip" in t) test_report_skip_ "$@" ;; *) test_check_missing_external_prereqs_ "$@" ;; esac } test_check_missing_external_prereqs_ () { if test -n "$test_subtest_missing_external_prereqs_"; then say_color skip >&1 "missing prerequisites:" echo "$test_subtest_missing_external_prereqs_" >&1 test_report_skip_ "$@" else false fi } test_report_skip_ () { test_reset_state_ say_color skip >&3 "skipping test:" echo " $@" >&3 say_color skip "%-6s" "SKIP" echo " $1" } test_subtest_known_broken () { test_subtest_known_broken_=t } test_expect_success () { test "$#" = 3 && { prereq=$1; shift; } || prereq= test "$#" = 2 || error "bug in the test script: not 2 or 3 parameters to test-expect-success" test_reset_state_ if ! test_skip "$@" then test_run_ "$2" run_ret="$?" # test_run_ may update missing external prerequisites test_check_missing_external_prereqs_ "$@" || if [ "$run_ret" = 0 -a "$eval_ret" = 0 ] then test_ok_ "$1" else test_failure_ "$@" fi fi } test_expect_code () { test "$#" = 4 && { prereq=$1; shift; } || prereq= test "$#" = 3 || error "bug in the test script: not 3 or 4 parameters to test-expect-code" test_reset_state_ if ! test_skip "$@" then test_run_ "$3" run_ret="$?" # test_run_ may update missing external prerequisites, test_check_missing_external_prereqs_ "$@" || if [ "$run_ret" = 0 -a "$eval_ret" = "$1" ] then test_ok_ "$2" else test_failure_ "$@" fi fi } # test_external runs external test scripts that provide continuous # test output about their progress, and succeeds/fails on # zero/non-zero exit code. It outputs the test output on stdout even # in non-verbose mode, and announces the external script with "* run # : ..." before running it. 
When providing relative paths, keep in # mind that all scripts run in "trash directory". # Usage: test_external description command arguments... # Example: test_external 'Perl API' perl ../path/to/test.pl test_external () { test "$#" = 4 && { prereq=$1; shift; } || prereq= test "$#" = 3 || error >&5 "bug in the test script: not 3 or 4 parameters to test_external" descr="$1" shift test_reset_state_ if ! test_skip "$descr" "$@" then # Announce the script to reduce confusion about the # test output that follows. say_color "" " run $test_count: $descr ($*)" # Run command; redirect its stderr to &4 as in # test_run_, but keep its stdout on our stdout even in # non-verbose mode. "$@" 2>&4 if [ "$?" = 0 ] then test_ok_ "$descr" else test_failure_ "$descr" "$@" fi fi } # Like test_external, but in addition tests that the command generated # no output on stderr. test_external_without_stderr () { # The temporary file has no (and must have no) security # implications. tmp="$TMPDIR"; if [ -z "$tmp" ]; then tmp=/tmp; fi stderr="$tmp/git-external-stderr.$$.tmp" test_external "$@" 4> "$stderr" [ -f "$stderr" ] || error "Internal error: $stderr disappeared." descr="no stderr: $1" shift if [ ! -s "$stderr" ]; then rm "$stderr" test_ok_ "$descr" else if [ "$verbose" = t ]; then output=`echo; echo Stderr is:; cat "$stderr"` else output= fi # rm first in case test_failure exits. rm "$stderr" test_failure_ "$descr" "$@" "$output" fi } # This is not among top-level (test_expect_success) # but is a prefix that can be used in the test script, like: # # test_expect_success 'complain and die' ' # do something && # do something else && # test_must_fail git checkout ../outerspace # ' # # Writing this as "! git checkout ../outerspace" is wrong, because # the failure could be due to a segv. We want a controlled failure. test_must_fail () { "$@" test $? -gt 0 -a $? -le 129 -o $? -gt 192 } # test_cmp is a helper function to compare actual and expected output. 
# You can use it like: # # test_expect_success 'foo works' ' # echo expected >expected && # foo >actual && # test_cmp expected actual # ' # # This could be written as either "cmp" or "diff -u", but: # - cmp's output is not nearly as easy to read as diff -u # - not all diff versions understand "-u" test_cmp() { $GIT_TEST_CMP "$@" } # This function can be used to schedule some commands to be run # unconditionally at the end of the test to restore sanity: # # test_expect_success 'test core.capslock' ' # git config core.capslock true && # test_when_finished "git config --unset core.capslock" && # hello world # ' # # That would be roughly equivalent to # # test_expect_success 'test core.capslock' ' # git config core.capslock true && # hello world # git config --unset core.capslock # ' # # except that the greeting and config --unset must both succeed for # the test to pass. test_when_finished () { test_cleanup="{ $* } && (exit \"\$eval_ret\"); eval_ret=\$?; $test_cleanup" } test_done () { GIT_EXIT_OK=t test_results_dir="$TEST_DIRECTORY/test-results" mkdir -p "$test_results_dir" test_results_path="$test_results_dir/${0%.sh}-$$" echo "total $test_count" >> $test_results_path echo "success $test_success" >> $test_results_path echo "fixed $test_fixed" >> $test_results_path echo "broken $test_broken" >> $test_results_path echo "failed $test_failure" >> $test_results_path echo "" >> $test_results_path echo [ -n "$EMACS_SERVER" ] && test_emacs '(kill-emacs)' if [ "$test_failure" = "0" ]; then if [ "$test_broken" = "0" ]; then rm -rf "$remove_tmp" fi exit 0 else exit 1 fi } test_python() { export LD_LIBRARY_PATH=$TEST_DIRECTORY/../lib export PYTHONPATH=$TEST_DIRECTORY/../bindings/python # Some distros (e.g. Arch Linux) ship Python 2.* as /usr/bin/python2, # most others as /usr/bin/python. So first try python2, and fallback to # python if python2 doesn't exist. 
cmd=python2 [[ "$test_missing_external_prereq_python2_" = t ]] && cmd=python (echo "import sys; _orig_stdout=sys.stdout; sys.stdout=open('OUTPUT', 'w')"; cat) \ | $cmd - } test_reset_state_ () { test -z "$test_init_done_" && test_init_ test_subtest_known_broken_= test_subtest_missing_external_prereqs_= } # called once before the first subtest test_init_ () { test_init_done_=t # skip all tests if there were external prerequisites missing during init test_check_missing_external_prereqs_ "all tests in $this_test" && test_done } # Test the binaries we have just built. The tests are kept in # test/ subdirectory and are run in 'trash directory' subdirectory. TEST_DIRECTORY=$(pwd) export PATH # Test repository test="tmp.$(basename "$0" .sh)" test -n "$root" && test="$root/$test" case "$test" in /*) TMP_DIRECTORY="$test" ;; *) TMP_DIRECTORY="$TEST_DIRECTORY/$test" ;; esac test ! -z "$debug" || remove_tmp=$TMP_DIRECTORY rm -fr "$test" || { GIT_EXIT_OK=t echo >&5 "FATAL: Cannot prepare test area" exit 1 } mkdir -p "${test}" # load local test library shopt -s expand_aliases source ./test-local.sh # Use -P to resolve symlinks in our working directory so that the cwd # in subprocesses like git equals our $PWD (for pathname comparisons). 
cd -P "$test" || error "Cannot setup test environment" if test "$verbose" = "t" then exec 4>&2 3>&1 else exec 4>test.output 3>&4 fi this_test=${0##*/} for skp in $XAPERS_SKIP_TESTS do to_skip= for skp in $XAPERS_SKIP_TESTS do case "$this_test" in $skp) to_skip=t esac done case "$to_skip" in t) say_color skip >&3 "skipping test $this_test altogether" say_color skip "skip all tests in $this_test" test_done esac done # Provide an implementation of the 'yes' utility yes () { if test $# = 0 then y=y else y="$*" fi while echo "$y" do : done } # Fix some commands on Windows case $(uname -s) in *MINGW*) # Windows has its own (incompatible) sort and find sort () { /usr/bin/sort "$@" } find () { /usr/bin/find "$@" } sum () { md5sum "$@" } # git sees Windows-style pwd pwd () { builtin pwd -W } # no POSIX permissions # backslashes in pathspec are converted to '/' # exec does not inherit the PID ;; *) test_set_prereq POSIXPERM test_set_prereq BSLASHPSPEC test_set_prereq EXECKEEPSPID ;; esac test -z "$NO_PERL" && test_set_prereq PERL test -z "$NO_PYTHON" && test_set_prereq PYTHON # test whether the filesystem supports symbolic links ln -s x y 2>/dev/null && test -h y 2>/dev/null && test_set_prereq SYMLINKS rm -f y xapers-0.9.0/test/test-local.sh000066400000000000000000000004071365520605200164110ustar00rootroot00000000000000# declare prerequisites for external binaries used in tests test_declare_external_prereq python3 export PYTHONPATH="$TEST_DIRECTORY"/..:$PYTHONPATH alias xapers="python3 -m xapers" export DOC_DIR="$TEST_DIRECTORY/docs" export XAPERS_ROOT="$TMP_DIRECTORY/docs" xapers-0.9.0/test/test-verbose000077500000000000000000000013131365520605200163530ustar00rootroot00000000000000#!/usr/bin/env bash test_description='the verbosity options of the test framework itself.' . 
./test-lib.sh test_expect_success 'print something in test_expect_success and pass' ' echo "hello stdout" && echo "hello stderr" >&2 && true ' test_expect_success 'print something in test_expect_success and fail' ' echo "hello stdout" && echo "hello stderr" >&2 && false ' test_begin_subtest 'print something between test_begin_subtest and test_expect_equal and pass' echo "hello stdout" echo "hello stderr" >&2 test_expect_equal "a" "a" test_begin_subtest 'print something test_begin_subtest and test_expect_equal and fail' echo "hello stdout" echo "hello stderr" >&2 test_expect_equal "a" "b" test_done xapers-0.9.0/test/test.expected-output/000077500000000000000000000000001365520605200201225ustar00rootroot00000000000000xapers-0.9.0/test/test.expected-output/test-verbose-no000066400000000000000000000011451365520605200231020ustar00rootroot00000000000000test-verbose: Testing the verbosity options of the test framework itself. PASS print something in test_expect_success and pass FAIL print something in test_expect_success and fail echo "hello stdout" && echo "hello stderr" >&2 && false hello stdout hello stderr PASS print something between test_begin_subtest and test_expect_equal and pass FAIL print something test_begin_subtest and test_expect_equal and fail --- test-verbose.4.expected 2010-11-14 21:41:12.738189710 +0000 +++ test-verbose.4.output 2010-11-14 21:41:12.738189710 +0000 @@ -1 +1 @@ -b +a hello stdout hello stderr xapers-0.9.0/test/test.expected-output/test-verbose-yes000066400000000000000000000012311365520605200232620ustar00rootroot00000000000000test-verbose: Testing the verbosity options of the test framework itself. 
hello stdout hello stderr PASS print something in test_expect_success and pass hello stdout hello stderr FAIL print something in test_expect_success and fail echo "hello stdout" && echo "hello stderr" >&2 && false hello stdout hello stderr PASS print something between test_begin_subtest and test_expect_equal and pass hello stdout hello stderr FAIL print something test_begin_subtest and test_expect_equal and fail --- test-verbose.4.expected 2010-11-14 21:41:06.650023289 +0000 +++ test-verbose.4.output 2010-11-14 21:41:06.650023289 +0000 @@ -1 +1 @@ -b +a xapers-0.9.0/test/xapers-test000077500000000000000000000020441365520605200162120ustar00rootroot00000000000000#!/usr/bin/env bash # Run tests # # Copyright (c) 2005 Junio C Hamano # # Adapted from a Makefile to a shell script by Carl Worth (2010) if [ ${BASH_VERSINFO[0]} -lt 4 ]; then echo "Error: The notmuch test suite requires a bash version >= 4.0" echo "due to use of associative arrays within the test suite." echo "Please try again with a newer bash (or help us fix the" echo "test suite to be more portable). Thanks." exit 1 fi cd $(dirname "$0") TESTS=" basic sources all import " # setup TESTS=${XAPERS_TESTS:=$TESTS} # Clean up any results from a previous run rm -rf test-results docs/.xapers # test for timeout utility if command -v timeout >/dev/null; then TEST_TIMEOUT_CMD="timeout 2m " echo "INFO: using 2 minute timeout for tests" else TEST_TIMEOUT_CMD="" fi trap 'e=$?; kill $!; exit $e' HUP INT TERM # Run the tests for test in $TESTS; do $TEST_TIMEOUT_CMD ./$test "$@" & wait $! done trap - HUP INT TERM # Report results ./test-aggregate-results test-results/* # Clean up rm -rf test-result xapers-0.9.0/xapers/000077500000000000000000000000001365520605200143305ustar00rootroot00000000000000xapers-0.9.0/xapers/__init__.py000066400000000000000000000016021365520605200164400ustar00rootroot00000000000000""" This file is part of xapers. 
Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . Copyright 2012-2017 Jameson Rollins """ from .database import Database from .database import DatabaseError from .database import DatabaseUninitializedError from .database import DatabaseLockError from .documents import Documents, Document xapers-0.9.0/xapers/__main__.py000077500000000000000000000434741365520605200164410ustar00rootroot00000000000000""" This file is part of xapers. Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with notmuch. If not, see . Copyright 2012-2017 Jameson Rollins """ import os import sys import signal from . import cli from .bibtex import Bibtex from .source import Sources, SourceAttributeError from .parser import ParseError ######################################################################## PROG = 'xapers' def usage(): print("Usage:", PROG, " [args...]") print(""" Commands: add [options] [] Add a new document or update existing. If provided, search should match a single document. 
--source=[|] source id, for online retrieval, or bibtex file path --file[=] PDF file to index and archive --tags=[,...] initial tags --prompt prompt for unspecified options --view view entry after adding import Import entries from a bibtex database. --tags=[,...] tags to apply to all imported documents delete Delete documents from database. --noprompt do not prompt to confirm deletion restore Restore database from an existing xapers root directory. tag +|- [...] [--] Add/remove tags. search [options] Search for documents. --output=[summary|bibtex|tags|sources|keys|files] output format (default is 'summary') --sort=[relevance|year] sort output (default is 'relevance') --limit=N limit number of results returned tags Short for \"search --output=tags\". bibtex Short for \"search --output=bibtex\". view View search in curses UI. count Count matches. export Export documents to a directory of files named for document titles. sources List available sources. source2url [...] Output URLs for sources. source2bib [...] Retrieve bibtex for sources and print to stdout. source2file Retrieve file for source and write to stdout. scandoc Scan PDF file for source ids. version Print version number. help [search] This usage, or search term help. The xapers document store is specified by the XAPERS_ROOT environment variable, or defaults to '~/.xapers/docs' if not specified (the directory is allowed to be a symlink). See 'xapers help search' for more information on term definitions and search syntax.""") def usage_search(): print("""Xapers supports a common syntax for search terms. Search can consist of free-form text and quoted phrases. Terms can be combined with standard Boolean operators. All terms are combined with a logical AND by default. Parentheses can be used to group operators, but must be protect from shell interpretation. The string '*' will match all documents. 
Additionally, the following prefixed terms are understood (where indicate user-supplied values): id: Xapers document id author: string in authors (also a:) title: string in title (also t:) tag: specific user tag : specific source id (sid) source: specific Xapers source key: specific bibtex citation key year: specific publication year (also y:) year:.. publication year range (also y:) year:.. year:.. Publication years must be four-digit integers. See the following for more information on search terms: http://xapian.org/docs/queryparser.html""") ######################################################################## # combine a list of terms with spaces between, so that simple queries # don't have to be quoted at the shell level. def make_query_string(terms, require=True): string = str.join(' ', terms) if string == '': if require: print("Must specify a search term.", file=sys.stderr) sys.exit(1) else: string = '*' return string def import_nci(): try: from . import nci except ImportError: print("The python3-urwid package does not appear to be installed.", file=sys.stderr) print("Please install to be able to use the curses UI.", file=sys.stderr) sys.exit(1) return nci ######################################################################## def main(): signal.signal(signal.SIGINT, signal.SIG_DFL) signal.signal(signal.SIGPIPE, signal.SIG_DFL) if len(sys.argv) > 1: cmd = sys.argv[1] else: cmd = [] ######################################## if cmd in ['add', 'a']: tags = None infile = None sid = None prompt = False view = False query = None argc = 2 while True: if argc >= len(sys.argv): break elif '--source=' in sys.argv[argc]: sid = sys.argv[argc].split('=', 1)[1] elif '--file' in sys.argv[argc]: if '=' in sys.argv[argc]: infile = sys.argv[argc].split('=', 1)[1] else: infile = True elif '--tags=' in sys.argv[argc]: tags = sys.argv[argc].split('=', 1)[1].split(',') elif '--prompt' in sys.argv[argc]: prompt = True elif '--view' in sys.argv[argc]: view = True else: break argc 
+= 1 if argc == (len(sys.argv) - 1): query = make_query_string(sys.argv[argc:]) with cli.initdb(writable=True, create=True) as db: docid = cli.add(db, query, infile=infile, sid=sid, tags=tags, prompt=prompt) if view and docid: nci = import_nci() nci.UI(cmd=['search', 'id:'+str(docid)]) ######################################## elif cmd in ['import', 'i']: tags = [] argc = 2 while True: if argc >= len(sys.argv): break elif '--tags=' in sys.argv[argc]: tags = sys.argv[argc].split('=', 1)[1].split(',') elif '--overwrite' in sys.argv[argc]: overwrite = True else: break argc += 1 try: bibfile = sys.argv[argc] except IndexError: print("Must specify bibtex file to import.", file=sys.stderr) sys.exit(1) if not os.path.exists(bibfile): print("File not found: %s" % bibfile, file=sys.stderr) sys.exit(1) with cli.initdb(writable=True, create=True) as db: cli.importbib(db, bibfile, tags=tags) ######################################## elif cmd in ['update']: argc = 2 query = make_query_string(sys.argv[argc:]) with cli.initdb(writable=True) as db: for doc in db.search(query): try: print("Updating %s..." % doc.docid, end=' ', file=sys.stderr) doc.update_from_bibtex() doc.sync() print("done.", file=sys.stderr) except: print("\n", file=sys.stderr) raise ######################################## elif cmd in ['delete']: prompt = True argc = 2 while True: if argc >= len(sys.argv): break elif '--noprompt' in sys.argv[argc]: prompt = False else: break argc += 1 query = make_query_string(sys.argv[argc:]) with cli.initdb(writable=True) as db: count = db.count(query) if count == 0: print("No documents found for query.", file=sys.stderr) sys.exit(1) for doc in db.search(query): if prompt: resp = input("Type 'yes' to delete document id:%d: " % doc.docid) if resp != 'yes': continue print("deleting id:%d..." 
% doc.docid, end=' ', file=sys.stderr) doc.purge() print("done.", file=sys.stderr) ######################################## elif cmd in ['search', 's']: oformat = 'summary' sort = 'relevance' limit = 0 argc = 2 while True: if argc >= len(sys.argv): break if '--output=' in sys.argv[argc]: oformat = sys.argv[argc].split('=')[1] elif '--sort=' in sys.argv[argc]: sort = sys.argv[argc].split('=')[1] elif '--limit=' in sys.argv[argc]: limit = int(sys.argv[argc].split('=')[1]) else: break argc += 1 if oformat not in ['summary', 'bibtex', 'tags', 'sources', 'keys', 'files']: print("Unknown output format.", file=sys.stderr) sys.exit(1) if sort not in ['relevance', 'year']: print("Unknown sort parameter.", file=sys.stderr) sys.exit(1) query = make_query_string(sys.argv[argc:]) with cli.initdb() as db: cli.search(db, query, oformat=oformat, sort=sort, limit=limit) ######################################## elif cmd in ['tags']: argc = 2 query = make_query_string(sys.argv[argc:], require=False) with cli.initdb() as db: cli.search(db, query, oformat='tags') ######################################## elif cmd in ['bibtex', 'bib', 'b']: argc = 2 query = make_query_string(sys.argv[argc:]) with cli.initdb() as db: cli.search(db, query, oformat='bibtex') ######################################## elif cmd in ['nci', 'view', 'show', 'select']: nci = import_nci() if cmd == 'nci': args = sys.argv[2:] else: query = make_query_string(sys.argv[2:], require=False) args = ['search', query] nci.UI(cmd=args) ######################################## elif cmd in ['tag', 't']: add_tags = [] remove_tags = [] argc = 2 for arg in sys.argv[argc:]: if argc >= len(sys.argv): break if arg == '--': argc += 1 continue if arg[0] == '+': add_tags.append(arg[1:]) elif arg[0] == '-': remove_tags.append(arg[1:]) else: break argc += 1 if not add_tags and not remove_tags: print("Must specify tags to add or remove.", file=sys.stderr) sys.exit(1) if '' in add_tags: print("Null tags not allowed.", file=sys.stderr) 
sys.exit(1) query = make_query_string(sys.argv[argc:]) with cli.initdb(writable=True) as db: for doc in db.search(query): doc.add_tags(add_tags) doc.remove_tags(remove_tags) doc.sync() ######################################## elif cmd in ['dumpterms']: prefix = None argc = 2 while True: if argc >= len(sys.argv): break if '--prefix=' in sys.argv[argc]: prefix = sys.argv[argc].split('=')[1] else: break argc += 1 query = make_query_string(sys.argv[argc:], require=True) with cli.initdb() as db: if query == '*': for term in db.term_iter(prefix): print(term) else: for doc in db.search(query): for term in doc.term_iter(prefix): print(term) ######################################## elif cmd in ['maxid']: docid = 0 with cli.initdb() as db: for doc in db.search('*'): docid = max(docid, doc.docid) print('id:%d' % docid) ######################################## elif cmd in ['count']: query = make_query_string(sys.argv[2:], require=False) with cli.initdb() as db: print(db.count(query)) ######################################## elif cmd in ['export']: outdir = sys.argv[2] query = make_query_string(sys.argv[3:]) with cli.initdb() as db: cli.export(db, outdir, query) ######################################## elif cmd in ['restore']: with cli.initdb(writable=True, create=True, force=True) as db: db.restore(log=True) ######################################## elif cmd in ['sources']: sources = Sources() w = 0 for source in sources: w = max(len(source.name), w) format = '%'+str(w)+'s: %s[%s]' for source in sources: name = source.name desc = '' try: desc += '%s ' % source.description except AttributeError: pass try: desc += '(%s) ' % source.url except AttributeError: pass if source.is_builtin: path = 'builtin' else: path = source.path print(format % (name, desc, path)) ######################################## elif cmd in ['source2bib', 's2b', 'source2url', 's2u', 'source2file', 's2f']: outraw = False argc = 2 for arg in sys.argv[argc:]: if argc >= len(sys.argv): break elif sys.argv[argc] 
== '--raw': outraw = True else: break argc += 1 try: sss = sys.argv[argc:] except IndexError: print("Must specify source to retrieve.", file=sys.stderr) sys.exit(1) if cmd in ['source2file', 's2f']: if len(sss) > 1: print("source2file can only retrieve file for single source.", file=sys.stderr) sys.exit(1) sources = Sources() for ss in sss: item = sources.match_source(ss) if not item: print("String '{}' matches no known source.".format(ss), file=sys.stderr) sys.exit(1) if cmd in ['source2url', 's2u']: print(item.url) continue elif cmd in ['source2bib', 's2b']: try: bibtex = item.fetch_bibtex() except SourceAttributeError as e: print(e, file=sys.stderr) sys.exit(1) if outraw: print(bibtex) else: try: print(Bibtex(bibtex)[0].as_string()) except: print("Failed to parse retrieved bibtex data.", file=sys.stderr) print("Use --raw option to view raw retrieved data.", file=sys.stderr) sys.exit(1) elif cmd in ['source2file', 's2f']: try: name, data = item.fetch_file() print(data) except Exception as e: print("Could not retrieve file: %s" % e, file=sys.stderr) sys.exit(1) ######################################## elif cmd in ['scandoc', 'sd']: try: infile = sys.argv[2] except IndexError: print("Must specify document to scan.", file=sys.stderr) sys.exit(1) try: items = Sources().scan_file(infile) except ParseError as e: print("Parse error: %s" % e, file=sys.stderr) print("Is file '%s' a PDF?" % infile, file=sys.stderr) sys.exit(1) for item in items: print(item) ######################################## elif cmd in ['version', '--version', '-v']: from . import version print('xapers', version.__version__) ######################################## elif cmd in ['help', 'h', '--help', '-h']: if len(sys.argv) > 2: if sys.argv[2] == 'search': usage_search() else: usage() ######################################## else: if cmd: print("Unknown command '%s'." 
% cmd, file=sys.stderr) else: print("Command not specified.", file=sys.stderr) print("See \"help\" for more information.", file=sys.stderr) sys.exit(1) if __name__ == '__main__': main() xapers-0.9.0/xapers/bibtex.py000066400000000000000000000137771365520605200161760ustar00rootroot00000000000000""" This file is part of xapers. Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . Copyright 2012-2017 Jameson Rollins """ import os import sys import io import re import json import pybtex from pybtex.bibtex.utils import split_name_list from pybtex.database import Entry, Person from pybtex.database.input import bibtex as inparser from pybtex.database.output import bibtex as outparser def clean_bib_string(string): for char in ['{', '}']: string = string.replace(char,'') return string ################################################## class BibtexError(Exception): """Base class for Xapers bibtex exceptions.""" def __init__(self, msg): self.msg = msg def __str__(self): return self.msg ################################################## class Bibtex(): """Represents a bibtex database. 
""" # http://www.bibtex.org/Format/ def __init__(self, bibtex): parser = inparser.Parser(encoding='utf-8') if os.path.exists(bibtex): try: bibdata = parser.parse_file(bibtex) except TokenRequired as e: raise BibtexError(e.get_context()) else: # StringIO requires unicode input # http://nedbatchelder.com/text/unipain.html assert type(bibtex) is str, "Bibtex strings must be unicode" with io.StringIO(bibtex) as stream: bibdata = parser.parse_stream(stream) self.keys = list(bibdata.entries.keys()) self.entries = list(bibdata.entries.values()) self.index = -1 self.max = len(self.entries) def __getitem__(self, index): key = self.keys[index] entry = self.entries[index] return Bibentry(key, entry) def __iter__(self): return self def __len__(self): return self.max def __next__(self): self.index = self.index + 1 if self.index == self.max: raise StopIteration return self[self.index] ################################################## class Bibentry(): """Represents an individual entry in a bibtex database. """ def __init__(self, key, entry): self.key = key self.entry = entry def get_authors(self): """Return a list of authors.""" authors = [] if 'author' in self.entry.persons: for p in self.entry.persons['author']: authors.append(clean_bib_string(str(p))) return authors def get_fields(self): """Return a dict of non-author fields.""" bibfields = self.entry.fields # entry.fields is actually already a dict, but we want to # clean the strings first fields = {} for field in bibfields: # Treat all keys as lowercase fields[field.lower()] = str(clean_bib_string(bibfields[field])) return fields def set_file(self, path): # FIXME: what's the REAL proper format for this self.entry.fields['file'] = ':%s:%s' % (path, 'pdf') def get_file(self): """Returns file path if file field exists. Expects either single path string or Mendeley/Jabref format. """ try: parsed = re.split(r'(? 
1: return parsed[1] else: return parsed[0] except KeyError: return None except IndexError: return None def _entry2db(self): db = pybtex.database.BibliographyData() db.add_entry(self.key, self.entry) return db def as_string(self): """Return entry as formatted bibtex string.""" writer = outparser.Writer() with io.StringIO() as stream: writer.write_stream(self._entry2db(), stream) string = stream.getvalue() string = string.strip() return string def to_file(self, path): """Write entry bibtex to file.""" writer = outparser.Writer(encoding='utf-8') writer.write_file(self._entry2db(), path) ################################################## def data2bib(data, key, type='article'): """Convert a python dict into a Bibentry object.""" if not data: return # need to remove authors field from data authors = None if 'authors' in data: authors = data['authors'] if isinstance(authors, str): authors = split_name_list(authors) if len(authors) == 1: authors = authors[0].split(',') del data['authors'] entry = Entry(type, fields=data) if authors: for p in authors: entry.add_person(Person(p), 'author') return Bibentry(key, entry).as_string() def json2bib(jsonstring, key, type='article'): """Convert a json string into a Bibentry object.""" if not json: return data = json.loads(jsonstring) # need to remove authors field from data authors = None if 'author' in data: authors = data['author'] del data['author'] if 'issued' in data: data['year'] = str(data['issued']['date-parts'][0][0]) del data['issued'] # delete other problematic fields if 'editor' in data: del data['editor'] entry = Entry(type, fields=data) if authors: for author in authors: entry.add_person(Person(first=author['given'], last=author['family']), 'author') return Bibentry(key, entry).as_string() xapers-0.9.0/xapers/cli.py000066400000000000000000000334411365520605200154560ustar00rootroot00000000000000""" This file is part of xapers. 
Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . Copyright 2012-2017 Jameson Rollins """ import os import sys import shutil import readline from . import database from .documents import Document from .source import Sources, SourceError from .parser import ParseError from .bibtex import Bibtex, BibtexError ############################################################ def initdb(writable=False, create=False, force=False): xroot = os.getenv('XAPERS_ROOT', os.path.expanduser(os.path.join('~','.xapers','docs'))) try: return database.Database(xroot, writable=writable, create=create, force=force) except database.DatabaseUninitializedError as e: print(e, file=sys.stderr) print("Import a document to initialize.", file=sys.stderr) sys.exit(1) except database.DatabaseInitializationError as e: print(e, file=sys.stderr) print("Either clear the directory and add new files, or use 'retore' to restore from existing data.", file=sys.stderr) sys.exit(1) except database.DatabaseError as e: print(e, file=sys.stderr) sys.exit(1) ############################################################ # readline completion class class Completer: def __init__(self, words): self.words = words def terms(self, prefix, index): matching_words = [ w for w in self.words if w.startswith(prefix) ] try: return matching_words[index] except IndexError: return None def prompt_for_file(infile): if infile: print('file: %s' % infile, file=sys.stderr) else: readline.set_startup_hook() readline.parse_and_bind('') 
readline.set_completer() infile = input('file: ') if infile == '': infile = None return infile def prompt_for_source(db, sources): if sources: readline.set_startup_hook(lambda: readline.insert_text(sources[0])) elif db: sources = list(db.term_iter('source')) readline.parse_and_bind("tab: complete") completer = Completer(sources) readline.set_completer(completer.terms) readline.set_completer_delims(' ') source = input('source: ') if source == '': source = None return source def prompt_for_tags(db, tags): # always prompt for tags, and append to initial if tags: print('initial tags: %s' % ' '.join(tags), file=sys.stderr) else: tags = [] if db: itags = list(db.term_iter('tag')) else: itags = None readline.set_startup_hook() readline.parse_and_bind("tab: complete") completer = Completer(itags) readline.set_completer(completer.terms) readline.set_completer_delims(' ') while True: tag = input('tag: ') if tag and tag != '': tags.append(tag.strip()) else: break return tags ############################################################ def print_doc_summary(doc): docid = doc.docid title = doc.get_title() if not title: title = '' tags = doc.get_tags() sources = doc.get_sids() key = doc.get_key() if not key: key = '' print("id:%d [%s] {%s} (%s) \"%s\"" % ( docid, ' '.join(sources), key, ' '.join(tags), title, )) ############################################################ def add(db, query_string, infile=None, sid=None, tags=None, prompt=False): doc = None bibtex = None sources = Sources() doc_sid = sid source = None file_data = None if infile and infile is not True: infile = os.path.expanduser(infile) ################################## # if query provided, find single doc to update if query_string: if db.count(query_string) != 1: print("Search '%s' did not match a single document." 
% query_string, file=sys.stderr) print("Aborting.", file=sys.stderr) sys.exit(1) for doc in db.search(query_string): break ################################## # do fancy option prompting if prompt: doc_sids = [] if doc_sid: doc_sids = [doc_sid] # scan the file for source info if infile is not True: infile = prompt_for_file(infile) print("Scanning document for source identifiers...", file=sys.stderr) try: ss = sources.scan_file(infile) except ParseError as e: print("\n", file=sys.stderr) print("Parse error: %s" % e, file=sys.stderr) sys.exit(1) if len(ss) == 0: print("0 source ids found.", file=sys.stderr) else: if len(ss) == 1: print("1 source id found:", file=sys.stderr) else: print("%d source ids found:" % (len(ss)), file=sys.stderr) for sid in ss: print(" %s" % (sid), file=sys.stderr) doc_sids += [s.sid for s in ss] doc_sid = prompt_for_source(db, doc_sids) tags = prompt_for_tags(db, tags) if not query_string and not infile and not doc_sid: print("Must specify file or source to import, or query to update existing document.", file=sys.stderr) sys.exit(1) ################################## # process source and get bibtex # check if source is a file, in which case interpret it as bibtex if doc_sid and os.path.exists(doc_sid): bibtex = doc_sid elif doc_sid: # get source object for sid string source = sources.match_source(doc_sid) if not source: sys.exit("String '{}' matches no known source.".format(doc_sid)) # check that the source doesn't match an existing doc sdoc = db.doc_for_source(source.sid) if sdoc: if doc and sdoc != doc: print("A different document already exists for source '%s'." % (doc_sid), file=sys.stderr) print("Aborting.", file=sys.stderr) sys.exit(1) print("Source '%s' found in database. Updating existing document..." 
% (doc_sid), file=sys.stderr) doc = sdoc try: print("Retrieving bibtex...", end=' ', file=sys.stderr) bibtex = source.fetch_bibtex() print("done.", file=sys.stderr) except SourceError as e: print("\n", file=sys.stderr) print("Could not retrieve bibtex: %s" % e, file=sys.stderr) sys.exit(1) if infile is True: try: print("Retrieving file...", end=' ', file=sys.stderr) file_name, file_data = source.fetch_file() print("done.", file=sys.stderr) except SourceError as e: print("\n", file=sys.stderr) print("Could not retrieve file: %s" % e, file=sys.stderr) sys.exit(1) elif infile is True: print("Must specify source with retrieve file option.", file=sys.stderr) sys.exit(1) if infile and not file_data: with open(infile, 'br') as f: file_data = f.read() file_name = os.path.basename(infile) ################################## # if we still don't have a doc, create a new one if not doc: doc = Document(db) ################################## # add stuff to the doc if bibtex: try: print("Adding bibtex...", end=' ', file=sys.stderr) doc.add_bibtex(bibtex) print("done.", file=sys.stderr) except BibtexError as e: print("\n", file=sys.stderr) print(e, file=sys.stderr) print("Bibtex must be a plain text file with a single bibtex entry.", file=sys.stderr) sys.exit(1) except: print("\n", file=sys.stderr) raise # add source sid if it hasn't been added yet if source and not doc.get_sids(): doc.add_sid(source.sid) if infile: try: print("Adding file...", end=' ', file=sys.stderr) doc.add_file_data(file_name, file_data) print("done.", file=sys.stderr) except ParseError as e: print("\n", file=sys.stderr) print("Parse error: %s" % e, file=sys.stderr) sys.exit(1) except: print("\n", file=sys.stderr) raise if tags: try: print("Adding tags...", end=' ', file=sys.stderr) doc.add_tags(tags) print("done.", file=sys.stderr) except: print("\n", file=sys.stderr) raise ################################## # sync the doc to db and disk try: print("Syncing document...", end=' ', file=sys.stderr) doc.sync() 
print("done.\n", end=' ', file=sys.stderr) except: print("\n", file=sys.stderr) raise print_doc_summary(doc) return doc.docid ############################################ def importbib(db, bibfile, tags=[], overwrite=False): errors = [] sources = Sources() for entry in sorted(Bibtex(bibfile), key=lambda entry: entry.key): print(entry.key, file=sys.stderr) try: docs = [] # check for doc with this bibkey bdoc = db.doc_for_bib(entry.key) if bdoc: docs.append(bdoc) # check for known sids for source in sources.scan_bibentry(entry): sdoc = db.doc_for_source(source.sid) # FIXME: why can't we match docs in list? if sdoc and sdoc.docid not in [doc.docid for doc in docs]: docs.append(sdoc) if len(docs) == 0: doc = Document(db) elif len(docs) > 0: if len(docs) > 1: print(" Multiple distinct docs found for entry. Using first found.", file=sys.stderr) doc = docs[0] print(" Updating id:%d..." % (doc.docid), file=sys.stderr) doc.add_bibentry(entry) filepath = entry.get_file() if filepath: print(" Adding file: %s" % filepath, file=sys.stderr) doc.add_file(filepath) doc.add_tags(tags) doc.sync() except BibtexError as e: print(" Error processing entry %s: %s" % (entry.key, e), file=sys.stderr) print(file=sys.stderr) errors.append(entry.key) if errors: print(file=sys.stderr) print("Failed to import %d" % (len(errors)), end=' ', file=sys.stderr) if len(errors) == 1: print("entry", end=' ', file=sys.stderr) else: print("entries", end=' ', file=sys.stderr) print("from bibtex:", file=sys.stderr) for error in errors: print(" %s" % (error), file=sys.stderr) sys.exit(1) else: sys.exit(0) ############################################ def search(db, query_string, oformat='summary', sort='relevance', limit=None): if query_string == '*' and oformat in ['tags','sources','keys']: if oformat == 'tags': for tag in db.tag_iter(): print(tag) elif oformat == 'sources': for sid in db.sid_iter(): print(sid) elif oformat == 'keys': for key in db.term_iter('key'): print(key) return otags = set([]) osources 
= set([]) for doc in db.search(query_string, sort=sort, limit=limit): if oformat in ['summary']: print_doc_summary(doc) continue elif oformat in ['file','files']: for path in doc.get_fullpaths(): print("%s" % (path)) continue elif oformat == 'bibtex': bibtex = doc.get_bibtex() if not bibtex: print("No bibtex for doc id:%d." % doc.docid, file=sys.stderr) else: print(bibtex) print() continue if oformat == 'tags': otags = otags | set(doc.get_tags()) elif oformat == 'sources': osources = osources | set(doc.get_sids()) elif oformat == 'keys': key = doc.get_key() if key: print(key) if oformat == 'tags': for tag in otags: print(tag) elif oformat == 'sources': for source in osources: print(source) ############################################ def export(db, outdir, query_string): try: os.makedirs(outdir) except: pass import pipes for doc in db.search(query_string): title = doc.get_title() origpaths = doc.get_fullpaths() nfiles = len(origpaths) for path in origpaths: if not title: name = os.path.basename(os.path.splitext(path)[0]) else: name = '%s' % (title.replace(' ','_')) ind = 0 if nfiles > 1: name += '.%s' % ind ind += 1 name += '.pdf' outpath = os.path.join(outdir,name) print(outpath) shutil.copyfile(path, outpath.encode('utf-8')) xapers-0.9.0/xapers/database.py000066400000000000000000000321001365520605200164420ustar00rootroot00000000000000""" This file is part of xapers. Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . 
Copyright 2012-2017 Jameson Rollins """ import os import sys import xapian from . import util from .source import Sources from .documents import Documents, Document # FIXME: add db schema documentation ################################################## class DatabaseError(Exception): """Base class for Xapers database exceptions.""" def __init__(self, msg): self.msg = msg def __str__(self): return self.msg class DatabaseUninitializedError(DatabaseError): pass class DatabaseInitializationError(DatabaseError): pass class DatabaseLockError(DatabaseError): pass DatabaseModifiedError = xapian.DatabaseModifiedError ################################################## class Database(): """Represents a Xapers database""" # http://xapian.org/docs/omega/termprefixes.html BOOLEAN_PREFIX = { 'id': 'Q', 'key': 'XBIB|', 'source': 'XSOURCE|', 'year': 'Y', 'y': 'Y', } # boolean prefixes for which there can be multiple per doc BOOLEAN_PREFIX_MULTI = { 'tag': 'K', } # purely internal prefixes BOOLEAN_PREFIX_INTERNAL = { # FIXME: use this for doi? 
#'url': 'U', 'file': 'P', # FIXME: use this for doc mime type 'type': 'T', } PROBABILISTIC_PREFIX = { 'title': 'S', 't': 'S', 'author': 'A', 'a': 'A', } # http://xapian.org/docs/facets NUMBER_VALUE_FACET = { 'year': 0, 'y': 0, } # FIXME: need to set the following value fields: # publication date # added date # modified date # FIXME: need database version def _find_prefix(self, name): # FIXME: make this a dictionary union if name in self.BOOLEAN_PREFIX: return self.BOOLEAN_PREFIX[name] if name in self.BOOLEAN_PREFIX_MULTI: return self.BOOLEAN_PREFIX_MULTI[name] if name in self.BOOLEAN_PREFIX_INTERNAL: return self.BOOLEAN_PREFIX_INTERNAL[name] if name in self.PROBABILISTIC_PREFIX: return self.PROBABILISTIC_PREFIX[name] def _find_facet(self, name): if name in self.NUMBER_VALUE_FACET: return self.NUMBER_VALUE_FACET[name] def _make_source_prefix(self, source): return 'X%s|' % (source.upper()) ######################################## def __init__(self, root, writable=False, create=False, force=False): # xapers root self.root = os.path.abspath(os.path.expanduser(root)) # xapers db directory xapers_path = os.path.join(self.root, '.xapers') # xapes directory initialization if not os.path.exists(xapers_path): if create: if os.path.exists(self.root): if os.listdir(self.root) and not force: raise DatabaseInitializationError('Uninitialized Xapers root directory exists but is not empty.') os.makedirs(xapers_path) else: if os.path.exists(self.root): raise DatabaseInitializationError("Xapers directory '%s' does not contain a database." % (self.root)) else: raise DatabaseUninitializedError("Xapers directory '%s' not found." 
% (self.root)) # the Xapian db xapian_path = os.path.join(xapers_path, 'xapian') if writable: try: self.xapian = xapian.WritableDatabase(xapian_path, xapian.DB_CREATE_OR_OPEN) except xapian.DatabaseLockError: raise DatabaseLockError("Xapers database locked.") else: self.xapian = xapian.Database(xapian_path) stemmer = xapian.Stem("english") # The Xapian TermGenerator # http://trac.xapian.org/wiki/FAQ/TermGenerator self.term_gen = xapian.TermGenerator() self.term_gen.set_stemmer(stemmer) # The Xapian QueryParser self.query_parser = xapian.QueryParser() self.query_parser.set_database(self.xapian) self.query_parser.set_stemmer(stemmer) self.query_parser.set_stemming_strategy(xapian.QueryParser.STEM_SOME) self.query_parser.set_default_op(xapian.Query.OP_AND) # add boolean internal prefixes for name, prefix in self.BOOLEAN_PREFIX.items(): self.query_parser.add_boolean_prefix(name, prefix) # for prefixes that can be applied multiply to the same # document (like tags) set the filter grouping to use AND: # https://xapian.org/docs/apidoc/html/classXapian_1_1QueryParser.html#a67d25f9297bb98c2101a03ff3d60cf30 for name, prefix in self.BOOLEAN_PREFIX_MULTI.items(): self.query_parser.add_boolean_prefix(name, prefix, False) # add probabalistic prefixes for name, prefix in self.PROBABILISTIC_PREFIX.items(): self.query_parser.add_prefix(name, prefix) # add value facets for name, facet in self.NUMBER_VALUE_FACET.items(): self.query_parser.add_valuerangeprocessor( xapian.NumberValueRangeProcessor(facet, name+':') ) # register known source prefixes # FIXME: can we do this by just finding all XSOURCE terms in # db? Would elliminate dependence on source modules at # search time. 
for source in Sources(): name = source.name self.query_parser.add_boolean_prefix(name, self._make_source_prefix(name)) def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.xapian.close() def reopen(self): self.xapian.reopen() def __contains__(self, docid): try: self.xapian.get_document(docid) return True except xapian.DocNotFoundError: return False def __getitem__(self, docid): if type(docid) not in [int, int]: raise TypeError("docid must be an int") xapian_doc = self.xapian.get_document(docid) return Document(self, xapian_doc) ######################################## # generate a new doc id, based on the last availabe doc id def _generate_docid(self): return self.xapian.get_lastdocid() + 1 ######################################## # return a list of terms for prefix def _term_iter(self, prefix=None): return util.xapian_term_iter(self.xapian, prefix) def term_iter(self, name=None): """Generator of all terms in the database. If a prefix is provided, will iterate over only the prefixed terms, and the prefix will be removed from the returned terms. 
""" prefix = None if name: prefix = self._find_prefix(name) if not prefix: prefix = name return self._term_iter(prefix) def sid_iter(self): """Generator of all source ids in database""" for source in self.term_iter('source'): # FIXME: do this more efficiently for oid in self._term_iter(self._make_source_prefix(source)): yield '%s:%s' % (source, oid) def get_sids(self): """Get all source ids in database as a list""" return [sid for sid in self.sid_iter()] def tag_iter(self): """Generator of all tags in database""" for tag in self.term_iter('tag'): yield tag def get_tags(self): """Get all tags in database as a list""" return [tag for tag in self.tag_iter()] ######################################## # search for documents based on query string and return mset def _search(self, query_string, sort='relevance', limit=None): enquire = xapian.Enquire(self.xapian) # FIXME: add option for ascending/descending if sort == 'relevance': enquire.set_sort_by_relevance_then_value(self.NUMBER_VALUE_FACET['year'], True) elif sort == 'year': enquire.set_sort_by_value_then_relevance(self.NUMBER_VALUE_FACET['year'], True) else: raise ValueError("sort parameter accepts only 'relevance' or 'year'") if query_string == "*": query = xapian.Query.MatchAll else: # parse the query string to produce a Xapian::Query object. query = self.query_parser.parse_query(query_string) if os.getenv('XAPERS_DEBUG_QUERY'): print("query string:", query_string, file=sys.stderr) print("final query:", query, file=sys.stderr) # FIXME: need to catch Xapian::Error when using enquire enquire.set_query(query) # set order of returned docs as newest first # FIXME: make this user specifiable enquire.set_docid_order(xapian.Enquire.DESCENDING) if limit: mset = enquire.get_mset(0, limit) else: mset = enquire.get_mset(0, self.xapian.get_doccount()) return mset def search(self, query_string, sort='relevance', limit=None): """Search for documents in the database. 
The `sort` keyword argument can be 'relevance' (default) or 'year'. `limit` can be used to limit the number of returned documents (default is None). """ mset = self._search(query_string, sort=sort, limit=limit) return Documents(self, mset) def count(self, query_string): """Count documents matching search terms.""" return self._search(query_string).get_matches_estimated() def _doc_for_term(self, term): enquire = xapian.Enquire(self.xapian) query = xapian.Query(term) enquire.set_query(query) mset = enquire.get_mset(0, 2) # FIXME: need to throw an exception if more than one match found if mset: return Document(self, mset[0].document) else: return None def doc_for_path(self, path): """Return document for specified path.""" term = self._find_prefix('file') + path return self._doc_for_term(term) def doc_for_source(self, sid): """Return document for source id string.""" source, oid = sid.split(':', 1) term = self._make_source_prefix(source) + oid return self._doc_for_term(term) def doc_for_bib(self, bibkey): """Return document for bibtex key.""" term = self._find_prefix('key') + bibkey return self._doc_for_term(term) ######################################## def replace_document(self, docid, doc): """Replace (sync) document to database.""" self.xapian.replace_document(docid, doc) def delete_document(self, docid): """Delete document from database.""" self.xapian.delete_document(docid) ######################################## def restore(self, log=False): """Restore a database from an existing root.""" docdirs = os.listdir(self.root) docdirs.sort() for ddir in docdirs: docdir = os.path.join(self.root, ddir) # skip things that aren't directories if not os.path.isdir(docdir): continue # if we can't convert the directory name into an integer, # assume it's not relevant to us and continue try: docid = int(ddir) except ValueError: continue if log: print(docdir, file=sys.stderr) docfiles = os.listdir(docdir) if not docfiles: # skip empty directories continue if log: print(' 
docid:', docid, file=sys.stderr) try: doc = self[docid] except xapian.DocNotFoundError: doc = Document(self, docid=docid) for dfile in docfiles: dpath = os.path.join(docdir, dfile) if dfile == 'bibtex': if log: print(' adding bibtex', file=sys.stderr) doc.add_bibtex(dpath) elif dfile == 'tags': if log: print(' adding tags', file=sys.stderr) with open(dpath, 'r') as f: tags = f.read().strip().split('\n') doc.add_tags(tags) else: #elif os.path.splitext(dpath)[1] == '.pdf': if log: print(' adding file:', dfile, file=sys.stderr) doc.add_file(dpath) doc.sync() xapers-0.9.0/xapers/documents.py000066400000000000000000000356001365520605200167070ustar00rootroot00000000000000""" This file is part of xapers. Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . Copyright 2012-2017 Jameson Rollins """ import os import shutil import xapian from . 
import util from .parser import parse_data from .source import Sources from .bibtex import Bibtex ################################################## class DocumentError(Exception): """Base class for Xapers document exceptions.""" def __init__(self, msg): self.msg = msg def __str__(self): return self.msg ################################################## class Documents(): """Represents a set of Xapers documents given a Xapian mset.""" def __init__(self, db, mset): self.db = db self.mset = mset self.index = -1 self.max = len(mset) def __getitem__(self, index): m = self.mset[index] doc = Document(self.db, m.document) doc.matchp = m.percent return doc def __iter__(self): return self def __len__(self): return self.max def __next__(self): self.index = self.index + 1 if self.index == self.max: raise StopIteration return self[self.index] ################################################## class Document(): """Represents a single Xapers document.""" def __init__(self, db, xapian_doc=None, docid=None): # Xapers db self.db = db # if Xapian doc provided, initiate for that document if xapian_doc: self.xapian_doc = xapian_doc self.docid = xapian_doc.get_docid() # else, create a new empty document # document won't be added to database until sync is called else: self.xapian_doc = xapian.Document() # use specified docid if provided if docid: if docid in self.db: raise DocumentError('Document already exists for id %d.' 
% docid) self.docid = docid else: self.docid = self.db._generate_docid() self._add_term(self.db._find_prefix('id'), self.docid) # specify a directory in the Xapers root for document data self.docdir = os.path.join(self.db.root, '%010d' % self.docid) self.bibentry = None self._infiles = {} def get_docid(self): """Return document id of document.""" return self.docid ######################################## def _make_docdir(self): if os.path.exists(self.docdir): if not os.path.isdir(self.docdir): raise DocumentError('File exists at intended docdir location: %s' % self.docdir) else: os.makedirs(self.docdir) def _write_files(self): for name, data in self._infiles.items(): path = os.path.join(self.docdir, name) with open(path, 'bw') as f: f.write(data) def _write_bibfile(self): bibpath = self.get_bibpath() # reload bibtex only if we have new files paths = self.get_fullpaths() if paths: self._load_bib() if self.bibentry: # we put only the first file in the bibtex # FIXME: does jabref/mendeley spec allow for multiple files? 
if paths and not self.bibentry.get_file(): self.bibentry.set_file(paths[0]) self.bibentry.to_file(bibpath) def _write_tagfile(self): with open(os.path.join(self.docdir, 'tags'), 'w') as f: for tag in self.get_tags(): f.write(tag) f.write('\n') def _rm_docdir(self): if os.path.exists(self.docdir) and os.path.isdir(self.docdir): shutil.rmtree(self.docdir) def sync(self): """Sync document to database.""" # FIXME: add value for modification time # FIXME: catch db not writable errors try: self._make_docdir() self._write_files() self._write_bibfile() self._write_tagfile() self.db.replace_document(self.docid, self.xapian_doc) except: self._rm_docdir() raise def purge(self): """Purge document from database and root.""" # FIXME: catch db not writable errors try: self.db.delete_document(self.docid) except xapian.DocNotFoundError: pass self._rm_docdir() self.docid = None ######################################## # internal stuff # add an individual prefix'd term for the document def _add_term(self, prefix, value): term = '%s%s' % (prefix, value) self.xapian_doc.add_term(term) # remove an individual prefix'd term for the document def _remove_term(self, prefix, value): term = '%s%s' % (prefix, value) try: self.xapian_doc.remove_term(term) except xapian.InvalidArgumentError: pass # Parse 'text' and add a term to 'message' for each parsed # word. Each term will be added both prefixed (if prefix is not # None) and non-prefixed. # http://xapian.org/docs/bindings/python/ # http://xapian.org/docs/quickstart.html # http://www.flax.co.uk/blog/2009/04/02/xapian-search-architecture/ def _gen_terms(self, prefix, text): term_gen = self.db.term_gen term_gen.set_document(self.xapian_doc) if prefix: term_gen.index_text(text, 1, prefix) term_gen.index_text(text) # return a list of terms for prefix def _term_iter(self, prefix=None): return util.xapian_term_iter(self.xapian_doc, prefix) def term_iter(self, name=None): """Iterator over all terms in the document. 
If a prefix is provided, will iterate over only the prefixed terms, and the prefix will be removed from the returned terms. """ prefix = None if name: prefix = self.db._find_prefix(name) if not prefix: prefix = name return self._term_iter(prefix) # set the data object for the document def _set_data(self, text): self.xapian_doc.set_data(text) def get_data(self): """Get data object for document.""" return self.xapian_doc.get_data() ######################################## # files def add_file_data(self, name, data): """Add a file data to document. 'name' is the name of the file, 'data' is the file data, e.g. a string of text extracted from the document. File will not copied into docdir until sync(). """ # FIXME: set mime type term # parse the file data into text text = parse_data(data) # generate terms from the text self._gen_terms(None, text) # set data to be text sample # FIXME: is this the right thing to put in the data? summary = text[0:997] + '...' self._set_data(summary) # FIXME: should files be renamed to something generic (0.pdf)? prefix = self.db._find_prefix('file') self._add_term(prefix, name) # add it to the cache to be written at sync() self._infiles[name] = data def add_file(self, infile): """Add a file to document. Added file will have the same name. File will not copied in to docdir until sync(). 
""" with open(infile, 'br') as f: data = f.read() name = os.path.basename(infile) self.add_file_data(name, data) def get_files(self): """Return files associated with document.""" return list(self.term_iter('file')) def get_fullpaths(self): """Return fullpaths of files associated with document.""" list = [] for path in self.get_files(): # FIXME: this is a hack for old path specifications that # included the docdir path = os.path.basename(path) list.append(os.path.join(self.docdir, path)) return list ######################################## # SOURCES def _purge_sources_prefix(self, source): # purge all terms for a given source prefix prefix = self.db._make_source_prefix(source) for i in self._term_iter(prefix): self._remove_term(prefix, i) self._remove_term(self.db._find_prefix('source'), source) def add_sid(self, sid): """Add source sid to document.""" source, oid = sid.split(':', 1) source = source.lower() # remove any existing terms for this source self._purge_sources_prefix(source) # add a term for the source self._add_term(self.db._find_prefix('source'), source) # add a term for the sid, with source as prefix self._add_term(self.db._make_source_prefix(source), oid) def get_sids(self): """Return a list of sids for document.""" sids = [] for source in self.term_iter('source'): for oid in self._term_iter(self.db._make_source_prefix(source)): sids.append('%s:%s' % (source, oid)) return sids # TAGS def add_tags(self, tags): """Add tags from list to document.""" prefix = self.db._find_prefix('tag') for tag in tags: self._add_term(prefix, tag) def get_tags(self): """Return a list of tags associated with document.""" return list(self.term_iter('tag')) def remove_tags(self, tags): """Remove tags from a document.""" prefix = self.db._find_prefix('tag') for tag in tags: self._remove_term(prefix, tag) # TITLE def _set_title(self, title): pt = self.db._find_prefix('title') for term in self._term_iter(pt): self._remove_term(pt, term) # FIXME: what's the clean way to get these 
prefixes? for term in self._term_iter('ZS'): self._remove_term('ZS', term) self._gen_terms(pt, title) # AUTHOR def _set_authors(self, authors): pa = self.db._find_prefix('author') for term in self._term_iter(pa): self._remove_term(pa, term) # FIXME: what's the clean way to get these prefixes? for term in self._term_iter('ZA'): self._remove_term('ZA', term) self._gen_terms(pa, authors) # YEAR def _set_year(self, year): # FIXME: what to do if year is not an int? try: year = int(year) except ValueError: return prefix = self.db._find_prefix('year') for term in self._term_iter(prefix): self._remove_term(prefix, year) self._add_term(prefix, year) facet = self.db._find_facet('year') self.xapian_doc.add_value(facet, xapian.sortable_serialise(year)) ######################################## # bibtex def get_bibpath(self): """Return path to document bibtex file.""" return os.path.join(self.docdir, 'bibtex') def _set_bibkey(self, key): prefix = self.db._find_prefix('key') for term in self._term_iter(prefix): self._remove_term(prefix, term) self._add_term(prefix, key) def _index_bibentry(self, bibentry): authors = bibentry.get_authors() fields = bibentry.get_fields() if 'title' in fields: self._set_title(fields['title']) if 'year' in fields: self._set_year(fields['year']) if authors: # authors should be a list, so we make a single text string # FIXME: better way to do this? 
self._set_authors(' '.join(authors)) # add any sources in the bibtex for source in Sources().scan_bibentry(bibentry): self.add_sid(source.sid) # FIXME: index 'keywords' field as regular terms self._set_bibkey(bibentry.key) def add_bibentry(self, bibentry): """Add bibentry object.""" self.bibentry = bibentry self._index_bibentry(self.bibentry) def add_bibtex(self, bibtex): """Add bibtex to document, as string or file path.""" self.add_bibentry(Bibtex(bibtex)[0]) def _load_bib(self): if self.bibentry: return bibpath = self.get_bibpath() if os.path.exists(bibpath): self.bibentry = Bibtex(bibpath)[0] def get_bibtex(self): """Get the bib for document as a bibtex string.""" bibpath = self.get_bibpath() if os.path.exists(bibpath): with open(bibpath, 'r') as f: bibtex = f.read() return bibtex.strip() def get_bibdata(self): self._load_bib() if self.bibentry: data = self.bibentry.get_fields() data['authors'] = self.bibentry.get_authors() return data def update_from_bibtex(self): """Update document metadata from document bibtex.""" self._load_bib() self._index_bibentry(self.bibentry) ######################################## def get_key(self): """Get the document key.""" # FIXME: get from facet not bib self._load_bib() if not self.bibentry: return return self.bibentry.key def get_title(self): """Get document full title from bibtex.""" self._load_bib() if not self.bibentry: return fields = self.bibentry.get_fields() if 'title' in fields: return fields['title'] def get_authors(self): """Get document author(s) from bibtex.""" self._load_bib() if not self.bibentry: return return self.bibentry.get_authors() def get_year(self): """Get document year from bibtex.""" # FIXME: get from facet not bib self._load_bib() if not self.bibentry: return fields = self.bibentry.get_fields() if 'year' in fields: return fields['year'] def get_urls(self): """Get all URLs associated with document.""" sources = Sources() urls = [] # get urls associated with known sources for sid in self.get_sids(): 
urls.append(sources[sid].url) # get urls from bibtex self._load_bib() if self.bibentry: fields = self.bibentry.get_fields() if 'url' in fields: urls.append(fields['url']) if 'adsurl' in fields: urls.append(fields['adsurl']) return urls xapers-0.9.0/xapers/nci/000077500000000000000000000000001365520605200151015ustar00rootroot00000000000000xapers-0.9.0/xapers/nci/__init__.py000066400000000000000000000015651365520605200172210ustar00rootroot00000000000000""" This file is part of xapers. Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . 
Copyright 2012-2017 Jameson Rollins """ import os import logging if os.getenv('XAPERS_LOG_FILE'): logging.basicConfig(filename=os.getenv('XAPERS_LOG_FILE'), level=logging.DEBUG) from .ui import UI xapers-0.9.0/xapers/nci/bibview.py000066400000000000000000000017411365520605200171050ustar00rootroot00000000000000import urwid ############################################################ class Bibview(urwid.Frame): def __init__(self, ui, query): self.ui = ui htxt = [urwid.Text("Bibview: " + query)] header = urwid.AttrMap(urwid.Columns(htxt), 'header') string = '' db = self.ui.db if db.count(query) == 0: self.ui.set_status('No documents found.') else: for doc in db.search(query, limit=20): bibtex = doc.get_bibtex() if bibtex: string += '\n' + bibtex + '\n' content = [urwid.Text(s) for s in string.split('\n')] body = urwid.ListBox(urwid.SimpleListWalker(content)) super(Bibview, self).__init__(body, header=header) def help(self): return [] def keypress(self, size, key): if key == ' ': return self.get_body().keypress(size, 'page down') return super(Bibview, self).keypress(size, key) xapers-0.9.0/xapers/nci/defaults/000077500000000000000000000000001365520605200167105ustar00rootroot00000000000000xapers-0.9.0/xapers/nci/defaults/bindings000066400000000000000000000004431365520605200204310ustar00rootroot00000000000000[global] ?: help s: promptSearch S: promptSearch A: promptAdd q: killBuffer Q: quit [search] n: nextEntry p: prevEntry] enter: viewFile u: viewURL b: viewBibtex +: addTags -: removeTags u: update a: archive meta i: copyID meta f: copyPath meta u: copyURL meta b: copyBibtex f: filterSearch xapers-0.9.0/xapers/nci/help.py000066400000000000000000000032341365520605200164050ustar00rootroot00000000000000import urwid ############################################################ class Help(urwid.Frame): def __init__(self, ui, target=None): self.ui = ui if target: tname = target.__class__.__name__ htxt = [urwid.Text("Help: " + tname)] else: htxt = [urwid.Text("Help")] header = 
urwid.AttrMap(urwid.Columns(htxt), 'header') pile = [] # format command help line def fch(k, h): return urwid.Columns([('fixed', 10, urwid.Text(k)), urwid.Text(h), ]) if target: for k, h in target.help(): if not k: pile.append(urwid.Text('')) pile.append(urwid.Text('')) pile.append(urwid.Text(h)) pile.append(urwid.Text('')) else: pile.append(fch(k, h)) pile.append(urwid.Text('')) pile.append(urwid.Text('')) pile.append(urwid.Text('Global commands:')) pile.append(urwid.Text('')) for k, cmd in self.ui.keys.items(): f = getattr(ui, cmd) h = str(getattr(f, '__doc__')) pile.append(fch(k, h)) body = urwid.ListBox(urwid.SimpleListWalker(pile)) super(Help, self).__init__(body, header=header) def keypress(self, size, key): # ignore help in help if key == '?': return if key == ' ': return self.get_body().keypress(size, 'page down') return super(Help, self).keypress(size, key) def help(self): return [] xapers-0.9.0/xapers/nci/search.py000066400000000000000000000432761365520605200167340ustar00rootroot00000000000000""" This file is part of xapers. Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . 
def xdg_open(path):
    """Open *path* with the desktop's default handler (xdg-open).

    The child's stdio is redirected to the null device so xdg-open
    chatter cannot corrupt the curses display.
    """
    # Previously this passed a *read-only* open(os.devnull) handle as
    # the child's stdout/stderr; subprocess.DEVNULL is the correct,
    # leak-free way to discard child output.
    subprocess.Popen(['xdg-open', path],
                     stdin=subprocess.DEVNULL,
                     stdout=subprocess.DEVNULL,
                     stderr=subprocess.DEVNULL)


def xclip(text):
    """Copy text into X clipboard."""
    p = subprocess.Popen(["xclip", "-i"], stdin=subprocess.PIPE)
    p.communicate(text.encode('utf-8'))
bibdata.items(): if 'title' == field: field_data[field] = value elif 'authors' == field: field_data[field] = ' and '.join(value[:4]) if len(value) > 4: field_data[field] += ' et al.' elif 'year' == field: field_data[field] = value # FIXME: this translation should not be done here if field_data['journal'] == '': if 'journal' == field: field_data['journal'] = value elif 'container-title' == field: field_data['journal'] = value elif 'arxiv' == field: field_data['journal'] = 'arXiv.org' elif 'dcc' == field: field_data['journal'] = 'LIGO DCC' urls = doc.get_urls() if urls: field_data['source'] = urls[0] summary = doc.get_data() if not summary: summary = 'NO FILE' field_data['summary'] = summary def gen_field_row(field, value): if field in ['journal', 'year', 'source', 'bibkey']: color = 'journal' elif field in ['file']: color = 'field' else: color = field return urwid.Columns([ ('fixed', c1width, urwid.Text(('field', field + ':'))), urwid.Text((color, value)), ]) self.tag_field = urwid.Text(field_data['tags']) header = urwid.AttrMap(urwid.Columns([ ('fixed', c1width, urwid.Text('%s' % (self.docid))), urwid.AttrMap(self.tag_field, 'tags'), urwid.Text('%s%% match (%s/%s)' % (doc.matchp, doc_ind, total_docs), align='right'), ]), 'head') pile = [urwid.AttrMap(urwid.Divider(' '), '', ''), header] + \ [gen_field_row(field, field_data[field]) for field in self.FIELDS] for f in doc.get_files(): pile += [gen_field_row('file', os.path.basename(f))] w = urwid.AttrMap(urwid.AttrMap(urwid.Pile(pile), 'field'), '', {'head': 'head focus', 'field': 'field focus', 'tags': 'tags focus', 'title': 'title focus', 'authors': 'authors focus', 'journal': 'journal focus', }, ) self.__super.__init__(w) def keypress(self, size, key): if key in self.keys: cmd = eval("self.{}".format(self.keys[key])) cmd() else: return key #################### def viewFile(self): """open document file""" paths = self.doc.get_fullpaths() if not paths: self.ui.set_status('No file for document {}.'.format(self.docid)) 
return for path in paths: if not os.path.exists(path): self.ui.error('{}: file not found.'.format(self.docid)) else: self.ui.set_status('opening file: {}...'.format(path)) xdg_open(path) def viewURL(self): """open document source URL in browser""" urls = self.doc.get_urls() if not urls: self.ui.set_status('No URLs for document {}.'.format(self.docid)) return # FIXME: open all instead of just first? url = urls[0] self.ui.set_status('opening url: {}...'.format(url)) xdg_open(url) def viewBibtex(self): """view document bibtex""" self.ui.newbuffer(['bibview', self.docid]) def copyID(self): """copy document ID to clipboard""" xclip(self.docid) self.ui.set_status('yanked docid: {}'.format(self.docid)) def copyKey(self): """copy document bibtex key to clipboard""" bibkey = self.doc.get_key() if not bibkey: self.ui.set_status('No bibtex key for document {}.'.format(self.docid)) return xclip(bibkey) self.ui.set_status('yanked bibtex key: {}'.format(bibkey)) def copyPath(self): """copy document file path to clipboard""" path = self.doc.get_fullpaths()[0] if not path: self.ui.set_status('No files for document {}.'.format(self.docid)) return xclip(path) self.ui.set_status('yanked file path: {}'.format(path)) def copyURL(self): """copy document source URL to clipboard""" urls = self.doc.get_urls() if not urls: self.ui.set_status('No URLs for document {}.'.format(self.docid)) return # FIXME: copy all instead of just first? url = urls[0] xclip(url) self.ui.set_status('yanked source url: {}'.format(url)) def copyBibtex(self): """copy document bibtex to clipboard""" bibtex = self.doc.get_bibtex() if not bibtex: self.ui.set_status('No bibtex for document {}.'.format(self.docid)) return xclip(bibtex) self.ui.set_status('yanked bibtex: %s...' 
% bibtex.split('\n')[0]) def addTags(self): """add tags to document (space separated)""" self.promptTag('+') def removeTags(self): """remove tags from document (space separated)""" self.promptTag('-') def promptTag(self, sign=None): """apply tags to document (space separated, +add/-remove)""" prompt = "tag document {} (+add -remove): ".format(self.docid) initial = '' if sign == '+': initial = '+' elif sign == '-': initial = '-' elif sign: raise ValueError("sign must be '+' or '-'.") if sign == '-': completions = self.doc.get_tags() else: completions = self.ui.db.get_tags() self.ui.prompt((self.applyTags, []), prompt, initial=initial, completions=completions, history=self.ui.tag_history) def applyTags(self, tag_string): if not tag_string: self.ui.set_status("No tags set.") return try: with initdb(writable=True) as db: doc = db[self.doc.docid] for tag in tag_string.split(): if tag[0] == '+': if tag[1:]: doc.add_tags([tag[1:]]) elif tag[0] == '-': doc.remove_tags([tag[1:]]) else: doc.add_tags([tag]) doc.sync() msg = "applied tags: {}".format(tag_string) tags = doc.get_tags() self.tag_field.set_text(' '.join(tags)) if self.ui.tag_history and tag_string == self.ui.tag_history[-1]: pass else: self.ui.tag_history.append(tag_string) except DatabaseLockError as e: msg = e.msg self.ui.db.reopen() self.ui.set_status(msg) ############################################################ class DocWalker(urwid.ListWalker): def __init__(self, ui, docs): self.ui = ui self.docs = docs self.ndocs = len(docs) self.focus = 0 self.items = {} def __getitem__(self, pos): if pos < 0: raise IndexError if pos not in self.items: self.items[pos] = DocItem(self.ui, self.docs[pos], pos+1, self.ndocs) return self.items[pos] def set_focus(self, focus): if focus == -1: focus = self.ndocs - 1 self.focus = focus self._modified() def next_position(self, pos): return pos + 1 def prev_position(self, pos): return pos - 1 ############################################################ class Search(urwid.Frame): 
keys = collections.OrderedDict([ ('n', "nextEntry"), ('down', "nextEntry"), ('p', "prevEntry"), ('up', "prevEntry"), ('N', "pageDown"), ('page down', "pageDown"), (' ', "pageDown"), ('P', "pageUp"), ('page up', "pageUp"), ('<', "firstEntry"), ('>', "lastEntry"), ('a', "archive"), ('o', "toggleSort"), ('l', "filterSearch"), ('meta S', "copySearch"), ('B', "viewBibtex"), ('T', "promptTag"), ('=', "refresh"), ]) __sort = collections.deque(['relevance', 'year']) def __init__(self, ui, query=None): self.ui = ui self.query = query super(Search, self).__init__(urwid.SolidFill()) if not self.ui.search_history or query != self.ui.search_history[-1]: self.ui.search_history.append(query) self.__set_search() @property def sort_order(self): return self.__sort[0] def __set_search(self): try: count = self.ui.db.count(self.query) except DatabaseModifiedError: self.ui.db.reopen() if count == 0: self.ui.set_status('No documents found.') docs = [] else: docs = [doc for doc in self.ui.db.search(self.query, sort=self.sort_order)] if count == 1: cstring = "%d result" % (count) else: cstring = "%d results" % (count) htxt = [('pack', urwid.Text("Search: ")), ('pack', urwid.AttrMap(urwid.Text("%s" % (self.query), align='left'), 'header_args')), ('pack', urwid.Text(" [{}]".format(self.sort_order))), urwid.Text(cstring, align='right'), ] header = urwid.AttrMap(urwid.Columns(htxt), 'header') self.set_header(header) self.lenitems = count self.docwalker = DocWalker(self.ui, docs) self.listbox = urwid.ListBox(self.docwalker) body = self.listbox self.set_body(body) def keypress(self, size, key): # reset the status on key presses self.ui.set_status() entry, pos = self.listbox.get_focus() # key used if keypress returns None if entry and not entry.keypress(size, key): return # check if we can use key elif key in self.keys: cmd = eval("self.%s" % (self.keys[key])) cmd(size, key) # else we didn't use key so return else: return key def help(self): def get_keys(o): for k, cmd in list(o.keys.items()): 
yield (k, str(getattr(getattr(o, cmd), '__doc__'))) yield (None, "Document commands:") for o in get_keys(DocItem): yield o yield (None, "Search commands:") for o in get_keys(Search): yield o ########## def refresh(self, size, key): """refresh current search""" self.ui.db.reopen() self.__set_search() # FIXME: try to reset position to closet place in search, # rather than resetting to the top def toggleSort(self, size, key): """toggle search sort order between year/relevance""" self.__sort.rotate() self.__set_search() def filterSearch(self, size, key): """modify current search or add additional terms""" prompt = 'filter search: {} '.format(self.query) self.ui.prompt((self.filterSearch_done, []), prompt) def filterSearch_done(self, newquery): if not newquery: self.ui.set_status() return self.ui.newbuffer(['search', self.query, newquery]) def nextEntry(self, size, key): """next entry""" entry, pos = self.listbox.get_focus() if not entry: return if pos + 1 >= self.lenitems: return self.listbox.set_focus(pos + 1) def prevEntry(self, size, key): """previous entry""" entry, pos = self.listbox.get_focus() if not entry: return if pos == 0: return self.listbox.set_focus(pos - 1) def pageDown(self, size, key): """page down""" self.listbox.keypress(size, 'page down') # self.listbox.set_focus_valign('bottom') # self.prevEntry(None, None) def pageUp(self, size, key): """page up""" self.listbox.keypress(size, 'page up') # self.listbox.set_focus_valign('top') def lastEntry(self, size, key): """last entry""" self.listbox.set_focus(-1) def firstEntry(self, size, key): """first entry""" self.listbox.set_focus(0) def archive(self, size, key): """archive document (remove 'new' tag) and advance""" entry = self.listbox.get_focus()[0] if not entry: return entry.applyTags('-new') self.nextEntry(None, None) def copySearch(self, size, key): """copy current search string to clipboard""" xclip(self.query) self.ui.set_status('yanked search: {}'.format(self.query)) def viewBibtex(self, size, 
key): """view bibtex for all documents in current search""" self.ui.newbuffer(['bibview', self.query]) def promptTag(self, size, key): """tag all documents in current search""" prompt = "tag all (+add -remove): " initial = '' completions = self.ui.db.get_tags() self.ui.prompt((self.applyTags, []), prompt, initial=initial, completions=completions, history=self.ui.tag_history) def applyTags(self, tag_string): """apply tags to current search (space separated, +add/-remove)""" if not tag_string: self.ui.set_status("No tags set.") return tags_add = [] tags_sub = [] for tag in tag_string.split(): if tag[0] == '+': tags_add.append(tag[1:]) elif tag[0] == '-': tags_sub.append(tag[1:]) else: tags_add.append(tag) try: with initdb(writable=True) as db: count = db.count(self.query) for doc in db.search(self.query): doc.add_tags(tags_add) doc.remove_tags(tags_sub) doc.sync() msg = "applied tags to {} docs: {}".format(count, tag_string) if not self.ui.tag_history or tag_string != self.ui.tag_history[-1]: self.ui.tag_history.append(tag_string) except DatabaseLockError as e: msg = e.msg self.refresh(None, None) self.ui.set_status(msg) xapers-0.9.0/xapers/nci/ui.py000066400000000000000000000241731365520605200160770ustar00rootroot00000000000000""" This file is part of xapers. Xapers is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Xapers is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xapers. If not, see . Copyright 2012-2017 Jameson Rollins """ import sys import urwid import logging import collections from ..cli import initdb from . import search from . 
import bibview from . import help ############################################################ PALETTE = [ ('header', 'light gray', 'dark blue'), ('header_args', 'white', 'dark blue'), ('footer', 'light gray', 'dark blue'), ('prompt', 'black', 'dark green'), ] class UI(): keys = collections.OrderedDict([ ('s', "promptSearch"), ('q', "killBuffer"), ('Q', "quit"), ('?', "help"), ]) default_status_string = "s: new search, q: close buffer, Q: quit, ?: help" buffers = [] search_history = [] tag_history = [] def __init__(self, cmd=None): self.db = initdb() # FIXME: set this properly self.palette = list(set(PALETTE) | set(search.PALETTE)) self.view = urwid.Frame(urwid.SolidFill()) self.set_status() self.mainloop = urwid.MainLoop( self.view, self.palette, unhandled_input=self.keypress, handle_mouse=False, ) self.mainloop.screen.set_terminal_properties(colors=88) if not cmd: cmd = ['search', 'tag:new'] self.newbuffer(cmd) self.mainloop.run() ########## def set_status(self, text=None): if text: T = [urwid.Text(text)] else: T = [('pack', urwid.Text('Xapers [{}]'.format(len(self.buffers)))), urwid.Text(self.default_status_string, align='right'), ] self.view.set_footer(urwid.AttrMap(urwid.Columns(T), 'footer')) def newbuffer(self, cmd): if not cmd: cmd = ['search', '*'] if cmd[0] == 'search': query = ' '.join(cmd[1:]) buf = search.Search(self, query) elif cmd[0] == 'bibview': query = ' '.join(cmd[1:]) buf = bibview.Bibview(self, query) elif cmd[0] == 'help': target = None if len(cmd) > 1: target = cmd[1] if isinstance(target, str): target = None buf = help.Help(self, target) else: buf = help.Help(self) self.set_status("Unknown command '%s'." 
% (cmd[0])) self.buffers.append(buf) self.view.set_body(buf) self.set_status() def killBuffer(self): """close current buffer""" if len(self.buffers) == 1: return self.buffers.pop() buf = self.buffers[-1] self.view.set_body(buf) self.set_status() self.mainloop.draw_screen() def prompt(self, final, *args, **kwargs): """user prompt final is a (func, args) tuple to be executed upon complection: func(text, *args) further args and kwargs are passed to PromptEdit """ pe = PromptEdit(*args, **kwargs) urwid.connect_signal(pe, 'done', self.prompt_done, final) self.view.set_footer(urwid.AttrMap(pe, 'prompt')) self.view.set_focus('footer') def prompt_done(self, text, final): self.view.set_focus('body') urwid.disconnect_signal(self, self.prompt, 'done', self.prompt_done) (func, args) = final func(text, *args) ########## def promptSearch(self): """search database""" prompt = 'search: ' self.prompt((self.promptSearch_done, []), prompt, history=self.search_history) def promptSearch_done(self, query): if not query: self.set_status() return self.newbuffer(['search', query]) def quit(self): """quit""" sys.exit() def help(self): """help""" self.newbuffer(['help', self.buffers[-1]]) def keypress(self, key): if key in self.keys: cmd = "self.%s()" % (self.keys[key]) eval(cmd) ############################################################ class PromptEdit(urwid.Edit, metaclass=urwid.signals.MetaSignals): signals = ['done'] def __init__(self, prompt, initial=None, completions=None, history=None): super(PromptEdit, self).__init__(caption=prompt) if initial: self.insert_text(initial) self.completions = completions self.completion_data = {} self.history = history self.history_pos = -1 self.last_text = '' def keypress(self, size, key): if self.last_text and self.edit_text != self.last_text: self.completion_data.clear() self.history_pos = -1 if key == 'enter': urwid.emit_signal(self, 'done', self.get_edit_text()) return elif key in ['esc', 'ctrl g']: urwid.emit_signal(self, 'done', None) return # 
navigation elif key == 'ctrl a': # move to beginning key = 'home' elif key == 'ctrl e': # move to end key = 'end' elif key == 'ctrl b': # back character self.set_edit_pos(self.edit_pos-1) elif key == 'ctrl f': # forward character self.set_edit_pos(self.edit_pos+1) elif key == 'meta b': # back word text = self.edit_text pos = self.edit_pos - 1 inword = False while True: try: text[pos] except IndexError: break if text[pos] != ' ' and not inword: inword = True continue if inword: if text[pos] == ' ': break pos -= 1 self.set_edit_pos(pos+1) elif key == 'meta f': # forward word text = self.edit_text pos = self.edit_pos inword = False while True: try: text[pos] except IndexError: break if text[pos] != ' ' and not inword: inword = True continue if inword: if text[pos] == ' ': break pos += 1 self.set_edit_pos(pos+1) # deletion elif key == 'ctrl d': # delete character text = self.edit_text pos = self.edit_pos ntext = text[:pos] + text[pos+1:] self.set_edit_text(ntext) elif key == 'ctrl k': # delete to end self.set_edit_text(self.edit_text[:self.edit_pos]) # history elif key in ['up', 'ctrl p']: if self.history: if self.history_pos == -1: self.history_full = self.history + [self.edit_text] try: self.history_pos -= 1 self.set_edit_text(self.history_full[self.history_pos]) self.set_edit_pos(len(self.edit_text)) except IndexError: self.history_pos += 1 elif key in ['down', 'ctrl n']: if self.history: if self.history_pos != -1: self.history_pos += 1 self.set_edit_text(self.history_full[self.history_pos]) self.set_edit_pos(len(self.edit_text)) # tab completion elif key == 'tab' and self.completions: # tab complete on individual words # retrieve current text and position text = self.edit_text pos = self.edit_pos # find the completion prefix tpos = pos - 1 while True: try: if text[tpos] == ' ': tpos += 1 break except IndexError: break tpos -= 1 prefix = text[tpos:pos] # FIXME: this prefix stripping should not be done here prefix = prefix.lstrip('+-') # find the end of the word tpos 
class ParseError(Exception):
    """Raised when a document file cannot be converted to text."""
    pass


def parse_data(data, mimetype='pdf'):
    """Parse binary data of specified mime type into text (str)

    The parser is looked up dynamically as the module
    xapers.parsers.<mimetype>, which must provide a parse() function.
    """
    try:
        mod = __import__('xapers.parsers.' + mimetype, fromlist=['Parser'])
        parse_func = getattr(mod, 'parse')
    except ImportError:
        raise ParseError("Unsupported mime type '%s'." % mimetype)
    try:
        return parse_func(data)
    except Exception as e:
        raise ParseError("Could not parse file: %s" % e)


def parse_file(path):
    """Parse file for text (str)"""
    # FIXME: determine mime type
    if not os.path.exists(path):
        raise ParseError("File '%s' not found." % path)
    if not os.path.isfile(path):
        raise ParseError("File '%s' is not a regular file." % path)
    with open(path, 'rb') as f:
        return parse_data(f.read())
class SourceError(Exception):
    """Base error for source lookup/handling failures."""
    pass


class SourceAttributeError(SourceError):
    """A source module is missing a required attribute or function."""

    def __init__(self, source, msg):
        self.source = source
        self.msg = msg

    def __str__(self):
        return "Source '%s' does not implement the %s." % (self.source.name, self.msg)


class Source(object):
    """Xapers class representing an online document source.

    The Source object is build from a source nickname (name) and
    possibly user-defined source module.
    """

    def __init__(self, name, module):
        self.name = name
        self.module = module

    def __repr__(self):
        return '%s(%s, %s)' % (self.__class__, self.name, self.module)

    def __str__(self):
        return self.name

    def __getitem__(self, id):
        return SourceItem(self, id)

    @property
    def path(self):
        return self.module.__file__

    @property
    def is_builtin(self):
        # a source is builtin when its module file lives under the
        # bundled xapers.sources package directory
        bpath = os.path.dirname(sources.__file__)
        spath = os.path.dirname(self.path)
        return os.path.commonprefix([bpath, spath]) == bpath

    @property
    def description(self):
        try:
            return self.module.description
        except AttributeError:
            raise SourceAttributeError(self, "'description' property")

    @property
    def url(self):
        try:
            return self.module.url
        except AttributeError:
            raise SourceAttributeError(self, "'url' property")

    @property
    def url_regex(self):
        try:
            return self.module.url_regex
        except AttributeError:
            raise SourceAttributeError(self, "'url_regex' property")

    @property
    def scan_regex(self):
        try:
            return self.module.scan_regex
        except AttributeError:
            raise SourceAttributeError(self, "'scan_regex' property")

    def fetch_bibtex(self, id):
        """Fetch the bibtex entry for id via the source module."""
        try:
            func = self.module.fetch_bibtex
        except AttributeError as e:
            raise SourceAttributeError(self, "fetch_bibtex() function") from e
        return func(id)

    def fetch_file(self, id):
        """Fetch the document file for id via the source module.

        Returns whatever the module's fetch_file() returns (typically
        a (name, data) tuple).
        """
        # BUG FIX: the old code *called* self.module.fetch_file(id)
        # while looking the attribute up, then called its return value
        # again (func(id)) -- fetching twice at best, raising TypeError
        # at worst.  Look the function up first, then call it once.
        try:
            func = self.module.fetch_file
        except AttributeError as e:
            raise SourceAttributeError(self, "fetch_file() function") from e
        return func(id)
representing an item from an online source. """ def __init__(self, source, id): super(SourceItem, self).__init__(source.name, source.module) self.id = id self.sid = '%s:%s' % (self.name, self.id) def __repr__(self): s = super(SourceItem, self).__repr__() return '%s(%s, %s)' % (self.__class__, s, self.id) def __hash__(self): return hash(self.sid) def __eq__(self, other): if isinstance(other, self.__class__): return self.sid == other.sid return NotImplemented def __ne__(self, other): return not self.__eq__(other) def __str__(self): return self.sid @property def url(self): try: return self.module.url_format % self.id except AttributeError: raise SourceAttributeError(self, "'url_format' property") def fetch_bibtex(self): return super(SourceItem, self).fetch_bibtex(self.id) def fetch_file(self): return super(SourceItem, self).fetch_file(self.id) ################################################## class Sources(object): def __init__(self): self.sourcespath = sources.__path__ extra = os.getenv('XAPERS_SOURCE_PATH', None) if extra: for path in extra.split(':'): if path: self.sourcespath.insert(0, path) else: self.sourcespath.insert(0, os.path.expanduser(os.path.join('~','.xapers','sources'))) self._sources = {} for (loader, name, ispkg) in pkgutil.walk_packages(self.sourcespath): if ispkg: continue #self._modules[name] = loader.find_module(name).load_module(name) module = loader.find_module(name).load_module(name) self._sources[name] = Source(name, module) def __repr__(self): return '%s(%s)' % (self.__class__, self.sourcespath) def get_source(self, name, id=None): try: source = self._sources[name] except KeyError: raise SourceError("unknown source: %s" % name) if id: return source[id] else: return source def __contains__(self, source): return source in self._sources def __getitem__(self, sid): name = None id = None try: vals = sid.split(':') except ValueError: raise SourceError("could not parse sid string") name = vals[0] if len(vals) > 1: id = vals[1] return 
self.get_source(name, id) def __iter__(self): return iter(self._sources.values()) def match_source(self, string): """Return Source object from URL or source identifier string. Return None for no match. """ o = urlparse(string) # if the scheme is http, look for source match if o.scheme in ['http', 'https']: for source in self: try: regex = source.url_regex except SourceAttributeError: # FIXME: warning? continue match = re.match(regex, string) if match: return source[match.group(1)] elif o.scheme != '' and o.path != '': return self.get_source(o.scheme, o.path) def scan_file(self, file): """Scan document file for source identifiers Source 'scan_regex' attributes are used. Returns a list of SourceItem objects. """ text = parse_file(file) items = set() for source in self: try: regex = re.compile(source.scan_regex) except SourceAttributeError: # FIXME: warning? continue matches = regex.findall(text) if not matches: continue for match in matches: items.add(source[match]) return list(items) def scan_bibentry(self, bibentry): """Scan bibentry for source identifiers. Bibentry keys are searched for source names, and bibentry values are assumed to be individual identifier strings. Returns a list of SourceItem objects. """ fields = bibentry.get_fields() items = set() for field, value in fields.items(): field = field.lower() if field in self: items.add(self.get_source(field, value)) # FIXME: how do we get around special exception for this? 
# html parser that scrapes citation_* <meta> tags out of an arXiv
# abstract page; after feed() the title, author list, year and arXiv id
# are available as attributes
class MyHTMLParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.lefthead = False   # set once </head> has been seen
        self.title = None
        self.author = []
        self.year = None
        self.sid = None

    def handle_starttag(self, tag, attrs):
        # only <meta> tags inside <head> carry citation data
        if self.lefthead or tag != 'meta':
            return
        field = None
        for key, val in attrs:
            if key == 'name':
                # remember which citation field this meta tag names
                if val == 'citation_title':
                    field = 'title'
                elif val == 'citation_author':
                    field = 'author'
                elif val == 'citation_date':
                    field = 'date'
                elif val == 'citation_arxiv_id':
                    field = 'sid'
            elif key == 'content':
                if field == 'title':
                    self.title = val
                elif field == 'author':
                    self.author.append(val)
                elif field == 'date':
                    self.year = val.split('/')[0]
                elif field == 'sid':
                    self.sid = val

    def handle_endtag(self, tag):
        if tag == 'head':
            self.lefthead = True
'http://arxiv.org/pdf/%s' % id f = urllib.request.urlopen(url) data = f.read() f.close() name = '%s.pdf' % id return name, data xapers-0.9.0/xapers/sources/cryptoeprint.py000066400000000000000000000031451365520605200211320ustar00rootroot00000000000000import urllib.request, urllib.parse, urllib.error from html.parser import HTMLParser import codecs description = "Cryptology ePrint Archive" url = "https://eprint.iacr.org/" url_format = 'https://eprint.iacr.org/%s' url_regex = 'https?://eprint.iacr.org/(\d{4,}/\d{3,})' # don't know what a scan_regex looks like for IACR eprints. i don't # think there is one, because i think the submission process happens # after the pdf is formalized. # custom definitions for IACR eprints: bibtex_url = 'https://eprint.iacr.org/eprint-bin/cite.pl?entry=%s' pdf_url = 'https://eprint.iacr.org/%s.pdf' # html parser override to override handler methods class IACRParser(HTMLParser): def __init__(self): HTMLParser.__init__(self) self.pre = False self.data = None def handle_starttag(self, tag, attrs): if (tag == 'pre'): self.pre = True def handle_endtag(self, tag): if (tag == 'pre'): self.pre = False def handle_data(self, data): if (self.pre): self.data = data def fetch_bibtex(id): url = bibtex_url % id f = urllib.request.urlopen(url) html = f.read() ct = [x for x in map(str.strip, f.getheader('content-type').split(';')) if x.startswith('charset=')] if ct: charset = ct[-1].split('=')[1] else: charset = 'iso8859-1' f.close() html = codecs.decode(html, charset) p = IACRParser() p.feed(html) return p.data def fetch_file(id): url = pdf_url % id f = urllib.request.urlopen(url) pdf = f.read() f.close() return (id.split('/').pop() + '.pdf', pdf) xapers-0.9.0/xapers/sources/dcc.py000066400000000000000000000045721365520605200171260ustar00rootroot00000000000000import sys import pycurl import io import tempfile from xapers.bibtex import data2bib description = "LIGO Document Control Center" url = 'https://dcc.ligo.org/' url_format = 
'https://dcc.ligo.org/%s' url_regex = 'https://dcc.ligo.org/(?:LIGO-)?([^/]*)' def dccRetrieveXML(docid): url = 'https://dcc.ligo.org/Shibboleth.sso/Login?target=https%3A%2F%2Fdcc.ligo.org%2Fcgi-bin%2Fprivate%2FDocDB%2FShowDocument?docid=' + docid + '%26outformat=xml&entityID=https%3A%2F%2Flogin.ligo.org%2Fidp%2Fshibboleth' curl = pycurl.Curl() cookies = tempfile.NamedTemporaryFile() curl.setopt(pycurl.URL, url) curl.setopt(pycurl.UNRESTRICTED_AUTH, 1) curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_GSSNEGOTIATE) curl.setopt(pycurl.COOKIEJAR, cookies.name) curl.setopt(pycurl.USERPWD, ':') curl.setopt(pycurl.FOLLOWLOCATION, 1) doc = io.StringIO() curl.setopt(pycurl.WRITEFUNCTION, doc.write) try: curl.perform() except: import traceback traceback.print_exc(file=sys.stderr) sys.stderr.flush() xml = doc.getvalue() curl.close() cookies.close() doc.close() return xml def dccXMLExtract(xmlstring): from xml.dom.minidom import parse, parseString xml = parseString(xmlstring) etitle = xml.getElementsByTagName("title")[0].firstChild if etitle: title = etitle.data else: title = None alist = xml.getElementsByTagName("author") authors = [] for author in alist: authors.append(author.getElementsByTagName("fullname")[0].firstChild.data) eabstract = xml.getElementsByTagName("abstract")[0].firstChild if eabstract: abstract = eabstract.data else: abstract = None # FIXME: find year/date year = None return title, authors, year, abstract def fetch_bibtex(id): xml = dccRetrieveXML(id) try: title, authors, year, abstract = dccXMLExtract(xml) except: print(xml, file=sys.stderr) raise data = { 'institution': 'LIGO Laboratory', 'number': id, 'dcc': id, 'url': url_format % id } if title: data['title'] = title if authors: data['authors'] = authors if abstract: data['abstract'] = abstract if year: data['year'] = year key = 'dcc:%s' % id btype = '@techreport' return data2bib(data, key, type=btype) 
xapers-0.9.0/xapers/sources/doi.py000066400000000000000000000024001365520605200171340ustar00rootroot00000000000000import urllib.request, urllib.error, urllib.parse description = "Digital Object Identifier" url = 'https://dx.doi.org/' # produces URL string when supplied with valid source identifier url_format = 'https://dx.doi.org/%s' id_regex = '(10\.\d{4,}[\w\d\:\.\-\/]+)' # for regex matching a supplied URL. match group 1 should return the # source identifier string url_regex = 'https?://dx.doi.org/(10\.\d{4,}[\w\d\:\.\-\/]+)' # for regex scanning of document text #scan_regex = '[doi|DOI][\s\.\:]{0,2}(10\.\d{4}[\d\:\.\-\/a-z]+)[A-Z\s]' #scan_regex = '\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])[[:graph:]])+)\b' #scan_regex = '(doi|DOI)(10[.][0-9]{4,}(?:[.][0-9]+)*[\/\.](?:(?!["&\'<>])[[:graph:]])+)' #scan_regex = '(?:doi|DOI)[\s\.\:]{0,2}(10\.\d{4,}[\w\d\:\.\-\/]+)' scan_regex = '(?:doi|DOI)[\s\.\:]{0,2}' + id_regex # function to fetch a bibtex entry for a given source identifier def fetch_bibtex(id): # http://www.crosscite.org/cn/ url = url_format % id req = urllib.request.Request(url) req.add_header('Accept', 'application/x-bibtex') req.add_header('Accept-Charset', 'utf-8') f = urllib.request.urlopen(req) # DECODE the returned byte string to get a unicode string bibtex = f.read().decode('utf-8') f.close return bibtex xapers-0.9.0/xapers/util.py000066400000000000000000000014371365520605200156640ustar00rootroot00000000000000def xapian_term_iter(xapian_object, prefix=None): """Prefix term iterator for xapian objects `xapian_object` can be either a full database or a single document. 
Iterates over all terms, or just those with prefix if specified """ term_iter = iter(xapian_object) if prefix: plen = len(prefix) # https://www.python.org/dev/peps/pep-0479/ try: term = term_iter.skip_to(prefix).term.decode() except StopIteration: return if not term.startswith(prefix): return yield term[plen:] for tli in term_iter: term = tli.term.decode() if prefix: if not term.startswith(prefix): break yield term[plen:] else: yield term xapers-0.9.0/xapers/version.py000066400000000000000000000000261365520605200163650ustar00rootroot00000000000000__version__ = '0.8.2'