splitter-r653/ - project and build configuration files:

  .classpath, .project                   Eclipse project metadata (Java builder, IvyDE nature)
  build.xml, ivy.xml, ivysettings.xml    Ant build script and Ivy dependency settings
  splitter.iml, splitter.ipr             IntelliJ IDEA module and project files
  .idea/                                 IntelliJ IDEA settings (compiler.xml, dataSources.xml, encodings.xml,
                                         excludeFromValidation.xml, fileColors.xml, misc.xml, modules.xml,
                                         projectCodeStyle.xml, sqldialects.xml, templateLanguages.xml,
                                         uiDesigner.xml, validation.xml, vcs.xml, copyright/gnu_2_3.xml,
                                         copyright/gnu_3.xml, copyright/profiles_settings.xml,
                                         inspectionProfiles/Project_Default.xml)
splitter-r653/.idea/inspectionProfiles/profiles_settings.xml0000664000175300017530000000126314352507253025663 0ustar builderbuilder00000000000000 splitter-r653/.idea/runConfigurations/0000775000175300017530000000000014352507253021234 5ustar builderbuilder00000000000000splitter-r653/doc/0000775000175300017530000000000014352507253015302 5ustar builderbuilder00000000000000splitter-r653/doc/LICENSE-LGPL.txt0000664000175300017530000001674314352507253017674 0ustar builderbuilder00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. 
You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. 
The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. splitter-r653/doc/LICENSE-apache-2.0.txt0000664000175300017530000002613614352507253020651 0ustar builderbuilder00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. splitter-r653/doc/LICENSE-gpl-3.0.txt0000664000175300017530000010451314352507253020207 0ustar builderbuilder00000000000000 GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The GNU General Public License is a free, copyleft license for software and other kinds of works. The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. 
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. 6. Conveying Non-Source Forms. You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. 
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. "Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. 7. Additional Terms. "Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or d) Limiting the use for publicity purposes of names of licensors or authors of the material; or e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. 8. Termination. You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. 9. 
Acceptance Not Required for Having Copies. You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. 10. Automatic Licensing of Downstream Recipients. Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. 11. Patents. A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version". A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. 
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. 12. No Surrender of Others' Freedom. If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. 13. Use with the GNU Affero General Public License. Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. 
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. 14. Revised Versions of this License. The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. 15. Disclaimer of Warranty. THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. Limitation of Liability. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 17. Interpretation of Sections 15 and 16. If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. 
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode: Copyright (C) This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see . The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . splitter-r653/doc/LICENSE-xpp3.txt0000664000175300017530000000421714352507253020021 0ustar builderbuilder00000000000000Indiana University Extreme! Lab Software License Version 1.1.1 Copyright (c) 2002 Extreme! Lab, Indiana University. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The end-user documentation included with the redistribution, if any, must include the following acknowledgment: "This product includes software developed by the Indiana University Extreme! Lab (http://www.extreme.indiana.edu/)." Alternately, this acknowledgment may appear in the software itself, if and wherever such third-party acknowledgments normally appear. 4. The names "Indiana Univeristy" and "Indiana Univeristy Extreme! Lab" must not be used to endorse or promote products derived from this software without prior written permission. For written permission, please contact http://www.extreme.indiana.edu/. 5. Products derived from this software may not use "Indiana Univeristy" name nor may "Indiana Univeristy" appear in their name, without prior written permission of the Indiana University. 
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS, COPYRIGHT HOLDERS OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. splitter-r653/doc/LICENSE.readme0000664000175300017530000000055414352507253017547 0ustar builderbuilder00000000000000 The code of the project itself is under GPL-3 This project depends on several externally developed jar files. The licenses for each jar included in the lib directory is summarised below. The full text of each license is included in this directory. xpp3 - xpp3 fastutil.jar - apache-2.0 testng.jar - apache-2.0 protobuf.jar - apache-2.0 osmpbf.jar - LGPL-3+ splitter-r653/doc/intro.txt0000664000175300017530000001100614352507253017174 0ustar builderbuilder00000000000000 * The size of each tile is modified depending on the density of features. So tiles can remain large for the most part but shrink down in areas of high density. * Tiles meet exactly and do not overlap. * Tiles are split on boundries that are exact at a low zoom level in Garmin units, ensuring that the tiles do not overlap or have gaps in the overview map. * Lines crossing the boundry are cut into two pieces exactly on the boundry. Part of the line will be in one tile, the rest in the other tile. There is no overlap. * The same is true for polygons. They are cut with a straight line. * If a line crosses a corner of a tile but does not have a node inside the tile it will still appear. * Includes relations. * The size of each tile is modified depending on the density of features. So tiles can remain large for the most part but shrink down in areas of high density. * Tiles meet exactly and - using the saved boundng box - do not overlap. * The overlap (in garmin units) determines how many * Tiles are split on boundaries that are exact at a low zoom level in Garmin units, ensuring that the tiles do not overlap or have gaps in the overview map. * Lines crossing the boundary are cut into two pieces exactly on the boundary. * * * Algorithm with default parms (r 200): * Pass 1: - For all nodes in the OSM data: calculate the desensity - For all ways and relations: do nothing (just read and count them) - Calculate the split areas so that none contains more than max-nodes (default: 1.600.000) * Pass 2: - For all nodes: calculate the area(s) that the node has to be written to. Since the default value for overlap is 2000, some nodes are written into up to 4 different areas (tiles). Write the node to the calculated areas. - For all ways: lookup the areas of the all nodes. Write the way (id, tags, and node ids) to all areas found and save the information to which areas the way was written to. - For all relations: lookup the areas of the all nodes and ways (sub-relations are ignored). Write the relation (id, tags, and member ids) to all these areas. * New Algorithm with default parms (r > 200): * - Read user defined list of ways and relations that are known to cause trouble ("problem rels" and "problem ways"). 
Copy the "problem way" list to the "needed way" list * Pass 1: - For all nodes in the OSM data: calculate the desensity - For all ways and relations: do nothing (if reader allows it: ignore them) - Calculate the split areas so that none contains more than max-nodes (default: 1.600.000) * Pass 2a: - Read and save all relations - Recursively add all sub relations of the user given problem relations to the list of problem relations - Forget relations that are not in the problem list - For all way members in the problem relation list: add the ids to the list of "needed ways" - For all node members in the problem relation list: add the ids to the list of "needed nodes" * Pass 2b: - For all ways, if the way is in the "needed way" list: add the nodes to the list of "needed nodes" * Pass 2c: - For all nodes: if the node is in the "needed nodes" list: save the coords - For all ways: if the way is in the "needed way" list: Use the coords of the nodes to calculate the bounding box of the way and find all tile areas that are intersecting with it. Save those areas. Save also the way nodes for pass 2d * Pass 2d: - For all relations in the "problem rel" list: Calculate the bounding box of the relation and find the tile areas that intersect with it. For each member of the relation: Merge the list of the members' tile areas with the list of the relations' tile area. * Pass 3: - For all nodes: calculate the area(s) that the node has to be written to. Since the default value for overlap is 2000, some nodes are written into up to 4 different areas (tiles). Write the node to the calculated areas. If the node is in the "needed nodes" list: Write the node also to the areas only calculated in pass 2d - For all ways: If the way is in the "needed way" list: Write the way to the areas calculated in pass 2d, else lookup the areas of the all nodes and write the way (id, tags, and node ids) to all areas found and save the information to which areas the way was written to. - For all relations: if the relation is in "problem rel" list: Write the rel to the areas calculated in pass 2d, else lookup the areas of the all nodes and ways (sub-relations are ignored). Write the relation (id, tags, and member ids) to all these areas. splitter-r653/doc/splitter.10000664000175300017530000003727014352507253017243 0ustar builderbuilder00000000000000'\" -*- coding: us-ascii -*- .if \n(.g .ds T< \\FC .if \n(.g .ds T> \\F[\n[.fam]] .de URL \\$2 \(la\\$1\(ra\\$3 .. .if \n(.g .mso www.tmac .TH mkgmap-splitter 1 "9 January 2015" "" "" .SH NAME mkgmap-splitter \- tile splitter for mkgmap .SH SYNOPSIS 'nh .fi .ad l \fBmkgmap-splitter\fR \kx .if (\nx>(\n(.l/2)) .nr x (\n(.l/5) 'in \n(.iu+\nxu [\fIoptions\fR] \fIfile.osm\fR 'in \n(.iu-\nxu .ad b 'hy > \fI\*(T<\fIsplitter.log\fR\*(T>\fR .SH DESCRIPTION \fBmkgmap-splitter\fR splits an .osm file that contains large well mapped regions into a number of smaller tiles, to fit within the maximum size used for the Garmin maps format. .PP The two most important features are: .TP 0.2i \(bu Variable sized tiles to prevent a large number of tiny files. .TP 0.2i \(bu Tiles join exactly with no overlap or gaps. .PP You will need a lot of memory on your computer if you intend to split a large area. A few options allow configuring how much memory you need. With the default parameters, you need about 2 bytes for every node and way. 
This doesn't sound a lot but there are about 4300 million nodes in the whole planet file (Jan 2018) and so you cannot process the whole planet in one pass on a 32 bit machine using this utility, as the maximum java heap space is 2G. It is possible with 64 bit java and about 10GB of heap or with multiple passes. .PP On the other hand a single country, even a well mapped one such as Germany or the UK, will be possible on a modest machine, even a netbook. .SH USAGE Splitter requires java 1.6 or higher. Basic usage is as follows. .PP .nf \*(T< \fBmkgmap\-splitter\fR \fI\fIfile.osm\fR\fR > \fI\fIsplitter.log\fR\fR \*(T> .fi .PP If you have less than 2 GB of memory on your computer you should reduce the \*(T<\fB\-Xmx\fR\*(T> option by setting the JAVA_OPTS environment variable. .PP .nf \*(T< JAVA_OPTS="\fI\-Xmx512m\fR" \fBmkgmap\-splitter\fR \fI\fIfile.osm\fR\fR > \fI\fIsplitter.log\fR\fR \*(T> .fi .PP This will produce a number of .osm.pbf files that can be read by \fBmkgmap\fR(1). There are also other files produced: .PP The \*(T<\fItemplate.args\fR\*(T> file is a file that can be used with the \*(T<\fB\-c\fR\*(T> option of \fBmkgmap\fR that will compile all the files. You can use it as is or you can edit it to include your own options. For example instead of each description being "OSM Map" it could be "NW Scotland" as appropriate. .PP The \*(T<\fIareas.list\fR\*(T> file is the list of bounding boxes that were calculated. If you want, you can use this on subsequent calls of splitter using the \*(T<\fB\-\-split\-file\fR\*(T> option to use exactly the same areas as last time. This might be useful if you produce a map regularly and want to keep the tile areas the same from month to month. It is also useful to avoid the time it takes to regenerate the file each time (currently about a third of the overall time taken to perform the split). Of course if the map grows enough that one of the tiles overflows, you will have to re-calculate the areas again. .PP The \*(T<\fIareas.poly\fR\*(T> file contains the bounding polygon of the calculated areas. See option \*(T<\fB\-\-polygon\-file\fR\*(T> how this can be used. .PP The \*(T<\fIdensities\-out.txt\fR\*(T> file is written when no split-file is given and contains debugging information only. .PP You can also use a gzip'ed or bz2'ed compressed .osm file as the input file. Note that this can slow down the splitter considerably (particularly true for bz2) because decompressing the .osm file can take quite a lot of CPU power. If you are likely to be processing a file several times you're probably better off converting the file to one of the binary formats pbf or o5m. The o5m format is faster to read, but requires more space on the disk. .SH OPTIONS There are a number of options to fine tune things that you might want to try. .TP \*(T<\fB\-\-boundary\-tags=\fR\*(T>\fIstring\fR A comma separated list of tag values for relations. Used to filter multipolygon and boundary relations for problem-list processing. See also option \-\-wanted\-admin\-level. Default: use-exclude-list .TP \*(T<\fB\-\-cache=\fR\*(T>\fIstring\fR Deprecated, now does nothing .TP \*(T<\fB\-\-description=\fR\*(T>\fIstring\fR Sets the desciption to be written in to the \*(T<\fItemplate.args\fR\*(T> file. .TP \*(T<\fB\-\-geonames\-file=\fR\*(T>\fIstring\fR The name of a GeoNames file to use for determining tile names. Typically \*(T<\fIcities15000.zip\fR\*(T> from .URL http://download.geonames.org/export/dump geonames \&. 
.TP \*(T<\fB\-\-keep\-complete=\fR\*(T>\fIboolean\fR Use \*(T<\fB\-\-keep\-complete=false\fR\*(T> to disable two additional program phases between the split and the final distribution phase (not recommended). The first phase, called gen-problem-list, detects all ways and relations that are crossing the borders of one or more output files. The second phase, called handle-problem-list, collects the coordinates of these ways and relations and calculates all output files that are crossed or enclosed. The information is passed to the final dist-phase in three temporary files. This avoids broken polygons, but be aware that it requires to read the input files at least two additional times. Do not specify it with \*(T<\fB\-\-overlap\fR\*(T> unless you have a good reason to do so. Defaulte: true .TP \*(T<\fB\-\-mapid=\fR\*(T>\fIint\fR Set the filename for the split files. In the example the first file will be called \*(T<\fI63240001.osm.pbf\fR\*(T> and the next one will be \*(T<\fI63240002.osm.pbf\fR\*(T> and so on. Default: 63240001 .TP \*(T<\fB\-\-max\-areas=\fR\*(T>\fIint\fR The maximum number of areas that can be processed in a single pass during the second stage of processing. This must be a number from 1 to 9999. Higher numbers mean fewer passes over the source file and hence quicker overall processing, but also require more memory. If you find you are running out of memory but don't want to increase your \*(T<\fB\-\-max\-nodes\fR\*(T> value, try reducing this instead. Changing this will have no effect on the result of the split, it's purely to let you trade off memory for performance. Note that the first stage of the processing has a fixed memory overhead regardless of what this is set to so if you are running out of memory before the \*(T<\fIareas.list\fR\*(T> file is generated, you need to either increase your \*(T<\fB\-Xmx\fR\*(T> value or reduce the size of the input file you're trying to split. Default: 2048 .TP \*(T<\fB\-\-max\-nodes=\fR\*(T>\fIint\fR The maximum number of nodes that can be in any of the resultant files. The default is fairly conservative, you could increase it quite a lot before getting any 'map too big' messages. Not much experimentation has been done. Also the bigger this value, the less memory is required during the splitting stage. Default: 1600000 .TP \*(T<\fB\-\-max\-threads=\fR\*(T>\fIvalue\fR The maximum number of threads used by \fBmkgmap-splitter\fR. Default: 4 (auto) .TP \*(T<\fB\-\-mixed=\fR\*(T>\fIboolean\fR Specify this if the input osm file has nodes, ways and relations intermingled or the ids are not strictly sorted. To increase performance, use the \fBosmosis\fR sort function. Default: false .TP \*(T<\fB\-\-no\-trim=\fR\*(T>\fIboolean\fR Don't trim empty space off the edges of tiles. This option is ignored when \*(T<\fB\-\-polygon\-file\fR\*(T> is used. Default: false .TP \*(T<\fB\-\-num\-tiles=\fR\*(T>\fIvalue\fR\*(T<\fBstring\fR\*(T> A target value that is used when no split-file is given. Splitting is done so that the given number of tiles is produced. The \*(T<\fB\-\-max\-nodes\fR\*(T> value is ignored if this option is given. .TP \*(T<\fB\-\-output=\fR\*(T>\fIstring\fR The format in which the output files are written. Possible values are xml, pbf, o5m, and simulate. The default is pbf, which produces the smallest file sizes. The o5m format is faster to write, but creates around 40% larger files. The simulate option is for debugging purposes. 
.TP \*(T<\fB\-\-output\-dir=\fR\*(T>\fIpath\fR The directory to which splitter should write the output files. If the specified path to a directory doesn't exist, \fBmkgmap-splitter\fR tries to create it. Defaults to the current working directory. .TP \*(T<\fB\-\-overlap=\fR\*(T>\fIstring\fR Deprecated since r279. With \*(T<\fB\-\-keep\-complete=false\fR\*(T>, \fBmkgmap-splitter\fR should include nodes outside the bounding box, so that \fBmkgmap\fR can neatly crop exactly at the border. This parameter controls the size of that overlap. It is in map units, a default of 2000 is used which means about 0.04 degrees of latitude or longitude. If \*(T<\fB\-\-keep\-complete=true\fR\*(T> is active and \*(T<\fB\-\-overlap\fR\*(T> is given, a warning will be printed because this combination rarely makes sense. .TP \*(T<\fB\-\-polygon\-desc\-file=\fR\*(T>\fIpath\fR An osm file (.o5m, .pbf, .osm) with named ways that describe bounding polygons with OSM ways having tags name and mapid. .TP \*(T<\fB\-\-polygon\-file=\fR\*(T>\fIpath\fR The name of a file containing a bounding polygon in the .URL "" "osmosis polygon file format" \&. \fBmkgmap-splitter\fR uses this file when calculating the areas. It first calculates a grid using the given \*(T<\fB\-\-resolution\fR\*(T>. The input file is read and for each node, a counter is increased for the related grid area. If the input file contains a bounding box, this is applied to the grid so that nodes outside of the bounding box are ignored. Next, if specified, the bounding polygon is used to zero those grid elements outside of the bounding polygon area. If the polygon file describes one or more rectilinear areas with no more than 40 vertices, \fBmkgmap-splitter\fR will try to create output files that fit exactly into each area, otherwise it will approximate the polygon area with rectangles. .TP \*(T<\fB\-\-precomp\-sea=\fR\*(T>\fIpath\fR The name of a directory containing precompiled sea tiles. If given, \fBmkgmap-splitter\fR will use the precompiled sea tiles in the same way as \fBmkgmap\fR does. Use this if you want to use a polygon-file or \*(T<\fB\-\-no\-trim=true\fR\*(T> and \fBmkgmap\fR creates empty *.img files combined with a message starting "There is not enough room in a single garmin map for all the input data". .TP \*(T<\fB\-\-problem\-file=\fR\*(T>\fIpath\fR The name of a file containing ways and relations that are known to cause problems in the split process. Use this option if \*(T<\fB\-\-keep\-complete\fR\*(T> requires too much time or memory and \*(T<\fB\-\-overlap\fR\*(T> doesn't solve your problem. Syntax of problem file: .nf \*(T< way: # comment... rel: # comment... \*(T> .fi example: .nf \*(T< way:2784765 # Ferry Guernsey \- Jersey \*(T> .fi .TP \*(T<\fB\-\-problem\-report=\fR\*(T>\fIpath\fR The name of a file to write the generated problem list created with \*(T<\fB\-\-keep\-complete\fR\*(T>. The parameter is ignored if \*(T<\fB\-\-keep\-complete=false\fR\*(T>. You can reuse this file with the \*(T<\fB\-\-problem\-file\fR\*(T> parameter, but do this only if you use the same values for \*(T<\fB\-\-max\-nodes\fR\*(T> and \*(T<\fB\-\-resolution\fR\*(T>. .TP \*(T<\fB\-\-resolution=\fR\*(T>\fIint\fR The resolution of the density map produced during the first phase. A value between 1 and 24. Default is 13. Increasing the value to 14 requires four times more memory in the split phase. The value is ignored if a \*(T<\fB\-\-split\-file\fR\*(T> is given. .TP \*(T<\fB\-\-search\-limit=\fR\*(T>\fIint\fR Search limit in split algo. 
Higher values may find better splits, but will take longer. Default: 200000 .TP \*(T<\fB\-\-split\-file=\fR\*(T>\fIpath\fR Use the previously calculated tile areas instead of calculating them from scratch. The file can be in .list or .kml format. .TP \*(T<\fB\-\-status\-freq=\fR\*(T>\fIint\fR Displays the amount of memory used by the JVM every \*(T<\fB\-\-status\-freq\fR\*(T> seconds. Set =0 to disable. Default: 120 .TP \*(T<\fB\-\-stop\-after=\fR\*(T>\fIstring\fR Debugging: stop after a given program phase. Can be split, gen-problem-list, or handle-problem-list. Default is dist which means execute all phases. .TP \*(T<\fB\-\-wanted\-admin\-level=\fR\*(T>\fIint\fR Specifies the lowest admin_level value of boundary relations that should be kept complete. Used to filter boundary relations for problem-list processing. The default value 5 means that boundary relations are kept complete when the admin_level is 5 or higher (5..11). The parameter is ignored if \*(T<\fB\-\-keep\-complete=false\fR\*(T>. Default: 5 .TP \*(T<\fB\-\-write\-kml=\fR\*(T>\fIpath\fR The name of a kml file to write out the areas to. This is in addition to \*(T<\fIareas.list\fR\*(T> (which is always written out). .PP Special options .TP \*(T<\fB\-\-version\fR\*(T> If the parameter \*(T<\fB\-\-version\fR\*(T> is found somewhere in the options, \fBmkgmap-splitter\fR will just print the version info and exit. Version info looks like this: .nf \*(T< splitter 279 compiled 2013\-01\-12T01:45:02+0000 \*(T> .fi .TP \*(T<\fB\-\-help\fR\*(T> If the parameter \*(T<\fB\-\-help\fR\*(T> is found somewhere in the options, \fBmkgmap-splitter\fR will print a list of all known normal options together with a short help and exit. .SH TUNING Tuning for best performance .PP A few hints for those that are using \fBmkgmap-splitter\fR to split large files. .TP 0.2i \(bu For faster processing with \*(T<\fB\-\-keep\-complete=true\fR\*(T>, convert the input file to o5m format using: .nf \*(T< \fBosmconvert\fR \fB\-\-drop\-version\fR \fIfile.osm\fR \fB\-o=\fR\fB\fIfile.o5m\fR\fR \*(T> .fi .TP 0.2i \(bu The option \*(T<\fB\-\-drop\-version\fR\*(T> is optional, it reduces the file to that data that is needed by \fBmkgmap-splitter\fR and \fBmkgmap\fR. .TP 0.2i \(bu If you still experience poor performance, look into \*(T<\fIsplitter.log\fR\*(T>. Search for the word Distributing. You may find something like this in the next line: .nf \*(T< Processing 1502 areas in 3 passes, 501 areas at a time \*(T> .fi This means splitter has to read the input file input three times because the \*(T<\fB\-\-max\-areas\fR\*(T> parameter was much smaller than the number of areas. If you have enough heap, set \*(T<\fB\-\-max\-areas\fR\*(T> value to a value that is higher than the number of areas, e.g. \*(T<\fB\-\-max\-areas=2048\fR\*(T>. Execute \fBmkgmap-splitter\fR again and you should find .nf \*(T< Processing 1502 areas in a single pass \*(T> .fi .TP 0.2i \(bu More areas require more memory. Make sure that \fBmkgmap-splitter\fR has enough heap (increase the \*(T<\fB\-Xmx\fR\*(T> parameter) so that it doesn't waste much time in the garbage collector (GC), but keep as much memory as possible for the systems I/O caches. .TP 0.2i \(bu If available, use two different disks for input file and output directory, esp. when you use o5m format for input and output. .TP 0.2i \(bu If you use \fBmkgmap\fR r2415 or later and disk space is no concern, consider to use \*(T<\fB\-\-output=o5m\fR\*(T> to speed up processing. 
.PP Tuning for low memory requirements .PP If your machine has less than 1 GB free memory (eg. a netbook), you can still use \fBmkgmap-splitter\fR, but you might have to be patient if you use the parameter \*(T<\fB\-\-keep\-complete\fR\*(T> and want to split a file like \*(T<\fIgermany.osm.pbf\fR\*(T> or a larger one. If needed, reduce the number of parallel processed areas to 50 with the \*(T<\fB\-\-max\-areas\fR\*(T> parameter. You have to use \*(T<\fB\-\-keep\-complete=false\fR\*(T> when splitting an area like Europe. .SH NOTES .TP 0.2i \(bu There is no longer an upper limit on the number of areas that can be output (previously it was 255). More areas just mean potentially more passes being required over the \&.osm file, and hence the splitter will take longer to run. .TP 0.2i \(bu There is no longer a limit on how many areas a way or relation can belong to (previously it was 4). .SH "SEE ALSO" \fBmkgmap\fR(1), \fBosmconvert\fR(1) splitter-r653/doc/splitter.1.xml0000664000175300017530000006341114352507253020036 0ustar builderbuilder00000000000000 mkgmap-splitter 1 mkgmap-splitter tile splitter for mkgmap mkgmap-splitter options file.osm > splitter.log DESCRIPTION mkgmap-splitter splits an .osm file that contains large well mapped regions into a number of smaller tiles, to fit within the maximum size used for the Garmin maps format. The two most important features are: Variable sized tiles to prevent a large number of tiny files. Tiles join exactly with no overlap or gaps. You will need a lot of memory on your computer if you intend to split a large area. A few options allow configuring how much memory you need. With the default parameters, you need about 2 bytes for every node and way. This doesn't sound a lot but there are about 4300 million nodes in the whole planet file (Jan 2018) and so you cannot process the whole planet in one pass on a 32 bit machine using this utility as the maximum java heap space is 2G. It is possible with 64 bit java and about 10GB of heap or with multiple passes. On the other hand a single country, even a well mapped one such as Germany or the UK, will be possible on a modest machine, even a netbook. USAGE Splitter requires java 1.6 or higher. Basic usage is as follows. mkgmap-splitter file.osm > splitter.log If you have less than 2 GB of memory on your computer you should reduce the option by setting the JAVA_OPTS environment variable. JAVA_OPTS="-Xmx512m" mkgmap-splitter file.osm > splitter.log This will produce a number of .osm.pbf files that can be read by mkgmap 1 . There are also other files produced: The template.args file is a file that can be used with the option of mkgmap that will compile all the files. You can use it as is or you can edit it to include your own options. For example instead of each description being "OSM Map" it could be "NW Scotland" as appropriate. The areas.list file is the list of bounding boxes that were calculated. If you want, you can use this on subsequent calls of splitter using the option to use exactly the same areas as last time. This might be useful if you produce a map regularly and want to keep the tile areas the same from month to month. It is also useful to avoid the time it takes to regenerate the file each time (currently about a third of the overall time taken to perform the split). Of course if the map grows enough that one of the tiles overflows, you will have to re-calculate the areas again. The areas.poly file contains the bounding polygon of the calculated areas. See option how this can be used. 
The densities-out.txt file is written when no split-file is given and contains debugging information only. You can also use a gzip'ed or bz2'ed compressed .osm file as the input file. Note that this can slow down the splitter considerably (particularly true for bz2) because decompressing the .osm file can take quite a lot of CPU power. If you are likely to be processing a file several times you're probably better off converting the file to one of the binary formats pbf or o5m. The o5m format is faster to read, but requires more space on the disk. OPTIONS There are a number of options to fine tune things that you might want to try. A comma separated list of tag values for relations. Used to filter multipolygon and boundary relations for problem-list processing. See also option . Default: use-exclude-list Deprecated, now does nothing Sets the desciption to be written in to the template.args file. The name of a GeoNames file to use for determining tile names. Typically cities15000.zip from geonames. Use to disable two additional program phases between the split and the final distribution phase (not recommended). The first phase, called gen-problem-list, detects all ways and relations that are crossing the borders of one or more output files. The second phase, called handle-problem-list, collects the coordinates of these ways and relations and calculates all output files that are crossed or enclosed. The information is passed to the final dist-phase in three temporary files. This avoids broken polygons, but be aware that it requires to read the input files at least two additional times. Do not specify it with unless you have a good reason to do so. Defaulte: true Set the filename for the split files. In the example the first file will be called 63240001.osm.pbf and the next one will be 63240002.osm.pbf and so on. Default: 63240001 The maximum number of areas that can be processed in a single pass during the second stage of processing. This must be a number from 1 to 9999. Higher numbers mean fewer passes over the source file and hence quicker overall processing, but also require more memory. If you find you are running out of memory but don't want to increase your value, try reducing this instead. Changing this will have no effect on the result of the split, it's purely to let you trade off memory for performance. Note that the first stage of the processing has a fixed memory overhead regardless of what this is set to so if you are running out of memory before the areas.list file is generated, you need to either increase your value or reduce the size of the input file you're trying to split. Default: 2048 The maximum number of nodes that can be in any of the resultant files. The default is fairly conservative, you could increase it quite a lot before getting any 'map too big' messages. Not much experimentation has been done. Also the bigger this value, the less memory is required during the splitting stage. Default: 1600000 The maximum number of threads used by mkgmap-splitter. Default: 4 (auto) Specify this if the input osm file has nodes, ways and relations intermingled or the ids are not strictly sorted. To increase performance, use the osmosis sort function. Default: false Don't trim empty space off the edges of tiles. This option is ignored when is used. Default: false A target value that is used when no split-file is given. Splitting is done so that the given number of tiles is produced. The value is ignored if this option is given. The format in which the output files are written. 
Possible values are xml, pbf, o5m, and simulate. The default is pbf, which produces the smallest file sizes. The o5m format is faster to write, but creates around 40% larger files. The simulate option is for debugging purposes. The directory to which splitter should write the output files. If the specified path to a directory doesn't exist, mkgmap-splitter tries to create it. Defaults to the current working directory. Deprecated since r279. With , mkgmap-splitter should include nodes outside the bounding box, so that mkgmap can neatly crop exactly at the border. This parameter controls the size of that overlap. It is in map units, a default of 2000 is used which means about 0.04 degrees of latitude or longitude. If is active and is given, a warning will be printed because this combination rarely makes sense. An osm file (.o5m, .pbf, .osm) with named ways that describe bounding polygons with OSM ways having tags name and mapid. The name of a file containing a bounding polygon in the osmosis polygon file format. mkgmap-splitter uses this file when calculating the areas. It first calculates a grid using the given . The input file is read and for each node, a counter is increased for the related grid area. If the input file contains a bounding box, this is applied to the grid so that nodes outside of the bounding box are ignored. Next, if specified, the bounding polygon is used to zero those grid elements outside of the bounding polygon area. If the polygon-file describes one or more rectilinear areas with no more than 40 vertices, mkgmap-splitter will try to create output files that fit exactly into each area, otherwise it will approximate the polygon area with rectangles. The name of a directory containing precompiled sea tiles. If given, mkgmap-splitter will use the precompiled sea tiles in the same way as mkgmap does. Use this if you want to use a polygon-file or and mkgmap creates empty *.img files combined with a message starting "There is not enough room in a single garmin map for all the input data". The name of a file containing ways and relations that are known to cause problems in the split process. Use this option if requires too much time or memory and doesn't solve your problem. Syntax of problem file: way:<id> # comment... rel:<id> # comment... example: way:2784765 # Ferry Guernsey - Jersey The name of a file to write the generated problem list created with . The parameter is ignored if . You can reuse this file with the parameter, but do this only if you use the same values for and . The resolution of the density map produced during the first phase. A value between 1 and 24. Default is 13. Increasing the value to 14 requires four times more memory in the split phase. The value is ignored if a is given. Search limit in split algo. Higher values may find better splits, but will take longer. Default: 200000 Use the previously calculated tile areas instead of calculating them from scratch. The file can be in .list or .kml format. Displays the amount of memory used by the JVM every seconds. Set =0 to disable. Default: 120 Debugging: stop after a given program phase. Can be split, gen-problem-list, or handle-problem-list. Default is dist which means execute all phases. Specifies the lowest admin_level value of boundary relations that should be kept complete. Used to filter boundary relations for problem-list processing. The default value 5 means that boundary relations are kept complete when the admin_level is 5 or higher (5..11). The parameter is ignored if . 
Default: 5 The name of a kml file to write out the areas to. This is in addition to areas.list (which is always written out). Special options If the parameter is found somewhere in the options, mkgmap-splitter will just print the version info and exit. Version info looks like this: splitter 279 compiled 2013-01-12T01:45:02+0000 If the parameter is found somewhere in the options, mkgmap-splitter will print a list of all known normal options together with a short help and exit. TUNING Tuning for best performance A few hints for those that are using mkgmap-splitter to split large files. For faster processing with , convert the input file to o5m format using: osmconvert file.osm The option is optional, it reduces the file to that data that is needed by mkgmap-splitter and mkgmap. If you still experience poor performance, look into splitter.log. Search for the word Distributing. You may find something like this in the next line: Processing 1502 areas in 3 passes, 501 areas at a time This means splitter has to read the input file input three times because the parameter was much smaller than the number of areas. If you have enough heap, set value to a value that is higher than the number of areas, e.g. . Execute mkgmap-splitter again and you should find Processing 1502 areas in a single pass More areas require more memory. Make sure that mkgmap-splitter has enough heap (increase the parameter) so that it doesn't waste much time in the garbage collector (GC), but keep as much memory as possible for the systems I/O caches. If available, use two different disks for input file and output directory, esp. when you use o5m format for input and output. If you use mkgmap r2415 or later and disk space is no concern, consider to use to speed up processing. Tuning for low memory requirements If your machine has less than 1 GB free memory (eg. a netbook), you can still use mkgmap-splitter, but you might have to be patient if you use the parameter and want to split a file like germany.osm.pbf or a larger one. If needed, reduce the number of parallel processed areas to 50 with the parameter. You have to use when splitting an area like Europe. NOTES There is no longer an upper limit on the number of areas that can be output (previously it was 255). More areas just mean potentially more passes being required over the .osm file, and hence the splitter will take longer to run. There is no longer a limit on how many areas a way or relation can belong to (previously it was 4). SEE ALSO mkgmap 1 , osmconvert 1 splitter-r653/doc/splitter.txt0000664000175300017530000003170314352507253017715 0ustar builderbuilder00000000000000=Tile splitter for mkgmap= The format used for Garmin maps has, in effect, a maximum size, meaning that you have to split an .osm file that contains large well mapped regions into a number of smaller tiles. This program does that. There are at least two stages of processing required. The first stage is to calculate what area each tile should cover, based on the distribution of nodes. The second stage writes out the nodes, ways and relations from the original .osm file into separate smaller .osm files, one for each area that was calculated in stage one. With option keep-complete=true, two additional stages are used to avoid broken ways and polygons. The two most important features are: * Variable sized tiles so that you don't get a large number of tiny files. * Tiles join exactly with no overlap or gaps. == First == You will need a lot of memory on your computer if you intend to split a large area. 
A few options allow configuring how much memory you need. With the default parameters, you need about 2 bytes for every node and way. This doesn't sound a lot but there are about 4300 million nodes in the whole planet file (Jan 2018) and so you cannot process the whole planet in one pass on a 32 bit machine using this utility, as the maximum java heap space is 2G. It is possible with 64 bit java and about 10GB of heap or with multiple passes. On the other hand a single country, even a well mapped one such as Germany or the UK, will be possible on a modest machine, even a netbook. == Download == Download from the [http://www.mkgmap.org.uk/download/splitter.html splitter download directory] The source code is available from subversion: at http://svn.mkgmap.org.uk/splitter/trunk == Usage == Splitter requires java 1.6 or higher. Run the following. If you have less than 2G of memory on your computer you should reduce the -Xmx argument java -Xmx2000m -jar splitter.jar file.osm > splitter.log This will produce a number of .osm.pbf files that can be read by mkgmap. There are also other files produced: The ''template.args'' file is a file that can be used with the -c option of mkgmap that will compile all the files. You can use it as is or you can edit it to include your own options. For example instead of each description being "OSM Map" it could be "NW Scotland" as appropriate. The ''areas.list'' file is the list of bounding boxes that were calculated. If you want, you can use this on subsequent calls of splitter using the --split-file option to use exactly the same areas as last time. This might be useful if you produce a map regularly and want to keep the tile areas the same from month to month. It is also useful to avoid the time it takes to regenerate the file each time (currently about a third of the overall time taken to perform the split). Of course if the map grows enough that one of the tiles overflows, you will have to re-calculate the areas again. The ''areas.poly'' file contains the bounding polygon of the calculated areas. The ''densities-out.txt'' file is written when no split-file is given and contains debugging information only. You can also use a gzip'ed or bz2'ed compressed .osm file as the input file. Note that this can slow down the splitter considerably (particularly true for bz2) because decompressing the .osm file can take quite a lot of CPU power. If you are likely to be processing a file several times you're probably better off converting the file to one of the binary formats pbf or o5m. The o5m format is faster to read, but requires more space on the disk. == Options == There are a number of options to fine tune things that you might want to try. ; --boundary-tags=use-exclude-list : A comma separated list of tag values for relations. Used to filter multipolygon and boundary relations for problem-list processing. See also option --wanted-admin-level. Default: use-exclude-list ; --cache= : Deprecated, now does nothing. ;--description=OSM Map : Sets the desciption to be written in to the template.args file. ;--geonames-file= : The name of a GeoNames file to use for determining tile names. Typically cities15000.zip from [http://download.geonames.org/export/dump geonames] ;--keep-complete=true : Use keep-complete=false to disable two additional program phases between the split and the final distribution phase (not recommended). The first phase, called gen-problem-list, detects all ways and relations that are crossing the borders of one or more output files. 
The second phase, called handle-problem-list, collects the coordinates of these ways and relations and calculates all output files that are crossed or enclosed. The information is passed to the final dist-phase in three temporary files. This avoids broken polygons, but be aware that it requires reading the input files at least two additional times.

Do not specify it with --overlap unless you have a good reason to do so. ;--mapid=63240001 : Set the filename for the split files. In the example the first file will be called 63240001.osm.pbf and the next one will be 63240002.osm.pbf and so on. ;--max-areas=2048 : The maximum number of areas that can be processed in a single pass during the second stage of processing. This must be a number from 1 to 9999. Higher numbers mean fewer passes over the source file and hence quicker overall processing, but also require more memory. If you find you are running out of memory but don't want to increase your --max-nodes value, try reducing this instead. Changing this will have no effect on the result of the split, it's purely to let you trade off memory for performance. Note that the first stage of the processing has a fixed memory overhead regardless of what this is set to so if you are running out of memory before the areas.list file is generated, you need to either increase your -Xmx value or reduce the size of the input file you're trying to split. ;--max-nodes=1600000 : The maximum number of nodes that can be in any of the resultant files. The default is fairly conservative, I think you could increase it quite a lot before getting any 'map too big' messages. I've not experimented much. Also the bigger this value, the less memory is required during the splitting stage. ;--max-threads : The maximum number of threads used by splitter. Default is auto. ;--mixed : Specify this if the input osm file has nodes, ways and relations intermingled or the ids are not strictly sorted. To increase performance, use the osmosis sort function. ;--no-trim : Don't trim empty space off the edges of tiles. This option is ignored when --polygon-file is used. ;--output=pbf : The format in which the output files are written. Possible values are xml, pbf, o5m, and simulate. The default is pbf, which produces the smallest file sizes. The o5m format is faster to write, but creates around 40% larger files. The simulate option is for debugging purposes. ;--output-dir=. : The directory to which splitter should write the output files. If the specified path to a directory doesn't exist, splitter tries to create it. Defaults to the current working directory. ;--overlap= : Deprecated since r279. With keep-complete=false, splitter should include nodes outside the bounding box, so that mkgmap can neatly crop exactly at the border. This parameter controls the size of that overlap. It is in map units, a default of 2000 is used which means about 0.04 degrees of latitude or longitude. If --keep-complete=true is active and --overlap is given, a warning will be printed because this combination rarely makes sense. ;--polygon-file : The name of a file containing a bounding polygon in the [http://wiki.openstreetmap.org/wiki/Osmosis/Polygon_Filter_File_Format osmosis polygon file format]. Splitter uses this file when calculating the areas. It first calculates a grid using the given --resolution. The input file is read and for each node, a counter is increased for the related grid area. If the input file contains a bounding box, this is applied to the grid so that nodes outside of the bounding box are ignored. Next, if specified, the bounding polygon is used to zero those grid elements outside of the bounding polygon area. 
If the polygon-file describes one or more rectilinear areas with no more than 40 vertices, splitter will try to create output files that fit exactly into each area, otherwise it will approximate the polygon area with rectangles. ;--precomp-sea : The name of a directory containing precompiled sea tiles. If given, splitter will use the precompiled sea tiles in the same way as mkgmap does. Use this if you want to use a polygon-file or --no-trim=true and mkgmap creates empty *.img files combined with a message starting "There is not enough room in a single garmin map for all the input data". ;--problem-file : The name of a file containing ways and relations that are known to cause problems in the split process. Use this option if --keep-complete requires too much time or memory and --overlap doesn't solve your problem. : Syntax of problem file: way: # comment... rel: # comment... example: way:2784765 # Ferry Guernsey - Jersey ;--problem-report : The name of a file to write the generated problem list created with --keep-complete. The parameter is ignored if --keep-complete=false. You can reuse this file with the --problem-file parameter, but do this only if you use the same values for max-nodes and resolution. ;--resolution=13 : The resolution of the density map produced during the first phase. A value between 1 and 24. Default is 13. Increasing the value to 14 requires four times more memory in the split phase. The value is ignored if a --split-file is given. ;--split-file=areas.list : Use the previously calculated tile areas instead of calculating them from scratch. The file can also be in *.kml format. ;--status-freq : Displays the amount of memory used by the JVM every --status-freq seconds. Set =0 to disable. Default is 120. ;--stop-after : Debugging: stop after a given program phase. Can be split, gen-problem-list, or handle-problem-list Default is dist which means execute all phases. ; --wanted-admin-level : Specifies the lowest admin_level value of boundary relations that should be kept complete. Used to filter boundary relations for problem-list processing. The default value 5 means that boundary relations are kept complete when the admin_level is 5 or higher (5..11). The parameter is ignored if --keep-complete=false. Default: 5 ;--write-kml : The name of a kml file to write out the areas to. This is in addition to areas.list (which is always written out). == Special options == ;--version : If the parameter --version is found somewhere in the options, splitter will just print the version info and exit. Version info looks like this: splitter 279 compiled 2013-01-12T01:45:02+0000 ;--help : If the parameter --help is found somewhere in the options, splitter will print a list of all known normal options together with a short help and exit. == Tuning == === Tuning for best performance === A few hints for those that are using splitter to split large files. * For faster processing with --keep-complete=true, convert the input file to o5m format using: osmconvert --drop-version file.osm -o=file.o5m * The option --drop-version is optional, it reduces the file to that data that is needed by splitter and mkgmap. * If you still experience poor performance, look into splitter.log. Search for the word Distributing. You may find something like this in the next line: Processing 1502 areas in 3 passes, 501 areas at a time

This means splitter has to read the input file input three times because the max-areas parameter was much smaller than the number of areas. If you have enough heap, set max-areas value to a value that is higher than the number of areas, e.g. --max-areas=2048. Execute splitter again and you should find Processing 1502 areas in a single pass * More areas require more memory. Make sure that splitter has enough heap (increase the -Xmx parameter) so that it doesn't waste much time in the garbage collector (GC), but keep as much memory as possible for the systems I/O caches. * If available, use two different disks for input file and output directory, esp. when you use o5m format for input and output. * If you use mkgmap r2415 or later and disk space is no concern, consider to use --output=o5m to speed up processing. === Tuning for low memory requirements === If your machine has less than 1GB free memory (eg. a netbook), you can still use splitter, but you might have to be patient if you use the parameter --keep-complete and want to split a file like germany.osm.pbf or a larger one. If needed, reduce the number of parallel processed areas to 50 with the max-areas parameter. You have to use --keep-complete=false when splitting an area like Europe. == Notes == * There is no longer an upper limit on the number of areas that can be output (previously it was 255). More areas just mean potentially more passes being required over the .osm file, and hence the splitter will take longer to run. * There is no longer a limit on how many areas a way or relation can belong to (previously it was 4). splitter-r653/resources/0000775000175300017530000000000014352507267016554 5ustar builderbuilder00000000000000splitter-r653/resources/splitter-version.properties0000664000175300017530000000007314352507267024223 0ustar builderbuilder00000000000000svn.version: 653 build.timestamp: 2022-12-27T06:20:39+0000 splitter-r653/src/0000775000175300017530000000000014352507253015324 5ustar builderbuilder00000000000000splitter-r653/src/org/0000775000175300017530000000000014352507253016113 5ustar builderbuilder00000000000000splitter-r653/src/org/apache/0000775000175300017530000000000014352507253017334 5ustar builderbuilder00000000000000splitter-r653/src/org/apache/tools/0000775000175300017530000000000014352507253020474 5ustar builderbuilder00000000000000splitter-r653/src/org/apache/tools/bzip2/0000775000175300017530000000000014352507253021522 5ustar builderbuilder00000000000000splitter-r653/src/org/apache/tools/bzip2/BZip2Constants.java0000664000175300017530000001124214352507253025210 0ustar builderbuilder00000000000000/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* * This package is based on the work done by Keiron Liddle, Aftex Software * to whom the Ant project is very grateful for his * great code. 
*/ package org.apache.tools.bzip2; /** * Base class for both the compress and decompress classes. * Holds common arrays, and static data. *

* This interface is public for historical purposes. * You should have no need to use it. *
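* A short orientation to the constants (summarising the BZip2 block format):
* baseBlockSize is the 100000-byte unit behind the block-size digit of the
* "BZh1".."BZh9" header; MAX_ALPHA_SIZE bounds the alphabet of the
* move-to-front/run-length stage (the RUNA and RUNB run symbols, the MTF
* values of the bytes in use, and the end-of-block symbol); MAX_CODE_LEN
* bounds the Huffman code lengths the decode tables must handle; a block may
* define up to N_GROUPS Huffman tables, each run of G_SIZE symbols selects
* one of them, and MAX_SELECTORS is sized for a full 900000-byte block.
*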

*/ public interface BZip2Constants { int baseBlockSize = 100000; int MAX_ALPHA_SIZE = 258; int MAX_CODE_LEN = 23; int RUNA = 0; int RUNB = 1; int N_GROUPS = 6; int G_SIZE = 50; int N_ITERS = 4; int MAX_SELECTORS = (2 + (900000 / G_SIZE)); int NUM_OVERSHOOT_BYTES = 20; /** * This array really shouldn't be here. * Again, for historical purposes it is. * *

FIXME: This array should be in a private or package private * location, since it could be modified by malicious code.
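* These pseudo-random values are only consulted for blocks that were written
* with the long-deprecated "randomised" mode of very old bzip2 releases; the
* decompressor uses them (see the setupRandPart* methods of
* CBZip2InputStream) to undo that randomisation.
*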

*/ int[] rNums = { 619, 720, 127, 481, 931, 816, 813, 233, 566, 247, 985, 724, 205, 454, 863, 491, 741, 242, 949, 214, 733, 859, 335, 708, 621, 574, 73, 654, 730, 472, 419, 436, 278, 496, 867, 210, 399, 680, 480, 51, 878, 465, 811, 169, 869, 675, 611, 697, 867, 561, 862, 687, 507, 283, 482, 129, 807, 591, 733, 623, 150, 238, 59, 379, 684, 877, 625, 169, 643, 105, 170, 607, 520, 932, 727, 476, 693, 425, 174, 647, 73, 122, 335, 530, 442, 853, 695, 249, 445, 515, 909, 545, 703, 919, 874, 474, 882, 500, 594, 612, 641, 801, 220, 162, 819, 984, 589, 513, 495, 799, 161, 604, 958, 533, 221, 400, 386, 867, 600, 782, 382, 596, 414, 171, 516, 375, 682, 485, 911, 276, 98, 553, 163, 354, 666, 933, 424, 341, 533, 870, 227, 730, 475, 186, 263, 647, 537, 686, 600, 224, 469, 68, 770, 919, 190, 373, 294, 822, 808, 206, 184, 943, 795, 384, 383, 461, 404, 758, 839, 887, 715, 67, 618, 276, 204, 918, 873, 777, 604, 560, 951, 160, 578, 722, 79, 804, 96, 409, 713, 940, 652, 934, 970, 447, 318, 353, 859, 672, 112, 785, 645, 863, 803, 350, 139, 93, 354, 99, 820, 908, 609, 772, 154, 274, 580, 184, 79, 626, 630, 742, 653, 282, 762, 623, 680, 81, 927, 626, 789, 125, 411, 521, 938, 300, 821, 78, 343, 175, 128, 250, 170, 774, 972, 275, 999, 639, 495, 78, 352, 126, 857, 956, 358, 619, 580, 124, 737, 594, 701, 612, 669, 112, 134, 694, 363, 992, 809, 743, 168, 974, 944, 375, 748, 52, 600, 747, 642, 182, 862, 81, 344, 805, 988, 739, 511, 655, 814, 334, 249, 515, 897, 955, 664, 981, 649, 113, 974, 459, 893, 228, 433, 837, 553, 268, 926, 240, 102, 654, 459, 51, 686, 754, 806, 760, 493, 403, 415, 394, 687, 700, 946, 670, 656, 610, 738, 392, 760, 799, 887, 653, 978, 321, 576, 617, 626, 502, 894, 679, 243, 440, 680, 879, 194, 572, 640, 724, 926, 56, 204, 700, 707, 151, 457, 449, 797, 195, 791, 558, 945, 679, 297, 59, 87, 824, 713, 663, 412, 693, 342, 606, 134, 108, 571, 364, 631, 212, 174, 643, 304, 329, 343, 97, 430, 751, 497, 314, 983, 374, 822, 928, 140, 206, 73, 263, 980, 736, 876, 478, 430, 305, 170, 514, 364, 692, 829, 82, 855, 953, 676, 246, 369, 970, 294, 750, 807, 827, 150, 790, 288, 923, 804, 378, 215, 828, 592, 281, 565, 555, 710, 82, 896, 831, 547, 261, 524, 462, 293, 465, 502, 56, 661, 821, 976, 991, 658, 869, 905, 758, 745, 193, 768, 550, 608, 933, 378, 286, 215, 979, 792, 961, 61, 688, 793, 644, 986, 403, 106, 366, 905, 644, 372, 567, 466, 434, 645, 210, 389, 550, 919, 135, 780, 773, 635, 389, 707, 100, 626, 958, 165, 504, 920, 176, 193, 713, 857, 265, 203, 50, 668, 108, 645, 990, 626, 197, 510, 357, 358, 850, 858, 364, 936, 638 }; } splitter-r653/src/org/apache/tools/bzip2/CBZip2InputStream.java0000664000175300017530000010476314352507253025625 0ustar builderbuilder00000000000000/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
* */ /* * This package is based on the work done by Keiron Liddle, Aftex Software * to whom the Ant project is very grateful for his * great code. */ package org.apache.tools.bzip2; import java.io.IOException; import java.io.InputStream; /** * An input stream that decompresses from the BZip2 format (without the file * header chars) to be read as any other stream. * *

The decompression requires large amounts of memory. Thus you * should call the {@link #close() close()} method as soon as * possible, to force CBZip2InputStream to release the * allocated memory. See {@link CBZip2OutputStream * CBZip2OutputStream} for information about memory usage.
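*
* A minimal usage sketch (illustrative only: the file name, buffer size and
* try/finally wrapping are assumptions, and the java.io imports plus the
* IOException handling are left to the caller):
* <pre>
* InputStream raw = new BufferedInputStream(new FileInputStream("data.bz2"));
* // the caller must consume the two magic bytes "BZ" before handing over the stream
* if (raw.read() != 'B' || raw.read() != 'Z') {
*     throw new IOException("not a BZip2 stream");
* }
* CBZip2InputStream bzIn = new CBZip2InputStream(raw);
* try {
*     byte[] buf = new byte[8192];
*     for (int n; (n = bzIn.read(buf)) != -1;) {
*         // process buf[0..n)
*     }
* } finally {
*     bzIn.close(); // frees the large decompression buffers
* }
* </pre>
*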

* *

CBZip2InputStream reads bytes from the compressed * source stream via the single byte {@link java.io.InputStream#read() * read()} method exclusively. Thus you should consider using a * buffered source stream.

* *

Instances of this class are not threadsafe.

*/ public class CBZip2InputStream extends InputStream implements BZip2Constants { /** * Index of the last char in the block, so the block size == last + 1. */ private int last; /** * Index in zptr[] of original string after sorting. */ private int origPtr; /** * always: in the range 0 .. 9. * The current block size is 100000 * this number. */ private int blockSize100k; private boolean blockRandomised; private int bsBuff; private int bsLive; private final CRC crc = new CRC(); private int nInUse; private InputStream in; private final boolean decompressConcatenated; private int currentChar = -1; private static final int EOF = 0; private static final int START_BLOCK_STATE = 1; private static final int RAND_PART_A_STATE = 2; private static final int RAND_PART_B_STATE = 3; private static final int RAND_PART_C_STATE = 4; private static final int NO_RAND_PART_A_STATE = 5; private static final int NO_RAND_PART_B_STATE = 6; private static final int NO_RAND_PART_C_STATE = 7; private int currentState = START_BLOCK_STATE; private int storedBlockCRC, storedCombinedCRC; private int computedBlockCRC, computedCombinedCRC; // Variables used by setup* methods exclusively private int su_count; private int su_ch2; private int su_chPrev; private int su_i2; private int su_j2; private int su_rNToGo; private int su_rTPos; private int su_tPos; private char su_z; /** * All memory intensive stuff. * This field is initialized by initBlock(). */ private CBZip2InputStream.Data data; /** * Constructs a new CBZip2InputStream which decompresses bytes read from * the specified stream. This doesn't support decompressing * concatenated .bz2 files. * *

Although BZip2 headers are marked with the magic * "Bz" this constructor expects the next byte in the * stream to be the first one after the magic. Thus callers have * to skip the first two bytes. Otherwise this constructor will * throw an exception.
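* (If the input may contain several concatenated .bz2 streams, use the
* two-argument constructor with decompressConcatenated=true instead; this
* constructor stops after the first stream.)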

* * @throws IOException * if the stream content is malformed or an I/O error occurs. * @throws NullPointerException * if in == null */ public CBZip2InputStream(final InputStream in) throws IOException { this(in, false); } /** * Constructs a new CBZip2InputStream which decompresses bytes * read from the specified stream. * *

Although BZip2 headers are marked with the magic * "Bz" this constructor expects the next byte in the * stream to be the first one after the magic. Thus callers have * to skip the first two bytes. Otherwise this constructor will * throw an exception.

* * @param in the InputStream from which this object should be created * @param decompressConcatenated * if true, decompress until the end of the input; * if false, stop after the first .bz2 stream and * leave the input position to point to the next * byte after the .bz2 stream * * @throws IOException * if the stream content is malformed or an I/O error occurs. * @throws NullPointerException * if in == null */ public CBZip2InputStream(final InputStream in, final boolean decompressConcatenated) throws IOException { super(); this.in = in; this.decompressConcatenated = decompressConcatenated; init(true); initBlock(); setupBlock(); } /** {@inheritDoc} */ @Override public int read() throws IOException { if (this.in != null) { return read0(); } else { throw new IOException("stream closed"); } } /* * (non-Javadoc) * * @see java.io.InputStream#read(byte[], int, int) */ @Override public int read(final byte[] dest, final int offs, final int len) throws IOException { if (offs < 0) { throw new IndexOutOfBoundsException("offs(" + offs + ") < 0."); } if (len < 0) { throw new IndexOutOfBoundsException("len(" + len + ") < 0."); } if (offs + len > dest.length) { throw new IndexOutOfBoundsException("offs(" + offs + ") + len(" + len + ") > dest.length(" + dest.length + ")."); } if (this.in == null) { throw new IOException("stream closed"); } final int hi = offs + len; int destOffs = offs; for (int b; (destOffs < hi) && ((b = read0()) >= 0);) { dest[destOffs++] = (byte) b; } return (destOffs == offs) ? -1 : (destOffs - offs); } private void makeMaps() { final boolean[] inUse = this.data.inUse; final byte[] seqToUnseq = this.data.seqToUnseq; int nInUseShadow = 0; for (int i = 0; i < 256; i++) { if (inUse[i]) { seqToUnseq[nInUseShadow++] = (byte) i; } } this.nInUse = nInUseShadow; } private int read0() throws IOException { final int retChar = this.currentChar; switch (this.currentState) { case EOF: return -1; case START_BLOCK_STATE: throw new IllegalStateException(); case RAND_PART_A_STATE: throw new IllegalStateException(); case RAND_PART_B_STATE: setupRandPartB(); break; case RAND_PART_C_STATE: setupRandPartC(); break; case NO_RAND_PART_A_STATE: throw new IllegalStateException(); case NO_RAND_PART_B_STATE: setupNoRandPartB(); break; case NO_RAND_PART_C_STATE: setupNoRandPartC(); break; default: throw new IllegalStateException(); } return retChar; } private boolean init(boolean isFirstStream) throws IOException { if (null == in) { throw new IOException("No InputStream"); } if (isFirstStream) { if (in.available() == 0) { throw new IOException("Empty InputStream"); } } else { int magic0 = this.in.read(); if (magic0 == -1) { return false; } int magic1 = this.in.read(); if (magic0 != 'B' || magic1 != 'Z') { throw new IOException("Garbage after a valid BZip2 stream"); } } int magic2 = this.in.read(); if (magic2 != 'h') { throw new IOException(isFirstStream ? "Stream is not in the BZip2 format" : "Garbage after a valid BZip2 stream"); } int blockSize = this.in.read(); if ((blockSize < '1') || (blockSize > '9')) { throw new IOException("Stream is not BZip2 formatted: illegal " + "blocksize " + (char) blockSize); } this.blockSize100k = blockSize - '0'; this.bsLive = 0; this.computedCombinedCRC = 0; return true; } private void initBlock() throws IOException { char magic0; char magic1; char magic2; char magic3; char magic4; char magic5; while (true) { // Get the block magic bytes. 
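// A data block starts with the 48-bit value 0x314159265359 (the BCD digits of pi);
// the value 0x177245385090 (the BCD digits of sqrt(pi)) is the end-of-stream
// footer that is recognised below.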
magic0 = bsGetUByte(); magic1 = bsGetUByte(); magic2 = bsGetUByte(); magic3 = bsGetUByte(); magic4 = bsGetUByte(); magic5 = bsGetUByte(); // If isn't end of stream magic, break out of the loop. if (magic0 != 0x17 || magic1 != 0x72 || magic2 != 0x45 || magic3 != 0x38 || magic4 != 0x50 || magic5 != 0x90) { break; } // End of stream was reached. Check the combined CRC and // advance to the next .bz2 stream if decoding concatenated // streams. if (complete()) { return; } } if (magic0 != 0x31 || // '1' magic1 != 0x41 || // ')' magic2 != 0x59 || // 'Y' magic3 != 0x26 || // '&' magic4 != 0x53 || // 'S' magic5 != 0x59 // 'Y' ) { this.currentState = EOF; throw new IOException("bad block header"); } else { this.storedBlockCRC = bsGetInt(); this.blockRandomised = bsR(1) == 1; /** * Allocate data here instead in constructor, so we do not * allocate it if the input file is empty. */ if (this.data == null) { this.data = new Data(this.blockSize100k); } // currBlockNo++; getAndMoveToFrontDecode(); this.crc.initialiseCRC(); this.currentState = START_BLOCK_STATE; } } private void endBlock() throws IOException { this.computedBlockCRC = this.crc.getFinalCRC(); // A bad CRC is considered a fatal error. if (this.storedBlockCRC != this.computedBlockCRC) { // make next blocks readable without error // (repair feature, not yet documented, not tested) this.computedCombinedCRC = (this.storedCombinedCRC << 1) | (this.storedCombinedCRC >>> 31); this.computedCombinedCRC ^= this.storedBlockCRC; reportCRCError(); } this.computedCombinedCRC = (this.computedCombinedCRC << 1) | (this.computedCombinedCRC >>> 31); this.computedCombinedCRC ^= this.computedBlockCRC; } private boolean complete() throws IOException { this.storedCombinedCRC = bsGetInt(); this.currentState = EOF; this.data = null; if (this.storedCombinedCRC != this.computedCombinedCRC) { reportCRCError(); } // Look for the next .bz2 stream if decompressing // concatenated files. return !decompressConcatenated || !init(false); } @Override public void close() throws IOException { InputStream inShadow = this.in; if (inShadow != null) { try { if (inShadow != System.in) { inShadow.close(); } } finally { this.data = null; this.in = null; } } } private int bsR(final int n) throws IOException { int bsLiveShadow = this.bsLive; int bsBuffShadow = this.bsBuff; if (bsLiveShadow < n) { final InputStream inShadow = this.in; do { int thech = inShadow.read(); if (thech < 0) { throw new IOException("unexpected end of stream"); } bsBuffShadow = (bsBuffShadow << 8) | thech; bsLiveShadow += 8; } while (bsLiveShadow < n); this.bsBuff = bsBuffShadow; } this.bsLive = bsLiveShadow - n; return (bsBuffShadow >> (bsLiveShadow - n)) & ((1 << n) - 1); } private boolean bsGetBit() throws IOException { int bsLiveShadow = this.bsLive; int bsBuffShadow = this.bsBuff; if (bsLiveShadow < 1) { int thech = this.in.read(); if (thech < 0) { throw new IOException("unexpected end of stream"); } bsBuffShadow = (bsBuffShadow << 8) | thech; bsLiveShadow += 8; this.bsBuff = bsBuffShadow; } this.bsLive = bsLiveShadow - 1; return ((bsBuffShadow >> (bsLiveShadow - 1)) & 1) != 0; } private char bsGetUByte() throws IOException { return (char) bsR(8); } private int bsGetInt() throws IOException { return (((((bsR(8) << 8) | bsR(8)) << 8) | bsR(8)) << 8) | bsR(8); } /** * Called by createHuffmanDecodingTables() exclusively. 
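* A sketch of what it builds (inferred from its use in getAndMoveToFrontDecode()):
* perm lists the symbols ordered by code length, limit[len] holds the largest
* code value of that length, and base[len] is the offset that turns a code of
* that length into an index into perm.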
*/ private static void hbCreateDecodeTables(final int[] limit, final int[] base, final int[] perm, final char[] length, final int minLen, final int maxLen, final int alphaSize) { for (int i = minLen, pp = 0; i <= maxLen; i++) { for (int j = 0; j < alphaSize; j++) { if (length[j] == i) { perm[pp++] = j; } } } for (int i = MAX_CODE_LEN; --i > 0;) { base[i] = 0; limit[i] = 0; } for (int i = 0; i < alphaSize; i++) { base[length[i] + 1]++; } for (int i = 1, b = base[0]; i < MAX_CODE_LEN; i++) { b += base[i]; base[i] = b; } for (int i = minLen, vec = 0, b = base[i]; i <= maxLen; i++) { final int nb = base[i + 1]; vec += nb - b; b = nb; limit[i] = vec - 1; vec <<= 1; } for (int i = minLen + 1; i <= maxLen; i++) { base[i] = ((limit[i - 1] + 1) << 1) - base[i]; } } private void recvDecodingTables() throws IOException { final Data dataShadow = this.data; final boolean[] inUse = dataShadow.inUse; final byte[] pos = dataShadow.recvDecodingTables_pos; final byte[] selector = dataShadow.selector; final byte[] selectorMtf = dataShadow.selectorMtf; int inUse16 = 0; /* Receive the mapping table */ for (int i = 0; i < 16; i++) { if (bsGetBit()) { inUse16 |= 1 << i; } } for (int i = 256; --i >= 0;) { inUse[i] = false; } for (int i = 0; i < 16; i++) { if ((inUse16 & (1 << i)) != 0) { final int i16 = i << 4; for (int j = 0; j < 16; j++) { if (bsGetBit()) { inUse[i16 + j] = true; } } } } makeMaps(); final int alphaSize = this.nInUse + 2; /* Now the selectors */ final int nGroups = bsR(3); final int nSelectors = bsR(15); for (int i = 0; i < nSelectors; i++) { int j = 0; while (bsGetBit()) { j++; } selectorMtf[i] = (byte) j; } /* Undo the MTF values for the selectors. */ for (int v = nGroups; --v >= 0;) { pos[v] = (byte) v; } for (int i = 0; i < nSelectors; i++) { int v = selectorMtf[i] & 0xff; final byte tmp = pos[v]; while (v > 0) { // nearly all times v is zero, 4 in most other cases pos[v] = pos[v - 1]; v--; } pos[0] = tmp; selector[i] = tmp; } final char[][] len = dataShadow.temp_charArray2d; /* Now the coding tables */ for (int t = 0; t < nGroups; t++) { int curr = bsR(5); final char[] len_t = len[t]; for (int i = 0; i < alphaSize; i++) { while (bsGetBit()) { curr += bsGetBit() ? -1 : 1; } len_t[i] = (char) curr; } } // finally create the Huffman tables createHuffmanDecodingTables(alphaSize, nGroups); } /** * Called by recvDecodingTables() exclusively. 
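* For each of the nGroups coding tables it determines the minimum and maximum
* code length and then delegates the actual table construction to
* hbCreateDecodeTables().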
*/ private void createHuffmanDecodingTables(final int alphaSize, final int nGroups) { final Data dataShadow = this.data; final char[][] len = dataShadow.temp_charArray2d; final int[] minLens = dataShadow.minLens; final int[][] limit = dataShadow.limit; final int[][] base = dataShadow.base; final int[][] perm = dataShadow.perm; for (int t = 0; t < nGroups; t++) { int minLen = 32; int maxLen = 0; final char[] len_t = len[t]; for (int i = alphaSize; --i >= 0;) { final char lent = len_t[i]; if (lent > maxLen) { maxLen = lent; } if (lent < minLen) { minLen = lent; } } hbCreateDecodeTables(limit[t], base[t], perm[t], len[t], minLen, maxLen, alphaSize); minLens[t] = minLen; } } private void getAndMoveToFrontDecode() throws IOException { this.origPtr = bsR(24); recvDecodingTables(); final InputStream inShadow = this.in; final Data dataShadow = this.data; final byte[] ll8 = dataShadow.ll8; final int[] unzftab = dataShadow.unzftab; final byte[] selector = dataShadow.selector; final byte[] seqToUnseq = dataShadow.seqToUnseq; final char[] yy = dataShadow.getAndMoveToFrontDecode_yy; final int[] minLens = dataShadow.minLens; final int[][] limit = dataShadow.limit; final int[][] base = dataShadow.base; final int[][] perm = dataShadow.perm; final int limitLast = this.blockSize100k * 100000; /* Setting up the unzftab entries here is not strictly necessary, but it does save having to do it later in a separate pass, and so saves a block's worth of cache misses. */ for (int i = 256; --i >= 0;) { yy[i] = (char) i; unzftab[i] = 0; } int groupNo = 0; int groupPos = G_SIZE - 1; final int eob = this.nInUse + 1; int nextSym = getAndMoveToFrontDecode0(0); int bsBuffShadow = this.bsBuff; int bsLiveShadow = this.bsLive; int lastShadow = -1; int zt = selector[groupNo] & 0xff; int[] base_zt = base[zt]; int[] limit_zt = limit[zt]; int[] perm_zt = perm[zt]; int minLens_zt = minLens[zt]; while (nextSym != eob) { if ((nextSym == RUNA) || (nextSym == RUNB)) { int s = -1; for (int n = 1; true; n <<= 1) { if (nextSym == RUNA) { s += n; } else if (nextSym == RUNB) { s += n << 1; } else { break; } if (groupPos == 0) { groupPos = G_SIZE - 1; zt = selector[++groupNo] & 0xff; base_zt = base[zt]; limit_zt = limit[zt]; perm_zt = perm[zt]; minLens_zt = minLens[zt]; } else { groupPos--; } int zn = minLens_zt; // Inlined: // int zvec = bsR(zn); while (bsLiveShadow < zn) { final int thech = inShadow.read(); if (thech >= 0) { bsBuffShadow = (bsBuffShadow << 8) | thech; bsLiveShadow += 8; continue; } else { throw new IOException("unexpected end of stream"); } } int zvec = (bsBuffShadow >> (bsLiveShadow - zn)) & ((1 << zn) - 1); bsLiveShadow -= zn; while (zvec > limit_zt[zn]) { zn++; while (bsLiveShadow < 1) { final int thech = inShadow.read(); if (thech >= 0) { bsBuffShadow = (bsBuffShadow << 8) | thech; bsLiveShadow += 8; continue; } else { throw new IOException("unexpected end of stream"); } } bsLiveShadow--; zvec = (zvec << 1) | ((bsBuffShadow >> bsLiveShadow) & 1); } nextSym = perm_zt[zvec - base_zt[zn]]; } final byte ch = seqToUnseq[yy[0]]; unzftab[ch & 0xff] += s + 1; while (s-- >= 0) { ll8[++lastShadow] = ch; } if (lastShadow >= limitLast) { throw new IOException("block overrun"); } } else { if (++lastShadow >= limitLast) { throw new IOException("block overrun"); } final char tmp = yy[nextSym - 1]; unzftab[seqToUnseq[tmp] & 0xff]++; ll8[lastShadow] = seqToUnseq[tmp]; /* This loop is hammered during decompression, hence avoid native method call overhead of System.arraycopy for very small ranges to copy. 
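The manual loop below shifts at most 15 entries of the MTF list (yy);
larger moves fall back to System.arraycopy.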
*/ if (nextSym <= 16) { for (int j = nextSym - 1; j > 0;) { yy[j] = yy[--j]; } } else { System.arraycopy(yy, 0, yy, 1, nextSym - 1); } yy[0] = tmp; if (groupPos == 0) { groupPos = G_SIZE - 1; zt = selector[++groupNo] & 0xff; base_zt = base[zt]; limit_zt = limit[zt]; perm_zt = perm[zt]; minLens_zt = minLens[zt]; } else { groupPos--; } int zn = minLens_zt; // Inlined: // int zvec = bsR(zn); while (bsLiveShadow < zn) { final int thech = inShadow.read(); if (thech >= 0) { bsBuffShadow = (bsBuffShadow << 8) | thech; bsLiveShadow += 8; continue; } else { throw new IOException("unexpected end of stream"); } } int zvec = (bsBuffShadow >> (bsLiveShadow - zn)) & ((1 << zn) - 1); bsLiveShadow -= zn; while (zvec > limit_zt[zn]) { zn++; while (bsLiveShadow < 1) { final int thech = inShadow.read(); if (thech >= 0) { bsBuffShadow = (bsBuffShadow << 8) | thech; bsLiveShadow += 8; continue; } else { throw new IOException("unexpected end of stream"); } } bsLiveShadow--; zvec = (zvec << 1) | ((bsBuffShadow >> bsLiveShadow) & 1); } nextSym = perm_zt[zvec - base_zt[zn]]; } } this.last = lastShadow; this.bsLive = bsLiveShadow; this.bsBuff = bsBuffShadow; } private int getAndMoveToFrontDecode0(final int groupNo) throws IOException { final InputStream inShadow = this.in; final Data dataShadow = this.data; final int zt = dataShadow.selector[groupNo] & 0xff; final int[] limit_zt = dataShadow.limit[zt]; int zn = dataShadow.minLens[zt]; int zvec = bsR(zn); int bsLiveShadow = this.bsLive; int bsBuffShadow = this.bsBuff; while (zvec > limit_zt[zn]) { zn++; while (bsLiveShadow < 1) { final int thech = inShadow.read(); if (thech >= 0) { bsBuffShadow = (bsBuffShadow << 8) | thech; bsLiveShadow += 8; continue; } else { throw new IOException("unexpected end of stream"); } } bsLiveShadow--; zvec = (zvec << 1) | ((bsBuffShadow >> bsLiveShadow) & 1); } this.bsLive = bsLiveShadow; this.bsBuff = bsBuffShadow; return dataShadow.perm[zt][zvec - dataShadow.base[zt][zn]]; } private void setupBlock() throws IOException { if (this.data == null) { return; } final int[] cftab = this.data.cftab; final int[] tt = this.data.initTT(this.last + 1); final byte[] ll8 = this.data.ll8; cftab[0] = 0; System.arraycopy(this.data.unzftab, 0, cftab, 1, 256); for (int i = 1, c = cftab[0]; i <= 256; i++) { c += cftab[i]; cftab[i] = c; } for (int i = 0, lastShadow = this.last; i <= lastShadow; i++) { tt[cftab[ll8[i] & 0xff]++] = i; } if ((this.origPtr < 0) || (this.origPtr >= tt.length)) { throw new IOException("stream corrupted"); } this.su_tPos = tt[this.origPtr]; this.su_count = 0; this.su_i2 = 0; this.su_ch2 = 256; /* not a char and not EOF */ if (this.blockRandomised) { this.su_rNToGo = 0; this.su_rTPos = 0; setupRandPartA(); } else { setupNoRandPartA(); } } private void setupRandPartA() throws IOException { if (this.su_i2 <= this.last) { this.su_chPrev = this.su_ch2; int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff; this.su_tPos = this.data.tt[this.su_tPos]; if (this.su_rNToGo == 0) { this.su_rNToGo = BZip2Constants.rNums[this.su_rTPos] - 1; if (++this.su_rTPos == 512) { this.su_rTPos = 0; } } else { this.su_rNToGo--; } this.su_ch2 = su_ch2Shadow ^= (this.su_rNToGo == 1) ? 
1 : 0; this.su_i2++; this.currentChar = su_ch2Shadow; this.currentState = RAND_PART_B_STATE; this.crc.updateCRC(su_ch2Shadow); } else { endBlock(); initBlock(); setupBlock(); } } private void setupNoRandPartA() throws IOException { if (this.su_i2 <= this.last) { this.su_chPrev = this.su_ch2; int su_ch2Shadow = this.data.ll8[this.su_tPos] & 0xff; this.su_ch2 = su_ch2Shadow; this.su_tPos = this.data.tt[this.su_tPos]; this.su_i2++; this.currentChar = su_ch2Shadow; this.currentState = NO_RAND_PART_B_STATE; this.crc.updateCRC(su_ch2Shadow); } else { this.currentState = NO_RAND_PART_A_STATE; endBlock(); initBlock(); setupBlock(); } } private void setupRandPartB() throws IOException { if (this.su_ch2 != this.su_chPrev) { this.currentState = RAND_PART_A_STATE; this.su_count = 1; setupRandPartA(); } else if (++this.su_count >= 4) { this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff); this.su_tPos = this.data.tt[this.su_tPos]; if (this.su_rNToGo == 0) { this.su_rNToGo = BZip2Constants.rNums[this.su_rTPos] - 1; if (++this.su_rTPos == 512) { this.su_rTPos = 0; } } else { this.su_rNToGo--; } this.su_j2 = 0; this.currentState = RAND_PART_C_STATE; if (this.su_rNToGo == 1) { this.su_z ^= 1; } setupRandPartC(); } else { this.currentState = RAND_PART_A_STATE; setupRandPartA(); } } private void setupRandPartC() throws IOException { if (this.su_j2 < this.su_z) { this.currentChar = this.su_ch2; this.crc.updateCRC(this.su_ch2); this.su_j2++; } else { this.currentState = RAND_PART_A_STATE; this.su_i2++; this.su_count = 0; setupRandPartA(); } } private void setupNoRandPartB() throws IOException { if (this.su_ch2 != this.su_chPrev) { this.su_count = 1; setupNoRandPartA(); } else if (++this.su_count >= 4) { this.su_z = (char) (this.data.ll8[this.su_tPos] & 0xff); this.su_tPos = this.data.tt[this.su_tPos]; this.su_j2 = 0; setupNoRandPartC(); } else { setupNoRandPartA(); } } private void setupNoRandPartC() throws IOException { if (this.su_j2 < this.su_z) { int su_ch2Shadow = this.su_ch2; this.currentChar = su_ch2Shadow; this.crc.updateCRC(su_ch2Shadow); this.su_j2++; this.currentState = NO_RAND_PART_C_STATE; } else { this.su_i2++; this.su_count = 0; setupNoRandPartA(); } } private static final class Data extends Object { // (with blockSize 900k) final boolean[] inUse = new boolean[256]; // 256 byte final byte[] seqToUnseq = new byte[256]; // 256 byte final byte[] selector = new byte[MAX_SELECTORS]; // 18002 byte final byte[] selectorMtf = new byte[MAX_SELECTORS]; // 18002 byte /** * Freq table collected to save a pass over the data during * decompression. */ final int[] unzftab = new int[256]; // 1024 byte final int[][] limit = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte final int[][] base = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte final int[][] perm = new int[N_GROUPS][MAX_ALPHA_SIZE]; // 6192 byte final int[] minLens = new int[N_GROUPS]; // 24 byte final int[] cftab = new int[257]; // 1028 byte final char[] getAndMoveToFrontDecode_yy = new char[256]; // 512 byte final char[][] temp_charArray2d = new char[N_GROUPS][MAX_ALPHA_SIZE]; // 3096 byte final byte[] recvDecodingTables_pos = new byte[N_GROUPS]; // 6 byte //--------------- // 60798 byte int[] tt; // 3600000 byte byte[] ll8; // 900000 byte //--------------- // 4560782 byte //=============== Data(int blockSize100k) { super(); this.ll8 = new byte[blockSize100k * BZip2Constants.baseBlockSize]; } /** * Initializes the {@link #tt} array. * * This method is called when the required length of the array * is known. 
I don't initialize it at construction time to * avoid unnecessary memory allocation when compressing small * files. */ final int[] initTT(int length) { int[] ttShadow = this.tt; // tt.length should always be >= length, but theoretically // it can happen, if the compressor mixed small and large // blocks. Normally only the last block will be smaller // than others. if ((ttShadow == null) || (ttShadow.length < length)) { this.tt = ttShadow = new int[length]; } return ttShadow; } } private static void reportCRCError() throws IOException { // The clean way would be to throw an exception. //throw new IOException("crc error"); // Just print a message, like the previous versions of this class did System.err.println("BZip2 CRC error"); } } splitter-r653/src/org/apache/tools/bzip2/CRC.java0000664000175300017530000001315114352507253022775 0ustar builderbuilder00000000000000/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /* * This package is based on the work done by Keiron Liddle, Aftex Software * to whom the Ant project is very grateful for his * great code. */ package org.apache.tools.bzip2; /** * A simple class the hold and calculate the CRC for sanity checking * of the data. 
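* Judging by the table below, this is the CRC-32 variant used by BZip2:
* polynomial 0x04C11DB7 processed most-significant-bit first, initialised
* to 0xFFFFFFFF and finished by complementing, without the bit reflection
* used by the zlib/IEEE implementation.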
* */ final class CRC { static final int crc32Table[] = { 0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9, 0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005, 0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61, 0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd, 0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9, 0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75, 0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011, 0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd, 0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039, 0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5, 0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81, 0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d, 0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49, 0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95, 0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1, 0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d, 0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae, 0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072, 0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16, 0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca, 0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde, 0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02, 0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066, 0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba, 0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e, 0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692, 0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6, 0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a, 0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e, 0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2, 0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686, 0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a, 0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637, 0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb, 0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f, 0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53, 0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47, 0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b, 0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff, 0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623, 0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7, 0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b, 0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f, 0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3, 0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7, 0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b, 0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f, 0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3, 0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640, 0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c, 0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8, 0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24, 0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30, 0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec, 0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088, 0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654, 0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0, 0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c, 0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18, 0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4, 0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0, 0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c, 0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668, 0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4 }; CRC() { initialiseCRC(); } void initialiseCRC() { globalCrc = 0xffffffff; } int getFinalCRC() { return ~globalCrc; } int getGlobalCRC() { return globalCrc; } void setGlobalCRC(int newCrc) { globalCrc = newCrc; } void updateCRC(int inCh) { int temp = (globalCrc >> 24) ^ inCh; if (temp < 0) { temp = 256 + temp; } globalCrc = (globalCrc << 8) ^ CRC.crc32Table[temp]; } void updateCRC(int inCh, int repeat) { int 
globalCrcShadow = this.globalCrc; while (repeat-- > 0) { int temp = (globalCrcShadow >> 24) ^ inCh; globalCrcShadow = (globalCrcShadow << 8) ^ crc32Table[(temp >= 0) ? temp : (temp + 256)]; } this.globalCrc = globalCrcShadow; } int globalCrc; } splitter-r653/src/org/openstreetmap/0000775000175300017530000000000014352507253021001 5ustar builderbuilder00000000000000splitter-r653/src/org/openstreetmap/osmosis/0000775000175300017530000000000014352507253022475 5ustar builderbuilder00000000000000splitter-r653/src/org/openstreetmap/osmosis/core/0000775000175300017530000000000014352507253023425 5ustar builderbuilder00000000000000splitter-r653/src/org/openstreetmap/osmosis/core/OsmosisRuntimeException.java0000664000175300017530000000277714352507253031164 0ustar builderbuilder00000000000000// This software is released into the Public Domain. See copying.txt for details. package org.openstreetmap.osmosis.core; /** * The root of the unchecked exception hierarchy for the application. All typed * runtime exceptions subclass this exception. * * @author Brett Henderson */ public class OsmosisRuntimeException extends RuntimeException { private static final long serialVersionUID = 1L; /** * Constructs a new exception with null as its detail message. */ public OsmosisRuntimeException() { super(); } /** * Constructs a new exception with the specified detail message. The * cause is not initialized, and may subsequently be initialized by * a call to {@link #initCause}. * * @param message the detail message. */ public OsmosisRuntimeException(String message) { super(message); } /** * Constructs a new exception with the specified cause and a detail * message of (cause==null ? null : cause.toString()) (which * typically contains the class and detail message of cause). * * @param cause the cause. */ public OsmosisRuntimeException(Throwable cause) { super(cause); } /** * Constructs a new exception with the specified detail message and * cause. * * @param message the detail message. * @param cause the cause. */ public OsmosisRuntimeException(String message, Throwable cause) { super(message, cause); } } splitter-r653/src/org/openstreetmap/osmosis/core/filter/0000775000175300017530000000000014352507253024712 5ustar builderbuilder00000000000000splitter-r653/src/org/openstreetmap/osmosis/core/filter/common/0000775000175300017530000000000014352507253026202 5ustar builderbuilder00000000000000splitter-r653/src/org/openstreetmap/osmosis/core/filter/common/PolygonFileReader.java0000664000175300017530000002163314352507253032424 0ustar builderbuilder00000000000000// This software is released into the Public Domain. See copying.txt for details. package org.openstreetmap.osmosis.core.filter.common; import java.awt.geom.Area; import java.awt.geom.Path2D; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; import java.util.logging.Level; import java.util.logging.Logger; import org.openstreetmap.osmosis.core.OsmosisRuntimeException; /** * Reads the contents of a polygon file into an Area instance. *

* The file format is defined at http://www.maproom.psu.edu/dcw/. An example is * provided here, and a usage sketch follows it. The first line contains the name of the file; the second line * contains the name of an individual polygon, and if that name is prefixed with ! the * polygon is a negative one to be subtracted from the resultant extraction * polygon.

* * australia_v
* 1
* 0.1446763E+03 -0.3825659E+02
* 0.1446693E+03 -0.3826255E+02
* 0.1446627E+03 -0.3825661E+02
* 0.1446763E+03 -0.3824465E+02
* 0.1446813E+03 -0.3824343E+02
* 0.1446824E+03 -0.3824484E+02
* 0.1446826E+03 -0.3825356E+02
* 0.1446876E+03 -0.3825210E+02
* 0.1446919E+03 -0.3824719E+02
* 0.1447006E+03 -0.3824723E+02
* 0.1447042E+03 -0.3825078E+02
* 0.1446758E+03 -0.3826229E+02
* 0.1446693E+03 -0.3826255E+02
* END
* !2
* 0.1422483E+03 -0.3839481E+02
* 0.1422436E+03 -0.3839315E+02
* 0.1422496E+03 -0.3839070E+02
* 0.1422543E+03 -0.3839025E+02
* 0.1422574E+03 -0.3839155E+02
* 0.1422467E+03 -0.3840065E+02
* 0.1422433E+03 -0.3840048E+02
* 0.1422420E+03 -0.3839857E+02
* 0.1422436E+03 -0.3839315E+02
* END
* END
*
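* A typical use (the file name is only an illustration):
*   PolygonFileReader reader = new PolygonFileReader(new File("australia_v.poly"));
*   java.awt.geom.Area area = reader.loadPolygon();
*   String name = reader.getPolygonName();   // only valid after loadPolygon()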
* * @author Brett Henderson */ public class PolygonFileReader { /** * Our logger for debug and error -output. */ private static final Logger LOG = Logger.getLogger(PolygonFileReader.class.getName()); /** * Where we read from. */ private Reader fileReader; /** * The filename for error-messages. */ private String polygonFile; /** * The name of the polygon as stated in the file-header. */ private String myPolygonName; /** * Creates a new instance. * * @param polygonFile * The file to read polygon units from. * @param name to report in debug output */ public PolygonFileReader(final InputStream polygonFile, final String name) { this.polygonFile = name; this.fileReader = new InputStreamReader(polygonFile); } /** * Creates a new instance. * * @param polygonFile * The file to read polygon units from. */ public PolygonFileReader(final File polygonFile) { try { this.polygonFile = polygonFile.getName(); this.fileReader = new FileReader(polygonFile); } catch (IOException e) { throw new OsmosisRuntimeException("Unable to read from polygon file " + polygonFile + ".", e); } } /** * Releases any resources remaining open. */ private void cleanup() { if (fileReader != null) { try { fileReader.close(); } catch (Exception e) { LOG.log(Level.SEVERE, "Unable to close polygon file reader.", e); } finally { fileReader = null; } } } /** * Builds an Area configured with the polygon information defined in the * file. * * @return A fully configured area. */ public Area loadPolygon() { try { Area resultArea; BufferedReader bufferedReader; // Create a new area. resultArea = new Area(); // Open the polygon file. bufferedReader = new BufferedReader(fileReader); // Read the file header. myPolygonName = bufferedReader.readLine(); if (myPolygonName == null || myPolygonName.trim().length() == 0) { throw new OsmosisRuntimeException("The file must begin with a header naming the polygon file."); } // We now loop until no more sections are available. while (true) { String sectionHeader; boolean positivePolygon; Area sectionArea; // Read until a non-empty line is obtained. do { // Read the section header. sectionHeader = bufferedReader.readLine(); // It is invalid for the file to end without a global "END" record. if (sectionHeader == null) { throw new OsmosisRuntimeException("File terminated prematurely without a section END record."); } // Remove any whitespace. sectionHeader = sectionHeader.trim(); } while (sectionHeader.length() == 0); // Stop reading when the global END record is reached. if ("END".equals(sectionHeader)) { break; } // If the section header begins with a ! then the polygon is to // be subtracted from the result area. positivePolygon = (sectionHeader.charAt(0) != '!'); // Create an area for this polygon. sectionArea = loadSectionPolygon(bufferedReader); // Add or subtract the section area from the overall area as // appropriate. if (positivePolygon) { resultArea.add(sectionArea); } else { resultArea.subtract(sectionArea); } } return resultArea; } catch (IOException e) { throw new OsmosisRuntimeException("Unable to read from polygon file " + polygonFile + ".", e); } finally { cleanup(); } } /** * Loads an individual polygon from the polygon file. * * @param bufferedReader * The reader connected to the polygon file placed at the first * record of a polygon section. * @return An area representing the section polygon. */ private static Area loadSectionPolygon(BufferedReader bufferedReader) throws IOException { Path2D.Double polygonPath; double[] beginPoint = null; // Create a new path to represent this polygon. 
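// The points read below are appended with lineTo(); when the finished path is
// turned into an Area the ring is effectively treated as closed, so the section
// polygon does not need an explicit closePath().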
polygonPath = new Path2D.Double(); while (true) { String sectionLine; double[] coordinates; // Read until a non-empty line is obtained. do { sectionLine = bufferedReader.readLine(); // It is invalid for the file to end without a section "END" record. if (sectionLine == null) { throw new OsmosisRuntimeException("File terminated prematurely without a section END record."); } // Remove any whitespace. sectionLine = sectionLine.trim(); } while (sectionLine.length() == 0); // Stop reading when the section END record is reached. if ("END".equals(sectionLine)) { break; } // Parse the line into its coordinates. coordinates = parseCoordinates(sectionLine); // Add the current point to the path. if (beginPoint != null) { polygonPath.lineTo(coordinates[0], coordinates[1]); } else { polygonPath.moveTo(coordinates[0], coordinates[1]); beginPoint = coordinates; } } // If we received data, draw another line from the final point back to the beginning point. if (beginPoint != null) { polygonPath.moveTo(beginPoint[0], beginPoint[1]); } // Convert the path into an area and return. Area area = new Area(polygonPath); if (!area.isSingular()) throw new OsmosisRuntimeException("Invalid polygon ring in polygon-file, possibly self-intersecting"); return area; } /** * Parses a coordinate line into its constituent double precision * coordinates. * * @param coordinateLine * The raw file line. * @return A pair of coordinate values, first is longitude, second is * latitude. */ private static double[] parseCoordinates(String coordinateLine) { String[] rawTokens; double[] results; int tokenCount; // Split the line into its sub strings separated by whitespace. rawTokens = coordinateLine.split("\\s"); // Copy the non-zero tokens into a result array. tokenCount = 0; results = new double[2]; for (int i = 0; i < rawTokens.length; i++) { if (rawTokens[i].length() > 0) { // Ensure we have no more than 2 coordinate values. if (tokenCount >= 2) { throw new OsmosisRuntimeException( "A polygon coordinate line must contain 2 numbers, not (" + coordinateLine + ")." ); } // Parse the token into a double precision number. try { results[tokenCount++] = Double.parseDouble(rawTokens[i]); } catch (NumberFormatException e) { throw new OsmosisRuntimeException( "Unable to parse " + rawTokens[i] + " into a double precision number."); } } } // Ensure we found two tokens. if (tokenCount < 2) { throw new OsmosisRuntimeException("Could not find two coordinates on line (" + coordinateLine + ")."); } return results; } /** * This method must only be called after {@link #loadPolygon()}. * @return The name of the polygon as stated in the file-header. */ public String getPolygonName() { return myPolygonName; } } splitter-r653/src/uk/0000775000175300017530000000000014352507253015743 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/0000775000175300017530000000000014352507253016344 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/0000775000175300017530000000000014352507253020125 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/0000775000175300017530000000000014352507254021774 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/AbstractMapProcessor.java0000664000175300017530000000414614352507254026745 0ustar builderbuilder00000000000000/* * Copyright (c) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.util.concurrent.BlockingQueue; public abstract class AbstractMapProcessor implements MapProcessor { public static final int UNASSIGNED = Short.MIN_VALUE; @Override public boolean skipTags(){ return false; } @Override public boolean skipNodes(){ return false; } @Override public boolean skipWays(){ return false; } @Override public boolean skipRels(){ return false; } @Override public void boundTag(Area bounds) { } @Override public void processNode(Node n) { } @Override public void processWay(Way w) { } @Override public void processRelation(Relation r) { } @Override public boolean endMap() { return true; } @Override public int getPhase() { return 1; } @Override public void startFile() { } /** * Simple method that allows all processors to use the producer/consumer pattern */ @Override public final boolean consume(BlockingQueue queue) { while (true) { try { OSMMessage msg = queue.take(); switch (msg.type) { case ELEMENTS: for (Element el : msg.elements) { if (el == null) break; if (el instanceof Node) processNode((Node) el); else if (el instanceof Way) processWay((Way) el); else if (el instanceof Relation) processRelation((Relation) el); } break; case BOUNDS: boundTag(msg.bounds); break; case END_MAP: return endMap(); case START_FILE: startFile(); break; case EXIT: return true; default: break; } } catch (InterruptedException e) { throw new RuntimeException(e); } } } } splitter-r653/src/uk/me/parabola/splitter/Area.java0000664000175300017530000001242514352507254023513 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.awt.Rectangle; import java.awt.geom.Rectangle2D; /** * A map area in map units. There is a constructor available for creating * in lat/long form. * * @author Steve Ratcliffe */ public class Area { public static final Area EMPTY = new Area(); private int mapId; private String name; private final int minLat; private final int minLong; private final int maxLat; private final int maxLong; private Rectangle javaRect; private boolean isJoinable = true; private boolean isPseudoArea; public boolean isJoinable() { return isJoinable; } public void setJoinable(boolean isJoinable) { this.isJoinable = isJoinable; } /** * Create an area from the given Garmin coordinates. We ensure that no dimension is zero. * * @param minLat The western latitude. * @param minLong The southern longitude. * @param maxLat The eastern lat. * @param maxLong The northern long. */ public Area(int minLat, int minLong, int maxLat, int maxLong) { this.minLat = minLat; if (maxLat == minLat) this.maxLat = minLat + 1; else this.maxLat = maxLat; this.minLong = minLong; if (minLong == maxLong) this.maxLong = maxLong + 1; else this.maxLong = maxLong; } /** * Apply bbox to area. 
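* A short example of the intended call pattern (names are illustrative):
*   Area clipped = Area.calcArea(tileArea, otherArea.getRect());
*   // clipped is null when the two areas do not intersect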
* @param area the area * @param bbox the bounding box * @return A new area instance that covers the intersection of area and bbox * or null if they don't intersect */ public static Area calcArea (Area area, Rectangle bbox) { Rectangle dest = new Rectangle(); Rectangle2D.intersect(area.getRect(), bbox, dest); if (dest.getHeight() > 0 && dest.getWidth() > 0) return new Area(dest.y, dest.x, dest.y + dest.height, dest.x + dest.width); return null; } /** * Creates an empty area. */ private Area() { minLat = 0; maxLat = 0; minLong = 0; maxLong = 0; } public boolean verify(){ return ! (minLat > maxLat || minLong > maxLong || minLong < Utils.MIN_LON_MAP_UNITS || maxLong > Utils.MAX_LON_MAP_UNITS || minLat < Utils.MIN_LAT_MAP_UNITS || maxLat > Utils.MAX_LAT_MAP_UNITS); } public Rectangle getRect(){ if (javaRect == null) javaRect = new Rectangle(this.minLong, this.minLat, this.maxLong-this.minLong, this.maxLat-this.minLat); return javaRect; } /** * * @return a new {@link java.awt.geom.Area} instance */ public java.awt.geom.Area getJavaArea(){ return new java.awt.geom.Area(getRect()); } public void setMapId(int mapId) { this.mapId = mapId; } public int getMapId() { return mapId; } public String getName() { return name; } public void setName(String name) { this.name = name; } public int getMinLat() { return minLat; } public int getMinLong() { return minLong; } public int getMaxLat() { return maxLat; } public int getMaxLong() { return maxLong; } public int getWidth() { return maxLong - minLong; } public int getHeight() { return maxLat - minLat; } @Override public String toString() { return "(" + Utils.toDegrees(minLat) + ',' + Utils.toDegrees(minLong) + ") to (" + Utils.toDegrees(maxLat) + ',' + Utils.toDegrees(maxLong) + ')'; } public String toHexString() { return "(0x" + Integer.toHexString(minLat) + ",0x" + Integer.toHexString(minLong) + ") to (0x" + Integer.toHexString(maxLat) + ",0x" + Integer.toHexString(maxLong) + ')'; } public boolean contains(int lat, int lon) { return lat >= minLat && lat <= maxLat && lon >= minLong && lon <= maxLong; } public boolean contains(Node node) { return contains(node.getMapLat(), node.getMapLon()); } /** * * @param other an area * @return true if the other area is inside the Area (it may touch the boundary) */ public final boolean contains(Area other) { return other.getMinLat() >= minLat && other.getMaxLat() <= maxLat && other.getMinLong() >= minLong && other.getMaxLong() <= maxLong; } /** * Checks if this area intersects the given bounding box at least * in one point. 
* * @param bbox an area * @return true if this area intersects the bbox; * false else */ public final boolean intersects(Area bbox) { return minLat <= bbox.getMaxLat() && maxLat >= bbox.getMinLat() && minLong <= bbox.getMaxLong() && maxLong >= bbox.getMinLong(); } public final boolean overlaps(Area bbox) { return minLat < bbox.getMaxLat() && maxLat > bbox.getMinLat() && minLong < bbox.getMaxLong() && maxLong > bbox.getMinLong(); } public Area add(Area area) { return new Area( Math.min(minLat, area.minLat), Math.min(minLong, area.minLong), Math.max(maxLat, area.maxLat), Math.max(maxLong, area.maxLong) ); } public boolean isPseudoArea() { return isPseudoArea; } public void setPseudoArea(boolean isPseudoArea) { this.isPseudoArea = isPseudoArea; } } splitter-r653/src/uk/me/parabola/splitter/AreaDictionary.java0000664000175300017530000001301014352507254025530 0ustar builderbuilder00000000000000/* * Copyright (c) 2011,2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.awt.Rectangle; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; /** * Maps a set containing the used areas to an int value. * An OSM element is written to one or more areas. Every used * combination of areas is translated to an int. * @author Gerd Petermann * */ public class AreaDictionary { private static final int DICT_START = Short.MAX_VALUE; private final Area[] areas; private final ArrayList sets; private final int numOfAreas; private final HashMap index; private final HashSet simpleNeighbours = new HashSet<>(); private final int overlapAmount; /** * Create a dictionary for a given array of areas. * @param overlapAmount * @param areas the array of areas */ AreaDictionary(List areas, int overlapAmount){ this.areas = areas.toArray(new Area[areas.size()]); this.overlapAmount = overlapAmount; this.numOfAreas = areas.size(); sets = new ArrayList<>(); index = new HashMap<>(areas.size() * 4, 0.5f); init(); } /** * Initialize the dictionary with sets containing a single area. */ private void init() { ArrayList rectangles = new ArrayList<>(numOfAreas); ArrayList areaSets = new ArrayList<>(numOfAreas); for (int i = 0; i < numOfAreas; i++) { AreaSet b = new AreaSet(i); translate(b); rectangles.add(Utils.area2Rectangle(areas[i], 0)); areaSets.add(b); } findSimpleNeigbours(rectangles, areaSets); System.out.println("cached " + simpleNeighbours.size() + " combinations of areas that form rectangles."); } /** * Calculate the int value for a given AreaSet. The AreaSet must not * contain values higher than numOfAreas. * @param areaSet the AreaSet * @return an Integer value that identifies this AreaSet, never null */ public Integer translate(final AreaSet areaSet) { Integer combiIndex = index.get(areaSet); if (combiIndex == null) { combiIndex = (sets.size() - DICT_START); if (combiIndex == Integer.MAX_VALUE) { throw new SplitFailedException("areaDictionary is full. 
Try to decrease number of areas."); } AreaSet set = new AreaSet(areaSet); set.lock(); sets.add(set); index.put(set, combiIndex); if (sets.size() % 1000 == 0) System.out.println("dictionary contains now " + Utils.format(sets.size()) + " entries"); } return combiIndex; } /** * Find those areas that build rectangles when they are * added together. A way or relation that lies exactly within * such a combination cannot cross other areas. * @param rectangles * @param areaSets */ private void findSimpleNeigbours(ArrayList rectangles, ArrayList areaSets){ ArrayList newRectangles = new ArrayList<>(); ArrayList newAreaSets = new ArrayList<>(); for (int i = 0; i < rectangles.size(); i++) { Rectangle r1 = rectangles.get(i); for (int j = i + 1; j < rectangles.size(); j++) { Rectangle r2 = rectangles.get(j); boolean isSimple = r1.y == r2.y && r1.height == r2.height && (r1.x == r2.getMaxX() || r2.x == r1.getMaxX()) || (r1.x == r2.x && r1.width == r2.width && (r1.y == r2.getMaxY() || r2.y == r1.getMaxY())); if (isSimple) { AreaSet simpleNeighbour = new AreaSet(areaSets.get(i)); simpleNeighbour.or(areaSets.get(j)); if (simpleNeighbour.cardinality() <= 10 && !simpleNeighbours.contains(simpleNeighbour)) { simpleNeighbours.add(simpleNeighbour); Rectangle pair = new Rectangle(r1); pair.add(r2); newRectangles.add(pair); newAreaSets.add(simpleNeighbour); } } } } if (!newRectangles.isEmpty()) { rectangles.addAll(newRectangles); areaSets.addAll(newAreaSets); newRectangles = null; newAreaSets = null; if (simpleNeighbours.size() < 1000) findSimpleNeigbours(rectangles, areaSets); } } /** * Return the AreaSet that is related to the int value. * The caller must make sure that the index is valid. * @param idx a value that was returned by the translate() method. * @return the AreaSet */ public AreaSet getSet(final int idx) { return sets.get(DICT_START + idx); } /** * return the number of sets in this dictionary * @return the number of sets in this dictionary */ public int size() { return sets.size(); } public int getNumOfAreas() { return numOfAreas; } public boolean mayCross(AreaSet areaSet) { return !simpleNeighbours.contains(areaSet); } public Area getArea(int idx) { return areas[idx]; } public Area getExtendedArea(int idx) { Area bounds = areas[idx]; if (overlapAmount == 0) return bounds; return new Area(bounds.getMinLat() - overlapAmount, bounds.getMinLong() - overlapAmount, bounds.getMaxLat() + overlapAmount, bounds.getMaxLong() + overlapAmount); } public List getAreas() { return Collections.unmodifiableList(Arrays.asList(areas)); } public static int translate(int singleWriterId) { return (singleWriterId - DICT_START); } } splitter-r653/src/uk/me/parabola/splitter/AreaGrid.java0000664000175300017530000002022014352507254024311 0ustar builderbuilder00000000000000/* * Copyright (c) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.util.BitSet; /** * A grid that covers the area covered by all areas. Each grid element contains * information about the tiles that are intersecting the grid element and whether * the grid element lies completely within such a tile area. 
* This is used to minimize the needed tests when analyzing coordinates of node coordinates. * @author GerdP * */ public class AreaGrid implements AreaIndex{ private final Grid grid; protected final AreaGridResult r; protected final AreaDictionary areaDictionary; /** * Create a grid to speed up the search of area candidates. * @param areaDictionary */ AreaGrid(AreaDictionary areaDictionary) { this.areaDictionary = areaDictionary; r = new AreaGridResult(); grid = new Grid(null, null); } @Override public Area getBounds(){ return grid.getBounds(); } @Override public AreaGridResult get (final Node n){ return grid.get(n.getMapLat(),n.getMapLon()); } @Override public AreaGridResult get (int lat, int lon){ return grid.get(lat, lon); } private class Grid { private static final int TOP_GRID_DIM_LON = 512; private static final int TOP_GRID_DIM_LAT = 512; private static final int SUB_GRID_DIM_LON = 32; private static final int SUB_GRID_DIM_LAT = 32; private static final int MIN_GRID_LAT = 2048; private static final int MIN_GRID_LON = 2048; private static final int MAX_TESTS = 10; private int gridDivLon, gridDivLat; private int gridMinLat, gridMinLon; // bounds of the complete grid private Area bounds = null; private int[][] indexGrid; private BitSet[] testGrid; private Grid[][] subGrid = null; private final int maxCompares; private int usedSubGridElems = 0; private final int gridDimLon; private final int gridDimLat; public Grid(AreaSet usedAreas, Area bounds) { // each element contains an index to the areaDictionary or unassigned if (usedAreas == null){ gridDimLon = TOP_GRID_DIM_LON; gridDimLat = TOP_GRID_DIM_LAT; } else{ gridDimLon = SUB_GRID_DIM_LON; gridDimLat = SUB_GRID_DIM_LAT; } indexGrid = new int[gridDimLon + 1][gridDimLat + 1]; // is true for an element if the list of areas needs to be tested testGrid = new BitSet[gridDimLon + 1]; for (int lon = 0; lon < testGrid.length; lon++) { testGrid[lon] = new BitSet(gridDimLat + 1); } this.bounds = bounds; maxCompares = fillGrid(usedAreas); } public Area getBounds() { return bounds; } /** * Create the grid and fill each element * @param usedAreas * @return maximum number of area tests needed for any returned GridResult */ private int fillGrid(AreaSet usedAreas) { int gridStepLon, gridStepLat; if (bounds == null) { // calculate grid area Area tmpBounds = null; for (int i = 0; i < areaDictionary.getNumOfAreas(); i++) { Area extBounds = areaDictionary.getExtendedArea(i); if (usedAreas == null || usedAreas.get(i)) tmpBounds = (tmpBounds == null) ? 
extBounds : tmpBounds.add(extBounds); } if (tmpBounds == null) return 0; // create new Area to make sure that we don't update the existing area bounds = new Area(tmpBounds.getMinLat() , tmpBounds.getMinLong(), tmpBounds.getMaxLat(), tmpBounds.getMaxLong()); } // save these results for later use gridMinLon = bounds.getMinLong(); gridMinLat = bounds.getMinLat(); // calculate the grid element size int gridWidth = bounds.getWidth(); int gridHeight = bounds.getHeight(); gridDivLon = Math.round((gridWidth / gridDimLon + 0.5f) ); gridDivLat = Math.round((gridHeight / gridDimLat + 0.5f)); gridStepLon = Math.round(((gridWidth) / gridDimLon) + 0.5f); gridStepLat = Math.round(((gridHeight) / gridDimLat) + 0.5f); assert gridStepLon * gridDimLon >= gridWidth : "gridStepLon is too small"; assert gridStepLat * gridDimLat >= gridHeight : "gridStepLat is too small"; int maxAreaSearch = 0; AreaSet[][] gridAreas = new AreaSet[gridDimLon+1][gridDimLat+1]; for (int j = 0; j < areaDictionary.getNumOfAreas(); j++) { Area extBounds = areaDictionary.getExtendedArea(j); if (!(usedAreas == null || usedAreas.get(j))) continue; int minLonArea = extBounds.getMinLong(); int maxLonArea = extBounds.getMaxLong(); int minLatArea = extBounds.getMinLat(); int maxLatArea = extBounds.getMaxLat(); int startLon = Math.max(0,(minLonArea- gridMinLon ) / gridDivLon); int endLon = Math.min(gridDimLon,(maxLonArea - gridMinLon ) / gridDivLon); int startLat = Math.max(0,(minLatArea- gridMinLat ) / gridDivLat); int endLat = Math.min(gridDimLat,(maxLatArea - gridMinLat ) / gridDivLat); // add this area to all grid elements that intersect with it for (int lon = startLon; lon <= endLon; lon++) { int testMinLon = gridMinLon + gridStepLon * lon; for (int lat = startLat; lat <= endLat; lat++) { int testMinLat = gridMinLat + gridStepLat * lat; if (gridAreas[lon][lat]== null) gridAreas[lon][lat] = new AreaSet(); // add this area gridAreas[lon][lat].set(j); if (!extBounds.contains(testMinLat, testMinLon) || !extBounds.contains(testMinLat+ gridStepLat, testMinLon+ gridStepLon)){ // grid area is not completely within area testGrid[lon].set(lat); } } } } for (int lon = 0; lon <= gridDimLon; lon++) { for (int lat = 0; lat <= gridDimLat; lat++) { AreaSet areaSet = (gridAreas[lon][lat]); if (areaSet == null) indexGrid[lon][lat] = AbstractMapProcessor.UNASSIGNED; else { areaSet.lock(); if (testGrid[lon].get(lat)){ int numTests = areaSet.cardinality(); if (numTests > MAX_TESTS && gridStepLat > MIN_GRID_LAT && gridStepLon > MIN_GRID_LON){ Area gridPart = new Area(gridMinLat + gridStepLat * lat, gridMinLon + gridStepLon * lon, gridMinLat + gridStepLat * (lat+1), gridMinLon + gridStepLon * (lon+1)); // late allocation if (subGrid == null) subGrid = new Grid [gridDimLon + 1][gridDimLat + 1]; usedSubGridElems++; subGrid[lon][lat] = new Grid(areaSet, gridPart); numTests = subGrid[lon][lat].getMaxCompares() + 1; maxAreaSearch = Math.max(maxAreaSearch, numTests); continue; } maxAreaSearch = Math.max(maxAreaSearch, numTests); } indexGrid[lon][lat] = areaDictionary.translate(areaSet); } } } System.out.println("AreaGridTree [" + gridDimLon + "][" + gridDimLat + "] for grid area " + bounds + " requires max. 
" + maxAreaSearch + " checks for each node (" + usedSubGridElems + " sub grid(s))" ); return maxAreaSearch; } /** * The highest number of required tests * @return */ private int getMaxCompares() { return maxCompares; } /** * For a given node, return the list of areas that may contain it * @param node the node * @return a reference to an {@link AreaGridResult} instance that contains * the list of candidates and a boolean that shows whether this list * has to be verified or not. */ public AreaGridResult get(final int lat, final int lon){ if (!bounds.contains(lat, lon)) return null; int gridLonIdx = (lon - gridMinLon ) / gridDivLon; int gridLatIdx = (lat - gridMinLat ) / gridDivLat; if (subGrid != null){ Grid sub = subGrid[gridLonIdx][gridLatIdx]; if (sub != null){ // get list of area candidates from sub grid return sub.get(lat, lon); } } // get list of area candidates from grid int idx = indexGrid[gridLonIdx][gridLatIdx]; if (idx == AbstractMapProcessor.UNASSIGNED) return null; r.testNeeded = testGrid[gridLonIdx].get(gridLatIdx); r.set = areaDictionary.getSet(idx); return r; } } } splitter-r653/src/uk/me/parabola/splitter/AreaGridResult.java0000664000175300017530000000142414352507254025515 0ustar builderbuilder00000000000000/* * Copyright (c) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * A helper class to combine the results of the {@link AreaGrid} * @author GerdP * */ public class AreaGridResult{ AreaSet set; // set of indexes to the area dictionary boolean testNeeded; // true: the list must be checked with the Area.contains() method } splitter-r653/src/uk/me/parabola/splitter/AreaIndex.java0000664000175300017530000000215114352507254024476 0ustar builderbuilder00000000000000/* * Copyright (c) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * * @author Gerd Petermann * */ public interface AreaIndex{ /** * @return the bounding box of the areas. */ public Area getBounds(); /** * Return a set of area candidates for this node. 
* @param n the node * @return a reference to a static AreaGridResult instance */ public AreaGridResult get (final Node n); /** * Return a set of area candidates for these coordinates * @param lat the latitude value in map units * @param lon the longitude value in map units * @return a reference to a static AreaGridResult instance */ public AreaGridResult get (int lat, int lon); } splitter-r653/src/uk/me/parabola/splitter/AreaList.java0000664000175300017530000002503414352507254024347 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.awt.Point; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.io.Reader; import java.io.Writer; import java.util.ArrayList; import java.util.Collections; import java.util.Date; import java.util.List; import java.util.Locale; import java.util.Set; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.xmlpull.v1.XmlPullParserException; import uk.me.parabola.splitter.geo.City; import uk.me.parabola.splitter.geo.CityFinder; import uk.me.parabola.splitter.geo.CityLoader; import uk.me.parabola.splitter.geo.DefaultCityFinder; import uk.me.parabola.splitter.kml.KmlParser; import uk.me.parabola.splitter.kml.KmlWriter; import uk.me.parabola.splitter.solver.PolygonDesc; /** * A list of areas. It can be read and written to a file. */ public class AreaList { private final List areas; private final String description; private String geoNamesFile; /** * This constructor is called when you are going to be reading in the list from * a file, rather than making it from an already constructed list. */ public AreaList(String description) { this(new ArrayList(), description); } public AreaList(List areas, String description) { this.description = description; this.areas = areas; } /** * Write out a file containing the list of areas that we calculated. This allows us to reuse the * same areas on a subsequent run without having to re-calculate them. * * @param filename The filename to write to. 
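* Each area is written as one line of the form (coordinates in map units,
* the values below are only an illustration):
*   00000001: 2437120,-5308416 to 2445312,-5296128
* followed by a comment line giving the same bounds in degrees.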
*/ public void write(String filename) { try (Writer w = new FileWriter(filename); PrintWriter pw = new PrintWriter(w);) { pw.println("# List of areas"); pw.format("# Generated %s%n", new Date()); pw.println("#"); for (Area area : areas) { pw.format(Locale.ROOT, "%08d: %d,%d to %d,%d%n", area.getMapId(), area.getMinLat(), area.getMinLong(), area.getMaxLat(), area.getMaxLong()); pw.format(Locale.ROOT, "# : %f,%f to %f,%f%n", Utils.toDegrees(area.getMinLat()), Utils.toDegrees(area.getMinLong()), Utils.toDegrees(area.getMaxLat()), Utils.toDegrees(area.getMaxLong())); pw.println(); } } catch (IOException e) { System.err.println("Could not write areas.list file, processing continues"); } } public void read(String filename) throws IOException { String lower = filename.toLowerCase(); if (lower.endsWith(".kml") || lower.endsWith(".kml.gz") || lower.endsWith(".kml.bz2")) { readKml(filename); } else { readList(filename); } } /** * Read in an area definition file that we previously wrote. * Obviously other tools could create the file too. */ private void readList(String filename) throws IOException { areas.clear(); Pattern pattern = Pattern.compile("([0-9]{8})[ ]*:" + "[ ]*([\\p{XDigit}x-]+),([\\p{XDigit}x-]+)" + " to ([\\p{XDigit}x-]+),([\\p{XDigit}x-]+)"); try (Reader r = new FileReader(filename); BufferedReader br = new BufferedReader(r)) { String line; while ((line = br.readLine()) != null) { line = line.trim(); if (line.isEmpty() || line.charAt(0) == '#') continue; try { Matcher matcher = pattern.matcher(line); matcher.find(); String mapid = matcher.group(1); Area area = new Area( Integer.decode(matcher.group(2)), Integer.decode(matcher.group(3)), Integer.decode(matcher.group(4)), Integer.decode(matcher.group(5))); if (!area.verify()) throw new IllegalArgumentException("Invalid area in file "+ filename+ ": " + line); area.setMapId(Integer.parseInt(mapid)); areas.add(area); } catch (IllegalStateException e) { throw new IllegalArgumentException("Cannot parse line " + line); } } } catch (NumberFormatException e) { throw new IllegalArgumentException("Bad number in areas list file"); } } private void readKml(String filename) throws IOException { try { KmlParser parser = new KmlParser(); parser.setReader(Utils.openFile(filename, false)); parser.parse(); List newAreas = parser.getAreas(); areas.clear(); areas.addAll(newAreas); } catch (XmlPullParserException e) { throw new IOException("Unable to parse KML file " + filename, e); } } public List getAreas() { return Collections.unmodifiableList(areas); } public void dump() { System.out.println("Areas read from file"); for (Area area : areas) { System.out.println(area.getMapId() + " " + area.toString()); } } public void dumpHex() { System.out.println(areas.size() + " areas:"); for (Area area : areas) { System.out.format("Area %08d: %d,%d to %d,%d covers %s", area.getMapId(), area.getMinLat(), area.getMinLong(), area.getMaxLat(), area.getMaxLong(), area.toHexString()); if (area.getName() != null) System.out.print(' ' + area.getName()); System.out.println(); } } /** * Write out a poly file containing the bounding polygon for the areas * that we calculated. * * @param filename The poly filename to write to. 
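* The output uses the same osmosis polygon format that PolygonFileReader
* understands: a header line, one section per ring (prefixed with ! for
* holes) and a final END record.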
*/ public void writePoly(String filename) { java.awt.geom.Area polygonArea = new java.awt.geom.Area(); for (Area area : areas) { polygonArea.add(new java.awt.geom.Area(Utils.area2Rectangle(area, 0))); } List> shapes = Utils.areaToShapes(polygonArea); // start with outer polygons Collections.reverse(shapes); try (PrintWriter pw = new PrintWriter(filename)) { pw.println("area"); for (int i = 0; i < shapes.size(); i++) { List shape = shapes.get(i); if (Utils.clockwise(shape)) pw.println(i + 1); else pw.println("!" + (i + 1)); Point point = null; for (int j = 0; j < shape.size(); j++) { point = shape.get(j); if (j > 0 && j + 1 < shape.size()) { Point lastPoint = shape.get(j - 1); Point nextPoint = shape.get(j + 1); if ((point.x == nextPoint.x && point.x == lastPoint.x) || (point.y == nextPoint.y && point.y == lastPoint.y)) continue; } pw.format(Locale.ROOT, " %f %f%n", Utils.toDegrees(point.x), Utils.toDegrees(point.y)); } pw.println("END"); } pw.println("END"); } catch (IOException e) { System.err.println("Could not write polygon file " + filename + ", processing continues"); } } /** * Write a file that can be given to mkgmap that contains the correct arguments * for the split file pieces. You are encouraged to edit the file and so it * contains a template of all the arguments that you might want to use. */ public void writeArgsFile(String filename, String outputType, int startMapId) { try (PrintWriter w = new PrintWriter(new FileWriter(filename))){ w.println("#"); w.println("# This file can be given to mkgmap using the -c option"); w.println("# Please edit it first to add a description of each map."); w.println("#"); w.println(); w.println("# You can set the family id for the map"); w.println("# family-id: 980"); w.println("# product-id: 1"); w.println(); w.println("# Following is a list of map tiles. Add a suitable description"); w.println("# for each one."); int mapId = startMapId; if (mapId % 100 == 0) mapId++; for (Area a : areas) { w.println(); w.format("mapname: %08d%n", (startMapId <0) ? a.getMapId() : mapId++); if (a.getName() == null) w.println("# description: OSM Map"); else w.println("description: " + (a.getName().length() > 50 ? 
a.getName().substring(0, 50) : a.getName())); String ext; if("pbf".equals(outputType)) ext = ".osm.pbf"; else if("o5m".equals(outputType)) ext = ".o5m"; else ext = ".osm.gz"; w.format("input-file: %08d%s%n", a.getMapId(), ext); } w.println(); } catch (IOException e) { throw new SplitFailedException("Could not write template.args file " + filename, e.getCause()); } } public void setAreaNames() { CityFinder cityFinder = null; if (geoNamesFile != null){ CityLoader cityLoader = new CityLoader(true); List cities = cityLoader.load(geoNamesFile); if (cities == null) return; cityFinder = new DefaultCityFinder(cities); } for (Area area : getAreas()) { area.setName(description); if (cityFinder == null) continue; // Decide what to call the area Set found = cityFinder.findCities(area); City bestMatch = null; for (City city : found) { if (bestMatch == null || city.getPopulation() > bestMatch.getPopulation()) { bestMatch = city; } } if (bestMatch != null) area.setName(bestMatch.getCountryCode() + '-' + bestMatch.getName()); } } /** * * @param mapId */ public void setMapIds(int mapId) { for (Area area : getAreas()) { area.setMapId(mapId++); } } public void setGeoNamesFile(String geoNamesFile) { this.geoNamesFile = geoNamesFile; } public void setAreas(List calculateAreas) { areas.clear(); areas.addAll(calculateAreas); } /** * * @param fileOutputDir * @param polygons * @param kmlOutputFile * @param outputType */ public void writeListFiles(File fileOutputDir, List polygons, String kmlOutputFile, String outputType) { for (PolygonDesc pd : polygons) { List areasPart = new ArrayList<>(); for (uk.me.parabola.splitter.Area a : areas) { if (pd.getArea().intersects(a.getRect())) areasPart.add(a); } if (kmlOutputFile != null) { File out = new File(kmlOutputFile); String kmlOutputFilePart = pd.getName() + "-" + out.getName(); if (out.getParent() != null) out = new File(out.getParent(), kmlOutputFilePart); else out = new File(kmlOutputFilePart); if (out.getParent() == null) out = new File(fileOutputDir, kmlOutputFilePart); KmlWriter.writeKml(out.getPath(), areasPart); } AreaList al = new AreaList(areasPart, null); al.setGeoNamesFile(geoNamesFile); al.writePoly(new File(fileOutputDir, pd.getName() + "-" + "areas.poly").getPath()); al.writeArgsFile(new File(fileOutputDir, pd.getName() + "-" + "template.args").getPath(), outputType, pd.getMapId()); } } } splitter-r653/src/uk/me/parabola/splitter/AreaSet.java0000664000175300017530000001042514352507254024165 0ustar builderbuilder00000000000000/* * Copyright (c) 2016 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.util.Arrays; import java.util.Iterator; import it.unimi.dsi.fastutil.ints.IntArrayList; /** * A partly set implementation. Used as a replacement for BitSet which is slow when * values are rather high, e.g. > 50000. * * @author Gerd Petermann * */ public final class AreaSet implements Iterable { private static final int BIN_SEARCH_LIMIT = 10; private final IntArrayList list; private boolean locked; /** Create empty set. */ public AreaSet() { list = new IntArrayList(); } /** Copy constructor creates set with the same entries. 
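	 * <p>Usage sketch for this class (illustrative only; {@code other} below
	 * stands for any other AreaSet and the indices are invented tile numbers):
	 * <pre>
	 *   AreaSet writers = new AreaSet();
	 *   writers.set(3);
	 *   writers.set(7);
	 *   AreaSet copy = new AreaSet(writers); // independent copy of {3, 7}
	 *   copy.or(other);                      // union with another set
	 *   copy.lock();                         // later set()/clear() calls now throw
	 * </pre>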
* @param other set to clone */ public AreaSet(final AreaSet other) { if (!other.isEmpty()) { list = new IntArrayList(other.list); } else list = new IntArrayList(); } /** * Create new set with one element. * @param index the index of the element */ AreaSet(final int index) { list = new IntArrayList(); list.add(index); } /** * Lock this set. A locked set cannot be changed. */ public void lock() { this.list.trim(); this.locked = true; } /** * Returns true if the element with the index * {@code bitIndex} is currently in this set; false otherwise. * * @param index the bit index * @return the value of the bit with the specified index */ public boolean get(final int index) { if (list.size() < BIN_SEARCH_LIMIT) { return list.contains(index); } return Arrays.binarySearch(list.elements(), 0, list.size(), index) >= 0; } /** * Add the element to the set. No effect if index is already in the set. * @param index the element */ public void set(final int index) { if (locked) throw new IllegalAccessError("AreaSet is locked"); if (list.isEmpty()) { list.add(index); } else { int p = Arrays.binarySearch(list.elements(), 0, list.size(), index); if (p < 0) { list.add(-p - 1, index); } } } /** * Remove the element from the set. * @param index the element */ public void clear(final int index) { if (locked) throw new IllegalAccessError("AreaSet is locked"); int pos; if (list.size() < BIN_SEARCH_LIMIT) { list.rem(index); } else { pos = Arrays.binarySearch(list.elements(), 0, list.size(), index); if (pos >= 0) { list.removeInt(pos); } } } /** * Merge with other set. Result contains elements of both sets. * @param other the other set */ void or(final AreaSet other) { if (locked) throw new IllegalAccessError("AreaSet is locked"); if (other.isEmpty()) return; if (list.isEmpty()) { list.addAll(other.list); } else { for (int i : other.list) { set(i); } } } /** * Remove elements in this set which are contained in the other set. * @param other the other set */ public void subtract(final AreaSet other) { if (locked) throw new IllegalAccessError("AreaSet is locked"); for (int i : other.list) { clear(i); } } /** * @return number of elements in this set */ public int cardinality() { return list.size(); } /** * @return true if this set contains no elements. */ public boolean isEmpty() { return cardinality() == 0; } /** * remove all elements from the set. Doesn't free storage. */ public void clear() { if (locked) throw new IllegalAccessError("AreaSet is locked"); list.clear(); } /** * @return an iterator over this set. */ @Override public Iterator iterator() { return list.iterator(); } @Override public int hashCode() { return list.hashCode(); } @Override public boolean equals(final Object obj) { if (!(obj instanceof AreaSet)) return false; if (this == obj) return true; AreaSet other = (AreaSet) obj; if (isEmpty() && other.isEmpty()) return true; return list.equals(other.list); } @Override public String toString() { return list.toString(); } } splitter-r653/src/uk/me/parabola/splitter/BackgroundInputStream.java0000664000175300017530000001037414352507254027117 0ustar builderbuilder00000000000000/* * Copyright (c) 2010, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.io.IOException; import java.io.InputStream; import java.util.Arrays; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; public class BackgroundInputStream extends InputStream { private static final int QUEUE_SIZE = 5; private static final int BUFFER_SIZE = 32768; private static final byte[] EOF_MARKER = new byte[0]; // These variables are accessed from both threads private final BlockingQueue inQueue; private final BlockingQueue recycleQueue; private final int bufferSize; protected final InputStream sourceStream; protected volatile boolean closed; // These variables are only accessed from the reader thread private byte[] currentBuffer; private int currentIndex; private Thread loaderThread; public BackgroundInputStream(InputStream source) { this(source, QUEUE_SIZE, BUFFER_SIZE); } public BackgroundInputStream(InputStream source, int queueSize, int bufferSize) { inQueue = new ArrayBlockingQueue<>(queueSize); recycleQueue = new ArrayBlockingQueue<>(queueSize + 1); sourceStream = source; this.bufferSize = bufferSize; } @Override public int read() throws IOException { if (!ensureBuffer()) { return -1; } int b = currentBuffer[currentIndex++]; recycle(); return b; } @Override public int read(byte[] b) throws IOException { return read(b, 0, b.length); } @Override public int read(byte[] b, int off, int len) throws IOException { int count = 0; while (len > 0) { if (!ensureBuffer()) { return count == 0 ? -1 : count; } int remaining = currentBuffer.length - currentIndex; int bytesToCopy = Math.min(remaining, len); System.arraycopy(currentBuffer, currentIndex, b, off, bytesToCopy); count += bytesToCopy; currentIndex += bytesToCopy; off += bytesToCopy; len -= bytesToCopy; recycle(); } return count; } private boolean ensureBuffer() throws IOException { if (loaderThread == null) { loaderThread = new Thread(new Loader(), "BackgroundInputStream"); loaderThread.start(); } if (currentBuffer == null) { try { currentBuffer = inQueue.take(); } catch (InterruptedException e) { throw new IOException("Failed to take a buffer from the queue", e); } currentIndex = 0; } return currentBuffer != EOF_MARKER; } private void recycle() { if (currentIndex == currentBuffer.length) { if (currentIndex == bufferSize) { recycleQueue.offer(currentBuffer); } currentBuffer = null; } } @Override public int available() throws IOException { return currentBuffer == null ? 
0 : currentBuffer.length; } @Override public void close() throws IOException { closed = true; inQueue.clear(); recycleQueue.clear(); currentBuffer = null; } private class Loader implements Runnable { @Override public void run() { { int bytesRead = 0; while (!closed) { byte[] buffer = recycleQueue.poll(); if (buffer == null) { buffer = new byte[bufferSize]; } int offset = 0; try { while ((offset < bufferSize) && ((bytesRead = sourceStream.read(buffer, offset, bufferSize - offset)) != -1)) { offset += bytesRead; } } catch (IOException e) { throw new RuntimeException("Unable to read from stream", e); } if (offset < bufferSize) { buffer = Arrays.copyOf(buffer, offset); } try { inQueue.put(buffer); if (bytesRead == -1) { inQueue.put(EOF_MARKER); closed = true; } } catch (InterruptedException e) { throw new RuntimeException("Unable to put data onto queue", e); } if (closed) { try { sourceStream.close(); } catch (IOException e) { throw new RuntimeException("Unable to close source stream", e); } } } } } } } splitter-r653/src/uk/me/parabola/splitter/Convert.java0000664000175300017530000000453714352507254024270 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * Conversion utility methods * * @author Chris Miller */ public class Convert { private static final double[] PowersOfTen = new double[] { 10d, 100d, 1000d, 10000d, 100000d, 1000000d, 10000000d, 100000000d, 1000000000d, 10000000000d, 100000000000d, 1000000000000d, 10000000000000d, 100000000000000d, 1000000000000000d, 10000000000000000d, 100000000000000000d, 1000000000000000000d, 10000000000000000000d, }; /** * Parses a string into a double. This code is optimised for performance * when parsing typical doubles encountered in .osm files. * * @param cs the characters to parse into a double * @return the double value represented by the string. * @throws NumberFormatException if the value failed to parse. */ public static double parseDouble(String cs) throws NumberFormatException { int end = Math.min(cs.length(), 19); // No point trying to handle more digits than a double precision number can deal with int i = 0; char c = cs.charAt(i); boolean isNegative = (c == '-'); if ((isNegative || (c == '+')) && (++i < end)) c = cs.charAt(i); long decimal = 0; int decimalPoint = -1; while (true) { int digit = c - '0'; if ((digit >= 0) && (digit < 10)) { long tmp = decimal * 10 + digit; if (tmp < decimal) throw new NumberFormatException("Overflow! Too many digits in " + cs); decimal = tmp; } else if ((c == '.') && (decimalPoint < 0)) decimalPoint = i; else { // We're out of our depth, let the JDK have a go. 
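			// (Worked example of the fast path above, for illustration: for "52.125"
			//  the loop accumulates decimal=52125 with decimalPoint=2 and ends with i=6,
			//  so the result is 52125 / PowersOfTen[6 - 2 - 2] = 52125 / 1000.0 = 52.125.)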
This is *much* slower return Double.parseDouble(cs); } if (++i >= end) break; c = cs.charAt(i); } if (isNegative) decimal = -decimal; if (decimalPoint >= 0 && decimalPoint < i - 1) return decimal / PowersOfTen[i - decimalPoint - 2]; return decimal; } } splitter-r653/src/uk/me/parabola/splitter/DataStorer.java0000664000175300017530000001346114352507254024714 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.io.File; import java.io.IOException; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; import uk.me.parabola.splitter.tools.Long2IntClosedMapFunction; import uk.me.parabola.splitter.tools.OSMId2ObjectMap; import uk.me.parabola.splitter.tools.SparseLong2IntMap; import uk.me.parabola.splitter.writer.OSMWriter; /** * Stores data that is needed in different passes of the program. * * @author GerdP * */ public class DataStorer { public static final int NODE_TYPE = 0; public static final int WAY_TYPE = 1; public static final int REL_TYPE = 2; private final int numOfAreas; private final Long2IntClosedMapFunction[] maps = new Long2IntClosedMapFunction[3]; private final AreaDictionary areaDictionary; private final AreaIndex areaIndex; private SparseLong2IntMap usedWays = null; private final OSMId2ObjectMap usedRels = new OSMId2ObjectMap<>(); private boolean idsAreNotSorted; private OSMWriter[] writers; /** * map with relations that should be complete and are written to only one * tile */ private final Long2ObjectOpenHashMap oneDistinctAreaOnlyRels = new Long2ObjectOpenHashMap<>(); private final OSMId2ObjectMap oneTileOnlyRels = new OSMId2ObjectMap<>(); /** * Create a dictionary for a given number of writers * * @param overlapAmount * @param numOfWriters * the number of writers that are used */ DataStorer(List areas, int overlapAmount) { this.numOfAreas = areas.size(); this.areaDictionary = new AreaDictionary(areas, overlapAmount); this.areaIndex = new AreaGrid(areaDictionary); return; } public int getNumOfAreas() { return numOfAreas; } public AreaDictionary getAreaDictionary() { return areaDictionary; } public Area getArea(int idx) { return areaDictionary.getArea(idx); } public Area getExtendedArea(int idx) { return areaDictionary.getExtendedArea(idx); } public void setWriters(OSMWriter[] writers) { this.writers = writers; } public void setWriterMap(int type, Long2IntClosedMapFunction nodeWriterMap) { maps[type] = nodeWriterMap; } public Long2IntClosedMapFunction getWriterMap(int type) { return maps[type]; } public AreaIndex getGrid() { return areaIndex; } public SparseLong2IntMap getUsedWays() { return usedWays; } public OSMId2ObjectMap getUsedRels() { return usedRels; } public void setUsedWays(SparseLong2IntMap ways) { usedWays = ways; } public boolean isIdsAreNotSorted() { return idsAreNotSorted; } public void setIdsAreNotSorted(boolean idsAreNotSorted) { this.idsAreNotSorted = idsAreNotSorted; } public void restartWriterMaps() { for (Long2IntClosedMapFunction map : maps) { if (map 
!= null) { try { map.close(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } } } public void switchToSeqAccess(File fileOutputDir) throws IOException { boolean msgWritten = false; long start = System.currentTimeMillis(); for (Long2IntClosedMapFunction map : maps) { if (map != null) { if (!msgWritten) { System.out.println("Writing results of MultiTileAnalyser to temp files ..."); msgWritten = true; } map.switchToSeqAccess(fileOutputDir); } } System.out.println("Writing temp files took " + (System.currentTimeMillis() - start) + " ms"); } public void finish() { for (Long2IntClosedMapFunction map : maps) { if (map != null) map.finish(); } } public void stats(final String prefix) { for (Long2IntClosedMapFunction map : maps) { if (map != null) map.stats(prefix); } } public OSMWriter[] getWriters() { return writers; } public void storeRelationAreas(long id, AreaSet areaSet) { oneDistinctAreaOnlyRels.put(id, areaDictionary.translate(areaSet)); } public Integer getOneTileOnlyRels(long id) { return oneTileOnlyRels.get(id); } /** * If the ids in oneTileOnlyRels were produced with a different set * of areas we have to translate the values * * @param distinctAreas * list of distinct (non-overlapping) areas * @param distinctDataStorer */ public void translateDistinctToRealAreas(DataStorer distinctDataStorer) { List distinctAreas = distinctDataStorer.getAreaDictionary().getAreas(); Map map = new HashMap<>(); for (Area distinctArea : distinctAreas) { if (!distinctArea.isPseudoArea()) { AreaSet w = new AreaSet(); for (int i = 0; i < getNumOfAreas(); i++) { if (this.areaDictionary.getArea(i).contains(distinctArea)) { w.set(i); } } map.put(distinctArea, this.areaDictionary.translate(w)); } } for (Entry e : distinctDataStorer.oneDistinctAreaOnlyRels.entrySet()) { AreaSet singleArea = distinctDataStorer.getAreaDictionary().getSet(e.getValue()); assert singleArea.cardinality() == 1; int pos = singleArea.iterator().next(); if (!distinctAreas.get(pos).isPseudoArea()) { Integer areaIdx = map.get(distinctAreas.get(pos)); if (areaIdx == null) throw new SplitFailedException("failed to find index for area " + distinctAreas.get(pos)); oneTileOnlyRels.put(e.getKey(), areaIdx); } else { oneTileOnlyRels.put(e.getKey(), AbstractMapProcessor.UNASSIGNED); } } } } splitter-r653/src/uk/me/parabola/splitter/Element.java0000664000175300017530000000357414352507254024241 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
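 *
 * Editorial note (illustrative only): the tag API declared below is used along
 * these lines, where e stands for any concrete Element such as a Node or Way
 * and the tag values are invented:
 *
 *   e.addTag("highway", "residential");
 *   e.addTag("created_by", "editor");   // silently dropped, addTag ignores created_by
 *   String v = e.getTag("highway");     // "residential"
 *   boolean t = e.hasTags();            // true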
*/ package uk.me.parabola.splitter; import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; /** * @author Steve Ratcliffe */ public abstract class Element { protected ArrayList tags; private long id; private int version; public void setId(long id) { this.id = id; } public long getId() { return id; } public int getVersion() { return version; } public void setVersion(int version) { this.version = version; } public static class Tag { public final String key, value; public Tag(String key, String value) { this.key = key; this.value = value; } public String getKey() { return key; } public String getValue() { return value; } @Override public String toString() { return key + "=" + value; } } public void addTag(String key, String value) { if (key.equals("created_by")) return; // Most elements are nodes. Most nodes have no tags. Create the tag table lazily if (tags == null) tags = new ArrayList<>(4); tags.add(new Tag(key, value)); } public boolean hasTags() { return tags != null && !tags.isEmpty(); } public Iterator tagsIterator() { if (tags == null) return Collections.emptyIterator(); return tags.iterator(); } public String getTag (String key){ if (tags == null) return null; for (Tag tag:tags){ if (key.equals(tag.key)) return tag.value; } return null; } } splitter-r653/src/uk/me/parabola/splitter/JVMHealthMonitor.java0000664000175300017530000000513214352507254025772 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * Periodically outputs the elapsed time and amount of memory used by the VM * * @author Chris Miller */ public class JVMHealthMonitor { private final Thread statusThread; private static long startTime; /** * Starts a daemon thread that will periodically report the state of the JVM (heap usage). * This method just needs to be called once and there's nothing else to do. The background * thread will terminate automatically when the application exits. * * @param statusFrequency the number of seconds to sleep between each status update. */ public JVMHealthMonitor (final long statusFrequency) { startTime = System.currentTimeMillis(); statusThread = new Thread(new Runnable() { @Override public void run() { int iter=0; while (true) { iter++; if (iter%10 == 0) { System.out.println("***** Full GC *****"); System.gc(); } long maxMem = Runtime.getRuntime().maxMemory() / 1024 / 1024; long totalMem = Runtime.getRuntime().totalMemory() / 1024 / 1024; long freeMem = Runtime.getRuntime().freeMemory() / 1024 / 1024; long usedMem = totalMem - freeMem; System.out.println("Elapsed time: " + getElapsedTime() + " Memory: Current " + totalMem + "MB (" + usedMem + "MB used, " + freeMem + "MB free) Max " + maxMem + "MB"); try { Thread.sleep(statusFrequency * 1000L); } catch (InterruptedException e) { System.out.println("JVMHealthMonitor sleep was interrupted. 
Ignoring."); } } } } ); statusThread.setDaemon(true); statusThread.setName("JVMHealthMonitor"); } public void start(){ if (!statusThread.isAlive()) statusThread.start(); } protected static String getElapsedTime() { long elapsed = (System.currentTimeMillis() - startTime) / 1000; long seconds = elapsed % 60; long minutes = elapsed / 60 % 60; long hours = elapsed / (60L * 60) % 60; StringBuilder buf = new StringBuilder(20); if (hours > 0) buf.append(hours).append("h "); if (hours > 0 || minutes > 0) buf.append(minutes).append("m "); buf.append(seconds).append('s'); return buf.toString(); } } splitter-r653/src/uk/me/parabola/splitter/Main.java0000664000175300017530000005110014352507254023520 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.io.File; import java.io.IOException; import java.time.Duration; import java.time.Instant; import java.util.Arrays; import java.util.Date; import java.util.List; import java.util.Map; import uk.me.parabola.splitter.args.ParamParser; import uk.me.parabola.splitter.args.SplitterParams; import uk.me.parabola.splitter.kml.KmlWriter; import uk.me.parabola.splitter.solver.AreasCalculator; import uk.me.parabola.splitter.writer.AbstractOSMWriter; import uk.me.parabola.splitter.writer.BinaryMapWriter; import uk.me.parabola.splitter.writer.O5mMapWriter; import uk.me.parabola.splitter.writer.OSMWriter; import uk.me.parabola.splitter.writer.OSMXMLWriter; import uk.me.parabola.splitter.writer.PseudoOSMWriter; /** * Splitter for OSM files with the purpose of providing input files for mkgmap. *
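 * <p>Illustrative invocation (the option values and the input file name are
 * examples only, and the jar name assumes the standard splitter distribution):
 * <pre>
 *   java -Xmx2g -jar splitter.jar --max-nodes=1600000 --output=pbf planet.osm.pbf
 * </pre>
 *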

* The input file is split so that no piece has more than a given number of * nodes in it. * * @author Steve Ratcliffe */ public class Main { private static final String DEFAULT_DIR = "."; /** A list of the OSM files to parse. */ private List fileNameList; /** The amount in map units that tiles overlap. The default is overwritten depending on user settings. */ private int overlapAmount = -1; /** The number of tiles to be written. The default is overwritten depending on user settings. */ private int numTiles = -1; /** The path where the results are written out to. */ private File fileOutputDir; private final OSMFileHandler osmFileHandler = new OSMFileHandler(); private final ProblemLists problemList = new ProblemLists(); private SplitterParams mainOptions; /** * Used for unit tests */ public static void mainNoSystemExit(String... args) { Main m = new Main(); try { m.start(args); } catch (StopNoErrorException e) { if (e.getMessage() != null) System.out.println(e.getMessage()); } } public static void main(String[] args) { Main m = new Main(); try { int rc = m.start(args); if (rc != 0) System.exit(1); } catch (StopNoErrorException e) { if (e.getMessage() != null) System.out.println(e.getMessage()); } } private int start(String[] args) { int rc = 0; JVMHealthMonitor healthMonitor = null; try { mainOptions = readArgs(args); } catch (IllegalArgumentException e) { if (e.getMessage() != null) System.out.println("Error: " + e.getMessage()); return 1; } if (mainOptions.getStatusFreq() > 0) { healthMonitor = new JVMHealthMonitor(mainOptions.getStatusFreq()); healthMonitor.start(); } Instant start = Instant.now(); System.out.println("Time started: " + new Date()); try { // configure the input file handler osmFileHandler.setFileNames(fileNameList); osmFileHandler.setMixed(mainOptions.isMixed()); osmFileHandler.setMaxThreads(mainOptions.getMaxThreads().getCount()); if (mainOptions.isKeepComplete() && mainOptions.getProblemFile() != null) { // read the user list now so that possible problems are reported early if (!problemList.readProblemIds(mainOptions.getProblemFile())) throw new IllegalArgumentException(); } // first step: either read or calculate the list of areas List areas = split(); DataStorer dataStorer; if (mainOptions.isKeepComplete()) { // optional step a: calculate list of ways and relations which are contained in multiple areas dataStorer = calcProblemLists(areas); // optional step b: calculate the writers for the list of "problem" ways and relations useProblemLists(dataStorer); } else { dataStorer = new DataStorer(areas, overlapAmount); } // final step: write the OSM output files writeTiles(dataStorer); dataStorer.finish(); } catch (IOException e) { System.err.println("Error opening or reading file " + e); e.printStackTrace(); return 1; } catch (SplitFailedException e) { if (e.getMessage() != null && e.getMessage().length() > 0) e.printStackTrace(); return 1; } catch (StopNoErrorException e) { if (e.getMessage() != null) { String msg = "Stopped after " + e.getMessage(); System.err.println(msg); System.out.println(msg); } // nothing to do } catch (RuntimeException e) { e.printStackTrace(); return 1; } System.out.println("Time finished: " + new Date()); Duration duration = Duration.between(start, Instant.now()); long seconds = duration.getSeconds(); if (seconds > 0) { long hours = seconds / 3600; seconds -= hours * 3600; long minutes = seconds / 60; seconds -= minutes * 60; System.out.println("Total time taken: " + (hours > 0 ? hours + (hours > 1 ? " hours " : " hour ") : "") + (minutes > 0 ? 
minutes + (minutes > 1 ? " minutes " : " minute ") : "") + (seconds > 0 ? seconds + (seconds > 1 ? " seconds" : " second") : "")); } else System.out.println("Total time taken: " + duration.getNano() / 1000000 + " ms"); return rc; } /** * Fill the list of areas. The list might be read from an existing file or it might * be freshly calculated by scanning the input files. * @return List of areas which might overlap each other if they were read from an existing file. * @throws IOException */ private List split() throws IOException { final File outputDir = fileOutputDir; if (!outputDir.exists()) { System.out.println("Output directory not found. Creating directory '" + fileOutputDir + "'"); if (!outputDir.mkdirs()) { System.err.println("Unable to create output directory! Using default directory instead"); fileOutputDir = new File(DEFAULT_DIR); } } else if (!outputDir.isDirectory()) { System.err.println( "The --output-dir parameter must specify a directory. The --output-dir parameter is being ignored, writing to default directory instead."); fileOutputDir = new File(DEFAULT_DIR); } final String splitFile = mainOptions.getSplitFile(); // A polygon file in osmosis polygon format final String polygonFile = mainOptions.getPolygonFile(); final String polygonDescFile = mainOptions.getPolygonDescFile(); final AreaList areaList = new AreaList(mainOptions.getDescription()); boolean writeAreas = false; if (splitFile != null) { try { areaList.read(splitFile); areaList.dump(); } catch (IOException e) { throw new IllegalArgumentException("Could not read area list file " + splitFile); } if (polygonFile != null) { System.out.println("Warning: parameter polygon-file is ignored because split-file is used."); } if (polygonDescFile != null) { System.out.println("Warning: parameter polygon-desc-file is ignored because split-file is used."); } } else { writeAreas = true; } areaList.setGeoNamesFile(mainOptions.getGeonamesFile()); AreasCalculator areasCalculator = new AreasCalculator(mainOptions, numTiles); if (areaList.getAreas().isEmpty()) { int resolution = mainOptions.getResolution(); writeAreas = true; int alignment = 1 << (24 - resolution); System.out.println("Map is being split for resolution " + resolution + ':'); System.out.println(" - area boundaries are aligned to 0x" + Integer.toHexString(alignment) + " map units (" + Utils.toDegrees(alignment) + " degrees)"); System.out.println( " - areas are multiples of 0x" + Integer.toHexString(alignment) + " map units wide and high"); areasCalculator.fillDensityMap(osmFileHandler, fileOutputDir); areaList.setAreas(areasCalculator.calcAreas()); if (areaList.getAreas().isEmpty()) { System.err.println("Failed to calculate areas. See stdout messages for details."); System.out.println("Failed to calculate areas."); if (numTiles < 2) { System.out.println("Sorry. 
Cannot split the file without creating huge, almost empty, tiles."); System.out.println("Please specify a bounding polygon with the --polygon-file parameter."); } else { System.out.println("Probably the number of tiles is too high for the given resolution."); } throw new SplitFailedException(""); } int mapId = mainOptions.getMapid(); if (mapId + areaList.getAreas().size() > 99999999) { throw new SplitFailedException("Too many areas for initial mapid " + mapId); } areaList.setMapIds(mapId); } areaList.setAreaNames(); if (writeAreas) { areaList.write(new File(fileOutputDir, "areas.list").getPath()); areaList.writePoly(new File(fileOutputDir, "areas.poly").getPath()); } List areas = areaList.getAreas(); String kmlOutputFile = mainOptions.getWriteKml(); if (kmlOutputFile != null) { File out = new File(kmlOutputFile); if (!out.isAbsolute()) out = new File(fileOutputDir, kmlOutputFile); KmlWriter.writeKml(out.getPath(), areas); } String outputType = mainOptions.getOutput(); if (!areasCalculator.getPolygons().isEmpty()) { areaList.writeListFiles(outputDir, areasCalculator.getPolygons(), kmlOutputFile, outputType); } areaList.writeArgsFile(new File(fileOutputDir, "template.args").getPath(), outputType, -1); areaList.dumpHex(); if ("split".equals(mainOptions.getStopAfter())) { try { Thread.sleep(1000); } catch (InterruptedException e) { } throw new StopNoErrorException(mainOptions.getStopAfter()); } return areaList.getAreas(); } private DataStorer calcProblemLists(List areas) { DataStorer dataStorer = problemList.calcProblemLists(osmFileHandler, areas, overlapAmount, mainOptions); String problemReport = mainOptions.getProblemReport(); if (problemReport != null) { problemList.writeProblemList(fileOutputDir, problemReport); } if ("gen-problem-list".equals(mainOptions.getStopAfter())) { try { Thread.sleep(1000); } catch (InterruptedException e) { } throw new StopNoErrorException(mainOptions.getStopAfter()); } return dataStorer; } /** * Deal with the command line arguments. */ private SplitterParams readArgs(String[] args) { ParamParser parser = new ParamParser(); SplitterParams params = parser.parse(SplitterParams.class, args); if (!parser.getErrors().isEmpty()) { System.out.println(); System.out.println("Invalid parameter(s):"); for (String error : parser.getErrors()) { System.out.println(" " + error); } System.out.println(); parser.displayUsage(); throw new IllegalArgumentException(); } System.out.println("Splitter version " + Version.VERSION + " compiled " + Version.TIMESTAMP); for (Map.Entry entry : parser.getConvertedParams().entrySet()) { String name = entry.getKey(); Object value = entry.getValue(); System.out.println(name + '=' + (value == null ? "" : value)); } fileNameList = parser.getAdditionalParams(); if (fileNameList.isEmpty()) { throw new IllegalArgumentException("No file name(s) given"); } boolean filesOK = fileNameList.stream().allMatch(fname -> testAndReportFname(fname, "input file")); if (!filesOK) { System.out.println("Make sure that option parameters start with -- "); throw new IllegalArgumentException(); } int mapId = params.getMapid(); if (mapId < 0 || mapId > 99999999 ) { System.err.println("The --mapid parameter must be a value between 0 and 99999999."); throw new IllegalArgumentException(); } if (params.getMaxNodes() < 10000) { System.err.println("Error: Invalid number " + params.getMaxNodes() + ". 
The --max-nodes parameter must be an integer value of 10000 or higher."); throw new IllegalArgumentException(); } String numTilesParm = params.getNumTiles(); if (numTilesParm != null) { try { numTiles = Integer.parseInt(numTilesParm); if (numTiles < 2) { System.err.println("Error: The --num-tiles parameter must be 2 or higher."); throw new IllegalArgumentException(); } } catch (NumberFormatException e) { System.err.println("Error: Invalid number " + numTilesParm + ". The --num-tiles parameter must be an integer value of 2 or higher."); throw new IllegalArgumentException(); } } // The description to write into the template.args file. String geoNamesFile = params.getGeonamesFile(); checkOptionalFileOption(geoNamesFile, "geonames-file"); String outputType = params.getOutput(); if ("xml pbf o5m simulate".contains(outputType) == false) { System.err.println("The --output parameter must be either xml, pbf, o5m, or simulate. Resetting to xml."); throw new IllegalArgumentException(); } int resolution = params.getResolution(); if (resolution < 1 || resolution > 24) { System.err.println("The --resolution parameter must be a value between 1 and 24. Reasonable values are close to 13."); throw new IllegalArgumentException(); } String outputDir = params.getOutputDir(); fileOutputDir = new File(outputDir == null ? DEFAULT_DIR : outputDir); int maxAreasPerPass = params.getMaxAreas(); if (maxAreasPerPass < 1 || maxAreasPerPass > 9999) { System.err.println("The --max-areas parameter must be a value between 1 and 9999."); throw new IllegalArgumentException(); } String problemFile = params.getProblemFile(); checkOptionalFileOption(params.getProblemFile(), "problem-file"); checkOptionalFileOption(params.getSplitFile(), "split-file"); checkOptionalFileOption(params.getPolygonFile(), "polygon-file"); checkOptionalFileOption(params.getPolygonDescFile(), "polygon-desc-file"); if (params.getPolygonDescFile() != null && params.getPolygonFile() != null) { throw new IllegalArgumentException("--polygon-desc-file and --polygon-file are mutually exclusive"); } String precompSeaDir = params.getPrecompSea(); if (precompSeaDir != null) { File dir = new File(precompSeaDir); if (dir.exists() == false || dir.canRead() == false) { throw new IllegalArgumentException( "precomp-sea directory doesn't exist or is not readable: " + precompSeaDir); } } boolean keepComplete = params.isKeepComplete(); if (params.isMixed() && (keepComplete || problemFile != null)) { System.err.println( "--mixed=true is not supported in combination with --keep-complete=true or --problem-file."); System.err.println("Please use e.g. 
osomosis to sort the data in the input file(s)"); throw new IllegalArgumentException(); } String overlap = params.getOverlap(); if ("auto".equals(overlap) == false) { try { overlapAmount = Integer.valueOf(overlap); if (overlapAmount < 0) throw new IllegalArgumentException("--overlap=" + overlap + " is not is not a valid option."); } catch (NumberFormatException e) { throw new IllegalArgumentException("--overlap=" + overlap + " is not is not a valid option."); } } String boundaryTagsParm = params.getBoundaryTags(); int wantedAdminLevelString = params.getWantedAdminLevel(); if (wantedAdminLevelString < 0 || wantedAdminLevelString > 12) { throw new IllegalArgumentException("The --wanted-admin-level parameter must be between 0 and 12."); } final List validVersionHandling = Arrays.asList("remove", "fake", "keep"); if (!validVersionHandling.contains(params.getHandleElementVersion())) { throw new IllegalArgumentException( "the --handle-element-version parameter must be one of " + validVersionHandling + "."); } final List validStopAfter = Arrays.asList("split", "gen-problem-list", "handle-problem-list", "dist"); if (!validStopAfter.contains(params.getStopAfter())) { throw new IllegalArgumentException( "the --stop-after parameter must be one of " + validStopAfter + "."); } int searchLimit = params.getSearchLimit(); if (searchLimit < 1000) { throw new IllegalArgumentException("The --search-limit parameter must be 1000 or higher."); } // plausibility checks and default handling if (keepComplete) { if (fileNameList.size() > 1) { System.err.println("Warning: --keep-complete is only used for the first input file. Further files must use higher ids."); } if (overlapAmount > 0) { System.err.println("Warning: --overlap is used in combination with --keep-complete=true "); System.err.println( " The option keep-complete should be used with overlap=0 because it is very unlikely that "); System.err.println( " the overlap will add any important data. 
It will just cause a lot of additional output which "); System.err.println(" has to be thrown away again in mkgmap."); } else overlapAmount = 0; } else { if (overlapAmount < 0) { overlapAmount = 2000; System.out.println("Setting default overlap=2000 because keep-complete=false is in use."); } if (params.getProblemReport() != null) { System.out.println( "Parameter --problem-report is ignored, because parameter --keep-complete=false is used"); } if (boundaryTagsParm != null) { System.out.println( "Parameter --boundaryTags is ignored, because parameter --keep-complete=false is used"); } } return params; } private static void checkOptionalFileOption(String fname, String option) { if (fname != null && !testAndReportFname(fname, option)) { throw new IllegalArgumentException(); } } private OSMWriter[] createWriters(List areas) { OSMWriter[] allWriters = new OSMWriter[areas.size()]; for (int j = 0; j < allWriters.length; j++) { Area area = areas.get(j); AbstractOSMWriter w; String outputType = mainOptions.getOutput(); if ("pbf".equals(outputType)) w = new BinaryMapWriter(area, fileOutputDir, area.getMapId(), overlapAmount); else if ("o5m".equals(outputType)) w = new O5mMapWriter(area, fileOutputDir, area.getMapId(), overlapAmount); else if ("simulate".equals(outputType)) w = new PseudoOSMWriter(area); else w = new OSMXMLWriter(area, fileOutputDir, area.getMapId(), overlapAmount); switch (mainOptions.getHandleElementVersion()) { case "keep": w.setVersionMethod(AbstractOSMWriter.KEEP_VERSION); break; case "remove": w.setVersionMethod(AbstractOSMWriter.REMOVE_VERSION); break; default: w.setVersionMethod(AbstractOSMWriter.FAKE_VERSION); } allWriters[j] = w; } return allWriters; } private void useProblemLists(DataStorer dataStorer) { problemList.calcMultiTileElements(dataStorer, osmFileHandler); if ("handle-problem-list".equals(mainOptions.getStopAfter())) { try { Thread.sleep(1000); } catch (InterruptedException e) { } throw new StopNoErrorException(mainOptions.getStopAfter()); } } /** * Final pass(es), we have the areas so parse the file(s) again. 
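	 * <p>Worked example of the pass arithmetic used below (numbers invented):
	 * with 1000 areas and --max-areas=512, numPasses = ceil(1000/512) = 2 and
	 * areasPerPass = ceil(1000/2) = 500, so the data is distributed in two
	 * passes of 500 areas each.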
* * @param dataStorer * collects data used in different program passes */ private void writeTiles(DataStorer dataStorer) throws IOException { List areas = dataStorer.getAreaDictionary().getAreas(); // the final split passes, dataStorer.switchToSeqAccess(fileOutputDir); dataStorer.setWriters(createWriters(areas)); System.out.println("Distributing data " + new Date()); int numPasses = (int) Math.ceil((double) areas.size() / mainOptions.getMaxAreas()); int areasPerPass = (int) Math.ceil((double) areas.size() / numPasses); long startDistPass = System.currentTimeMillis(); if (numPasses > 1) { System.out.println("Processing " + areas.size() + " areas in " + numPasses + " passes, " + areasPerPass + " areas at a time"); } else { System.out.println("Processing " + areas.size() + " areas in a single pass"); } for (int i = 0; i < numPasses; i++) { int areaOffset = i * areasPerPass; int numAreasThisPass = Math.min(areasPerPass, areas.size() - i * areasPerPass); dataStorer.restartWriterMaps(); SplitProcessor processor = new SplitProcessor(dataStorer, areaOffset, numAreasThisPass, mainOptions); System.out.println("Starting distribution pass " + (i + 1) + " of " + numPasses + ", processing " + numAreasThisPass + " areas (" + areas.get(i * areasPerPass).getMapId() + " to " + areas.get(i * areasPerPass + numAreasThisPass - 1).getMapId() + ')'); osmFileHandler.execute(processor); } System.out.println("Distribution pass(es) took " + (System.currentTimeMillis() - startDistPass) + " ms"); } static boolean testAndReportFname(String fileName, String type) { File f = new File(fileName); if (f.exists() == false || f.isFile() == false || f.canRead() == false) { String msg = "Error: " + type + " doesn't exist or is not a readable file: " + fileName; System.out.println(msg); System.err.println(msg); return false; } return true; } } splitter-r653/src/uk/me/parabola/splitter/MapDetails.java0000664000175300017530000000253714352507254024671 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * The map features that we are going to map are collected here. * * @author Steve Ratcliffe */ public class MapDetails { private int minLat = Utils.toMapUnit(180.0); private int minLon = Utils.toMapUnit(180.0); private int maxLat = Utils.toMapUnit(-180.0); private int maxLon = Utils.toMapUnit(-180.0); /** * Add the given point to the total bounds for the map. * * @param lat the latitude, in map units. * @param lon the longitude, in map units. */ public void addToBounds(int lat, int lon) { if (lat < minLat) minLat = lat; if (lat > maxLat) maxLat = lat; if (lon < minLon) minLon = lon; if (lon > maxLon) maxLon = lon; } /** * Get the bounds of this map. * * @return An area covering all the points in the map. 
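	 * <p>Illustrative usage (the coordinates are invented map units):
	 * <pre>
	 *   MapDetails md = new MapDetails();
	 *   md.addToBounds(2437120, 249856);
	 *   md.addToBounds(2445312, 262144);
	 *   Area bounds = md.getBounds(); // 2437120,249856 to 2445312,262144
	 * </pre>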
*/ public Area getBounds() { return new Area(minLat, minLon, maxLat, maxLon); } } splitter-r653/src/uk/me/parabola/splitter/MapProcessor.java0000664000175300017530000000417514352507254025263 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.util.concurrent.BlockingQueue; public interface MapProcessor { /** * (performance) Returns true if the reader is allowed to ignore tags * while reading OSM data */ boolean skipTags(); /** * (performance) Returns true if the reader is allowed to skip nodes * while reading OSM data */ boolean skipNodes(); /** * (performance) Returns true if the reader is allowed to skip ways * while reading OSM data */ boolean skipWays(); /** * (performance) Returns true if the reader is allowed to skip relations * while reading OSM data */ boolean skipRels(); /** * returns a value that identifies the current phase * @return */ int getPhase(); /** * Called when the bound tag is encountered. Note that it is possible * for this to be called multiple times, eg if there are multiple OSM * files provided as input. * @param bounds the area covered by the map. */ void boundTag(Area bounds); /** * Called when a whole node has been processed. */ void processNode(Node n); /** * Called when a whole way has been processed. */ void processWay(Way w); /** * Called when a whole relation has been processed. */ void processRelation(Relation r); /** * Called when all input files were processed, * it returns false to signal that the same instance of the processor * should be called again with a new reader for all these input files. */ boolean endMap(); /** * For use with the producer/consumer pattern * @param queue * @return */ boolean consume(BlockingQueue queue); /** * Called for each single input file */ void startFile(); } splitter-r653/src/uk/me/parabola/splitter/MultiTileProcessor.java0000664000175300017530000010144014352507254026447 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
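 *
 * Editorial sketch (illustrative only): a MapProcessor, such as the class below,
 * is normally written by extending AbstractMapProcessor and overriding only the
 * callbacks the pass needs. Assuming that base class supplies no-op defaults for
 * the rest, a hypothetical pass that merely counts nodes could look like:
 *
 *   class NodeCounter extends AbstractMapProcessor {
 *       private long nodes;
 *       public void processNode(Node n) { nodes++; }
 *       public boolean endMap() {
 *           System.out.println(nodes + " nodes seen");
 *           return true; // finished, no further read pass needed
 *       }
 *   }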
*/ package uk.me.parabola.splitter; import uk.me.parabola.splitter.Relation.Member; import uk.me.parabola.splitter.tools.Long2IntClosedMap; import uk.me.parabola.splitter.tools.Long2IntClosedMapFunction; import uk.me.parabola.splitter.tools.OSMId2ObjectMap; import uk.me.parabola.splitter.tools.SparseBitSet; import it.unimi.dsi.fastutil.longs.Long2ObjectLinkedOpenHashMap; import it.unimi.dsi.fastutil.longs.Long2ObjectMap.Entry; import it.unimi.dsi.fastutil.longs.LongArrayList; import java.awt.Point; import java.awt.Rectangle; import java.util.ArrayList; import java.util.Iterator; import java.util.LinkedList; import java.util.List; /** * Analyzes elements that should be written to multiple tiles * to find out what details are needed in each tile. */ class MultiTileProcessor extends AbstractMapProcessor { private final static int PHASE1_RELS_ONLY = 1; private final static int PHASE2_WAYS_ONLY = 2; private final static int PHASE3_NODES_AND_WAYS = 3; private final static int PHASE4_WAYS_ONLY = 4; private final boolean addParentRels = false; private final static byte MEM_NODE_TYPE = 1; private final static byte MEM_WAY_TYPE = 2; private final static byte MEM_REL_TYPE = 3; private final static byte MEM_INVALID_TYPE = -1; private final static int PROBLEM_WIDTH = Utils.toMapUnit(180.0); protected final static String[] NAME_TAGS = {"name","name:en","int_name","note"}; private final static String NOT_SORTED_MSG = "Maybe the IDs are not sorted. This is not supported with keep-complete=true or --problem-list"; private int phase = PHASE1_RELS_ONLY; private final DataStorer dataStorer; private final AreaDictionary areaDictionary; private Long2ObjectLinkedOpenHashMap relMap = new Long2ObjectLinkedOpenHashMap<>(); private Long2IntClosedMapFunction nodeWriterMap; private Long2IntClosedMapFunction wayWriterMap; private Long2IntClosedMapFunction relWriterMap; private int [] nodeLons; private int [] nodeLats; private SparseBitSet problemRels = new SparseBitSet(); private SparseBitSet neededWays = new SparseBitSet(); private SparseBitSet neededNodes = new SparseBitSet(); private OSMId2ObjectMap wayBboxMap = new OSMId2ObjectMap<>(); private SparseBitSet mpWays = new SparseBitSet(); private OSMId2ObjectMap mpWayEndNodesMap = new OSMId2ObjectMap<>(); /** each bit represents one area/tile */ private final AreaSet workWriterSet = new AreaSet(); private long lastCoordId = Long.MIN_VALUE; private int foundWays; private int neededNodesCount; private int neededWaysCount; private int neededMpWaysCount; private int visitId; MultiTileProcessor(DataStorer dataStorer, LongArrayList problemWayList, LongArrayList problemRelList) { this.dataStorer = dataStorer; this.areaDictionary = dataStorer.getAreaDictionary(); for (long id: problemWayList){ neededWays.set(id); } for (long id: problemRelList){ problemRels.set(id); } // we allocate this once to avoid massive resizing with large number of tiles neededMpWaysCount = mpWays.cardinality(); if (problemRelList.isEmpty()) { phase = PHASE2_WAYS_ONLY; } return; } @Override public boolean skipTags() { if (phase == PHASE1_RELS_ONLY) return false; return true; } @Override public boolean skipNodes() { if (phase == PHASE3_NODES_AND_WAYS) return false; return true; } @Override public boolean skipWays() { if (phase == PHASE1_RELS_ONLY) return true; return false; } @Override public boolean skipRels() { if (phase == PHASE1_RELS_ONLY && problemRels.cardinality() > 0) return false; return true; } @Override public int getPhase() { return phase; } @Override public void processNode(Node node) { if 
(phase == PHASE3_NODES_AND_WAYS && neededNodes.get(node.getId())) { storeCoord(node); // return memory to GC neededNodes.clear(node.getId()); } } @Override public void processWay(Way way) { if (phase == PHASE2_WAYS_ONLY){ if (!neededWays.get(way.getId())) return; for (long id : way.getRefs()) { neededNodes.set(id); } if (mpWays.get(way.getId())){ mpWays.clear(way.getId()); int numRefs = way.getRefs().size(); if (numRefs >= 2){ JoinedWay joinedWay = new JoinedWay(way.getRefs().getLong(0), way.getRefs().getLong(numRefs-1)); mpWayEndNodesMap.put(way.getId(), joinedWay); } } foundWays++; } else if (phase == PHASE3_NODES_AND_WAYS){ if (!neededWays.get(way.getId())) return; // calculate the bbox int numRefs = way.getRefs().size(); boolean isClosed = numRefs > 1 && way.getRefs().get(0).equals(way.getRefs().get(numRefs-1)); workWriterSet.clear(); Rectangle wayBbox = getWayBbox(way.getId(), way.getRefs()); if (wayBbox == null) return; wayBboxMap.put(way.getId(), wayBbox); if (isClosed){ checkBoundingBox(workWriterSet, wayBbox); } else { addWritersOfWay(workWriterSet, wayBbox, way.getId(), way.getRefs()); } int wayWriterIdx; if (workWriterSet.isEmpty()) wayWriterIdx = UNASSIGNED; else wayWriterIdx = areaDictionary.translate(workWriterSet); try{ wayWriterMap.add(way.getId(), wayWriterIdx); }catch (IllegalArgumentException e){ System.err.println(e.getMessage()); throw new SplitFailedException(NOT_SORTED_MSG); } } else if (phase == PHASE4_WAYS_ONLY){ // propagate the ways writers to all nodes if (!neededWays.get(way.getId())) return; int wayWriterIdx = wayWriterMap.getRandom(way.getId()); if (wayWriterIdx != UNASSIGNED){ AreaSet wayWriterSet = areaDictionary.getSet(wayWriterIdx); for (long id : way.getRefs()) { addOrMergeWriters(nodeWriterMap, wayWriterSet, wayWriterIdx, id); } } } } @Override public void processRelation(Relation rel) { // TODO: we store all relations here, no matter how many are needed. Another approach would be to store // the rels in the problem list and read again until all sub rels of these problem rels are found or // known as missing. This can require many more read passes for relations, but can help if this phase // starts to be a memory bottleneck. 
if (phase == PHASE1_RELS_ONLY){ MTRelation myRel = new MTRelation(rel); relMap.put(myRel.getId(), myRel); } } @Override public boolean endMap() { if (phase == PHASE1_RELS_ONLY){ stats("Finished collecting relations."); Utils.printMem(); System.out.println("starting to resolve relations containing problem relations ..."); // add all ways and nodes of problem rels so that we collect the coordinates markProblemMembers(); if (addParentRels){ // we want to see the parent rels, but not all children of all parents markParentRels(); } // free memory for rels that are not causing any trouble relMap.long2ObjectEntrySet().removeIf(e -> !problemRels.get(e.getLongKey())); problemRels = null; // reallocate to the needed size relMap = new Long2ObjectLinkedOpenHashMap<>(relMap); //System.out.println("Finished adding parents and members of problem relations to problem lists."); System.out.println("Finished adding members of problem relations to problem lists."); stats("starting to collect ids of needed way nodes ..."); neededMpWaysCount = mpWays.cardinality(); neededWaysCount = neededWays.cardinality(); ++phase; } else if (phase == PHASE2_WAYS_ONLY){ stats("Finished collecting problem ways."); neededNodesCount = neededNodes.cardinality(); // critical part: we have to allocate possibly large arrays here nodeWriterMap = new Long2IntClosedMap("node", neededNodesCount, UNASSIGNED); wayWriterMap = new Long2IntClosedMap("way", foundWays, UNASSIGNED); dataStorer.setWriterMap(DataStorer.NODE_TYPE, nodeWriterMap); dataStorer.setWriterMap(DataStorer.WAY_TYPE, wayWriterMap); nodeLons = new int[neededNodesCount]; nodeLats = new int[neededNodesCount]; System.out.println("Found " + Utils.format(foundWays) + " of " + Utils.format(neededWaysCount) + " needed ways."); System.out.println("Found " + Utils.format(mpWayEndNodesMap.size()) + " of " + Utils.format(neededMpWaysCount) + " needed multipolygon ways."); stats("Starting to collect coordinates for " + Utils.format(neededNodesCount) + " needed nodes."); Utils.printMem(); ++phase; } else if (phase == PHASE3_NODES_AND_WAYS){ System.out.println("Found " + Utils.format(nodeWriterMap.size()) + " of " + Utils.format(neededNodesCount) + " needed nodes."); Utils.printMem(); mpWays = null; neededNodes = null; System.out.println("Calculating tiles for problem relations..."); calcWritersOfRelWaysAndNodes(); // return coordinate memory to GC nodeLats = null; nodeLons = null; calcWritersOfMultiPolygonRels(); mergeRelMemWriters(); propagateWritersOfRelsToMembers(); mpWayEndNodesMap.clear(); wayBboxMap = null; relWriterMap = new Long2IntClosedMap("rel", relMap.size(), UNASSIGNED); for (Entry entry : relMap.long2ObjectEntrySet()){ int val = entry.getValue().getMultiTileWriterIndex(); if (val != UNASSIGNED){ try{ relWriterMap.add(entry.getLongKey(), val); }catch (IllegalArgumentException e){ System.err.println(e); throw new SplitFailedException(NOT_SORTED_MSG); } } } relMap = null; dataStorer.setWriterMap(DataStorer.REL_TYPE, relWriterMap); stats("Making sure that needed way nodes of relations are written to the correct tiles..."); ++phase; } else if (phase == PHASE4_WAYS_ONLY){ stats("Finished processing problem lists."); return true; } return false; // not done yet } /** * Mark all members of given problem relations as problem cases. 
*/ private void markProblemMembers() { ArrayList visited = new ArrayList<>(); for (MTRelation rel: relMap.values()){ if (!problemRels.get(rel.getId())) continue; incVisitID(); visited.clear(); MarkNeededMembers(rel, 0, visited); assert visited.size() == 0; } } /** * Mark the ways and nodes of a relation as problem cases. If the relation * contains sub relations, the routine calls itself recursively. * @param rel the relation * @param depth used to detect loops * @param visited * @return */ private void MarkNeededMembers(MTRelation rel, int depth, ArrayList visited){ if (rel.getLastVisitId() == visitId) return; rel.setLastVisitId(visitId); if (depth > 15){ System.out.println("MarkNeededMembers reached max. depth: " + rel.getId() + " " + depth); return ; } for (int i = 0; i < rel.numMembers; i++){ long memId = rel.memRefs[i]; byte memType = rel.memTypes[i]; if (memType == MEM_WAY_TYPE){ neededWays.set(memId); if (rel.isMultiPolygon()) mpWays.set(memId); } else if (memType == MEM_NODE_TYPE) neededNodes.set(memId); else if (memType == MEM_REL_TYPE){ MTRelation subRel = relMap.get(memId); if (subRel == null) continue; if (subRel.getLastVisitId() == visitId) loopAction(rel, subRel, visited); else { problemRels.set(memId); visited.add(subRel); MarkNeededMembers(subRel, depth+1, visited); visited.remove(visited.size()-1); } } } } /** * Mark the parents of problem relations as problem relations. */ private void markParentRels(){ while (true){ boolean changed = false; for (MTRelation rel: relMap.values()){ if (rel.hasRelMembers() == false || problemRels.get(rel.getId())) continue; for (int i = 0; i < rel.numMembers; i++){ long memId = rel.memRefs[i]; if (rel.memTypes[i] == MEM_REL_TYPE && problemRels.get(memId)) { problemRels.set(rel.getId()); rel.setAddedAsParent(); System.out.println("Adding parent of problem rel " + memId + " to problem list: " + rel.getId()); changed = true; break; } } } if (!changed) return; } } /** * Calculate the writers for each relation based on the * nodes and ways. */ private void calcWritersOfRelWaysAndNodes() { for (MTRelation rel: relMap.values()){ if (false == (rel.hasWayMembers() || rel.hasNodeMembers()) ) continue; AreaSet writerSet = new AreaSet(); for (int i = 0; i < rel.numMembers; i++){ long memId = rel.memRefs[i]; boolean memFound = false; if (rel.memTypes[i] == MEM_NODE_TYPE){ int pos = nodeWriterMap.getKeyPos(memId); if (pos >= 0){ addWritersOfPoint(writerSet, nodeLats[pos], nodeLons[pos]); memFound = true; } } else if (rel.memTypes[i] == MEM_WAY_TYPE){ int idx = wayWriterMap.getRandom(memId); if (idx != UNASSIGNED){ writerSet.or(areaDictionary.getSet(idx)); memFound = true; } if (wayBboxMap.get(memId) != null) memFound = true; } else if (rel.memTypes[i] == MEM_REL_TYPE) continue; // handled later if (!memFound) { rel.setNotComplete(); continue; } } if (!writerSet.isEmpty()){ int idx = areaDictionary.translate(writerSet); rel.setMultiTileWriterIndex(idx); } } } /** * Multipolygon relations should describe one or more closed polygons. * We calculate the writers for each of the polygons. 
*/ private void calcWritersOfMultiPolygonRels() { // recurse thru sub relations ArrayList visited = new ArrayList<>(); for (MTRelation rel: relMap.values()){ AreaSet relWriters = new AreaSet(); if (rel.isMultiPolygon()){ if (rel.hasRelMembers()){ incVisitID(); visited.clear(); orSubRelWriters(rel, 0, visited); } checkSpecialMP(relWriters, rel); if (!relWriters.isEmpty()){ int writerIdx = areaDictionary.translate(relWriters); rel.setMultiTileWriterIndex(writerIdx); int touchedTiles = relWriters.cardinality(); if (touchedTiles > dataStorer.getNumOfAreas() / 2 && dataStorer.getNumOfAreas() > 10){ System.out.println("Warning: rel " + rel.getId() + " touches " + touchedTiles + " tiles."); } } } } } /** * Or-combine all writers of the members of a relation */ private void mergeRelMemWriters() { // or combine the writers of sub-relations with the parent relation ArrayList visited = new ArrayList<>(); for (MTRelation rel: relMap.values()){ incVisitID(); visited.clear(); orSubRelWriters(rel, 0, visited); } } /** * Make sure that all the elements of a relation are written to the same tiles as the relation info itself. */ private void propagateWritersOfRelsToMembers() { // make sure that the ways and nodes of the problem relations are written to all needed tiles for (MTRelation rel: relMap.values()){ if (rel.wasAddedAsParent()) continue; int relWriterIdx = rel.getMultiTileWriterIndex(); if (relWriterIdx == UNASSIGNED) continue; AreaSet relWriters = areaDictionary.getSet(relWriterIdx); for (int i = 0; i < rel.numMembers; i++){ long memId = rel.memRefs[i]; switch (rel.memTypes[i]){ case MEM_WAY_TYPE: addOrMergeWriters(wayWriterMap, relWriters, relWriterIdx, memId); break; case MEM_NODE_TYPE: addOrMergeWriters(nodeWriterMap, relWriters, relWriterIdx, memId); break; default: } } } } /** * Store the coordinates of a node in the most appropriate data structure. * @param node */ private void storeCoord(Node node) { long id = node.getId(); if (lastCoordId >= id){ System.err.println("Error: Node ids are not sorted. Use e.g. osmosis to sort the input data."); System.err.println("This is not supported with keep-complete=true or --problem-list"); throw new SplitFailedException("Node ids are not sorted"); } int nodePos = -1; try{ nodePos = nodeWriterMap.add(id, UNASSIGNED); }catch (IllegalArgumentException e){ System.err.println(e.getMessage()); throw new SplitFailedException(NOT_SORTED_MSG); } nodeLons[nodePos ] = node.getMapLon(); nodeLats[nodePos] = node.getMapLat(); lastCoordId = id; } /** * If a relation contains relations, or-combine the writers of the sub- * relation with the writes of the parent relation . The routine calls * itself recursively when the sub relation contains sub relations. * @param rel the relation * @param depth used to detect loops * @return */ private void orSubRelWriters(MTRelation rel, int depth, ArrayList visited ){ if (rel.getLastVisitId() == visitId) return; rel.setLastVisitId(visitId); if (depth > 15){ System.out.println("orSubRelWriters reached max. 
depth: " + rel.getId() + " " + depth); return ; } AreaSet relWriters = new AreaSet(); int relWriterIdx = rel.getMultiTileWriterIndex(); if (relWriterIdx != UNASSIGNED) relWriters.or(areaDictionary.getSet(relWriterIdx)); boolean changed = false; for (int i = 0; i < rel.numMembers; i++){ long memId = rel.memRefs[i]; if (rel.memTypes[i] == MEM_REL_TYPE){ MTRelation subRel = relMap.get(memId); if (subRel == null) continue; if (subRel.getLastVisitId() == visitId) loopAction(rel, subRel, visited); else { visited.add(rel); orSubRelWriters(subRel, depth+1, visited); visited.remove(visited.size()-1); int memWriterIdx = subRel.getMultiTileWriterIndex(); if (memWriterIdx == UNASSIGNED || memWriterIdx == relWriterIdx){ continue; } AreaSet memWriters = areaDictionary.getSet(memWriterIdx); int oldSize = relWriters.cardinality(); relWriters.or(memWriters); if (oldSize != relWriters.cardinality()) changed = true; } } } if (changed){ rel.setMultiTileWriterIndex(areaDictionary.translate(relWriters)); } } /** * Report some numbers regarding memory usage * @param msg */ private void stats(String msg){ System.out.println("Stats for " + getClass().getSimpleName() + " pass " + phase); if (problemRels != null) System.out.println(" " + problemRels.getClass().getSimpleName() + " problemRels contains now " + Utils.format(problemRels.cardinality()) + " Ids."); if (neededWays != null) System.out.println(" " + neededWays.getClass().getSimpleName() + " neededWays contains now " + Utils.format(neededWays.cardinality())+ " Ids."); if (mpWays != null) System.out.println(" " + mpWays.getClass().getSimpleName() + " mpWays contains now " + Utils.format(mpWays.cardinality())+ " Ids."); if (neededNodes != null) System.out.println(" " + neededNodes.getClass().getSimpleName() + " neededNodes contains now " + Utils.format(neededNodes.cardinality())+ " Ids."); if (relMap != null) System.out.println(" Number of stored relations: " + Utils.format(relMap.size())); System.out.println(" Number of stored tile combinations in multiTileDictionary: " + Utils.format(areaDictionary.size())); if (phase == PHASE4_WAYS_ONLY) dataStorer.stats(" "); System.out.println("Status: " + msg); } /** * Find all writer areas that intersect with a given bounding box. * @param writerSet an already allocate AreaSet which may be modified * @param polygonBbox the bounding box * @return true if any writer bbox intersects the polygon bbox */ private boolean checkBoundingBox(AreaSet writerSet, Rectangle polygonBbox){ boolean foundIntersection = false; if (polygonBbox != null){ for (int i = 0; i < dataStorer.getNumOfAreas(); i++) { Rectangle writerBbox = Utils.area2Rectangle(dataStorer.getArea(i), 1); if (writerBbox.intersects(polygonBbox)){ writerSet.set(i); foundIntersection = true; } } } return foundIntersection; } /** * Merge the writers of a parent object with the writes of the child, * add or update the entry in the Map * @param map * @param parentWriters * @param parentWriterIdx * @param childId */ private void addOrMergeWriters(Long2IntClosedMapFunction map, AreaSet parentWriters, int parentWriterIdx, long childId) { int pos = map.getKeyPos(childId); if (pos < 0) return; int childWriterIdx = map.getRandom(childId); if (childWriterIdx != UNASSIGNED){ // we have already calculated writers for this child if (parentWriterIdx == childWriterIdx) return; // we have to merge (without changing the stored BitSets!) 
AreaSet childWriters = areaDictionary.getSet(childWriterIdx); AreaSet mergedWriters = new AreaSet(parentWriters); mergedWriters.or(childWriters); childWriterIdx = areaDictionary.translate(mergedWriters); } else childWriterIdx = parentWriterIdx; map.replace(childId, childWriterIdx); } /** * Calculate the writers for a given point specified by coordinates. * Set the corresponding bit in the AreaSet. * @param writerSet an already allocate AreaSet which may be modified * @param mapLat latitude value * @param mapLon longitude value * @return true if a writer was found */ private boolean addWritersOfPoint(AreaSet writerSet, int mapLat, int mapLon){ AreaGridResult writerCandidates = dataStorer.getGrid().get(mapLat,mapLon); if (writerCandidates == null) return false; boolean foundWriter = false; for (int n : writerCandidates.set) { Area extbbox = dataStorer.getExtendedArea(n); boolean found = (writerCandidates.testNeeded) ? extbbox.contains(mapLat, mapLon) : true; foundWriter |= found; if (found) writerSet.set(n); } return foundWriter; } /** * Find tiles that are crossed by a line specified by two points. * @param writerSet an already allocate AreaSet which may be modified * @param possibleWriters a AreaSet that contains the writers to be checked * @param p1 first point of line * @param p2 second point of line */ private void addWritersOfCrossedTiles(AreaSet writerSet, final AreaSet possibleWriters, final Point p1,final Point p2){ for (int i : possibleWriters) { Rectangle writerBbox = Utils.area2Rectangle(dataStorer.getArea(i), 1); if (writerBbox.intersectsLine(p1.x,p1.y,p2.x,p2.y)) writerSet.set(i); } } /** * Calculate all writer areas that are crossed or directly "touched" by a way. * @param writerSet an already allocate AreaSet which may be modified * @param wayBbox * @param wayId the id that identifies the way * @param wayRefs list with the node references */ private void addWritersOfWay (AreaSet writerSet, Rectangle wayBbox, long wayId, LongArrayList wayRefs){ int numRefs = wayRefs.size(); int foundNodes = 0; boolean needsCrossTileCheck = false; Point p1 = null,p2 = null; for (int i = 0; i= 0){ foundNodes++; boolean hasWriters = addWritersOfPoint(writerSet, nodeLats[pos], nodeLons[pos]); if (!hasWriters) needsCrossTileCheck = true; } } if (foundNodes < numRefs) System.out.println("Sorry, way " + wayId + " is missing " + (numRefs-foundNodes) + " node(s)."); if (needsCrossTileCheck == false){ int numWriters = writerSet.cardinality(); if (numWriters == 0) needsCrossTileCheck = true; else if (numWriters > 1 && dataStorer.getAreaDictionary().mayCross(writerSet)) { needsCrossTileCheck = true; } } if (needsCrossTileCheck){ AreaSet possibleWriters = new AreaSet(); checkBoundingBox(possibleWriters ,wayBbox); // the way did cross a border tile for (int i = 0; i= 0){ if (i > 0){ p1 = p2; } p2 = new Point(nodeLons[pos],nodeLats[pos]); if (p1 != null){ addWritersOfCrossedTiles(writerSet, possibleWriters, p1, p2); } } } } } /** * Calculate the bbox of the way. 
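* The rectangle is in map units; a zero width or height is widened to 1
* (see the Math.max(1, ...) calls) so that later intersection tests work.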
* @param wayId the id that identifies the way * @param wayRefs the list of node references * @return a new Area object or null if no node is known */ private Rectangle getWayBbox (long wayId, LongArrayList wayRefs){ // calculate the bbox int minLat = Integer.MAX_VALUE,minLon = Integer.MAX_VALUE; int maxLat = Integer.MIN_VALUE,maxLon = Integer.MIN_VALUE; int numRefs = wayRefs.size(); for (int i = 0; i= 0){ int lat = nodeLats[pos]; int lon = nodeLons[pos]; if (lat < minLat) minLat = lat; if (lat > maxLat) maxLat = lat; if (lon < minLon) minLon = lon; if (lon > maxLon) maxLon = lon; } } if (maxLon == Integer.MIN_VALUE|| maxLat == Integer.MIN_VALUE){ System.out.println("Sorry, no nodes found for needed way " + wayId); return null; } return new Rectangle(minLon, minLat, Math.max(1, maxLon-minLon), Math.max(1,maxLat-minLat)); } /** * Increment the loop detection ID. If the maximum value is reached, * reset all IDs and start again. */ private void incVisitID() { if (visitId == Integer.MAX_VALUE){ // unlikely visitId = 0; for (Entry entry : relMap.long2ObjectEntrySet()){ entry.getValue().setLastVisitId(visitId); } } visitId++; } /* * Report a loop in a relation */ static void loopAction(MTRelation rel, MTRelation subRel, ArrayList visited){ if (subRel.isOnLoop()) return; // don't complain again if (rel.getId() == subRel.getId()){ System.out.println("Loop in relation " + rel.getId() + ": Contains itself as sub relation."); rel.markOnLoop(); } else if (visited.contains(rel)){ subRel.markOnLoop(); StringBuilder sb = new StringBuilder("Loop in relation " + subRel.getId() + ". Loop contains relation(s): "); for (MTRelation r: visited){ sb.append(r.getId()); sb.append(' '); r.markOnLoop(); } System.out.println(sb); } else { System.out.println("Duplicate sub relation in relation " + rel.getId() + ". Already looked at member " + subRel.getId() + "." ); } } /** * Handle multipolygon relations that have too large bboxes. 
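* Way members (except those with role "inner") are chained by matching the end
* node ids stored in JoinedWay until a ring closes or no further way fits; the
* bbox of each chained polygon is then used to select the tiles.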
* TODO: handle polygons that cross the 180/-180 border * @param relWriters * @param rel */ private void checkSpecialMP(AreaSet relWriters, MTRelation rel) { long[] joinedWays = null; List wayMembers = new LinkedList<>(); LongArrayList polygonWays = new LongArrayList(); for (int i = 0; i < rel.numMembers; i++){ long memId = rel.memRefs[i]; if (rel.memTypes[i] == MEM_WAY_TYPE && "inner".equals(rel.memRoles[i]) == false){ wayMembers.add(memId); } } boolean complainedAboutSize = false; Rectangle mpBbox; boolean hasMissingWays = false; while (wayMembers.size() > 0){ polygonWays.clear(); mpBbox = null; boolean closed = false; while (true){ boolean changed = false; for (int i = wayMembers.size()-1; i >= 0; i--){ boolean added = false; long memId = wayMembers.get(i); JoinedWay mpWay = mpWayEndNodesMap.get(memId); if (mpWay == null){ wayMembers.remove(i); hasMissingWays = true; continue; } long mpWayStart = mpWay.startNode; long mpWayEnd = mpWay.endNode; added = true; if (joinedWays == null){ joinedWays = new long[2]; joinedWays[0] = mpWayStart; joinedWays[1] = mpWayEnd; } else if (joinedWays[0] == mpWayStart){ joinedWays[0] = mpWayEnd; } else if (joinedWays[0] == mpWayEnd){ joinedWays[0] = mpWayStart; } else if (joinedWays[1] == mpWayStart){ joinedWays[1] = mpWayEnd; } else if (joinedWays[1] == mpWayEnd){ joinedWays[1] = mpWayStart; } else added = false; if (added){ changed = true; wayMembers.remove(i); polygonWays.add(memId); int pos = wayWriterMap.getKeyPos(memId); if (pos < 0) continue; Rectangle wayBbox = wayBboxMap.get(memId); if (wayBbox == null) continue; if (wayBbox.x < 0 && wayBbox.getMaxX() > 0 && wayBbox.width >= PROBLEM_WIDTH){ System.out.println("way crosses -180/180: " + memId); } if (mpBbox == null) mpBbox = new Rectangle(wayBbox); else mpBbox.add(wayBbox); if (!complainedAboutSize && mpBbox.x < 0 && mpBbox.getMaxX() > 0 && mpBbox.width >= PROBLEM_WIDTH){ System.out.println("rel crosses -180/180: " + rel.getId()); complainedAboutSize = true; } } if (joinedWays[0] == joinedWays[1]){ closed = true; break; } } if (!changed || closed){ break; } } if (mpBbox != null){ // found closed polygon or nothing more to add boolean isRelevant = checkBoundingBox(relWriters, mpBbox); if (isRelevant & hasMissingWays) System.out.println("Warning: Incomplete multipolygon relation " + rel.getId() + " (" + rel.getName() + "): using bbox of " + (closed ? "closed":"unclosed") + " polygon to calc tiles, ways: " + polygonWays); mpBbox = null; } joinedWays = null; } return; } /** * Stores the IDs of the end nodes of a way * @author GerdP * */ class JoinedWay{ long startNode, endNode; public JoinedWay(long startNode, long endNode) { this.startNode = startNode; this.endNode = endNode; } } /** * A helper class that just contains all information about relation that we need * in the MultiTileProcessor. 
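* Member ids, types and roles are kept in parallel arrays (memRefs, memTypes,
* memRoles) and the boolean properties are packed into a short bit field to keep
* the per-relation memory footprint small.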
* @author GerdP * */ private class MTRelation { private final static short IS_MP = 0x01; private final static short ON_LOOP = 0x02; private final static short HAS_NODES = 0x04; private final static short HAS_WAYS = 0x08; private final static short HAS_RELS = 0x10; private final static short IS_JUST_PARENT = 0x20; private final static short IS_NOT_COMPLETE = 0x40; private final long id; protected final byte[] memTypes; protected final String[] memRoles; protected final long[] memRefs; protected final int numMembers; private final String name; private int multiTileWriterIndex = UNASSIGNED; private int lastVisitId; private short flags; // flags for the MultiTileProcessor public MTRelation(Relation rel){ numMembers = rel.getMembers().size(); memTypes = new byte[numMembers]; memRoles = new String[numMembers]; memRefs = new long[numMembers]; id = rel.getId(); for (int i = 0; i tags = rel.tagsIterator(); while(tags.hasNext()) { Element.Tag t = tags.next(); for (String nameTag: NAME_TAGS){ if (nameTag.equals(t.key)){ goodNameCandidate = t.value; break; } } if (goodNameCandidate != null) break; if (t.key.contains("name")) nameCandidate = t.value; else if ("postal_code".equals(t.key)) zipCode = t.value; } if (goodNameCandidate != null) name = goodNameCandidate; else if (nameCandidate != null) name = nameCandidate; else if (zipCode != null) name = "postal_code=" + zipCode; else name = "?"; } public long getId() { return id; } public boolean isOnLoop() { return (flags & ON_LOOP) != 0; } public void markOnLoop() { this.flags |= ON_LOOP; } public int getMultiTileWriterIndex() { return multiTileWriterIndex; } public void setMultiTileWriterIndex(int multiTileWriterIndex) { this.multiTileWriterIndex = multiTileWriterIndex; } public boolean hasNodeMembers() { return (flags & HAS_NODES) != 0; } public boolean hasWayMembers() { return (flags & HAS_WAYS) != 0; } public boolean hasRelMembers() { return (flags & HAS_RELS) != 0; } public boolean wasAddedAsParent() { return (flags & IS_JUST_PARENT) != 0; } public void setAddedAsParent() { this.flags |= IS_JUST_PARENT; } public boolean isNotComplete() { return (flags & IS_NOT_COMPLETE) != 0; } public void setNotComplete() { this.flags |= IS_NOT_COMPLETE; } public boolean isMultiPolygon() { return (flags & IS_MP) != 0; } public void markAsMultiPolygon() { this.flags |= IS_MP; } public int getLastVisitId() { return lastVisitId; } public void setLastVisitId(int visitId) { this.lastVisitId = visitId; } public String getName(){ return name; } @Override public String toString(){ return "r" + id + " " + name + " subrels:" + hasRelMembers() + " incomplete:" + isNotComplete(); } } } splitter-r653/src/uk/me/parabola/splitter/Node.java0000664000175300017530000000246514352507254023533 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * A single map node. 
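* Coordinates are stored both as degrees (lat/lon) and as integer map units
* (mapLat/mapLon, converted via Utils.toMapUnit); values outside the allowed
* map-unit range are rejected in set().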
* * @author Steve Ratcliffe */ public class Node extends Element { private double lat, lon; private int mapLat, mapLon; public void set(long id, double lat, double lon) { setId(id); this.lat = lat; this.lon = lon; this.mapLat = Utils.toMapUnit(lat); this.mapLon = Utils.toMapUnit(lon); if (mapLat < Utils.MIN_LAT_MAP_UNITS || mapLat > Utils.MAX_LAT_MAP_UNITS) throw new IllegalArgumentException("invalid lattitude value " + lat); if (mapLon < Utils.MIN_LON_MAP_UNITS || mapLon > Utils.MAX_LON_MAP_UNITS) throw new IllegalArgumentException("invalid longitude value " + lon); } public double getLat() { return lat; } public double getLon() { return lon; } public int getMapLat() { return mapLat; } public int getMapLon() { return mapLon; } } splitter-r653/src/uk/me/parabola/splitter/OSMFileHandler.java0000664000175300017530000001221214352507254025371 0ustar builderbuilder00000000000000/* * Copyright (c) 2016, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.RandomAccessFile; import java.io.Reader; import java.nio.channels.FileChannel; import java.util.HashMap; import java.util.List; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import org.xmlpull.v1.XmlPullParserException; import crosby.binary.file.BlockInputStream; import it.unimi.dsi.fastutil.shorts.ShortArrayList; import uk.me.parabola.splitter.parser.BinaryMapParser; import uk.me.parabola.splitter.parser.O5mMapParser; import uk.me.parabola.splitter.parser.OSMXMLParser; /** * A class which stores parameters needed to process input (OSM) files * * @author Gerd Petermann * */ public class OSMFileHandler { /** list of OSM input files to process */ private List filenames; // for faster access on blocks in pbf files private final HashMap blockTypeMap = new HashMap<>(); // for faster access on blocks in o5m files private final HashMap skipArrayMap = new HashMap<>(); // Whether or not the source OSM file(s) contain strictly nodes first, then // ways, then rels, // or they're all mixed up. Running with mixed enabled takes longer. 
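// Hedged usage sketch (the method names exist in this class, the file name is
// hypothetical):
//   OSMFileHandler handler = new OSMFileHandler();
//   handler.setFileNames(Arrays.asList("extract.o5m"));
//   handler.setMixed(false);   // input is sorted: nodes, then ways, then relations
//   handler.setMaxThreads(2);  // >1 lets execute() use the producer/consumer path
//   boolean done = handler.execute(processor);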
private boolean mixed; private int maxThreads = 1; public void setFileNames(List filenames) { this.filenames = filenames; } public void setMixed(boolean f) { mixed = f; } public void setMaxThreads(int maxThreads) { this.maxThreads = maxThreads; } public boolean process(MapProcessor processor) { // create appropriate parser for each input file for (String filename : filenames) { System.out.println("Processing " + filename); processor.startFile(); try { if (filename.endsWith(".o5m")) { File file = new File(filename); try (RandomAccessFile raf = new RandomAccessFile(file, "r"); FileChannel fileChannel = raf.getChannel()) { long[] skipArray = skipArrayMap.get(filename); O5mMapParser o5mParser = new O5mMapParser(processor, fileChannel, skipArray); o5mParser.parse(); if (skipArray == null) { skipArray = o5mParser.getNextSkipArray(); skipArrayMap.put(filename, skipArray); } } } else if (filename.endsWith(".pbf")) { // Is it a binary file? File file = new File(filename); ShortArrayList blockTypes = blockTypeMap.get(filename); BinaryMapParser binParser = new BinaryMapParser(processor, blockTypes, 1); try (InputStream stream = new FileInputStream(file)) { BlockInputStream blockinput = (new BlockInputStream(stream, binParser)); blockinput.process(); if (blockTypes == null) { // remember this file blockTypes = binParser.getBlockList(); blockTypeMap.put(filename, blockTypes); } } } else { // No, try XML. try (Reader reader = Utils.openFile(filename, maxThreads > 1)) { OSMXMLParser parser = new OSMXMLParser(processor, mixed); parser.setReader(reader); parser.parse(); } } } catch (FileNotFoundException e) { System.out.println(e); throw new SplitFailedException("ERROR: file " + filename + " was not found"); } catch (XmlPullParserException e) { e.printStackTrace(); throw new SplitFailedException("ERROR: file " + filename + " is not a valid OSM XML file"); } catch (IllegalArgumentException e) { e.printStackTrace(); throw new SplitFailedException("ERROR: file " + filename + " contains unexpected data"); } catch (IOException e) { e.printStackTrace(); throw new SplitFailedException("ERROR: file " + filename + " caused I/O exception"); } catch (RuntimeException e) { e.printStackTrace(); throw new SplitFailedException("ERROR: file " + filename + " caused exception"); } } return processor.endMap(); } RuntimeException exception = null; public boolean execute(MapProcessor processor) { if (maxThreads == 1) return process(processor); // use two threads BlockingQueue queue = new ArrayBlockingQueue<>(10); QueueProcessor queueProcessor = new QueueProcessor(queue, processor); // start producer thread new Thread("producer for " + processor.getClass().getSimpleName()) { @Override public void run() { try { process(queueProcessor); } catch (SplitFailedException e) { try { queue.put(new OSMMessage(OSMMessage.Type.EXIT)); exception = e; } catch (InterruptedException e1) { e1.printStackTrace(); } } } }.start(); boolean done = processor.consume(queue); if (exception != null) throw exception; return done; } } splitter-r653/src/uk/me/parabola/splitter/OSMMessage.java0000664000175300017530000000221314352507254024600 0ustar builderbuilder00000000000000/* * Copyright (c) 2016, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * For OSM data which is passed between parsers and processors * @author Gerd Petermann * */ public class OSMMessage { public enum Type {START_FILE, ELEMENTS, BOUNDS, END_MAP, EXIT} // either el or bounds must be null final Element[] elements; final Area bounds; final Type type; public OSMMessage(Element[] elements) { this.elements = elements; type = Type.ELEMENTS; bounds = null; } public OSMMessage(Area bounds) { this.bounds = bounds; type = Type.BOUNDS; elements = null; } public OSMMessage(Type t) { assert t != Type.BOUNDS && t != Type.ELEMENTS; elements = null; bounds = null; type = t; } } splitter-r653/src/uk/me/parabola/splitter/ProblemListProcessor.java0000664000175300017530000002531314352507254026777 0ustar builderbuilder00000000000000/* * Copyright (c) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import uk.me.parabola.splitter.Relation.Member; import uk.me.parabola.splitter.args.SplitterParams; import uk.me.parabola.splitter.tools.SparseLong2IntMap; import it.unimi.dsi.fastutil.longs.LongArrayList; import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.regex.Pattern; /** * Find ways and relations that will be incomplete. 
* Strategy: * - calculate the areas of each node, calculate and store an integer that represents the combination of areas * (this is done by the AreaDictionary) * - a way is a problem way if its nodes are found in different combinations of areas * - a relation is a problem relation if its members are found in different combinations of areas * */ class ProblemListProcessor extends AbstractMapProcessor { private static final int PHASE1_NODES_AND_WAYS = 1; private static final int PHASE2_RELS_ONLY = 2; private final SparseLong2IntMap coords; private final SparseLong2IntMap ways; private final AreaDictionary areaDictionary; private final DataStorer dataStorer; private final LongArrayList problemWays = new LongArrayList(); private final LongArrayList problemRels = new LongArrayList(); /** each bit represents one distinct area */ private final AreaSet areaSet = new AreaSet(); private int phase = PHASE1_NODES_AND_WAYS; private long countCoords = 0; private final int areaOffset; private final int lastAreaOffset; private boolean isFirstPass; private boolean isLastPass; private AreaIndex areaIndex; private final HashSet wantedBoundaryAdminLevels = new HashSet<>(); private final HashSet wantedBoundaryTagValues; private final HashSet wantedRouteTagValues; ProblemListProcessor(DataStorer dataStorer, int areaOffset, int numAreasThisPass, SplitterParams mainOptions) { this.dataStorer = dataStorer; this.areaDictionary = dataStorer.getAreaDictionary(); if (dataStorer.getUsedWays() == null){ ways = new SparseLong2IntMap("way"); ways.defaultReturnValue(UNASSIGNED); dataStorer.setUsedWays(ways); } else ways = dataStorer.getUsedWays(); this.areaIndex = dataStorer.getGrid(); this.coords = new SparseLong2IntMap("coord"); this.coords.defaultReturnValue(UNASSIGNED); this.isFirstPass = (areaOffset == 0); this.areaOffset = areaOffset; this.lastAreaOffset = areaOffset + numAreasThisPass - 1; this.isLastPass = (areaOffset + numAreasThisPass == dataStorer.getNumOfAreas()); String boundaryTagsParm = mainOptions.getBoundaryTags(); if ("use-exclude-list".equals(boundaryTagsParm)) wantedBoundaryTagValues = null; else { String[] boundaryTags = boundaryTagsParm.split(Pattern.quote(",")); wantedBoundaryTagValues = new HashSet<>(Arrays.asList(boundaryTags)); } setWantedAdminLevel(mainOptions.getWantedAdminLevel()); String routeRelationValuesParm = mainOptions.getRouteRelValues(); if (routeRelationValuesParm.isEmpty()) { wantedRouteTagValues = null; } else { String[] routeValues = routeRelationValuesParm.split(Pattern.quote(",")); wantedRouteTagValues = new HashSet<>(Arrays.asList(routeValues)); } } public void setWantedAdminLevel(int adminLevel) { int min, max = 11; min = Math.max(2, adminLevel); wantedBoundaryAdminLevels.clear(); for (int i = min; i <= max; i++){ wantedBoundaryAdminLevels.add(Integer.toString(i)); } } @Override public boolean skipTags() { return phase == PHASE1_NODES_AND_WAYS; } @Override public boolean skipNodes() { return phase == PHASE2_RELS_ONLY; } @Override public boolean skipWays() { return phase == PHASE2_RELS_ONLY; } @Override public boolean skipRels() { return phase != PHASE2_RELS_ONLY; } @Override public int getPhase(){ return phase; } @Override public void processNode(Node node) { if (phase == PHASE2_RELS_ONLY) return; int countAreas = 0; int lastUsedArea = UNASSIGNED; AreaGridResult areaCandidates = areaIndex.get(node); if (areaCandidates == null) return; areaSet.clear(); for (int n : areaCandidates.set) { if (n >= areaOffset && n <= lastAreaOffset && (!areaCandidates.testNeeded || 
areaDictionary.getArea(n).contains(node))) { areaSet.set(n); ++countAreas; lastUsedArea = n; } } if (countAreas > 0) { int areaIdx; if (countAreas > 1) areaIdx = areaDictionary.translate(areaSet); else areaIdx = AreaDictionary.translate(lastUsedArea); // no need to do lookup in the dictionary coords.put(node.getId(), areaIdx); ++countCoords; if (countCoords % 10_000_000 == 0) { System.out.println("coord MAP occupancy: " + Utils.format(countCoords) + ", number of area dictionary entries: " + areaDictionary.size()); } } } @Override public void processWay(Way way) { if (phase == PHASE2_RELS_ONLY) return; boolean maybeChanged = false; int oldclIndex = UNASSIGNED; areaSet.clear(); for (long id : way.getRefs()){ // Get the list of areas that the way is in. int clIdx = coords.get(id); if (clIdx != UNASSIGNED && oldclIndex != clIdx){ areaSet.or(areaDictionary.getSet(clIdx)); oldclIndex = clIdx; maybeChanged = true; } } if (!isFirstPass && maybeChanged || (isLastPass && !isFirstPass)){ int wayAreaIdx = ways.get(way.getId()); if (wayAreaIdx != UNASSIGNED) areaSet.or(areaDictionary.getSet(wayAreaIdx)); } if (isLastPass && checkIfMultipleAreas(areaSet)){ problemWays.add(way.getId()); } if (maybeChanged && !areaSet.isEmpty()){ ways.put(way.getId(), areaDictionary.translate(areaSet)); } } // default exclude list for boundary tag private static final HashSet unwantedBoundaryTagValues = new HashSet<>( Arrays.asList("administrative", "postal_code", "political")); @Override public void processRelation(Relation rel) { if (phase == PHASE1_NODES_AND_WAYS) return; boolean useThis = false; boolean isMPRelType = false; boolean hasBoundaryTag = false; boolean isWantedBoundary = wantedBoundaryTagValues == null; boolean isRouteRelType = false; boolean isWantedRoute = wantedRouteTagValues != null; Iterator tags = rel.tagsIterator(); String admin_level = null; while(tags.hasNext()) { Element.Tag t = tags.next(); if ("type".equals(t.key)) { if ("restriction".equals((t.value)) || "through_route".equals((t.value)) || t.value.startsWith("restriction:")) useThis= true; // no need to check other tags else if ("multipolygon".equals((t.value)) || "boundary".equals((t.value))) isMPRelType= true; else if ("route".equals(t.value)) isRouteRelType = true; else if ("associatedStreet".equals((t.value)) || "street".equals((t.value))) useThis= true; // no need to check other tags } else if ("boundary".equals(t.key)){ hasBoundaryTag = true; if (wantedBoundaryTagValues != null){ if (wantedBoundaryTagValues.contains(t.value)) isWantedBoundary = true; } else { if (unwantedBoundaryTagValues.contains(t.value)) isWantedBoundary = false; } } else if ("admin_level".equals(t.key)){ admin_level = t.value; } if (wantedRouteTagValues != null && "route".equals((t.key)) && wantedRouteTagValues.contains(t.value)) { isWantedRoute = true; } if (useThis) break; } if (isMPRelType && (isWantedBoundary || !hasBoundaryTag)) useThis = true; else if (isMPRelType && hasBoundaryTag && admin_level != null) { if (wantedBoundaryAdminLevels.contains(admin_level)) useThis = true; } else if (isRouteRelType && isWantedRoute) { useThis = true; } if (!useThis) { return; } areaSet.clear(); Integer relAreaIdx; if (!isFirstPass) { relAreaIdx = dataStorer.getUsedRels().get(rel.getId()); if (relAreaIdx != null) areaSet.or(areaDictionary.getSet(relAreaIdx)); } int oldclIndex = UNASSIGNED; int oldwlIndex = UNASSIGNED; for (Member mem : rel.getMembers()) { long id = mem.getRef(); if ("node".equals(mem.getType())) { int clIdx = coords.get(id); if (clIdx != UNASSIGNED){ if 
(oldclIndex != clIdx){ areaSet.or(areaDictionary.getSet(clIdx)); } oldclIndex = clIdx; } } else if ("way".equals(mem.getType())) { int wlIdx = ways.get(id); if (wlIdx != UNASSIGNED){ if (oldwlIndex != wlIdx){ areaSet.or(areaDictionary.getSet(wlIdx)); } oldwlIndex = wlIdx; } } // ignore relation here } if (areaSet.isEmpty()) return; if (isLastPass){ if (checkIfMultipleAreas(areaSet)){ problemRels.add(rel.getId()); } else { // the relation is only in one distinct area // store the info that the rel is only in one distinct area dataStorer.storeRelationAreas(rel.getId(), areaSet); } return; } relAreaIdx = areaDictionary.translate(areaSet); dataStorer.getUsedRels().put(rel.getId(), relAreaIdx); } @Override public boolean endMap() { if (phase == PHASE1_NODES_AND_WAYS){ phase++; return false; } coords.stats(0); ways.stats(0); if (isLastPass){ System.out.println(""); System.out.println(" Number of stored area combis for nodes: " + Utils.format(coords.size())); System.out.println(" Number of stored area combis for ways: " + Utils.format(dataStorer.getUsedWays().size())); System.out.println(" Number of stored Integers for rels: " + Utils.format(dataStorer.getUsedRels().size())); System.out.println(" Number of stored combis in dictionary: " + Utils.format(areaDictionary.size())); System.out.println(" Number of detected problem ways: " + Utils.format(problemWays.size())); System.out.println(" Number of detected problem rels: " + Utils.format(problemRels.size())); Utils.printMem(); System.out.println(""); dataStorer.getUsedWays().clear(); dataStorer.getUsedRels().clear(); } return true; } /** * @param areaCombis * @return true if the combination of distinct areas can contain a problem polygon */ static boolean checkIfMultipleAreas(AreaSet areaCombis){ // this returns a few false positives for those cases // where a way or rel crosses two pseudo-areas at a // place that is far away from the real areas // but it is difficult to detect these cases. return areaCombis.cardinality() > 1; } public LongArrayList getProblemWays() { return problemWays; } public LongArrayList getProblemRels() { return problemRels; } } splitter-r653/src/uk/me/parabola/splitter/ProblemLists.java0000664000175300017530000005220514352507254025262 0ustar builderbuilder00000000000000/* * Copyright (c) 2016, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter; import java.awt.Point; import java.awt.Rectangle; import java.io.File; import java.io.FileInputStream; import java.io.FileWriter; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.LineNumberReader; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Set; import java.util.TreeSet; import java.util.regex.Pattern; import it.unimi.dsi.fastutil.longs.LongArrayList; import uk.me.parabola.splitter.args.SplitterParams; public class ProblemLists { private final LongArrayList problemWays = new LongArrayList(); private final LongArrayList problemRels = new LongArrayList(); private final TreeSet calculatedProblemWays = new TreeSet<>(); private final TreeSet calculatedProblemRels = new TreeSet<>(); /** * Calculate lists of ways and relations that appear in multiple areas for a * given list of areas. * @param osmFileHandler * @param realAreas list of areas, possibly overlapping if read from split-file * @param overlapAmount * @param mainOptions main options * @return */ public DataStorer calcProblemLists(OSMFileHandler osmFileHandler, List realAreas, int overlapAmount, SplitterParams mainOptions) { long startProblemListGenerator = System.currentTimeMillis(); ArrayList distinctAreas = getNonOverlappingAreas(realAreas); if (distinctAreas.size() > realAreas.size()) { System.err.println("Warning: The areas given in --split-file are overlapping."); Set overlappingTiles = new TreeSet<>(); for (int i = 0; i < realAreas.size(); i++) { Area a1 = realAreas.get(i); for (int j = i + 1; j < realAreas.size(); j++) { Area a2 = realAreas.get(j); if (a1.overlaps(a2)) { overlappingTiles.add(a1.getMapId()); overlappingTiles.add(a2.getMapId()); System.out.format("overlapping areas %08d and %08d : (%d,%d to %d,%d) and (%d,%d to %d,%d)\n", a1.getMapId(), a2.getMapId(), a1.getMinLat(), a1.getMinLong(), a1.getMaxLat(), a1.getMaxLong(), a2.getMinLat(), a2.getMinLong(), a2.getMaxLat(), a2.getMaxLong()); a1.overlaps(a2); } } } if (!overlappingTiles.isEmpty()) { System.out.println("Overlaping tiles: " + overlappingTiles.toString()); } } System.out.println("Generating problem list for " + distinctAreas.size() + " distinct areas"); List workAreas = addPseudoAreas(distinctAreas); int numPasses = (int) Math.ceil((double) workAreas.size() / mainOptions.getMaxAreas()); int areasPerPass = (int) Math.ceil((double) workAreas.size() / numPasses); if (numPasses > 1) { System.out.println("Processing " + distinctAreas.size() + " areas in " + numPasses + " passes, " + areasPerPass + " areas at a time"); } else { System.out.println("Processing " + distinctAreas.size() + " areas in a single pass"); } ArrayList allAreas = new ArrayList<>(); System.out.println("Pseudo areas:"); for (int j = 0; j < workAreas.size(); j++) { Area area = workAreas.get(j); allAreas.add(area); if (area.isPseudoArea()) System.out.println("Pseudo area " + area.getMapId() + " covers " + area); } DataStorer distinctDataStorer = new DataStorer(workAreas, overlapAmount); System.out.println("Starting problem-list-generator pass(es)"); for (int pass = 0; pass < numPasses; pass++) { System.out.println("-----------------------------------"); System.out.println("Starting problem-list-generator pass " + (pass + 1) + " of " + numPasses); long startThisPass = System.currentTimeMillis(); int areaOffset = pass * areasPerPass; int numAreasThisPass = Math.min(areasPerPass, workAreas.size() - pass * areasPerPass); 
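// Worked example with hypothetical numbers: 1000 work areas and --max-areas=512
// give numPasses = ceil(1000/512) = 2 and areasPerPass = ceil(1000/2) = 500, so
// pass 0 covers areas 0..499 and pass 1 covers areas 500..999.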
ProblemListProcessor processor = new ProblemListProcessor(distinctDataStorer, areaOffset, numAreasThisPass, mainOptions); boolean done = false; while (!done) { done = osmFileHandler.execute(processor); calculatedProblemWays.addAll(processor.getProblemWays()); calculatedProblemRels.addAll(processor.getProblemRels()); } System.out.println("Problem-list-generator pass " + (pass + 1) + " took " + (System.currentTimeMillis() - startThisPass) + " ms"); } System.out.println("Problem-list-generator pass(es) took " + (System.currentTimeMillis() - startProblemListGenerator) + " ms"); DataStorer dataStorer = new DataStorer(realAreas, overlapAmount); dataStorer.translateDistinctToRealAreas(distinctDataStorer); return dataStorer; } /** Read user defined problematic relations and ways */ public boolean readProblemIds(String problemFileName) { File fProblem = new File(problemFileName); boolean ok = true; if (!fProblem.exists()) { System.out.println("Error: problem file doesn't exist: " + fProblem); return false; } try (InputStream fileStream = new FileInputStream(fProblem); LineNumberReader problemReader = new LineNumberReader(new InputStreamReader(fileStream));) { Pattern csvSplitter = Pattern.compile(Pattern.quote(":")); Pattern commentSplitter = Pattern.compile(Pattern.quote("#")); String problemLine; String[] items; while ((problemLine = problemReader.readLine()) != null) { items = commentSplitter.split(problemLine); if (items.length == 0 || items[0].trim().isEmpty()) { // comment or empty line continue; } items = csvSplitter.split(items[0].trim()); if (items.length != 2) { System.out.println("Error: Invalid format in problem file, line number " + problemReader.getLineNumber() + ": " + problemLine); ok = false; continue; } long id = 0; try { id = Long.parseLong(items[1]); } catch (NumberFormatException exp) { System.out.println("Error: Invalid number format in problem file, line number " + +problemReader.getLineNumber() + ": " + problemLine + exp); ok = false; } if ("way".equals(items[0])) problemWays.add(id); else if ("rel".equals(items[0])) problemRels.add(id); else { System.out.println("Error in problem file: Type not way or relation, line number " + +problemReader.getLineNumber() + ": " + problemLine); ok = false; } } } catch (IOException exp) { System.out.println("Error: Cannot read problem file " + fProblem + exp); return false; } return ok; } /** * Write a file that can be given to mkgmap that contains the correct * arguments for the split file pieces. You are encouraged to edit the file * and so it contains a template of all the arguments that you might want to * use. 
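* Example of the lines produced (the ids are hypothetical):
*   way: 123456 #
*   rel: 78910 #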
* * @param problemRelsThisPass * @param problemWaysThisPass */ public void writeProblemList(File fileOutputDir, String fname) { try (PrintWriter w = new PrintWriter(new FileWriter(new File(fileOutputDir, fname)));) { w.println("#"); w.println("# This file can be given to splitter using the --problem-file option"); w.println("#"); w.println("# List of relations and ways that are known to cause problems"); w.println("# in splitter or mkgmap"); w.println("# Objects listed here are specially treated by splitter to assure"); w.println("# that complete data is written to all related tiles"); w.println("# Format:"); w.println("# way:"); w.println("# rel:"); w.println("# ways"); for (long id : calculatedProblemWays) { w.println("way: " + id + " #"); } w.println("# rels"); for (long id : calculatedProblemRels) { w.println("rel: " + id + " #"); } w.println(); } catch (IOException e) { System.err.println("Warning: Could not write problem-list file " + fname + ", processing continues"); } } /** * Calculate writers for elements which cross areas. * * @param dataStorer * stores data that is needed in different passes of the program. * @param osmFileHandler * used to access OSM input files */ public void calcMultiTileElements(DataStorer dataStorer, OSMFileHandler osmFileHandler) { // merge the calculated problem ids and the user given problem ids problemWays.addAll(calculatedProblemWays); problemRels.addAll(calculatedProblemRels); calculatedProblemRels.clear(); calculatedProblemWays.clear(); if (problemWays.isEmpty() && problemRels.isEmpty()) return; // calculate which ways and relations are written to multiple areas. MultiTileProcessor multiProcessor = new MultiTileProcessor(dataStorer, problemWays, problemRels); // multiTileProcessor stores the problem relations in its own structures // return memory to GC problemRels.clear(); problemWays.clear(); problemRels.trim(); problemWays.trim(); boolean done = false; long startThisPhase = System.currentTimeMillis(); int prevPhase = -1; while (!done) { int phase = multiProcessor.getPhase(); if (prevPhase != phase) { startThisPhase = System.currentTimeMillis(); System.out.println("-----------------------------------"); System.out.println("Executing multi-tile analyses phase " + phase); } done = osmFileHandler.execute(multiProcessor); prevPhase = phase; if (done || (phase != multiProcessor.getPhase())) { System.out.println("Multi-tile analyses phase " + phase + " took " + (System.currentTimeMillis() - startThisPhase) + " ms"); } } System.out.println("-----------------------------------"); } /** * Make sure that our areas cover the planet. This is done by adding * pseudo-areas if needed. 
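* Pseudo-areas get negative map ids and are flagged via setPseudoArea(true); they
* only serve to cover the parts of the planet that the real areas leave empty.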
* * @param realAreas * list of areas (read from split-file or calculated) * @return new list of areas containing the real areas and additional areas */ public static List addPseudoAreas(List realAreas) { ArrayList areas = new ArrayList<>(realAreas); Rectangle planetBounds = new Rectangle(Utils.toMapUnit(-180.0), Utils.toMapUnit(-90.0), 2 * Utils.toMapUnit(180.0), 2 * Utils.toMapUnit(90.0)); while (!checkIfCovered(planetBounds, areas)) { boolean changed = addPseudoArea(areas); if (!changed) { throw new SplitFailedException("Failed to fill planet with pseudo-areas"); } } return areas; } /** * Work around for possible rounding errors in area.subtract processing * * @param area * an area that is considered to be empty or a rectangle * @return */ private static java.awt.geom.Area simplifyArea(java.awt.geom.Area area) { if (area.isEmpty() || area.isRectangular()) return area; // area.isRectugular() may returns false although the shape is a // perfect rectangle :-( If we subtract the area from its bounding // box we get better results. java.awt.geom.Area bbox = new java.awt.geom.Area(area.getBounds2D()); bbox.subtract(area); if (bbox.isEmpty()) // bbox equals area: is a rectangle return new java.awt.geom.Area(area.getBounds2D()); return area; } private static boolean checkIfCovered(Rectangle bounds, ArrayList areas) { java.awt.geom.Area bbox = new java.awt.geom.Area(bounds); long sumTiles = 0; for (Area area : areas) { sumTiles += (long) area.getHeight() * (long) area.getWidth(); bbox.subtract(area.getJavaArea()); } long areaBox = (long) bounds.height * (long) bounds.width; if (sumTiles != areaBox) return false; return bbox.isEmpty(); } /** * Create a list of areas that do not overlap. If areas in the original list * are overlapping, they can be replaced by up to 5 disjoint areas. 
This is * done if parameter makeDisjoint is true * * @param realAreas * the list of areas * @return the new list */ public static ArrayList getNonOverlappingAreas(final List realAreas) { java.awt.geom.Area covered = new java.awt.geom.Area(); ArrayList splitList = new ArrayList<>(); int artificialId = -99999999; boolean foundOverlap = false; for (Area area1 : realAreas) { Rectangle r1 = area1.getRect(); if (covered.intersects(r1) == false) { splitList.add(area1); } else { if (foundOverlap == false) { foundOverlap = true; System.out.println("Removing overlaps from tiles..."); } // String msg = "splitting " + area1.getMapId() + " " + (i+1) + // "/" + realAreas.size() + " overlapping "; // find intersecting areas in the already covered part ArrayList splitAreas = new ArrayList<>(); for (int j = 0; j < splitList.size(); j++) { Area area2 = splitList.get(j); if (area2 == null) continue; Rectangle r2 = area2.getRect(); if (r1.intersects(r2)) { java.awt.geom.Area overlap = new java.awt.geom.Area(area1.getRect()); overlap.intersect(area2.getJavaArea()); Rectangle ro = overlap.getBounds(); if (ro.height == 0 || ro.width == 0) continue; // msg += area2.getMapId() + " "; Area aNew = new Area(ro.y, ro.x, (int) ro.getMaxY(), (int) ro.getMaxX()); aNew.setMapId(artificialId++); aNew.setName("" + area1.getMapId()); aNew.setJoinable(false); covered.subtract(area2.getJavaArea()); covered.add(overlap); splitList.set(j, aNew); java.awt.geom.Area coveredByPair = new java.awt.geom.Area(r1); coveredByPair.add(new java.awt.geom.Area(r2)); java.awt.geom.Area originalPair = new java.awt.geom.Area(coveredByPair); int minX = coveredByPair.getBounds().x; int minY = coveredByPair.getBounds().y; int maxX = (int) coveredByPair.getBounds().getMaxX(); int maxY = (int) coveredByPair.getBounds().getMaxY(); coveredByPair.subtract(overlap); if (coveredByPair.isEmpty()) continue; // two equal areas a coveredByPair.subtract(covered); java.awt.geom.Area testSplit = new java.awt.geom.Area(overlap); Rectangle[] rectPair = { r1, r2 }; Area[] areaPair = { area1, area2 }; int lx = minX; int lw = ro.x - minX; int rx = (int) ro.getMaxX(); int rw = maxX - rx; int uy = (int) ro.getMaxY(); int uh = maxY - uy; int by = minY; int bh = ro.y - by; Rectangle[] clippers = { new Rectangle(lx, minY, lw, bh), // lower // left new Rectangle(ro.x, minY, ro.width, bh), // lower // middle new Rectangle(rx, minY, rw, bh), // lower right new Rectangle(lx, ro.y, lw, ro.height), // left new Rectangle(rx, ro.y, rw, ro.height), // right new Rectangle(lx, uy, lw, uh), // upper left new Rectangle(ro.x, uy, ro.width, uh), // upper // middle new Rectangle(rx, uy, rw, uh) // upper right }; for (Rectangle clipper : clippers) { for (int k = 0; k <= 1; k++) { Rectangle test = clipper.intersection(rectPair[k]); if (!test.isEmpty()) { testSplit.add(new java.awt.geom.Area(test)); if (k == 1 || covered.intersects(test) == false) { aNew = new Area(test.y, test.x, (int) test.getMaxY(), (int) test.getMaxX()); aNew.setMapId(areaPair[k].getMapId()); splitAreas.add(aNew); covered.add(aNew.getJavaArea()); } } } } assert testSplit.equals(originalPair); } } // recombine parts that form a rectangle for (Area splitArea : splitAreas) { if (splitArea.isJoinable()) { for (int j = 0; j < splitList.size(); j++) { Area area = splitList.get(j); if (area == null || area.isJoinable() == false || area.getMapId() != splitArea.getMapId()) continue; boolean doJoin = false; if (splitArea.getMaxLat() == area.getMaxLat() && splitArea.getMinLat() == area.getMinLat() && (splitArea.getMinLong() == 
area.getMaxLong() || splitArea.getMaxLong() == area.getMinLong())) doJoin = true; else if (splitArea.getMinLong() == area.getMinLong() && splitArea.getMaxLong() == area.getMaxLong() && (splitArea.getMinLat() == area.getMaxLat() || splitArea.getMaxLat() == area.getMinLat())) doJoin = true; if (doJoin) { splitArea = area.add(splitArea); splitArea.setMapId(area.getMapId()); splitList.set(j, splitArea); splitArea = null; // don't add later break; } } } if (splitArea != null) { splitList.add(splitArea); } } /* * if (msg.isEmpty() == false) System.out.println(msg); */ } covered.add(new java.awt.geom.Area(r1)); } covered.reset(); Iterator iter = splitList.iterator(); while (iter.hasNext()) { Area a = iter.next(); if (a == null) iter.remove(); else { Rectangle r1 = a.getRect(); if (covered.intersects(r1) == true) { throw new SplitFailedException("Failed to create list of distinct areas"); } covered.add(a.getJavaArea()); } } return splitList; } /** * Fill uncovered parts of the planet with pseudo-areas. TODO: check if * better algorithm reduces run time in ProblemListProcessor We want a small * number of pseudo areas because many of them will require more memory or * more passes, esp. when processing whole planet. Also, the total length of * all edges should be small. * * @param areas * list of areas (either real or pseudo) * @return true if pseudo-areas were added */ private static boolean addPseudoArea(ArrayList areas) { int oldSize = areas.size(); Rectangle planetBounds = new Rectangle(Utils.toMapUnit(-180.0), Utils.toMapUnit(-90.0), 2 * Utils.toMapUnit(180.0), 2 * Utils.toMapUnit(90.0)); java.awt.geom.Area uncovered = new java.awt.geom.Area(planetBounds); java.awt.geom.Area covered = new java.awt.geom.Area(); for (Area area : areas) { uncovered.subtract(area.getJavaArea()); covered.add(area.getJavaArea()); } Rectangle rCov = covered.getBounds(); Rectangle[] topAndBottom = { new Rectangle(planetBounds.x, (int) rCov.getMaxY(), planetBounds.width, (int) (planetBounds.getMaxY() - rCov.getMaxY())), // top new Rectangle(planetBounds.x, planetBounds.y, planetBounds.width, rCov.y - planetBounds.y) }; // bottom for (Rectangle border : topAndBottom) { if (!border.isEmpty()) { uncovered.subtract(new java.awt.geom.Area(border)); covered.add(new java.awt.geom.Area(border)); Area pseudo = new Area(border.y, border.x, (int) border.getMaxY(), (int) border.getMaxX()); pseudo.setMapId(-1 * (areas.size() + 1)); pseudo.setPseudoArea(true); areas.add(pseudo); } } while (uncovered.isEmpty() == false) { boolean changed = false; List> shapes = Utils.areaToShapes(uncovered); // we divide planet into stripes for all vertices of the uncovered // area int minX = uncovered.getBounds().x; int nextX = Integer.MAX_VALUE; for (int i = 0; i < shapes.size(); i++) { List shape = shapes.get(i); for (Point point : shape) { int lon = point.x; if (lon < nextX && lon > minX) nextX = lon; } } java.awt.geom.Area stripeLon = new java.awt.geom.Area( new Rectangle(minX, planetBounds.y, nextX - minX, planetBounds.height)); // cut out already covered area stripeLon.subtract(covered); assert stripeLon.isEmpty() == false; // the remaining area must be a set of zero or more disjoint // rectangles List> stripeShapes = Utils.areaToShapes(stripeLon); for (int j = 0; j < stripeShapes.size(); j++) { List rectShape = stripeShapes.get(j); java.awt.geom.Area test = Utils.shapeToArea(rectShape); test = simplifyArea(test); assert test.isRectangular(); Rectangle pseudoRect = test.getBounds(); if (uncovered.contains(pseudoRect)) { assert 
test.getBounds().width == stripeLon.getBounds().width; boolean wasMerged = false; // check if new area can be merged with last rectangles for (int k = areas.size() - 1; k >= oldSize; k--) { Area prev = areas.get(k); if (prev.getMaxLong() < pseudoRect.x || prev.isPseudoArea() == false) continue; if (prev.getHeight() == pseudoRect.height && prev.getMaxLong() == pseudoRect.x && prev.getMinLat() == pseudoRect.y) { // merge Area pseudo = prev.add(new Area(pseudoRect.y, pseudoRect.x, (int) pseudoRect.getMaxY(), (int) pseudoRect.getMaxX())); pseudo.setMapId(prev.getMapId()); pseudo.setPseudoArea(true); areas.set(k, pseudo); // System.out.println("Enlarged pseudo area " + // pseudo.getMapId() + " " + pseudo); wasMerged = true; break; } } if (!wasMerged) { Area pseudo = new Area(pseudoRect.y, pseudoRect.x, (int) pseudoRect.getMaxY(), (int) pseudoRect.getMaxX()); pseudo.setMapId(-1 * (areas.size() + 1)); pseudo.setPseudoArea(true); // System.out.println("Adding pseudo area " + // pseudo.getMapId() + " " + pseudo); areas.add(pseudo); } uncovered.subtract(test); covered.add(test); changed = true; } } if (!changed) break; } return oldSize != areas.size(); } } splitter-r653/src/uk/me/parabola/splitter/QueueProcessor.java0000664000175300017530000000605114352507254025625 0ustar builderbuilder00000000000000/* * Copyright (c) 2016, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import java.util.concurrent.BlockingQueue; import uk.me.parabola.splitter.OSMMessage.Type; /** * Simple helper to allow all existing processors to use the producer/consumer * pattern. For each call of a supplier (one of the OSM parsers) it either * passes the call to the original processor or adds messages to the queue.. 
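* Elements are buffered in batches of NUM_STAGING (1000) and flushed to the queue
* as a single OSMMessage, which reduces the per-element synchronisation overhead.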
* * @author Gerd Petermann * */ public class QueueProcessor extends AbstractMapProcessor { private final BlockingQueue queue; private final MapProcessor realProcessor; /** number of OSM elements to collect before adding them to the queue */ private static final int NUM_STAGING = 1000; private Element[] staging; private int stagingPos; public QueueProcessor(BlockingQueue queue, MapProcessor realProcessor) { this.queue = queue; this.realProcessor = realProcessor; initStaging(); } private void initStaging() { staging = new Element[NUM_STAGING]; stagingPos = 0; } @Override public boolean skipTags() { return realProcessor.skipTags(); } @Override public boolean skipNodes() { return realProcessor.skipNodes(); } @Override public boolean skipWays() { return realProcessor.skipWays(); } @Override public boolean skipRels() { return realProcessor.skipRels(); } @Override public void boundTag(Area bounds) { addToQueue(bounds); } @Override public void processNode(Node n) { addToQueue(n); } @Override public void processWay(Way w) { addToQueue(w); } @Override public void processRelation(Relation r) { addToQueue(r); } @Override public void startFile() { try { flush(); queue.put(new OSMMessage(Type.START_FILE)); } catch (InterruptedException e) { throw new RuntimeException(e); } } @Override public boolean endMap() { try { flush(); queue.put(new OSMMessage(Type.END_MAP)); } catch (InterruptedException e) { throw new RuntimeException(e); } return true; } @Override public int getPhase() { throw new UnsupportedOperationException("call getPhase() of real processor"); } private void addToQueue(Element el) { try { staging[stagingPos++] = el; if (stagingPos >= NUM_STAGING) flush(); } catch (InterruptedException e) { throw new RuntimeException(e); } } private void addToQueue(Area bounds) { try { flush(); queue.put(new OSMMessage(bounds)); } catch (InterruptedException e) { throw new RuntimeException(e); } } private void flush() throws InterruptedException { if (staging == null || stagingPos == 0) return; queue.put(new OSMMessage(staging)); initStaging(); } } splitter-r653/src/uk/me/parabola/splitter/Relation.java0000664000175300017530000000234014352507254024413 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter; import java.util.ArrayList; import java.util.List; /** * @author Steve Ratcliffe */ public class Relation extends Element { private final List members = new ArrayList<>(); public void addMember(String type, long ref, String role) { Member mem = new Member(type, ref, role); members.add(mem); } public List getMembers() { return members; } public static class Member { private String type; private long ref; private String role; Member(String type, long ref, String role) { this.type = type; this.ref = ref; this.role = role; } public String getType() { return type; } public long getRef() { return ref; } public String getRole() { return role; } } } splitter-r653/src/uk/me/parabola/splitter/RoundingUtils.java0000664000175300017530000000624514352507254025454 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * Utility methods for rounding numbers and areas * * @author Chris Miller */ public class RoundingUtils { /** * Rounds an integer down to the nearest multiple of {@code 2^shift}. * Works with both positive and negative integers. * @param val the integer to round down. * @param shift the power of two to round down to. * @return the rounded integer. */ public static int roundDown(int val, int shift) { return val >>> shift << shift; } /** * Rounds an integer up to the nearest multiple of {@code 2^shift}. * Works with both positive and negative integers. * @param val the integer to round up. * @param shift the power of two to round up to. * @return the rounded integer. */ public static int roundUp(int val, int shift) { return (val + (1 << shift) - 1) >>> shift << shift; } /** * Rounds an integer up or down to the nearest multiple of {@code 2^shift}. * Works with both positive and negative integers. * @param val the integer to round. * @param shift the power of two to round to. * @return the rounded integer. */ public static int round(int val, int shift) { return (val + (1 << (shift - 1))) >>> shift << shift; } /** * Rounds an area's borders to suit the supplied resolution. This * means edges are aligned at 2 ^ (24 - resolution) boundaries * * @param b the area to round * @param resolution the map resolution to align the borders at * @return the rounded area */ public static Area round(Area b, int resolution) { int shift = 24 - resolution; int alignment = 1 << shift; // Avoid pathological behaviour near the poles by discarding anything // greater than +/-85 degrees latitude. 
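		// Added worked example (not in the original source): with resolution 13 the
		// shift is 24 - 13 = 11, so borders are aligned to 1 << 11 = 2048 map units.
		// E.g. roundDown(5000, 11) == 4096 and roundUp(5000, 11) == 6144, while
		// roundDown(-5000, 11) == -6144 and roundUp(-5000, 11) == -4096, i.e. the
		// two helpers always round towards -infinity and +infinity respectively.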
int minLat = Math.max(b.getMinLat(), Utils.toMapUnit(-85.0d)); int maxLat = Math.min(b.getMaxLat(), Utils.toMapUnit(85.0d)); int roundedMinLat = roundDown(minLat, shift); int roundedMaxLat = roundUp(maxLat, shift); assert roundedMinLat % alignment == 0 : "The area's min latitude is not aligned to a multiple of " + alignment; assert roundedMaxLat % alignment == 0 : "The area's max latitude is not aligned to a multiple of " + alignment; int roundedMinLon = roundDown(b.getMinLong(), shift); int roundedMaxLon = roundUp(b.getMaxLong(), shift); // don't produce illegal values if (roundedMinLon < -0x800000) roundedMinLon = -0x800000; if (roundedMaxLon > 0x800000) roundedMaxLon = 0x800000; assert roundedMinLon % alignment == 0 : "The area's min longitude is not aligned to a multiple of " + alignment; assert roundedMaxLon % alignment == 0 : "The area's max longitude is not aligned to a multiple of " + alignment; return new Area(roundedMinLat, roundedMinLon, roundedMaxLat, roundedMaxLon); } } splitter-r653/src/uk/me/parabola/splitter/SplitFailedException.java0000664000175300017530000000152514352507254026721 0ustar builderbuilder00000000000000/* * Copyright (C) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * Thrown when a severe error occurs while calculating or writing the tile areas * * @author GerdP */ public class SplitFailedException extends RuntimeException { public SplitFailedException(String s) { super(s); } public SplitFailedException(String message, Throwable cause) { super(message, cause); } } splitter-r653/src/uk/me/parabola/splitter/SplitProcessor.java0000664000175300017530000003106514352507254025637 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import uk.me.parabola.splitter.Relation.Member; import uk.me.parabola.splitter.args.SplitterParams; import uk.me.parabola.splitter.tools.Long2IntClosedMapFunction; import uk.me.parabola.splitter.tools.SparseLong2IntMap; import uk.me.parabola.splitter.writer.OSMWriter; import java.io.IOException; import java.util.ArrayList; import java.util.Date; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; /** * Splits a map into multiple areas. 
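 * For every node, way and relation it determines the subset of the currently
 * active writer areas (writerOffset .. lastWriter) that the element belongs to,
 * remembering the result for nodes and ways in the coords and ways maps, and
 * hands the element to the matching OSMWriter instances, either directly or
 * through per-writer queues when more than one thread is available.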
*/ class SplitProcessor extends AbstractMapProcessor { private final OSMWriter[] writers; private SparseLong2IntMap coords; private SparseLong2IntMap ways; private final AreaDictionary writerDictionary; private final DataStorer dataStorer; private final Long2IntClosedMapFunction nodeWriterMap; private final Long2IntClosedMapFunction wayWriterMap; private final Long2IntClosedMapFunction relWriterMap; // for statistics private long countQuickTest; private long countFullTest; private long countCoords; private long countWays; private final int writerOffset; private final int lastWriter; private final AreaIndex writerIndex; private final int maxThreads; private final InputQueueInfo[] writerInputQueues; protected final BlockingQueue toProcess; private final ArrayList workerThreads; protected final InputQueueInfo stopMsg = new InputQueueInfo(null); private AreaSet usedWriters; /** * Distribute the OSM data to separate OSM files. * @param dataStorer * @param writerOffset first writer to be used * @param numWritersThisPass number of writers to used * @param mainOptions main program options */ SplitProcessor(DataStorer dataStorer, int writerOffset, int numWritersThisPass, SplitterParams mainOptions){ this.dataStorer = dataStorer; this.writerDictionary = dataStorer.getAreaDictionary(); this.writers = dataStorer.getWriters(); this.coords = new SparseLong2IntMap("coord"); this.ways = new SparseLong2IntMap("way"); this.coords.defaultReturnValue(UNASSIGNED); this.ways.defaultReturnValue(UNASSIGNED); this.writerIndex = dataStorer.getGrid(); this.countWays = ways.size(); this.writerOffset = writerOffset; this.lastWriter = writerOffset + numWritersThisPass-1; this.maxThreads = mainOptions.getMaxThreads().getCount(); this.toProcess = new ArrayBlockingQueue<>(numWritersThisPass); this.writerInputQueues = new InputQueueInfo[numWritersThisPass]; for (int i = 0; i < writerInputQueues.length; i++) { writerInputQueues[i] = new InputQueueInfo(this.writers[i + writerOffset]); writers[i + writerOffset].initForWrite(); } nodeWriterMap = dataStorer.getWriterMap(DataStorer.NODE_TYPE); wayWriterMap = dataStorer.getWriterMap(DataStorer.WAY_TYPE); relWriterMap = dataStorer.getWriterMap(DataStorer.REL_TYPE); usedWriters = new AreaSet(); int noOfWorkerThreads = Math.min(this.maxThreads - 1, numWritersThisPass); workerThreads = new ArrayList<>(noOfWorkerThreads); for (int i = 0; i < noOfWorkerThreads; i++) { Thread worker = new Thread(new OSMWriterWorker()); worker.setName("worker-" + i); workerThreads.add(worker); worker.start(); } } /** * Get the active writers associated to the index * @param multiTileWriterIdx */ private void setUsedWriters(int multiTileWriterIdx) { if (multiTileWriterIdx != UNASSIGNED) { AreaSet cl = writerDictionary.getSet(multiTileWriterIdx); // set only active writer bits for (int i : cl) { if (i >= writerOffset && i <= lastWriter) usedWriters.set(i); } } } @Override public void processNode(Node n) { try { writeNode(n); } catch (IOException e) { throw new SplitFailedException("failed to write node " + n.getId(), e); } } @Override public void processWay(Way w) { usedWriters.clear(); int multiTileWriterIdx = (wayWriterMap != null) ? wayWriterMap.getSeq(w.getId()) : UNASSIGNED; if (multiTileWriterIdx != UNASSIGNED) { setUsedWriters(multiTileWriterIdx); } else { int oldclIndex = UNASSIGNED; for (long id : w.getRefs()) { // Get the list of areas that the way is in. 
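				// (Added note: coords maps each node id that was written to an index
				// into the AreaDictionary; getSet() turns that index back into the
				// AreaSet of writer areas containing the node, and OR-ing these sets
				// over all refs yields every area the way touches.)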
int clIdx = coords.get(id); if (clIdx != UNASSIGNED && oldclIndex != clIdx) { usedWriters.or(writerDictionary.getSet(clIdx)); if (wayWriterMap != null) { // we can stop here because all other nodes // will be in the same tile break; } oldclIndex = clIdx; } } } if (!usedWriters.isEmpty()) { // store these areas in ways map ways.put(w.getId(), writerDictionary.translate(usedWriters)); ++countWays; if (countWays % 10_000_000 == 0){ System.out.println(" Number of stored tile combinations in multiTileDictionary: " + Utils.format(writerDictionary.size())); } try { writeWay(w); } catch (IOException e) { throw new SplitFailedException("failed to write way " + w.getId(), e); } } } @Override public void processRelation(Relation rel) { usedWriters.clear(); Integer singleTileWriterIdx = dataStorer.getOneTileOnlyRels(rel.getId()); if (singleTileWriterIdx != null){ if (singleTileWriterIdx == UNASSIGNED) { // we know that the relation is outside of all real areas return; } // relation is within an area that is overlapped by the writer areas setUsedWriters(singleTileWriterIdx); } else { int multiTileWriterIdx = (relWriterMap != null) ? relWriterMap.getSeq(rel.getId()) : UNASSIGNED; if (multiTileWriterIdx != UNASSIGNED) { setUsedWriters(multiTileWriterIdx); } else{ int oldclIndex = UNASSIGNED; int oldwlIndex = UNASSIGNED; for (Member mem : rel.getMembers()) { long id = mem.getRef(); if ("node".equals(mem.getType())) { int clIdx = coords.get(id); if (clIdx != UNASSIGNED){ if (oldclIndex != clIdx){ usedWriters.or(writerDictionary.getSet(clIdx)); } oldclIndex = clIdx; } } else if ("way".equals(mem.getType())) { int wlIdx = ways.get(id); if (wlIdx != UNASSIGNED){ if (oldwlIndex != wlIdx){ usedWriters.or(writerDictionary.getSet(wlIdx)); } oldwlIndex = wlIdx; } } } } } try { writeRelation(rel); } catch (IOException e) { throw new SplitFailedException("failed to write relation " + rel.getId(), e); } } @Override public boolean endMap() { coords.stats(0); ways.stats(0); Utils.printMem(); System.out.println("Full Node tests: " + Utils.format(countFullTest)); System.out.println("Quick Node tests: " + Utils.format(countQuickTest)); coords = null; ways = null; for (int i = 0; i < writerInputQueues.length; i++) { try { writerInputQueues[i].stop(); } catch (InterruptedException e) { throw new SplitFailedException("Failed to add the stop element for worker thread " + i, e); } } try { if (maxThreads > 1) toProcess.put(stopMsg);// Magic flag used to indicate that all data is done. } catch (InterruptedException e1) { e1.printStackTrace(); } for (Thread workerThread : workerThreads) { try { workerThread.join(); } catch (InterruptedException e) { throw new SplitFailedException("Failed to join for thread " + workerThread.getName(), e); } } for (int i=writerOffset; i<= lastWriter; i++) { writers[i].finishWrite(); } return true; } private void writeNode(Node currentNode) throws IOException { int countWriters = 0; int lastUsedWriter = UNASSIGNED; AreaGridResult writerCandidates = writerIndex.get(currentNode); int multiTileWriterIdx = (nodeWriterMap != null) ? 
nodeWriterMap.getSeq(currentNode.getId()): UNASSIGNED; boolean isSpecialNode = (multiTileWriterIdx != UNASSIGNED); if (writerCandidates == null && !isSpecialNode) { return; } usedWriters.clear(); if (writerCandidates != null){ for (int n : writerCandidates.set) { if (n < writerOffset || n > lastWriter) continue; OSMWriter writer = writers[n]; boolean found; if (writerCandidates.testNeeded){ found = writer.getExtendedBounds().contains(currentNode); ++countFullTest; } else{ found = true; ++countQuickTest; } if (found) { usedWriters.set(n); ++countWriters; lastUsedWriter = n; if (maxThreads > 1) { addToWorkingQueue(n, currentNode); } else { writer.write(currentNode); } } } } if (isSpecialNode){ // this node is part of a multi-tile-polygon, add it to all tiles covered by the parent AreaSet nodeWriters = writerDictionary.getSet(multiTileWriterIdx); for (int i : nodeWriters) { if (i < writerOffset || i > lastWriter || usedWriters.get(i)) continue; if (maxThreads > 1) { addToWorkingQueue(i, currentNode); } else { writers[i].write(currentNode); } } } if (countWriters > 0){ int writersID; if (countWriters > 1) writersID = writerDictionary.translate(usedWriters); else writersID = AreaDictionary.translate(lastUsedWriter); // no need to do lookup in the dictionary coords.put(currentNode.getId(), writersID); ++countCoords; if (countCoords % 100_000_000 == 0){ System.out.println("coord MAP occupancy: " + Utils.format(countCoords) + ", number of area dictionary entries: " + writerDictionary.size()); } } } private boolean seenWay; private void writeWay(Way currentWay) throws IOException { if (!seenWay) { seenWay = true; System.out.println("Writing ways " + new Date()); } writeElement(currentWay, usedWriters); } private boolean seenRel; private void writeRelation(Relation currentRelation) throws IOException { if (!seenRel) { seenRel = true; System.out.println("Writing relations " + new Date()); } writeElement(currentRelation, usedWriters); } private void writeElement (Element el, AreaSet writersToUse) throws IOException { if (!writersToUse.isEmpty()) { for (int n : writersToUse) { if (n < writerOffset || n > lastWriter) continue; if (maxThreads > 1) { addToWorkingQueue(n, el); } else { writers[n].write(el); } } } } private void addToWorkingQueue(int writerNumber, Element element) { try { writerInputQueues[writerNumber-writerOffset].put(element); } catch (InterruptedException e) { throw new SplitFailedException("Failed to add to working queue", e); } } private class InputQueueInfo { protected final OSMWriter writer; private ArrayList staging; protected final BlockingQueue> inputQueue; public InputQueueInfo(OSMWriter writer) { inputQueue = new ArrayBlockingQueue<>(NO_ELEMENTS); this.writer = writer; this.staging = new ArrayList<>(STAGING_SIZE); } void put(Element e) throws InterruptedException { staging.add(e); if (staging.size() >= STAGING_SIZE) flush(); } void flush() throws InterruptedException { inputQueue.put(staging); staging = new ArrayList<>(STAGING_SIZE); toProcess.put(this); } void stop() throws InterruptedException { flush(); } } static final int NO_ELEMENTS = 3; static final int STAGING_SIZE = 300; private class OSMWriterWorker implements Runnable { @Override public void run() { boolean finished = false; while (!finished) { InputQueueInfo workPackage = null; try { workPackage = toProcess.take(); } catch (InterruptedException e1) { e1.printStackTrace(); continue; } if (workPackage == stopMsg) { try { toProcess.put(stopMsg); // Re-inject it so that other // threads know that we're // exiting. 
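						// (Added note: this is the usual "poison pill" idiom for shutting
						// down BlockingQueue workers: the sentinel is re-queued instead of
						// consumed, so every worker thread eventually sees it and exits.)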
} catch (InterruptedException e) { e.printStackTrace(); } finished = true; } else { synchronized (workPackage) { while (!workPackage.inputQueue.isEmpty()) { ArrayList elements = null; try { elements = workPackage.inputQueue.poll(); for (Element element : elements) { workPackage.writer.write(element); } } catch (IOException e) { throw new SplitFailedException("Thread " + Thread.currentThread().getName() + " failed to write element ", e); } } } } } System.out.println("Thread " + Thread.currentThread().getName() + " has finished"); } } } splitter-r653/src/uk/me/parabola/splitter/StopNoErrorException.java0000664000175300017530000000132614352507254026754 0ustar builderbuilder00000000000000/* * Copyright (C) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; /** * Thrown when the program should be stopped * * @author GerdP */ public class StopNoErrorException extends RuntimeException { public StopNoErrorException(String s) { super(s); } } splitter-r653/src/uk/me/parabola/splitter/UnknownFeatureException.java0000664000175300017530000000140714352507254027473 0ustar builderbuilder00000000000000/* * Copyright (C) 2011 by the splitter contributors * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ package uk.me.parabola.splitter; /** * Thrown when an unknown feature is required to process one of the input files. * * @author Steve Ratcliffe */ public class UnknownFeatureException extends RuntimeException { public UnknownFeatureException(String s) { super(s); } } splitter-r653/src/uk/me/parabola/splitter/Utils.java0000664000175300017530000002030114352507254023733 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter; import java.awt.Point; import java.awt.Polygon; import java.awt.Rectangle; import java.awt.geom.Path2D; import java.awt.geom.PathIterator; import java.io.BufferedInputStream; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; import java.nio.charset.StandardCharsets; import java.text.NumberFormat; import java.util.ArrayList; import java.util.List; import java.util.zip.GZIPInputStream; import java.util.zip.ZipEntry; import java.util.zip.ZipInputStream; import org.apache.tools.bzip2.CBZip2InputStream; /** * Some miscellaneous functions that are used within the .img code. * * @author Steve Ratcliffe */ public class Utils { private static final NumberFormat FORMATTER = NumberFormat.getIntegerInstance(); public static final int MIN_LAT_MAP_UNITS = toMapUnit(-90); public static final int MAX_LAT_MAP_UNITS = toMapUnit(90); public static final int MIN_LON_MAP_UNITS = toMapUnit(-180); public static final int MAX_LON_MAP_UNITS = toMapUnit(180); private Utils() { // avoid implicit public constructor } public static String format(int number) { return FORMATTER.format(number); } public static String format(long number) { return FORMATTER.format(number); } public static double toDegrees(int val) { return 360.0d * val / (1 << 24) ; } /** * A map unit is an integer value that is 1/(2^24) degrees of latitude or * longitude. * * @param l The lat or long as decimal degrees. * @return An integer value in map units. */ public static int toMapUnit(double l) { double delta = 360.0D / (1 << 24) / 2; // Correct rounding if (l > 0) return (int) ((l + delta) * (1 << 24) / 360); return (int) ((l - delta) * (1 << 24) / 360); } /** * Open a file and apply filters necessary to reading it such as decompression. * * @param name The file to open. gz, zip, bz2 are supported. * @return A stream that will read the file, positioned at the beginning. * @throws IOException If the file cannot be opened for any reason. */ @SuppressWarnings("resource") public static Reader openFile(String name, boolean backgroundReader) throws IOException { InputStream is = new BufferedInputStream(new FileInputStream(name), 8192); if (name.endsWith(".gz")) { try { is = new GZIPInputStream(is); } catch (IOException e) { throw new IOException( "Could not read " + name + " as a gz compressed file", e); } } else if (name.endsWith(".bz2")) { try { is.read(); is.read(); is = new CBZip2InputStream(is); } catch (IOException e) { throw new IOException( "Could not read " + name + " as a bz2 compressed file", e); } } else if (name.endsWith(".zip")) { ZipInputStream zis = new ZipInputStream(is); name = new File(name).getName(); // Strip off any path ZipEntry entry; while ((entry = zis.getNextEntry()) != null) { if (entry.getName().startsWith(name.substring(0, name.length() - 4))) { is = zis; break; } } if (is != zis) { zis.close(); throw new IOException("Unable to find a file inside " + name + " that starts with " + name.substring(0, name.length() - 4)); } } if (backgroundReader) { is = new BackgroundInputStream(is); } return new InputStreamReader(is, StandardCharsets.UTF_8); } public static Rectangle area2Rectangle (Area area, int overlap){ return new Rectangle(area.getMinLong()-overlap, area.getMinLat()-overlap,area.getWidth()+2*overlap,area.getHeight()+2*overlap); } /** * Convert area into a list of polygons each represented by a list * of points. 
It is possible that the area contains multiple discontinuous * polygons, so you may append more than one shape to the output list.
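 * A minimal round-trip sketch (added example, not from the original source):
 * <pre>{@code
 * java.awt.geom.Area area = new java.awt.geom.Area(new Rectangle(0, 0, 10, 10));
 * List<List<Point>> shapes = Utils.areaToShapes(area);   // one closed shape
 * java.awt.geom.Area back = Utils.shapeToArea(shapes.get(0));
 * }</pre>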
* Attention: The outline of the polygon has clockwise order whereas * holes in the polygon have counterclockwise order. * * Taken from Java2DConverter by WanMil in mkgmap * @param area The area to be converted. * @return a list of closed polygons */ public static List> areaToShapes(java.awt.geom.Area area) { List> outputs = new ArrayList<>(); float[] res = new float[6]; PathIterator pit = area.getPathIterator(null); List points = null; int iPrevLat = Integer.MIN_VALUE; int iPrevLong = Integer.MIN_VALUE; while (!pit.isDone()) { int type = pit.currentSegment(res); float fLat = res[1]; float fLon = res[0]; int iLat = Math.round(fLat); int iLon = Math.round(fLon); switch (type) { case PathIterator.SEG_LINETO: if (iPrevLat != iLat || iPrevLong != iLon) points.add(new Point(iLon,iLat)); iPrevLat = iLat; iPrevLong = iLon; break; case PathIterator.SEG_MOVETO: case PathIterator.SEG_CLOSE: if ((type == PathIterator.SEG_MOVETO && points != null) || type == PathIterator.SEG_CLOSE) { if (points.size() > 2 && !points.get(0).equals(points.get(points.size() - 1))) { points.add(points.get(0)); } if (points.size() > 3){ outputs.add(points); } } if (type == PathIterator.SEG_MOVETO){ points = new ArrayList<>(); points.add(new Point(iLon,iLat)); iPrevLat = iLat; iPrevLong = iLon; } else { points = null; iPrevLat = Integer.MIN_VALUE; iPrevLong = Integer.MIN_VALUE; } break; default: System.out.println("Unsupported path iterator type " + type + ". This is an internal splitter error."); } pit.next(); } return outputs; } /** * Convert list of points which describe a closed polygon to an area * Taken from Java2DConverter by WanMil in mkgmap * @param shape * @return */ public static java.awt.geom.Area shapeToArea(List shape){ Polygon polygon = new Polygon(); for (Point point : shape) { polygon.addPoint(point.x, point.y); } return new java.awt.geom.Area(polygon); } /** * Convert area with coordinates in degrees to area in MapUnits. * @param area * @return */ public static java.awt.geom.Area AreaDegreesToMapUnit(java.awt.geom.Area area){ if (area == null) return null; double[] res = new double[6]; Path2D path = new Path2D.Double(); PathIterator pit = area.getPathIterator(null); while (!pit.isDone()) { int type = pit.currentSegment(res); double fLat = res[1]; double fLon = res[0]; int lat = toMapUnit(fLat); int lon = toMapUnit(fLon); switch (type) { case PathIterator.SEG_LINETO: path.lineTo(lon, lat); break; case PathIterator.SEG_MOVETO: path.moveTo(lon, lat); break; case PathIterator.SEG_CLOSE: path.closePath(); break; default: System.out.println("Unsupported path iterator type " + type + ". This is an internal splitter error."); } pit.next(); } return new java.awt.geom.Area(path); } // returns true if the way is a closed polygon with a clockwise // direction public static boolean clockwise(List points) { if(points.size() < 3 || !points.get(0).equals(points.get(points.size() - 1))) return false; long area = 0; Point p1 = points.get(0); for(int i = 1; i < points.size(); ++i) { Point p2 = points.get(i); area += ((long)p1.x * p2.y- (long)p2.x * p1.y); p1 = p2; } // this test looks to be inverted but gives the expected result! 
// empty linear areas are defined as clockwise return area <= 0; } public static void printMem(){ long maxMem = Runtime.getRuntime().maxMemory() / 1024 / 1024; long totalMem = Runtime.getRuntime().totalMemory() / 1024 / 1024; long freeMem = Runtime.getRuntime().freeMemory() / 1024 / 1024; long usedMem = totalMem - freeMem; System.out.println(" JVM Memory Info: Current " + totalMem + "MB (" + usedMem + "MB used, " + freeMem + "MB free) Max " + maxMem + "MB"); } } splitter-r653/src/uk/me/parabola/splitter/Version.java0000664000175300017530000000426414352507254024272 0ustar builderbuilder00000000000000/* * File: Version.java * * Copyright (C) 2007 Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Author: Steve Ratcliffe * Create date: 12 Dec 2007 */ package uk.me.parabola.splitter; import java.io.IOException; import java.io.InputStream; import java.util.Properties; /** * Definitions of version numbers. * * @author Steve Ratcliffe */ public class Version { public static final String VERSION = getSvnVersion(); public static final String TIMESTAMP = getTimeStamp(); // A default version to use. private static final String DEFAULT_VERSION = "unknown"; private static final String DEFAULT_TIMESTAMP = "unknown"; /** * Get the version number if we can find one, else a default string. * This looks in a file called splitter-version.properties on the * classpath. * This is created outside of the system by the build script. * * @return The version number or a default string if a version number * cannot be found. */ private static String getSvnVersion() { try (InputStream is = Version.class .getResourceAsStream("/splitter-version.properties")) { if (is == null) return DEFAULT_VERSION; Properties props = new Properties(); props.load(is); String version = props.getProperty("svn.version", DEFAULT_VERSION); if (version.matches("[1-9]+.*")) return version; return DEFAULT_VERSION; } catch (IOException e) { return DEFAULT_VERSION; } } private static String getTimeStamp() { try (InputStream is = Version.class .getResourceAsStream("/splitter-version.properties")) { if (is == null) return DEFAULT_TIMESTAMP; Properties props = new Properties(); props.load(is); return props.getProperty("build.timestamp", DEFAULT_TIMESTAMP); } catch (IOException e) { return DEFAULT_TIMESTAMP; } } } splitter-r653/src/uk/me/parabola/splitter/Way.java0000664000175300017530000000152414352507254023401 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter; import it.unimi.dsi.fastutil.longs.LongArrayList; /** * @author Steve Ratcliffe */ public class Way extends Element { private final LongArrayList refs = new LongArrayList(10); public void set(long id){ setId(id); } public void addRef(long ref) { refs.add(ref); } public LongArrayList getRefs() { return refs; } } splitter-r653/src/uk/me/parabola/splitter/args/0000775000175300017530000000000014352507253022727 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/args/Option.java0000664000175300017530000000266314352507253025051 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; import java.lang.annotation.ElementType; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; import java.lang.annotation.Target; /** * Tag interface that marks command line arguments in a java interface. * * @author Chris Miller */ @Retention(RetentionPolicy.RUNTIME) @Target(ElementType.METHOD) public @interface Option { String DEFAULT_DESCRIPTION = "[No description specified]"; String OPTIONAL = "**OPTIONAL**"; /** * @return The name of the command line argument */ String name() default ""; /** * @return a default value to be used when one isn't specified. If this isn't set * {@code null} will be returned (or zero/false in the case of primitives). */ String defaultValue() default OPTIONAL; /** * @return A description of this parameter to be displayed to the end user when the * usage is presented */ String description() default DEFAULT_DESCRIPTION; } splitter-r653/src/uk/me/parabola/splitter/args/Param.java0000664000175300017530000000225414352507253024635 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; /** * A single command line parameter. 
* * @author Chris Miller */ public class Param { private final String name; private final String description; private final String defaultValue; private final Class returnType; public Param(String name, String description, String defaultValue, Class returnType) { this.name = name; this.description = description; this.defaultValue = defaultValue; this.returnType = returnType; } public String getName() { return name; } public String getDescription() { return description; } public String getDefaultValue() { return defaultValue; } public Class getReturnType() { return returnType; } } splitter-r653/src/uk/me/parabola/splitter/args/ParamConverter.java0000664000175300017530000000774314352507253026535 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; import java.io.File; import java.util.HashMap; import java.util.Map; /** * Converts arguments from a String to another type. * * @author Chris Miller */ public class ParamConverter { private final Map, Converter> converterMap; private final Map, Object> primitiveDefaults; public ParamConverter() { converterMap = new HashMap<>(10); converterMap.put(String.class, new Converter() { @Override String convert(String value) { return value; } }); converterMap.put(Boolean.class, new Converter() { @Override Boolean convert(String value) { return Boolean.valueOf(value); } }); converterMap.put(Integer.class, new IntegerConverter()); converterMap.put(Long.class, new LongConverter()); converterMap.put(File.class, new Converter() { @Override File convert(String value) { return new File(value); } }); converterMap.put(ThreadCount.class, new ThreadCountConverter()); primitiveDefaults = new HashMap<>(10); primitiveDefaults.put(Boolean.TYPE, Boolean.FALSE); primitiveDefaults.put(Byte.TYPE, Byte.valueOf((byte) 0)); primitiveDefaults.put(Character.TYPE, Character.valueOf('\u0000')); primitiveDefaults.put(Short.TYPE, Short.valueOf((short) 0)); primitiveDefaults.put(Integer.TYPE, Integer.valueOf(0)); primitiveDefaults.put(Long.TYPE, Long.valueOf(0)); primitiveDefaults.put(Float.TYPE, Float.valueOf(0.0f)); primitiveDefaults.put(Double.TYPE, Double.valueOf(0.0d)); } public Object getPrimitiveDefault(Class returnType) { return primitiveDefaults.get(returnType); } /** * Convert the argument to the target type * * @param param the parameter being converted. * @param value the value to convert. * @return the converted argument. * @throws Exception if the string could not be converted. */ public Object convert(Param param, String value) { if (value == null) return param.getDefaultValue(); Converter converter = converterMap.get(param.getReturnType()); if (converter == null) throw new UnsupportedOperationException("Unable to convert parameters of type " + param.getReturnType() + ". 
Parameter " + param.getName() + " (value=" + value + ") could not be converted."); return converter.convert(value); } private abstract static class Converter { abstract T convert(String value); } private static class IntegerConverter extends Converter { @Override Integer convert(String value) { try { return Integer.valueOf(value); } catch (NumberFormatException e) { throw new NumberFormatException('\'' + value + "' is not a valid number."); } } } private static class LongConverter extends Converter { @Override Long convert(String value) { try { return Long.valueOf(value); } catch (NumberFormatException e) { throw new NumberFormatException('\'' + value + "' is not a valid number."); } } } private static class ThreadCountConverter extends Converter { @Override ThreadCount convert(String value) { int cpuCores = Runtime.getRuntime().availableProcessors(); if ("auto".equals(value)) { return new ThreadCount(cpuCores, true); } int threads = 0; boolean valid = false; try { threads = Integer.valueOf(value); if (threads >= 1) { valid = true; } } catch (NumberFormatException e) { } if (!valid) { throw new IllegalArgumentException( '\'' + value + "' should be a number >= 1, or 'auto' to use all available CPU cores."); } return new ThreadCount(threads, false); } } } splitter-r653/src/uk/me/parabola/splitter/args/ParamParser.java0000664000175300017530000002015414352507253026011 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; import java.lang.reflect.InvocationHandler; import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.TreeMap; import uk.me.parabola.splitter.StopNoErrorException; import uk.me.parabola.splitter.Version; /** * Parses command line arguments and returns them via the supplied interface. * * @author Chris Miller */ public class ParamParser { private final ParamConverter converter = new ParamConverter(); private final Map paramMap = new TreeMap<>(); private final Map convertedParamMap = new TreeMap<>(); private final List additionalParams = new ArrayList<>(); private final List errors = new ArrayList<>(); private boolean wantHelp; private boolean wantVersion; private int maxParamLength; public
<P> P parse(Class<P> paramInterface, String... args) {
 if (!paramInterface.isInterface()) { throw new IllegalArgumentException(paramInterface + " must be an interface"); }
 return createProxy(paramInterface, args); }
 public Map<String, Param> getValidParams() { return paramMap; }
 public Map<String, Object> getConvertedParams() { return convertedParamMap; }
 public List<String> getAdditionalParams() { return additionalParams; }
 public List<String> getErrors() { return errors; }
 @SuppressWarnings("unchecked") private <P> P createProxy(Class<P>
paramInterface, String... args) { Map params = new HashMap<>(); paramMap.clear(); convertedParamMap.clear(); wantHelp = false; wantVersion = false; for (Method method : paramInterface.getDeclaredMethods()) { Option option = ReflectionUtils.getOptionAnnotation(method); String name = getParameterName(method, option); if (name.length() > maxParamLength) { maxParamLength = name.length(); } String description = option.description(); String defaultValue = option.defaultValue(); if (defaultValue.equals(Option.OPTIONAL)) { defaultValue = null; } Class returnType = ReflectionUtils.getBoxedClass(method.getReturnType()); Param param = new Param(name, description, defaultValue, returnType); paramMap.put(name, param); MethodParamPair pair = new MethodParamPair(method, param); params.put(name, pair); } Map valuesMap = convert(params, args); for (Map.Entry entry : valuesMap.entrySet()) { Method method = entry.getKey(); Option option = ReflectionUtils.getOptionAnnotation(method); String name = getParameterName(method, option); convertedParamMap.put(name, entry.getValue()); } if (wantHelp) { displayUsage(); throw new StopNoErrorException(null); } if (wantVersion){ System.err.println("splitter " + Version.VERSION + " compiled " + Version.TIMESTAMP); throw new StopNoErrorException(null); } ParamInvocationHandler invocationHandler = new ParamInvocationHandler(valuesMap); return (P) Proxy.newProxyInstance(paramInterface.getClassLoader(), new Class[]{paramInterface}, invocationHandler); } private Map convert(Map paramMap, String[] args) { Map result = new HashMap<>(10); // First set up the defaults for (MethodParamPair pair : paramMap.values()) { Method method = pair.getMethod(); Param param = pair.getParam(); Object value = converter.convert(param, param.getDefaultValue()); if (value == null && method.getReturnType().isPrimitive()) { result.put(method, converter.getPrimitiveDefault(method.getReturnType())); } else { result.put(method, value); } } // Now override these with any parameters that were specified on the command line HashMap parsedArgs = new HashMap<>(); for (String arg : args) { if (arg.startsWith("--")) { String name; String value; int j = arg.indexOf('='); if (j > 0) { name = arg.substring(2, j); value = arg.substring(j + 1); } else { // Should be a boolean name = arg.substring(2); value = null; } // warn user regarding duplicated parms String testVal = value==null? "no val":value; String oldVal = parsedArgs.put(name, testVal); if (oldVal != null && oldVal.equals(testVal) == false){ System.err.println("Warning: repeated paramter overwrites previous value: --" + name + (value==null? "":"="+value)); } MethodParamPair pair = paramMap.get(name); if (pair != null) { if (pair.getParam().getReturnType() == Boolean.class && value == null) { result.put(pair.getMethod(), Boolean.TRUE); } else { try { Object convertedVal = converter.convert(pair.getParam(), value); result.put(pair.getMethod(), convertedVal); } catch (Exception e) { errors.add("Unable to parse " + arg + ". 
Reason: " + e.getMessage()); } } } else { // Unknown parameter if ("help".equals(name)) { wantHelp = true; } else if ("version".equals(name)){ wantVersion = true; } else { errors.add("Parameter " + arg + " is not recognised"); } } } else { // We have a parameter that doesn't start with -- additionalParams.add(arg); } } return result; } public void displayUsage() { System.out.println("Usage: java [JAVA_OPTIONS] -jar splitter.jar [OPTIONS] input_file (*.osm or *.pbf or *.o5m)"); System.out.println("Options:"); String lastName = null; int leftColWidth = maxParamLength + 5; for(Param param : paramMap.values()){ String desc = param.getDescription(); if (param.getDefaultValue() != null) { desc += " Default is " + param.getDefaultValue() + "."; } if ("help".compareTo(param.getName()) < 0 && lastName != null && "help".compareTo(lastName) >= 0){ String ln = padRight(" --help", leftColWidth) + "Print this help."; System.out.println(ln); } if ("version".compareTo(param.getName()) < 0 && lastName != null && "version".compareTo(lastName) >= 0){ String ln = padRight(" --version", leftColWidth) + "Just write program version and build timestamp."; System.out.println(ln); } String ln = padRight(" --" + param.getName(), leftColWidth); String[] descWords = desc.split(" "); for (String word: descWords){ if (ln.length() + word.length() >= 78){ System.out.println(ln); ln = padRight("", leftColWidth); } ln += word + " "; } System.out.println(ln); lastName = param.getName(); } } /** * Pad string with blanks on the right to the desired length. * @param s the string * @param wantedLen the desired length * @return the padded string. No truncation or padding is done * if s is longer than wantedLen. */ private static String padRight(String s, int wantedLen) { return String.format("%1$-" + wantedLen + "s", s); } private static String getParameterName(Method method, Option option) { return option.name().length() == 0 ? ReflectionUtils.getParamName(method) : option.name(); } private static class ParamInvocationHandler implements InvocationHandler { private final Map valuesMap; private ParamInvocationHandler(Map valuesMap) { this.valuesMap = valuesMap; } @Override public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { return valuesMap.get(method); } } private static class MethodParamPair { private final Method method; private final Param param; private MethodParamPair(Method method, Param param) { this.method = method; this.param = param; } public Method getMethod() { return method; } public Param getParam() { return param; } } } splitter-r653/src/uk/me/parabola/splitter/args/ParseException.java0000664000175300017530000000253214352507253026525 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; import java.util.List; /** * Thrown when the command line arguments could not be successfully parsed. 
* * @author Chris Miller */ public class ParseException extends Exception { private static final String LINE_SEPARATOR = System.getProperty("line.separator"); private final List errors; public ParseException(String message, List errors) { this(message, null, errors); } public ParseException(String message, Exception cause, List errors) { super(message, cause); this.errors = errors; } public List getErrors() { return errors; } @Override public String toString() { StringBuilder buf = new StringBuilder(500); buf.append(super.toString()); buf.append(LINE_SEPARATOR); for (String error : errors) { buf.append(error).append(LINE_SEPARATOR); } return buf.toString(); } } splitter-r653/src/uk/me/parabola/splitter/args/ReflectionUtils.java0000664000175300017530000000657614352507253026723 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; import java.lang.reflect.Method; import java.util.HashMap; import java.util.Map; /** * Reflection utility methods for argument parsing. * * @author Chris Miller */ public class ReflectionUtils { private static final Map, Class> boxedMappings = new HashMap<>(15); static { boxedMappings.put(Boolean.TYPE, Boolean.class); boxedMappings.put(Byte.TYPE, Byte.class); boxedMappings.put(Character.TYPE, Character.class); boxedMappings.put(Short.TYPE, Short.class); boxedMappings.put(Integer.TYPE, Integer.class); boxedMappings.put(Long.TYPE, Long.class); boxedMappings.put(Float.TYPE, Float.class); boxedMappings.put(Double.TYPE, Double.class); } public static Class getBoxedClass(Class actualClass) { if (actualClass.isPrimitive()) { return boxedMappings.get(actualClass); } return actualClass; } public static boolean isBooleanReturnType(Method method) { Class returnType = method.getReturnType(); return returnType == Boolean.class || returnType == Boolean.TYPE; } public static boolean isEnumReturnType(Method method) { Class returnType = method.getReturnType(); return returnType.isEnum(); } public static Option getOptionAnnotation(Method method) { return method.getAnnotation(Option.class); } /** * Checks to make sure this is a getter with a return type. * Also checks the Option annotation for the argument name. * * @param getter the getter method to check * @return the name of the argument that corresponds to this getter. 
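 * For example (derived from the rules implemented below): {@code getMaxNodes()}
 * yields "max-nodes", {@code isMixed()} yields "mixed", and a getter whose
 * {@code @Option} annotation has a non-empty name returns that name unchanged.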
*/ public static String getParamName(Method getter) { Class returnType = getter.getReturnType(); if (returnType == Void.TYPE) { throw new IllegalArgumentException("Method " + getter + " is not a getter, it doesn't return anything"); } int params = getter.getParameterTypes().length; if (params > 0) { throw new IllegalArgumentException("Method " + getter + " is not a getter, it shouldn't take any parameters but takes " + params); } String name = getter.getName(); int i = 0; if (name.length() > 3 && (name.startsWith("get") || name.startsWith("has") && isBooleanReturnType(getter))) { i = 3; } else if (name.length() > 2 && name.startsWith("is") && isBooleanReturnType(getter)) { i = 2; } if (i == 0) { throw new IllegalArgumentException("Method " + getter + " is not a getter, its name should begin with 'is', 'has' or 'get'"); } if (getter.isAnnotationPresent(Option.class)) { String annotationName = getOptionAnnotation(getter).name(); if (annotationName.length() != 0) { return annotationName; } } StringBuilder sb = new StringBuilder(name.length()); sb.append(Character.toLowerCase(name.charAt(i))); for (int j = i + 1; j < name.length(); j++) { char ch = name.charAt(j); if (Character.isUpperCase(ch)) { sb.append('-').append(Character.toLowerCase(ch)); } else { sb.append(ch); } } return sb.toString(); } } splitter-r653/src/uk/me/parabola/splitter/args/SplitterParams.java0000664000175300017530000001340514352507253026547 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; /** * Command line parameters for the splitter * * @author Chris Miller */ public interface SplitterParams { /** * @return the ID for the first split area. */ @Option(defaultValue = "63240001", description = "The starting map ID.") int getMapid(); @Option(description = "A default description to give to each area.") String getDescription(); @Option(defaultValue = "2048", description = "The maximum number of areas to process in a single pass. " + "More areas require more memory, but less time. Values: 1-9999.") int getMaxAreas(); @Option(defaultValue = "auto", description = "Deprecated. Nodes/ways/rels that fall outside an area will still " + "be included if they are within this many map units. ") String getOverlap(); @Option(defaultValue = "1600000", description = "A threshold value that is used when no split-file is given. Splitting is done so that " + "no tile has more than maxNodes nodes inside the bounding box of the tile. " + "Nodes added by overlap or keep-complete are not taken into account.") int getMaxNodes(); @Option(description = "A target value that is used when no split-file is given. Splitting is done so that " + "the given number of tiles is produced. The max-nodes value is ignored if this option is given.") String getNumTiles(); @Option(defaultValue = "13", description = "The resolution determines how the tiles must be aligned." 
+ "Eg a resolution of 13 means the tiles need to have their edges aligned to multiples of 2 ^ (24 - 13) = 2048 map units.") int getResolution(); @Option(description = "Specify this if the input osm file has nodes, ways and relations intermingled.") boolean isMixed(); @Option(description = "Deprecated, now does nothing") String getCache(); @Option(description = "The path to the output directory. Defaults to the current working directory.") String getOutputDir(); @Option(description = "The name of a file containing the areas definitions. Can be .list or .kml. Providing such a file will save processing time.") String getSplitFile(); @Option(description = "The name of a GeoNames file to use for determining tile names. Typically cities15000.zip from http://download.geonames.org/export/dump/") String getGeonamesFile(); @Option(description = "The name of a kml file to write out the areas to. This is in addition to areas.list (which is always written out).") String getWriteKml(); @Option(defaultValue = "120", description = "Displays the amount of memory used by the JVM every --status-freq seconds. Set =0 to disable.") int getStatusFreq(); @Option(description = "Don't trim empty space off the edges of tiles.") boolean isNoTrim(); @Option(defaultValue = "auto", description = "The maximum number of threads used by splitter.") ThreadCount getMaxThreads(); @Option(defaultValue = "pbf", description = "The output type, either pbf, o5m, or xml.") String getOutput(); @Option(description = "The name of a file containing ways and relations that are known to cause problems in the split process.") String getProblemFile(); @Option(defaultValue="true", description = "Write complete ways and relations if possible (requires more time and more heap memory). This should be used " + "with --overlap=0") boolean isKeepComplete(); // @Option(description = "Just write program version and build timestamp") // boolean getVersion(); @Option(description = "The name of a file to write the generated problem list created with --keep-complete.") String getProblemReport(); @Option(description = "The name of a file containing a bounding polygon in osmosis polygon file format.") String getPolygonFile(); @Option(description = "An osm file (.o5m, .pbf, .osm) with named ways that describe bounding polygons with OSM ways having tags name and mapid" ) String getPolygonDescFile(); @Option(defaultValue = "dist", description = "Debugging: stop after the program phase. Can be split, gen-problem-list, or handle-problem-list") String getStopAfter(); @Option(description = "The name of a directory containing precompiled sea tiles.") String getPrecompSea(); @Option(defaultValue="use-exclude-list", description = "A comma separated list of tag values for relations. " + "Used to filter multipolygon and boundary relations for problem-list processing.") String getBoundaryTags(); @Option(defaultValue="5", description = "The lowest admin_level value that should be kept complete. Reasonable values are 2 .. 11. " + "Used to filter boundary relations for problem-list processing. Ignored when keep-complete is false.") int getWantedAdminLevel(); @Option(defaultValue = "200000", description = "Search limit in split algo. Higher values may find better splits, but will take longer.") int getSearchLimit(); @Option(defaultValue = "remove", description = "Define how splitter treats version info in the osm data. 
Can be remove, fake, or keep") String getHandleElementVersion(); @Option(defaultValue = "false", description = "Specify if splitter should ignore bounds tags in input files") boolean getIgnoreOsmBounds(); @Option(defaultValue="", description = "A comma separated list of tag values for route relations. " + "Can be used to keep route relations of the given type complete. Only route values listed are kept complete. Default is empty.") String getRouteRelValues(); } splitter-r653/src/uk/me/parabola/splitter/args/ThreadCount.java0000664000175300017530000000165214352507253026016 0ustar builderbuilder00000000000000/* * Copyright (c) 2010, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.args; /** * @author Chris Miller */ public class ThreadCount { private final int count; private final boolean auto; public ThreadCount(int count, boolean isAuto) { this.count = count; auto = isAuto; } public int getCount() { return count; } public boolean isAuto() { return auto; } @Override public String toString() { if (auto) return count + " (auto)"; return String.valueOf(count); } } splitter-r653/src/uk/me/parabola/splitter/geo/0000775000175300017530000000000014352507253022545 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/geo/City.java0000664000175300017530000000253214352507253024322 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.geo; /** * Holds information about a single city. Immutable. * * @author Chris Miller */ public class City { // The location of the city in Garmin map units private final int lat, lon; // A unique ID for the city private final int id; private final String countryCode; private final String name; private final int population; public City(int id, String countryCode, String name, int lat, int lon, int population) { this.id = id; this.lat = lat; this.lon = lon; this.countryCode = countryCode; this.name = name; this.population = population; } public int getId() { return id; } public int getLat() { return lat; } public int getLon() { return lon; } public String getCountryCode() { return countryCode; } public String getName() { return name; } public int getPopulation() { return population; } } splitter-r653/src/uk/me/parabola/splitter/geo/CityFinder.java0000664000175300017530000000132014352507253025444 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.geo; import java.util.Set; import uk.me.parabola.splitter.Area; public interface CityFinder { Set findCities(Area area); Set findCities(int minLat, int minLon, int maxLat, int maxLon); } splitter-r653/src/uk/me/parabola/splitter/geo/CityLoader.java0000664000175300017530000000555214352507253025456 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.geo; import java.io.BufferedReader; import java.io.IOException; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; import uk.me.parabola.splitter.Convert; import uk.me.parabola.splitter.Utils; /** * Loads in city information from a GeoNames file. See * http://download.geonames.org/export/dump/readme.txt for details of the file format. * * @author Chris Miller */ public class CityLoader { private static final Pattern TAB_DELIMTED_SPLIT_PATTERN = Pattern.compile("\\t"); private static final int GEONAME_ID_INDEX = 0; private static final int NAME_INDEX = 1; private static final int ASCII_NAME_INDEX = 2; private static final int COUNTRY_CODE_INDEX = 8; private static final int LAT_INDEX = 4; private static final int LON_INDEX = 5; private static final int POPULATION_INDEX = 14; private final boolean useAsciiNames; public CityLoader(boolean useAsciiNames) { this.useAsciiNames = useAsciiNames; } public List load(String geoNamesFile) { List result = null; try(BufferedReader r = new BufferedReader(Utils.openFile(geoNamesFile, true))){ result = load(r); } catch (IOException ignore) { System.out.println("Warning: Could not read geonames file " + geoNamesFile + ", processing continues"); } return result; } public List load(BufferedReader reader) throws IOException { List cities = new ArrayList<>(1000); String line; int lineNumber = 0; while ((line = reader.readLine()) != null) { lineNumber++; try { String[] split = TAB_DELIMTED_SPLIT_PATTERN.split(line, 16); int geoNameId = Integer.parseInt(split[GEONAME_ID_INDEX]); String name; if (useAsciiNames) name = new String(split[ASCII_NAME_INDEX].toCharArray()); // prevent memory leak from substr else name = new String(split[NAME_INDEX].toCharArray()); String countryCode = new String(split[COUNTRY_CODE_INDEX].toCharArray()).intern(); int population = Integer.parseInt(split[POPULATION_INDEX]); int lat = Utils.toMapUnit(Convert.parseDouble(split[LAT_INDEX])); int lon = Utils.toMapUnit(Convert.parseDouble(split[LON_INDEX])); cities.add(new City(geoNameId, countryCode, name, lat, lon, population)); } catch (Exception e) { System.err.format("Unable to parse GeoNames data at line %d%nReason:%s%nData: %s%n",lineNumber, e.toString(),line); } } return cities; } } splitter-r653/src/uk/me/parabola/splitter/geo/DefaultCityFinder.java0000664000175300017530000000514714352507253026764 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris 
Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.geo; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; import uk.me.parabola.splitter.Area; /** * Manages a store of city details in a format optimised for fast * retrieval based on lat/lon coordinates. * * @author Chris Miller */ public class DefaultCityFinder implements CityFinder { private final int[] lats; private final int[] lons; private final City[] citiesByLat; /** * Creates a city store that holds all the given cities. */ public DefaultCityFinder(List cities) { lats = new int[cities.size()]; lons = new int[cities.size()]; citiesByLat = new City[cities.size()]; cities.sort((c1,c2) -> Integer.compare(c1.getLat(), c2.getLat())); int i = 0; for (City city : cities) { lats[i] = city.getLat(); lons[i] = city.getLon(); citiesByLat[i++] = city; } } /** * Retrieves all the cities that fall within the given bounds. */ @Override public Set findCities(Area area) { return findCities(area.getMinLat(), area.getMinLong(), area.getMaxLat(), area.getMaxLong()); } /** * Retrieves all the cities that fall within the given bounds. */ @Override public Set findCities(int minLat, int minLon, int maxLat, int maxLon) { int minLatIndex = findMinIndex(lats, minLat); int maxLatIndex = findMaxIndex(lats, maxLat); if (minLatIndex > maxLatIndex) return Collections.emptySet(); Set hits = new HashSet<>(100); for (int i = minLatIndex; i <= maxLatIndex; i++) { City city = citiesByLat[i]; if (city.getLon() >= minLon && city.getLon() <= maxLon) hits.add(city); } return hits; } private static int findMinIndex(int[] data, int value) { int result = Arrays.binarySearch(data, value); if (result < 0) return -1 - result; while (result > 0 && data[result - 1] == value) result--; return result; } private static int findMaxIndex(int[] data, int value) { int result = Arrays.binarySearch(data, value); if (result < 0) return -2 - result; while (result < data.length - 2 && data[result + 1] == value) result++; return result; } } splitter-r653/src/uk/me/parabola/splitter/kml/0000775000175300017530000000000014352507253022556 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/kml/KmlParser.java0000664000175300017530000000734614352507253025333 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.kml; import java.util.ArrayList; import java.util.List; import org.xmlpull.v1.XmlPullParserException; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.xml.parser.AbstractXppParser; /** * Parses a KML area file. 
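* For orientation, a minimal sketch of the input this parser accepts (element names follow the state
* machine below; the id and coordinate values are purely illustrative): each Placemark carries a numeric
* area id in its name element and a Polygon / outerBoundaryIs / LinearRing / coordinates element whose
* text content is exactly five lon,lat pairs forming a closed rectangle, for example
*
*   Placemark
*     name          63240001
*     Polygon / outerBoundaryIs / LinearRing
*       coordinates  -1.5,52.0 -1.5,53.0 -0.5,53.0 -0.5,52.0 -1.5,52.0
*
* The first pair supplies the south-west corner and the third pair the north-east corner of the area.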
* * @author Chris Miller */ public class KmlParser extends AbstractXppParser { private enum State { None, Placemark, Name, Polygon, OuterBoundaryIs, LinearRing, Coordinates } private State state = State.None; private int currentId; private int[] currentCoords = new int[10]; private List areas = new ArrayList<>(); public KmlParser() throws XmlPullParserException { } public List getAreas() { return areas; } @Override protected boolean startElement(String name) throws XmlPullParserException { switch (state) { case None: if (name.equals("Placemark")) state = State.Placemark; break; case Placemark: if (name.equals("name")) { state = State.Name; } else if (name.equals("Polygon")) { state = State.Polygon; } break; case Polygon: if (name.equals("outerBoundaryIs")) { state = State.OuterBoundaryIs; } break; case OuterBoundaryIs: if (name.equals("LinearRing")) { state = State.LinearRing; } break; case LinearRing: if (name.equals("coordinates")) { state = State.Coordinates; } break; default: } return false; } @Override protected void text() throws XmlPullParserException { if (state == State.Name) { String idStr = getTextContent(); try { currentId = Integer.valueOf(idStr); } catch (NumberFormatException e) { throw createException("Unexpected area name encountered. Expected a valid number, found \"" + idStr + '"'); } } else if (state == State.Coordinates) { String coordText = getTextContent(); String[] coordPairs = coordText.trim().split("\\s+"); if (coordPairs.length != 5) { throw createException("Unexpected number of coordinates. Expected 5, found " + coordPairs.length + " in \"" + coordText + '"'); } for (int i = 0; i < 5; i++) { String[] coordStrs = coordPairs[i].split(","); if (coordStrs.length != 2) { throw createException( "Unexpected coordinate pair encountered in \"" + coordPairs[i] + "\". Expected 2 numbers, found " + coordStrs.length); } for (int j = 0; j < 2; j++) { try { Double val = Double.valueOf(coordStrs[j]); currentCoords[i * 2 + j] = Utils.toMapUnit(val); } catch (NumberFormatException e) { throw createException("Unexpected coordinate encountered. \"" + coordStrs[j] + "\" is not a valid number"); } } } } } @Override protected void endElement(String name) throws XmlPullParserException { if (state == State.Name) { state = State.Placemark; } else if (state == State.Coordinates) { state = State.LinearRing; } else if (name.equals("Placemark")) { int minLat = currentCoords[1]; int minLon = currentCoords[0]; int maxLat = currentCoords[5]; int maxLon = currentCoords[4]; Area a = new Area(minLat, minLon, maxLat, maxLon); if (!a.verify()) throw new IllegalArgumentException("invalid area " + currentId + " in split file: " + a); a.setMapId(currentId); areas.add(a); state = State.None; } } } splitter-r653/src/uk/me/parabola/splitter/kml/KmlWriter.java0000664000175300017530000001142314352507253025342 0ustar builderbuilder00000000000000/* * Copyright (c) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.kml; import java.awt.geom.PathIterator; import java.io.IOException; import java.io.PrintWriter; import java.util.List; import java.util.Locale; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Utils; /** * A class to create kml files from java areas (polygons) or rectangular areas. * @author GerdP * */ public class KmlWriter { private static void writeKmlHeader(PrintWriter pw){ pw.format("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" + "<kml xmlns=\"http://earth.google.com/kml/2.1\">\n" + "<Document>\n" + "  <Style id=\"transWhitePoly\"/>\n\n"); } private static void writeLineHeader(PrintWriter pw, int id, String name){ pw.format(Locale.ROOT, "  <Placemark>\n" + "    <name>%1$d</name>\n" + "    <styleUrl>#transWhitePoly</styleUrl>\n" + "    <description>\n" + "      <![CDATA[%2$s]]>\n" + "    </description>\n" + "    <Polygon>\n" + "      <outerBoundaryIs>\n" + "        <LinearRing>\n" + "          <coordinates>\n", id, name); } private static void writeLineFooter(PrintWriter pw){ pw.format(Locale.ROOT, "          </coordinates>\n" + "        </LinearRing>\n" + "      </outerBoundaryIs>\n" + "    </Polygon>\n" + "  </Placemark>\n"); } private static void writeKmlFooter(PrintWriter pw){ pw.format("</Document>\n</kml>\n"); } private static void writeCoordinates(PrintWriter pw, double x, double y){ pw.format(Locale.ROOT, " %f,%f\n",x,y); } /** * Write a java area in kml format. * @param filename * @param name * @param area */ public static void writeKml(String filename, String name, java.awt.geom.Area area){ String filePath = filename; if (filePath.endsWith(".kml") == false) filePath += ".kml"; try (PrintWriter pw = new PrintWriter(filePath)) { writeKmlHeader(pw); int linePart = 0 ; double startx = 0,starty = 0; double[] res = new double[6]; PathIterator pit = area.getPathIterator(null); int id = 0; while (!pit.isDone()) { int type = pit.currentSegment(res); double x = Utils.toDegrees((int) res[0]); double y = Utils.toDegrees((int) res[1]); switch (type) { case PathIterator.SEG_MOVETO: writeLineHeader(pw, id++, name + linePart++); writeCoordinates(pw, x,y); startx = x; starty = y; break; case PathIterator.SEG_LINETO: writeCoordinates(pw, x,y); break; case PathIterator.SEG_CLOSE: writeCoordinates(pw, startx,starty); writeLineFooter(pw); break; default: // should not happen System.err.println("Unsupported path iterator type " + type + ". This is an internal splitter error."); throw new IOException(); } pit.next(); } writeKmlFooter(pw); } catch (IOException e) { System.err.println("Could not write KML file " + filePath + ", processing continues"); } } /** * Write out a KML file containing the areas that we calculated. This KML file * can be opened in Google Earth etc to see the areas that were split. * * @param filename The KML filename to write to. */ public static void writeKml(String filename, List<Area> areas) { System.out.println("Writing KML file to " + filename); try (PrintWriter pw = new PrintWriter(filename);) { writeKmlHeader(pw); for (Area area : areas) { double south = Utils.toDegrees(area.getMinLat()); double west = Utils.toDegrees(area.getMinLong()); double north = Utils.toDegrees(area.getMaxLat()); double east = Utils.toDegrees(area.getMaxLong()); String name = area.getName() == null ?
String.valueOf(area.getMapId()) : area.getName(); writeLineHeader(pw, area.getMapId(), name); writeCoordinates(pw, west, south); writeCoordinates(pw, west, north); writeCoordinates(pw, east, north); writeCoordinates(pw, east, south); writeCoordinates(pw, west, south); writeLineFooter(pw); } writeKmlFooter(pw); } catch (IOException e) { System.err.println("Could not write KML file " + filename + ", processing continues"); } } } splitter-r653/src/uk/me/parabola/splitter/parser/0000775000175300017530000000000014352507253023267 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/parser/BinaryMapParser.java0000664000175300017530000001626314352507253027201 0ustar builderbuilder00000000000000/* * Copyright (c) 2010, Scott Crosby * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.parser; import crosby.binary.BinaryParser; import crosby.binary.Osmformat; import crosby.binary.file.FileBlockPosition; import it.unimi.dsi.fastutil.shorts.ShortArrayList; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.MapProcessor; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.UnknownFeatureException; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Way; import java.util.List; public class BinaryMapParser extends BinaryParser { private static final short TYPE_DENSE = 0x1; private static final short TYPE_NODES = 0x2; private static final short TYPE_WAYS = 0x4; private static final short TYPE_RELS = 0x8; private final ShortArrayList blockTypes = new ShortArrayList(); private final ShortArrayList knownBlockTypes; // for status messages private final ElementCounter elemCounter = new ElementCounter(); private short blockType = -1; private int blockCount = -1; private boolean skipTags; private boolean skipNodes; private boolean skipWays; private boolean skipRels; private short wantedTypeMask = 0; private int msgLevel; public BinaryMapParser(MapProcessor processor, ShortArrayList knownBlockTypes, int msgLevel) { this.processor = processor; this.knownBlockTypes = knownBlockTypes; this.skipTags = processor.skipTags(); this.skipNodes = processor.skipNodes(); this.skipWays = processor.skipWays(); this.skipRels = processor.skipRels(); this.msgLevel = msgLevel; if (!skipNodes) { wantedTypeMask |= TYPE_DENSE; wantedTypeMask |= TYPE_NODES; } if (!skipWays) wantedTypeMask |= TYPE_WAYS; if (!skipRels) wantedTypeMask |= TYPE_RELS; } MapProcessor processor; public ShortArrayList getBlockList() { return blockTypes; } @Override public boolean skipBlock(FileBlockPosition block) { blockCount++; if (knownBlockTypes != null) { blockType = knownBlockTypes.getShort(blockCount); if (blockType != 0 && (blockType & wantedTypeMask) == 0) return true; } else if (blockType != -1) { blockTypes.add(blockType); } blockType = 0; if (block.getType().equals("OSMData")) return false; if (block.getType().equals("OSMHeader")) return false; System.out.println("Skipped block of type: " + block.getType()); return true; } @Override public void complete() { blockTypes.add(blockType); // End of map is sent when all input files are 
processed. // So do nothing else. } @Override protected void parseDense(Osmformat.DenseNodes nodes) { blockType |= TYPE_DENSE; if (skipNodes) return; long lastId = 0, lastLat = 0, lastLon = 0; int j = 0; int maxi = nodes.getIdCount(); for (int i = 0; i < maxi; i++) { long lat = nodes.getLat(i) + lastLat; lastLat = lat; long lon = nodes.getLon(i) + lastLon; lastLon = lon; long id = nodes.getId(i) + lastId; lastId = id; double latf = parseLat(lat), lonf = parseLon(lon); Node tmp = new Node(); tmp.set(id, latf, lonf); if (nodes.hasDenseinfo()) tmp.setVersion(nodes.getDenseinfo().getVersion(i)); if (!skipTags && nodes.getKeysValsCount() > 0) { while (nodes.getKeysVals(j) != 0) { int keyid = nodes.getKeysVals(j++); int valid = nodes.getKeysVals(j++); tmp.addTag(getStringById(keyid), getStringById(valid)); } j++; // Skip over the '0' delimiter. } processor.processNode(tmp); elemCounter.countNode(tmp.getId()); } } @Override protected void parseNodes(List nodes) { if (nodes.isEmpty()) return; blockType |= TYPE_NODES; if (skipNodes) return; for (Osmformat.Node i : nodes) { Node tmp = new Node(); for (int j = 0; j < i.getKeysCount(); j++) tmp.addTag(getStringById(i.getKeys(j)), getStringById(i.getVals(j))); long id = i.getId(); double latf = parseLat(i.getLat()), lonf = parseLon(i.getLon()); tmp.set(id, latf, lonf); if (i.hasInfo()) tmp.setVersion(i.getInfo().getVersion()); processor.processNode(tmp); elemCounter.countNode(tmp.getId()); } } @Override protected void parseWays(List ways) { long numways = ways.size(); if (numways == 0) return; blockType |= TYPE_WAYS; if (skipWays) return; for (Osmformat.Way i : ways) { Way tmp = new Way(); if (!skipTags) { for (int j = 0; j < i.getKeysCount(); j++) tmp.addTag(getStringById(i.getKeys(j)), getStringById(i.getVals(j))); } long lastId = 0; for (long j : i.getRefsList()) { tmp.addRef(j + lastId); lastId = j + lastId; } long id = i.getId(); tmp.setId(id); if (i.hasInfo()) tmp.setVersion(i.getInfo().getVersion()); processor.processWay(tmp); elemCounter.countWay(i.getId()); } } @Override protected void parseRelations(List rels) { if (rels.isEmpty()) return; blockType |= TYPE_RELS; if (skipRels) return; for (Osmformat.Relation i : rels) { Relation tmp = new Relation(); if (!skipTags) { for (int j = 0; j < i.getKeysCount(); j++) tmp.addTag(getStringById(i.getKeys(j)), getStringById(i.getVals(j))); } long id = i.getId(); tmp.setId(id); tmp.setVersion(i.getInfo().getVersion()); long lastMemId = 0; for (int j = 0; j < i.getMemidsCount(); j++) { long mid = lastMemId + i.getMemids(j); lastMemId = mid; String role = getStringById(i.getRolesSid(j)); String etype = null; if (i.getTypes(j) == Osmformat.Relation.MemberType.NODE) etype = "node"; else if (i.getTypes(j) == Osmformat.Relation.MemberType.WAY) etype = "way"; else if (i.getTypes(j) == Osmformat.Relation.MemberType.RELATION) etype = "relation"; else assert false; // TODO; Illegal file? tmp.addMember(etype, mid, role); } processor.processRelation(tmp); elemCounter.countRelation(tmp.getId()); } } @Override public void parse(Osmformat.HeaderBlock block) { for (String s : block.getRequiredFeaturesList()) { if (s.equals("OsmSchema-V0.6")) continue; // OK. if (s.equals("DenseNodes")) continue; // OK. 
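// Any other required feature declared by the input file is not supported by this parser,
// so fail fast here rather than silently mis-read the data.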
throw new UnknownFeatureException(s); } if (block.hasBbox()) { final double multiplier = .000000001; double rightf = block.getBbox().getRight() * multiplier; double leftf = block.getBbox().getLeft() * multiplier; double topf = block.getBbox().getTop() * multiplier; double bottomf = block.getBbox().getBottom() * multiplier; if (msgLevel > 0) System.out.println("Bounding box " + leftf + " " + bottomf + " " + rightf + " " + topf); Area area = new Area(Utils.toMapUnit(bottomf), Utils.toMapUnit(leftf), Utils.toMapUnit(topf), Utils.toMapUnit(rightf)); if (!area.verify()) throw new IllegalArgumentException("invalid bbox area in pbf file: " + area); processor.boundTag(area); } } } splitter-r653/src/uk/me/parabola/splitter/parser/ElementCounter.java0000664000175300017530000000361114352507253027064 0ustar builderbuilder00000000000000/* * Copyright (c) 2013, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.parser; import uk.me.parabola.splitter.Utils; /** * Common OSM parseder method for status messages * @author GerdP * */ public class ElementCounter { // How many elements to process before displaying a status update private static final int NODE_STATUS_UPDATE_THRESHOLD = 10000000; private static final int WAY_STATUS_UPDATE_THRESHOLD = 1000000; private static final int RELATION_STATUS_UPDATE_THRESHOLD = 100000; // for messages private long nodeCount; private long wayCount; private long relationCount; /** * Count node and eventually print progress message with the node id * @param id */ protected void countNode(long id) { nodeCount++; if (nodeCount % NODE_STATUS_UPDATE_THRESHOLD == 0) { System.out.println(Utils.format(nodeCount) + " nodes parsed... id=" + id); } } /** * Count way and eventually print progress message with the way id * @param id */ protected void countWay(long id) { wayCount++; if (wayCount % WAY_STATUS_UPDATE_THRESHOLD == 0) { System.out.println(Utils.format(wayCount) + " ways parsed... id=" + id); } } /** * Count relation and eventually print progress message with the relation id * @param id */ protected void countRelation(long id) { relationCount++; if (relationCount % RELATION_STATUS_UPDATE_THRESHOLD == 0) { System.out.println(Utils.format(relationCount) + " relations parsed... id=" + id); } } } splitter-r653/src/uk/me/parabola/splitter/parser/O5mMapParser.java0000664000175300017530000004146514352507253026417 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.parser; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.StandardCharsets; import java.util.Arrays; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Element; import uk.me.parabola.splitter.MapProcessor; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Way; /** * Parser for the o5m format described here: http://wiki.openstreetmap.org/wiki/O5m * The routines are based on the osmconvert.c source from Markus Weber, who allows * them to be copied for any o5m IO; thanks a lot for that. * @author GerdP * */ public class O5mMapParser { // O5M data set constants private static final int NODE_DATASET = 0x10; private static final int WAY_DATASET = 0x11; private static final int REL_DATASET = 0x12; private static final int BBOX_DATASET = 0xdb; private static final int TIMESTAMP_DATASET = 0xdc; private static final int HEADER_DATASET = 0xe0; private static final int EOD_FLAG = 0xfe; private static final int RESET_FLAG = 0xff; // o5m constants private static final int STRING_TABLE_SIZE = 15000; private static final int MAX_STRING_PAIR_SIZE = 250 + 2; private static final String[] REL_REF_TYPES = {"node", "way", "relation", "?"}; private static final double FACTOR = 1d / 1000000000; // used as 100 * value * FACTOR // for status messages private final ElementCounter elemCounter = new ElementCounter(); // flags set by the processor to signal what information is not needed private final boolean skipTags; private final boolean skipNodes; private final boolean skipWays; private final boolean skipRels; private final FileChannel fileChannel; // Buffer size, must be a power of 2 private static final int BUF_SIZE = 0x1000; private final ByteBuffer fileBuffer = ByteBuffer.allocate(BUF_SIZE); private long filePos; private long bufStart; private int bufSize = -1; private long nextFilePos; private final MapProcessor processor; // buffer for byte -> String conversions private final byte[] cnvBuffer; // the o5m string table private String[][] stringTable; private final String[] stringPair; private int currStringTablePos; // a counter that must be maintained by all routines that read data // performance: save byte position of first occurrence of a data set type (node, way, relation) // to allow skipping large parts of the stream private long[] firstPosInFile; private long[] skipArray; // for delta calculations private long lastNodeId; private long lastWayId; private long lastRelId; private long[] lastRef; private long lastTs; private long lastChangeSet; private int lastLon; private int lastLat; /** * A parser for the o5m format. * @param processor a MapProcessor instance * @param fc the file channel for the input file * @param skipArray an array of longs that is used to hold the file position of the first occurrence of * each known o5m data set type (esp. nodes, ways, and relations).
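* A sketch of typical usage (the processor stands for any MapProcessor implementation, the file name is
* illustrative, and passing null as skipArray lets the parser build a fresh one):
*
*   try (RandomAccessFile raf = new RandomAccessFile("extract.o5m", "r")) {
*       O5mMapParser parser = new O5mMapParser(processor, raf.getChannel(), null);
*       parser.parse();
*       long[] skip = parser.getNextSkipArray(); // may be reused for further passes over the same file
*   }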
*/ public O5mMapParser(MapProcessor processor, FileChannel fc, long[] skipArray) { this.fileChannel = fc; this.processor = processor; this.skipArray = skipArray; this.skipTags = processor.skipTags(); this.skipNodes = processor.skipNodes(); this.skipWays = processor.skipWays(); this.skipRels = processor.skipRels(); this.cnvBuffer = new byte[4000]; // OSM data should not contain string pairs with length > 512 this.stringPair = new String[2]; this.lastRef = new long[3]; if (skipArray == null) { firstPosInFile = new long[256]; Arrays.fill(firstPosInFile, -1); } reset(); } /** * parse the input stream. * @throws IOException */ public void parse() throws IOException { int start = get() & 0xff; if (start != RESET_FLAG) throw new IOException("wrong header byte " + start); if (skipArray != null && skipNodes) { if (skipWays) filePos = skipArray[REL_DATASET]; // jump to first relation else filePos = skipArray[WAY_DATASET]; // jump to first way } if (filePos >= 0) readFile(); } /** * Read the file following the initial byte. * @throws IOException */ private void readFile() throws IOException { boolean done = false; while (!done) { long size = 0; int fileType = get() & 0xff; if (fileType >= 0 && fileType < 0xf0) { if (skipArray == null && firstPosInFile[fileType] == -1) { // save first occurrence of a data set type firstPosInFile[fileType] = Math.max(0, filePos- 1); } size = readUnsignedNum64(); nextFilePos = filePos + size; boolean doSkip = ((fileType == NODE_DATASET && skipNodes) || (fileType == WAY_DATASET && skipWays) || (fileType == REL_DATASET && skipRels)); switch(fileType) { case NODE_DATASET: case WAY_DATASET: case REL_DATASET: case BBOX_DATASET: case TIMESTAMP_DATASET: case HEADER_DATASET: if (doSkip) { filePos = nextFilePos; continue; } break; default: } } if (fileType == NODE_DATASET) readNode(); else if (fileType == WAY_DATASET) readWay(); else if (fileType == REL_DATASET) readRel(); else if (fileType == BBOX_DATASET) readBBox(); else if (fileType == TIMESTAMP_DATASET) readFileTimestamp(); else if (fileType == HEADER_DATASET) readHeader(); else if (fileType == EOD_FLAG) done = true; else if (fileType == RESET_FLAG) reset(); else { if (fileType < 0xf0) filePos = nextFilePos; // skip unknown data set } } } /** * read (and ignore) the file timestamp data set. * @throws IOException */ private void readFileTimestamp() throws IOException { /*long fileTimeStamp = */readSignedNum64(); } /** * read the bounding box data set. * @throws IOException */ private void readBBox() throws IOException { double leftf = 100L * readSignedNum32() * FACTOR; double bottomf = 100L * readSignedNum32() * FACTOR; double rightf = 100L * readSignedNum32() * FACTOR; double topf = 100L * readSignedNum32() * FACTOR; assert filePos == nextFilePos; System.out.println("Bounding box " + leftf + " " + bottomf + " " + rightf + " " + topf); Area area = new Area( Utils.toMapUnit(bottomf), Utils.toMapUnit(leftf), Utils.toMapUnit(topf), Utils.toMapUnit(rightf)); if (!area.verify()) throw new IllegalArgumentException("invalid bbox area in o5m file: " + area); processor.boundTag(area); } /** * read a node data set. 
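* Node ids and coordinates are delta-coded against the previous node in the stream; for example
* (illustrative values) the ids 1000, 1003, 1001 arrive as the signed deltas +1000, +3, -2, and the
* lat/lon values are delta-coded the same way in units of 100 nanodegrees.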
* @throws IOException */ private void readNode() throws IOException{ Node node = new Node(); lastNodeId += readSignedNum64(); if (filePos == nextFilePos) return; // only nodeId: this is a delete action, we ignore it int version = readVersionTsAuthor(); node.setVersion(version); if (filePos == nextFilePos) return; // only nodeId+version: this is a delete action, we ignore it int lon = readSignedNum32() + lastLon; lastLon = lon; int lat = readSignedNum32() + lastLat; lastLat = lat; double flon = 100L * lon * FACTOR; double flat = 100L * lat * FACTOR; assert flat >= -90.0 && flat <= 90.0; assert flon >= -180.0 && flon <= 180.0; node.set(lastNodeId, flat, flon); readTags(node); elemCounter.countNode(lastNodeId); processor.processNode(node); } /** * read a way data set. * @throws IOException */ private void readWay() throws IOException{ lastWayId += readSignedNum64(); if (filePos == nextFilePos) return; // only wayId: this is a delete action, we ignore it int version = readVersionTsAuthor(); if (filePos == nextFilePos) return; // only wayId + version: this is a delete action, we ignore it Way way = new Way(); way.setId(lastWayId); way.setVersion(version); long refSize = readUnsignedNum32(); long stop = filePos + refSize; while (filePos < stop) { lastRef[0] += readSignedNum64(); way.addRef(lastRef[0]); } readTags(way); elemCounter.countWay(lastWayId); processor.processWay(way); } /** * read a relation data set. * @throws IOException */ private void readRel() throws IOException{ lastRelId += readSignedNum64(); if (filePos == nextFilePos) return; // only relId: this is a delete action, we ignore it int version = readVersionTsAuthor(); if (filePos == nextFilePos) return; // only relId + version: this is a delete action, we ignore it Relation rel = new Relation(); rel.setId(lastRelId); rel.setVersion(version); long refSize = readUnsignedNum32(); long stop = filePos + refSize; while (filePos < stop) { long deltaRef = readSignedNum64(); int refType = readRelRef(); lastRef[refType] += deltaRef; rel.addMember(stringPair[0], lastRef[refType], stringPair[1]); } // tags readTags(rel); elemCounter.countRelation(lastRelId); processor.processRelation(rel); } private void readTags(Element elem) throws IOException{ // we cannot skip the tags if we read relations (roles) if (skipTags && skipRels) { filePos = nextFilePos; return; } while (filePos < nextFilePos) { readStringPair(); if (!skipTags) { elem.addTag(stringPair[0], stringPair[1]); } } assert filePos == nextFilePos; } /** * Store a new string pair (length check must be performed by caller). */ private void storeStringPair() { stringTable[0][currStringTablePos] = stringPair[0]; stringTable[1][currStringTablePos] = stringPair[1]; ++currStringTablePos; if (currStringTablePos >= STRING_TABLE_SIZE) currStringTablePos = 0; } /** * set stringPair to the values referenced by given string reference * No checking is performed. * @param ref valid values are 1 .. STRING_TABLE_SIZE * @throws IOException */ private void setStringRefPair(int ref) throws IOException{ int pos = currStringTablePos - ref; if (pos < 0) pos += STRING_TABLE_SIZE; if (pos < 0 || pos >= STRING_TABLE_SIZE) throw new IOException("invalid string table reference: " + ref); stringPair[0] = stringTable[0][pos]; stringPair[1] = stringTable[1][pos]; } /** * Read version, time stamp and change set and author. 
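* A version of 0 means that no further meta data follows; otherwise the timestamp is delta-coded
* against the previous one, and a decoded timestamp of 0 means that change set and author are absent
* as well.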
* @return the version * @throws IOException */ private int readVersionTsAuthor() throws IOException { int version = readUnsignedNum32(); if (version != 0) { // version info long ts = readSignedNum64() + lastTs; lastTs = ts; if (ts != 0) { long changeSet = readSignedNum32() + lastChangeSet; lastChangeSet = changeSet; readAuthor(); } } return version; } /** * Read author . * @throws IOException */ private void readAuthor() throws IOException{ int stringRef = readUnsignedNum32(); if (stringRef == 0) { long toReadStart = filePos; long uidNum = readUnsignedNum64(); if (uidNum == 0) stringPair[0] = ""; else { stringPair[0] = Long.toString(uidNum); get(); // skip terminating zero from uid } stringPair[1] = readString(); if (filePos - toReadStart <= MAX_STRING_PAIR_SIZE) storeStringPair(); } else { setStringRefPair(stringRef); } //System.out.println(pair[0]+ "/" + pair[1]); } /** * read object type ("0".."2") concatenated with role (single string). * @return 0..3 for type (3 means unknown) */ private int readRelRef() throws IOException { int refType = -1; long toReadStart = filePos; int stringRef = readUnsignedNum32(); if (stringRef == 0) { refType = get() - '0'; if (refType < 0 || refType > 2) refType = 3; stringPair[0] = REL_REF_TYPES[refType]; stringPair[1] = readString(); if (filePos - toReadStart <= MAX_STRING_PAIR_SIZE) storeStringPair(); } else { setStringRefPair(stringRef); char c = stringPair[0].charAt(0); switch (c) { case 'n': refType = 0; break; case 'w': refType = 1; break; case 'r': refType = 2; break; default: refType = 3; } } return refType; } /** * read a string pair (see o5m definition). * @throws IOException */ private void readStringPair() throws IOException{ int stringRef = readUnsignedNum32(); if (stringRef == 0) { long toReadStart = filePos; int cnt = 0; while (cnt < 2) { stringPair[cnt++] = readString(); } if (filePos - toReadStart <= MAX_STRING_PAIR_SIZE) storeStringPair(); } else { setStringRefPair(stringRef); } } /** * Read a zero-terminated string (see o5m definition). * @throws IOException */ String readString() throws IOException { int length = 0; while (true) { final int b = get(); if (b == 0) return new String(cnvBuffer, 0, length, StandardCharsets.UTF_8); cnvBuffer[length++] = (byte) b; } } /** reset the delta values and string table. */ private void reset() { lastNodeId = 0; lastWayId = 0; lastRelId = 0; lastRef[0] = 0; lastRef[1] = 0; lastRef[2] = 0; lastTs = 0; lastChangeSet = 0; lastLon = 0; lastLat = 0; stringTable = new String[2][STRING_TABLE_SIZE]; currStringTablePos = 0; } /** * read and verify o5m header (known values are o5m2 and o5c2). * @throws IOException */ private void readHeader() throws IOException { byte[] header = new byte[4]; for (int i = 0; i < header.length; i++) { header[i] = get(); } if (header[0] != 'o' || header[1] != '5' || (header[2] != 'c' && header[2] != 'm') || header[3] != '2') { throw new IOException("unsupported header"); } } /** * read a varying length signed number (see o5m definition). 
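* The least significant bit of the first byte carries the sign and the remaining bits the value, while
* a set high bit marks a continuation byte. Worked examples derived from the decoder below (purely
* illustrative): +5 is stored as the single byte 0x0a, -3 as 0x05, and +200 as the two bytes 0x90 0x03.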
* @return the number * @throws IOException */ private int readSignedNum32() throws IOException { int result; int b = get(); result = b; if ((b & 0x80) == 0) { // just one byte if ((b & 0x01) == 1) return -1 - (result >> 1); return result >> 1; } int sign = b & 0x01; result = (result & 0x7e) >> 1; int fac = 0x40; while (((b = get()) & 0x80) != 0) { // more bytes will follow result += fac * (b & 0x7f); fac <<= 7; } result += fac * b; if (sign == 1) // negative return -1 - result; return result; } /** * read a varying length signed number (see o5m definition). * @return the number * @throws IOException */ private long readSignedNum64() throws IOException { long result; int b = get(); result = b; if ((b & 0x80) == 0) { // just one byte if ((b & 0x01) == 1) return -1 - (result >> 1); return result >> 1; } int sign = b & 0x01; result = (result & 0x7e) >> 1; long fac = 0x40; while (((b = get()) & 0x80) != 0) { // more bytes will follow result += fac * (b & 0x7f); fac <<= 7; } result += fac * b; if (sign == 1) // negative return -1 - result; return result; } /** * read a varying length unsigned number (see o5m definition). * @return a long * @throws IOException */ private long readUnsignedNum64() throws IOException { int b = get(); long result = b; if ((b & 0x80) == 0) { // just one byte return result; } result &= 0x7f; long fac = 0x80; while (((b = get()) & 0x80) != 0) { // more bytes will follow result += fac * (b & 0x7f); fac <<= 7; } result += fac * b; return result; } /** * read a varying length unsigned number (see o5m definition) * is similar to the 64 bit version. * @return an int * @throws IOException */ private int readUnsignedNum32() throws IOException { int b = get(); int result = b; if ((b & 0x80) == 0) { // just one byte return result; } result &= 0x7f; long fac = 0x80; while (((b = get()) & 0x80) != 0) { // more bytes will follow result += fac * (b & 0x7f); fac <<= 7; } result += fac * b; return result; } public long[] getNextSkipArray() { return firstPosInFile; } /** * Read in a single byte from the current position. * * @return The byte that was read. * @throws IOException if buffer contains no data */ private byte get() throws IOException { fillBuffer(); int pos = (int) (filePos - bufStart); if (pos < 0 || pos >= bufSize) { throw new IOException("no data in file buffer, pos="+pos); } filePos++; return fileBuffer.get(pos); } /** * Check to see if the buffer contains the byte at the current position. * If not then it is re-read so that it does. * @throws IOException in case of I/O error */ private void fillBuffer() throws IOException { // If we are no longer inside the buffer, then re-read it. if (filePos >= bufStart + bufSize) { // Get channel position on a block boundary. bufStart = filePos & ~(BUF_SIZE - 1); fileChannel.position(bufStart); // Fill buffer fileBuffer.clear(); bufSize = fileChannel.read(fileBuffer); } } } splitter-r653/src/uk/me/parabola/splitter/parser/OSMXMLParser.java0000664000175300017530000001660214352507253026333 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.parser; import org.xmlpull.v1.XmlPullParserException; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Convert; import uk.me.parabola.splitter.MapProcessor; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Way; import uk.me.parabola.splitter.xml.parser.AbstractXppParser; /** * Parses an OSM file, calling the appropriate methods on a * {@code MapProcessor} as it progresses. */ public class OSMXMLParser extends AbstractXppParser { private enum State { Node, Way, Relation, None } private Node currentNode = new Node(); private Way currentWay = new Way(); private Relation currentRelation = new Relation(); private final MapProcessor processor; // for status messages private final ElementCounter elemCounter = new ElementCounter(); // There are mixed nodes and ways in the file private final boolean mixed; private boolean skipTags; private boolean skipNodes; private boolean skipWays; private boolean skipRels; private State state = State.None; public OSMXMLParser(MapProcessor processor, boolean mixed) throws XmlPullParserException { this.processor = processor; this.mixed = mixed; skipTags = processor.skipTags(); skipNodes = processor.skipNodes(); skipWays = processor.skipWays(); skipRels = processor.skipRels(); } /** * Receive notification of the start of an element. */ @Override public boolean startElement(String name) { switch (state) { case None: CharSequence action = getAttr("action"); if (action != null && action.equals("delete")) return false; if (name.equals("node")) { startNode(); } else if (name.equals("way")) { if (!skipWays) startWay(); else if (!mixed && skipRels) return true; } else if (name.equals("relation")) { if (!skipRels) startRelation(); else if (!mixed) return true; } else if (name.equals("bounds") || name.equals("bound")) { processBounds(); } break; case Node: if (!skipNodes) processNode(name); break; case Way: if (!skipWays) processWay(name); break; case Relation: if (!skipRels) processRelation(name); break; } return false; } private void startNode() { String idStr = getAttr("id"); String latStr = getAttr("lat"); String lonStr = getAttr("lon"); if (idStr == null || latStr == null || lonStr == null) { // This should never happen - bad/corrupt .osm file? System.err.println("Node encountered with missing data. Bad/corrupt osm file? id=" + idStr + ", lat=" + latStr + ", lon=" + lonStr + ". 
Ignoring this node"); return; } long id = Long.parseLong(idStr); double lat = Convert.parseDouble(latStr); double lon = Convert.parseDouble(lonStr); currentNode = new Node(); currentNode.set(id, lat, lon); currentNode.setVersion(parseVersion()); state = State.Node; } private void startWay() { currentWay = new Way(); currentWay.setId(getLongAttr("id")); currentWay.setVersion(parseVersion()); state = State.Way; } private void startRelation() { currentRelation = new Relation(); currentRelation.setId(getLongAttr("id")); currentRelation.setVersion(parseVersion()); state = State.Relation; } private int parseVersion () { String versionStr = getAttr("version"); if (versionStr == null) return 0; return Integer.parseInt(versionStr); } private void processNode(CharSequence name) { if (name.equals("tag")) { if (!skipTags) currentNode.addTag(getAttr("k"), getAttr("v")); } } private void processWay(CharSequence name) { if (name.equals("nd")) { currentWay.addRef(getLongAttr("ref")); } else if (name.equals("tag")) { if (!skipTags) currentWay.addTag(getAttr("k"), getAttr("v")); } } private void processRelation(CharSequence name) { if (name.equals("tag")) { if (!skipTags) currentRelation.addTag(getAttr("k"), getAttr("v")); } else if (name.equals("member")) { String type = getAttr("type"); long id = getLongAttr("ref"); String role = getAttr("role"); if (role == null) role = ""; if ("node".equals(type)) currentRelation.addMember("node", id, role); else if ("way".equals(type)) currentRelation.addMember("way", id, role); else if ("relation".equals(type)) { currentRelation.addMember("relation", id, role); } } } private static final String[] BOUND_ATTRS = {"minlat", "minlon", "maxlat", "maxlon"}; private void processBounds() { String[] split; String boxStr = getAttr("box"); if (boxStr == null) { split = new String[4]; for (int i = 0; i < BOUND_ATTRS.length; i++) { split[i] = getAttr(BOUND_ATTRS[i]); if (split[i] == null) { System.err.println("A tag was found but it has no 'box' attribute and no '" + BOUND_ATTRS[i] + "' attribute. Ignoring bounds"); return; } } } else { split = boxStr.split(","); if (split.length != 4) { System.err.println( "A tag was found but its 'box' attribute contains an unexpected number of coordinates (expected 4, found " + split.length + "). Ignoring bounds"); return; } } double[] coords = new double[4]; int[] mapUnits = new int[4]; for (int i = 0; i < 4; i++) { try { coords[i] = Double.parseDouble(split[i].trim()); } catch (NumberFormatException e) { System.err.println("A tag was found but it contains unexpected data. Unable to parse '" + split[i] + "' as a double. Ignoring bounds"); return; } mapUnits[i] = Utils.toMapUnit(coords[i]); } Area bounds = new Area(mapUnits[0], mapUnits[1], mapUnits[2], mapUnits[3]); if (!bounds.verify()) throw new IllegalArgumentException("invalid bbox area in osm file: " + bounds); if (bounds.getMinLong() > bounds.getMaxLong()) { System.out.println("A tag was found but it crosses +/-180 the latitude line (western edge=" + Utils.toDegrees(bounds.getMinLong()) + ", eastern=" + Utils.toDegrees(bounds.getMaxLong()) + "). The splitter isn't currently able to deal with this, so the bounds are being ignored"); return; } processor.boundTag(bounds); System.out.println("A tag was found. Area covered is " + bounds.toString()); } /** * Receive notification of the end of an element. 
*/ @Override public void endElement(String name) { if (state == State.Node) { if (name.equals("node")) { if (!skipNodes) processor.processNode(currentNode); state = State.None; elemCounter.countNode(currentNode.getId()); } } else if (state == State.Way) { if (name.equals("way")) { if (!skipWays) processor.processWay(currentWay); state = State.None; elemCounter.countWay(currentWay.getId()); } } else if (state == State.Relation) { if (name.equals("relation")) { if (!skipRels) processor.processRelation(currentRelation); state = State.None; elemCounter.countRelation(currentRelation.getId()); } } } } splitter-r653/src/uk/me/parabola/splitter/solver/0000775000175300017530000000000014352507254023306 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/solver/AreasCalculator.java0000664000175300017530000002072214352507254027221 0ustar builderbuilder00000000000000/* * Copyright (c) 2016, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.awt.Point; import java.awt.Rectangle; import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.List; import org.openstreetmap.osmosis.core.filter.common.PolygonFileReader; import org.xmlpull.v1.XmlPullParserException; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.OSMFileHandler; import uk.me.parabola.splitter.RoundingUtils; import uk.me.parabola.splitter.SplitFailedException; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.args.SplitterParams; /** * Some helper methods around area calculation. * @author Gerd Petermann * */ public class AreasCalculator { private final List polygons = new ArrayList<>(); private final int resolution; private final int numTiles; private final SplitterParams mainOptions; private final DensityMapCollector pass1Collector; private Area exactArea; public AreasCalculator(SplitterParams mainOptions, int numTiles) { this.mainOptions = mainOptions; this.resolution = mainOptions.getResolution(); this.numTiles = numTiles; pass1Collector = new DensityMapCollector(mainOptions); readPolygonFile(mainOptions.getPolygonFile(), mainOptions.getMapid()); readPolygonDescFile(mainOptions.getPolygonDescFile()); int numPolygons = polygons.size(); if (numPolygons > 0) { if (!checkPolygons()) { System.out.println( "Warning: Bounding polygon is complex. Splitter might not be able to fit all tiles into the polygon!"); } if (numTiles > 0) { System.out.println("Warning: bounding polygons are ignored because --num-tiles is used"); } } } /** * Check if the bounding polygons are usable. * @return false if any */ public boolean checkPolygons() { return polygons.stream().allMatch(pd -> checkPolygon(pd.getArea(), resolution)); } /** * Check if the bounding polygon is usable. 
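* For example (illustrative figures), with resolution 13 the raster width is 2^(24-13) = 2048 map units,
* so a single diagonal edge spanning roughly 1,000,000 x 500,000 map units contributes about
* (500,000 / 2048) * 2 = 488 estimated vertices towards the complexity limit.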
* @param mapPolygonArea * @param resolution * @return false if the polygon is too complex */ private static boolean checkPolygon(java.awt.geom.Area mapPolygonArea, int resolution) { List> shapes = Utils.areaToShapes(mapPolygonArea); int shift = 24 - resolution; long rectangleWidth = 1L << shift; for (List shape : shapes) { int estimatedPoints = 0; Point p1 = shape.get(0); for (int i = 1; i < shape.size(); i++) { Point p2 = shape.get(i); if (p1.x != p2.x && p1.y != p2.y) { // diagonal line int width = Math.abs(p1.x - p2.x); int height = Math.abs(p1.y - p2.y); estimatedPoints += (Math.min(width, height) / rectangleWidth) * 2; } if (estimatedPoints > SplittableDensityArea.MAX_SINGLE_POLYGON_VERTICES) return false; // too complex p1 = p2; } } return true; } private void readPolygonFile(String polygonFile, int mapId) { if (polygonFile == null) return; polygons.clear(); File f = new File(polygonFile); if (!f.exists()) { throw new IllegalArgumentException("polygon file doesn't exist: " + polygonFile); } PolygonFileReader polyReader = new PolygonFileReader(f); java.awt.geom.Area polygonInDegrees = polyReader.loadPolygon(); PolygonDesc pd = new PolygonDesc(polyReader.getPolygonName(), Utils.AreaDegreesToMapUnit(polygonInDegrees), mapId); polygons.add(pd); } private void readPolygonDescFile(String polygonDescFile) { if (polygonDescFile == null) return; polygons.clear(); if (!new File(polygonDescFile).exists()) { throw new IllegalArgumentException("polygon desc file doesn't exist: " + polygonDescFile); } final PolygonDescProcessor polygonDescProcessor = new PolygonDescProcessor(resolution); final OSMFileHandler polyDescHandler = new OSMFileHandler(); polyDescHandler.setFileNames(Arrays.asList(polygonDescFile)); polyDescHandler.setMixed(false); polyDescHandler.process(polygonDescProcessor); polygons.addAll(polygonDescProcessor.getPolygons()); } /** * Fill the density map. 
* @param osmFileHandler * @param fileOutputDir */ public void fillDensityMap(OSMFileHandler osmFileHandler, File fileOutputDir) { long start = System.currentTimeMillis(); // this is typically only used for debugging File densityData = new File("densities.txt"); File densityOutData = null; if (densityData.exists() && densityData.isFile()) { System.err.println("reading density data from " + densityData.getAbsolutePath()); pass1Collector.readMap(densityData.getAbsolutePath()); } else { // fill the map with data from OSM files osmFileHandler.execute(pass1Collector); densityOutData = new File(fileOutputDir, "densities-out.txt"); } exactArea = pass1Collector.getExactArea(); if (exactArea == null) { throw new SplitFailedException("no usable data in input file(s)"); } System.out.println("Fill-densities-map pass took " + (System.currentTimeMillis() - start) + " ms"); System.out.println("Exact map coverage read from input file(s) is " + exactArea); if (densityOutData != null) pass1Collector.saveMap(densityOutData.getAbsolutePath()); if (polygons.size() == 1) { // intersect the bounding polygon with the exact area Rectangle polgonsBoundingBox = polygons.get(0).getArea().getBounds(); exactArea = Area.calcArea(exactArea, polgonsBoundingBox); if (exactArea != null) System.out.println("Exact map coverage after applying bounding box of polygon-file is " + exactArea); else { throw new SplitFailedException( "Exact map coverage after applying bounding box of polygon-file is an empty area"); } } addPrecompSeaDensityData(); } private void addPrecompSeaDensityData () { String precompSeaDir = mainOptions.getPrecompSea(); if (precompSeaDir != null) { System.out.println("Counting nodes of precompiled sea data ..."); long startSea = System.currentTimeMillis(); DensityMapCollector seaCollector = new DensityMapCollector(mainOptions); PrecompSeaReader precompSeaReader = new PrecompSeaReader(exactArea, new File(precompSeaDir)); try { precompSeaReader.processMap(seaCollector); } catch (XmlPullParserException e) { // very unlikely because we read generated files e.printStackTrace(); } pass1Collector.mergeSeaData(seaCollector, !mainOptions.isNoTrim(), mainOptions.getResolution()); System.out.println("Precompiled sea data pass took " + (System.currentTimeMillis() - startSea) + " ms"); } } /** * Calculate the areas that we are going to split into by getting the total * area and then subdividing down until each area has at most max-nodes * nodes in it. * If {@code --num-tiles} option is used, tries to find a max-nodes value which results in the wanted number of areas. 
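* A sketch of how this class is typically driven (the variables mainOptions, numTiles, osmFileHandler
* and fileOutputDir are assumed to be supplied by the caller, e.g. the main program; this is an
* illustration, not the only possible wiring):
*
*   AreasCalculator calculator = new AreasCalculator(mainOptions, numTiles);
*   calculator.fillDensityMap(osmFileHandler, fileOutputDir);
*   List<Area> areas = calculator.calcAreas();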
* * @return */ public List calcAreas () { Area roundedBounds = RoundingUtils.round(exactArea, mainOptions.getResolution()); DensityMap densityMap = pass1Collector.getDensityMap(); boolean trim = !mainOptions.isNoTrim(); SplittableDensityArea splittableArea = new SplittableDensityArea(densityMap.subset(roundedBounds), mainOptions.getSearchLimit(), trim); if (splittableArea.hasData() == false) { System.out.println("input file(s) have no data inside calculated bounding box"); return Collections.emptyList(); } System.out.println("Rounded map coverage is " + splittableArea.getBounds()); splittableArea.setMapId(mainOptions.getMapid()); long startSplit = System.currentTimeMillis(); List areas; if (numTiles >= 2) { System.out.println("Splitting nodes into " + numTiles + " areas"); areas = splittableArea.split(numTiles); } else { System.out.println( "Splitting nodes into areas containing a maximum of " + Utils.format(mainOptions.getMaxNodes()) + " nodes each..."); splittableArea.setMaxNodes(mainOptions.getMaxNodes()); areas = splittableArea.split(polygons); } if (areas != null && areas.isEmpty() == false) System.out.println("Creating the initial areas took " + (System.currentTimeMillis() - startSplit) + " ms"); return areas; } public List getPolygons() { return Collections.unmodifiableList(polygons); } } splitter-r653/src/uk/me/parabola/splitter/solver/DensityMap.java0000664000175300017530000003013614352507254026231 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.awt.Point; import java.awt.Rectangle; import java.io.File; import java.io.FileInputStream; import java.io.FileWriter; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.LineNumberReader; import java.util.List; import java.util.regex.Pattern; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.MapDetails; import uk.me.parabola.splitter.RoundingUtils; import uk.me.parabola.splitter.SplitFailedException; import uk.me.parabola.splitter.Utils; /** * Builds up a map of node densities across the total area being split. * Density information is held at the maximum desired map resolution. * Every step up in resolution increases the size of the density map by * a factor of 4. * * @author Chris Miller */ public class DensityMap { private static final int SEA_NODE_FACTOR = 2; private final int width, height, shift; private int[][] nodeMap; private Area bounds; private long totalNodeCount; /** * Creates a density map. * @param area the area that the density map covers. * @param resolution the resolution of the density map. This must be a value between 1 and 24. 
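* For example (illustrative figures), at resolution 13 the shift is 24 - 13 = 11, so each grid cell
* covers 2048 x 2048 map units, and a rounded bounding box that is 1,048,576 map units wide yields
* 1,048,576 >> 11 = 512 columns.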
*/ public DensityMap(Area area, int resolution) { assert resolution >= 1 && resolution <= 24; shift = 24 - resolution; bounds = RoundingUtils.round(area, resolution); height = bounds.getHeight() >> shift; width = bounds.getWidth() >> shift; nodeMap = new int[width][]; } /** * @param polygonArea the polygon area * @return an area with rectilinear shape that approximates the polygon area */ public java.awt.geom.Area rasterPolygon(java.awt.geom.Area polygonArea) { if (polygonArea == null) return null; java.awt.geom.Area simpleArea = new java.awt.geom.Area(); if (!polygonArea.intersects(Utils.area2Rectangle(bounds, 0))) return simpleArea; int gridElemWidth = bounds.getWidth() / width; int gridElemHeight = bounds.getHeight() / height; Rectangle polygonBbox = polygonArea.getBounds(); int minLat = Math.max((int) polygonBbox.getMinY(), bounds.getMinLat()); int maxLat = Math.min((int) polygonBbox.getMaxY(), bounds.getMaxLat()); int minY = latToY(minLat); int maxY = latToY(maxLat); assert minY >= 0 && minY <= height; assert maxY >= 0 && maxY <= height; for (int x = 0; x < width; x++) { int lon = xToLon(x); if (lon + gridElemWidth < polygonBbox.getMinX() || lon > polygonBbox.getMaxX() || !polygonArea.intersects(lon, polygonBbox.getMinY(), gridElemWidth, polygonBbox.getHeight())) { continue; } int firstY = -1; for (int y = 0; y < height; y++) { int lat = yToLat(y); if (y < minY || y > maxY || !polygonArea.intersects(lon, lat, gridElemWidth, gridElemHeight)) { if (firstY >= 0) { simpleArea.add(new java.awt.geom.Area(new Rectangle(x, firstY, 1, y - firstY))); firstY = -1; } } else { if (firstY < 0) firstY = y; } } if (firstY >= 0){ simpleArea.add(new java.awt.geom.Area(new Rectangle(x, firstY, 1, height - firstY))); } } if (!simpleArea.isSingular()) { List> shapes = Utils.areaToShapes(simpleArea); if (shapes.removeIf(s -> !Utils.clockwise(s))) { System.out.println("Warning: Rastered polygon area contains holes, polygon is probably concave, trying to fix this"); simpleArea.reset(); shapes.forEach(s -> simpleArea.add(Utils.shapeToArea(s))); } } return simpleArea; } public int getShift() { return shift; } public Area getBounds() { return bounds; } public int getWidth() { return width; } public int getHeight() { return height; } public int addNode(int lat, int lon) { if (!bounds.contains(lat, lon)) return 0; totalNodeCount++; int x = lonToX(lon); if (x == width) x--; int y = latToY(lat); if (y == height) y--; if (nodeMap[x] == null) nodeMap[x] = new int[height]; return ++nodeMap[x][y]; } public long getNodeCount() { return totalNodeCount; } public int getNodeCount(int x, int y) { return nodeMap[x] != null ? 
nodeMap[x][y] : 0; } public DensityMap subset(final Area subsetBounds) { int minLat = Math.max(bounds.getMinLat(), subsetBounds.getMinLat()); int minLon = Math.max(bounds.getMinLong(), subsetBounds.getMinLong()); int maxLat = Math.min(bounds.getMaxLat(), subsetBounds.getMaxLat()); int maxLon = Math.min(bounds.getMaxLong(), subsetBounds.getMaxLong()); // If the area doesn't intersect with the density map, return an empty map if (minLat > maxLat || minLon > maxLon) { return new DensityMap(Area.EMPTY, 24 - shift); } Area subset = new Area(minLat, minLon, maxLat, maxLon); // If there's nothing in the area return an empty map if (subset.getWidth() == 0 || subset.getHeight() == 0) { return new DensityMap(Area.EMPTY, 24 - shift); } DensityMap result = new DensityMap(subset, 24 - shift); int startX = lonToX(subset.getMinLong()); int startY = latToY(subset.getMinLat()); int maxX = subset.getWidth() >> shift; int maxY = subset.getHeight() >> shift; for (int x = 0; x < maxX; x++) { if (startY == 0 && maxY == height) { result.nodeMap[x] = nodeMap[startX + x]; } else if (nodeMap[startX + x] != null) { result.nodeMap[x] = new int[maxY]; try { System.arraycopy(nodeMap[startX + x], startY, result.nodeMap[x], 0, maxY); } catch (ArrayIndexOutOfBoundsException e) { System.out.println("subSet() died at " + startX + ',' + startY + " " + maxX + ',' + maxY + " " + x); } } for (int y = 0; y < maxY; y++) { if (result.nodeMap[x] != null) result.totalNodeCount += result.nodeMap[x][y]; } } return result; } private int yToLat(int y) { return (y << shift) + bounds.getMinLat(); } private int xToLon(int x) { return (x << shift) + bounds.getMinLong(); } private int latToY(int lat) { return lat - bounds.getMinLat() >>> shift; } private int lonToX(int lon) { return lon - bounds.getMinLong() >>> shift; } /** * Write content of density map to file. Serves for easier debugging, * but may also be used to manipulate the map with other tools. 
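	 * <p>A short usage sketch (the file name and the variables are only examples):
	 * <pre>{@code
	 * densityMap.saveMap("densities-out.txt", detailBounds, collectorBounds);
	 * // ... later the dump can be read back into a map of the same resolution:
	 * Area restoredBounds = densityMap.readMap("densities-out.txt", mapDetails);
	 * }</pre>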
* @param fileName the name of the output file * @param detailBounds * @param collectorBounds */ public void saveMap(String fileName, Area detailBounds, Area collectorBounds) { try (FileWriter f = new FileWriter(new File(fileName))){ f.write(detailBounds.getMinLat() + "," + detailBounds.getMinLong() + "," + detailBounds.getMaxLat() + "," + detailBounds.getMaxLong() + '\n'); if (collectorBounds != null) f.write(collectorBounds.getMinLat() + "," + collectorBounds.getMinLong() + "," + collectorBounds.getMaxLat() + "," + collectorBounds.getMaxLong() + '\n'); else f.write("no_bounds_in_input\n"); for (int x=0; x= width || y < 0 || y >= height) { System.out.println("Error: Invalid data in map file, line number " + problemReader.getLineNumber() + ": " + inLine); } else { if (nodeMap[x] == null) nodeMap[x] = new int[height]; nodeMap[x][y] = sum; totalNodeCount += sum; } } catch(NumberFormatException exp){ System.out.println("Error: Invalid number format in density file " + fileName + ", line " + problemReader.getLineNumber() + ": " + inLine); System.out.println(exp); throw new SplitFailedException("Error: Cannot read density file " + mapFile); } } } catch (IOException exp) { throw new SplitFailedException("Error: Cannot read density file " + mapFile); } return collectorBounds; } private static void reportErrorLine(int lineNo, String inLine) { System.out.println("Error: Invalid format in map file, line number " + lineNo + ": " + inLine); } public Area getArea(int x, int y, int width2, int height2) { assert x >= 0; assert y >= 0; assert width2 > 0; assert height2 > 0; return new Area(yToLat(y),xToLon(x),yToLat(y+height2),xToLon(x+width2)); } /** * Handle data that will be added with the --precomp-sea option of mkgmap. * We add coast line data only to empty parts to avoid counting it twice. 
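	 * <p>Sketch of a typical call, mirroring DensityMapCollector.mergeSeaData()
	 * (variable names are illustrative):
	 * <pre>{@code
	 * Area roundedBounds = RoundingUtils.round(exactArea, resolution);
	 * densityMap.mergeSeaData(seaCollector.getDensityMap(), roundedBounds, trim);
	 * }</pre>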
* @param seaData a DensityMap that was filled with data from precompiled sea * @param area */ public void mergeSeaData(DensityMap seaData, Area area, boolean trim) { if (this.shift != seaData.shift || !Utils.area2Rectangle(bounds, 0).equals(Utils.area2Rectangle(seaData.getBounds(), 0))) { throw new SplitFailedException("cannot merge density maps"); } if (trim && totalNodeCount == 0) return; int minX = lonToX(area.getMinLong()); int maxX = lonToX(area.getMaxLong()); int minY = latToY(area.getMinLat()); int maxY = latToY(area.getMaxLat()); if (maxX >= width) maxX = width - 1; if (maxY >= height) maxY = height - 1; if (trim) { while (minX < width && nodeMap[minX] == null) minX++; while (maxX > 0 && nodeMap[maxX] == null) maxX--; while (minY < height && rowAllZero(minY, minX, maxX)) minY++; while (maxY > 0 && rowAllZero(maxY, minX, maxX)) maxY--; } long addedSeaNodes = 0; for (int x = minX; x <= maxX; x++) { int[] seaCol = seaData.nodeMap[x]; if (seaCol == null) continue; int[] col = nodeMap[x]; if (col == null) col = new int[height + 1]; for (int y = minY; y <= maxY; y++) { if (col[y] == 0) { int seaCount = seaCol[y] * SEA_NODE_FACTOR; col[y] = seaCount; totalNodeCount += seaCount; addedSeaNodes += seaCount; } } } System.out.println("Added " + addedSeaNodes + " nodes from precompiled sea data."); } boolean rowAllZero(int row, int minX, int maxX) { for (int x = minX; x <= maxX; x++) { if (nodeMap[x] != null && nodeMap[x][row] > 0) { return false; } } return true; } } splitter-r653/src/uk/me/parabola/splitter/solver/DensityMapCollector.java0000664000175300017530000000553614352507254030106 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import uk.me.parabola.splitter.AbstractMapProcessor; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.MapDetails; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.RoundingUtils; import uk.me.parabola.splitter.args.SplitterParams; /** * Builds up a density map. 
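 *
 * <p>Rough usage sketch; in the real pipeline the collector is driven by the OSM/PBF
 * readers, which is omitted here:
 * <pre>{@code
 * DensityMapCollector pass1Collector = new DensityMapCollector(mainOptions);
 * // ... feed every input file through the collector, then:
 * Area exactArea = pass1Collector.getExactArea();
 * DensityMap densities = pass1Collector.getDensityMap();
 * }</pre>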
*/ class DensityMapCollector extends AbstractMapProcessor{ private final DensityMap densityMap; private final MapDetails details = new MapDetails(); private Area bounds; private final boolean ignoreBoundsTags; private int files; public DensityMapCollector(SplitterParams mainOptions) { Area densityBounds = new Area(-0x400000, -0x800000, 0x400000, 0x800000); densityMap = new DensityMap(densityBounds, mainOptions.getResolution()); this.ignoreBoundsTags = mainOptions.getIgnoreOsmBounds(); } @Override public boolean skipTags() { return true; } @Override public boolean skipWays() { return true; } @Override public boolean skipRels() { return true; } @Override public void startFile() { if (++files > 1) checkBounds(); } @Override public void boundTag(Area fileBbox) { if (ignoreBoundsTags) return; if (this.bounds == null){ this.bounds = fileBbox; } else this.bounds = this.bounds.add(fileBbox); } @Override public void processNode(Node n) { int glat = n.getMapLat(); int glon = n.getMapLon(); densityMap.addNode(glat, glon); details.addToBounds(glat, glon); } /** * Check if a bounds tag was found. If not, * use the bbox of the data that was collected so far. * This is used when multiple input files are used * and first doesn't contain a bounds tag. */ public void checkBounds(){ if (this.bounds == null) this.bounds = getExactArea(); } public Area getExactArea() { if (bounds != null) { return bounds; } return details.getBounds(); } public void mergeSeaData(DensityMapCollector seaData, boolean trim, int resolution) { Area roundedBounds = RoundingUtils.round(getExactArea(), resolution); densityMap.mergeSeaData(seaData.densityMap, roundedBounds, trim); } public void saveMap(String fileName) { if (details.getBounds() != null) densityMap.saveMap(fileName, details.getBounds(), bounds); } public void readMap(String fileName) { bounds = densityMap.readMap(fileName, details); } public DensityMap getDensityMap() { return densityMap; } } splitter-r653/src/uk/me/parabola/splitter/solver/EnhancedDensityMap.java0000664000175300017530000001241314352507254027655 0ustar builderbuilder00000000000000/* * Copyright (C) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.awt.Rectangle; import java.util.BitSet; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Utils; /** * Contains info that is needed by the {@link Tile} class. For a given * DensityMap we calculate some extra info to allow faster access to row sums * and column sums. 
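 *
 * <p>Illustrative sketch; {@code densityMap} stands for the DensityMap passed in, and a
 * {@code null} polygon means no polygon filtering:
 * <pre>{@code
 * EnhancedDensityMap info = new EnhancedDensityMap(densityMap, null);
 * int[] row = info.getMapRow(0);   // may be null when the row contains no nodes
 * int[] col = info.getMapCol(0);   // may be null when the column contains no nodes
 * double ar = info.getAspectRatio(
 *         new Rectangle(0, 0, densityMap.getWidth(), densityMap.getHeight()));
 * }</pre>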
* * @author GerdP * */ public class EnhancedDensityMap { private final DensityMap densityMap; private int[][] xyMap; private int[][] yxMap; private BitSet xyOutsidePolygon = new BitSet(); private double[] aspectRatioFactor; private int minAspectRatioFactorPos; private int maxNodesInDensityMapGridElement = Integer.MIN_VALUE; private int maxNodesInDensityMapGridElementInPoly = Integer.MIN_VALUE; private java.awt.geom.Area polygonArea; public EnhancedDensityMap(DensityMap densities, java.awt.geom.Area polygonArea) { this.densityMap = densities; this.polygonArea = polygonArea; prepare(); } /** * If a polygon is given, filter the density data Compute once complex * trigonometric results for needed for proper aspect ratio calculations. * */ private void prepare() { // performance: calculate only once the needed complex math results aspectRatioFactor = new double[densityMap.getHeight() + 1]; int minLat = densityMap.getBounds().getMinLat(); int maxLat = densityMap.getBounds().getMaxLat(); int lat = 0; double maxAspectRatioFactor = Double.MIN_VALUE; int minPos = Integer.MAX_VALUE; for (int i = 0; i < aspectRatioFactor.length; i++) { lat = minLat + i * (1 << densityMap.getShift()); assert lat <= maxLat; aspectRatioFactor[i] = Math.cos(Math.toRadians(Utils.toDegrees(lat))); if (maxAspectRatioFactor < aspectRatioFactor[i]) { maxAspectRatioFactor = aspectRatioFactor[i]; minPos = i; } } minAspectRatioFactorPos = minPos; assert lat == maxLat; // filter the density map and populate xyMap int width = densityMap.getWidth(); int height = densityMap.getHeight(); xyMap = new int[width][]; int shift = densityMap.getShift(); for (int x = 0; x < width; x++) { int polyXPos = densityMap.getBounds().getMinLong() + (x << shift); int[] xCol = null; xCol = new int[height]; boolean colNeeded = false; for (int y = 0; y < height; y++) { int count = densityMap.getNodeCount(x, y); if (polygonArea != null) { int polyYPos = densityMap.getBounds().getMinLat() + (y << shift); if (polygonArea.intersects(polyXPos, polyYPos, 1 << shift, 1 << shift)) { maxNodesInDensityMapGridElementInPoly = Math.max(count, maxNodesInDensityMapGridElementInPoly); } else { xyOutsidePolygon.set(x * height + y); } } if (count > 0) { maxNodesInDensityMapGridElement = Math.max(count, maxNodesInDensityMapGridElement); xCol[y] = count; colNeeded = true; } } if (colNeeded) xyMap[x] = xCol; } // create and populate yxMap, this helps to speed up row access yxMap = new int[height][]; for (int y = 0; y < height; y++) { boolean rowNeeded = false; int[] yRow = new int[width]; for (int x = 0; x < width; x++) { int count = 0; if (xyMap[x] != null) count = xyMap[x][y]; if (count > 0) { rowNeeded = true; yRow[x] = count; } } if (rowNeeded) yxMap[y] = yRow; } } public boolean isGridElemInPolygon(int x, int y) { if (polygonArea == null || xyOutsidePolygon.isEmpty()) return true; return !xyOutsidePolygon.get(x * densityMap.getHeight() + y); } // calculate aspect ratio of a tile which is a view on the densityMap public double getAspectRatio(Rectangle r) { double ratio; double maxWidth; if (r.y < minAspectRatioFactorPos && r.y + r.height > minAspectRatioFactorPos) { maxWidth = r.width; // tile crosses equator } else { double width1 = r.width * aspectRatioFactor[r.y]; double width2 = r.width * aspectRatioFactor[r.y + r.height]; maxWidth = Math.max(width1, width2); } ratio = maxWidth / r.height; return ratio; } public Area getBounds() { return densityMap.getBounds(); } public DensityMap getDensityMap() { return densityMap; } public long getNodeCount() { return 
densityMap.getNodeCount(); } public int[] getMapRow(int mapRow) { return yxMap[mapRow]; } public int[] getMapCol(int mapCol) { return xyMap[mapCol]; } public double[] getAspectRatioFactor() { return aspectRatioFactor; } public int getMinAspectRatioFactorPos() { return minAspectRatioFactorPos; } public int getMaxNodesInDensityMapGridElement() { return maxNodesInDensityMapGridElement; } public int getMaxNodesInDensityMapGridElementInPoly() { return maxNodesInDensityMapGridElementInPoly; } public java.awt.geom.Area getPolygonArea() { return polygonArea; } public boolean allInsidePolygon() { return polygonArea == null || xyOutsidePolygon.isEmpty(); } } splitter-r653/src/uk/me/parabola/splitter/solver/PolygonDesc.java0000664000175300017530000000177514352507254026411 0ustar builderbuilder00000000000000/* * Copyright (c) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.awt.geom.Area; /** * Store a java area with a name and mapid * @author GerdP * */ public class PolygonDesc { private final java.awt.geom.Area area; private final String name; private final int mapId; public PolygonDesc(String name, Area area, int mapId) { this.name = name; this.area = area; this.mapId = mapId; } public java.awt.geom.Area getArea() { return area; } public String getName() { return name; } public int getMapId() { return mapId; } } splitter-r653/src/uk/me/parabola/splitter/solver/PolygonDescProcessor.java0000664000175300017530000000564414352507254030310 0ustar builderbuilder00000000000000/* * Copyright (c) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; import uk.me.parabola.splitter.AbstractMapProcessor; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.RoundingUtils; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Way; import java.awt.geom.Path2D; import java.awt.geom.Area; import java.util.ArrayList; import java.util.List; /** * * Class to read a polygon description file (OSM) * Expected input are nodes and ways. Ways with * tag name=* and mapid=nnnnnnnn should describe polygons * which are used to calculate area lists. * @author GerdP * */ class PolygonDescProcessor extends AbstractMapProcessor { private Long2ObjectOpenHashMap nodes = new Long2ObjectOpenHashMap<>(); private final List polygonDescriptions = new ArrayList<>(); private final int shift; public PolygonDescProcessor(int resolution) { this.shift = 24 - resolution; } @Override public void processNode(Node n){ // round all coordinates to be on the used grid. 
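		// (shift is 24 - resolution, so each polygon vertex is snapped to the same grid
		// that the DensityMap and the solver work on)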
int lat = RoundingUtils.round(n.getMapLat(), shift); int lon = RoundingUtils.round(n.getMapLon(), shift); double roundedLat = Utils.toDegrees(lat); double roundedLon = Utils.toDegrees(lon); Node rNode = new Node(); rNode.set(n.getId(),roundedLat,roundedLon); nodes.put(rNode.getId(), rNode); } @Override public void processWay(Way w){ String name = w.getTag("name"); if (name == null){ System.out.println("name missing, ignoring way " + w.getId()); return; } String mapIdString = w.getTag("mapid"); if (mapIdString == null){ System.out.println("mapid missing, ignoring way " + w.getId()); return; } int mapId; try{ mapId = Integer.parseInt(mapIdString); } catch (NumberFormatException e){ System.out.println("invalid mapid in way " + w.getId()); return; } Path2D path = null; for (long ref : w.getRefs()){ Node n = nodes.get(ref); if (n != null){ if (path == null){ path = new Path2D.Double(); path.moveTo(n.getMapLon(), n.getMapLat()); } else path.lineTo(n.getMapLon(), n.getMapLat()); } } PolygonDesc pd = new PolygonDesc(name, new Area(path), mapId); polygonDescriptions.add(pd); } @Override public boolean endMap(){ nodes = null; System.out.println("found " + polygonDescriptions.size() + " named polygons"); return true; } public List getPolygons() { return polygonDescriptions; } } splitter-r653/src/uk/me/parabola/splitter/solver/PrecompSeaReader.java0000664000175300017530000002543614352507254027344 0ustar builderbuilder00000000000000/* * Copyright (C) 2013, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.LineNumberReader; import java.io.Reader; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.regex.Pattern; import java.util.zip.GZIPInputStream; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import org.xmlpull.v1.XmlPullParserException; import crosby.binary.file.BlockInputStream; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.SplitFailedException; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.parser.BinaryMapParser; import uk.me.parabola.splitter.parser.OSMXMLParser; /** * Reader for precompiled sea data. This is mostly a copy of the corresponding * code in mkgmap SeaGenerator. 
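 *
 * <p>Typical use, sketched; the path and variable names are only examples:
 * <pre>{@code
 * PrecompSeaReader seaReader = new PrecompSeaReader(exactArea, new File("sea-latest.zip"));
 * DensityMapCollector seaCollector = new DensityMapCollector(mainOptions);
 * seaReader.processMap(seaCollector);   // may throw XmlPullParserException
 * }</pre>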
* * @author GerdP * */ public class PrecompSeaReader { /** The size (lat and long) of the precompiled sea tiles */ private static final int PRECOMP_RASTER = 1 << 15; private static final byte SEA_TILE = 's'; private static final byte LAND_TILE = 'l'; private static final byte MIXED_TILE = 'm'; // useful constants defining the min/max map units of the precompiled sea // tiles private static final int MIN_LAT = Utils.toMapUnit(-90.0); private static final int MAX_LAT = Utils.toMapUnit(90.0); private static final int MIN_LON = Utils.toMapUnit(-180.0); private static final int MAX_LON = Utils.toMapUnit(180.0); private static final Pattern keySplitter = Pattern.compile(Pattern.quote("_")); private final Area bounds; private final File precompSeaDir; private byte[][] precompIndex; private String precompSeaExt; private String precompSeaPrefix; private String precompZipFileInternalPath; private ZipFile zipFile; public PrecompSeaReader(Area bounds, File precompSeaDir) { this.bounds = bounds; this.precompSeaDir = precompSeaDir; init(); } /** * Process all precompiled sea tiles. * * @param processor * The processor that is called * @throws XmlPullParserException */ public void processMap(DensityMapCollector processor) throws XmlPullParserException { for (String tileName : getPrecompKeyNames()) { InputStream is = getStream(tileName); if (is != null) { try { if (tileName.endsWith(".pbf")) { BinaryMapParser binParser = new BinaryMapParser(processor, null, 0); BlockInputStream blockinput = (new BlockInputStream(is, binParser)); blockinput.process(); blockinput.close(); } else { // No, try XML. try (Reader reader = new InputStreamReader(is, StandardCharsets.UTF_8)) { OSMXMLParser parser = new OSMXMLParser(processor, true); parser.setReader(reader); parser.parse(); } } } catch (Exception e) { e.printStackTrace(); throw new SplitFailedException(e.getMessage()); } } } } /** * Read the index and set corresponding fields. 
*/ private void init() { if (precompSeaDir.exists()) { String internalPath = null; String indexFileName = "index.txt.gz"; try { if (precompSeaDir.isDirectory()) { File indexFile = new File(precompSeaDir, indexFileName); if (!indexFile.exists()) { // check if the unzipped index file exists indexFileName = "index.txt"; indexFile = new File(precompSeaDir, indexFileName); } if (indexFile.exists()) { try (InputStream indexStream = new FileInputStream(indexFile)) { loadIndex(indexStream, indexFileName); } } else { throw new IllegalArgumentException("Cannot find required index.txt[.gz] in " + precompSeaDir); } } else if (precompSeaDir.getName().endsWith(".zip")) { zipFile = new ZipFile(precompSeaDir); internalPath = "sea/"; ZipEntry entry = zipFile.getEntry(internalPath + indexFileName); if (entry == null) { indexFileName = "index.txt"; entry = zipFile.getEntry(internalPath + indexFileName); } if (entry == null) { internalPath = ""; indexFileName = "index.txt.gz"; entry = zipFile.getEntry(internalPath + indexFileName); } if (entry != null) { try (InputStream indexStream = zipFile.getInputStream(entry)) { precompZipFileInternalPath = internalPath; loadIndex(indexStream, indexFileName); } } else { throw new SplitFailedException("Don't know how to read " + precompSeaDir); } } else { throw new SplitFailedException("Don't know how to read " + precompSeaDir); } } catch (IOException exp) { exp.printStackTrace(); throw new SplitFailedException("Cannot read index file " + indexFileName); } } else { throw new SplitFailedException( "Directory or zip file with precompiled sea does not exist: " + precompSeaDir.getName()); } } private void loadIndex(InputStream indexStream, String indexFileName) throws IOException { if (indexFileName.endsWith(".gz")) { try (InputStream stream = new GZIPInputStream(indexStream)) { loadIndexFromStream(stream); return; } } loadIndexFromStream(indexStream); } /** * Read the index from stream and populate the index grid. 
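	 * <p>Each non-comment line of the index has the form {@code lat_lon;entry}, where the
	 * entry is {@code sea}, {@code land}, or the file name of a mixed tile. Made-up example
	 * lines (the exact file-name scheme depends on how the precompiled sea was built):
	 * <pre>{@code
	 * # index of precompiled sea tiles
	 * 2457600_-327680;sea
	 * 2490368_-327680;sea_2490368_-327680.pbf
	 * }</pre>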
* * @param fileStream * already opened stream */ private void loadIndexFromStream(InputStream fileStream) throws IOException { int indexWidth = (PrecompSeaReader.getPrecompTileStart(MAX_LON) - PrecompSeaReader.getPrecompTileStart(MIN_LON)) / PrecompSeaReader.PRECOMP_RASTER; int indexHeight = (PrecompSeaReader.getPrecompTileStart(MAX_LAT) - PrecompSeaReader.getPrecompTileStart(MIN_LAT)) / PrecompSeaReader.PRECOMP_RASTER; LineNumberReader indexReader = new LineNumberReader(new InputStreamReader(fileStream)); Pattern csvSplitter = Pattern.compile(Pattern.quote(";")); String indexLine = null; byte[][] indexGrid = new byte[indexWidth + 1][indexHeight + 1]; boolean detectExt = true; String prefix = null; String ext = null; while ((indexLine = indexReader.readLine()) != null) { if (indexLine.startsWith("#")) { // comment continue; } String[] items = csvSplitter.split(indexLine); if (items.length != 2) { throw new IllegalArgumentException("Invalid format in index file name: " + indexLine); } String precompKey = items[0]; byte type = updatePrecompSeaTileIndex(precompKey, items[1], indexGrid); if (type == '?') { throw new IllegalArgumentException("Invalid format in index file name: " + indexLine); } if (type == MIXED_TILE) { // make sure that all file names are using the same name scheme int prePos = items[1].indexOf(items[0]); if (prePos >= 0) { if (detectExt) { prefix = items[1].substring(0, prePos); ext = items[1].substring(prePos + items[0].length()); detectExt = false; } else { StringBuilder sb = new StringBuilder(prefix); sb.append(precompKey); sb.append(ext); if (!items[1].equals(sb.toString())) { throw new IllegalArgumentException("Unexpected file name in index file: " + indexLine); } } } } } // precompIndex = indexGrid; precompSeaPrefix = prefix; precompSeaExt = ext; } private InputStream getStream(String tileName) { InputStream is = null; try { if (zipFile != null) { ZipEntry entry = zipFile.getEntry(precompZipFileInternalPath + tileName); if (entry != null) { is = zipFile.getInputStream(entry); } else { throw new IOException("Preompiled sea tile " + tileName + " not found."); } } else { File precompTile = new File(precompSeaDir, tileName); is = new FileInputStream(precompTile); } } catch (Exception exp) { exp.printStackTrace(); throw new SplitFailedException(exp.getMessage()); } return is; } /** * Retrieves the start value of the precompiled tile. * * @param value * the value for which the start value is calculated * @return the tile start value */ private static int getPrecompTileStart(int value) { int rem = value % PRECOMP_RASTER; if (rem == 0) { return value; } else if (value >= 0) { return value - rem; } else { return value - PRECOMP_RASTER - rem; } } /** * Retrieves the end value of the precompiled tile. * * @param value * the value for which the end value is calculated * @return the tile end value */ private static int getPrecompTileEnd(int value) { int rem = value % PRECOMP_RASTER; if (rem == 0) { return value; } else if (value >= 0) { return value + PRECOMP_RASTER - rem; } else { return value - rem; } } /** * Calculates the key names of the precompiled sea tiles for the bounding * box. The key names are compiled of {@code lat+"_"+lon}. 
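	 * The returned names already carry the prefix and extension detected in the index file,
	 * e.g. (illustrative) {@code sea_2457600_-327680.pbf}.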
* * @return the key names for the bounding box */ private List getPrecompKeyNames() { List precompKeys = new ArrayList<>(); for (int lat = getPrecompTileStart(bounds.getMinLat()); lat < getPrecompTileEnd( bounds.getMaxLat()); lat += PRECOMP_RASTER) { for (int lon = getPrecompTileStart(bounds.getMinLong()); lon < getPrecompTileEnd( bounds.getMaxLong()); lon += PRECOMP_RASTER) { int latIndex = (MAX_LAT - lat) / PRECOMP_RASTER; int lonIndex = (MAX_LON - lon) / PRECOMP_RASTER; byte type = precompIndex[lonIndex][latIndex]; if (type == MIXED_TILE) precompKeys.add(precompSeaPrefix + lat + "_" + lon + precompSeaExt); } } return precompKeys; } /** * Update the index grid for the element identified by precompKey. * * @param precompKey * The key name is compiled of {@code lat+"_"+lon}. * @param fileName * either "land", "sea", or a file name containing OSM data * @param indexGrid * the previously allocated index grid * @return the byte that was saved in the index grid */ private static byte updatePrecompSeaTileIndex(String precompKey, String fileName, byte[][] indexGrid) { String[] tileCoords = keySplitter.split(precompKey); byte type = '?'; if (tileCoords.length == 2) { int lat = Integer.parseInt(tileCoords[0]); int lon = Integer.parseInt(tileCoords[1]); int latIndex = (MAX_LAT - lat) / PRECOMP_RASTER; int lonIndex = (MAX_LON - lon) / PRECOMP_RASTER; if ("sea".equals(fileName)) type = SEA_TILE; else if ("land".equals(fileName)) type = LAND_TILE; else type = MIXED_TILE; indexGrid[lonIndex][latIndex] = type; } return type; } } splitter-r653/src/uk/me/parabola/splitter/solver/Solution.java0000664000175300017530000002013314352507254025764 0ustar builderbuilder00000000000000/* * Copyright (C) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.awt.Rectangle; import java.util.ArrayList; import java.util.List; /** * Helper class to combine a list of tiles with some * values that measure the quality. * @author GerdP * */ public class Solution { /** * */ private enum sides {TOP,RIGHT,BOTTOM,LEFT} private final List tiles; private final long maxNodes; private double worstAspectRatio = -1; private int numLowCount; private long worstMinNodes = Long.MAX_VALUE; public Solution(long maxNodes) { tiles = new ArrayList<>(); this.maxNodes = maxNodes; } public Solution copy() { Solution s = new Solution(this.maxNodes); tiles.forEach(s::add); return s; } public boolean add(Tile tile) { tiles.add(tile); double aspectRatio = tile.getAspectRatio(); if (aspectRatio < 1.0) aspectRatio = 1.0 / aspectRatio; worstAspectRatio = Math.max(aspectRatio, worstAspectRatio); worstMinNodes = Math.min(tile.getCount(), worstMinNodes); if (tile.getCount() < maxNodes / 3) numLowCount++; return true; } /** * Combine this solution with the other. 
* @param other */ public void merge(Solution other) { if (other.tiles.isEmpty()) return; if (tiles.isEmpty()) { worstAspectRatio = other.worstAspectRatio; worstMinNodes = other.worstMinNodes; } else { if (other.worstAspectRatio > worstAspectRatio) worstAspectRatio = other.worstAspectRatio; if (worstMinNodes > other.worstMinNodes) worstMinNodes = other.worstMinNodes; } numLowCount += other.numLowCount; tiles.addAll(other.tiles); } public List getTiles() { return tiles; } public long getWorstMinNodes() { return worstMinNodes; } public double getWorstAspectRatio() { return worstAspectRatio; } public boolean isEmpty() { return tiles.isEmpty(); } public int size() { return tiles.size(); } /** * Compare two solutions * @param other * @return -1 if this is better, 1 if other is better, 0 if both are equal */ public int compareTo(Solution other) { if (other == null) return -1; if (other == this) return 0; if (isEmpty() != other.isEmpty()) return isEmpty() ? 1 : -1; int d = Boolean.compare(isNice(), other.isNice()); if (d != 0) return -d; // prefer this if nice if (worstMinNodes != other.worstMinNodes) { // ignore minNodes when both are bad if (Math.max(worstMinNodes, other.worstMinNodes) > 1000) return (worstMinNodes > other.worstMinNodes) ? -1 : 1; } // if aspect ratio is very different and tile sizes are almost equal, // favour better aspect ratio double tileRatio = (double) tiles.size() / other.tiles.size(); double arRatio = worstAspectRatio / other.worstAspectRatio; if (tileRatio < 1 && tileRatio > 0.99 && arRatio > 1.5) return 1; if (tileRatio < 1.01 && tileRatio > 1 && arRatio < 0.66666) return -1; if (tiles.size() != other.tiles.size()) return tiles.size() < other.tiles.size() ? -1 : 1; if (worstAspectRatio != other.worstAspectRatio) return worstAspectRatio < other.worstAspectRatio ? 
-1 : 1; return 0; } /** * Trim tiles without creating holes or gaps between tiles */ public void trimOuterTiles() { while (true) { boolean trimmedAny = false; int minX = Integer.MAX_VALUE; int maxX = Integer.MIN_VALUE; int minY = Integer.MAX_VALUE; int maxY = Integer.MIN_VALUE; for (Tile tile : tiles) { if (minX > tile.x) minX = tile.x; if (minY > tile.y) minY = tile.y; if (maxX < tile.getMaxX()) maxX = (int) tile.getMaxX(); if (maxY < tile.getMaxY()) maxY = (int) tile.getMaxY(); } for (sides side:sides.values()) { for (int direction = -1; direction <= 1; direction += 2) { int trimToPos = -1; switch (side) { case LEFT: case BOTTOM: trimToPos = Integer.MAX_VALUE; break; case TOP: case RIGHT: trimToPos = -1; } while (true) { Tile candidate = null; boolean trimmed = false; for (Tile tile : tiles) { if (tile.getCount() == 0) continue; switch (side) { case LEFT: if (minX == tile.x && (candidate == null || (direction < 0 && candidate.y > tile.y) || (direction > 0 && candidate.getMaxY() < tile.getMaxY()))) { candidate = tile; } break; case RIGHT: if (maxX == tile.getMaxX() && (candidate == null || (direction < 0 && candidate.y > tile.y) || (direction > 0 && candidate.getMaxY() < tile.getMaxY()))) { candidate = tile; } break; case BOTTOM: if (minY == tile.y && (candidate == null || (direction < 0 && candidate.x > tile.x) || (direction > 0 && candidate.getMaxX() < tile.getMaxX()))) { candidate = tile; } break; case TOP: if (maxY == tile.getMaxY() && (candidate == null || (direction < 0 && candidate.x > tile.x) || (direction > 0 && candidate.getMaxX() < tile.getMaxX()))) { candidate = tile; } break; } } if (candidate == null) break; Rectangle before = new Rectangle(candidate); switch (side) { case LEFT: while (candidate.x < trimToPos && candidate.getColSum(0) == 0) { candidate.x ++; candidate.width--; } if (candidate.x < trimToPos) trimToPos = candidate.x; break; case RIGHT: while ((candidate.getMaxX() > trimToPos) && candidate.getColSum(candidate.width-1) == 0) { candidate.width--; } if (candidate.getMaxX() > trimToPos) trimToPos = (int) candidate.getMaxX(); break; case BOTTOM: while (candidate.y < trimToPos && candidate.getRowSum(0) == 0) { candidate.y ++; candidate.height--; } if (candidate.y < trimToPos) trimToPos = candidate.y; break; case TOP: while (candidate.getMaxY() > trimToPos && candidate.getRowSum(candidate.height-1) == 0) { candidate.height--; } if (candidate.getMaxX() > trimToPos) trimToPos = (int) candidate.getMaxY(); break; } if (!before.equals(candidate)) { trimmed = true; trimmedAny = true; } if (!trimmed) break; } } } if (!trimmedAny) return; } } /** * A solution is considered to be nice when aspect * ratios are not extreme and every tile is filled * with at least 33% of the max-nodes value or almost all tiles are filled much better. * @return */ public boolean isNice() { if (isEmpty() || worstAspectRatio > SplittableDensityArea.NICE_MAX_ASPECT_RATIO) return false; final long low = maxNodes / 3; if (tiles.size() == 1 || worstMinNodes >= low || (numLowCount <= 2 && tiles.size() > 20) || (numLowCount == 1 && tiles.size() > 4)) return true; double lowRatio = 100.0 * numLowCount / tiles.size(); return lowRatio < 3; // less then 3 percent of the tiles are not well filled } @Override public String toString() { if (isEmpty()) return "is empty"; long percentage = 100 * worstMinNodes / maxNodes; return tiles.size() + " tile(s). The smallest node count is " + worstMinNodes + " (" + percentage + " %)"; } /** * Returns true if this solution is smaller or better than the other. 
* @param other the other solution * @return true if this solution is smaller or better than the other */ public boolean isSmallerOrBetter(Solution other) { if (isEmpty()) return false; if (other == null || other.isEmpty() && !isEmpty()) return true; if (other.size() > this.size()) return true; if (other.size() == this.size()) return compareTo(other) < 0; return false; } } splitter-r653/src/uk/me/parabola/splitter/solver/SplittableDensityArea.java0000664000175300017530000013426014352507254030413 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.awt.Point; import java.awt.Rectangle; import java.time.Duration; import java.time.Instant; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import it.unimi.dsi.fastutil.ints.IntArrayList; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.RoundingUtils; import uk.me.parabola.splitter.SplitFailedException; import uk.me.parabola.splitter.Utils; /** * Splits a density map into multiple areas, none of which exceed the desired * threshold. * * @author Chris Miller, Gerd Petermann */ public class SplittableDensityArea { private static final int MAX_LAT_DEGREES = 85; private static final int MAX_LON_DEGREES = 90; public static final int MAX_SINGLE_POLYGON_VERTICES = 40; private static final int MAX_LOOPS = 100; // number of loops to find better solution for one rectangular area static final int AXIS_HOR = 0; static final int AXIS_VERT = 1; public static final double NICE_MAX_ASPECT_RATIO = 4; private static final double VERY_NICE_FILL_RATIO = 0.94; private static final long LARGE_MAX_NODES = 10_000_000; private static final double MAX_OUTSIDE_RATIO = 0.5; private static final int MIN_TILE_AREA_BAD_CACHE = 100; private static final int MAX_DEPTH_STATS = 10; private boolean enableExtraOpt = true; // option ? 
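	/*
	 * Rough usage sketch, mirroring the call sequence in calcAreas() (variable names are
	 * illustrative):
	 *
	 *   SplittableDensityArea splittable =
	 *           new SplittableDensityArea(densityMap.subset(roundedBounds), searchLimit, trim);
	 *   splittable.setMapId(mapId);
	 *   splittable.setMaxNodes(maxNodes);
	 *   List<Area> areas = splittable.split(polygons);   // or split(wantedTiles) for a fixed count
	 */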
private final int startSearchLimit; private final DensityMap allDensities; private EnhancedDensityMap extraDensityInfo; private boolean beQuiet; private static final boolean DEBUG = false; private long maxNodes; private int stopNumber; private final int shift; private final boolean trimShape; private boolean trimTiles; private boolean allowEmptyPart; private int currMapId; private boolean hasEmptyPart; private int solverId; public SplittableDensityArea(DensityMap densities, int startSearchLimit, boolean trim) { this.shift = densities.getShift(); this.startSearchLimit = startSearchLimit; this.trimShape = trim; allDensities = densities; } public DensityMap getAllDensities() { return allDensities; } public void setMapId(int mapId) { currMapId = mapId; } public void setMaxNodes(long maxNodes) { this.maxNodes = maxNodes; } public boolean hasData() { return allDensities != null && allDensities.getNodeCount() > 0; } /** * @return the area that this splittable area represents */ public Area getBounds() { return allDensities.getBounds(); } /** * Calculate a solution (list of areas that either matches the given criteria or is empty) * * @return solution (can be empty if none was found with the given criteria) */ private Solution split() { Solution fullSolution = new Solution(maxNodes); if (allDensities == null || allDensities.getNodeCount() == 0) return fullSolution; prepare(null); Tile startTile = new Tile(extraDensityInfo); List startTiles = new ArrayList<>(); if (trimShape || allDensities.getBounds().getWidth() >= 0x1000000) { // if trim is wanted or tile spans over planet // we try first to find large empty areas (sea) startTiles.addAll(checkForEmptyClusters(0, startTile, true)); } else { startTiles.add(startTile); } int countNoSol; while (true) { countNoSol = 0; for (Tile tile : startTiles) { hasEmptyPart = false; // possibly overwritten in solveRectangularArea if (!beQuiet) System.out.println("Solving partition " + tile.toString()); Solution solution = solveRectangularArea(tile); if (solution != null && !solution.isEmpty()) fullSolution.merge(solution); else { countNoSol++; if (!beQuiet) System.out.println("Warning: No solution found for partition " + tile.toString()); } } if (countNoSol == 0) break; if (allowEmptyPart || !hasEmptyPart) break; allowEmptyPart = true; fullSolution = new Solution(maxNodes); } if (countNoSol > 0 && stopNumber == 0) throw new SplitFailedException("Failed to find a correct split"); if (!beQuiet) { printFinalSplitMsg(fullSolution); } return fullSolution; } /** * Split with a given polygon and max nodes threshold. If the polygon is not * singular, it is divided into singular areas. 
* * @param polygonArea * @return list of areas */ private List split(java.awt.geom.Area polygonArea) { if (polygonArea == null) return getAreas(split(), null); if (polygonArea.isSingular()) { java.awt.geom.Area rasteredArea = allDensities.rasterPolygon(polygonArea); List> shapes = Utils.areaToShapes(rasteredArea); List areas = new ArrayList<>(); for (List shape : shapes) { java.awt.geom.Area rasteredPart = Utils.shapeToArea(shape); if (rasteredPart.isEmpty()) { System.err.println("Bounding polygon doesn't intersect with the bounding box of the input file(s)"); return Collections.emptyList(); } if (rasteredPart.isSingular()) { prepare(polygonArea); Tile tile = new Tile(extraDensityInfo, rasteredPart.getBounds()); Solution solution = findSolutionWithSinglePolygon(0, tile, rasteredPart, new HashSet<>()); if (solution == null && rasteredPart.isRectangular()) solution = split(); if (solution != null) { areas.addAll(getAreas(solution, polygonArea)); } } } return areas; } if (polygonArea.intersects(Utils.area2Rectangle(allDensities.getBounds(), 0))) return splitPolygon(polygonArea); System.err.println("Bounding polygon doesn't intersect with the bounding box of the input file(s)"); return Collections.emptyList(); } /** * Split a list of named polygons. Overlapping areas of the polygons are * extracted and each one is split for itself. A polygon may not be singular. * * @param namedPolygons list of polygons, if empty the tile bounds are used * @return list of areas */ public List split(List namedPolygons) { if (namedPolygons.isEmpty()) { return getAreas(split(), null); } List result = new ArrayList<>(); class ShareInfo { java.awt.geom.Area area; final IntArrayList sharedBy = new IntArrayList(); } List sharedParts = new ArrayList<>(); for (int i = 0; i < namedPolygons.size(); i++) { boolean wasDistinct = true; PolygonDesc namedPart = namedPolygons.get(i); java.awt.geom.Area distinctPart = new java.awt.geom.Area(namedPart.getArea()); for (int j = 0; j < namedPolygons.size(); j++) { if (j == i) continue; java.awt.geom.Area test = new java.awt.geom.Area(namedPart.getArea()); test.intersect(namedPolygons.get(j).getArea()); if (!test.isEmpty()) { wasDistinct = false; distinctPart.subtract(namedPolygons.get(j).getArea()); if (j > i) { ShareInfo si = new ShareInfo(); si.area = test; si.sharedBy.add(i); si.sharedBy.add(j); sharedParts.add(si); } } } if (!distinctPart.isEmpty() && distinctPart.intersects(Utils.area2Rectangle(allDensities.getBounds(), 0))) { // KmlWriter.writeKml("e:/ld_sp/distinct_"+namedPart.getName(), "distinct", distinctPart); if (!wasDistinct) System.out.println("splitting distinct part of " + namedPart.getName()); else System.out.println("splitting " + namedPart.getName()); result.addAll(split(distinctPart)); } } for (int i = 0; i < sharedParts.size(); i++) { ShareInfo si = sharedParts.get(i); int last = namedPolygons.size(); // list is extended in the loop for (int j = 0; j < last; j++) { if (si.sharedBy.contains(j)) continue; java.awt.geom.Area test = new java.awt.geom.Area(si.area); test.intersect(namedPolygons.get(j).getArea()); if (!test.isEmpty()) { si.area.subtract(test); if (j > si.sharedBy.getInt(si.sharedBy.size() - 1)) { ShareInfo si2 = new ShareInfo(); si2.area = test; si2.sharedBy.addAll(si.sharedBy); si2.sharedBy.add(j); sharedParts.add(si2); } } if (si.area.isEmpty()) break; } if (!si.area.isEmpty() && si.area.intersects(Utils.area2Rectangle(allDensities.getBounds(), 0))) { String desc = ""; for (int pos : si.sharedBy) desc += namedPolygons.get(pos).getName() + " and "; 
desc = desc.substring(0, desc.lastIndexOf(" and")); System.out.println("splitting area shared by exactly " + si.sharedBy.size() + " polygons: " + desc); // KmlWriter.writeKml("e:/ld_sp/shared_"+desc.replace(" " , "_"), desc, si.area); result.addAll(split(si.area)); } } return result; } /** * Split into a given number of tiles. * * @param wantedTiles * @return list of areas */ public List split(int wantedTiles) { this.stopNumber = wantedTiles; long currMaxNodes = (long) (this.allDensities.getNodeCount() / (wantedTiles * 0.95)); class Pair { long myMaxNodes; int numTiles; Pair(long maxNodes, int numTiles) { this.myMaxNodes = maxNodes; this.numTiles = numTiles; } } Pair bestBelow = null; Pair bestAbove = null; beQuiet = true; while (true) { this.setMaxNodes(currMaxNodes); System.out.println("Trying a max-nodes value of " + currMaxNodes + " to split " + allDensities.getNodeCount() + " nodes into " + wantedTiles + " areas"); Solution sol = split(); if (sol.isEmpty() || sol.size() == wantedTiles) { beQuiet = false; printFinalSplitMsg(sol); return getAreas(sol, null); } Pair pair = new Pair(currMaxNodes, sol.size()); if (sol.size() > wantedTiles) { if (bestAbove == null || bestAbove.numTiles > pair.numTiles || (bestAbove.numTiles == pair.numTiles && pair.myMaxNodes < bestAbove.myMaxNodes)) bestAbove = pair; } else { if (bestBelow == null || bestBelow.numTiles < pair.numTiles || (bestBelow.numTiles == pair.numTiles && pair.myMaxNodes > bestBelow.myMaxNodes)) bestBelow = pair; } long testMaxNodes; if (bestBelow == null || bestAbove == null) testMaxNodes = Math.min(Math.round((double) currMaxNodes * sol.size() / wantedTiles), this.allDensities.getNodeCount() - 1); else testMaxNodes = (bestBelow.myMaxNodes + bestAbove.myMaxNodes) / 2; if (testMaxNodes == currMaxNodes) { System.err.println("Cannot find a good split with exactly " + wantedTiles + " areas"); printFinalSplitMsg(sol); return getAreas(sol, null); } currMaxNodes = testMaxNodes; } } /** * Filter the density data, calculate once complex trigonometric results * * @param polygonArea */ private void prepare(java.awt.geom.Area polygonArea) { extraDensityInfo = new EnhancedDensityMap(allDensities, polygonArea); if (!beQuiet) { System.out.println("Highest node count in a single grid element is " + Utils.format(extraDensityInfo.getMaxNodesInDensityMapGridElement())); if (polygonArea != null) { System.out.println("Highest node count in a single grid element within the bounding polygon is " + Utils.format(extraDensityInfo.getMaxNodesInDensityMapGridElementInPoly())); } } if (polygonArea != null) trimTiles = true; } /** * Try to find empty areas. This will fail if the empty area is enclosed by a * non-empty area. 
* * @param depth recursion depth * @param tile the tile that might contain an empty area * @param splitHoriz true: search horizontal, else vertical * @return a list containing one or more tiles, cut from the original tile, or * just the original tile */ private ArrayList checkForEmptyClusters(int depth, final Tile tile, boolean splitHoriz) { java.awt.geom.Area area = new java.awt.geom.Area(tile); int firstEmpty = -1; int countEmpty = 0; long countLastPart = 0; long countRemaining = tile.getCount(); int maxEmpty = Utils.toMapUnit(30) / (1 << shift); int minEmpty = Utils.toMapUnit(10) / (1 << shift); if (splitHoriz) { for (int i = 0; i < tile.width; i++) { long count = tile.getColSum(i); if (count == 0) { if (firstEmpty < 0) firstEmpty = i; countEmpty++; } else { if (countEmpty > maxEmpty || (countEmpty > minEmpty && countLastPart > maxNodes / 3 && countRemaining > maxNodes / 3)) { java.awt.geom.Area empty = new java.awt.geom.Area( new Rectangle(firstEmpty, tile.y, countEmpty, tile.height)); area.subtract(empty); countLastPart = 0; } countRemaining -= count; firstEmpty = -1; countEmpty = 0; countLastPart += count; } } } else { for (int i = 0; i < tile.height; i++) { long count = tile.getRowSum(i); if (count == 0) { if (firstEmpty < 0) firstEmpty = i; countEmpty++; } else { if (countEmpty > maxEmpty || (countEmpty > minEmpty && countLastPart > maxNodes / 3 && countRemaining > maxNodes / 3)) { java.awt.geom.Area empty = new java.awt.geom.Area( new Rectangle(tile.x, firstEmpty, tile.width, countEmpty)); area.subtract(empty); countLastPart = 0; } countRemaining -= count; firstEmpty = -1; countEmpty = 0; countLastPart += count; } } } ArrayList clusters = new ArrayList<>(); if (depth == 0 && area.isSingular()) { // try also the other split axis clusters.addAll(checkForEmptyClusters(depth + 1, tile.trim(), !splitHoriz)); } else { if (area.isSingular()) { clusters.add(tile.trim()); } else { List> shapes = Utils.areaToShapes(area); for (List shape : shapes) { java.awt.geom.Area part = Utils.shapeToArea(shape); Tile t = new Tile(extraDensityInfo, part.getBounds()); if (t.getCount() > 0) clusters.addAll(checkForEmptyClusters(depth + 1, t.trim(), !splitHoriz)); } } } return clusters; } /** * Split, handling a polygon that may contain multiple distinct areas. 
* * @param polygonArea * @return a list of areas that cover the polygon */ private List splitPolygon(final java.awt.geom.Area polygonArea) { List result = new ArrayList<>(); List> shapes = Utils.areaToShapes(polygonArea); for (int i = 0; i < shapes.size(); i++) { List shape = shapes.get(i); if (!Utils.clockwise(shape)) continue; java.awt.geom.Area shapeArea = Utils.shapeToArea(shape); Rectangle rShape = shapeArea.getBounds(); if (shape.size() > MAX_SINGLE_POLYGON_VERTICES) { shapeArea = new java.awt.geom.Area(rShape); System.out.println("Warning: shape is too complex, using rectangle " + rShape + " instead"); } Area shapeBounds = new Area(rShape.y, rShape.x, (int) rShape.getMaxY(), (int) rShape.getMaxX()); int resolution = 24 - allDensities.getShift(); shapeBounds = RoundingUtils.round(shapeBounds, resolution); SplittableDensityArea splittableArea = new SplittableDensityArea(allDensities.subset(shapeBounds), startSearchLimit, trimShape); splittableArea.setMaxNodes(maxNodes); if (!splittableArea.hasData()) { System.out.println( "Warning: a part of the bounding polygon would be empty and is ignored:" + shapeBounds); // result.add(shapeBounds); continue; } List partResult = splittableArea.split(shapeArea); if (partResult != null) result.addAll(partResult); } return result; } /** * Split the given tile using the given (singular) polygon area. The routine * splits the polygon into parts and calls itself recursively for each part that * is not rectangular. * * @param depth recursion depth * @param tile the tile to split * @param rasteredPolygonArea an area describing a rectilinear shape * @param knownBad collection containing rectangles which are known to be without solution * @return a solution (maybe empty), or null if rasteredPolygon is not singular */ private Solution findSolutionWithSinglePolygon(int depth, final Tile tile, java.awt.geom.Area rasteredPolygonArea, Set knownBad) { if (!rasteredPolygonArea.isSingular()) { return null; } if (rasteredPolygonArea.isRectangular()) { Tile part = new Tile(extraDensityInfo, rasteredPolygonArea.getBounds()); return solveRectangularArea(part); } List> shapes = Utils.areaToShapes(rasteredPolygonArea); List shape = shapes.get(0); if (shape.size() > MAX_SINGLE_POLYGON_VERTICES) { Tile part = new Tile(extraDensityInfo, rasteredPolygonArea.getBounds()); System.out.println("Warning: rastered shape is too complex, using rectangle " + part + " instead"); return solveRectangularArea(part); } Rectangle pBounds = rasteredPolygonArea.getBounds(); int lastPoint = shape.size() - 1; if (shape.get(0).equals(shape.get(lastPoint))) --lastPoint; for (int i = 0; i <= lastPoint; i++) { Point point = shape.get(i); if (i > 0 && point.equals(shape.get(0))) continue; int cutX = point.x; int cutY = point.y; Solution part1Sol = null, part2Sol = null; for (int axis = 0; axis < 2; axis++) { Rectangle r1, r2; if (axis == AXIS_HOR) { r1 = new Rectangle(pBounds.x, pBounds.y, cutX - pBounds.x, pBounds.height); r2 = new Rectangle(cutX, pBounds.y, (int) (pBounds.getMaxX() - cutX), pBounds.height); } else { r1 = new Rectangle(pBounds.x, pBounds.y, pBounds.width, cutY - pBounds.y); r2 = new Rectangle(pBounds.x, cutY, pBounds.width, (int) (pBounds.getMaxY() - cutY)); } if (r1.isEmpty() || r2.isEmpty() || knownBad.contains(r1) || knownBad.contains(r2)) continue; // System.out.println("depth=" + depth + ", trying point " + i + "/" + lastPoint); if (r1.width * r1.height > r2.width * r2.height) { Rectangle help = r1; r1 = r2; r2 = help; } java.awt.geom.Area area = new java.awt.geom.Area(r1); 
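				// clip the smaller rectangle (r1 after the swap above) to the polygon and try to
				// solve it first; only if that succeeds is the larger remainder attempted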
area.intersect(rasteredPolygonArea); part1Sol = findSolutionWithSinglePolygon(depth + 1, tile, area, knownBad); if (part1Sol != null && !part1Sol.isEmpty()) { area = new java.awt.geom.Area(r2); area.intersect(rasteredPolygonArea); part2Sol = findSolutionWithSinglePolygon(depth + 1, tile, area, knownBad); if (part2Sol != null && !part2Sol.isEmpty()) { part1Sol.merge(part2Sol); return part1Sol; } knownBad.add(r2); } else { knownBad.add(r1); } } } return new Solution(maxNodes); } private Solution solveRectangularArea(Tile startTile) { if (startTile.getCount() <= 1) return new Solution(maxNodes); int bestPossible = stopNumber > 0 ? stopNumber : startTile.getMinParts(maxNodes); System.out.println("Splitting tile " + startTile + ", goal is to get near " + bestPossible + " tiles"); return solveRectangularAreaParallel(startTile, 0); } /** * Split large tile into smaller parts with a simple split and solve the small parts using parallel stream. * @param startTile the tile to split * @param depth recursion depth * @return solution */ private Solution solveRectangularAreaParallel(Tile startTile, int depth) { if (depth == 0 && (stopNumber > 0 || startTile.getCount() < 256 * maxNodes)) return solveRectangularAreaOne(startTile); Solution res = new Solution(maxNodes); long partSize = 64 * maxNodes; if (depth > 0) { partSize = Math.max(1, startTile.getCount() - 1); } List todo = startTile.divide(partSize); System.out.println("Initial simple split returned " + todo.size() + " tile(s)"); List solvers = new ArrayList<>(); List initialAreas = new ArrayList<>(); for (Tile t : todo) { if (t.outsidePolygon()) continue; if (trimTiles) t = t.trim(); int areaSize = t.width * t.height; boolean useSearchAll = areaSize < 32_000 || t.getCount() < 16 * maxNodes; boolean anyOutside = t.countElemsOutside() > 0; Solver solver = new Solver(++solverId, useSearchAll, maxNodes, t, shift, 0, trimTiles, startSearchLimit, allowEmptyPart); solver.maxAspectRatio = getStartRatio(startTile); System.out.println("Using " + solver.toString() + " on " + Utils.format(areaSize) + " grid elements" + (trimTiles && anyOutside ? 
", trim needed" : ", trim not needed")); Rectangle r = t.getRealBBox(); Area area = new Area(r.y, r.x, (int) r.getMaxY(), (int) r.getMaxX()); area.setMapId(solverId); initialAreas.add(area); // if (depth > 0 ||solver.name.startsWith("S19 ")) solvers.add(solver); } solvers.parallelStream().forEach(Solver::solve); List solvers2 = new ArrayList<>(); if (enableExtraOpt) { for (int i = 0; i < solvers.size(); i++) { Solver solver = solvers.get(i); Solution s = solver.bestSolution; int goal = solver.startTile.getMinParts(maxNodes); int areaSize = solver.startTile.width * solver.startTile.height; if (areaSize > 200_000) continue; if (s.size() > 1 && (!s.isNice() || s.size() >= goal + 3)) { System.out.println("trying to improve poor solution from " + solver); Solver sv2 = new Solver(++solverId, !solver.searchAll, maxNodes, solver.startTile, shift, stopNumber, solver.trimTiles, startSearchLimit, allowEmptyPart); System.out.println("Starting " + sv2.toString()); sv2.maxAspectRatio = getStartRatio(startTile); solvers2.add(sv2); } } solvers2.parallelStream().forEach(Solver::solve); } for (Solver sv : solvers) { Solution sol = sv.bestSolution; Optional opt = solvers2.stream().filter(s2 -> sv.startTile.equals(s2.startTile)).findAny(); if (opt.isPresent()) { Solution sol2 = opt.get().bestSolution; if (sol2.isNice() && sol2.isSmallerOrBetter(sol)) { System.out.println(opt.get().name + ": replaced solution from " + sv.name); sol = sol2; } } if (sol.isEmpty()) sol = solveRectangularAreaParallel(sv.startTile, depth + 1); res.merge(sol); } return res; } /** * Get a first solution and search for better ones until either a nice solution * is found or no improvement was found. * * @param startTile the tile to split * @return a solution (maybe be empty) */ private Solution solveRectangularAreaOne(Tile startTile) { // start values for optimization process: we make little steps towards a good // solution if (startTile.getCount() == 0) return new Solution(maxNodes); List solvers = new ArrayList<>(); int numAlgos = 2; for (int i = 0; i < numAlgos; i++) { Solver solver = new Solver(++solverId, i == 1, maxNodes, startTile, shift, stopNumber, trimTiles, startSearchLimit, allowEmptyPart); if (solver.searchAll && startTile.getCount() > 300 * maxNodes) continue; // too complex for FULL if (!solver.searchAll && stopNumber == 0 && startTile.getCount() < 10 * maxNodes) continue; // too simple for SOME solver.maxAspectRatio = getStartRatio(startTile); solvers.add(solver); } if (solvers.size() == 1) { solvers.get(0).solve(); } else { ExecutorService threadPool = Executors.newFixedThreadPool(solvers.size()); List> futures = new ArrayList<>(); for (Solver solver : solvers) { futures.add(threadPool.submit(solver::solve)); } threadPool.shutdown(); Instant t1 = null; final double n75 = 0.75 * maxNodes; final double n85 = 0.85 * maxNodes; while (!threadPool.isTerminated()) { for (int i = 0; i < solvers.size(); i++) { Future future = futures.get(i); if (future.isDone()) { try { future.get(); } catch (InterruptedException | ExecutionException e) { throw new SplitFailedException("parallel solver crashed", e.getCause()); } Solution sol = solvers.get(i).bestSolution; if (sol.isNice()) { if (t1 == null) t1 = Instant.now(); long dt = Duration.between(t1, Instant.now()).getSeconds(); boolean stop = false; if (sol.getWorstMinNodes() >= n85 && dt > 10) { stop = true; // all tiles have at least 85% of max-nodes } else { int num75 = 0; for (Tile tile : sol.getTiles()) { if (tile.getCount() < n75) num75++; } double below75 = 100.0 * num75 / 
sol.size(); if (below75 > 5 && dt > 30) { // +5 percent of tiles are less the 75 percent but we waited +30 seconds stop = true; } } if (stop) { // stop the other solver solvers.forEach(Solver::stop); } } } } try { Thread.sleep(500); } catch (InterruptedException e) { e.printStackTrace(); } } // call get() on each future to recognise possible exceptions futures.forEach(f -> { try { f.get(); } catch (InterruptedException | ExecutionException e) { Thread.currentThread().interrupt(); throw new SplitFailedException("parallel solver crashed", e.getCause()); } }); // sort by number of tiles so that the smaller number comes first // can't use compareTo here as it prefers the higher worstMinNodes value solvers.sort((o1, o2) -> { int d = Boolean.compare(o1.bestSolution.isNice(), o2.bestSolution.isNice()); if (d != 0) return -d; // prefer nice d = Integer.compare(o1.bestSolution.size(), o2.bestSolution.size()); if (d != 0) return d; // prefer higher min-nodes return Long.compare(o2.bestSolution.getWorstMinNodes(), o1.bestSolution.getWorstMinNodes()); }); } Solver best = solvers.get(0); if (best.bestSolution.isEmpty()) { int highestCount = extraDensityInfo.getMaxNodesInDensityMapGridElement(); // inform user about possible better options? double ratio = (double) highestCount / maxNodes; if (ratio > 4) System.err.printf( "max-nodes value %d is far below highest node count %d in single grid element, consider using a higher resolution.%n", maxNodes, highestCount); else if (ratio > 1) System.err.printf( "max-nodes value %d is below highest node count %d in single grid element, consider using a higher resolution.%n", maxNodes, highestCount); else if (ratio < 0.25) System.err.printf( "max-nodes value %d is far above highest node count %d in single grid element, consider using a lower resolution.%n", maxNodes, highestCount); } hasEmptyPart = best.hasEmptyPart; printFinishMsg(best.bestSolution, best.searchLimit); return best.bestSolution; } private double getStartRatio(Tile startTile) { if (extraDensityInfo.getNodeCount() / maxNodes < 4) { return 32; } double startMaxAspectRatio = startTile.getAspectRatio(); if (startMaxAspectRatio < 1) startMaxAspectRatio = 1 / startMaxAspectRatio ; if (startMaxAspectRatio < NICE_MAX_ASPECT_RATIO) startMaxAspectRatio = NICE_MAX_ASPECT_RATIO; return startMaxAspectRatio; } private void printFinishMsg(Solution solution, int searchLimit) { if (!beQuiet) { if (!solution.isEmpty()) { if (solution.getWorstMinNodes() > VERY_NICE_FILL_RATIO * maxNodes && solution.isNice()) System.out.println( "Solution is very nice. No need to search for a better solution: " + solution.toString()); else System.out.println("Solution is " + (solution.isNice() ? "" : "not ") + "nice. Can't find a better solution with search-limit " + searchLimit + ": " + solution.toString()); } } } private static void printFinalSplitMsg(Solution solution) { System.out.println("Final solution: " + solution.toString()); if (solution.isNice()) System.out.println("This seems to be nice."); } /** * Convert the list of Tile instances of a solution to Area instances, report * some statistics. 
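* <p>
* Each tile's grid rectangle is converted back to map units: the resulting
* rectangle starts at {@code minLon + (tile.x << shift)} / {@code minLat + (tile.y << shift)}
* and spans {@code tile.width << shift} by {@code tile.height << shift} map units.
* When a bounding polygon is given, each rectangle is intersected with it and
* replaced by the intersection only if that intersection is still rectangular.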
* * @param sol the solution * @param polygonArea the split polygon * * @return list of areas */ private List getAreas(Solution sol, java.awt.geom.Area polygonArea) { List result = new ArrayList<>(); int minLat = allDensities.getBounds().getMinLat(); int minLon = allDensities.getBounds().getMinLong(); if (polygonArea != null) { System.out.println("Trying to cut the areas so that they fit into the polygon ..."); } else { if (trimShape) sol.trimOuterTiles(); } boolean fits = true; for (Tile tile : sol.getTiles()) { if (tile.getCount() == 0) continue; if (!tile.verify()) throw new SplitFailedException("found invalid tile"); Rectangle r = new Rectangle(minLon + (tile.x << shift), minLat + (tile.y << shift), tile.width << shift, tile.height << shift); if (polygonArea != null) { java.awt.geom.Area cutArea = new java.awt.geom.Area(r); cutArea.intersect(polygonArea); if (!cutArea.isEmpty() && cutArea.isRectangular()) { r = cutArea.getBounds(); } else { fits = false; } } Area area = new Area(r.y, r.x, (int) r.getMaxY(), (int) r.getMaxX()); if (!beQuiet) { String note; if (tile.getCount() > maxNodes) note = " but is already at the minimum size so can't be split further"; else note = ""; long percentage = 100 * tile.getCount() / maxNodes; System.out.println("Area " + currMapId++ + " covers " + area + " and contains " + tile.getCount() + " nodes (" + percentage + " %)" + note); } result.add(area); } if (!fits) { System.out.println("One or more areas do not exactly fit into the bounding polygon"); } return result; } private static class Solver { private final long myMaxNodes; private boolean hasEmptyPart; private double maxAspectRatio; private int countBad; private Long minNodes; private int searchLimit; private LinkedHashMap incomplete; private Map knownBad = new HashMap<>(50_000); static final int MAX_SEARCH_LIMIT = 5_000_000; final String name; private boolean searchAll; private Solution bestSolution; private Solution smallestSolution; private boolean stopped; private long localOptMinNodes; private final Tile startTile; private int bestPossible; private long largestOptTileCount; private int largestOptSize; private long optLoops; private int[] lastGoodCounts; private final int maxTileHeight; private final int maxTileWidth; private final int stopNumber; private final boolean trimTiles; private final int startSearchLimit; private final boolean allowEmptyPart; public Solver(int id, boolean searchAll, long maxNodes, Tile startTile, int shift, int stopNumber, boolean trimTiles, int startSearchLimit, boolean allowEmptyPart) { this.searchAll = searchAll; this.myMaxNodes = maxNodes; this.startTile = startTile; this.stopNumber = stopNumber; this.trimTiles = trimTiles; this.startSearchLimit = startSearchLimit; this.allowEmptyPart = allowEmptyPart; incomplete = new LinkedHashMap<>(); bestSolution = new Solution(myMaxNodes); name = "S" + id + " " + (searchAll ? "FULL" : "SOME"); maxTileHeight = Utils.toMapUnit(MAX_LAT_DEGREES) / (1 << shift); maxTileWidth = Utils.toMapUnit(MAX_LON_DEGREES) / (1 << shift); } /** * Try to split the tile into nice parts recursively. 
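* <p>
* Outline (summarising the implementation below): the tile is either accepted
* as a single part (node count at most max-nodes and acceptable aspect ratio),
* rejected by returning {@code null}, or split. For a split, a {@link TestGenerator}
* proposes candidate positions on one axis, the tile is divided with
* splitHoriz()/splitVert(), both halves are solved recursively (the less
* populated half first) and the two sub-solutions are merged. Tiles for which
* no split works are remembered in the {@code knownBad} cache.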
* * @param depth the recursion depth * @param tile the tile to be split * @param smiParent meta info for parent tile * @return a solution instance or null */ private Solution findSolution(int depth, final Tile tile, Tile parent, TileMetaInfo smiParent) { if (stopped) return null; boolean addAndReturn = false; if (tile.getCount() == 0) { if (!allowEmptyPart) { hasEmptyPart = true; return null; } if (tile.width * tile.height <= 4) return null; return new Solution(myMaxNodes); // allow empty part of the world } else if (tile.getCount() > myMaxNodes && tile.width == 1 && tile.height == 1) { addAndReturn = true; // can't split further } else if (tile.getCount() < minNodes && depth == 0) { addAndReturn = true; // nothing to do } else if (tile.getCount() < minNodes) { return null; } else if (tile.getCount() <= myMaxNodes) { double ratio = tile.getAspectRatio(); if (ratio < 1.0) ratio = 1.0 / ratio; if (ratio <= maxAspectRatio) { if (stopNumber > 0 || myMaxNodes >= LARGE_MAX_NODES || checkSize(tile)) addAndReturn = true; } else { return null; } } else if (tile.width < 2 && tile.height < 2) { return null; } if (addAndReturn) { if (depth > 0 && smiParent.getNumOutside() > MAX_OUTSIDE_RATIO * tile.width * tile.height && !tile.outsideRatioIsOK(MAX_OUTSIDE_RATIO)) { return null; } Solution solution = new Solution(myMaxNodes); solution.add(tile); // can't split further return solution; } if (tile.getCount() < minNodes * 2) { return null; } if (!trimTiles && tile.getMinParts(myMaxNodes) * minNodes > tile.getCount()) { return null; } // we have to split the tile Integer alreadyDone = null; if (countBad == 0 && !incomplete.isEmpty()) { alreadyDone = incomplete.remove(tile); if (alreadyDone == null) incomplete.clear(); // rest is not useful } final boolean isCacheCandidate = depth > 0 && tile.width * tile.height > MIN_TILE_AREA_BAD_CACHE; if (alreadyDone == null && isCacheCandidate) { Long x = knownBad.get(tile); if (x != null && x <= minNodes) { return null; } } // copy the existing density info from parent // typically, at least one half can be re-used TileMetaInfo smi = new TileMetaInfo(tile, parent, smiParent); smi.setMinNodes(minNodes); // we have to split the tile TestGenerator generator = new TestGenerator(searchAll, tile, smi); int countDone = 0; Solution bestSol = null; while (generator.hasNext()) { int splitPos = generator.next(); countDone++; if (alreadyDone != null && countDone <= alreadyDone.intValue()) { continue; } // create the two parts of the tile int axis = generator.getAxis(); boolean ok = axis == AXIS_HOR ? 
tile.splitHoriz(splitPos, smi) : tile.splitVert(splitPos, smi); if (!ok) continue; Tile[] parts = smi.getParts(); if (parts[0].getCount() > parts[1].getCount()) { // first try the less populated part Tile help = parts[0]; parts[0] = parts[1]; parts[1] = help; } Solution[] sols = new Solution[2]; int countOK = 0; for (int i = 0; i < 2; i++) { if (trimTiles && smi.getNumOutside() > 0) { parts[i] = parts[i].trim(); } // depth first recursive search if (incomplete.isEmpty() || incomplete.containsKey(parts[i])) { sols[i] = findSolution(depth + 1, parts[i], tile, smi); if (sols[i] == null) { countBad++; break; } countOK++; } } if (countOK == 2) { Solution sol = sols[0]; sol.merge(sols[1]); if (bestSol == null || bestSol.compareTo(sol) > 0) bestSol = sol; if (depth > 0 || tile.getCount() > 2 * myMaxNodes) break; // we found a valid split } else if (countBad >= searchLimit) { if (DEBUG) System.out.println(name + ": limit reached " + depth + " min-nodes " + minNodes); if (depth < MAX_DEPTH_STATS) lastGoodCounts[depth] = -1; incomplete.put(tile, countDone - 1); break; } } if (depth < MAX_DEPTH_STATS && countBad < searchLimit) { lastGoodCounts[depth] = countDone; } smi.propagateToParent(smiParent, tile, parent); if (bestSol == null && countBad < searchLimit && isCacheCandidate) { Long x = knownBad.get(tile); if (x == null || x > minNodes) knownBad.put(tile, minNodes); } // check if we should perform a local optimisation if (bestSol != null && localOptMinNodes > 0 && bestSol.size() > 2 && bestSol.size() <= 32) { long backupMinNodes = minNodes; boolean backupSearchAll = searchAll; int backupCountBad = countBad; int min = tile.getMinParts(myMaxNodes); int oldSize = bestSol.size(); while (bestSol.size() > min) { localOptMinNodes = Math.max(tile.getCount() / bestSol.size(), bestSol.getWorstMinNodes() + 1); minNodes = localOptMinNodes; searchAll = false; countBad = 0; Solution sol2 = findSolution(depth, tile, parent, smiParent); if(DEBUG) { if (countBad > 200_000) { System.out.println(name + ": bad opt? tile " + tile + " required " + countBad + " bad tests for " + sol2); } } optLoops++; minNodes = backupMinNodes; searchAll = backupSearchAll; if (sol2 != null && sol2.isSmallerOrBetter(bestSol)) { if (tile.getCount() > largestOptTileCount) largestOptTileCount = tile.getCount(); if (oldSize > largestOptSize) largestOptSize = oldSize; bestSol = sol2; // we found a better split } else break; } countBad = backupCountBad; } return bestSol; } private boolean checkSize(Tile tile) { return tile.height <= maxTileHeight && tile.width <= maxTileWidth; } @Override public String toString() { return name + " for tile " +startTile; } public void solve() { bestPossible = stopNumber > 0 ? stopNumber : startTile.getMinParts(myMaxNodes); solve0(); if (smallestSolution.isSmallerOrBetter(bestSolution)) bestSolution = smallestSolution; System.out.println(name + " goal was " + bestPossible + " tiles, solver " + (stopped ? 
"was stopped" : "finished") + " with : " + bestSolution.toString()); knownBad.clear(); incomplete.clear(); } private void solve0() { knownBad.clear(); lastGoodCounts = new int[MAX_DEPTH_STATS]; bestSolution = new Solution(myMaxNodes); smallestSolution = new Solution(myMaxNodes); minNodes = Math.max(Math.min((long) (0.05 * myMaxNodes), startTile.getLargestInfo()), 1); searchLimit = startSearchLimit; TileMetaInfo smiStart = new TileMetaInfo(startTile, null, null); final long veryNiceMinNodes = (long) (VERY_NICE_FILL_RATIO * myMaxNodes); boolean clearIncomplete = false; for (int numLoops = 1; numLoops < MAX_LOOPS && !stopped; numLoops++) { if (clearIncomplete) { incomplete.clear(); } // store values to be able to detect progress double saveMaxAspectRatio = maxAspectRatio; long saveMinNodes = minNodes; countBad = 0; final String dbgPrefix = name + ": step " + numLoops; if (DEBUG) { System.out.println(dbgPrefix + " searching for split with min-nodes " + minNodes + ", cache size " + Utils.format(knownBad.size())); } smiStart.setMinNodes(minNodes); int oldCacheSize = knownBad.size(); largestOptTileCount = 0; largestOptSize = 0; Solution solution = findSolution(0, startTile, startTile, smiStart); if (stopped) return; if (DEBUG) { System.out.println(dbgPrefix + " positions " + Arrays.toString(lastGoodCounts)); if (optLoops > 0) { System.out.println(dbgPrefix + " local opt. runs: " + optLoops + ", worked up to count " + Utils.format(largestOptTileCount) + ", worked up to old size " + largestOptSize ); } } if (solution != null) { if (solution.isSmallerOrBetter(smallestSolution)) { smallestSolution = solution; } if (solution.size() < stopNumber) { minNodes = (bestSolution.getWorstMinNodes() + solution.getWorstMinNodes()) / 2; if(minNodes != saveMinNodes) continue; solution = null; } boolean foundBetter = bestSolution.compareTo(solution) > 0; if (solution != null) { if (foundBetter ) { Solution prevBest = bestSolution; bestSolution = solution; System.out.println(dbgPrefix+ " goal: " + bestPossible + " tiles, now: " + bestSolution + ", cache size " + Utils.format(knownBad.size())); // change criteria to find a better(nicer) result double factor = 1.10; if (!prevBest.isEmpty() && prevBest.isNice()) factor = Math.min(1.30, (double) bestSolution.getWorstMinNodes() / prevBest.getWorstMinNodes()); minNodes = Math.max(myMaxNodes / 3, (long) (bestSolution.getWorstMinNodes() * factor)); if (localOptMinNodes == 0) { // enable local optimisation minNodes = bestSolution.getWorstMinNodes() + 1; localOptMinNodes = minNodes; } } else { minNodes = solution.getWorstMinNodes() + 1; } } } else if (!bestSolution.isEmpty() && minNodes > bestSolution.getWorstMinNodes() + 1) { // reduce minNodes minNodes = (bestSolution.getWorstMinNodes() + minNodes) / 2; if (minNodes < bestSolution.getWorstMinNodes() * 1.001) minNodes = bestSolution.getWorstMinNodes() + 1; } else if (!searchAll && oldCacheSize < knownBad.size()) { if (bestSolution.isEmpty()) { clearIncomplete = false; continue; } } if (!bestSolution.isEmpty() ) { if (stopNumber * 0.95 > bestSolution.getTiles().size()) return; if (bestSolution.size() == 1) return; if (bestSolution.size() == bestPossible && numLoops > 6) { return; } } if (stopNumber == 0 && minNodes > veryNiceMinNodes) minNodes = veryNiceMinNodes; clearIncomplete = true; maxAspectRatio = Math.min(32, Math.max(bestSolution.getWorstAspectRatio() / 2, NICE_MAX_ASPECT_RATIO)); if (saveMaxAspectRatio == maxAspectRatio && saveMinNodes == minNodes) { // no improvement found boolean tryAgain = false; if 
(bestSolution.isEmpty() || bestSolution.getWorstMinNodes() < 0.5 * myMaxNodes) { // try to improve by adjusting threshold values if (countBad > searchLimit && searchLimit < MAX_SEARCH_LIMIT) { searchLimit *= 2; knownBad.clear(); clearIncomplete = false; System.out.println(name + ": No good solution found, duplicated search-limit to " + searchLimit); tryAgain = true; } else if (bestSolution.isEmpty() && minNodes > 1) { minNodes = 1L; searchLimit = searchAll? startSearchLimit : Math.max(MAX_SEARCH_LIMIT, startSearchLimit); // sanity check System.out.println(name + ": No good solution found, trying to find one accepting anything"); tryAgain = true; } else if (!bestSolution.isEmpty() && smallestSolution.size() < bestSolution.size() && minNodes != smallestSolution.getWorstMinNodes() + 1) { minNodes = smallestSolution.getWorstMinNodes() + 1; if (DEBUG) { System.out.println(name + ": Trying to improve smallest solution"); } tryAgain = true; } } if (!tryAgain) { return; } } } } void stop() { stopped = true; } private class TestGenerator { final boolean searchAll; int axis; final Tile tile; final TileMetaInfo smi; int countAxis; int usedTestPos; private IntArrayList todoList; public TestGenerator(boolean searchAll, Tile tile, TileMetaInfo smi) { this.searchAll = searchAll; this.tile = tile; this.smi = smi; axis = (tile.getAspectRatio() >= 1.0) ? AXIS_HOR : AXIS_VERT; todoList = generateTestCases(); } boolean hasNext() { if (usedTestPos >= todoList.size()) { countAxis++; if (countAxis > 1) return false; axis = axis == AXIS_HOR ? AXIS_VERT : AXIS_HOR; todoList = generateTestCases(); usedTestPos = 0; } return usedTestPos < todoList.size(); } int next() { return todoList.get(usedTestPos++); } public int getAxis() { return axis; } IntArrayList generateTestCases() { final int start = (axis == AXIS_HOR) ? tile.findValidStartX(smi) : tile.findValidStartY(smi); final int end = (axis == AXIS_HOR) ? tile.findValidEndX(smi) : tile.findValidEndY(smi); final int mid = (start + end) / 2; final int range = end - start; if (searchAll || range < 4) { return Tile.genTests(start, end); } double ratio = tile.getAspectRatio(); IntArrayList tests = new IntArrayList(); if (range < 0 || ratio < 1.0 / 32 || ratio > 32 || ratio < 1.0 / 16 && axis == AXIS_HOR || ratio > 16 && axis == AXIS_VERT) { return tests; } else if (range == 0) { tests.add(start); return tests; } if (range > 1024 && (axis == AXIS_HOR && tile.width >= maxTileWidth || axis == AXIS_VERT && tile.height >= maxTileHeight)) { // large tile, just split at a few valid positions for (int i = 5; i > 1; --i) tests.add(start + range / i); } else if (tile.getCount() < myMaxNodes * 4 && range > 256) { // large tile with rather few nodes, allow more tests int step = (range) / 20; for (int pos = start; pos <= end; pos += step) tests.add(pos); if (tests.get(tests.size() - 1) != end) tests.add(end); } else if (tile.getCount() > myMaxNodes * 4) { int step = range / 7; // 7 turned out to be a good value here if (step < 1) step = 1; for (int pos = start; pos <= end; pos += step) tests.add(pos); } else { // count <= 4 * max, this will be one of the last splits long minCount = smi.getNumOutside() > 0 ? tile.countInside() : tile.getCount(); int nMin = (int) (minCount / myMaxNodes); if (nMin * myMaxNodes < minCount) nMin++; if (nMin == 0) { nMin++; } long limit = nMin == 0? 
1 : minCount / nMin; double dMin = (double) minCount / myMaxNodes; final int around; if ((dMin > 1.8 && dMin < 2.0 && ratio > 0.125 && ratio < 8) || (dMin > 2.8 && dMin < 3.0)) { around = tile.findFirstHigher(axis, smi, limit); } else if (dMin > 3.8) { around = tile.findFirstHigher(axis, smi, 2 * limit); } else { around = -1; } if (around >= 0) { // this is likely to be a good split, generate more cases final int numAround = 20; final int p1 = Math.max(start, around - numAround / 2); final int p2 = Math.min(end, around + numAround / 2); final int toAdd = p2 - p1; if (toAdd > 16) tests = new IntArrayList(toAdd); for (int i = p1; i <= p2; i++) { tests.add(i); } tests.sort((o1, o2) -> Integer.compare(Math.abs(o1 - around), Math.abs(o2 - around))); return tests; } if (nMin == 4) tests.add(tile.findFirstHigher(axis, smi, 2 * limit)); tests.add(tile.findFirstHigher(axis, smi, limit)); } if (tests.size() > 4) { tests.sort((o1, o2) -> Integer.compare(Math.abs(o1 - mid), Math.abs(o2 - mid))); if (tests.getInt(0) != mid) { tests.add(0, mid); } } return tests; } } } } splitter-r653/src/uk/me/parabola/splitter/solver/Tile.java0000664000175300017530000005003614352507254025052 0ustar builderbuilder00000000000000/* * Copyright (c) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.solver; import java.awt.Rectangle; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import it.unimi.dsi.fastutil.ints.IntArrayList; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.SplitFailedException; import uk.me.parabola.splitter.Utils; /** * This class implements a "view" on a rectangle covering a part * of a {@link DensityMap}. * It contains the sum of all nodes in this area and has methods to * help splitting it into smaller parts. * * We want to keep the memory footprint of this class small as * many instances are kept in maps. * @author GerdP * */ class Tile extends Rectangle{ /** * */ private final EnhancedDensityMap densityInfo; private final long count; /** * Create tile for whole density map. * @param densityInfo */ public Tile(EnhancedDensityMap densityInfo) { this(densityInfo, 0, 0, densityInfo.getDensityMap().getWidth(), densityInfo.getDensityMap().getHeight(), densityInfo.getNodeCount()); } /** * create a tile with unknown number of nodes * @param r the rectangle * @param densityInfo */ public Tile(EnhancedDensityMap densityInfo, Rectangle r) { super(r); if (r.x < 0 || r.y < 0 || r.x + r.width > densityInfo.getDensityMap().getWidth() || r.y + r.height > densityInfo.getDensityMap().getHeight()) throw new IllegalArgumentException("Rectangle doesn't fit into density map"); this.densityInfo = densityInfo; count = calcCount(); } /** * create a tile with a known number of nodes * @param densityInfo * @param x * @param y * @param width * @param height * @param count caller must ensure that this value is correct. 
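*              Typically this is a sum that was already computed by a previous
*              splitHoriz()/splitVert() or trim() call, so that the grid does
*              not have to be summed up again.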
See also verify() */ private Tile(EnhancedDensityMap densityInfo, int x,int y, int width, int height, long count) { super(x,y,width,height); this.densityInfo = densityInfo; this.count = count; // if (!verify()){ // System.out.println(count + " <> " + calcCount()); // assert false; // } } public long getCount() { return count; } /** * @return true if the saved count value is correct. */ public boolean verify(){ return (getCount() == calcCount()); } public static IntArrayList genTests(final int start, final int end) { if (end - start < 0) return new IntArrayList(0); final int mid = (start + end) / 2; final int toAdd = end - start + 1; IntArrayList list = new IntArrayList(toAdd); list.add(mid); for (int i = 1; i < toAdd / 2; i++) { list.add(mid + i); list.add(mid - i); } if (list.size() < toAdd) list.add(end); if (list.size() < toAdd) list.add(start); return list; } /** * calculate the number of nodes in this tile * @return */ private long calcCount() { long sum = 0; for (int i = 0; i < height; i++) { sum += getRowSum(i); } return sum; } /** * Calculate the sum of all grid elements within a row * * @param row the row within the tile (0..height-1) * @return */ public long getRowSum(int row) { assert row >= 0 && row < height; int mapRow = row + y; long sum = 0; int[] vector = densityInfo.getMapRow(mapRow); if (vector != null) { final int lastX = x + width; for (int i = x; i < lastX; i++) sum += vector[i]; } return sum; } private long getRowSum(int row, long[] rowSums) { if (rowSums[row] < 0) rowSums[row] = getRowSum(row); return rowSums[row]; } /** * Calculate the sum of all grid elements within a column. * * @param col the column within the tile * @return */ public long getColSum(int col) { assert col >= 0 && col < width; int mapCol = col + x; long sum = 0; int[] vector = densityInfo.getMapCol(mapCol); if (vector != null) { final int lastY = y + height; for (int i = y; i < lastY; i++) sum += vector[i]; } return sum; } private long getColSum(int col, long[] colSums) { if (colSums[col] < 0) colSums[col] = getColSum(col); return colSums[col]; } /** * Find first y so that sums of columns for 0-y is > count/2 * Update corresponding fields in smi. * * @param smi fields firstNonZeroX, horMidPos and horMidSum may be updated * @return true if the above fields are usable */ public int findHorizontalMiddle(TileMetaInfo smi) { if (getCount() == 0 || width < 2) smi.setHorMidPos(0); else if (smi.getHorMidPos() < 0) { int start = (smi.getFirstNonZeroX() > 0) ? smi.getFirstNonZeroX() : 0; long sum = 0; long lastSum = 0; long target = getCount()/2; for (int pos = start; pos <= width; pos++) { lastSum = sum; sum += getColSum(pos, smi.getColSums()); if (sum == 0) continue; if (lastSum <= 0) smi.setFirstNonZeroX(pos); if (sum > target){ if (sum - target < target - lastSum && pos + 1 < width){ smi.setHorMidPos(pos+1); smi.setHorMidSum(sum); } else { smi.setHorMidPos(pos); smi.setHorMidSum(lastSum); } break; } } } return smi.getHorMidPos(); } /** * Find first x so that sums of rows for 0-x is > count/2. * Update corresponding fields in smi. * @param smi fields firstNonZeroY, vertMidPos, and vertMidSum may be updated * @return true if the above fields are usable */ public int findVerticalMiddle(TileMetaInfo smi) { if (getCount() == 0 || height < 2) smi.setVertMidPos(0); else if (smi.getVertMidPos() < 0) { long sum = 0; long lastSum; long target = getCount()/2; int start = (smi.getFirstNonZeroY() > 0) ? 
smi.getFirstNonZeroY() : 0; for (int pos = start; pos <= height; pos++) { lastSum = sum; sum += getRowSum(pos, smi.getRowSums()); if (sum == 0) continue; if (lastSum <= 0) smi.setFirstNonZeroY(pos); if (sum > target) { if (sum - target < target - lastSum && pos + 1 < height) { smi.setVertMidPos(pos + 1); smi.setVertMidSum(sum); } else { smi.setVertMidPos(pos); smi.setVertMidSum(lastSum); } break; } } } return smi.getVertMidPos(); } /** * Split at the given horizontal position. * @param splitX the horizontal split line * @return true if result in smi is OK */ public boolean splitHoriz(int splitX, TileMetaInfo smi) { if (splitX <= 0 || splitX >= width) return false; long sum = 0; if (splitX <= width / 2){ int start = (smi.getFirstNonZeroX() > 0) ? smi.getFirstNonZeroX() : 0; for (int pos = start; pos < splitX; pos++) { sum += getColSum(pos, smi.getColSums()); } } else { int end = (smi.getLastNonZeroX() > 0) ? smi.getLastNonZeroX() + 1: width; for (int pos = splitX; pos < end; pos++) { sum += getColSum(pos, smi.getColSums()); } sum = getCount() - sum; } if (sum < smi.getMinNodes() || getCount() - sum < smi.getMinNodes()) return false; assert splitX > 0 && splitX < width; Tile[] parts = smi.getParts(); parts[0] = new Tile(densityInfo, x, y, splitX, height, sum); parts[1] = new Tile(densityInfo, x + splitX, y, width - splitX,height, getCount() - sum); assert smi.getParts()[0].width + smi.getParts()[1].width == this.width; return true; } /** * Split at the given vertical position. * @param splitY the vertical split line * @return true if result in smi is OK */ public boolean splitVert(int splitY, TileMetaInfo smi) { if (splitY <= 0 || splitY >= height) return false; long sum = 0; if (splitY <= height / 2){ int start = (smi.getFirstNonZeroY() > 0) ? smi.getFirstNonZeroY() : 0; for (int pos = start; pos < splitY; pos++) { sum += getRowSum(pos, smi.getRowSums()); } } else { int end = (smi.getLastNonZeroY() > 0) ? smi.getLastNonZeroY()+1 : height; for (int pos = splitY; pos < end; pos++) { sum += getRowSum(pos, smi.getRowSums()); } sum = getCount() - sum; } if (sum < smi.getMinNodes() || getCount() - sum < smi.getMinNodes()) return false; assert splitY > 0 && splitY < height; Tile[] parts = smi.getParts(); parts[0] = new Tile(densityInfo, x, y, width, splitY, sum); parts[1] = new Tile(densityInfo, x, y + splitY, width, height- splitY, getCount()- sum); assert parts[0].height + parts[1].height == this.height; return true; } /** * * @param smi * @return lowest horizontal position at which a split will work regarding minNodes */ public int findValidStartX(TileMetaInfo smi) { if (smi.getValidStartX() >= 0) return smi.getValidStartX(); long sum = 0; int start = (smi.getFirstNonZeroX() > 0) ? smi.getFirstNonZeroX() : 0; for (int i = start; i < width; i++) { sum += getColSum(i, smi.getColSums()); if (sum == 0) continue; if (smi.getFirstNonZeroX() < 0) smi.setFirstNonZeroX(i); if (sum >= smi.getMinNodes()) { int splitPos = i + 1; smi.setValidStartX(splitPos); return splitPos; } } smi.setValidStartX(width); return width; } /** * * @param smi * @return highest position at which all columns on the right have a sum < minNodes */ public int findValidEndX(TileMetaInfo smi) { if (smi.getValidEndX() < 0){ int end = smi.getLastNonZeroX() > 0 ? 
smi.getLastNonZeroX() : width - 1; long sum = 0; for (int i = end; i >= 0; --i) { sum += getColSum(i, smi.getColSums()); if (sum > 0 && smi.getLastNonZeroX() < 0) smi.setLastNonZeroX(i); if (sum >= smi.getMinNodes()){ smi.setValidEndX(i); break; } } } return smi.getValidEndX(); } /** * * @param smi * @return lowest vertical position at which a split will work regarding minNodes * or height if no such position exists */ public int findValidStartY(TileMetaInfo smi) { if (smi.getValidStartY() > 0) return smi.getValidStartY(); long sum = 0; int start = (smi.getFirstNonZeroY() > 0) ? smi.getFirstNonZeroY() : 0; for (int i = start; i < height; i++) { sum += getRowSum(i, smi.getRowSums()); if (sum == 0) continue; if (smi.getFirstNonZeroY() < 0) smi.setFirstNonZeroY(i); if (sum >= smi.getMinNodes()){ int splitPos = i+1; smi.setValidStartY(splitPos); return splitPos; } } smi.setValidStartY(height); return height; } /** * * @param smi * @return highest position at which all upper rows have a sum < minNodes */ public int findValidEndY(TileMetaInfo smi) { if (smi.getValidEndY() < 0){ int end = smi.getLastNonZeroY() > 0 ? smi.getLastNonZeroY() : height - 1; long sum = 0; for (int i = end; i >= 0; --i) { sum += getRowSum(i, smi.getRowSums()); if (sum > 0 && smi.getLastNonZeroY() < 0) smi.setLastNonZeroY(i); if (sum >= smi.getMinNodes()){ smi.setValidEndY(i); break; } } } return smi.getValidEndY(); } public int findFirstXHigher(TileMetaInfo smi, long limit) { long sum = 0; int start = (smi.getFirstNonZeroX() > 0) ? smi.getFirstNonZeroX() : 0; for (int i = start; i < width; i++) { sum += getColSum(i, smi.getColSums()); if (sum == 0) continue; if (smi.getFirstNonZeroX() < 0) smi.setFirstNonZeroX(i); if (sum > limit) { return i; } } return height; } public int findFirstYHigher(TileMetaInfo smi, long limit) { long sum = 0; int start = (smi.getFirstNonZeroY() > 0) ? smi.getFirstNonZeroY() : 0; for (int i = start; i < height; i++) { sum += getRowSum(i, smi.getRowSums()); if (sum == 0) continue; if (smi.getFirstNonZeroY() < 0) smi.setFirstNonZeroY(i); if (sum > limit) { return i; } } return height; } public int findFirstHigher(int axis, TileMetaInfo smi, long limit) { return axis == SplittableDensityArea.AXIS_HOR ? findFirstXHigher(smi, limit) : findFirstYHigher(smi, limit); } /** * * @return aspect ratio of this tile */ public double getAspectRatio() { return densityInfo.getAspectRatio(this); } /** * Calculate the trimmed tile so that it has no empty outer rows or columns. * Does not change the tile itself. * @return the trimmed version of the tile. */ public Tile trim() { long sumRemovedColCounts = 0; long sumRemovedRowCounts = 0; int minX = -1; for (int i = 0; i < width; i++) { long colSum = getColSum(i); boolean needed = (densityInfo.getPolygonArea() == null) ? colSum > 0 : !colOutsidePolygon(i); if (needed) { minX = x + i; break; } sumRemovedColCounts += colSum; } int maxX = -1; for (int i = width - 1; i >= 0; i--) { long colSum = getColSum(i); boolean needed = (densityInfo.getPolygonArea() == null) ? colSum > 0 : !colOutsidePolygon(i); if (needed) { maxX = x + i; break; } sumRemovedColCounts += colSum; } int minY = -1; for (int i = 0; i < height; i++) { long rowSum = getRowSum(i); boolean needed = (densityInfo.getPolygonArea() == null) ? rowSum > 0 : !rowOutsidePolygon(i); if (needed) { minY = y + i; break; } sumRemovedRowCounts += rowSum; } int maxY = -1; for (int i = height - 1; i >= 0; i--) { long rowSum = getRowSum(i); boolean needed = (densityInfo.getPolygonArea() == null) ? 
rowSum > 0 : !rowOutsidePolygon(i); if (needed) { maxY = y + i; break; } sumRemovedRowCounts += rowSum; } if (minX > maxX || minY > maxY || maxX < 0 || maxY < 0) { return new Tile(densityInfo, x, y, 0, 0, 0); } long newCount = getCount(); int modWidth = maxX - minX + 1; int modHeight = maxY - minY + 1; if (densityInfo.getPolygonArea() != null) { if (modWidth != width || modHeight != height) { // tile was trimmed, try hard to avoid a new costly calculation of the count // value if (width == modWidth) { newCount = getCount() - sumRemovedRowCounts; } else if (height == modHeight) { newCount = getCount() - sumRemovedColCounts; } else { // worst case: recalculate return new Tile(densityInfo, new Rectangle(minX, minY, modWidth, modHeight)); } } } return new Tile(densityInfo, minX, minY, modWidth, modHeight, newCount); } private boolean rowOutsidePolygon(int row) { if (densityInfo.getPolygonArea() == null) return false; // performance critical part, check corners first if (densityInfo.isGridElemInPolygon(x, y + row) || densityInfo.isGridElemInPolygon(x + width-1, y + row)) return false; // check rest of row for (int i = 1; i < width-1; i++) { if (densityInfo.isGridElemInPolygon(x + i, y + row)) return false; } return true; } private boolean colOutsidePolygon(int col) { if (densityInfo.getPolygonArea() == null) return false; // performance critical part, check corners first if (densityInfo.isGridElemInPolygon(x + col, y) || densityInfo.isGridElemInPolygon(x + col, y + height - 1)) return false; // check rest of column for (int i = 1; i < height - 1; i++) { if (densityInfo.isGridElemInPolygon(x + col, y + i)) return false; } return true; } public boolean outsidePolygon(){ java.awt.geom.Area polygonArea = densityInfo.getPolygonArea(); return polygonArea != null && !polygonArea.intersects(getRealBBox()); } /** * * Check if enough grid elements are inside the polygon. 
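* For example, with {@code maxOutsideRatio = 0.5} the method returns true as
* soon as at least half of the tile's grid elements are found inside the
* polygon, and false as soon as at least half are found outside.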
* @param maxOutsideRatio the wanted ratio * @return true if the ratio inside/outside is greater than the given value */ public boolean outsideRatioIsOK(final double maxOutsideRatio) { if (densityInfo.allInsidePolygon()) return true; Rectangle realBBox = getRealBBox(); // check special case: tile may contain the polygon Rectangle polyBBox = densityInfo.getPolygonArea().getBounds(); if (realBBox.contains(polyBBox)) { return true; } final long maxOutsde = (long) (maxOutsideRatio * (width * height)); final long neededInside = width * height - maxOutsde; int countInside = 0; int countOutside= 0; for (int i = x; i < x + width; i++) { for (int j = y; j < y + height; j++) { if (densityInfo.isGridElemInPolygon(i, j)) { if (++countInside >= neededInside) return true; } else { if (++countOutside >= maxOutsde) return false; } } } return false; } public Rectangle getRealBBox(){ int shift = densityInfo.getDensityMap().getShift(); int polyYPos = densityInfo.getDensityMap().getBounds().getMinLat() + (y << shift); int polyXPos = densityInfo.getDensityMap().getBounds().getMinLong() + (x << shift); return new Rectangle(polyXPos, polyYPos, width< divide(long maxNodes) { if (getCount() < maxNodes) return Arrays.asList(this); List parts = new ArrayList<>(2); TileMetaInfo smi = new TileMetaInfo(this, null, null); smi.setMinNodes(1); // don't create tiles with 0 nodes boolean ok = false; if (width > height) { int start = findValidStartX(smi); int end = findValidEndX(smi); int mid = (start + end) / 2; ok = splitHoriz(mid, smi); } else { int start = findValidStartY(smi); int end = findValidEndY(smi); int mid = (start + end) / 2; ok = splitVert(mid, smi); } if (ok) { for (Tile part : smi.getParts()) { parts.addAll(part.divide(maxNodes)); } } else { parts.add(this); } return parts; } public double getFillRatio() { return (double)getCount() / (width * height); } /** * Find largest count in single grid element. Might be outside of the polygon. * @return largest count in single grid element */ int getLargestInfo() { int largest = 0; for (int i = 0; i < width; i++) { final int[] col = densityInfo.getMapCol(x + i); if (col != null) { for (int k = 0; k < height; k++) { int n = col[y+k]; if (n > largest) { largest = n; } } } } return largest; } } splitter-r653/src/uk/me/parabola/splitter/solver/TileMetaInfo.java0000664000175300017530000001745214352507254026502 0ustar builderbuilder00000000000000/* * Copyright (C) 2014, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.solver; import java.util.Arrays; /** * A helper class to store all kind of * information which cannot be easily calculated * @author GerdP */ class TileMetaInfo { private long minNodes; private final long[] rowSums; private final long[] colSums; private final Tile[] parts = new Tile[2]; private int validStartX = -1; private int validStartY = -1; private int firstNonZeroX = -1; private int firstNonZeroY = -1; private int lastNonZeroX = -1; private int lastNonZeroY = -1; private long vertMidSum = -1; private long horMidSum = -1; private int vertMidPos = -1; private int horMidPos = -1; private int validEndX = -1; private int validEndY = -1; private int numOutside = -1; /** * Copy information from parent tile to child. Reusing these values * saves a lot of time. * @param tile * @param parent * @param smiParent */ public TileMetaInfo(Tile tile, Tile parent, TileMetaInfo smiParent) { rowSums = new long[tile.height]; colSums = new long[tile.width]; if (parent != null && parent.width == tile.width){ int srcPos = tile.y - parent.y; System.arraycopy(smiParent.rowSums, srcPos, rowSums, 0, rowSums.length); if (srcPos == 0) firstNonZeroY = smiParent.firstNonZeroY; } else Arrays.fill(rowSums, -1); if (parent != null && parent.height == tile.height){ int srcPos = tile.x - parent.x; System.arraycopy(smiParent.colSums, srcPos, colSums, 0, colSums.length); if (srcPos == 0) firstNonZeroX = smiParent.firstNonZeroX; } else Arrays.fill(colSums, -1); if (smiParent != null) { this.minNodes = smiParent.minNodes; if (smiParent.getNumOutside() == 0) numOutside = 0; } if (numOutside < 0) { numOutside = tile.countElemsOutside(); } } /** * Set new minNodes value. This invalidates cached values if the value is * different to the previously used one. 
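* The invalidated values are the cached split positions validStartX, validStartY,
* validEndX and validEndY, which depend on the minNodes threshold; the cached
* row/column sums and the first/last non-zero positions remain valid.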
* @param minNodes */ public void setMinNodes(long minNodes){ if (this.minNodes == minNodes) return; this.minNodes = minNodes; this.validStartX = -1; this.validStartY = -1; this.validEndX = -1; this.validEndY = -1; } public int getValidStartX() { return validStartX; } public void setValidStartX(int validStartX) { this.validStartX = validStartX; } public int getValidStartY() { return validStartY; } public void setValidStartY(int validStartY) { this.validStartY = validStartY; } public int getFirstNonZeroX() { return firstNonZeroX; } public void setFirstNonZeroX(int firstNonZeroX) { this.firstNonZeroX = firstNonZeroX; } public int getFirstNonZeroY() { return firstNonZeroY; } public void setFirstNonZeroY(int firstNonZeroY) { this.firstNonZeroY = firstNonZeroY; } public int getLastNonZeroX() { return lastNonZeroX; } public void setLastNonZeroX(int lastNonZeroX) { this.lastNonZeroX = lastNonZeroX; } public int getLastNonZeroY() { return lastNonZeroY; } public void setLastNonZeroY(int lastNonZeroY) { this.lastNonZeroY = lastNonZeroY; } public long getVertMidSum() { return vertMidSum; } public void setVertMidSum(long vertMidSum) { this.vertMidSum = vertMidSum; } public long getHorMidSum() { return horMidSum; } public void setHorMidSum(long horMidSum) { this.horMidSum = horMidSum; } public int getVertMidPos() { return vertMidPos; } public void setVertMidPos(int vertMidPos) { this.vertMidPos = vertMidPos; } public int getHorMidPos() { return horMidPos; } public void setHorMidPos(int horMidPos) { this.horMidPos = horMidPos; } public long getMinNodes() { return minNodes; } public long[] getRowSums() { return rowSums; } public long[] getColSums() { return colSums; } public Tile[] getParts() { return parts; } public int getValidEndX() { return validEndX; } public void setValidEndX(int pos) { this.validEndX = pos; } public int getValidEndY() { return validEndY; } public void setValidEndY(int pos) { this.validEndY = pos; } public int getNumOutside() { return numOutside; } public void setNumOutside(int numOutside) { this.numOutside = numOutside; } /** * Copy the information back from child to parent so that next child has more info. 
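* Row sums are copied back when the child spans the full width of the parent,
* column sums when it spans the full height; first/last non-zero positions and
* valid split positions are translated by the child's offset within the parent
* where possible.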
* @param smiParent * @param tile * @param parent */ void propagateToParent(TileMetaInfo smiParent, Tile tile, Tile parent){ if (parent.width == tile.width){ int destPos = tile.y - parent.y; System.arraycopy(this.rowSums, 0, smiParent.rowSums, destPos, this.rowSums.length); if (destPos == 0) { if (smiParent.firstNonZeroY < 0 && this.firstNonZeroY >= 0) smiParent.firstNonZeroY = this.firstNonZeroY; if (smiParent.validStartY < 0 && this.validStartY >= 0) smiParent.validStartY = this.validStartY; } else { if (smiParent.lastNonZeroY < 0 && this.lastNonZeroY >= 0){ smiParent.lastNonZeroY = destPos + this.lastNonZeroY; assert smiParent.lastNonZeroY <= parent.height; } if (smiParent.validEndY < 0 && this.validEndY >= 0){ smiParent.validEndY = destPos + this.validEndY; assert smiParent.validEndY <= parent.height; } } } if (parent.height == tile.height){ int destPos = tile.x - parent.x; System.arraycopy(this.colSums, 0, smiParent.colSums, destPos, this.colSums.length); if (destPos == 0) { if (smiParent.firstNonZeroX < 0 && this.firstNonZeroX >= 0) smiParent.firstNonZeroX = this.firstNonZeroX; if (smiParent.validStartX < 0 && this.validStartX >= 0) smiParent.validStartX = this.validStartX; } else { if (smiParent.lastNonZeroX < 0 && this.lastNonZeroX >= 0){ smiParent.lastNonZeroX = destPos + this.lastNonZeroX; assert parent.getColSum(smiParent.lastNonZeroX) > 0; } if (smiParent.validEndX < 0 && this.validEndX >= 0){ smiParent.validEndX = destPos + this.validEndX; assert smiParent.validEndX <= parent.width; } } } // verify(tile); // smiParent.verify(parent); } boolean verify(Tile tile){ if (firstNonZeroX >= 0){ assert tile.getColSum(firstNonZeroX) > 0; for (int i = 0; i < firstNonZeroX; i++) assert tile.getColSum(i) == 0; } if (lastNonZeroX >= 0){ assert tile.getColSum(lastNonZeroX) > 0; for (int i = lastNonZeroX+1; i < tile.width; i++) assert tile.getColSum(i) == 0; } if (validEndX >= 0){ long sum = 0; for (int i = validEndX; i < tile.width; i++){ sum += tile.getColSum(i); } assert sum >= minNodes; assert sum - tile.getColSum(validEndX) < minNodes; } if (validStartX >= 0){ long sum = 0; for (int i = 0; i < validStartX; i++){ sum += tile.getColSum(i); } assert sum < minNodes; assert sum + tile.getColSum(validStartX) >= minNodes; } if (firstNonZeroY >= 0){ assert tile.getRowSum(firstNonZeroY) > 0; for (int i = 0; i < firstNonZeroY; i++) assert tile.getRowSum(i) == 0; } if (lastNonZeroY >= 0){ assert tile.getRowSum(lastNonZeroY) > 0; for (int i = lastNonZeroY+1; i < tile.height; i++) assert tile.getRowSum(i) == 0; } if (validStartY >= 0){ long sum = 0; for (int i = 0; i < validStartY; i++){ sum += tile.getRowSum(i); } assert sum < minNodes; assert sum + tile.getRowSum(validStartY) >= minNodes; } if (validEndY >= 0){ long sum = 0; for (int i = validEndY; i < tile.height; i++){ sum += tile.getRowSum(i); } assert sum >= minNodes; assert sum - tile.getRowSum(validEndY) < minNodes; } return false; } } splitter-r653/src/uk/me/parabola/splitter/tools/0000775000175300017530000000000014352507254023134 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/tools/BitReader.java0000664000175300017530000000613114352507254025641 0ustar builderbuilder00000000000000/* * Copyright (C) 2017, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.tools; /** * Read an array as a bit stream. Based on code in mkgmap. * * @author Steve Ratcliffe * @author Gerd Petermann */ public class BitReader { private final byte[] buf; /** index of the first available byte in the buffer. */ private final int offset; /** index of the current byte in the buffer. */ private int index; // /** bit position within the current byte. */ private int bitPosition; public BitReader(byte[] buf) { this(buf, 0); } public BitReader(byte[] buf, int start) { this.buf = buf; this.offset = start; reset(); } /** reset the reader for a repeated read. */ public void reset() { index = offset; bitPosition = 0; } /** set the reader to the given bit position. */ public void position(int bitPos) { index = offset + bitPos / 8; bitPosition = bitPos & 0x07; } /** set the reader to the relative bit position. */ public void skip(int bits) { position(getBitPosition() + bits); } /** get a single bit. */ public boolean get1() { int off = bitPosition; byte b = buf[index]; if (++bitPosition == 8) { bitPosition = 0; index++; } return ((b >> off) & 1) == 1; } /** get an unsigned int value using the given number of bits */ public int get(int n) { if (n == 1) { return get1() ? 1 : 0; } int nb = n + bitPosition; int shift = 0; long work = 0; do { work |= ((long)buf[index++] & 0xff) << shift; shift += 8; nb -= 8; } while (nb > 0); if (nb < 0) index--; int res = (int) (work >>> bitPosition); bitPosition = nb < 0 ? nb + 8 : 0; if (n < 32) res &= ((1 << n) - 1); return res; } /** * Get a signed quantity. * * @param n The field width, including the sign bit. * @return A signed number. */ public int sget(int n) { int res = get(n); if (n < 32) { int top = 1 << (n - 1); if ((res & top) != 0) { int mask = top - 1; res = ~mask | res; } } return res; } /** * Get a signed n-bit value, treating 1 << (n-1) as a flag to read another signed n-bit value * for extended range. */ public int sget2(int n) { assert n > 1; int top = 1 << (n - 1); int mask = top - 1; int base = 0; long res = get(n); while (res == top) { // Add to the base value, and read another base += mask; res = get(n); } // The final byte determines the sign of the result. Add or subtract the base as // appropriate. if ((res & top) == 0) res += base; else res = (res | ~mask) - base; // Make negative and subtract the base return (int) res; } public int getBitPosition() { return (index - offset) * 8 + bitPosition; } } splitter-r653/src/uk/me/parabola/splitter/tools/BitWriter.java0000664000175300017530000001076014352507254025716 0ustar builderbuilder00000000000000/* * Copyright (C) 2017, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.tools; import java.util.Arrays; /** * A class to write the bitstream. Based on code in mkgmap. 
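* <p>
* A minimal round trip together with {@link BitReader} (illustrative sketch,
* not part of the original source; the values are arbitrary):
* <pre>{@code
* BitWriter w = new BitWriter();
* w.putn(5, 3);        // unsigned value 5 in 3 bits
* w.sputn(-7, 5);      // signed value, 5 bits including the sign bit
* w.sputn2(40, 6);     // signed value with extended-range encoding
* BitReader r = new BitReader(w.getBytes());
* int a = r.get(3);    // 5
* int b = r.sget(5);   // -7
* int c = r.sget2(6);  // 40
* }</pre>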
* * @author Steve Ratcliffe * @author Gerd Petermann */ public class BitWriter { // Choose so that chunks will not fill it. // The byte buffer and its current length (allocated length) private byte[] buf; // The buffer private int bufsize; // The allocated size /** The number of bits already used in the current byte of the buffer. */ private int usedBits; /** The index of the current byte of the buffer. */ private int index; private static final int BUFSIZE_INC = 50; private static final int INITIAL_BUF_SIZE = 20; public BitWriter(int sizeInBytes) { bufsize = sizeInBytes; buf = new byte[bufsize]; } public BitWriter() { this(INITIAL_BUF_SIZE); } public void clear() { Arrays.fill(buf, (byte) 0); index = 0; usedBits = 0; } /** * Put exactly one bit into the buffer. * * @param b The bottom bit of the integer is set at the current bit position. */ private void put1(int b) { ensureSize(index + 1); // Get the remaining bits into the byte. int rem = usedBits; // Or it in, we are assuming that the position is never turned back. buf[index] |= (b & 0x1) << rem; usedBits++; if (usedBits == 8) { index++; usedBits = 0; } } public void put1(boolean b) { put1(b ? 1 : 0); } /** * Put a number of bits into the buffer, growing it if necessary. * * @param bval The bits to add, the lowest n bits will be added to * the buffer. * @param nb The number of bits. */ public void putn(int bval, int nb) { assert nb >= 1 && nb <= 32; int val = nb < 32 ? bval & ((1< 0) { buf[index] |= ((val << rem) & 0xff); // Account for change so far int nput = 8 - rem; if (nput > n) nput = n; usedBits += nput; if (usedBits >= 8) { index++; usedBits = 0; } // Shift down in preparation for next byte. val >>>= nput; rem = 0; n -= nput; } } /** * Write a signed value. Caller must make sure that absolute value fits into * the given number of bits */ public void sputn(final int bval, final int nb) { assert nb > 1 && nb <= 32; int top = 1 << (nb - 1); if (bval < 0) { assert -bval < top || top < 0; int v = (top + bval) | top; putn(v, nb); } else { assert bval < top || top < 0; putn(bval, nb); } } /** * Write a signed value. If the value doesn't fit into nb bits, write one or more 1 << (nb-1) * as a flag for extended range. */ public void sputn2(final int bval, final int nb) { assert nb > 1 && nb <= 32; int top = 1 << (nb - 1); int mask = top - 1; int val = Math.abs(bval); if (bval == Integer.MIN_VALUE) { // catch special case : Math.abs(Integer.MIN_VALUE) returns Integer.MIN_VALUE putn(top, nb); val = Math.abs(val - mask); } assert val >= 0; while (val > mask) { putn(top, nb); val -= mask; } if (bval < 0) { putn((top - val) | top, nb); } else { putn(val, nb); } } public byte[] getBytes() { return buf; } public int getBitPosition() { return index * 8 + usedBits; } /** * Get the number of bytes actually used to hold the bit stream. This therefore can be and usually * is less than the length of the buffer returned by {@link #getBytes()}. * @return Number of bytes required to hold the output. */ public int getLength() { if (usedBits == 0) return index; return index + 1; } /** * Set everything up so that the given size can be accommodated. * The buffer is re-sized if necessary. * * @param newlen The new length of the bit buffer in bytes. */ private void ensureSize(int newlen) { if (newlen >= bufsize) reallocBuffer(); } /** * Reallocate the byte buffer. 
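* Each call grows the allocated size by BUFSIZE_INC (50) bytes and copies the
* existing content.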
*/ private void reallocBuffer() { bufsize += BUFSIZE_INC; buf = Arrays.copyOf(buf, bufsize); } } splitter-r653/src/uk/me/parabola/splitter/tools/Long2IntClosedMap.java0000664000175300017530000001572014352507254027230 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.tools; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.longs.LongArrayList; import uk.me.parabola.splitter.SplitFailedException; import uk.me.parabola.splitter.Utils; import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.DataInputStream; import java.io.DataOutputStream; import java.io.File; import java.io.FileInputStream; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.IOException; import java.util.Arrays; /** * Stores long/int pairs. * Requires less heap space compared to a HashMap while updates are allowed, and almost no * heap when sequential access is used. This is NOT a general purpose class. * * @author GerdP */ public class Long2IntClosedMap implements Long2IntClosedMapFunction{ private static final long LOW_ID_MASK = 0x3fffffffL; // 30 bits private static final long TOP_ID_MASK = ~LOW_ID_MASK; private static final int TOP_ID_SHIFT = Long.numberOfTrailingZeros(TOP_ID_MASK); private File tmpFile; private final String name; private LongArrayList index; // stores the higher 34 bits of the key which doesn't change frequently private IntArrayList bounds; // stores the lower 30 bits of the long value private int [] keys; private int [] vals; private final int maxSize; private final int unassigned; private int size; private long currentKey = Long.MIN_VALUE; private long oldTopId = Long.MIN_VALUE; private int currentVal; private DataInputStream dis; public Long2IntClosedMap(String name, int maxSize, int unassigned) { this.name = name; this.maxSize = maxSize; index = new LongArrayList(); bounds = new IntArrayList(); keys = new int[maxSize]; this.unassigned = unassigned; } @Override public int add(long key, int val) { if (key == 0 || key == Long.MAX_VALUE){ throw new IllegalArgumentException("Error: Cannot store " + name + " id " + key + ", this value is reserved."); } if (keys == null){ throw new IllegalArgumentException(name + ": Add on read-only map requested"); } if (size > 0 && currentKey >= key) throw new IllegalArgumentException("New " + name + " id " + key + " is not higher than last id " + currentKey); if (size+1 > maxSize) throw new IllegalArgumentException(name + " Map is full."); long topId = key >> TOP_ID_SHIFT; if (topId != oldTopId){ index.add(topId); bounds.add(size); oldTopId = topId; } keys[size] = (int)(key & LOW_ID_MASK); if (val != unassigned) { if (vals == null) allocVals(); vals[size] = val; } currentKey = key; size++; return size-1; } @Override public void switchToSeqAccess(File directory) throws IOException { tmpFile = File.createTempFile(name,null,directory); tmpFile.deleteOnExit(); try (FileOutputStream fos = new FileOutputStream(tmpFile); BufferedOutputStream stream = new BufferedOutputStream(fos); 
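// dos receives (long key, int value) pairs followed by a
// (Long.MAX_VALUE, Integer.MAX_VALUE) sentinel, see below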
DataOutputStream dos = new DataOutputStream(stream)) { long lastKey = Long.MIN_VALUE; if (vals != null) { for (int indexPos = 0; indexPos < index.size(); indexPos++){ long topId = index.getLong(indexPos); int lowerBound = bounds.getInt(indexPos); int upperBound = size; if (indexPos+1 < index.size()) upperBound = bounds.getInt(indexPos+1); long topVal = topId << TOP_ID_SHIFT; for (int i = lowerBound; i < upperBound; i++){ long key = topVal | (keys[i] & LOW_ID_MASK); int val = vals[i]; assert i == 0 || lastKey < key; lastKey = key; if (val != unassigned){ dos.writeLong(key); dos.writeInt(val); } } } } // write sentinel dos.writeLong(Long.MAX_VALUE); dos.writeInt(Integer.MAX_VALUE); keys = null; vals = null; index = null; bounds = null; currentKey = Long.MIN_VALUE; System.out.println("Wrote " + size + " " + name + " pairs to " + tmpFile.getAbsolutePath()); } } @Override public long size() { return size; } @Override public int defaultReturnValue() { return unassigned; } @Override public int getRandom(long key){ if (vals == null) return unassigned; int pos = getKeyPos(key); if (pos >= 0) return vals[pos]; return unassigned; } @Override public int getKeyPos(long key) { if (keys == null){ throw new IllegalArgumentException("random access on sequential-only map requested"); } long topId = key >> TOP_ID_SHIFT; int indexPos = Arrays.binarySearch(index.toLongArray(),0,index.size(),topId); if (indexPos < 0) return -1; int lowerBound = bounds.getInt(indexPos); int upperBound = size; if (bounds.size() > indexPos+1) upperBound = bounds.getInt(indexPos+1); int lowId = (int)(key & LOW_ID_MASK); int pos = Arrays.binarySearch(keys,lowerBound,upperBound, lowId); return pos; } @Override public int getSeq(long id){ if (currentKey == Long.MIN_VALUE){ dis = null; readPair(); } while(id > currentKey) readPair(); if (id < currentKey || id == Long.MAX_VALUE){ return unassigned; } return currentVal; } private void readPair() { try { if (dis == null) open(); currentKey = dis.readLong(); currentVal = dis.readInt(); } catch (IOException e){ System.out.println(e); throw new SplitFailedException("Failed to read from temp file " + tmpFile); } } private void open() throws FileNotFoundException{ FileInputStream fis = new FileInputStream(tmpFile); BufferedInputStream stream = new BufferedInputStream(fis); dis = new DataInputStream(stream); } @Override public void finish() { if (tmpFile != null && tmpFile.exists()){ close(); tmpFile.delete(); System.out.println("temporary file " + tmpFile.getAbsolutePath() + " was deleted"); } } @Override public void close() { currentKey = Long.MIN_VALUE; currentVal = unassigned; if (dis != null) try { dis.close(); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } } @Override public int replace(long key, int val) { if (keys == null){ throw new IllegalArgumentException("replace on read-only map requested"); } int pos = getKeyPos(key); if (pos < 0) throw new IllegalArgumentException("replace on unknown key requested"); if (vals == null) allocVals(); int oldVal = vals[pos]; vals[pos] = val; return oldVal; } @Override public void stats(String prefix) { System.out.println(prefix + name + "WriterMap contains " + Utils.format(size) + " pairs."); } private void allocVals() { vals = new int[maxSize]; Arrays.fill(vals, unassigned); } } splitter-r653/src/uk/me/parabola/splitter/tools/Long2IntClosedMapFunction.java0000664000175300017530000000425514352507254030737 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; 
you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.tools; import java.io.File; import java.io.IOException; /** * Stores long/int pairs. Only useful with data that is already in key-sorted order. * */ public interface Long2IntClosedMapFunction { /** * Add a new pair. The key must be higher than then any existing key in the map. * @param key the key value * @param val the value * @return the position in which the key was inserted */ public int add(long key, int val); /** * Get the value for the key. * @param key * @return */ public int getRandom(long key); /** * Get the value for the key from a map that was written to temporary * file. * @param key the key * @return unassigned if the current key is higher, the value if the key matches */ public int getSeq(long key); public long size(); public int defaultReturnValue(); /** * Remove temp files if they exist. */ void finish(); /** * Close the temp file, reset the current values. Use this to start * from the beginning. * @throws IOException */ public void close() throws IOException; /** * Move the data stored in the map to a temp file. This makes the map read only * and allows only sequential access. * @param directory * @throws IOException */ void switchToSeqAccess(File directory) throws IOException; /** * Return the position of the key if found in the map * @param key * @return the position or a negative value to indicate "not found" */ public int getKeyPos(long key); /** * Replace the value for an existing key. * @param key * @param val * @return the previously stored value */ public int replace(long key, int val); public void stats(final String prefix); } splitter-r653/src/uk/me/parabola/splitter/tools/OSMId2ObjectMap.java0000664000175300017530000000513114352507254026561 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.tools; import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap; import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; /** * A basic memory efficient Map implementation that stores an OSM id with an Object. * As of 2012, normal OSM IDs still use only 31 bits, but we must support 64 bits. * This map avoids to store many 0 bits by splitting the id into the upper part where * almost all bits are zero and the lower part that changes frequently. 
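 * <p>A minimal usage sketch (assuming the class keeps its generic value parameter,
 * as the {@code @param} tag below suggests; the id and payload are arbitrary examples):
 * <pre>{@code
 * OSMId2ObjectMap<String> map = new OSMId2ObjectMap<>();
 * map.put(240109189L, "payload");   // top bits pick the outer map, low 27 bits the inner one
 * String v = map.get(240109189L);   // "payload"
 * map.remove(240109189L);           // empty inner maps are dropped again
 * }</pre>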
* @author GerdP * * @param the type of object that should be stored */ public class OSMId2ObjectMap{ public static final long LOW_ID_MASK = 0x7ffffff; public static final long TOP_ID_MASK = ~LOW_ID_MASK; // the part of the key that is saved in the top HashMap private static final int TOP_ID_SHIFT = Long.numberOfTrailingZeros(TOP_ID_MASK); private Long2ObjectOpenHashMap> topMap; private int size; public OSMId2ObjectMap() { topMap = new Long2ObjectOpenHashMap<>(); } public V put(long key, V object){ long topId = key >> TOP_ID_SHIFT; Int2ObjectOpenHashMap midMap = topMap.get(topId); if (midMap == null){ midMap = new Int2ObjectOpenHashMap<>(); topMap.put(topId, midMap); } int midId = (int)(key & LOW_ID_MASK); V old = midMap.put(midId, object); if (old == null) size++; return old; } public V get(long key){ long topId = key >> TOP_ID_SHIFT; Int2ObjectOpenHashMap midMap = topMap.get(topId); if (midMap == null) return null; int midId = (int)(key & LOW_ID_MASK); return midMap.get(midId); } public V remove(long key){ long topId = key >> TOP_ID_SHIFT; Int2ObjectOpenHashMap midMap = topMap.get(topId); if (midMap == null) return null; int midId = (int)(key & LOW_ID_MASK); V old = midMap.remove(midId); if (old == null) return null; if (midMap.isEmpty()) topMap.remove(topId); size--; return old; } public void clear(){ topMap.clear(); size = 0; } public int size(){ return size; } public boolean isEmpty() { return size == 0; } public boolean containsKey(long key) { return get(key) != null; } } splitter-r653/src/uk/me/parabola/splitter/tools/SparseBitSet.java0000664000175300017530000000633214352507254026353 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.tools; import it.unimi.dsi.fastutil.ints.Int2LongOpenHashMap; import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; import uk.me.parabola.splitter.SplitFailedException; /** * A partly BitSet implementation optimized for memory when used to store very * large values with a high likelihood that the stored values build groups like * e.g. the OSM node IDs. The keys are divided into 3 parts. The 1st part is * stored in a small hash map. The 2nd part is stored in larger hash maps * addressing long values. The 3rd part (6 bits) is stored in the long value * addressed by the upper maps. 
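 * <p>A short usage sketch using only the methods implemented below (the id is an
 * arbitrary example):
 * <pre>{@code
 * SparseBitSet seen = new SparseBitSet();
 * seen.set(240109189L);
 * boolean b = seen.get(240109189L);  // true
 * seen.clear(240109189L);            // empty chunks and inner maps are released
 * int n = seen.cardinality();        // 0
 * }</pre>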
author GerdP */ public class SparseBitSet { private static final long MID_ID_MASK = 0x7ffffff; private static final long TOP_ID_MASK = ~MID_ID_MASK; private static final int LOW_MASK = 63; private static final int TOP_ID_SHIFT = Long.numberOfTrailingZeros(TOP_ID_MASK); private static final int MID_ID_SHIFT = Integer.numberOfTrailingZeros(~LOW_MASK); private Long2ObjectOpenHashMap topMap = new Long2ObjectOpenHashMap<>(); private long bitCount; public void set(long key) { long topId = key >> TOP_ID_SHIFT; Int2LongOpenHashMap midMap = topMap.get(topId); if (midMap == null) { midMap = new Int2LongOpenHashMap(); topMap.put(topId, midMap); } int midId = (int) ((key & MID_ID_MASK) >> MID_ID_SHIFT); long chunk = midMap.get(midId); int bitPos = (int) (key & LOW_MASK); long val = 1L << (bitPos - 1); if (chunk != 0) { if ((chunk & val) != 0) return; val |= chunk; } midMap.put(midId, val); ++bitCount; } public void clear(long key) { long topId = key >> TOP_ID_SHIFT; Int2LongOpenHashMap midMap = topMap.get(topId); if (midMap == null) return; int midId = (int) ((key & MID_ID_MASK) >> MID_ID_SHIFT); long chunk = midMap.get(midId); if (chunk == 0) return; int bitPos = (int) (key & LOW_MASK); long val = 1L << (bitPos - 1); if ((chunk & val) == 0) return; chunk &= ~val; if (chunk == 0) { midMap.remove(midId); if (midMap.isEmpty()) { topMap.remove(topId); } } else midMap.put(midId, chunk); --bitCount; } public boolean get(long key) { long topId = key >> TOP_ID_SHIFT; Int2LongOpenHashMap midMap = topMap.get(topId); if (midMap == null) return false; int midId = (int) ((key & MID_ID_MASK) >> MID_ID_SHIFT); long chunk = midMap.get(midId); if (chunk == 0) return false; int bitPos = (int) (key & LOW_MASK); long val = 1L << (bitPos - 1); return ((chunk & val) != 0); } public void clear() { topMap.clear(); bitCount = 0; } public int cardinality() { if (bitCount > Integer.MAX_VALUE) throw new SplitFailedException("cardinality too high for int " + bitCount); return (int) bitCount; } } splitter-r653/src/uk/me/parabola/splitter/tools/SparseLong2IntMap.java0000664000175300017530000010217214352507254027252 0ustar builderbuilder00000000000000/* * Copyright (c) 2016, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.tools; import java.nio.ByteBuffer; import java.util.Arrays; import it.unimi.dsi.fastutil.Hash; import it.unimi.dsi.fastutil.ints.Int2IntLinkedOpenHashMap; import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.ints.IntBidirectionalIterator; import it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap; import uk.me.parabola.splitter.Utils; /** * Intended usage: Store many pairs of OSM id and an int which represents the position. * Optimized for low memory requirements and inserts in sequential order. * Don't use this for a rather small number of pairs. * * Inspired by SparseInt2ShortMapInline. * * A HashMap is used to address {@link ChunkMem} instances which address chunks. 
The HashMap * is the only part that stores long values, and it will be very small as long * as input is normal OSM data and not something with random keys. * A chunk stores up to CHUNK_SIZE values. A separately stored bit-mask is used * to separate used and unused entries in the chunk. Thus, the stored chunk length * depends on the number of used entries, not on the highest used entry. * Such a "masked encoded" entry may look like this * v1,v1,v1,v1,v1,v1,v2,v2,v2,v2,v1,v1,v1,v1,v1,u,?,?,...} * v1,v2: values stored in the chunk * u: "unassigned" value * ?: anything * * After applying Run Length Encryption on this the chunk looks like this: * {v1,6,v2,4,v1,5,?,?,?} * Further compression is achieved by using a dictionary if appropriate. The dictionary * contains all distinct values. These are then addressed by the position in the dictionary. * The above example will be reduced to this: * {v1,v2,6,4,5} * Note that there is no need to store the position when the dictionary contains * two entries. * If a dictionary contains only one entry we only store the dictionary and the bit mask. * * Fortunately, OSM data is distributed in a way that a lot of chunks contain * just one distinct value. * * Since we have OSM IDs with 64 bits, we have to divide the key into 3 parts: * 37 bits for the value that is stored in the HashMap. * 21 bits for the chunkId (this gives the required length of a large vector) * 6 bits for the position in the chunk * * The chunkId identifies the position of a 32-bit value (stored in the large vector). * A chunk is stored in a chunkStore which is a 3-dimensional array. * We group chunks of equally length together in stores of 64 entries. * To find the right position of a new chunk, we need three values: x,y, and z. * x is the length of the chunk (the number of required bytes) (1-256, we store the value decremented by 1 to have 0-255) * y is the position of the store (0-1048575), we store a value incremented by 1 to ensure a non-zero value for used chunks * z is the position of the chunk within the store. (0-15) * The maximum values for these three values are chosen so that we can place them * together into the int value that is kept in the large vector. */ public final class SparseLong2IntMap { private static final boolean SELF_TEST = false; private static final int CHUNK_SIZE = 64; private static final int CHUNK_SHIFT = Integer.numberOfTrailingZeros(CHUNK_SIZE); private static final int MAX_BYTES_FOR_VAL = Integer.BYTES; private static final int MAX_STORED_BYTES_FOR_CHUNK = CHUNK_SIZE * MAX_BYTES_FOR_VAL; private static final int CHUNK_STORE_BITS_FOR_X = Integer.SIZE - Integer.numberOfLeadingZeros(MAX_STORED_BYTES_FOR_CHUNK - 1); // values 1 .. 256 are stored as 0..255 private static final int CHUNK_STORE_BITS_FOR_Z = 8; // must not be higher than 8 private static final int CHUNK_STORE_BITS_FOR_Y = Integer.SIZE - (CHUNK_STORE_BITS_FOR_X + CHUNK_STORE_BITS_FOR_Z); private static final int CHUNK_STORE_ELEMS = 1 << CHUNK_STORE_BITS_FOR_Z; private static final int CHUNK_STORE_X_MASK = (1 << CHUNK_STORE_BITS_FOR_X) - 1; private static final int CHUNK_STORE_Y_MASK = (1 << CHUNK_STORE_BITS_FOR_Y) - 1; private static final int CHUNK_STORE_Z_MASK = (1 << CHUNK_STORE_BITS_FOR_Z) - 1; private static final int CHUNK_STORE_Y_SHIFT = CHUNK_STORE_BITS_FOR_X; private static final int CHUNK_STORE_Z_SHIFT = CHUNK_STORE_BITS_FOR_X + CHUNK_STORE_BITS_FOR_Y; private static final int BYTES_FOR_MASK = 8; /** Number of entries addressed by one topMap entry. 
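 * (This constant is the shift amount, not a count: one topMap entry covers
 * {@code 1 << TOP_ID_SHIFT} consecutive keys.)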
*/ private static final int TOP_ID_SHIFT = 27; // must be below 32, smaller values give smaller LARGE_VECTOR_SIZEs and more entries in the top HashMap /** the part of the key that is not saved in the top HashMap. */ private static final long CHUNK_ID_MASK = (1L << (TOP_ID_SHIFT)) - 1; /** Number of entries addressed by one topMap entry. */ private static final int LARGE_VECTOR_SIZE = (int) (CHUNK_ID_MASK / CHUNK_SIZE + 1); private static final int MAX_Y_VAL = LARGE_VECTOR_SIZE / CHUNK_STORE_ELEMS + 1; /** The part of the key that contains the offset in the chunk. */ private static final long CHUNK_OFFSET_MASK = CHUNK_SIZE - 1L; /** First 58 bits of a long. If this part of the key changes, a different chunk is needed. */ private static final long OLD_CHUNK_ID_MASK = ~CHUNK_OFFSET_MASK; private static final long INVALID_CHUNK_ID = 1L; // must NOT be divisible by CHUNK_SIZE /** What to return on unassigned keys. */ private int unassigned = Integer.MIN_VALUE; private long size; private long modCount; private long oldModCount; private long currentChunkId; private ChunkMem currentMem; private final int [] currentChunk = new int[CHUNK_SIZE]; // stores the values in the real position private final int [] testChunk = new int[CHUNK_SIZE]; // for internal test private final int [] maskedChunk = new int[CHUNK_SIZE]; // a chunk after applying the "mask encoding" private final int[] tmpChunk = new int[CHUNK_SIZE * 2]; // used for tests of compression methods private static final int MAX_BYTES_FOR_RLE_CHUNK = CHUNK_SIZE * (Integer.BYTES + 1); private final ByteBuffer bufEncoded = ByteBuffer.allocate(MAX_BYTES_FOR_RLE_CHUNK); // for the RLE-compressed chunk // bit masks for the flag byte private static final int FLAG1_USED_BYTES_MASK = 0x03; // number of bytes - 1 private static final int FLAG1_RUNLEN_MASK = 0x1C; // number of bits for run length values private static final int FLAG1_DICTIONARY = 0x20; // if set a dictionary follows the flag bytes private static final int FLAG1_COMP_METHOD_BITS = 0x40; // rest of vals are "bit" encoded private static final int FLAG1_COMP_METHOD_RLE = 0x80; // values are run length encoded private static final int FLAG2_BITS_FOR_VALS = 0x1f; private static final int FLAG2_ALL_POSITIVE = 0x20; private static final int FLAG2_ALL_NEGATIVE = 0x40; private static final int FLAG2_DICT_SIZE_IS_2 = 0x80; private static final int FLAG_BITS_FOR_DICT_SIZE = Integer.SIZE - Integer.numberOfLeadingZeros(CHUNK_SIZE - 1); /** a chunk that is stored with a length between 1 and 3 has no flag byte and is always a single value chunk. */ private static final int SINGLE_VAL_CHUNK_LEN_NO_FLAG = 3; // for statistics private final String dataDesc; private Long2ObjectOpenHashMap topMap; static final long MAX_MEM = Runtime.getRuntime().maxMemory() / 1024 / 1024; static final int POINTER_SIZE = (MAX_MEM < 32768) ? 4 : 8; // presuming that compressedOOps is enabled private Integer bias1; // used for initial delta encoding private final BitWriter bitWriter = new BitWriter(1000); /** * A map that stores pairs of (OSM) IDs and int values identifying the * areas in which the object with the ID occurs. 
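 * <p>A usage sketch; the value given to {@code put()} must differ from the configured
 * "unassigned" marker (see {@code defaultReturnValue(int)}). Ids and values are
 * arbitrary examples:
 * <pre>{@code
 * SparseLong2IntMap map = new SparseLong2IntMap("nodes");
 * map.defaultReturnValue(-1);      // marker returned for keys that were never stored
 * map.put(240109189L, 7);          // OSM id -> area index
 * int hit  = map.get(240109189L);  // 7
 * int miss = map.get(123L);        // -1
 * }</pre>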
* @param dataDesc */ public SparseLong2IntMap(String dataDesc) { // sanity check to make sure that we can store enough chunks with the same length // If this test fails it is not possible to store the same value for all ids long reserve = ((1L << CHUNK_STORE_BITS_FOR_Y) - 1) * CHUNK_SIZE - LARGE_VECTOR_SIZE; assert reserve > 0 : "Bad combination of constants"; this.dataDesc = dataDesc; System.out.println(dataDesc + " Map: uses " + this.getClass().getSimpleName()); clear(); } /** * Helper class to manage memory for chunks. * @author Gerd Petermann * */ static class ChunkMem { private final long topId; private long estimatedBytes; // estimate value for the allocated bytes private int[] largeVector; // only used when sparseVector is growing too large private Int2ObjectOpenHashMap reusableChunks; private byte[][][] chunkStore; private final int[] freePosInStore; /** maps chunks that can be reused. */ private int chunkCount; private int lastFlag; private long lastChunkId = INVALID_CHUNK_ID; private boolean checkReuse; public ChunkMem(long topID) { this.topId = topID; chunkStore = new byte[MAX_STORED_BYTES_FOR_CHUNK][][]; freePosInStore = new int[MAX_STORED_BYTES_FOR_CHUNK]; reusableChunks = new Int2ObjectOpenHashMap<>(0, Hash.VERY_FAST_LOAD_FACTOR); largeVector = new int[LARGE_VECTOR_SIZE]; estimatedBytes = (long) LARGE_VECTOR_SIZE * Integer.BYTES + (MAX_STORED_BYTES_FOR_CHUNK) * (8 + 1 * Integer.BYTES) + 3 * (24 + 16) + 190; } private void grow(int x) { int oldCapacity = chunkStore[x].length; int newCapacity = oldCapacity < 1024 ? oldCapacity * 2 : oldCapacity + (oldCapacity >> 1); if (newCapacity >= MAX_Y_VAL) newCapacity = MAX_Y_VAL; if (newCapacity <= oldCapacity) return; resize(x, newCapacity); } private void resize(int x, int newCapacity) { int oldCapacity = chunkStore[x].length; if (newCapacity < oldCapacity) assert chunkStore[x][newCapacity] == null; chunkStore[x] = Arrays.copyOf(chunkStore[x], newCapacity); estimatedBytes += (newCapacity - oldCapacity) * 8; // pointer-pointer } private void putChunk(long chunkId, ByteBuffer bufEncoded) { int len = bufEncoded.limit(); int x = len - (1 + BYTES_FOR_MASK); if (chunkStore[x] == null) { chunkStore[x] = new byte[2][]; estimatedBytes += 24 + 2 * 8; // pointer-pointer } IntArrayList reusableChunk = null; int reuseFlag = 0; int lastX = -1; if (lastChunkId != (chunkId & OLD_CHUNK_ID_MASK)) { chunkCount++; } else { lastX = lastFlag & CHUNK_STORE_X_MASK; if (x == lastX) { reuseFlag = lastFlag; } else { // this is a rewrite with a different length, add the previously used chunk to the reusable list reusableChunk = reusableChunks.get(lastX); if (reusableChunk == null) { reusableChunk = new IntArrayList(8); reusableChunks.put(lastX, reusableChunk); estimatedBytes += 8 * Integer.BYTES + 24 + Integer.BYTES + POINTER_SIZE + 16; // for the IntArrayList instance estimatedBytes += 20; // estimate for the hash map entry } reusableChunk.add(lastFlag); checkReuse = true; } } if (x != lastX && checkReuse) { reusableChunk = reusableChunks.get(x); if (reusableChunk != null && !reusableChunk.isEmpty()) { reuseFlag = reusableChunk.removeInt(reusableChunk.size() - 1); } } int y, z; byte[] store; if (reuseFlag != 0) { y = (reuseFlag >> CHUNK_STORE_Y_SHIFT) & CHUNK_STORE_Y_MASK; y--; // we store the y value incremented by 1 z = (reuseFlag >> CHUNK_STORE_Z_SHIFT) & CHUNK_STORE_Z_MASK; store = chunkStore[x][y]; } else { y = ++freePosInStore[x] / CHUNK_STORE_ELEMS; if (y >= chunkStore[x].length) grow(x); if (chunkStore[x][y] == null) { int numChunks = (len < 16) ? 
CHUNK_STORE_ELEMS : 8; chunkStore[x][y] = new byte[numChunks * len + 1]; estimatedBytes += 24 + numChunks * len + 1; int padding = 8 - (numChunks & 7); if (padding < 8) estimatedBytes += padding; } store = chunkStore[x][y]; z = (store[0]++) & CHUNK_STORE_Z_MASK; if (len * (z + 1) + 1 > store.length) { int newNum = Math.min(CHUNK_STORE_ELEMS, z + 8); store = Arrays.copyOf(store, newNum * len + 1); chunkStore[x][y] = store; estimatedBytes += (newNum - z) * len; } } ByteBuffer storeBuf = ByteBuffer.wrap(store, z * len + 1, len); storeBuf.put(bufEncoded); // calculate the position in the large vector y++; // we store the y value incremented by 1 assert x < 1 << CHUNK_STORE_BITS_FOR_X; assert y < 1 << CHUNK_STORE_BITS_FOR_Y; assert z < 1 << CHUNK_STORE_BITS_FOR_Z; int flag = (z & CHUNK_STORE_Z_MASK) << CHUNK_STORE_Z_SHIFT | (y & CHUNK_STORE_Y_MASK) << CHUNK_STORE_Y_SHIFT | (x & CHUNK_STORE_X_MASK); assert flag != 0; int vectorPos = getVectorPos(chunkId); largeVector[vectorPos] = flag; } /** * Calculate the position in the large vector * @param chunkId the (unshifted) key * @return the position in the large vector */ private static int getVectorPos (long chunkId) { return (int) (chunkId & CHUNK_ID_MASK) >> CHUNK_SHIFT; } private int getFlag(long chunkId) { int vectorPos = getVectorPos(chunkId); return largeVector[vectorPos]; } /** * @return number of used chunks */ public int getChunkCount() { return chunkCount; } /** * Get a {@link }ByteBuffer} that contains the stored (encoded) chunk. * @param key the key * @param forUpdate * @return the buffer or null if no chunk */ public ByteBuffer getStoredChunk(long key, boolean forUpdate) { int flag = getFlag(key); if (flag == 0) return null; int x = flag & CHUNK_STORE_X_MASK; int y = (flag >> CHUNK_STORE_Y_SHIFT) & CHUNK_STORE_Y_MASK; y--; // we store the y value incremented by 1 int z = (flag >> CHUNK_STORE_Z_SHIFT) & CHUNK_STORE_Z_MASK; int chunkLenWithMask = x + 1 + BYTES_FOR_MASK; int startPos = z * chunkLenWithMask + 1; if (forUpdate) { lastChunkId = key & OLD_CHUNK_ID_MASK; lastFlag = flag; } return ByteBuffer.wrap(chunkStore[x][y], startPos, chunkLenWithMask); } } /** * Count how many of the lowest X bits in mask are set. * * @return how many of the lowest X bits in mask are set. */ private static int countUnder(final long mask, final int lowest) { return Long.bitCount(mask & ((1L << lowest) - 1)); } /** * Put an int value into the byte buffer using the given number of bytes. * @param buf the buffer * @param val the int value to store * @param bytesToUse the number of bytes to use */ static void putVal(final ByteBuffer buf, final int val, final int bytesToUse) { switch (bytesToUse) { case 1: assert val >= Byte.MIN_VALUE && val <= Byte.MAX_VALUE : val + " of out Byte range"; buf.put((byte) val); break; case 2: buf.putShort((short) val); break; case 3: // put3 buf.put((byte) (val & 0xff)); buf.putShort((short) (val >> 8)); break; default: buf.putInt(val); break; } } /** * Read an int value from the byte buffer using the given number of bytes. * @param buf the byte buffer * @param bytesToUse number of bytes (1 - 4) * @return the integer value */ static int getVal(final ByteBuffer buf, final int bytesToUse) { switch (bytesToUse) { case 1: return buf.get(); case 2: return buf.getShort(); case 3: byte b1 = buf.get(); short s = buf.getShort(); return (b1 & 0xff) | (s << 8); default: return buf.getInt(); } } /** * calculate the number of bits needed to store the value as a signed number. 
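 * For example, with the formula used below: {@code bitsNeeded(0) == 1},
 * {@code bitsNeeded(127) == 8} and {@code bitsNeeded(-127) == 8}. The sign bit is
 * always counted, so for negative powers of two the result is one bit more than
 * strictly necessary (e.g. {@code bitsNeeded(-128) == 9}).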
* @param val the value to store * @return the number of bits needed to store the value as a signed number */ private static int bitsNeeded(int val) { return Long.SIZE - Long.numberOfLeadingZeros(Math.abs(val)) + 1; } private ChunkMem getMem (long key) { long topID = (key >> TOP_ID_SHIFT); if (currentMem == null || currentMem.topId != topID) { currentMem = topMap.get(topID); } return currentMem; } /** * Try to use Run Length Encoding (RLE) to compress the "mask-encoded" chunk. In most * cases this works very well because chunks often have only one or two distinct values. * The values and run length fields are each written with a fixed number of bits. * * @param numVals number of elements in the chunk, content of {@code maskedChunk} after that is undefined. * @param minVal smallest value in maskedChunk * @param maxVal highest value in maskedChunk * */ private void chunkCompress(int numVals, int minVal, int maxVal) { int flag1 = FLAG1_COMP_METHOD_BITS; int opos = 0; int maxRunLen = 0; int numCounts = 0; Int2IntLinkedOpenHashMap dict = new Int2IntLinkedOpenHashMap(32, Hash.VERY_FAST_LOAD_FACTOR); dict.defaultReturnValue(-1); for (int i = 0; i < numVals; i++) { int runLength = 1; while (i + 1 < numVals && maskedChunk[i] == maskedChunk[i + 1]) { runLength++; i++; } numCounts++; int v = maskedChunk[i]; if (dict.get(v) == dict.defaultReturnValue()) dict.put(v, dict.size()); tmpChunk[opos++] = v; tmpChunk[opos++] = runLength; if (maxRunLen < runLength) maxRunLen = runLength; } // the first value is used as a bias because it is likely that this will bring min/max values closer to 0 int bias2 = maskedChunk[0]; int bits = Math.max(bitsNeeded(minVal - bias2), bitsNeeded(maxVal - bias2)); int sign = getSign(minVal - bias2, maxVal - bias2); // try to find out if compression will help int bitsForRLE = bitsNeeded(maxRunLen-1) - 1; // we always have positive values and we store the len decremented by 1 int bitsForVal = bits - Math.abs(sign); int bitsForPos = bitsNeeded(dict.size() - 1) - 1; int bitsForDictFlag = dict.size() > 2 ? FLAG_BITS_FOR_DICT_SIZE : 0; int bitsForDict = bitsForDictFlag + (dict.size() - 1) * bitsForVal; int len1 = toBytes((numVals - 1) * bitsForVal); int len2 = toBytes(bitsForRLE + (numCounts - 1) * (bitsForRLE + bitsForVal)); int len3 = toBytes(bitsForDict + (numVals - 1) * bitsForPos); int len4 = toBytes(bitsForDict + bitsForRLE + (numCounts - 1) * (bitsForRLE + (dict.size() > 2 ? bitsForPos : 0))); boolean useRLE = numCounts < 5 && maxRunLen > 1 && (Math.min(len2, len4) < Math.min(len1, len3)); boolean useDict = (useRLE) ? 
len2 > len4 : len1 > len3; // System.out.println(len1 + " " + len2 + " " + len3 + " " + len4 + " " + useDict + " " + useRLE + " " + dict.size() + " " + numCounts); // if (useRLE && numVals / 2 < numCounts) { // long dd = 4; // } bitWriter.clear(); if (useDict) { flag1 |= FLAG1_DICTIONARY; if (dict.size() > 2) bitWriter.putn(dict.size() - 1, FLAG_BITS_FOR_DICT_SIZE); IntBidirectionalIterator iter = dict.keySet().iterator(); iter.next(); while (iter.hasNext()) { storeVal(iter.nextInt() - bias2, bits, sign); } } if (useRLE) { flag1 |= FLAG1_COMP_METHOD_RLE; flag1 |= ((bitsForRLE << 2) & FLAG1_RUNLEN_MASK) ; boolean writeIndex = useDict && (dict.size() > 2); int pos = 1; // first val is written with different method bitWriter.putn(tmpChunk[pos++] - 1, bitsForRLE); while (pos < opos) { int v = tmpChunk[pos++]; if (!useDict) storeVal(v - bias2, bits, sign); else { if (writeIndex) { int idx = dict.get(v); bitWriter.putn(idx, bitsForPos); } } bitWriter.putn(tmpChunk[pos++] - 1, bitsForRLE); } } else { for (int i = 1; i < numVals; i++) { // first val is written with different method if (useDict) { int v = maskedChunk[i]; bitWriter.putn(dict.get(v), bitsForPos); } else { storeVal(maskedChunk[i] - bias2, bits, sign); } } } int bytesForBias = 0; bytesForBias = bytesNeeded(bias2, bias2); flag1 |= (bytesForBias - 1) & FLAG1_USED_BYTES_MASK; int bwLen = bitWriter.getLength(); if (SELF_TEST) { if (useRLE && useDict && len4 != bwLen) assert false : "len4 " + bwLen + " <> " + len4; if (!useRLE && useDict && len3 != bwLen) assert false : "len3 " + bwLen + " <> " + len3; if (useRLE && !useDict && len2 != bwLen) assert false : "len2 " + bwLen + " <> " + len2; if (!useRLE && !useDict && len1 != bwLen) assert false : "len1 " + bwLen + " <> " + len1; } int len = 1 + 1 + bitWriter.getLength() + bytesForBias; if (len < MAX_STORED_BYTES_FOR_CHUNK) { bufEncoded.put((byte) flag1); int flag2 = (bits - 1) & FLAG2_BITS_FOR_VALS; // number of bits for the delta encoded values if (sign > 0) flag2 |= FLAG2_ALL_POSITIVE; else if (sign < 0) flag2 |= FLAG2_ALL_NEGATIVE; if (dict.size() == 2) flag2 |= FLAG2_DICT_SIZE_IS_2; bufEncoded.put((byte) flag2); putVal(bufEncoded, bias2, bytesForBias); bufEncoded.put(bitWriter.getBytes(), 0, bitWriter.getLength()); } else { // no flag byte for worst case for (int i = 0; i < numVals; i++){ putVal(bufEncoded, currentChunk[i], 4); } } } /** * calculate the number of bytes consumed by given a number of bits * @param nBits the number of bits * @return the number of bytes needed to store the bits */ private static int toBytes(int nBits) { return (nBits + 7) / 8; } private void storeVal(int val, int nb, int sign) { if (sign == 0) bitWriter.sputn(val, nb); else if (sign == 1) { bitWriter.putn(val, nb - 1); } else { bitWriter.putn(-val, nb - 1); } } private static int readVal(BitReader br, int bits, int sign) { if (sign == 0) return br.sget(bits); else if (sign > 0) return br.get(bits-1); return -br.get(bits-1); } private static int getSign(int v1, int v2) { assert v1 != v2; if (v1 < 0) { return (v2 <= 0) ? -1 : 0; } else if (v1 > 0) { return v2 >= 0 ? 1: 0; } else { //v1 == 0 return v2 < 0 ? -1 : 1; } } /** * Try to compress the data in currentChunk and store the result in the chunkStore. 
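 * <p>Rough layout of one stored chunk as produced here and in chunkCompress() (a
 * sketch only; the exact flag semantics are defined by the FLAG1_/FLAG2_ constants):
 * <pre>{@code
 * [ 8-byte mask | value (1-3 bytes) ]                  // single distinct value, no flag byte
 * [ 8-byte mask | length flag | value (4 bytes) ]      // single distinct value needing 4 bytes
 * [ 8-byte mask | flag1 | flag2 | bias | bit stream ]  // dictionary / RLE / plain bit encoding
 * [ 8-byte mask | plain 4-byte values ]                // worst case, no flag byte
 * }</pre>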
*/ private void saveCurrentChunk() { if (currentChunkId == INVALID_CHUNK_ID || modCount == oldModCount) return; // step 1: mask encoding long mask = 0; int simpleLen = 0; long elementMask = 1L; if (bias1 == null) { bias1 = findBias1(); // very simple heuristics } int maxVal = Integer.MIN_VALUE; int minVal = Integer.MAX_VALUE; for (int i = 0; i < CHUNK_SIZE; i++) { if (currentChunk[i] != unassigned) { int v = currentChunk[i] - bias1; // apply bias if (minVal > v) minVal = v; if (maxVal < v) maxVal = v; maskedChunk[simpleLen++] = v; mask |= elementMask; } elementMask <<= 1; } bufEncoded.clear(); bufEncoded.putLong(mask); if (minVal == maxVal) { // nice: single value chunk int bytesFor1st = bytesNeeded(minVal, maxVal); if (bytesFor1st > SINGLE_VAL_CHUNK_LEN_NO_FLAG) { bufEncoded.put((byte) (bytesFor1st - 1)); // flag byte } putVal(bufEncoded, maskedChunk[0], bytesFor1st); } else { chunkCompress(simpleLen, minVal, maxVal); assert bufEncoded.position() > SINGLE_VAL_CHUNK_LEN_NO_FLAG; } bufEncoded.flip(); ChunkMem mem = getMem(currentChunkId); if (mem == null) { long topID = currentChunkId >> TOP_ID_SHIFT; mem = new ChunkMem(topID); topMap.put(topID, mem); currentMem = mem; } mem.putChunk(currentChunkId, bufEncoded); if (SELF_TEST) { Arrays.fill(testChunk, unassigned); decodeStoredChunk(currentChunkId, testChunk, -1); for (int i = 0; i < CHUNK_SIZE; i++) { if (testChunk[i] != currentChunk[i]) { assert false : "current chunk id=" + currentChunkId + " key=" + (currentChunkId + i) + " doesn't match " + testChunk[i] + "<>" + currentChunk[i]; } } } } /** * Calculate the number of bytes needed to encode values in the given range. * @param minVal smallest value * @param maxVal highest value * @return number of needed bytes */ static int bytesNeeded (long minVal, long maxVal) { if (minVal >= Byte.MIN_VALUE && maxVal <= Byte.MAX_VALUE) { return Byte.BYTES; } else if (minVal >= Short.MIN_VALUE && maxVal <= Short.MAX_VALUE) { return Short.BYTES; } else if (minVal >= -0x00800000 && maxVal <= 0x7fffff) { return 3; } return Integer.BYTES; } private int findBias1() { int minVal = Integer.MAX_VALUE; int maxVal = Integer.MIN_VALUE; for (int i = 0; i < CHUNK_SIZE; i++) { if (currentChunk[i] != unassigned) { if (minVal > currentChunk[i]) minVal = currentChunk[i]; if (maxVal < currentChunk[i]) maxVal = currentChunk[i]; } } int avg = minVal + (maxVal-minVal) / 2; if (avg < 0 && avg - Integer.MIN_VALUE < Byte.MAX_VALUE) return Integer.MIN_VALUE + Byte.MAX_VALUE; if (avg > 0 && Integer.MAX_VALUE - avg < Byte.MAX_VALUE) return Integer.MAX_VALUE - Byte.MAX_VALUE; return avg; } public boolean containsKey(long key) { return get(key) != unassigned; } public int put(long key, int val) { if (val == unassigned) { throw new IllegalArgumentException("Cannot store the value that is reserved as being unassigned. val=" + val); } long chunkId = key & OLD_CHUNK_ID_MASK; if (currentChunkId != chunkId){ // we need a different chunk replaceCurrentChunk(key); } int chunkoffset = (int) (key & CHUNK_OFFSET_MASK); int out = currentChunk[chunkoffset]; currentChunk[chunkoffset] = val; if (out == unassigned) size++; if (out != val) modCount++; return out; } /** * Either decode the encoded chunk data into target or extract a single value. 
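 * <p>Typical call patterns used elsewhere in this class:
 * <pre>{@code
 * int v = decodeStoredChunk(key, null, (int) (key & CHUNK_OFFSET_MASK)); // random access, one value
 * decodeStoredChunk(key, currentChunk, -1);                              // decode the whole chunk
 * }</pre>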
* @param mp the MemPos instance with information about the store * @param targetChunk if not null, data will be decoded into this buffer * @param chunkOffset gives the wanted element (targetChunk must be null) * @return the extracted value or unassigned */ private int decodeStoredChunk (long key, int[] targetChunk, int chunkOffset) { ChunkMem mem = getMem(key); if (mem == null) return unassigned; ByteBuffer inBuf = mem.getStoredChunk(key, targetChunk == currentChunk); if (inBuf == null) return unassigned; long chunkMask = inBuf.getLong(); if (targetChunk == null) { long elementmask = 1L << chunkOffset; if ((chunkMask & elementmask) == 0) return unassigned; // not in chunk // the map contains the key, decode it } int chunkLenNoMask = inBuf.remaining(); int flag = 0; int bytesToUse = Integer.BYTES; // assume worst case if (chunkLenNoMask == MAX_STORED_BYTES_FOR_CHUNK) { // special case: no flag is written if we have the max. size // all values are written with 4 bytes and without bias if (targetChunk == null) { inBuf.position(inBuf.position() + chunkOffset * bytesToUse); return getVal(inBuf, bytesToUse); } for (int i = 0; i < CHUNK_SIZE; i++) { targetChunk[i] = getVal(inBuf, bytesToUse); } return unassigned; } else if (chunkLenNoMask <= SINGLE_VAL_CHUNK_LEN_NO_FLAG) { bytesToUse = chunkLenNoMask; } else { flag = inBuf.get(); if ((flag & FLAG1_COMP_METHOD_BITS) != 0) { inBuf.position(inBuf.position() - 1); return decodeBits(chunkMask, targetChunk, chunkOffset, inBuf); } bytesToUse = (flag & FLAG1_USED_BYTES_MASK) + 1; } int start = bias1 + getVal(inBuf, bytesToUse); boolean isSingleValueChunk = (chunkLenNoMask <= SINGLE_VAL_CHUNK_LEN_NO_FLAG || chunkLenNoMask == 1 + bytesToUse); assert isSingleValueChunk; if (targetChunk == null) { return start; } maskedChunk[0] = start; updateTargetChunk(targetChunk, chunkMask, isSingleValueChunk); return unassigned; } private void updateTargetChunk(int[] targetChunk, long chunkMask, boolean singleValueChunk) { if (targetChunk == null) return; int j = 0; int opos = 0; while (chunkMask != 0) { if ((chunkMask & 1L) != 0) { targetChunk[opos] = maskedChunk[j]; if (!singleValueChunk) j++; } opos++; chunkMask >>>= 1; } } /** * Decode a stored chunk written with the {@link BitWriter}. * @param mp * @param targetChunk * @param chunkOffset * @param inBuf * @return */ private int decodeBits(long chunkMask, int[] targetChunk, int chunkOffset, ByteBuffer inBuf) { int flag1 = inBuf.get(); assert (flag1 & FLAG1_COMP_METHOD_BITS) != 0; int index = CHUNK_SIZE + 1; if (targetChunk == null) { // we only want to retrieve one value for the index index = countUnder(chunkMask, chunkOffset); } boolean useDict = (flag1 & FLAG1_DICTIONARY) != 0; int flag2 = inBuf.get(); int bits = (flag2 & FLAG2_BITS_FOR_VALS) + 1; int sign = 0; if ((flag2 & FLAG2_ALL_POSITIVE) != 0) sign = 1; else if ((flag2 & FLAG2_ALL_NEGATIVE) != 0) sign = -1; boolean dictSizeIs2 = (flag2 & FLAG2_DICT_SIZE_IS_2) != 0; assert bits >= 1; BitReader br; int bias = bias1; int val; // read first value int bytesFor1st = (flag1 & FLAG1_USED_BYTES_MASK) + 1; val = getVal(inBuf, bytesFor1st) + bias; bias = val; br = new BitReader(inBuf.array(), inBuf.position()); if (index == 0) return val; int dictSize = dictSizeIs2 ? 
2: 1; if (useDict && !dictSizeIs2) { dictSize = br.get(FLAG_BITS_FOR_DICT_SIZE) + 1; } int[] dict = new int[dictSize]; if (useDict) { dict[0] = val; for (int i = 1; i < dictSize; i++) { dict[i] = readVal(br, bits, sign) + bias; } } boolean useRLE = (flag1 & FLAG1_COMP_METHOD_RLE) != 0; int bitsForPos = bitsNeeded(dictSize - 1) - 1; if (targetChunk == null && !useRLE) { // shortcut: we can calculate the position of the value in the bit stream if (useDict) { br.skip((index-1) * bitsForPos); int dictPos = br.get(bitsForPos); return dict[dictPos]; } // unlikely int bitsToUse = bits - Math.abs(sign); br.skip((index-1) * bitsToUse); return readVal(br, bits, sign) + bias; } int runLength; int bitsForRLE = useRLE ? (flag1 & FLAG1_RUNLEN_MASK) >> 2 : 0; int mPos = 0; int dictPos = 0; int nVals = 0; int n = Long.bitCount(chunkMask); boolean readIndex = dictSize > 2 || !useRLE; while (true) { if (useRLE) { runLength = br.get(bitsForRLE) + 1; nVals += runLength; } else { nVals++; } if (index < nVals) return val; if (targetChunk != null) { do { maskedChunk[mPos++] = val; } while (mPos < nVals); } if (nVals >= n) break; if (useDict) { if (readIndex) { dictPos = br.get(bitsForPos); } else { dictPos = dictPos == 0 ? 1 : 0; } val = dict[dictPos]; } else { val = readVal(br, bits, sign) + bias; } } updateTargetChunk(targetChunk, chunkMask, false); return unassigned; } /** * Check if we already have a chunk for the given key. If no, * fill currentChunk with default value, else with the saved * chunk. * @param key the key for which we need the current chunk */ private void replaceCurrentChunk(long key) { saveCurrentChunk(); Arrays.fill(currentChunk, unassigned); oldModCount = modCount; currentChunkId = key & OLD_CHUNK_ID_MASK; decodeStoredChunk(key, currentChunk, -1); } /** * Returns the value to which the given key is mapped or the {@code unassigned} value. * @param key the key * @return the value to which the given key is mapped or the {@code unassigned} value */ public int get(long key){ long chunkId = key & OLD_CHUNK_ID_MASK; int chunkoffset = (int) (key & CHUNK_OFFSET_MASK); if (currentChunkId == chunkId) { return currentChunk[chunkoffset]; } return decodeStoredChunk(key, null, chunkoffset); } public void clear() { topMap = new Long2ObjectOpenHashMap<>(Hash.DEFAULT_INITIAL_SIZE, Hash.VERY_FAST_LOAD_FACTOR); Arrays.fill(currentChunk, 0); Arrays.fill(maskedChunk, 0); currentChunkId = INVALID_CHUNK_ID; currentMem = null; bias1 = null; size = 0; } public long size() { return size; } public int defaultReturnValue() { return unassigned; } public void defaultReturnValue(int arg0) { unassigned = arg0; } /** * calculate and print performance values regarding memory. */ public void stats(int msgLevel) { if (size() == 0){ System.out.println(dataDesc + " Map is empty"); return; } long totalBytes = (long) currentChunk.length * Integer.BYTES; long totalChunks = 1; // current chunk for (ChunkMem mem : topMap.values()) { totalChunks += mem.getChunkCount(); totalBytes += mem.estimatedBytes; } long bytesPerKey = Math.round((double) totalBytes / size()); System.out.println(dataDesc + " Map: " + Utils.format(size()) + " stored long/int pairs require ca. " + bytesPerKey + " bytes per pair. " + Utils.format(totalChunks) + " chunks are used, the avg. number of values in one " + CHUNK_SIZE + "-chunk is " + (totalChunks == 0 ? 
0 : (size() / totalChunks)) + "."); if (msgLevel >= 0) { String details = dataDesc + " Map details: ~" + bytesToMB(totalBytes) + ", including " + topMap.size() + " array(s) with " + bytesToMB((long) LARGE_VECTOR_SIZE * Integer.BYTES); System.out.println(details); } System.out.println(); } private static String bytesToMB (long bytes) { return ((bytes + (1 << 19)) >>> 20) + " MB"; } /* void test(){ int[] yVals = { 0, 1, 2, MAX_Y_VAL - 2, MAX_Y_VAL - 1, MAX_Y_VAL }; for (int z = 0; z < 64; z++){ for (int y : yVals){ for (int x=0; x < 64; x++){ int idx = (z & CHUNK_STORE_Z_MASK)<> CHUNK_STORE_Y_SHIFT) & CHUNK_STORE_Y_MASK; int z2 = (idx >> CHUNK_STORE_Z_SHIFT) & CHUNK_STORE_Z_MASK; assert x == x2; assert y == y2; assert z == z2; } } } } */ } splitter-r653/src/uk/me/parabola/splitter/writer/0000775000175300017530000000000014352507254023310 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/writer/AbstractOSMWriter.java0000664000175300017530000000460114352507254027473 0ustar builderbuilder00000000000000/* * Copyright (c) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.writer; import java.awt.Rectangle; import java.io.File; import java.io.IOException; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Element; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Way; public abstract class AbstractOSMWriter implements OSMWriter{ public static final int REMOVE_VERSION = 1; public static final int FAKE_VERSION = 2; public static final int KEEP_VERSION = 3; protected final Area bounds; protected final Area extendedBounds; protected final File outputDir; protected final int mapId; protected final Rectangle bbox; protected int versionMethod; protected AbstractOSMWriter(Area bounds, File outputDir, int mapId, int extra) { this.mapId = mapId; this.bounds = bounds; this.outputDir = outputDir; extendedBounds = new Area(bounds.getMinLat() - extra, bounds.getMinLong() - extra, bounds.getMaxLat() + extra, bounds.getMaxLong() + extra); this.bbox = Utils.area2Rectangle(bounds, 1); } public void setVersionMethod (int versionMethod){ this.versionMethod = versionMethod; } protected int getWriteVersion (Element el){ if (versionMethod == REMOVE_VERSION) return 0; if (versionMethod == FAKE_VERSION) return 1; // XXX maybe return 1 if no version was read ? 
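		// Summary of the three modes (descriptive comment only, behaviour unchanged):
		//   REMOVE_VERSION -> 0 (the writers then omit the version attribute)
		//   FAKE_VERSION   -> 1 for every element
		//   KEEP_VERSION   -> the version read from the input (next line)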
return el.getVersion(); } @Override public Area getBounds() { return bounds; } @Override public Area getExtendedBounds() { return extendedBounds; } @Override public int getMapId(){ return mapId; } @Override public Rectangle getBBox(){ return bbox; } @Override public void write (Element element) throws IOException { if (element instanceof Node) { write((Node) element); } else if (element instanceof Way) { write((Way) element); } else if (element instanceof Relation) { write((Relation) element); } } } splitter-r653/src/uk/me/parabola/splitter/writer/BinaryMapWriter.java0000664000175300017530000003530314352507254027236 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Francisco Moraes * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.writer; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Iterator; import java.util.List; import java.util.Locale; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Element; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Version; import uk.me.parabola.splitter.Way; import uk.me.parabola.splitter.Relation.Member; import crosby.binary.BinarySerializer; import crosby.binary.Osmformat; import crosby.binary.StringTable; import crosby.binary.Osmformat.DenseInfo; import crosby.binary.Osmformat.Relation.MemberType; import crosby.binary.file.BlockOutputStream; import crosby.binary.file.FileBlock; public class BinaryMapWriter extends AbstractOSMWriter { protected PBFSerializer serializer; private BlockOutputStream output; protected boolean useDense = true; protected boolean headerWritten = false; private class PBFSerializer extends BinarySerializer { public PBFSerializer(BlockOutputStream output) { super(output); configBatchLimit(1000); // omit_metadata = true; } /** * Base class containing common code needed for serializing each type of * primitives. */ private abstract class Prim { /** Queue that tracks the list of all primitives. */ ArrayList contents = new ArrayList<>(); /** * Add to the queue. * * @param item * The entity to add */ public void add(T item) { contents.add(item); } /** * Add all of the tags of all entities in the queue to the string * table. */ public void addStringsToStringtable() { StringTable stable = getStringTable(); for (T i : contents) { Iterator tags = i.tagsIterator(); while (tags.hasNext()) { Element.Tag tag = tags.next(); stable.incr(tag.getKey()); stable.incr(tag.getValue()); } if (!omit_metadata) { // stable.incr(i.getUser().getName()); } } } // private static final int MAXWARN = 100; public void serializeMetadataDense(DenseInfo.Builder b, List entities) { if (omit_metadata) { return; } // long lasttimestamp = 0, lastchangeset = 0; // int lastuserSid = 0, lastuid = 0; // StringTable stable = serializer.getStringTable(); // for(Element e : entities) { // // if(e.getUser() == OsmUser.NONE && warncount < MAXWARN) { // LOG // .warning("Attention: Data being output lacks metadata. 
Please // use omitmetadata=true"); // warncount++; // } // int uid = e.getUser().getId(); // int userSid = stable.getIndex(e.getUser().getName()); // int timestamp = (int)(e.getTimestamp().getTime() / // date_granularity); // int version = e.getVersion(); // long changeset = e.getChangesetId(); // // b.addVersion(version); // b.addTimestamp(timestamp - lasttimestamp); // lasttimestamp = timestamp; // b.addChangeset(changeset - lastchangeset); // lastchangeset = changeset; // b.addUid(uid - lastuid); // lastuid = uid; // b.addUserSid(userSid - lastuserSid); // lastuserSid = userSid; // } for (Element e : entities) { int version = getWriteVersion(e); if (versionMethod != KEEP_VERSION || version == 0) version = 1; // JOSM requires a fake version b.addVersion(version); b.addTimestamp(0); b.addChangeset(0); b.addUid(0); b.addUserSid(0); } } public Osmformat.Info.Builder serializeMetadata(Element e) { // StringTable stable = serializer.getStringTable(); Osmformat.Info.Builder b = Osmformat.Info.newBuilder(); // if(!omit_metadata) { // if(e.getUser() == OsmUser.NONE && warncount < MAXWARN) { // LOG // .warning("Attention: Data being output lacks metadata. Please // use omitmetadata=true"); // warncount++; // } // if(e.getUser() != OsmUser.NONE) { // b.setUid(e.getUser().getId()); // b.setUserSid(stable.getIndex(e.getUser().getName())); // } // b.setTimestamp((int)(e.getTimestamp().getTime() / // date_granularity)); // b.setVersion(e.getVersion()); // b.setChangeset(e.getChangesetId()); // } if (versionMethod != REMOVE_VERSION) { int version = getWriteVersion(e); if (version != 0) { b.setVersion(version); b.setTimestamp(0); b.setChangeset(0); b.setUid(0); b.setUserSid(0); } } return b; } } private class NodeGroup extends Prim implements PrimGroupWriterInterface { @Override public Osmformat.PrimitiveGroup serialize() { if (useDense) return serializeDense(); return serializeNonDense(); } /** * Serialize all nodes in the 'dense' format. */ public Osmformat.PrimitiveGroup serializeDense() { if (contents.isEmpty()) { return null; } // System.out.format("%d Dense ",nodes.size()); Osmformat.PrimitiveGroup.Builder builder = Osmformat.PrimitiveGroup.newBuilder(); StringTable stable = serializer.getStringTable(); long lastlat = 0, lastlon = 0, lastid = 0; Osmformat.DenseNodes.Builder bi = Osmformat.DenseNodes.newBuilder(); boolean doesBlockHaveTags = false; // Does anything in this block have tags? for (Node i : contents) { doesBlockHaveTags = doesBlockHaveTags || (i.tagsIterator().hasNext()); } if (!omit_metadata) { Osmformat.DenseInfo.Builder bdi = Osmformat.DenseInfo.newBuilder(); serializeMetadataDense(bdi, contents); bi.setDenseinfo(bdi); } for (Node i : contents) { long id = i.getId(); int lat = mapDegrees(i.getLat()); int lon = mapDegrees(i.getLon()); bi.addId(id - lastid); lastid = id; bi.addLon(lon - lastlon); lastlon = lon; bi.addLat(lat - lastlat); lastlat = lat; // Then we must include tag information. if (doesBlockHaveTags) { Iterator tags = i.tagsIterator(); while (tags.hasNext()) { Element.Tag t = tags.next(); bi.addKeysVals(stable.getIndex(t.getKey())); bi.addKeysVals(stable.getIndex(t.getValue())); } bi.addKeysVals(0); // Add delimiter. } } builder.setDense(bi); return builder.build(); } /** * Serialize all nodes in the non-dense format. * * @param parentbuilder * Add to this PrimitiveBlock. 
*/ public Osmformat.PrimitiveGroup serializeNonDense() { if (contents.isEmpty()) { return null; } // System.out.format("%d Nodes ",nodes.size()); StringTable stable = serializer.getStringTable(); Osmformat.PrimitiveGroup.Builder builder = Osmformat.PrimitiveGroup.newBuilder(); for (Node i : contents) { long id = i.getId(); int lat = mapDegrees(i.getLat()); int lon = mapDegrees(i.getLon()); Osmformat.Node.Builder bi = Osmformat.Node.newBuilder(); bi.setId(id); bi.setLon(lon); bi.setLat(lat); Iterator tags = i.tagsIterator(); while (tags.hasNext()) { Element.Tag t = tags.next(); bi.addKeys(stable.getIndex(t.getKey())); bi.addVals(stable.getIndex(t.getValue())); } if (!omit_metadata) { bi.setInfo(serializeMetadata(i)); } builder.addNodes(bi); } return builder.build(); } } private class WayGroup extends Prim implements PrimGroupWriterInterface { @Override public Osmformat.PrimitiveGroup serialize() { if (contents.isEmpty()) { return null; } // System.out.format("%d Ways ",contents.size()); StringTable stable = serializer.getStringTable(); Osmformat.PrimitiveGroup.Builder builder = Osmformat.PrimitiveGroup.newBuilder(); for (Way i : contents) { Osmformat.Way.Builder bi = Osmformat.Way.newBuilder(); bi.setId(i.getId()); long lastid = 0; for (long j : i.getRefs()) { long id = j; bi.addRefs(id - lastid); lastid = id; } Iterator tags = i.tagsIterator(); while (tags.hasNext()) { Element.Tag t = tags.next(); bi.addKeys(stable.getIndex(t.getKey())); bi.addVals(stable.getIndex(t.getValue())); } if (!omit_metadata) { bi.setInfo(serializeMetadata(i)); } builder.addWays(bi); } return builder.build(); } } private class RelationGroup extends Prim implements PrimGroupWriterInterface { @Override public void addStringsToStringtable() { StringTable stable = serializer.getStringTable(); super.addStringsToStringtable(); for (Relation i : contents) { for (Member j : i.getMembers()) { stable.incr(j.getRole()); } } } @Override public Osmformat.PrimitiveGroup serialize() { if (contents.isEmpty()) { return null; } // System.out.format("%d Relations ",contents.size()); StringTable stable = serializer.getStringTable(); Osmformat.PrimitiveGroup.Builder builder = Osmformat.PrimitiveGroup.newBuilder(); for (Relation i : contents) { Osmformat.Relation.Builder bi = Osmformat.Relation.newBuilder(); bi.setId(i.getId()); Member[] arr = new Member[i.getMembers().size()]; i.getMembers().toArray(arr); long lastid = 0; for (Member j : i.getMembers()) { long id = j.getRef(); bi.addMemids(id - lastid); lastid = id; if ("node".equals(j.getType())) { bi.addTypes(MemberType.NODE); } else if ("way".equals(j.getType())) { bi.addTypes(MemberType.WAY); } else if ("relation".equals(j.getType())) { bi.addTypes(MemberType.RELATION); } else { assert (false); // Software bug: Unknown entity. } bi.addRolesSid(stable.getIndex(j.getRole())); } Iterator tags = i.tagsIterator(); while (tags.hasNext()) { Element.Tag t = tags.next(); bi.addKeys(stable.getIndex(t.getKey())); bi.addVals(stable.getIndex(t.getValue())); } if (!omit_metadata) { bi.setInfo(serializeMetadata(i)); } builder.addRelations(bi); } return builder.build(); } } /* One list for each type */ protected WayGroup ways; protected NodeGroup nodes; protected RelationGroup relations; protected Processor processor = new Processor(); /** * Buffer up events into groups that are all of the same type, or all of * the same length, then process each buffer. */ public class Processor { /** * Check if we've reached the batch size limit and process the batch * if we have. 
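	 * (The batch limit checked here is set via {@code configBatchLimit(1000)} in the
	 * {@link PBFSerializer} constructor above.)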
*/ public void checkLimit() { total_entities++; if (++batch_size < batch_limit) { return; } switchTypes(); processBatch(); } public void process(Node node) { if (nodes == null) { writeEmptyHeaderIfNeeded(); // Need to switch types. switchTypes(); nodes = new NodeGroup(); } nodes.add(node); checkLimit(); } public void process(Way way) { if (ways == null) { writeEmptyHeaderIfNeeded(); switchTypes(); ways = new WayGroup(); } ways.add(way); checkLimit(); } public void process(Relation relation) { if (relations == null) { writeEmptyHeaderIfNeeded(); switchTypes(); relations = new RelationGroup(); } relations.add(relation); checkLimit(); } } /** * At the end of this function, all of the lists of unprocessed 'things' * must be null */ protected void switchTypes() { if (nodes != null) { groups.add(nodes); nodes = null; } else if (ways != null) { groups.add(ways); ways = null; } else if (relations != null) { groups.add(relations); relations = null; } else { // No data. Is this an empty file? } } /** Write empty header block when there's no bounds entity. */ public void writeEmptyHeaderIfNeeded() { if (headerWritten) { return; } Osmformat.HeaderBlock.Builder headerblock = Osmformat.HeaderBlock.newBuilder(); finishHeader(headerblock); } } public BinaryMapWriter(Area bounds, File outputDir, int mapId, int extra) { super(bounds, outputDir, mapId, extra); } @Override public void initForWrite() { String filename = String.format(Locale.ROOT, "%08d.osm.pbf", mapId); try { output = new BlockOutputStream(new FileOutputStream(new File(outputDir, filename))); serializer = new PBFSerializer(output); writeHeader(); } catch (IOException e) { System.out.println("Could not open or write file header. Reason: " + e.getMessage()); throw new RuntimeException(e); } } private void writeHeader() { Osmformat.HeaderBlock.Builder headerblock = Osmformat.HeaderBlock.newBuilder(); Osmformat.HeaderBBox.Builder pbfBbox = Osmformat.HeaderBBox.newBuilder(); pbfBbox.setLeft(serializer.mapRawDegrees(Utils.toDegrees(bounds.getMinLong()))); pbfBbox.setBottom(serializer.mapRawDegrees(Utils.toDegrees(bounds.getMinLat()))); pbfBbox.setRight(serializer.mapRawDegrees(Utils.toDegrees(bounds.getMaxLong()))); pbfBbox.setTop(serializer.mapRawDegrees(Utils.toDegrees(bounds.getMaxLat()))); headerblock.setBbox(pbfBbox); finishHeader(headerblock); } /** * Write the header fields that are always needed. * * @param headerblock * Incomplete builder to complete and write. 
*/ public void finishHeader(Osmformat.HeaderBlock.Builder headerblock) { headerblock.setWritingprogram("splitter-r" + Version.VERSION); headerblock.addRequiredFeatures("OsmSchema-V0.6"); if (useDense) { headerblock.addRequiredFeatures("DenseNodes"); } Osmformat.HeaderBlock message = headerblock.build(); try { output.write(FileBlock.newInstance("OSMHeader", message.toByteString(), null)); } catch (IOException e) { throw new RuntimeException("Unable to write OSM header.", e); } headerWritten = true; } @Override public void finishWrite() { try { serializer.switchTypes(); serializer.processBatch(); serializer.close(); serializer = null; } catch (IOException e) { System.out.println("Could not write end of file: " + e); } } @Override public void write(Node node) { serializer.processor.process(node); } @Override public void write(Way way) { serializer.processor.process(way); } @Override public void write(Relation relation) { serializer.processor.process(relation); } } splitter-r653/src/uk/me/parabola/splitter/writer/O5mMapWriter.java0000664000175300017530000004061714352507254026456 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.writer; import java.io.BufferedOutputStream; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.Locale; import java.util.Map; import it.unimi.dsi.fastutil.longs.LongArrayList; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Element; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Relation.Member; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Way; /** * Implements the needed methods to write the result in the o5m format. * The routines are based on the osmconvert.c source from Markus Weber who allows * to copy them for any o5m IO, thanks a lot for that. 
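 * <p>Sketch of the dataset framing produced by {@code writeDataset()} below (the
 * 0xff reset and 0xfe end-of-data markers are omitted here):
 * <pre>{@code
 * type byte : 0x10 node, 0x11 way, 0x12 relation
 * length    : payload size in bytes, written by writeUnsignedNum()
 * payload   : delta-encoded id, version info, body (lon/lat, refs or members), tags
 * }</pre>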
* * @author GerdP * */ public class O5mMapWriter extends AbstractOSMWriter{ // O5M data set constants private static final int NODE_DATASET = 0x10; private static final int WAY_DATASET = 0x11; private static final int REL_DATASET = 0x12; private static final int BBOX_DATASET = 0xdb; //private static final int TIMESTAMP_DATASET = 0xdc; private static final int HEADER_DATASET = 0xe0; private static final int EOD_FLAG = 0xfe; private static final int RESET_FLAG = 0xff; private static final int STW__TAB_MAX = 15000; // this is defined in the o5m format private static final int STW_HASH_TAB_MAX = 30011; // (preferably a prime number) private static final int STW_TAB_STR_MAX = 250;// this is defined in the o5m format private static final String[] REL_REF_TYPES = {"0","1","2"}; private static final double FACTOR = 10000000; private OutputStream os; private byte[][][] stw__tab; // string table private byte[] s1Bytes; private byte[] s2Bytes; // for delta calculations private long lastNodeId; private long lastWayId; private long lastRelId; private long[] lastRef; private int lastLon,lastLat; private int lastWrittenDatasetType = 0; // index of last entered element in string table private short stw__tabi= 0; // has table; elements point to matching strings in stw__tab[] // -1: no matching element private short[] stw__hashtab; // for to chaining of string table rows which match // the same hash value; matching rows are chained in a loop // if there is only one row matching, it will point to itself private short[] stw__tabprev; private short[] stw__tabnext; // has value of this element as a link back to the hash table // a -1 element indicates that the string table entry is not used private short[] stw__tabhash; private byte[] numberConversionBuf; private static final Map wellKnownTagKeys = new HashMap<>(60, 0.25f); private static final Map wellKnownTagVals = new HashMap<>(20, 0.25f); static { try { for (String s : Arrays.asList( "1", "1outer", "1inner", "type", // relation specific // 50 most often used keys (taken from taginfo 2016-11-20) "building", "source", "highway", "addr:housenumber", "addr:street", "name", "addr:city", "addr:postcode", "natural", "source:date", "addr:country", "landuse", "surface", "created_by", "power", "tiger:cfcc", "waterway", "tiger:county", "start_date", "tiger:reviewed", "wall", "amenity", "oneway", "ref:bag", "ref", "attribution", "tiger:name_base", "building:levels", "maxspeed", "barrier", "tiger:name_type", "height", "service", "source:addr", "tiger:tlid", "tiger:source", "lanes", "access", "addr:place", "tiger:zip_left", "tiger:upload_uuid", "layer", "tracktype", "ele", "tiger:separated", "tiger:zip_right", "yh:WIDTH", "place", "foot" )) { wellKnownTagKeys.put(s, s.getBytes(StandardCharsets.UTF_8)); } for (String s : Arrays.asList( "yes", "no", "residential", "garage", "water", "tower", "footway", "Bing", "PGS", "private", "stream", "service", "house", "unclassified", "track", "traffic_signals","restaurant","entrance" )) { wellKnownTagVals.put(s, s.getBytes(StandardCharsets.UTF_8)); } } catch (Exception e) { // should not happen } } //private long countCollisions; public O5mMapWriter(Area bounds, File outputDir, int mapId, int extra) { super(bounds, outputDir, mapId, extra); } private void reset() throws IOException{ os.write(RESET_FLAG); resetVars(); } /** reset the delta values and string table */ private void resetVars(){ lastNodeId = 0; lastWayId = 0; lastRelId = 0; lastRef[0] = 0; lastRef[1] = 0;lastRef[2] = 0; lastLon = 0; lastLat = 0; stw__tab = new 
byte[2][STW__TAB_MAX][]; stw_reset(); } @Override public void initForWrite() { // has table; elements point to matching strings in stw__tab[] // -1: no matching element stw__hashtab = new short[STW_HASH_TAB_MAX]; // for to chaining of string table rows which match // the same hash value; matching rows are chained in a loop // if there is only one row matching, it will point to itself stw__tabprev = new short[STW__TAB_MAX]; stw__tabnext = new short[STW__TAB_MAX]; // has value of this element as a link back to the hash table // a -1 element indicates that the string table entry is not used stw__tabhash = new short[STW__TAB_MAX]; lastRef = new long[3]; numberConversionBuf = new byte[60]; resetVars(); String filename = String.format(Locale.ROOT, "%08d.o5m", mapId); try { os = new BufferedOutputStream(new FileOutputStream(new File(outputDir, filename))); os.write(RESET_FLAG); writeHeader(); writeBBox(); } catch (IOException e) { System.out.println("Could not open or write file header. Reason: " + e.getMessage()); throw new RuntimeException(e); } } private void writeHeader() throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream(); byte[] id = {'o','5','m','2'}; stream.write(id); writeDataset(HEADER_DATASET,stream); } private void writeBBox() throws IOException { ByteArrayOutputStream stream = new ByteArrayOutputStream(); writeSignedNum((long)(Utils.toDegrees(bounds.getMinLong()) * FACTOR), stream); writeSignedNum((long)(Utils.toDegrees(bounds.getMinLat()) * FACTOR), stream); writeSignedNum((long)(Utils.toDegrees(bounds.getMaxLong()) * FACTOR), stream); writeSignedNum((long)(Utils.toDegrees(bounds.getMaxLat()) * FACTOR), stream); writeDataset(BBOX_DATASET,stream); } private void writeDataset(int fileType, ByteArrayOutputStream stream) throws IOException { os.write(fileType); writeUnsignedNum(stream.size(), os); stream.writeTo(os); lastWrittenDatasetType = fileType; } @Override public void finishWrite() { try { os.write(EOD_FLAG); os.close(); stw__hashtab = null; stw__tabprev = null; stw__tabnext = null; stw__tabhash = null; lastRef = null; numberConversionBuf = null; stw__tab = null; //System.out.println(mapId + " collisions=" + Utils.format(countCollisions)); } catch (IOException e) { System.out.println("Could not write end of file: " + e); } } @Override public void write(Node node) throws IOException { if (lastWrittenDatasetType != NODE_DATASET){ reset(); } ByteArrayOutputStream stream = new ByteArrayOutputStream(); long delta = node.getId() - lastNodeId; lastNodeId = node.getId(); writeSignedNum(delta, stream); writeVersion(node, stream); int o5Lon = (int)(node.getLon() * FACTOR); int o5Lat = (int)(node.getLat() * FACTOR); int deltaLon = o5Lon - lastLon; lastLon = o5Lon; int deltaLat = o5Lat - lastLat; lastLat = o5Lat; writeSignedNum(deltaLon, stream); writeSignedNum(deltaLat, stream); writeTags(node, stream); writeDataset(NODE_DATASET,stream); } @Override public void write(Way way) throws IOException { if (lastWrittenDatasetType != WAY_DATASET){ reset(); } ByteArrayOutputStream stream = new ByteArrayOutputStream(); long delta = way.getId() - lastWayId; lastWayId = way.getId(); writeSignedNum(delta, stream); writeVersion(way, stream); ByteArrayOutputStream refStream = new ByteArrayOutputStream(); LongArrayList refs = way.getRefs(); int numRefs = refs.size(); for (int i = 0; i < numRefs; i++){ long ref = refs.getLong(i); delta = ref - lastRef[0]; lastRef[0] = ref; writeSignedNum(delta, refStream); } writeUnsignedNum(refStream.size(),stream); refStream.writeTo(stream); 
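// the length-prefixed, delta-coded node references are followed by the tags; the complete record is then emitted as a WAY dataset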
writeTags(way, stream); writeDataset(WAY_DATASET,stream); } @Override public void write(Relation rel) throws IOException { if (lastWrittenDatasetType != REL_DATASET){ reset(); } ByteArrayOutputStream stream = new ByteArrayOutputStream(256); long delta = rel.getId() - lastRelId; lastRelId = rel.getId(); writeSignedNum(delta, stream); writeVersion(rel, stream); ByteArrayOutputStream memStream = new ByteArrayOutputStream(256); for (Member mem: rel.getMembers()){ writeRelRef(mem, memStream); } writeUnsignedNum(memStream.size(),stream); memStream.writeTo(stream); writeTags(rel, stream); writeDataset(REL_DATASET,stream); } private void writeRelRef(Member mem, ByteArrayOutputStream memStream) throws IOException { int refType = 0; String type = mem.getType(); if ("node".equals(type)) refType = 0; else if ("way".equals(type)) refType = 1; else if ("relation".equals(type)) refType = 2; else { assert (false); // Software bug: Unknown entity. } long delta = mem.getRef() - lastRef[refType]; lastRef[refType] = mem.getRef(); writeSignedNum(delta, memStream); stw_write(REL_REF_TYPES[refType] + mem.getRole(), null, memStream); } private void writeVersion (Element element, OutputStream stream) throws IOException { if (versionMethod == REMOVE_VERSION){ stream.write(0x00); // no version return; } int version = 1; if (versionMethod == KEEP_VERSION) version = element.getVersion(); if (version != 0){ writeUnsignedNum(version, stream); } stream.write(0x00); // no author or time-stamp info } private void writeTags(Element element, OutputStream stream) throws IOException { if (!element.hasTags()) return; Iterator it = element.tagsIterator(); while (it.hasNext()) { Element.Tag entry = it.next(); stw_write(entry.key, entry.value, stream); } } private void stw_write(String s1, String s2, OutputStream stream) throws IOException { int hash; int ref; s1Bytes = wellKnownTagKeys.get(s1); if (s1Bytes == null){ s1Bytes = s1.getBytes(StandardCharsets.UTF_8); } if (s2 != null){ s2Bytes = wellKnownTagVals.get(s2); if (s2Bytes == null){ s2Bytes= s2.getBytes(StandardCharsets.UTF_8); } } else s2Bytes = null; // try to find a matching string (pair) in string table { int i; // index in stw__tab[] ref = -1; // ref invalid (default) hash = stw_hash(s1,s2); if (hash >= 0){ i = stw__hashtab[hash]; if(i >= 0) // string (pair) presumably stored already ref = stw__getref(i); } // end string (pair) short enough for the string table if(ref >= 0) { // we found the string (pair) in the table writeUnsignedNum(ref, stream); // write just the reference return; } // end we found the string (pair) in the table // write string data stream.write(0x00); stream.write(s1Bytes); stream.write(0x00); if (s2Bytes != null){ stream.write(s2Bytes); stream.write(0x00); } if(hash < 0){ // string (pair) too long, // cannot be stored in string table return; } } // end try to find a matching string (pair) in string table // here: there is no matching string (pair) in the table // free new element - if still being used { int h0; // hash value of old element h0 = stw__tabhash[stw__tabi]; if(h0 >= 0) { // new element in string table is still being used // delete old element if(stw__tabnext[stw__tabi] == stw__tabi) // self-chain, i.e., only this element stw__hashtab[h0]= -1; // invalidate link in hash table else { // one or more other elements in chain stw__hashtab[h0] = stw__tabnext[stw__tabi]; // just to ensure // that hash entry does not point to deleted element // now unchain deleted element stw__tabprev[stw__tabnext[stw__tabi]]= stw__tabprev[stw__tabi]; 
stw__tabnext[stw__tabprev[stw__tabi]]= stw__tabnext[stw__tabi]; } // end one or more other elements in chain } // end next element in string table is still being used } // end free new element - if still being used // enter new string table element data { int i; stw__tab[0][stw__tabi] = s1Bytes; stw__tab[1][stw__tabi] = s2Bytes; i = stw__hashtab[hash]; if(i < 0) // no reference in hash table until now stw__tabprev[stw__tabi] = stw__tabnext[stw__tabi] = stw__tabi; // self-link the new element else { // there is already a reference in hash table // in-chain the new element stw__tabnext[stw__tabi] = (short) i; stw__tabprev[stw__tabi] = stw__tabprev[i]; stw__tabnext[stw__tabprev[stw__tabi]] = stw__tabi; stw__tabprev[i] = stw__tabi; //countCollisions++; } stw__hashtab[hash] = stw__tabi; // link the new element to hash table stw__tabhash[stw__tabi] = (short) hash; // backlink to hash table element // new element now in use; set index to oldest element if (++stw__tabi >= STW__TAB_MAX) { // index overflow stw__tabi= 0; // restart index } // end index overflow } // end enter new string table element data } int stw__getref(final int stri) { int strie; // index of last occurrence int ref; strie= stri; int pos = stri; do{ // compare the string (pair) with the tab entry byte[] tb1 = stw__tab[0][pos]; if (Arrays.equals(tb1, s1Bytes)){ // first string equal to first string in table byte[] tb2 = stw__tab[1][pos]; if (Arrays.equals(tb2, s2Bytes)){ // second string equal to second string in table ref = stw__tabi - pos; if (ref <= 0) ref += STW__TAB_MAX; return ref; } } pos = stw__tabnext[pos]; } while(pos!=strie); return -1; } void stw_reset() { // clear string table and string hash table; // must be called before any other procedure of this module // and may be called every time the string processing shall // be restarted; stw__tabi = 0; Arrays.fill(stw__tabhash, (short)-1); Arrays.fill(stw__hashtab, (short)-1); } /** * get hash value of a string pair * @param s2 * @param s1 * @return hash value in the range 0..(STW__TAB_MAX-1) * or -1 if the strings are longer than STW_TAB_STR_MAX bytes in total */ private int stw_hash(String s1, String s2) { int len = s1Bytes.length; if (s2Bytes != null) len += s2Bytes.length; if (len > STW_TAB_STR_MAX) return -1; int hash = s1.hashCode(); if (s2 != null) hash ^= s2.hashCode(); return Math.abs(hash % STW__TAB_MAX); } private int writeUnsignedNum(int number, OutputStream stream)throws IOException { int num = number; int cntBytes = 0; int part = num & 0x7f; if (part == num){ // just one byte stream.write(part); return 1; } do{ numberConversionBuf[cntBytes++] = (byte)(part | 0x80); num >>= 7; part = num & 0x7f; } while(part != num); numberConversionBuf[cntBytes++] = (byte)(part); stream.write(numberConversionBuf,0,cntBytes); return cntBytes; } private int writeSignedNum(long num, OutputStream stream)throws IOException { int cntBytes = 0; // write a long as signed varying integer. 
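// negative values are mapped to odd, non-negative values to even numbers before the 7-bit chunks are emitted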
// return: bytes written long u; int part; if (num < 0){ u = -num; u= (u<<1)-1; } else{ u= num<<1; } part = (int)(u & 0x7f); if(part == u) { // just one byte stream.write(part); return 1; } do { numberConversionBuf[cntBytes++] = (byte)(part | 0x80); u >>>= 7; part = (int)(u & 0x7f); } while(part !=u); numberConversionBuf[cntBytes++] = (byte)(part); stream.write(numberConversionBuf,0,cntBytes); return cntBytes; } } splitter-r653/src/uk/me/parabola/splitter/writer/OSMWriter.java0000664000175300017530000000271414352507254026012 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.writer; import java.awt.Rectangle; import java.io.IOException; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Element; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Way; public interface OSMWriter { /** * @return the bounds of the area (excluding the overlap) */ public Area getBounds(); /** * @return the bounds of the area (including the overlap) */ public Area getExtendedBounds(); public Rectangle getBBox(); public int getMapId(); /** * open output file, allocate buffers etc. */ public abstract void initForWrite(); /** * close output file, free resources */ public abstract void finishWrite(); public abstract void write(Node node) throws IOException; public abstract void write(Way way) throws IOException; public abstract void write(Relation rel) throws IOException; public abstract void write(Element el) throws IOException; } splitter-r653/src/uk/me/parabola/splitter/writer/OSMXMLWriter.java0000664000175300017530000001604014352507254026370 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.writer; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.Writer; import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; import java.text.DecimalFormatSymbols; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.zip.GZIPOutputStream; import it.unimi.dsi.fastutil.longs.LongArrayList; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Element; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Utils; import uk.me.parabola.splitter.Way; public class OSMXMLWriter extends AbstractOSMWriter{ private final DecimalFormat numberFormat = new DecimalFormat( "0.#######;-0.#######", new DecimalFormatSymbols(Locale.US) ); private Writer writer; public OSMXMLWriter(Area bounds, File outputDir, int mapId, int extra) { super(bounds, outputDir, mapId, extra); } @Override public void initForWrite() { String filename = String.format(Locale.ROOT, "%08d.osm.gz", mapId); try { FileOutputStream fos = new FileOutputStream(new File(outputDir, filename)); OutputStream zos = new GZIPOutputStream(fos); writer = new OutputStreamWriter(zos, StandardCharsets.UTF_8); writeHeader(); } catch (IOException e) { System.out.println("Could not open or write file header. Reason: " + e.getMessage()); throw new RuntimeException(e); } } private void writeHeader() throws IOException { writeString("\n"); String apiVersion = (versionMethod == REMOVE_VERSION) ? "version='0.5'" : "version='0.6'"; writeString("\n"); writeString("\n"); } @Override public void finishWrite() { try { writeString("\n"); flush(); writer.close(); writer = null; } catch (IOException e) { System.out.println("Could not write end of file: " + e); } } @Override public void write(Node node) throws IOException { writeString("\n"); writeTags(node); writeString("\n"); } else { writeString("'/>\n"); } } @Override public void write(Way way) throws IOException { writeString("\n"); LongArrayList refs = way.getRefs(); for (int i = 0; i < refs.size(); i++) { writeString("\n"); } if (way.hasTags()) writeTags(way); writeString("\n"); } @Override public void write(Relation rel) throws IOException { writeString("\n"); List memlist = rel.getMembers(); for (Relation.Member m : memlist) { if (m.getType() == null || m.getRef() == 0) { System.err.println("Invalid relation member found in relation " + rel.getId() + ": member type=" + m.getType() + ", ref=" + m.getRef() + ", role=" + m.getRole() + ". 
Ignoring this member"); continue; } writeString("\n"); } if (rel.hasTags()) writeTags(rel); writeString("\n"); } private void writeTags(Element element) throws IOException { Iterator it = element.tagsIterator(); while (it.hasNext()) { Element.Tag entry = it.next(); writeString("\n"); } } private void writeAttribute(String value) throws IOException { for (int i = 0; i < value.length(); i++) { char c = value.charAt(i); switch (c) { case '\'': writeString("'"); break; case '&': writeString("&"); break; case '<': writeString("<"); break; case '\n': writeString(" "); break; case '\r': writeString(" "); break; case '\t': writeString(" "); break; default: writeChar(c); } } } private int index; private final char[] charBuf = new char[4096]; private void checkFlush(int i) throws IOException { if (charBuf.length - index < i) { flush(); } } private void flush() throws IOException { writer.write(charBuf, 0, index); index = 0; } private void writeString(String value) throws IOException { int start = 0; int end = value.length(); int len; while ((len = charBuf.length - index) < end - start) { value.getChars(start, start + len, charBuf, index); start += len; index = charBuf.length; flush(); } value.getChars(start, end, charBuf, index); index += end - start; } /** Write a double to full precision */ private void writeLongDouble(double value) throws IOException { checkFlush(22); writeString(Double.toString(value)); } /** * Write a double truncated to OSM's 7 digits of precision */ private void writeDouble(double value) throws IOException { checkFlush(22); // Punt on some annoying specialcases if (value < -200 || value > 200 || (value > -1 && value < 1)) writeString(numberFormat.format(value)); else { if (value < 0) { charBuf[index++] = '-'; // Write directly. value = -value; } int val = (int) Math.round(value * 10000000); StringBuilder s = new StringBuilder(Integer.toString(val)); s.insert(s.length() - 7, '.'); writeString(s.toString()); } } private void writeLong(long value) throws IOException { checkFlush(20); writeString(Long.toString(value)); } private void writeChar(char value) throws IOException { checkFlush(1); charBuf[index++] = value; } } splitter-r653/src/uk/me/parabola/splitter/writer/PseudoOSMWriter.java0000664000175300017530000000225114352507254027166 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.writer; import uk.me.parabola.splitter.Area; import uk.me.parabola.splitter.Node; import uk.me.parabola.splitter.Relation; import uk.me.parabola.splitter.Way; /** * A do-nothing writer (used with --output=simulate) * @author Gerd Petermann * */ public class PseudoOSMWriter extends AbstractOSMWriter{ public PseudoOSMWriter(Area bounds) { // no overlap for pseudo writers ! 
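// all write methods below are no-ops, so no output directory is required either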
super(bounds, null, bounds.getMapId(), 0); } @Override public void write(Relation rel) {} @Override public void write(Way way) {} @Override public void write(Node node) {} @Override public void initForWrite() {} @Override public void finishWrite() {} } splitter-r653/src/uk/me/parabola/splitter/xml/0000775000175300017530000000000014352507254022574 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/xml/parser/0000775000175300017530000000000014352507254024070 5ustar builderbuilder00000000000000splitter-r653/src/uk/me/parabola/splitter/xml/parser/AbstractXppParser.java0000664000175300017530000000512614352507254030347 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter.xml.parser; import java.io.IOException; import java.io.Reader; import org.xmlpull.v1.XmlPullParser; import org.xmlpull.v1.XmlPullParserException; import org.xmlpull.v1.XmlPullParserFactory; /** * Base functionality for an XPP based XML parser */ public abstract class AbstractXppParser { private final XmlPullParser parser; public AbstractXppParser() throws XmlPullParserException { XmlPullParserFactory factory = XmlPullParserFactory.newInstance(System.getProperty(XmlPullParserFactory.PROPERTY_NAME), null); parser = factory.newPullParser(); } public void setReader(Reader reader) throws XmlPullParserException { parser.setInput(reader); } protected String getAttr(String name) { return parser.getAttributeValue(null, name); } protected int getIntAttr(String name) { return Integer.parseInt(parser.getAttributeValue(null, name)); } protected long getLongAttr(String name) { return Long.parseLong(parser.getAttributeValue(null, name)); } protected String getTextContent() { return parser.getText(); } public void parse() throws IOException, XmlPullParserException { boolean done = false; int eventType = parser.getEventType(); do { if (eventType == XmlPullParser.START_TAG) { done = startElement(parser.getName()); } else if (eventType == XmlPullParser.END_TAG) { endElement(parser.getName()); } else if (eventType == XmlPullParser.TEXT) { text(); } } while (!done && (eventType = parser.next()) != XmlPullParser.END_DOCUMENT); } protected XmlPullParserException createException(String message) { return new XmlPullParserException(message, parser, null); } /** * Called when the start of an element is encountered. * @param name the name of the element. * @return {@code true} to abort the parsing because there's * no further processing required, {@code false} otherwise. 
* @throws XmlPullParserException */ abstract protected boolean startElement(String name) throws XmlPullParserException; abstract protected void endElement(String name) throws XmlPullParserException; protected void text() throws XmlPullParserException { } } splitter-r653/test/0000775000175300017530000000000014352507254015515 5ustar builderbuilder00000000000000splitter-r653/test/func/0000775000175300017530000000000014352507254016450 5ustar builderbuilder00000000000000splitter-r653/test/func/ArgsTest.java0000664000175300017530000000157314352507254021055 0ustar builderbuilder00000000000000/* * Copyright (C) 2016 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Author: Steve Ratcliffe, Gerd Petermann * Create date: 2016-12-28 */ package func; import org.junit.Test; import func.lib.Outputs; import func.lib.TestUtils; /** * A basic check of various arguments that can be passed in. * * @author Gerd Petermann */ public class ArgsTest extends Base { @Test public void testHelp() { Outputs outputs = TestUtils.run("--help"); outputs.checkNoError(); } } splitter-r653/test/func/Base.java0000664000175300017530000000171214352507254020166 0ustar builderbuilder00000000000000/* * Copyright (C) 2008 Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Author: Steve Ratcliffe * Create date: 11-Jan-2009 */ package func; import org.junit.After; import org.junit.Before; import func.lib.TestUtils; /** * Base class for tests with some useful routines. It ensures that created * files are deleted before the test starts. * * @author Steve Ratcliffe */ public class Base { @Before public void baseSetup() { TestUtils.deleteOutputFiles(); } @After public void baseTeardown() { TestUtils.closeFiles(); } } splitter-r653/test/func/SolverAndProblemGeneratorTest.java0000664000175300017530000000567614352507254025256 0ustar builderbuilder00000000000000/* * Copyright (C) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * * Author: Gerd Petermann * Create date: 2017-01-10 */ package func; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.File; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Map.Entry; import org.junit.Test; import func.lib.Args; import uk.me.parabola.splitter.Main; /** * Compare file sizes with expected results. A very basic check that the size of * the output files has not changed. This can be used to make sure that a change * that is not expected to change the output does not do so. * * The sizes will have to be always changed when the output does change though. * * * @author Gerd Petermann */ public class SolverAndProblemGeneratorTest extends Base { /** * @throws IOException */ @Test public void testHamburg() throws IOException { runSplitter(Args.expectedHamburg, "--stop-after=gen-problem-list", Args.HAMBURG); } @Test public void testAlaska() throws IOException { runSplitter(Args.expectedAlaska,"--stop-after=gen-problem-list", Args.ALASKA); } @Test public void testAlaskaOverlap() throws IOException { runSplitter(Args.expectedAlaskaOverlap,"--stop-after=split","--keep-complete=false", Args.ALASKA); } @Test /** verifies that --max-areas has no effect on the output */ public void testAlaskaMaxAreas7() throws IOException { runSplitter(Args.expectedAlaska,"--stop-after=gen-problem-list","--max-areas=5", Args.ALASKA); } private static void runSplitter(Map expected, String... optArgs) throws IOException { List argsList = new ArrayList<>(Arrays.asList(Args.MAIN_ARGS)); for (String arg : optArgs) argsList.add(arg); Main.mainNoSystemExit(argsList.toArray(new String[argsList.size()])); for (Entry entry : expected.entrySet()) { String f = entry.getKey(); long expectedSize = entry.getValue(); assertTrue("no " + f + " generated", new File(f).exists()); List lines = Files.readAllLines(Paths.get(f, "")); long realSize = 0; for (String l : lines) { realSize += l.length(); } assertEquals(f + " has wrong size", expectedSize, realSize); } } @Test public void testNoSuchFile() { Main.mainNoSystemExit("no-such-file-xyz.osm"); assertFalse("no file generated", new File(Args.DEF_AREAS_LIST).exists()); } } splitter-r653/test/func/package.html0000664000175300017530000000022114352507254020724 0ustar builderbuilder00000000000000

Functional tests

Functional tests that make complete runs of splitter and examine the resultant files in some way.
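
A minimal sketch of what such a test looks like is shown below. It is purely illustrative and not part of the suite (the class name is made up); it assumes the Main entry point and the helpers defined further down in this archive (func.lib.Args, func.lib.TestUtils, Base) behave as shown there, and it checks one of the default output files.

package func;

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.junit.Test;

import func.lib.Args;
import uk.me.parabola.splitter.Main;

// Hypothetical example only: runs splitter end-to-end on the Alaska extract
// with the standard test arguments and checks that the default areas list is produced.
public class ExampleRunSketchTest extends Base {
	@Test
	public void testAreasListIsWritten() {
		List<String> args = new ArrayList<>(Arrays.asList(Args.MAIN_ARGS));
		args.add("--stop-after=gen-problem-list");
		args.add(Args.ALASKA);
		Main.mainNoSystemExit(args.toArray(new String[args.size()]));
		// a successful run writes the default areas list; Base/TestUtils clean it up again
		assertTrue("no " + Args.DEF_AREAS_LIST + " generated", new File(Args.DEF_AREAS_LIST).exists());
	}
}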

splitter-r653/test/func/lib/0000775000175300017530000000000014352507254017216 5ustar builderbuilder00000000000000splitter-r653/test/func/lib/Args.java0000664000175300017530000000462414352507254020763 0ustar builderbuilder00000000000000/* * Copyright (C) 2017 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Author: Gerd Petermann * Create date: 2017-01-10 */ package func.lib; import java.util.LinkedHashMap; import java.util.Map; /** * Useful constants that are used for arguments etc. in the functional * tests. * * @author Gerd Petermann */ public interface Args { public static final String TEST_RESOURCE_OSM = "test/resources/in/osm/"; public static final String DEF_TEMPLATE = "template.args"; public static final String DEF_DENSITIES = "densities-out.txt"; public static final String DEF_AREAS_KML = "areas.kml"; public static final String DEF_AREAS_LIST = "areas.list"; public static final String DEF_AREAS_POLY = "areas.poly"; public static final String DEF_PROBLEM_LIST = "problem.list"; public static final String[] MAIN_ARGS = { "--status-freq=0", "--write-kml=" + DEF_AREAS_KML, "--problem-report=" + DEF_PROBLEM_LIST, "--max-nodes=500000", }; public static final String ALASKA = TEST_RESOURCE_OSM + "alaska-2016-12-27.osm.pbf"; public static final String HAMBURG = TEST_RESOURCE_OSM + "hamburg-2016-12-26.osm.pbf"; /** expected summed line sizes for ALASKA file */ public static final Map expectedAlaska = new LinkedHashMap() { { put(DEF_AREAS_KML, 5158); put(DEF_AREAS_LIST, 1076); put(DEF_AREAS_POLY, 371); put(DEF_DENSITIES, 769055); put(DEF_PROBLEM_LIST, 12157); put(DEF_TEMPLATE, 930); } }; /** expected summed line sizes for ALASKA file */ public static final Map expectedAlaskaOverlap = new LinkedHashMap() { { putAll(expectedAlaska); remove(DEF_PROBLEM_LIST); } }; /** expected summed line sizes for HAMBURG file */ public static final Map expectedHamburg = new LinkedHashMap() { { put(DEF_AREAS_KML, 3143); put(DEF_AREAS_LIST, 616); put(DEF_AREAS_POLY, 204); put(DEF_DENSITIES, 2157); put(DEF_PROBLEM_LIST, 51017); put(DEF_TEMPLATE, 662); } }; } splitter-r653/test/func/lib/Outputs.java0000664000175300017530000000375414352507254021555 0ustar builderbuilder00000000000000/* * Copyright (C) 2008 Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Author: Steve Ratcliffe * Create date: 11-Jan-2009 */ package func.lib; import static org.junit.Assert.*; /** * Standard output and error as produced during a run. * * @author Steve Ratcliffe */ public class Outputs { private final String out; private final String err; public Outputs(String out, String err) { this.out = out; this.err = err; } protected String getOut() { return out; } protected String getErr() { return err; } /** * Check that the standard error is empty. 
*/ public void checkNoError() { assertEquals("no error output", "", getErr()); } /** * Check that the output contains the given strings. You can specify * any number of strings. * @param strings The list of strings to check. */ public void checkOutput(String... strings) { for (String s : strings) { if (!out.contains(s)) { // Test has failed. Construct an assertion that will print // something that is useful to show the problem. assertEquals("contains '" + s + "'", "..." + s + "...", out); } } } /** * Check that the output contains the given strings. You can specify * any number of strings. * @param strings The list of strings to check. */ public void checkError(String... strings) { for (String s : strings) { if (!err.contains(s)) { // Test has failed. Construct an assertion that will print // something that is useful to show the problem. assertEquals("contains '" + s + "'", "..." + s + "...", err); } } } } splitter-r653/test/func/lib/TestUtils.java0000664000175300017530000000607614352507254022032 0ustar builderbuilder00000000000000/* * Copyright (C) 2008 Steve Ratcliffe * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Author: Steve Ratcliffe * Create date: 10-Jan-2009 */ package func.lib; import static org.junit.Assert.assertTrue; import java.io.ByteArrayOutputStream; import java.io.Closeable; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; import java.util.ArrayDeque; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.Deque; import java.util.List; import uk.me.parabola.splitter.Main; /** * Useful routines to use during the functional tests. * * @author Steve Ratcliffe * @author Gerd Petermann */ public class TestUtils { private static final List files = new ArrayList<>(); private static final Deque open = new ArrayDeque<>(); private TestUtils () { // avoid implicit public constructor } static { files.add(Args.DEF_AREAS_KML); files.add(Args.DEF_AREAS_LIST); files.add(Args.DEF_AREAS_POLY); files.add(Args.DEF_PROBLEM_LIST); files.add(Args.DEF_DENSITIES); files.add(Args.DEF_TEMPLATE); Runnable r = TestUtils::deleteOutputFiles; Thread t = new Thread(r); Runtime.getRuntime().addShutdownHook(t); } /** * Delete output files that were created by the tests. * Used to clean up before/after a test. */ public static void deleteOutputFiles() { for (String fname : files) { File f = new File(fname); if (f.exists()) assertTrue("delete existing file: " + f.getName(), f.delete()); } } public static void closeFiles() { while (!open.isEmpty()) { try { open.remove().close(); } catch (IOException e) { e.printStackTrace(); } } } public static void registerFile(String ... names) { Collections.addAll(files, names); } public static void registerFile(Closeable... fileList) { Collections.addAll(open, fileList); } /** * Run with the given args. Some standard arguments are added first. * * To run without the standard args, use runRaw(). * @param in The arguments to use. */ public static Outputs run(String... 
in) { List args = new ArrayList<>(Arrays.asList(in)); OutputStream outsink = new ByteArrayOutputStream(); OutputStream errsink = new ByteArrayOutputStream(); PrintStream origout = System.out; PrintStream origerr = System.err; try (PrintStream out = new PrintStream(outsink); PrintStream err = new PrintStream(errsink)) { System.setOut(out); System.setErr(err); Main.mainNoSystemExit(args.toArray(new String[args.size()])); } finally { System.setOut(origout); System.setErr(origerr); } return new Outputs(outsink.toString(), errsink.toString()); } } splitter-r653/test/resources/0000775000175300017530000000000014352507254017527 5ustar builderbuilder00000000000000splitter-r653/test/uk/0000775000175300017530000000000014352507254016134 5ustar builderbuilder00000000000000splitter-r653/test/uk/me/0000775000175300017530000000000014352507254016535 5ustar builderbuilder00000000000000splitter-r653/test/uk/me/parabola/0000775000175300017530000000000014352507254020316 5ustar builderbuilder00000000000000splitter-r653/test/uk/me/parabola/splitter/0000775000175300017530000000000014352507254022164 5ustar builderbuilder00000000000000splitter-r653/test/uk/me/parabola/splitter/AreaSetTest.java0000664000175300017530000000410014352507254025206 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import static org.junit.Assert.assertEquals; import org.junit.Test; /** * Unit tests for the sparse BitSet implementation */ public class AreaSetTest { private final int NUM = 10000; private final int[] POS = { 1, 63, 64, 65, 4711, 78231}; public void allTests() { testAreaSetRandom(); testAreaSetSequential(); } @Test public void testAreaSetSequential() { AreaSet set = new AreaSet(); for (int i = 1; i < NUM; i++) { assertEquals("get(" + i + ")", false, set.get(i)); } for (int i = 1; i < NUM; i++) { set.set(i); assertEquals("get(" + i + ")", true, set.get(i)); } assertEquals("cardinality() returns wrong value", NUM - 1, set.cardinality()); for (int i = 1; i < NUM; i++) { set.clear(i); assertEquals("get(" + i + ")", false, set.get(i)); assertEquals("cardinality() returns wrong value", NUM - i - 1, set.cardinality()); } } @Test public void testAreaSetRandom() { AreaSet set = new AreaSet(); for (int i : POS) { set.set(i); assertEquals("get(" + i + ")", true, set.get(i)); assertEquals("cardinality() returns wrong value", 1, set.cardinality()); set.clear(i); assertEquals("get(" + i + ")", false, set.get(i)); assertEquals("cardinality() returns wrong value", 0, set.cardinality()); } } @Test public void testErr542() { // crashed with r542 AreaSet set = new AreaSet(); set.set(1); set.set(4); set.set(7); set.set(8); set.set(9); set.set(10); set.set(11); set.set(12); set.set(13); set.set(14); set.set(15); set.set(29); set.clear(29); set.clear(29); } } splitter-r653/test/uk/me/parabola/splitter/ConvertTest.java0000664000175300017530000000351514352507254025313 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public 
License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import static org.junit.Assert.assertEquals; import org.junit.Test; /** * Unit tests for the rounding up/down utility methods. */ public class ConvertTest { @Test public void testParseDouble() { parse("0"); parse("1"); parse("0.0000012345"); parse("12."); parse(".12"); parse("1.123"); parse("1.123456789"); parse("1.1234567891"); parse("1.12345678912"); parse("1.123456789123"); parse("1.1234567891234"); parse("1.12345678912345"); parse("1.123456789123456"); parse("1.1234567891234568"); // Note that this is in the grey area - it's at the limit of double precision parse("1.12345678912345678"); parse("1.123456789012345678"); parse("120.12345678901234567"); parse("120.123456789012345678"); parse("120.1234567890123456789"); parse("120.12345678901234567892"); parse("120.123456789012345678923"); parse("120.1234567890123456789234"); parse("120.12345678901234567892345"); parse("120.123456789012345678923456"); parse("120.1234567890123456789234567"); parse("120.12345678901234567892345678"); parse("120.123456789012345678923456789"); parse("120.1234567890123456789012345678"); } private static void parse(String dbl) { final double epsilon = 3.0e-10; assertEquals("Double parsing failed when parsing " + dbl, Double.parseDouble(dbl), Convert.parseDouble(dbl), epsilon); } } splitter-r653/test/uk/me/parabola/splitter/RoundingTest.java0000664000175300017530000000561414352507254025462 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ package uk.me.parabola.splitter; import static org.junit.Assert.assertEquals; import org.junit.Test; /** * Unit tests for the rounding up/down utility methods. 
*/ public class RoundingTest { @Test public void testPositiveRoundingDown() { for (int i = 0; i < 50000; i += 19) { testRoundDown(i, 11, i / 2048 * 2048); } testRoundDown(0x1d5842, 11, 0x1d5800); testRoundDown(0x2399a, 11, 0x23800); testRoundDown(0x23800, 11, 0x23800); testRoundDown(0x237f0, 11, 0x23000); } @Test public void testPositiveRoundingUp() { for (int i = 0; i < 50000; i += 19) { testRoundUp(i, 11, (i + 2047) / 2048 * 2048); } testRoundUp(0x1e7faa, 11, 0x1e8000); testRoundUp(0x1e7801, 11, 0x1e8000); testRoundUp(0x1e7800, 11, 0x1e7800); testRoundUp(0x1e70aa, 11, 0x1e7800); testRoundUp(0x1e77ff, 11, 0x1e7800); } @Test public void testNegativeRoundingDown() { testRoundDown(0xffcbba86, 11, 0xffcbb800); testRoundDown(0xffcbbfff, 11, 0xffcbb800); testRoundDown(0xffcbb801, 11, 0xffcbb800); testRoundDown(0xffcbb7ff, 11, 0xffcbb000); } @Test public void testNegativeRoundingUp() { testRoundUp(0xffcbba86, 11, 0xffcbc000); testRoundUp(0xffcbbfff, 11, 0xffcbc000); testRoundUp(0xffcbb801, 11, 0xffcbc000); testRoundUp(0xffcbb7ff, 11, 0xffcbb800); testRoundUp(Integer.MIN_VALUE + 1234, 11, 0x80000800); } @Test public void testRound() { testRound(7, 2, 8); testRound(6, 2, 8); testRound(5, 2, 4); testRound(4, 2, 4); testRound(3, 2, 4); testRound(2, 2, 4); testRound(1, 2, 0); testRound(0, 2, 0); testRound(-1, 2, 0); testRound(-2, 2, 0); testRound(-3, 2, -4); testRound(-4, 2, -4); testRound(-5, 2, -4); } private static void testRoundDown(int value, int shift, int outcome) { assertEquals( "Before: " + Integer.toHexString(value) + ", After: " + Integer.toHexString(RoundingUtils.roundDown(value, shift)), outcome, RoundingUtils.roundDown(value, shift)); } private static void testRoundUp(int value, int shift, int outcome) { assertEquals( "Before: " + Integer.toHexString(value) + ", After: " + Integer.toHexString(RoundingUtils.roundUp(value, shift)), outcome, RoundingUtils.roundUp(value, shift)); } private static void testRound(int value, int shift, int outcome) { assertEquals( "Before: " + Integer.toHexString(value) + ", After: " + Integer.toHexString(RoundingUtils.round(value, shift)), outcome, RoundingUtils.round(value, shift)); } } splitter-r653/test/uk/me/parabola/splitter/geo/0000775000175300017530000000000014352507254022736 5ustar builderbuilder00000000000000splitter-r653/test/uk/me/parabola/splitter/geo/CityFinderTest.java0000664000175300017530000000315014352507254026500 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.geo; import java.util.ArrayList; import java.util.Collection; import java.util.List; import static org.junit.Assert.assertEquals; import org.junit.Test; /** * Unit tests for the CityFinder interface * * @author Chris Miller */ public class CityFinderTest { @Test public void testFinder() { List cities = getCities(); CityFinder cityFinder = new DefaultCityFinder(cities); Collection results = cityFinder.findCities(10,10,10,10); assertEquals(2, results.size()); results = cityFinder.findCities(10, -10, 12, 0); assertEquals(1, results.size()); assertEquals(0, results.iterator().next().getId()); results = cityFinder.findCities(10, -10, 12, -4); assertEquals(0, results.size()); } private static List getCities() { List cities = new ArrayList<>(); cities.add(new City(2, "EF", "Efefef", 10, 10, 100000)); cities.add(new City(1, "CD", "Cdcdcd", 10, 10, 100000)); cities.add(new City(4, "IJ", "Ijijij", 12, 11, 100000)); cities.add(new City(3, "GH", "Ghghgh", -2, 10, 100000)); cities.add(new City(0, "AB", "Ababab", 10, -1, 100000)); return cities; } } splitter-r653/test/uk/me/parabola/splitter/tools/0000775000175300017530000000000014352507254023324 5ustar builderbuilder00000000000000splitter-r653/test/uk/me/parabola/splitter/tools/CustomCollectionsTest.java0000664000175300017530000002414514352507254030506 0ustar builderbuilder00000000000000/* * Copyright (c) 2009, Chris Miller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.tools; import static org.junit.Assert.assertEquals; import java.io.IOException; import java.nio.ByteBuffer; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; import org.junit.Test; /** * */ public class CustomCollectionsTest { //@Test(expectedExceptions = IllegalArgumentException.class) //public void testInit() { // new IntObjMap(123, 0.5f); //} @Test public void testLong2IntMap() { testMap(new Long2IntClosedMap("test", 10000, -1)); } private static void testMap(Long2IntClosedMapFunction map) { int val; for (int i = 1; i < 1000; i++) { int j = map.add((long) i * 10, i); assertEquals(i - 1, j); assertEquals(i, map.size()); } for (int i = 1; i < 1000; i++) { int pos = map.getKeyPos(i * 10); assertEquals(pos + 1, i); } for (int i = 1; i < 1000; i++) { val = map.getRandom(i * 10); assertEquals(val, i); } try { map.switchToSeqAccess(null); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } try{ val = map.getRandom(5); } catch (IllegalArgumentException e){ assertEquals("random access on sequential-only map requested", e.getMessage()); } val = map.getSeq(5); assertEquals(-1, val); val = map.getSeq(10); assertEquals(1, val); val = map.getSeq(19); assertEquals(-1, val); val = map.getSeq(30); assertEquals(3, val); map.finish(); } private static void testVals(SparseLong2IntMap map, long idOffset, List vals) { map.clear(); map.put(1, -12000); long key = 128; for (int val : vals) { map.put(idOffset + key++, val); } map.put(1, 0); // trigger saving of chunk key = 128; for (int val : vals) { assertEquals("values " + vals.toString(), val, map.get(idOffset + key++)); } map.clear(); } @Test public void testSparseLong2IntMap() { ByteBuffer buf = ByteBuffer.allocate(4); for (int i = 0; i < 32; i++) { int val = 1 << i; do { for (int j = 1; j <= 4; j++) { int bytesToUse = j; if (bytesToUse == 1 && val >= Byte.MIN_VALUE && val <= Byte.MAX_VALUE || bytesToUse == 2 && val >= Short.MIN_VALUE && val <= Short.MAX_VALUE || bytesToUse == 3 && val >= -0x800000 && val <= 0x7fffff) { buf.clear(); SparseLong2IntMap.putVal(buf, val, bytesToUse); buf.flip(); assertEquals(SparseLong2IntMap.getVal(buf, bytesToUse), val); } } val = ~val; } while (val < 0); } testMap(0L); testMap(-10000L); testMap(1L << 35); testMap(-1L << 35); } private static int UNASSIGNED = Integer.MIN_VALUE; private static void testMap(long idOffset) { SparseLong2IntMap map = new SparseLong2IntMap("test"); map.defaultReturnValue(UNASSIGNED); // special patterns testVals(map, idOffset, Arrays.asList(1,2,1,1,1,2,1,1,2,1,1,2)); testVals(map, idOffset, Arrays.asList(1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2)); testVals(map, idOffset, Arrays.asList(66560, 7936, 7936, 6144)); testVals(map, idOffset, Arrays.asList(Integer.MIN_VALUE + 1, 1234)); testVals(map, idOffset, Arrays.asList(1)); // single value chunk with 1 byte value testVals(map, idOffset, Arrays.asList(1000)); // single value chunk with 2 byte value testVals(map, idOffset, Arrays.asList(33000)); // single value chunk with 3 byte value testVals(map, idOffset, Arrays.asList(1<<25)); // single value chunk with 4 byte value testVals(map, idOffset, Arrays.asList(856, 856, 844, 844, 646, 646, 646, 646, 646, 646)); testVals(map, idOffset, Arrays.asList(260, 31, 31, 24)); testVals(map, idOffset, Arrays.asList(137, 110, 114, 128, 309, 114)); testVals(map, idOffset, Arrays.asList(254, 12, 12, 
12, 12)); testVals(map, idOffset, Arrays.asList(254, 254, 12, 12)); testVals(map, idOffset, Arrays.asList(254, 12, 13)); testVals(map, idOffset, Arrays.asList(1000, 800, 700, 820)); testVals(map, idOffset, Arrays.asList(1000, 1000, 700)); testVals(map, idOffset, Arrays.asList(-32519, 255, -32519)); testVals(map, idOffset, Arrays.asList(-1, 1, 200, 1)); testVals(map, idOffset, Arrays.asList(Integer.MIN_VALUE + 1, Integer.MIN_VALUE + 1, 1234)); testVals(map, idOffset, Arrays.asList(Integer.MIN_VALUE + 1, 1234, Integer.MIN_VALUE + 1)); for (int i = 1; i < 1000; i++) { int j = map.put(idOffset + i, i); assertEquals(UNASSIGNED, j); assertEquals(i, map.size()); } for (int i = 1; i < 1000; i++) { boolean b = map.containsKey(idOffset + i); assertEquals(true, b); } for (int i = 1; i < 1000; i++) { assertEquals(i, map.get(idOffset + i)); } // random read access for (int i = 1; i < 1000; i++) { int key = (int) Math.max(1, (Math.random() * 1000)); assertEquals(key, map.get(idOffset + key)); } for (int i = 1000; i < 2000; i++) { assertEquals(UNASSIGNED, map.get(idOffset + i)); } for (int i = 1000; i < 2000; i++) { boolean b = map.containsKey(idOffset + i); assertEquals(false, b); } for (int i = 1000; i < 1200; i++) { int j = map.put(idOffset + i, 333); assertEquals(UNASSIGNED, j); assertEquals(i, map.size()); } // random read access 2 assertEquals(333, map.get(idOffset + 1010)); for (int i = 1; i < 1000; i++) { int key = 1000 + (int) (Math.random() * 200); assertEquals(333, map.get(idOffset + key)); } for (int i = -2000; i < -1000; i++) { assertEquals(UNASSIGNED, map.get(idOffset + i)); } for (int i = -2000; i < -1000; i++) { boolean b = map.containsKey(idOffset + i); assertEquals(false, b); } long mapSize = map.size(); // seq. update existing records for (int i = 1; i < 1000; i++) { int j = map.put(idOffset + i, i+333); assertEquals(i, j); assertEquals(mapSize, map.size()); } // random read access 3, update existing entries for (int i = 1; i < 1000; i++) { int j = map.put(idOffset + i, i+555); assertEquals(true, j == i+333 | j == i+555); assertEquals(mapSize, map.size()); } assertEquals(UNASSIGNED, map.get(idOffset + 123456)); map.put(idOffset + 123456, 999); assertEquals(999, map.get(idOffset + 123456)); map.put(idOffset + 123456, 888); assertEquals(888, map.get(idOffset + 123456)); assertEquals(UNASSIGNED, map.get(idOffset - 123456)); map.put(idOffset - 123456, 999); assertEquals(999, map.get(idOffset - 123456)); map.put(idOffset - 123456, 888); assertEquals(888, map.get(idOffset - 123456)); map.put(idOffset + 3008, 888); map.put(idOffset + 3009, 888); map.put(idOffset + 3010, 876); map.put(idOffset + 3011, 876); map.put(idOffset + 3012, 678); map.put(idOffset + 3013, 678); map.put(idOffset + 3014, 678); map.put(idOffset + 3015, 678); map.put(idOffset + 3016, 678); map.put(idOffset + 3017, 678); map.put(idOffset + 4000, 888); map.put(idOffset + 4001, 888); map.put(idOffset + 4002, 876); map.put(idOffset + 4003, 876); // update the first one map.put(idOffset + 3008, 889); // update the 2nd one map.put(idOffset + 4000, 889); // add a very different key map.put(idOffset + 5000, 889); map.put(idOffset + 5001, 222); assertEquals(889, map.get(idOffset + 3008)); assertEquals(888, map.get(idOffset + 3009)); assertEquals(876, map.get(idOffset + 3010)); assertEquals(876, map.get(idOffset + 3011)); assertEquals(678, map.get(idOffset + 3012)); assertEquals(678, map.get(idOffset + 3013)); assertEquals(678, map.get(idOffset + 3014)); assertEquals(889, map.get(idOffset + 4000)); assertEquals(888, map.get(idOffset 
+ 4001)); assertEquals(876, map.get(idOffset + 4002)); assertEquals(876, map.get(idOffset + 4003)); assertEquals(889, map.get(idOffset + 5000)); assertEquals(222, map.get(idOffset + 5001)); map.clear(); // special pattern 1 assertEquals(UNASSIGNED, map.put(idOffset + 1, 0)); assertEquals(UNASSIGNED, map.put(idOffset + 65, -1)); assertEquals(UNASSIGNED, map.get(idOffset + 999)); assertEquals(0, map.get(idOffset + 1)); assertEquals(-1, map.get(idOffset + 65)); map.clear(); map.put(idOffset + 1, 22); map.put(idOffset + 5, 22); map.put(idOffset + 100, 44); assertEquals(22, map.put(idOffset + 5, 33)); map.clear(); // larger values for (int i = 100_000; i < 110_000; i++) { map.put(idOffset + i, i); } for (int i = 100_000; i < 110_000; i++) { assertEquals(i, map.get(idOffset + i)); } map.clear(); Random random = new Random(101); Map ref = new HashMap<>(); // special cases long chunks (all 64 values used and random for (int i = 0; i < 3; i++) { for (int j = 0; j < 1000; j++) { int val = random.nextInt(Integer.MAX_VALUE); map.put(idOffset + j, val); ref.put(idOffset + j, val); } } // map.stats(0); ref.entrySet().forEach(e -> { long id = e.getKey(); int val = map.get(id); assertEquals("id=" + id, (int) e.getValue(), val); }); ref.clear(); map.clear(); for (int i = 0; i < 10_000; i++) { long id = Math.round((1L << 29) * random.nextDouble()); int val = (-1 * (1 << 20) + (int) Math.round((1 << 20) * random.nextDouble())); map.put(idOffset + id, val); ref.put(idOffset + id, val); } // map.stats(0); ref.entrySet().forEach(e -> { long id = e.getKey(); int val = map.get(id); assertEquals("id=" + id, (int) e.getValue(), val); }); // simulate split where all nodes fall into same tile map.clear(); for (int i = 0; i < 1 << 27; i+=64) { map.put(idOffset + i, 12); } assertEquals("id=" + idOffset+ 2048, 12, map.get(idOffset + 2048)); assertEquals("id=" + idOffset+ 2048*1024, 12, map.get(idOffset + 2048*1024)); assertEquals("id=" + idOffset+ 2048*1024 + 1, UNASSIGNED, map.get(idOffset + 2048*1024+1)); return; } } splitter-r653/test/uk/me/parabola/splitter/tools/SparseBitSetTest.java0000664000175300017530000000352314352507254027402 0ustar builderbuilder00000000000000/* * Copyright (C) 2012, Gerd Petermann * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 3 or * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. 
*/ package uk.me.parabola.splitter.tools; import static org.junit.Assert.assertEquals; import org.junit.Test; /** * Unit tests for the sparse BitSet implementation */ public class SparseBitSetTest { private final int NUM = 10000; private final long[] POS = { 1, 63, 64, 65, 4711, 12345654321L }; @Test public void testSparseBitSetSequential() { SparseBitSet sparseSet = new SparseBitSet(); for (long i = 1; i < NUM; i++) { assertEquals("get(" + i + ")", false, sparseSet.get(i)); } for (long i = 1; i < NUM; i++) { sparseSet.set(i); assertEquals("get(" + i + ")", true, sparseSet.get(i)); } assertEquals("cardinality() returns wrong value", NUM - 1, sparseSet.cardinality()); for (long i = 1; i < NUM; i++) { sparseSet.clear(i); assertEquals("get(" + i + ")", false, sparseSet.get(i)); assertEquals("cardinality() returns wrong value", NUM - i - 1, sparseSet.cardinality()); } } @Test public void testSparseBitSetRandom() { SparseBitSet sparseSet = new SparseBitSet(); for (long i : POS) { sparseSet.set(i); assertEquals("get(" + i + ")", true, sparseSet.get(i)); assertEquals("cardinality() returns wrong value", 1, sparseSet.cardinality()); sparseSet.clear(i); assertEquals("get(" + i + ")", false, sparseSet.get(i)); assertEquals("cardinality() returns wrong value", 0, sparseSet.cardinality()); } } } splitter-r653/test/uk/me/parabola/splitter/tools/TestBitReader.java0000664000175300017530000001053414352507254026673 0ustar builderbuilder00000000000000/* * Copyright (C) 2016 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * * Author: Steve Ratcliffe, Gerd Petermann */ package uk.me.parabola.splitter.tools; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import org.junit.Test; public class TestBitReader { /** * Very simple test that the bit reader is working. 
* @author Steve Ratcliffe for mkgmap * @author Gerd Petermann */ @Test public void testGetBits() { // Add your code here BitReader br = new BitReader(new byte[]{ (byte) 0xf1, 0x73, (byte) 0xc2, 0x5 }); assertTrue("first bit", br.get1()); assertEquals("five bits", 0x18, br.get(5)); assertEquals("four bits", 0xf, br.get(4)); assertEquals("sixteen bits", 0x709c, br.get(16)); } @Test public void testSpecialNegative() { BitReader br = new BitReader(new byte[]{0x24, 0xb}); int s = br.sget2(3); assertEquals(-12, s); } @Test public void testSpecialNegative2() { BitReader br = new BitReader(new byte[]{0x2c, 0x0}); int s = br.sget2(3); assertEquals(-6, s); } @Test public void testSpecialPositive() { BitReader br = new BitReader(new byte[]{(byte) 0xa4, 0}); int s = br.sget2(3); assertEquals(8, s); } @Test public void testWriteReadSingleBit() { BitWriter bw = new BitWriter(); final int testVal = 1231212311; int n = 0; int v = testVal; while (v > 0) { bw.put1(v % 2 != 0); v >>= 1; n++; } assertEquals(n, bw.getBitPosition()); BitReader br = new BitReader(bw.getBytes()); v = testVal; while (n-- > 0) { boolean b = br.get1(); assertEquals(v % 2 != 0, b); v >>= 1; } } @Test public void testDynAlloc() { BitWriter bw = new BitWriter(10); int n = 0; int bits = 9; for (int i = 0; i < 100; i++) { bw.putn(i, bits); n += bits; } assertEquals(n, bw.getBitPosition()); for (int i = 0; i < 100; i++) { bw.put1(i % 3 == 0); n += 1; } assertEquals(n, bw.getBitPosition()); } @Test public void testWriteReadSigned() { for (int n = 2; n <= 32; n++) { testWriteReadSigned(n); } } private static void testWriteReadSigned(int nbits) { int[] TEST_VALS = { Integer.MIN_VALUE, Integer.MIN_VALUE + 1, -40, -1, 0, 1, 40, Integer.MAX_VALUE - 1, Integer.MAX_VALUE }; for (int i = 0; i < TEST_VALS.length; i++) { BitWriter bw = new BitWriter(); int v = TEST_VALS[i]; if (nbits < 30 && (v < -1000 || v > 1000)) continue; bw.sputn2(v,nbits); boolean checkSimple = false; if ((1l << (nbits-1)) > Math.abs((long) v) || nbits == 32) { bw.sputn(v, nbits); checkSimple = true; } BitReader br = new BitReader(bw.getBytes()); int s = br.sget2(nbits); assertEquals("number of bits:" + nbits, v, s); if (checkSimple) { int s2 = br.sget(nbits); assertEquals("number of bits:" + nbits, v, s2); } } } @Test public void testWriteReadUnsigned() { for (int n = 1; n <= 32; n++) { testWriteReadUnsigned(n); } } private static void testWriteReadUnsigned(int nbits) { int[] TEST_VALS = { 0, 1, 40, Integer.MAX_VALUE - 1, Integer.MAX_VALUE }; for (int i = 0; i < TEST_VALS.length; i++) { BitWriter bw = new BitWriter(); int v = TEST_VALS[i] & (1 << nbits) - 1; bw.putn(v, nbits); BitReader br = new BitReader(bw.getBytes()); int s = br.get(nbits); assertEquals("number of bits:" + nbits, v, s); } } @Test public void positionedRead() { BitReader br = new BitReader(new byte[] { (byte) 0xf1, 0x73, (byte) 0xc2, 0x5 }); br.position(10); assertEquals("sixteen bits at pos 10", 0x709c, br.get(16)); } @Test public void positionedReadWithOffset() { BitReader br = new BitReader(new byte[] {0, (byte) 0xf1, 0x73, (byte) 0xc2, 0x5}, 1); int pos = 10; br.position(pos); assertEquals("sixteen bits at pos " + pos, 0x709c, br.get(16)); br.skip(-16); assertEquals("sixteen bits at pos " + pos, 0x709c, br.get(16)); br.skip(-2); br.skip(-15); br.skip(1); assertEquals("sixteen bits at pos " + pos, 0x709c, br.get(16)); } }
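As a usage note, the encode/decode pairs exercised above can also be driven outside JUnit. The following is a minimal, hypothetical sketch (the class name and the sample values are made up) that mirrors the unsigned putn()/get() and the variable-length signed sputn2()/sget2() round trips tested here:

package uk.me.parabola.splitter.tools;

/**
 * Hypothetical standalone sketch, not part of the sources: mirrors the
 * round trips exercised in TestBitReader above.
 */
public class BitStreamSketch {
	public static void main(String[] args) {
		// unsigned round trip: write nine bits, read the same nine bits back
		BitWriter bw = new BitWriter();
		bw.putn(0x1a5, 9);
		BitReader br = new BitReader(bw.getBytes());
		System.out.println(Integer.toHexString(br.get(9))); // prints 1a5

		// signed, variable-length round trip as used by sputn2()/sget2()
		BitWriter bw2 = new BitWriter();
		bw2.sputn2(-12, 3);
		BitReader br2 = new BitReader(bw2.getBytes());
		System.out.println(br2.sget2(3)); // prints -12
	}
}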