pax_global_header00006660000000000000000000000064121327233330014511gustar00rootroot0000000000000052 comment=d28841011f44e01423d1abb6d4be6dd0f56f58c9 osm2pgsql-0.82.0/000077500000000000000000000000001213272333300135275ustar00rootroot00000000000000osm2pgsql-0.82.0/900913.sql000066400000000000000000000013261213272333300150170ustar00rootroot00000000000000INSERT INTO spatial_ref_sys (srid, auth_name, auth_srid, srtext, proj4text)VALUES (900913,'EPSG',900913,'PROJCS["WGS84 / Simple Mercator",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS_1984", 6378137.0, 298.257223563]],PRIMEM["Greenwich", 0.0],UNIT["degree", 0.017453292519943295],AXIS["Longitude", EAST],AXIS["Latitude", NORTH]],PROJECTION["Mercator_1SP_Google"],PARAMETER["latitude_of_origin", 0.0],PARAMETER["central_meridian", 0.0],PARAMETER["scale_factor", 1.0],PARAMETER["false_easting", 0.0],PARAMETER["false_northing", 0.0],UNIT["m", 1.0],AXIS["x", EAST],AXIS["y", NORTH],AUTHORITY["EPSG","900913"]]','+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs'); osm2pgsql-0.82.0/AUTHORS000066400000000000000000000002571213272333300146030ustar00rootroot00000000000000osm2pgsql was written by Jon Burgess, Artem Pavlenko, Martijn van Oosterhout Sarah Hoffman, Kai Krueger, Frederik Ramm, Brian Quinion and other OpenStreetMap project members.osm2pgsql-0.82.0/COPYING000066400000000000000000000431031213272333300145630ustar00rootroot00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. 
Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 
You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. 
Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. osm2pgsql-0.82.0/ChangeLog000066400000000000000000001474411213272333300153140ustar00rootroot000000000000002010-11-06 21:04 +0000 [r24100] giggls: * Auto-detect filetype pbf/osm based on file extension if no Input frontend has been explicitely selected 2010-11-06 20:37 +0000 [r24099] hholzgra: * * improved configure setup (including automake and libtool) * support for different input readers besides libxml2 OSM XML parsing * "primitive" XML parser integrated into the main binary * OSM PBF parser 2010-11-03 10:51 +0000 [r24039] twain: * add extra operator column to word 2010-10-24 00:37 +0000 [r23798] twain: * hstore version of gazetteer output 2010-10-20 23:47 +0000 [r23731] twain: * set CPPFLAGS correctly for non-standard paths 2010-10-19 10:06 +0000 [r23687] twain: * max admin rank, better postcode defaults 2010-10-18 13:40 +0000 [r23678] ldp: * Remove 3 unused (and undocumented) keys 2010-10-02 13:26 +0000 [r23440] jonb: * fix warning about incorrect pointer assignment at osm2pgsql.c:818 2010-09-20 20:59 +0000 [r23286] ldp: * Delete osm.xml. This shouldn't be in here. 2010-09-19 11:35 +0000 [r23264] stevechilton: * add service=drive-through 2010-09-18 12:07 +0000 [r23247] rodo: * Add packages names for Debian 2010-09-17 21:09 +0000 [r23243] stevechilton: * add service=drive-through 2010-09-15 16:19 +0000 [r23186] twain: * Minor javascript fixes. 
Introduce new search plans for searching by 'special' words 2010-09-10 18:19 +0000 [r23099] twain: * check the indexing process didn't generate any errors (second attempt) 2010-09-10 18:13 +0000 [r23098] frederik: * real programmers don't do syntax checks ;) 2010-09-10 18:08 +0000 [r23097] twain: * check the indexing process didn't generate any errors 2010-09-09 14:14 +0000 [r23085] frederik: * fixed twain47's version and commited to svn 2010-09-09 12:21 +0000 [r23080] twain: * check resulting geometry is a polygon of some type 2010-09-04 16:48 +0000 [r22986] twain: * remove reference to natural earth 2010-09-04 14:41 +0000 [r22984] twain: * performance improvements for initial load of data 2010-08-22 12:57 +0000 [r22731] twain: * remove debug messages 2010-08-21 12:24 +0000 [r22718] twain: * switch to c code for toekn generation revert change to administrative typo improvements to incremental update code 2010-08-21 11:39 +0000 [r22717] frederik: * fix typo 2010-08-21 11:22 +0000 [r22716] frederik: * comment out replace operations which are already covered by the transliteration module 2010-08-20 15:05 +0000 [r22710] twain: * switch to multi-threaded indexing 2010-08-19 22:19 +0000 [r22701] rodo: * Fix #3169 by applying patch 2010-08-17 22:42 +0000 [r22679] frederik: * readme moved to wiki 2010-08-17 10:59 +0000 [r22669] twain: * manage Osmosis import from within util.update.php note the new log table 'import_osmosis_log' created in gazetteer-tables.sql 2010-08-17 10:11 +0000 [r22668] frederik: * add capability to load .osc files directly 2010-08-17 09:55 +0000 [r22667] frederik: * typo 2010-08-16 12:57 +0000 [r22658] frederik: * make explicit that slim mode has to be used. 
2010-08-16 12:21 +0000 [r22655] frederik: * remove references to postgres user "twain" 2010-08-03 12:12 +0000 [r22556] twain: * update mime type headers (as per #3088) 2010-07-19 08:02 +0000 [r22371] mazdermind: * add hstore-column option, which allows to create specific hstore columns for sub tags. '--hstore-column name:' will for example create a column "name:" with all the values of name:xx tags ind the form of xx=>Value for any name:xx tagged to an element. This changeset also includes changes to the regular hstore code by moving it to a separate function and also extending keyvals.c/h with an inline wrapper-function. 2010-07-16 13:24 +0000 [r22349] gravitystorm: * Updated table definitions from twain47 to reflect changes made in [22221] 2010-07-16 13:18 +0000 [r22348] gravitystorm: * If you change the build system remember to update the README 2010-07-16 12:32 +0000 [r22347] gravitystorm: * fix typo in district 2010-07-14 11:40 +0000 [r22308] twain: * Finally add warning about postgresql 8.4 2010-07-12 14:16 +0000 [r22285] twain: * move to processing git rather than svg multi-lingual generate 'near' and 'in' tokens 2010-07-10 08:30 +0000 [r22268] frederik: * osm2pgsql version without libxml 2010-07-07 14:59 +0000 [r22222] twain: * handle broken language list in IE 2010-07-07 14:51 +0000 [r22221] twain: * handle house name / number collisions by showing both 2010-07-07 13:35 +0000 [r22220] twain: * improved multi-processor indexing 2010-07-07 13:20 +0000 [r22219] twain: * improved multi-processor indexing 2010-07-07 08:25 +0000 [r22213] giggls: * Put binary into correct places according to debian policy 2010-06-24 14:47 +0000 [r21987] twain: * improve presentation of update status - time remaining extra options to specify max load / blocking processes on command line 2010-06-11 22:47 +0000 [r21661] giggls: * * remove malloc.h for proper compi8lation on Macosx * allow for postgresql password to be specified in PGPASS Environment Variable instead of interactive 
input 2010-06-09 20:30 +0000 [r21630] rodo: * Add an option to Create indexes on a different tablespace, close #2988 2010-05-27 17:27 +0000 [r21470] twain: * fix pointer error 2010-05-27 15:43 +0000 [r21464] twain: * move string replacements to c module 2010-05-20 08:58 +0000 [r21381] rodo: * Do not warn about slim option on 32nits system if option is enabled 2010-05-15 22:28 +0000 [r21290] jonb: * osm2pgsql: Add C++ compiler into autoconf. Rename DATADIR to prevent clash with mingw32 objidl.h header. Fix some mingw32 compile issues. 2010-05-15 21:38 +0000 [r21289] jonb: * osm2pgsql: Raise maximum tag size in style file to 63 characters 2010-05-05 15:34 +0000 [r21135] frederik: * fix osm2pgsql debian packaging, and make it buildable for ubuntu lucid 2010-04-09 13:48 +0000 [r20873] giggls: * * add brief explanation of hstore functionality * make phstore flag actually do what it is supposed to do * remove obsolete function add_parking_node 2010-04-07 17:51 +0000 [r20823] feixm: * When running on 32bit systems, userprocess can allocate as much as 3GB of virtual address space. This is due to 3GB/1GB split on 32bit linux machines. No matter how much physical RAM you have, you end up on 3GB limit. This is quite low limit when importing anything big as country OSM file or the whole planet OSM. If we know this, we should warn user in syntax help, during runtime and even when we start throwing std::bad_alloc during conversion. 2010-04-06 18:48 +0000 [r20802] strk: * Autoconf for osm2pgsql 2010-04-02 12:13 +0000 [r20775] giggls: * change "char sql[2048]" to "static char *sql" and do dynamic allocation the reason is, that hstore rows can get really long, thus dynamic allocation prevents them to get cut and (hopefully) also prevents likely buffer overflows as well. 2010-03-20 20:06 +0000 [r20565] giggls: * We need to be able to mark polygons as such also in hstore only mode where no additional tag columns should be added. 
For this reason we introduce a new flag called phstore which will do the same as the polygon flag but without adding a column for the tag specified in the stylefile. 2010-03-16 08:26 +0000 [r20505] giggls: * \r und \n in hstore value needs to be escaped for pgsql copy import as well 2010-03-15 14:47 +0000 [r20493] giggls: * TAB in hstore value needs to be escaped for pgsql copy import 2010-03-14 14:31 +0000 [r20475] giggls: * Add an experimental feature to generate hstore enabled PgSQL tables. At least in theory this will allow for 3-column output tables now. Tested with the following environment: * non slim mode * hstore-new (http://pgfoundry.org/projects/hstore-new/ * PgSQL 8.4/PostGIS 1.4 2010-03-11 22:00 +0000 [r20429] frederik: * patch not needed any longer 2010-03-11 13:53 +0000 [r20418] twain: * only do address intrapolation in append mode 2010-03-08 13:40 +0000 [r20370] twain: * add reverse and reverse.php to the block list 2010-03-08 13:32 +0000 [r20369] gslater: * Add robots.txt, add form action 2010-03-03 14:32 +0000 [r20255] twain: * applied #2769 search field should have focus patch (firefishy) 2010-02-26 14:23 +0000 [r20163] twain: * change json output to be complaint (no comments allowed) 2010-02-26 14:09 +0000 [r20161] frederik: * fix json compliance 2010-02-25 18:41 +0000 [r20149] twain: * new name options, fix error when importing new data 2010-02-24 12:17 +0000 [r20134] twain: * Improve install documentation 2010-02-16 14:25 +0000 [r20035] twain: * More detailed loging. Tweaks to how house numbers are pressented. 
Fix json output (incorrect address details) 2010-02-16 14:19 +0000 [r20034] twain: * improve handling of house numbers, more install documentation (thanks to Frans Hals) 2010-02-11 23:50 +0000 [r19974] jonb: * osm2pgsql: Complain if we got an error while reading the style file or were unable to parse any valid columns 2010-01-27 00:01 +0000 [r19640] twain: * broken output for [lat,lon] searches, attempting to search on blank queries 2010-01-26 14:45 +0000 [r19633] twain: * suggest alternatives for missing words 2010-01-23 17:58 +0000 [r19603-19604] twain: * minor runtime warning * extra error checking, smaller indexing partitions 2010-01-11 15:34 +0000 [r19407] twain: * More logging, improved UK postcodes, fix more_url 2010-01-11 02:20 +0000 [r19398] ldp: * Make shop into type polygon, to have closed ways with no polygon-enforcing tags (eg. missing building=yes) still show up in the polygon table. 2010-01-05 13:45 +0000 [r19281] twain: * rounding error in generation of bounding box 2009-12-19 23:23 +0000 [r19148] jonb: * osm2pgsql: Apply multipolygon patch from Twain with a few changes. 2009-12-19 17:16 +0000 [r19147] jonb: * Overhaul the osm2pgsql readme text 2009-12-19 16:31 +0000 [r19145] jonb: * Disable the new osm_{user,uid,timestamp,version} columns since the --extra-attributes option is off by default 2009-12-19 16:21 +0000 [r19144] jonb: * Allow user,uid,version & timestamp attributes to be imported from osm objects. Fixes #2405. 2009-12-18 14:56 +0000 [r19133] twain: * expose 'more_url' in xml format 2009-12-18 14:26 +0000 [r19132] twain: * fix bug in finding 'bus stop wilhelmshaven' introduced in recent commit 2009-12-17 18:47 +0000 [r19128] twain: * support for various lat,lon formats as part of the query i.e. 
village near 48,7.7 2009-12-17 17:46 +0000 [r19127] twain: * correct error handling for missing osm points 2009-12-17 15:54 +0000 [r19126] twain: * ignore empty search phrases 2009-12-14 22:12 +0000 [r19092] twain: * OSM Copright notice in XML reverse geocode by OSM ID (not just lat/lon) improved debuging information added IP block lists 2009-12-14 22:04 +0000 [r19091] twain: * multi-language amenities, one off import of specific nodes/ways/relations 2009-12-04 00:11 +0000 [r18937] twain: * Order results by distance to specified location (#2519) 2009-12-01 17:16 +0000 [r18884] twain: * revert accidentally committed multi-polygon patch 2009-11-30 12:11 +0000 [r18873] twain: * add test in the associatedStreet code to ensure associatedStreet is actually a road 2009-11-29 16:24 +0000 [r18851] twain: * missing library commands 2009-11-28 21:03 +0000 [r18846] twain: * Fix quote type problems in JSON formating (#2508) 2009-11-28 20:29 +0000 [r18845] twain: * fix problems reported with running script first time 2009-11-28 20:24 +0000 [r18844] twain: * some missed files 2009-11-28 18:17 +0000 [r18843] twain: * missing name space for vector 2009-11-28 16:24 +0000 [r18840] twain: * lots of minor changes since going live 2009-11-15 18:13 +0000 [r18632] jonb: * Clear out tag list after parsing a changeset otherwise the accumulated tags will appear on the first node. Fixes ticket #2426 2009-11-12 12:23 +0000 [r18558] ldp: * Add shop=*, as a first step to be able to render them. 2009-11-08 15:09 +0000 [r18509-18510] twain: * changes to indexing * reverse geocoding and output format changes 2009-11-04 08:52 +0000 [r18451] frederik: * bump version to 0.69 2009-11-03 23:55 +0000 [r18440] frederik: * new! now with even fewer annoying debug print statements! 2009-11-03 23:32 +0000 [r18439] frederik: * Allow creation of expiry lists for 900913 tiles even if your target projection is not 900913 (e.g. you have your PostGIS table in lat/lon). 
Also fixes another bug in the old projection code where expire_from_bbox would not expire the whole box properly (expire-tiles.c around line 333, should have used min/max lon/lat but used min lon/lat twice). 2009-11-03 09:55 +0000 [r18437] frederik: * fix a bug that would sometimes expire tiles at the other end of the world instead of those where a change has occurred. 2009-10-28 22:27 +0000 [r18353] jonb: * osm2pgsql: Update code to use DROP TABLE IF EXISTS. This avoids errors in the postgresql logs and requires postgresql-8.2+. Fixes ticket #2379. Update version to 0.68 2009-10-27 19:28 +0000 [r18316] jonb: * Add double quotes around the column name when performing lookup otherwsie postgres may convert it to lower case 2009-10-27 14:58 +0000 [r18309] twain: * gazetteer diff updates 2009-10-27 14:10 +0000 [r18308] twain: * code cleanup and support for diff updates 2009-10-15 08:22 +0000 [r18167] frederik: * fix error message 2009-10-07 19:23 +0000 [r18009] jonb: * osm2pgsql: Split very long ways into multiple segments. Mapnik has some rendering artifacts for very long ways, this is the cause of #2234. Currently ways are split after about 100km or 1 degree. This should help the rendering performance too since these large and often complex ways have enormous bounding boxes and are therefore fetched when rendering many tiles. The bounding box of each segment is typically a lot smaller than the complete way. 2009-10-06 20:46 +0000 [r18001] jonb: * Apply fix from Milo to display projection information when executed with: -h -v. Fixes #2357 2009-10-04 12:47 +0000 [r17981] jonb: * Update projection strings to match proj-4.7.1 definitions. 2009-10-02 20:09 +0000 [r17947] jonb: * Cascade node changes all the way through to relations. Previously a node change might only trigger updates to ways without these then triggering a relation update. 
2009-09-17 16:55 +0000 [r17671] twain: * Missed out the readme file 2009-09-17 15:06 +0000 [r17669] twain: * missed table name change 2009-09-17 15:01 +0000 [r17668] twain: * Refactored website (php), minor indexing changes, documentation 2009-09-04 19:47 +0000 [r17459] ldp: * Add operator for nodes,ways 2009-09-04 18:52 +0000 [r17456] jonb: * Disable add_parking_node() in osm2pgsql since the current osm.xml renders the symbol on parking areas now. 2009-09-01 15:14 +0000 [r17424] tomhughes: * Fix buffer overflow. 2009-08-28 17:53 +0000 [r17326] jonb: * Update osm2pgsql version to 0.67, the previous change to planet_osm_nodes in the previous commit may break things so a version bump is a good idea 2009-08-28 17:06 +0000 [r17325] jonb: * Use fixed point storage for node positions in planet_osm_nodes. This reduces the DB size which should make things a little bit faster too. 2009-08-07 11:42 +0000 [r16911] twain: * add script to show how an address was constructed 2009-07-19 08:15 +0000 [r16573] avar: * If this parser parses a style file with more than MAX_STYLES it'll start writing into unallocated memory and segfault. This fix should change it to malloc/realloc but I don't have the time now, so I'll just extend the memory it's taking up. 
2009-07-14 16:47 +0000 [r16498] twain: * extra chars in postgresql transliteration function introduces ranked sql generation graphical updates to search page support for house numbers and ways/nodes connected using relations 2009-07-08 11:01 +0000 [r16380] twain: * Addition of support for Karlsruhe schema / house numbers Various minor bug fixes 2009-07-01 22:47 +0000 [r16258] twain: * New version of gazetteer, performance and scaleing updates 2009-06-02 12:58 +0000 [r15538] twain: * Misc missing characters 2009-06-02 12:36 +0000 [r15536] twain: * Added Hangul Syllables to transliteration table 2009-06-01 14:19 +0000 [r15460] twain: * correct escape sequence 2009-05-30 11:09 +0000 [r15323-15325] twain: * correct spelling of gazatteer folder * adding multi-language support and relations 2009-05-22 19:00 +0000 [r15176] jonb: * osm2pgsql: consider area key as indicating a polygon. This fixes some multipolygon cases with: highway=pedestrian, area=yes 2009-05-22 18:40 +0000 [r15173] joerg: * ubuntu-hardy has older debhelper 2009-05-22 18:36 +0000 [r15172] jonb: * osm2pgsql: Still allow multipolygons to inherit tags from the outer way even if the relation has a name tag. I've seen several examples where people have added a name tag to a relation even though the wiki says they should be untagged. 2009-05-20 18:54 +0000 [r15131] jonb: * Update osm2pgsql to ignore elements 2009-05-19 22:59 +0000 [r15119] jonb: * osm2pgsql: prevent route relation name from getting into the name column, we just want it in route_name, fixes ticket #1703 2009-05-19 21:52 +0000 [r15118] jonb: * osm2pgsql 0.66: Allow final mod & output steps to run in parallel. Display more information about final index creation steps. Fix bug which caused diff updates of multipolygon relations to leave some incorrect ways behind. Form polygons from boundary relations if the ways form a closed ring. 
2009-05-10 13:42 +0000 [r14997] jonb: * Fix polygon ring directions using geos normalize() 2009-05-08 13:51 +0000 [r14965] frederik: * Make sure that osm2pgsql does not attempt to append data to a table when it already has data in a different SRS. Without this patch it is perfectly possible for the mindless user (Y.T.) to create a table with -l and later append to it without -l, which will land you with a "select distinct srid(way) from planet_osm_point" returning two SRSs. Mapnik's queries will then fail with an "Operation on two geometries with different SRIDs" error. Note, this patch only checks the default SRID given in the geometry_columns table. 2009-05-01 16:54 +0000 [r14863] jonb: * fix projection help output. osm2pgsql option for old-style mrecator is -M 2009-04-30 16:20 +0000 [r14846-14847] zere: * Fixed bug in bzip handling near end-of-file, plus better error reporting. * Fixed compiler warnings about unused parameters. 2009-04-30 13:36 +0000 [r14843] tomhughes: * Initial work on generating a gazetteer database. 2009-04-30 13:34 +0000 [r14842] zere: * Changed terminology for choosable backend. 2009-04-30 13:26 +0000 [r14841] zere: * Use bzip2 interface directly, rather than through the zlib compatibility interface to deal with multiple streams in pbzip2-generated files. Also added a 'null' output for testing purposes. 
2009-04-22 19:58 +0000 [r14703] stevechilton: * service added for parking_aisle 2009-03-22 13:19 +0000 [r14211] guenther: * - changed path for geoinfo.db in mapnik-osm-updater.sh 2009-03-21 22:27 +0000 [r14207] joerg: * use new name osm2poidb for gpsdrive-update-osm-poi-db; more tests if executables exist; --no-mirror also for geofabrik imports 2009-03-13 21:36 +0000 [r14072] frederik: * simple stand-alone debian packaging for osm2pgsql 2009-03-09 06:47 +0000 [r14039] guenther: * - updated mapnik-osm-updater script for new gpsdrive poi database 2009-03-01 21:41 +0000 [r13945] stevechilton: * add construction to default.style 2009-02-25 18:46 +0000 [r13898] jonb: * osm2pgsql: When processing boundary relations, create linestrings, not polygons geometries, even if they form a closed ring. 2009-02-16 12:16 +0000 [r13756] jochen: * database istn't hardcoded any more 2009-02-15 19:44 +0000 [r13745] jonb: * osm2pgsql: Add ability to generate new columns from default.style when operating in append mode 2009-02-15 18:46 +0000 [r13744] jonb: * osm2pgsql: Attempt to make code work with columns in unexpected order, e.g. if default.style updated. Not fully automatted, you still need to manually create any new columns 2009-02-14 11:33 +0000 [r13721] jonb: * osm2pgsql: Fix likely cause of crash reported by cmarqu. This would trigger if you defined too many coluns defined in your default.style. 2009-02-11 20:28 +0000 [r13671] jonb: * osm2pgsql: remove from some targets since they dont work well with mmm:nnn svnversion strings 2009-02-11 20:24 +0000 [r13670] jonb: * osm2plsql: Use svnversion for version string. Update to version 0.65. Fix compile warning about basename. Switch default error message to direct people at using --help instead of flooding them with all options 2009-02-11 17:28 +0000 [r13668] stevehill: * Replace the in-memory dirty tile store with something a bit more efficient. Also adds support for specifying a range of zoom levels - i.e. "-o 0-17". 
The output dirty tile list will use the lowest zoom level which accurately describes the tiles which have been expired. 2009-02-10 20:40 +0000 [r13653] jonb: * Perform polygon processing on relations with type=boundary 2009-02-08 20:47 +0000 [r13616] stevechilton: * add capital and lock to default.style 2009-02-08 20:44 +0000 [r13615] jonb: * osm2pgsql: Tweak geos includes to work with geos-3. Hopefully this should continue to work with geos-2.2 as well 2009-02-08 20:19 +0000 [r13613] stevehill: * Link to the OPM expire_tiles.py script 2009-02-08 20:12 +0000 [r13612] stevehill: * Adds tile expiry support - see http://lists.openstreetmap.org/pipermail/dev/2009-February/013934.html This introduces 2 new commandline options: "-e " and "-o ". So, specifying "-e 17 -o /tmp/dirty_tiles" when importing a delta will cause osm2pgsql to generate a list of all zoom level 17 tiles which the delta has made dirty and store it in /tmp/dirty_tiles. Proviso: for polygons, it currently takes a simplistic approach of drawing a bounding box around the whole polygon and marking every tile in the box as dirty. If the bounding box is large (over 30x30Km) the polygon is treated as a line instead, so only the perimeter will be marked as dirty (this is so that huge polygons don't expire vast numbers of tiles and is based on the assumption that we probably aren't going to shade the area of massive polygons). The dirty tile list is maintained in memory as a binary tree and dumped to disk at the end of the run. 2009-02-07 23:36 +0000 [r13578] jonb: * Declate out_pgsql as extern in header file. Rename __unused since it may clash with other definitions. 2009-02-02 22:22 +0000 [r13511] jonb: * osm2pgsql: fixes #1550. Don't inherit tags from ways if the multipolygon has its own tags. 
Don't match inner way tags if there are no poly_tags to match against 2009-02-01 11:39 +0000 [r13474] stevechilton: * add three addr: lines 2009-01-31 21:34 +0000 [r13470] jonb: * osm2pgsql: Fix relation processing in non-slim mode. It now needs more memory during the processing since it needs to remember ways even if they dont have any tags 2009-01-27 22:59 +0000 [r13407] jonb: * Add barrier for latest osm.xml 2009-01-13 13:55 +0000 [r13189] guenther: * - fixed bug in mapnik-osm-updater.sh preventing generation of poi database 2009-01-04 17:27 +0000 [r12912] jonb: * Fix compile problem by removing output-gazetteer.h reference 2009-01-02 23:58 +0000 [r12828] tomhughes: * Allow osm2pgsql to process planetdiff files. 2008-12-29 12:06 +0000 [r12661] guenther: * - updated creation of poi database in mapnik-osm-updater.sh 2008-12-29 11:21 +0000 [r12659] guenther: * - updated part for generation of gpsdrive POI database in mapnik-osm-updater.sh 2008-12-21 10:10 +0000 [r12447] joerg: * show more directory levels of GeoFabrik with option --all-planet-geofabrik=\? 2008-12-21 09:08 +0000 [r12446] joerg: * add creation of spatial_ref_sys in more cases 2008-12-21 08:47 +0000 [r12445] joerg: * also include spatial_ref_sys.sql, check for more possible postgis versions 2008-12-16 22:12 +0000 [r12383] jonb: * Add ele column as used by latest osm.xml 2008-12-02 23:10 +0000 [r12197] jonb: * Add historic= as polygon 2008-12-02 07:15 +0000 [r12182] joerg: * Add the 900913 File to postgress; change order for granting rights; check for file existence of lwpostgis 2008-11-23 12:46 +0000 [r12049] ksharp: * Fixed case of README.txt in Makefile and SPEC file, fixed make clean to remove generated SPEC file. 2008-11-23 01:25 +0000 [r12044] joerg: * Add support for Ubuntu new Postgis 2008-11-23 01:16 +0000 [r12043] joerg: * Type; missing ` 2008-11-16 12:51 +0000 [r11942] jonb: * Update default.style. 
We now want to render aerialway on points too 2008-10-28 15:08 +0000 [r11520] martinvoosterhout: * Fix reference to fixed table name planet_osm. Not sure how this one slipped through. 2008-10-22 23:24 +0000 [r11410] jonb: * osm2pgsql: Treat lines and polygons the same way when trying to work out if the way should go into the roads table. This allows ways with both waterway and boundary set to be rendered correctly. Also allow tagged islands to appear. Swap order of entries in the layer table to put the most common ones near the front which will speed up the matching. 2008-10-19 15:37 +0000 [r11320] martinvoosterhout: * Add escaping for \r. 2008-10-19 14:18 +0000 [r11315-11316] joerg: * mapnik-osm-updater.sh: improve searching for tools * mapnik-osm-updater.sh: adapt searching for tools 2008-10-07 22:42 +0000 [r11078] jonb: * Add 'disused' column into osm2pgsql style 2008-09-30 21:25 +0000 [r11007] jonb: * Convert waterway into a polygon to match latest osm.xml 2008-09-08 17:33 +0000 [r10564] guenther: * - replaced script for gpsdrive extensions by binary in mapnik-osm-updater.sh 2008-09-03 20:43 +0000 [r10464] martinvoosterhout: * Turns all creates into modifies for osmChange files. Technically wrong but it matches what osmosis does and should probably be the default until the whole snapshot thing gets sorted out. 2008-09-02 21:25 +0000 [r10429] jonb: * Add power_source column for latest osm.xml 2008-09-02 11:03 +0000 [r10387] martinvoosterhout: * Remove the special cases where extra things need to get prepared when you have intarray. An extra field in the table is much nicer then nasty if statements. 2008-08-30 15:32 +0000 [r10338] martinvoosterhout: * Allow the location of the style file to be specified on the command line. Patch by Roeland Douma. 2008-08-26 19:27 +0000 [r10184] martinvoosterhout: * Typo in index creation. 
2008-08-25 21:24 +0000 [r10149] martinvoosterhout: * The optimisation steps should not be applied in append mode since they will take forever on a complete database and patching is supposed to be quick. 2008-08-25 21:06 +0000 [r10148] martinvoosterhout: * Use GIN indexes instead of GIST. This means we require a newer version of PostgreSQL but GiST is way too slow here. Also don't try ANALYSE after each endCopy, takes far too long when just applying a patch. 2008-08-12 21:44 +0000 [r9756] jonb: * Add postgis definition for the 900913 spherical mercator projection we use. Import like: psql gis <900913.sql or \i 900913.sql 2008-08-03 20:09 +0000 [r9441] andreas: * add missing include to compile with gcc-4.3 2008-07-27 20:06 +0000 [r9315-9316] jonb: * osm2pgsql: Drop any left over tmp tables at start of import * Add aerialway as linear way type to osm2pgsql default.style 2008-07-24 00:15 +0000 [r9267] jonb: * osm2pgsql: Comment out debug lines 2008-07-24 00:11 +0000 [r9266] jonb: * osm2pgsql: Fix up crash in relation handling. The list of members does not match the x' arrays if one or more members is a node or relation, do all processing on the arrays instead (maybe the member structure can be expanded in future to make this more generic but this requires changes where xnodes is used in build_geometry etc). 2008-07-24 00:07 +0000 [r9265] jonb: * osm2pgsql: Move type definitions to a more appropriate location 2008-07-23 00:54 +0000 [r9252] jonb: * osm2pgsql: Update multi-polygon algorithm to detect multipolygons with different tags on the inner rings and emit these as ways to be rendered seperately. 2008-07-23 00:31 +0000 [r9251] jonb: * osm2pgsql: reduce frequency of out-of-order node warning. Turns out this just effects the cache efficiency not the operation of the overall processing. This is fine for small files like the ones from Josm. Closes #1058 2008-07-21 09:49 +0000 [r9211] tomhughes: * Only prompt for a password if -W/--password is given. 
This is what psql does and it allows for implicit authentication as a different user using -U without -W. 2008-07-14 20:41 +0000 [r9013] jonb: * osm2pgsql: Re-order arithmetic expression to avoid overflow at --cache 2048. Fix compile warning 2008-07-11 13:50 +0000 [r8944] martinvoosterhout: * Commit all the necessary changes to make saving and restoring of relations work. This means that when a way that is part of a relation changes the relation will be properly reconstructed. 2008-07-10 09:51 +0000 [r8907] martinvoosterhout: * We can't prepare the statement until the table is created, which makes the program break on a clean database. Hopefully it really does work now... 2008-07-09 18:01 +0000 [r8887] martinvoosterhout: * Clearly you can't even prepare statements relying on intarray if you don't have it. Change code so it all works without properly, as long as you don't try to apply patches. 2008-07-09 15:28 +0000 [r8885-8886] martinvoosterhout: * When a prepared statement fails, log the parameters for debugging purposes. * Finally, add the code to process modifies and deletes from patches. Almost everything should work, except if a member of a relation is changed, the relation isn't updated. If the relation is updated though, it will pick up the new members so it could in principle be worked around by reloading all the relations afterwards. In addition there were the following changes: * Fixing escaping bugs since forever when output-pgsql uses prepared statement mode. * The ways table gets a partial index on pending, for performance. * Only bother with the intarray stuff if we're creating tables. Hope nothing else got borked. 2008-07-09 12:01 +0000 [r8871] martinvoosterhout: * Add the necessary infrastructure to build the GIST indexes for finding ways that need updating when a node moved and things like that. It tests for the intarray module and warns if it doesn't find it. It's not an error to run without since the user may be using slim mode to save memory. 
Perhaps in time we should look into a seperating the slim and the patching mode more clearly. 2008-07-09 11:02 +0000 [r8867] martinvoosterhout: * Add support for process delete commands for ways and nodes. This is the easy part since we don't need to search for objects depending on them (the diff should contains modifications for them anyway). Relations not done because the whole save/restore for them does not exist at all at the moment. In the process output-pgsql needed to be taught how to handle jumping in and out of COPY mode. 2008-07-09 09:34 +0000 [r8863] martinvoosterhout: * Commit parser changes to support the loading of diffs. Supports both osmChange and JOSM though it doesn't support placeholders (it's not clear that's useful in this context). Anything other than creating still results in an error so far, so it doesn't change anything from a practical point of view yet. In passing, fix a bug where the append option didn't work in slim mode. 2008-06-18 21:01 +0000 [r8319-8320] jonb: * osm2pgsql: Up-rev to 0.55 for the new default projection change * osm2pgsql: Make spherical mercator the default, old format is now -M|--oldmerc 2008-06-17 21:38 +0000 [r8288] joerg: * Revert wrong header change. We need the C(not C++) Header to determine the Version Number of geos 2008-06-17 21:34 +0000 [r8287] jonb: * Add tracktype which is required by latest osm.xml 2008-06-12 10:30 +0000 [r8186] guenther: * - adding poi key 2008-06-10 23:18 +0000 [r8159] joerg: * use 0 instead of 0{@{}}, since it is deprecated in newer perl 2008-06-09 12:36 +0000 [r8143] stevehill: * Added "road" to layers. 2008-06-05 21:09 +0000 [r8109] jonb: * osm2pgsql: Allow printf style arguments to pgsql_exec(). Use table specific temporary name during final data indexing 2008-05-28 07:13 +0000 [r7976] martinvoosterhout: * Add some changes from Edgemaster for MinGW support, see #926 2008-05-27 22:32 +0000 [r7975] jonb: * osm2pgsql: Make -C option work (instead of just --cache). 
Remove commented out ifdef lines for old slim mode 2008-05-26 21:08 +0000 [r7948] jonb: * Add explicit support for area= into osm2pgsql 2008-05-26 19:33 +0000 [r7947] jonb: * Update osm2pgsql to remove minor memory leak of style data. Free up mid-layer memory before doing final step which only touches the final DB. Move boundary data into roads table. Document use of roads table for low-zoom features. Make final DB step multi-threaded. Update default.style to work with existing mapnik code + osm.xml (otherwise Mapnik fails to handle string/integer comparisons in admin_level). 2008-05-18 08:31 +0000 [r7850] martinvoosterhout: * Give relations a negative ID in the database so they don't clash with other objects. 2008-05-18 08:10 +0000 [r7849] martinvoosterhout: * Add support for route relations. It has some special processing for bicycle routes which I just copied from the gravitystorm code. For normal relations like bus routes it should work also. To actually use bicycle relations the user will need to uncomment the relevent columns in the style file. 2008-05-03 14:13 +0000 [r7641] martinvoosterhout: * Fix bug that was reversing all the ways due to subtle interaction of ordering of nodes. Old code assumed the nodes would be provided in reverse order. 
2008-04-29 21:54 +0000 [r7596] joerg: * applications/utils/mapnik-osm-updater.sh: use the directory /usr/share/openstreetmap/ for the default.styles 2008-04-29 21:41 +0000 [r7595] joerg: * comment out creation of users, because it might break the system 2008-04-29 21:11 +0000 [r7593] joerg: * export/osm2pgsql/mapnik-osm-updater.sh: go to right directory for asm2pqsql call 2008-04-29 20:57 +0000 [r7591-7592] joerg: * mapnik default.style:remove gpsdrive line * mapnik default.style: move to /usr/share/openstreetmap-utils for debian package 2008-04-28 22:14 +0000 [r7570] joerg: * mapnik-osm-updater.sh: adapt to new osm2pqsql; add more error checks, fix wrong usage of command users 2008-04-19 21:54 +0000 [r7444] martinvoosterhout: * Add code to coalesce output COPY data into larger blocks to avoid excessive overhead. Also start using some of the pgsql.c helper functions to reduce the amount of code for the standard error checking. 2008-04-19 14:53 +0000 [r7440] martinvoosterhout: * Store empty tag lists as NULLs, to try and squeeze out some more space savings. 2008-04-19 14:18 +0000 [r7436] martinvoosterhout: * Use the same filter_tags code for nodes as we do for ways. Apart from simplifying the code it stops us storing useless tags data in the nodes table (including all the hugely long tiger tags). 2008-04-17 10:33 +0000 [r7397] martinvoosterhout: * Try harder to get large file pointers working 2008-04-15 20:35 +0000 [r7371] martinvoosterhout: * Make the style file's use of tags more strict, we now define a strict set of flags which are used and the remainder are warned about. In particular we have the 'delete' flag which indicates the tag should be ignored entirely. Additionally we now look through all the tags in filter_tags and remove any we don't know about. This is primarily for slim mode, stuff which it doesn't understand should be deleted to save space. 
2008-04-13 10:33 +0000 [r7350] martinvoosterhout: * Patch from David Stubbs so that columns defined as integers Just Work(tm). We parse the string directly to an integer if the column is defined as int4. 2008-04-12 17:05 +0000 [r7345] martinvoosterhout: * Add a caching level to the slim-mode with configurable size, so it actually has decent performance. It is implemented as a lossy sparse array with a priority queue tracking how much of each block is used to ensure we maximize the number of nodes we fit in the given amount of memory. Also rearrange some header definitions. 2008-04-11 19:14 +0000 [r7334] martinvoosterhout: * Update version number so people know what they're running 2008-04-11 12:10 +0000 [r7331] martinvoosterhout: * Commit many new changes to osm2pgsql, including: - list of tags read from file - slim mode works again - relations properly supported - more efficient DB usage It includes some restructuring of the code, in particular, the output module manages the mid-level now and the main program doesn't call it at all. This moves many of the previous hacks to the output module which can manage the mid-level as appropriate for its output (i.e. slim mode requires different semantics from ram mode) 2008-03-22 17:58 +0000 [r7141] martinvoosterhout: * Add support for a -E|--proj option which allows users to use any epsg projection that be used by proj4's +init=epsg: format. Should help all those people wanting to do maps in different projections. 2008-03-19 20:00 +0000 [r7120] jonb: * osm2pgsql: Add 'width' key 2008-03-14 11:55 +0000 [r7083] guenther: * - added creation of poi column to --all* options in mapnik-osm-updater.sh 2008-03-09 18:50 +0000 [r7064] guenther: * - added script to add gpsdrive poi-types to mapnik database. use the option --add-gpsdrive-types to activate this feature. this is recommended, if you create the database for use with gpsdrive. 2008-03-07 21:17 +0000 [r7048] jonb: * osm2pgsql: Treat man_made & power as possible areas. 
Longer term we'll probably need to drop this linear/area designation from osm2pgsql since more keys are being used for both. 2008-03-04 22:46 +0000 [r7032] jonb: * osm2pgsql: request from cmarqu to add wood= into DB 2008-02-18 23:41 +0000 [r6920] jonb: * osm2pgsql: Add authentication options (user, host, port, password). 2008-02-16 19:31 +0000 [r6893] jonb: * osm2pgsql: Convert boundary keys into a linear feature to match the current osm.xml. The bulk of boundaries are defined by multiple ways. If we want polygons then we'll need some way to join these to form closed areas. 2008-02-09 19:21 +0000 [r6816] jonb: * osm2pgsql: Add option to filter import with bounding box 2008-01-31 23:51 +0000 [r6734] jonb: * osm2pgsql: Add power= to exported list 2008-01-03 19:40 +0000 [r6239] joerg: * mapnik-osm-updater.sh: add support for automatically downloading and installing smaller planet excerpts from Frederiks Geofabrik Page 2007-12-30 10:48 +0000 [r6191] joerg: * mapnik-osm-updater.sh: Add option to only download and import euope extract 2007-12-21 23:16 +0000 [r6138-6139] jonb: * osm2pgsql: Comment out the broken --slim mode. Export access= tag. 
Only create automatic parking symbols if there is no defined access= tag or access=public * osm2pgsql: Update Makefile with Solaris compatability fixes from Martin Spott 2007-12-20 15:59 +0000 [r6116] jochen: * added svn:ignore stuff 2007-12-18 23:11 +0000 [r6089] jonb: * osm2pgsql: Add keys for military, embankment, cutting & admin_level 2007-12-16 17:41 +0000 [r6078] jochen: * changed name of readme.txt to README.txt so that it sticks out more 2007-12-15 15:41 +0000 [r6068] jochen: * typo fixed 2007-12-10 22:02 +0000 [r6012] jonb: * osm2pgsql: Apply gcc-4.3 compile fix from Martin Michlmayr & Andreas Putzo 2007-12-04 22:15 +0000 [r5901] jonb: * osm2pgsql: Add religion into exported tags 2007-11-30 17:02 +0000 [r5834] jonb: * osm2pgsql: make tunnel=yes be equivalent to layer=-1 for rendering order 2007-11-23 17:00 +0000 [r5716] martinvoosterhout: * Update the readme to inform users about the new features and how to use them. Also rename the default projection to "WGS84 Mercator" which better describes what it actually is. 2007-11-23 16:54 +0000 [r5715] martinvoosterhout: * Add support for a --prefix option so that you can easily run multiple mapnik instances out of the one DB. The default is ofcourse still "planet_osm" so if you don't use it you won't see a difference. I did however need to change the names of the indexes so they don't clash also, but this is unlikely to break anything. 2007-11-20 08:51 +0000 [r5633] martinvoosterhout: * Restructure the projection code so it can support more projections, primarily the true spherical mercator used by Google, TileCache and others. Add the -m option to select this. The default is still the incorrect projection used before. Also display the used projection during processing. Finally some minor cleanups to fix some warnings about undefined functions. 
2007-11-19 07:11 +0000 [r5594] joerg: * mapnik-osm-updater.sh: correct Error checking 2007-11-14 11:12 +0000 [r5506] joerg: * osm2pgsql/mapnik-osm-updater.sh: add option for bz2 dump files 2007-11-14 11:09 +0000 [r5505] joerg: * osm2pgsql/mapnik-osm-updater.sh: have two options for with/without updatechek on import 2007-11-11 15:29 +0000 [r5456] martinvoosterhout: * Fix the pgsql output so it sets the right projection in the database. Usually not terribly important (which is why it's been broken this long) but if you start trying to do more sophisticated operations on the data it's better if the projection is what it says it is. 2007-10-28 21:24 +0000 [r5223] joerg: * mapnik-osm-updater.sh: rename options - instead of _; allow empty username, write logfile while importing, add updateifchanged option 2007-10-24 22:06 +0000 [r5160] jonb: * osm2pgsql: Drop unused index on z_order. Change clustering to use temporary table which is faster (but uses more temporary disk space). Remove Vacuums since we do not remove or update data. 2007-10-14 20:10 +0000 [r5002] joerg: * mapnik-osm-updater: search for planet-mirror.pl 2007-10-14 10:56 +0000 [r4986] joerg: * mapnik-osm-updater: Add check against own userid and root 2007-10-14 10:33 +0000 [r4983] joerg: * move mapnik-osm-updater.sh to utils where it better fits 2007-10-12 21:18 +0000 [r4955] jonb: * mod_tile: Apache module and rendering daemon for serving OSM tiles 2007-10-10 21:48 +0000 [r4935] jonb: * osm2pgsql: check for null pointer 2007-10-10 21:18 +0000 [r4934] jonb: * osm2pgsql: Need to look for type=multipolygon not a multipolygon key (was breaking all polygons with holes) 2007-10-10 21:04 +0000 [r4933] jonb: * osm2pgsql: Reverse direction of one-way streets (bug #559) 2007-10-09 22:10 +0000 [r4917] jonb: * osm2pgsql: Ensure we only process multipolygon relations. 
Add pre-filter to reduce memory usage by writing out ways which should never be part of a multipolygon instead of storing them 2007-10-09 19:44 +0000 [r4914] jonb: * osm2pgsql: Trap duplicate points and ways which end up with only a single node. These remove some exceptions 2007-10-09 01:05 +0000 [r4903] jonb: * osm2pgsql: Catch exceptions thrown by geos 2007-10-08 23:23 +0000 [r4901] jonb: * osm2pgsql: Swap lat/lon on parking nodes 2007-10-08 22:11 +0000 [r4897] jonb: * osm2pgsql: Fix some memory leaks. Remove debug output 2007-10-08 22:05 +0000 [r4895] jonb: * osm2pgsql: update to handle polygons with holes in 0.5 API (described using relations). The code is nasty but appears to work on small datasets. 2007-10-07 11:24 +0000 [r4842] gabriel: * Make changes for 0.5. 2007-10-03 19:30 +0000 [r4816] jonb: * osm2pgsql: Reduce memory usage by processing ways during the XML reading 2007-09-07 22:22 +0000 [r4485] jonb: * osm2pgsql 0.07: Make UTF8sanitize optional since it is generally no longer required. Add option to output in latlong format 2007-09-03 22:26 +0000 [r4441] jonb: * osm2pgsql version 0.06. Add command line options to select database name, slim memory usage and appending of data. Ignore bound tag. Improve stdin reading. 2007-09-02 23:00 +0000 [r4426] jonb: * osm2pgsql: Version 0.05. Cleaup progress output 2007-09-02 22:25 +0000 [r4424] jonb: * osm2pgsql: Allow multiple .osm files to be imported simultaneoulsy, e.g. for lots of Tiger county.osm.gz files 2007-08-27 14:49 +0000 [r4319] martinvoosterhout: * Whoops, got the test wrong last commit :) 2007-08-27 14:45 +0000 [r4318] martinvoosterhout: * Patch supplied by David Siegel on the mailing list, with editorialisation by moi. 2007-08-23 18:46 +0000 [r4286] jonb: * osm2pgsql: polygon area now used to determine the rendering order 2007-08-19 18:45 +0000 [r4231] jonb: * osm2pgsql 0.04: Further improve handling of polygons with holes. 
Reduce memory usage by about 10% 2007-08-19 00:16 +0000 [r4221] jonb: * osm2pgsql version 0.03: Handle polygons with holes correctly. Fix minor memory leak. 2007-08-12 21:05 +0000 [r4092] jonb: * osm2pgsql: Add boundary & tunnel. Enable polygon flag on a few columns. Sort key table 2007-08-12 19:03 +0000 [r4090] jonb: * osm2pgsql: Remove hard coded maximum IDs. Support negative IDs. Add RPM build target 2007-08-09 21:26 +0000 [r4059] jonb: * osm2pgsql: Remove warning flags which cause problems with older GCC 2007-08-03 14:56 +0000 [r3899] joerg: * add some linefeeds to error messages to make them easier readable 2007-08-01 21:17 +0000 [r3887] jonb: * osm2pgsql: increase max IDs to work with latest planet dump 2007-07-28 21:32 +0000 [r3826] jonb: * osm2pgsql: tidy up pgsql code, fix a few warnings and add some disabled code to render multiple name tags. No functional changes 2007-06-20 21:05 +0000 [r3280] jonb: * osm2pgsql: Increase max IDs in middle-ram 2007-05-28 21:25 +0000 [r3052] jonb: * osm2pgsql: improve help text, allow - to be used again 2007-05-28 21:16 +0000 [r3051] jonb: * osm2pgsql: enable O_LARGEFILE 2007-05-23 21:02 +0000 [r3008] artem: * added rendering direction arrows for oneway={yes,true,-1} 2007-05-20 20:48 +0000 [r2966] jonb: * osm2pgsql: Add fix for motorway shields. Move landuse to -1. Make tables public 2007-05-14 19:21 +0000 [r2904] jonb: * mapnik layer= implementation for areas/polygons 2007-05-07 21:35 +0000 [r2827] jonb: * osm2pgsql - make experimental version current, move previous implementation to legacy 2007-05-07 20:59 +0000 [r2826] jonb: * osm2pgsql: Mac OS-X and GEOS-3 compatability tweaks 2007-05-07 13:47 +0000 [r2813] jonb: * osm2pgsql/mapnik database changed to store data in mercator. Saving reprojection during rendering 2007-05-07 13:34 +0000 [r2812] jonb: * osm2pgsql update build_geometry.cpp for get_centroid() 2007-05-06 18:19 +0000 [r2797] jonb: * osm2pgsql exprimental update. Increase max IDs. 
Implement "add parking node for polygons with amenity=parking" as per latest setup_z_order script. First step towards an incremental update process. 2007-05-05 23:06 +0000 [r2770] jonb: * osm2pgsql - add bz2 and UTF8sanitizer support directly into this code 2007-04-21 06:29 +0000 [r2605] joerg: * move utils to applications. This way it should be easier to build Packages 2007-04-14 11:39 +0000 [r2498] jochen: * Movev lots of stuff into export directory 2007-03-25 00:42 +0000 [r2334] jonb: * osm2pgsql exp: Make sure all polygon table contains only polygons 2007-03-24 15:55 +0000 [r2331-2332] jonb: * Automatic GEOS2/3 detection * Add info on requirements and building 2007-03-24 15:42 +0000 [r2330] jonb: * Automatic GEOS version detection 2007-03-23 20:56 +0000 [r2323] jonb: * Switch to printf(%.15g) to give more precision on generated points 2007-03-22 20:20 +0000 [r2320] jonb: * Replace asprintf. Increase max IDs by 10% to give room for future growth 2007-03-22 10:27 +0000 [r2316] ksharp: * Fixing accidental change to Makefile 2007-03-22 10:17 +0000 [r2314] ksharp: * Added the polygons directory containing the extract-polygon.pl script, a README, and an example polygon. 2007-03-21 00:25 +0000 [r2310] jonb: * Add GEOS_TWO from non-experimental version 2007-03-21 00:11 +0000 [r2309] jonb: * Split implementation into middle and output layers. Choice of RAM or PGSQL middle layers offers speed vs memory tradeoff. Implements new point/line/poly tables with z_order for latest osm.xml 2007-03-17 12:46 +0000 [r2269] nick: * Added foot, horse, motorcar, bicycle and residence tags 2007-03-16 12:37 +0000 [r2267] artem: * Use GEOS_TWO flag to control version being used e.g. 
CXXFLAGS="-DGEOS_TWO" make 2007-03-13 14:06 +0000 [r2253] artem: * wropping geos stuff in try/catch 2007-03-13 12:39 +0000 [r2252] artem: * latest osm2pgsql generates one table per geometry type 2007-03-10 13:28 +0000 [r2249] jonb: * Direct database version of osm2pgsql 2007-03-10 13:17 +0000 [r2248] jonb: * Clone osm2pgsql files to experimental directory 2007-03-09 10:31 +0000 [r2243] artem: * use geos-config to setup compile flags 2007-03-09 00:26 +0000 [r2242] jonb: * Remove duplicate suppression code since this provides little benefit now that the Tiger data has been removed (the source of almost all the duplicate data). This allows the removal of the AVL & BST code and the ID field from the node/segment/way sctrucutres. This saves some memory and simplifies the code. Fixed a memory leak in WKT which forgot to free the segment item. Added counters for maximum node/segment/way IDs. Split the assert(id) checks to make it obvious which is failing. Cleaned up some white space. 2007-02-27 12:13 +0000 [r2180] artem: * we cannot use anythig appart from 'text' data type 2007-02-27 10:52 +0000 [r2178-2179] artem: * added 'bridge','building' and 'layer' tags * applied geos-2.2.3.patch (slightly modified) from jonb 2007-02-24 19:26 +0000 [r2160] artem: * cluster planet on spatial index for extra speed 2007-02-24 14:50 +0000 [r2159] jonb: * Fix empty segment logic 2007-02-24 08:40 +0000 [r2158] artem: * use geos to create geometries 2007-02-21 23:14 +0000 [r2153] jonb: * Make mapnik & osm2pgsql use NULL instead of empty strings in db 2007-02-21 22:43 +0000 [r2152] artem: * Added myself in place of unknown author 2007-02-12 19:42 +0000 [r2104] jonb: * osm2pgsql polygons for closed ways only 2007-02-11 15:58 +0000 [r2099] jonb: * osm2pgql filter duplicate segments in ways 2007-01-17 14:08 +0000 [r1918] steve: * up the max seg ids 2006-12-04 20:34 +0000 [r1734] nick: * man_made added 2006-12-03 01:01 +0000 [r1718] jonb: * Improved version of osm2pgsql. 
Adds 'natural' attribute. Some alogorithm improvments to reduce run time. Optional duplicate way detection (at expense of RAM usage). 2006-11-28 21:35 +0000 [r1669] nick: * added natural type 2006-11-27 20:38 +0000 [r1655] jonb: * Initial high level description of code and algorithm. 2006-11-22 15:10 +0000 [r1623] steve: * add railways 2006-11-22 12:36 +0000 [r1622] steve: * add ops to geom col 2006-11-22 11:41 +0000 [r1621] steve: * change varchars to text 2006-11-19 18:34 +0000 [r1604] jonb: * Handle missing nodes and segments instead of putting 0,0 into linestrings which was causing lots of rogue lines to appear on the map. Improved linestring generation for non-contiguous ways. Added a GIST index into the SQL output. Corrected usage info and added a gzip example. Removed some redundant lines. 2006-11-17 10:12 +0000 [r1577] jonb: * Initial version of C implmentation of OSM to Postgresql converter osm2pgsql-0.82.0/INSTALL000066400000000000000000000363321213272333300145670ustar00rootroot00000000000000Installation Instructions ************************* Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc. Copying and distribution of this file, with or without modification, are permitted in any medium without royalty provided the copyright notice and this notice are preserved. This file is offered as-is, without warranty of any kind. Basic Installation ================== Briefly, the shell commands `./configure; make; make install' should configure, build, and install this package. The following more-detailed instructions are generic; see the `README' file for instructions specific to this package. Some packages provide this `INSTALL' file but do not implement all of the features documented below. The lack of an optional feature in a given package is not necessarily a bug. More recommendations for GNU packages can be found in *note Makefile Conventions: (standards)Makefile Conventions. 
The `configure' shell script attempts to guess correct values for various system-dependent variables used during compilation. It uses those values to create a `Makefile' in each directory of the package. It may also create one or more `.h' files containing system-dependent definitions. Finally, it creates a shell script `config.status' that you can run in the future to recreate the current configuration, and a file `config.log' containing compiler output (useful mainly for debugging `configure'). It can also use an optional file (typically called `config.cache' and enabled with `--cache-file=config.cache' or simply `-C') that saves the results of its tests to speed up reconfiguring. Caching is disabled by default to prevent problems with accidental use of stale cache files. If you need to do unusual things to compile the package, please try to figure out how `configure' could check whether to do them, and mail diffs or instructions to the address given in the `README' so they can be considered for the next release. If you are using the cache, and at some point `config.cache' contains results you don't want to keep, you may remove or edit it. The file `configure.ac' (or `configure.in') is used to create `configure' by a program called `autoconf'. You need `configure.ac' if you want to change it or regenerate `configure' using a newer version of `autoconf'. The simplest way to compile this package is: 1. `cd' to the directory containing the package's source code and type `./configure' to configure the package for your system. Running `configure' might take a while. While running, it prints some messages telling which features it is checking for. 2. Type `make' to compile the package. 3. Optionally, type `make check' to run any self-tests that come with the package, generally using the just-built uninstalled binaries. 4. Type `make install' to install the programs and any data files and documentation. 
When installing into a prefix owned by root, it is recommended that the package be configured and built as a regular user, and only the `make install' phase executed with root privileges. 5. Optionally, type `make installcheck' to repeat any self-tests, but this time using the binaries in their final installed location. This target does not install anything. Running this target as a regular user, particularly if the prior `make install' required root privileges, verifies that the installation completed correctly. 6. You can remove the program binaries and object files from the source code directory by typing `make clean'. To also remove the files that `configure' created (so you can compile the package for a different kind of computer), type `make distclean'. There is also a `make maintainer-clean' target, but that is intended mainly for the package's developers. If you use it, you may have to get all sorts of other programs in order to regenerate files that came with the distribution. 7. Often, you can also type `make uninstall' to remove the installed files again. In practice, not all packages have tested that uninstallation works correctly, even though it is required by the GNU Coding Standards. 8. Some packages, particularly those that use Automake, provide `make distcheck', which can by used by developers to test that all other targets like `make install' and `make uninstall' work correctly. This target is generally not run by end users. Compilers and Options ===================== Some systems require unusual options for compilation or linking that the `configure' script does not know about. Run `./configure --help' for details on some of the pertinent environment variables. You can give `configure' initial values for configuration parameters by setting variables in the command line or in the environment. Here is an example: ./configure CC=c99 CFLAGS=-g LIBS=-lposix *Note Defining Variables::, for more details. 
Compiling For Multiple Architectures ==================================== You can compile the package for more than one kind of computer at the same time, by placing the object files for each architecture in their own directory. To do this, you can use GNU `make'. `cd' to the directory where you want the object files and executables to go and run the `configure' script. `configure' automatically checks for the source code in the directory that `configure' is in and in `..'. This is known as a "VPATH" build. With a non-GNU `make', it is safer to compile the package for one architecture at a time in the source code directory. After you have installed the package for one architecture, use `make distclean' before reconfiguring for another architecture. On MacOS X 10.5 and later systems, you can create libraries and executables that work on multiple system types--known as "fat" or "universal" binaries--by specifying multiple `-arch' options to the compiler but only a single `-arch' option to the preprocessor. Like this: ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ CPP="gcc -E" CXXCPP="g++ -E" This is not guaranteed to produce working output in all cases, you may have to build one architecture at a time and combine the results using the `lipo' tool if you have problems. Installation Names ================== By default, `make install' installs the package's commands under `/usr/local/bin', include files under `/usr/local/include', etc. You can specify an installation prefix other than `/usr/local' by giving `configure' the option `--prefix=PREFIX', where PREFIX must be an absolute file name. You can specify separate installation prefixes for architecture-specific files and architecture-independent files. If you pass the option `--exec-prefix=PREFIX' to `configure', the package uses PREFIX as the prefix for installing programs and libraries. 
Documentation and other data files still use the regular prefix. In addition, if you use an unusual directory layout you can give options like `--bindir=DIR' to specify different values for particular kinds of files. Run `configure --help' for a list of the directories you can set and what kinds of files go in them. In general, the default for these options is expressed in terms of `${prefix}', so that specifying just `--prefix' will affect all of the other directory specifications that were not explicitly provided. The most portable way to affect installation locations is to pass the correct locations to `configure'; however, many packages provide one or both of the following shortcuts of passing variable assignments to the `make install' command line to change installation locations without having to reconfigure or recompile. The first method involves providing an override variable for each affected directory. For example, `make install prefix=/alternate/directory' will choose an alternate location for all directory configuration variables that were expressed in terms of `${prefix}'. Any directories that were specified during `configure', but not in terms of `${prefix}', must each be overridden at install time for the entire installation to be relocated. The approach of makefile variable overrides for each directory variable is required by the GNU Coding Standards, and ideally causes no recompilation. However, some platforms have known limitations with the semantics of shared libraries that end up requiring recompilation when using this method, particularly noticeable in packages that use GNU Libtool. The second method involves providing the `DESTDIR' variable. For example, `make install DESTDIR=/alternate/directory' will prepend `/alternate/directory' before all installation names. The approach of `DESTDIR' overrides is not required by the GNU Coding Standards, and does not work on platforms that have drive letters. 
On the other hand, it does better at avoiding recompilation issues, and works well even when some directory options were not specified in terms of `${prefix}' at `configure' time. Optional Features ================= If the package supports it, you can cause programs to be installed with an extra prefix or suffix on their names by giving `configure' the option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. Some packages pay attention to `--enable-FEATURE' options to `configure', where FEATURE indicates an optional part of the package. They may also pay attention to `--with-PACKAGE' options, where PACKAGE is something like `gnu-as' or `x' (for the X Window System). The `README' should mention any `--enable-' and `--with-' options that the package recognizes. For packages that use the X Window System, `configure' can usually find the X include and library files automatically, but if it doesn't, you can use the `configure' options `--x-includes=DIR' and `--x-libraries=DIR' to specify their locations. Some packages offer the ability to configure how verbose the execution of `make' will be. For these packages, running `./configure --enable-silent-rules' sets the default to minimal output, which can be overridden with `make V=1'; while running `./configure --disable-silent-rules' sets the default to verbose, which can be overridden with `make V=0'. Particular systems ================== On HP-UX, the default C compiler is not ANSI C compatible. If GNU CC is not installed, it is recommended to use the following options in order to use an ANSI C compiler: ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" and if that doesn't work, install pre-built binaries of GCC for HP-UX. On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot parse its `' header file. The option `-nodtk' can be used as a workaround. 
If GNU CC is not installed, it is therefore recommended to try ./configure CC="cc" and if that doesn't work, try ./configure CC="cc -nodtk" On Solaris, don't put `/usr/ucb' early in your `PATH'. This directory contains several dysfunctional programs; working variants of these programs are available in `/usr/bin'. So, if you need `/usr/ucb' in your `PATH', put it _after_ `/usr/bin'. On Haiku, software installed for all users goes in `/boot/common', not `/usr/local'. It is recommended to use the following options: ./configure --prefix=/boot/common Specifying the System Type ========================== There may be some features `configure' cannot figure out automatically, but needs to determine by the type of machine the package will run on. Usually, assuming the package is built to be run on the _same_ architectures, `configure' can figure that out, but if it prints a message saying it cannot guess the machine type, give it the `--build=TYPE' option. TYPE can either be a short name for the system type, such as `sun4', or a canonical name which has the form: CPU-COMPANY-SYSTEM where SYSTEM can have one of these forms: OS KERNEL-OS See the file `config.sub' for the possible values of each field. If `config.sub' isn't included in this package, then this package doesn't need to know the machine type. If you are _building_ compiler tools for cross-compiling, you should use the option `--target=TYPE' to select the type of system they will produce code for. If you want to _use_ a cross compiler, that generates code for a platform different from the build platform, you should specify the "host" platform (i.e., that on which the generated programs will eventually be run) with `--host=TYPE'. Sharing Defaults ================ If you want to set default values for `configure' scripts to share, you can create a site shell script called `config.site' that gives default values for variables like `CC', `cache_file', and `prefix'. 
`configure' looks for `PREFIX/share/config.site' if it exists, then `PREFIX/etc/config.site' if it exists. Or, you can set the `CONFIG_SITE' environment variable to the location of the site script. A warning: not all `configure' scripts look for a site script. Defining Variables ================== Variables not defined in a site shell script can be set in the environment passed to `configure'. However, some packages may run configure again during the build, and the customized values of these variables may be lost. In order to avoid this problem, you should set them in the `configure' command line, using `VAR=value'. For example: ./configure CC=/usr/local2/bin/gcc causes the specified `gcc' to be used as the C compiler (unless it is overridden in the site shell script). Unfortunately, this technique does not work for `CONFIG_SHELL' due to an Autoconf bug. Until the bug is fixed you can use this workaround: CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash `configure' Invocation ====================== `configure' recognizes the following options to control how it operates. `--help' `-h' Print a summary of all of the options to `configure', and exit. `--help=short' `--help=recursive' Print a summary of the options unique to this package's `configure', and exit. The `short' variant lists options used only in the top level, while the `recursive' variant lists options also present in any nested packages. `--version' `-V' Print the version of Autoconf used to generate the `configure' script, and exit. `--cache-file=FILE' Enable the cache: use and save the results of the tests in FILE, traditionally `config.cache'. FILE defaults to `/dev/null' to disable caching. `--config-cache' `-C' Alias for `--cache-file=config.cache'. `--quiet' `--silent' `-q' Do not print messages saying which checks are being made. To suppress all normal output, redirect it to `/dev/null' (any error messages will still be shown). 
`--srcdir=DIR' Look for the package's source code in directory DIR. Usually `configure' can determine that directory automatically. `--prefix=DIR' Use DIR as the installation prefix. *note Installation Names:: for more details, including other options available for fine-tuning the installation locations. `--no-create' `-n' Run the configure checks, but stop before creating any output files. `configure' also accepts some other, not widely useful, options. Run `configure --help' for more details. osm2pgsql-0.82.0/Makefile.am000066400000000000000000000053241213272333300155670ustar00rootroot00000000000000ACLOCAL_AMFLAGS = -I m4 DIST_SUBDIRS = legacy bin_PROGRAMS = osm2pgsql nodecachefilereader osm2pgsql_SOURCES = build_geometry.cpp input.c middle.h middle-ram.h output-gazetteer.h output-pgsql.c rb.c sanitizer.h text-tree.h build_geometry.h input.h middle-pgsql.c osm2pgsql.c output.h output-pgsql.h rb.h sprompt.c UTF8sanitizer.c expire-tiles.c keyvals.c middle-pgsql.h osmtypes.h output-null.c parse-o5m.c parse-o5m.h parse-primitive.c parse-primitive.h parse-xml2.c parse-xml2.h pgsql.c reprojection.c sprompt.h expire-tiles.h keyvals.h middle-ram.c output-gazetteer.c output-null.h pgsql.h reprojection.h text-tree.c node-ram-cache.c wildcmp.c node-ram-cache.h node-persistent-cache.c node-persistent-cache.h binarysearcharray.c binarysearcharray.h nodecachefilereader_SOURCES = node-persistent-cache-reader.c node-persistent-cache.c node-ram-cache.c binarysearcharray.c if READER_PBF osm2pgsql_SOURCES += parse-pbf.c parse-pbf.h fileformat.pb-c.c fileformat.pb-c.h osmformat.pb-c.c osmformat.pb-c.h fileformat.pb-c.c: protobuf/fileformat.proto $(AM_V_GEN) $(PROTOC_C) --proto_path=protobuf --c_out=. $< fileformat.pb-c.h: fileformat.pb-c.c @if test ! -f $@; then \ rm -f $<; \ $(MAKE) $(AM_MAKEFLAGS) $<; \ else :; fi osmformat.pb-c.c: protobuf/osmformat.proto $(AM_V_GEN) $(PROTOC_C) --proto_path=protobuf --c_out=. $< osmformat.pb-c.h: osmformat.pb-c.c @if test ! 
-f $@; then \ rm -f $<; \ $(MAKE) $(AM_MAKEFLAGS) $<; \ else :; fi BUILT_SOURCES = \ fileformat.pb-c.c fileformat.pb-c.h \ osmformat.pb-c.c osmformat.pb-c.h endif osm2pgsqldir = $(datadir)/osm2pgsql AM_CFLAGS = @PTHREAD_CFLAGS@ @LFS_CFLAGS@ @POSTGRESQL_CFLAGS@ @XML2_CFLAGS@ @BZIP2_CFLAGS@ @GEOS_CFLAGS@ @PROJ_CFLAGS@ @PROTOBUF_C_CFLAGS@ -DOSM2PGSQL_DATADIR='"$(osm2pgsqldir)"' -DVERSION='"@PACKAGE_VERSION@"' AM_CPPFLAGS = @PTHREAD_CFLAGS@ @POSTGRESQL_CFLAGS@ @XML2_CFLAGS@ @BZIP2_CFLAGS@ @GEOS_CFLAGS@ @PROJ_CFLAGS@ -DOSM2PGSQL_DATADIR='"$(osm2pgsqldir)"' -Igeos-fallback AM_LDFLAGS = @PTHREAD_CFLAGS@ @ZLIB_LDFLAGS@ @ZLIB_LIBS@ @POSTGRESQL_LDFLAGS@ @POSTGRESQL_LIBS@ @XML2_LDFLAGS@ @BZIP2_LDFLAGS@ @BZIP2_LIBS@ @GEOS_LDFLAGS@ @GEOS_LIBS@ @PROJ_LDFLAGS@ @PROJ_LIBS@ @PROTOBUF_C_LDFLAGS@ @PROTOBUF_C_LIBS@ osm2pgsql_DATA = default.style 900913.sql man1_MANS = docs/osm2pgsql.1 docs/nodecachefilereader.1 EXTRA_DIST = osm2pgsql.spec.in \ osm2pgsql.spec \ protobuf/fileformat.proto \ protobuf/osmformat.proto \ debian \ $(osm2pgsql_DATA) $(PACKAGE).spec: $(PACKAGE).spec.in sed -e "s/@""PACKAGE""@/$(PACKAGE)/g; s/@""VERSION""@/$(VERSION)/g; s/@""SVN""@/`svnversion`/g;" $^ > $@ rpm: dist-gzip rpmbuild -ta $(distdir).tar.gz distclean-local: @rm -f $(PACKAGE).spec @rm -f config.nice osm2pgsql-0.82.0/NEWS000066400000000000000000000006011213272333300142230ustar00rootroot00000000000000== 2010-11-06 Version 0.70.5 == * missing libraries are now already detected by 'configure' instead of only breaking the build at compile time later * the existing 'libxml2' and experimental 'primitive' XML readers and a new 'pbf' reader for the Protobuf based format are now all available in the main binary, selectable using the new -r|--input-reader command line optionosm2pgsql-0.82.0/README000066400000000000000000000251401213272333300144110ustar00rootroot00000000000000osm2pgsql ========= Converts OSM planet.osm data to a PostgreSQL / PostGIS database suitable for specific applications like rendering into 
map tiles by Mapnik or geocoding with Nominatim. osm2pgsql currently supports two different database schemas 1) A database schema that is optimized for ease of rendering by Mapnik. 2) A database schema that is optimized for geocoding with Nominatim, emphasizing the spatially hierarchical organizations of objects. Both schemas were specifically optimized for the purpose they were intended for and they may therefore be less suitable for other general purpose processing. Nevertheless, the rendering schema might be useful for other purposes as well, and has been used for a variety of additionally purposes. For a broader view of the whole map rendering tool chain see http://wiki.openstreetmap.org/index.php/Mapnik http://wiki.openstreetmap.org/index.php/Osm2pgsql http://wiki.openstreetmap.org/index.php/Slippy_Map You may find that the wiki pages are more up to date than this readme and may include answers to issues not mentioned here. Any questions should be directed at the osm dev list http://wiki.openstreetmap.org/index.php/Mailing_lists Features ======== - Converts OSM files to a PostgreSQL DB - Conversion of tags to columns is configurable in the style file - Able to read .gz, .bz2, .pbf and .o5m files directly - Can apply diffs to keep the database up to data - Support the choice of output projection - Configurable table names - Gazetteer back-end for Nominatim http://wiki.openstreetmap.org/wiki/Nominatim - Support for hstore field type to store the complete set of tags in one database field if desired Source code =========== The latest source code is available in the OSM git repository on github and can be downloaded as follows: $ git clone git://github.com/openstreetmap/osm2pgsql.git Build requirements ================== The code is written in C and C++ and relies on the libraries below: - libxml2 http://xmlsoft.org/ - geos http://geos.refractions.net/ - proj http://www.remotesensing.org/proj/ - bzip2 http://www.bzip.org/ - zlib http://www.zlib.net/ - PostgreSQL 
http://www.postgresql.org/ - PostGIS http://postgis.refractions.net/ To make use of the database generated by this tool you will probably also want to install: - Mapnik from http://mapnik.org/ Building ======== Make sure you have installed the development packages for the libraries mentioned in the requirements section and a C and C++ compiler. e.g. on Fedora: # yum install geos-devel proj-devel postgresql-devel libxml2-devel bzip2-devel gcc-c++ on Debian: # aptitude install libxml2-dev libgeos-dev libgeos++-dev libpq-dev libbz2-dev libproj-dev protobuf-c-compiler libprotobuf-c0-dev autoconf automake libtool make g++ On most Unix-like systems the program can be compiled by running './autogen.sh && ./configure && make'. Operation ========= You must create a PostgreSQL user and a database with the PostGIS functions enabled. This requires access as the database administrator, normally the 'postgres' user. The default name for this database is 'gis' but this may be changed by using the osm2pgsql --database option. If the matches the unix user id running the import and rendering then this allows the PostgreSQL 'ident sameuser' authentication to be used which avoids the need to enter a password when accessing the database. This is setup by default on many Unix installs but does not work on Windows (due to the lack of unix sockets). Some example commands are given below but you may find this wiki page has more up to data information: http://wiki.openstreetmap.org/wiki/Mapnik/PostGIS $ sudo -u postgres createuser $ sudo -u postgres createdb -E UTF8 -O $ sudo -u postgres createlang plpgsql Adding the PostGIS extensions. Note the location of the files may vary. 
$ sudo -u postgres psql < /usr/share/postgresql/8.4/contrib/postgis-1.5/postgis.sql $ sudo -u postgres psql < /usr/share/postgresql/8.4/contrib/postgis-1.5/spatial_ref_sys.sql Next we need to give the access to update the postgis meta-data tables $ sudo -u postgres psql -d -c "ALTER TABLE geometry_columns OWNER TO " $ sudo -u postgres psql -d -c "ALTER TABLE spatial_ref_sys OWNER TO " The 900913 is not normally included with PostGIS. To add it you should run: $ sudo psql -u postgres psql -d -f 900913.sql If you want to use hstore support then you will also need to enable the PostgreSQL hstore-new extension. $ sudo -u postgres psql < /usr/share/postgresql/8.4/contrib/hstore.sql On PostgreSQL 9.1 and above, you can install it by running "CREATE EXTENSION hstore;" in your database. Now you can run osm2pgsql to import the OSM data. This will perform the following actions: 1) Osm2pgsql connects to database and creates the following 4 tables when used with the default output back-end (pgsql): - planet_osm_point - planet_osm_line - planet_osm_roads - planet_osm_polygon The prefix "planet_osm" can be changed with the --prefix option, the above is the default. If you are using --slim mode, it will create the following additional 3 tables: - planet_osm_nodes - planet_osm_ways - planet_osm_rels 2) Runs an XML parser on the input file (typically planet.osm) and processes the nodes, ways and relations. 3) If a node has a tag declared in the style file then it is added to planet_osm_point. If it has no such tag then the position is noted, but not added to the database. 4) Ways are read in converted into WKT geometries by using the positions of the nodes read in earlier. If the tags on the way are listed in the style file then the way will be written into the line or roads tables. 5) If the way has one or more tags marked as 'polygon' and forms a closed ring then it will be added to the planet_osm_polygon table. 6) The relations are parsed. 
Osm2pgsql has special handling for a limited number of types: multipolygon, route, boundary The code will build the appropriate geometries by referencing the members and outputting these into the database. 7) Indexes are added to speed up the queries by Mapnik. Tuning PostgreSQL ================= For an efficient operation of PostgreSQL you will need to tune the config parameters of PostgreSQL from its default values. These are set in the config file at /etc/postgresql/8.4/main/postgresql.conf The values you need to set will depend on the hardware you have available, but you will likely need to increase the values for the following parameters: - shared_buffers - checkpoint_segments - work_mem - maintenance_work_mem - effective_cache_size A quick note on projections =========================== Depending on the command-line switches you can select which projection you want the database in. You have three choices: 4326: The standard lat/long coordinates 900913: The spherical Mercator projection, used by TileCache, Google Earth etc. 3395: The legacy (broken) WGS84 Mercator projection Depending on what you're using one or the other is appropriate. The default Mapnik style (osm.xml) assumes that the data is stored in 900913 and this is the default for osm2pgsql. Combining the -v and -h switches will tell about the exact definitions of the projections. In case you want to use some completely different projection there is the -E option. It will initialize the projection as +init=epsg:. This allows you to use any projection recognized by proj4, which is useful if you want to make a map in a different projection. These projections are usually defined in /usr/share/proj/epsg. Database Access Examples ======================== If you wish to access the data from the database then the queries below should give you some hints. Note that these examples all use the 'latlong' projection which is not the default. 
$ psql gis gis=> \d List of relations Schema | Name | Type | Owner --------+--------------------+-------+---------- ... public | planet_osm_line | table | jburgess public | planet_osm_point | table | jburgess public | planet_osm_polygon | table | jburgess public | planet_osm_roads | table | jburgess ... gis=> \d planet_osm_line Table "public.planet_osm_line" Column | Type | Modifiers -----------+----------+----------- osm_id | integer | name | text | place | text | landuse | text | ... [ lots of stuff deleted ] ... way | geometry | not null z_order | integer | default 0 Each of the tables contains a subset of the planet.osm file representing a particular geometry type - Point contains nodes which have interesting tags e.g. place=city, name=London - Line contains ways with interesting tags e.g. highway=motorway, ref=M25 - Polygon contains ways which form an enclosed area e.g. landuse=reservoir The DB columns are used as follows: - osm_id = the planet.osm ID of the node(point) or way(line,polygon) - name, place, landuse, ... = the value of the given key, if present on the node/way. If the tag is not present, the value is NULL. Only a subset of all possible tags are stored in the DB. Only ones rendered in the osm.xml are actually interesting to mapnik. - way = PostGIS geometry describing the physical layout of the object. Querying specific data requires knowlege of SQL and the OSM key/value system, e.g. 
gis=> select osm_id,astext(way),name from planet_osm_point where amenity='cinema' limit 5; osm_id | astext | name ----------+-------------------------------------------+-------------------- 26236284 | POINT(-79.7160836579093 43.6802306464618) | 26206699 | POINT(51.4051989797638 35.7066045032235) | Cinema Felestin 26206700 | POINT(51.3994885141459 35.7058460359352) | Cinema Asr-e Jadid 20979630 | POINT(151.225781789807 -33.8943079539886) | Paris Cinema 20979684 | POINT(151.226855394904 -33.8946830511095) | Hoyts (5 rows) Mapnik renders the data in each table by applying the rules in the osm.xml file. > How could I get e.g. all highways in a given bounding box? The 'way' column contains the geo info and is the one which you need to use in your WHERE clause. e.g. gis=> select osm_id,highway,name from planet_osm_line where highway is not null and way && GeomFromText('POLYGON((0 52, 0.1 52, 0.1 52.1, 0 52.1, 0 52))',4326); osm_id | highway | name ---------+--------------+------------------ 4273848 | unclassified | 3977133 | trunk | to Royston (tbc) 4004841 | trunk | 4019198 | trunk | 4019199 | trunk | 4238966 | unclassified | See the Postgis docs for details, e.g. 
http://postgis.refractions.net/docs/ch04.html osm2pgsql-0.82.0/TODO000066400000000000000000000011511213272333300142150ustar00rootroot00000000000000== osm2pgsql == * there is still room for code cleanups ** the postgres specific files also contain some general functions that could/should be moved out to separate files so that they may be shared by other middle and output backends in the future * in the long run it may make sense to go for C++ all the way ** the C++ interface to Google ProtoBuffers is much more mature than its C counterpart ** inheriting input, middle, and output plugin implementations from abstract C++ base clases would provide a cleaner and more readable interface than the current C99 struct based implementation IMHO osm2pgsql-0.82.0/UTF8sanitizer.c000066400000000000000000000112411213272333300163510ustar00rootroot00000000000000#include #include #include #include #include #include #include #include "sanitizer.h" #include "input.h" int sanitizerClose(void *context); int sanitizerProcess(void *context, char *buffer, int len); /* UTF8sanitizer algorithm has some nasty edge cases when trying to operate * in a 'block at a time' mode. For example, in the following scenario: * * INPUT sequence is 2 buffers with a 6 byte char starting at X1 * * [ len = 5 ] [len = 1] * X1 X2 X3 X4 X5 X6 * * OUTPUT: nothing is generated for first buffer * This will itself cause caller to assume EOF (hopefully normal reader will read >> 5 bytes). 
* subsequent read of len=1 whille return all 6 bytes potentially causing output buffer overflow (and overwriting input data) * * The solution is to provice a small output buffer to hold anything bigger than a single byte * */ struct Context { long long line; long long chars1, chars2, chars3, chars4, chars5, chars6; int state, current_size; int long_char[6]; int out_char[10]; int pend; int verbose; void *file; }; int sanitizerClose(void *context) { struct Context *ctx = context; int r = inputClose(ctx->file); if (ctx->verbose) { fprintf(stderr, "Summary:\n"); fprintf(stderr, "chars1: %lld\n", ctx->chars1); fprintf(stderr, "chars2: %lld\n", ctx->chars2); fprintf(stderr, "chars3: %lld\n", ctx->chars3); fprintf(stderr, "chars4: %lld\n", ctx->chars4); fprintf(stderr, "chars5: %lld\n", ctx->chars5); fprintf(stderr, "chars6: %lld\n", ctx->chars6); fprintf(stderr, "lines : %lld\n", ctx->line); } free(ctx); return r; } xmlTextReaderPtr sanitizerOpen(const char *name) { struct Context *ctx = malloc (sizeof(*ctx)); if (!ctx) return NULL; memset(ctx, 0, sizeof(*ctx)); ctx->verbose = 0; ctx->state = 1; ctx->pend = 0; ctx->file = inputOpen(name); if (!ctx->file) { fprintf(stderr, "Input reader create failed\n"); free(ctx); return NULL; } return xmlReaderForIO(sanitizerProcess, sanitizerClose, (void *)ctx, NULL, NULL, 0); } int sanitizerProcess(void *context, char *buffer, int len) { struct Context *ctx = context; int current_char, i, out = 0; while (out < len) { if (ctx->pend) { buffer[out++] = ctx->out_char[--ctx->pend]; continue; } current_char=inputGetChar(ctx->file); if (inputEof(ctx->file)) break; if ((current_char & 128) == 0) { /* Handle_ASCII_char(); */ if (current_char == '\n') ctx->line++; else ctx->chars1++; if (ctx->state != 1) { if (ctx->verbose) fprintf(stderr, "Error at line %lld\n", ctx->line); buffer[out++] = '_'; ctx->state = 1; } /* buffer[out++] = current_char; */ ctx->out_char[ctx->pend++] = current_char; } else if ((current_char & (128+64)) == 128) { /* 
Handle_continue_char(); */ if(ctx->state > 1) { ctx->state--; if(ctx->state==1) { ctx->out_char[ctx->pend++] = current_char; for(i=ctx->current_size-1; i>0; i--) { ctx->out_char[ctx->pend++] = ctx->long_char[i-1]; } } } else { if (ctx->verbose) fprintf(stderr, "Error at line %lld\n", ctx->line); buffer[out++] = '_'; ctx->state=1; } } else if ((current_char & (128+64+32)) == (128+64)) { /* Handle_two_bytes(); */ ctx->state=2; ctx->chars2++; ctx->current_size=2; } else if ((current_char & (128+64+32+16)) == (128+64+32)) { /* Handle_three_bytes(); */ ctx->state=3; ctx->chars3++; ctx->current_size=3; } else if ((current_char & (128+64+32+16+8)) == (128+64+32+16)) { /* Handle_four_bytes(); */ ctx->state=4; ctx->chars4++; ctx->current_size=4; } else if ((current_char & (128+64+32+16+8+4)) == (128+64+32+16+8)) { /* Handle_five_bytes(); */ ctx->state=5; ctx->chars5++; ctx->current_size=5; } else if ((current_char & (128+64+32+16+8+4+2)) == (128+64+32+16+8+4)) { /* Handle_six_bytes(); */ ctx->state=6; ctx->chars6++; ctx->current_size=6; } if(ctx->state>1) { ctx->long_char[ctx->current_size-ctx->state]=current_char; } } return out; } osm2pgsql-0.82.0/autogen.sh000077500000000000000000000000321213272333300155230ustar00rootroot00000000000000#!/bin/sh autoreconf -vfi osm2pgsql-0.82.0/binarysearcharray.c000066400000000000000000000055111213272333300174060ustar00rootroot00000000000000#include #include #include #include #include "osmtypes.h" #include "binarysearcharray.h" static int binary_search_lookup(struct binary_search_array * array, int key) { int a = 0; int b = array->size - 1; while (a <= b) { int pivot = ((b - a) >> 1) + a; if (array->array[pivot].key == key) { return pivot; } else if (array->array[pivot].key > key) { b = pivot - 1; } else { a = pivot + 1; } } if ((a < array->size) && (array->array[a].key < key)) a++; return a | (1 << (sizeof(int) * 8 - 1)); } osmid_t binary_search_get(struct binary_search_array * array, int key) { int idx; if (array->size == 0) return -1; 
idx = binary_search_lookup(array, key); if (idx < 0) { return -1; } else { return array->array[idx].value; } exit(1); } void binary_search_remove(struct binary_search_array * array, int key) { int idx = binary_search_lookup(array, key); if (idx < 0) { return; } else { memmove(&(array->array[idx]), &(array->array[idx + 1]), sizeof(struct key_val_tuple) * (array->capacity - idx - 1)); array->size--; } } void binary_search_add(struct binary_search_array * array, int key, osmid_t value) { int idx; if (array->size < array->capacity) { if (array->size == 0) { array->array[0].key = key; array->array[0].value = value; array->size++; return; } idx = binary_search_lookup(array, key); if (idx < 0) { idx = idx & (~(1 << (sizeof(int) * 8 - 1))); memmove(&(array->array[idx + 1]), &(array->array[idx]), sizeof(struct key_val_tuple) * (array->capacity - idx - 1)); array->array[idx].key = key; array->array[idx].value = value; array->size++; } else { fprintf(stderr, "dupplicate!\n"); exit(1); } } } struct binary_search_array * init_search_array(int capacity) { struct binary_search_array * array = calloc(1, sizeof(struct binary_search_array)); array->array = calloc(capacity + 1, sizeof(struct key_val_tuple)); if (!array->array) { fprintf(stderr, "Out of memory trying to allocate %li bytes for binary search array\n", ((capacity + 1) * sizeof(struct key_val_tuple))); exit_nicely(); } array->capacity = capacity; array->size = 0; return array; } void shutdown_search_array(struct binary_search_array ** array) { free((*array)->array); (*array)->array = NULL; (*array)->capacity = 0; free(*array); *array = NULL; } osm2pgsql-0.82.0/binarysearcharray.h000066400000000000000000000007671213272333300174230ustar00rootroot00000000000000 struct key_val_tuple { int key; osmid_t value; }; struct binary_search_array { int capacity; int size; struct key_val_tuple * array; }; void binary_search_remove(struct binary_search_array * array, int key); void binary_search_add(struct binary_search_array * array, 
int key, osmid_t value); osmid_t binary_search_get(struct binary_search_array * array, int key); struct binary_search_array * init_search_array(int capacity); void shutdown_search_array(struct binary_search_array ** array); osm2pgsql-0.82.0/build_geometry.cpp000066400000000000000000000477041213272333300172610ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # Part of osm2pgsql utility #----------------------------------------------------------------------------- # By Artem Pavlenko, Copyright 2007 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
#----------------------------------------------------------------------------- */ #include #include #include #include /* Need to know which geos version we have to work out which headers to include */ #include /* geos (3.0.0+) */ #if (GEOS_VERSION_MAJOR==3) #if (GEOS_VERSION_MINOR>=1) /* Prepared geometries are new in 3.1.0 */ #define HAS_PREPARED_GEOMETRIES #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace geos::geom; using namespace geos::io; using namespace geos::util; using namespace geos::operation::linemerge; #else /* geos-2.2.3 */ #include #include #include using namespace geos; #endif #include "build_geometry.h" typedef std::auto_ptr geom_ptr; static std::vector wkts; static std::vector areas; static int excludepoly = 0; char *get_wkt_simple(osmNode *nodes, int count, int polygon) { GeometryFactory gf; std::auto_ptr coords(gf.getCoordinateSequenceFactory()->create((size_t)0, (size_t)2)); try { for (int i = 0; i < count ; i++) { Coordinate c; c.x = nodes[i].lon; c.y = nodes[i].lat; coords->add(c, 0); } geom_ptr geom; if (polygon && (coords->getSize() >= 4) && (coords->getAt(coords->getSize() - 1).equals2D(coords->getAt(0)))) { std::auto_ptr shell(gf.createLinearRing(coords.release())); geom = geom_ptr(gf.createPolygon(shell.release(), new std::vector)); if (!geom->isValid()) { if (excludepoly) { return NULL; } else { geom = geom_ptr(geom->buffer(0)); } } geom->normalize(); // Fix direction of ring } else { if (coords->getSize() < 2) return NULL; geom = geom_ptr(gf.createLineString(coords.release())); } WKTWriter wktw; std::string wkt = wktw.write(geom.get()); return strdup(wkt.c_str()); } catch (std::bad_alloc) { std::cerr << std::endl << "Exception caught processing way. You are likelly running out of memory." << std::endl; std::cerr << "Try in slim mode, using -s parameter." << std::endl; return NULL; } catch (...) 
{ std::cerr << std::endl << "Exception caught processing way" << std::endl; return NULL; } } size_t get_wkt_split(osmNode *nodes, int count, int polygon, double split_at) { GeometryFactory gf; std::auto_ptr coords(gf.getCoordinateSequenceFactory()->create((size_t)0, (size_t)2)); double area; WKTWriter wktw; size_t wkt_size = 0; try { for (int i = 0; i < count ; i++) { Coordinate c; c.x = nodes[i].lon; c.y = nodes[i].lat; coords->add(c, 0); } geom_ptr geom; if (polygon && (coords->getSize() >= 4) && (coords->getAt(coords->getSize() - 1).equals2D(coords->getAt(0)))) { std::auto_ptr shell(gf.createLinearRing(coords.release())); geom = geom_ptr(gf.createPolygon(shell.release(), new std::vector)); if (!geom->isValid()) { if (excludepoly) { return 0; } else { geom = geom_ptr(geom->buffer(0)); } } geom->normalize(); // Fix direction of ring area = geom->getArea(); std::string wkt = wktw.write(geom.get()); wkts.push_back(wkt); areas.push_back(area); wkt_size++; } else { if (coords->getSize() < 2) return 0; double distance = 0; std::auto_ptr segment; segment = std::auto_ptr(gf.getCoordinateSequenceFactory()->create((size_t)0, (size_t)2)); segment->add(coords->getAt(0)); for(unsigned i=1; igetSize(); i++) { segment->add(coords->getAt(i)); distance += coords->getAt(i).distance(coords->getAt(i-1)); if ((distance >= split_at) || (i == coords->getSize()-1)) { geom = geom_ptr(gf.createLineString(segment.release())); std::string wkt = wktw.write(geom.get()); wkts.push_back(wkt); areas.push_back(0); wkt_size++; distance=0; segment = std::auto_ptr(gf.getCoordinateSequenceFactory()->create((size_t)0, (size_t)2)); segment->add(coords->getAt(i)); } } } } catch (std::bad_alloc) { std::cerr << std::endl << "Exception caught processing way. You are likelly running out of memory." << std::endl; std::cerr << "Try in slim mode, using -s parameter." << std::endl; wkt_size = 0; } catch (...) 
{ std::cerr << std::endl << "Exception caught processing way" << std::endl; wkt_size = 0; } return wkt_size; } char * get_wkt(size_t index) { // return wkts[index].c_str(); char *result; result = (char*) std::malloc( wkts[index].length() + 1); // At least give some idea of why we about to seg fault if (!result) std::cerr << std::endl << "Unable to allocate memory: " << (wkts[index].length() + 1) << std::endl; std::strcpy(result, wkts[index].c_str()); return result; } double get_area(size_t index) { return areas[index]; } void clear_wkts() { wkts.clear(); areas.clear(); } static int coords2nodes(CoordinateSequence * coords, struct osmNode ** nodes) { size_t num_coords; size_t i; Coordinate coord; num_coords = coords->getSize(); *nodes = (struct osmNode *) malloc(num_coords * sizeof(struct osmNode)); for (i = 0; i < num_coords; i++) { coord = coords->getAt(i); (*nodes)[i].lon = coord.x; (*nodes)[i].lat = coord.y; } return num_coords; } int parse_wkt(const char * wkt, struct osmNode *** xnodes, int ** xcount, int * polygon) { GeometryFactory gf; WKTReader reader(&gf); std::string wkt_string(wkt); Geometry * geometry; const Geometry * subgeometry; GeometryCollection * gc; CoordinateSequence * coords; size_t num_geometries; size_t i; *polygon = 0; try { geometry = reader.read(wkt_string); switch (geometry->getGeometryTypeId()) { // Single geometries case GEOS_POLYGON: // Drop through case GEOS_LINEARRING: *polygon = 1; // Drop through case GEOS_POINT: // Drop through case GEOS_LINESTRING: *xnodes = (struct osmNode **) malloc(2 * sizeof(struct osmNode *)); *xcount = (int *) malloc(sizeof(int)); coords = geometry->getCoordinates(); (*xcount)[0] = coords2nodes(coords, &((*xnodes)[0])); (*xnodes)[1] = NULL; delete coords; break; // Geometry collections case GEOS_MULTIPOLYGON: *polygon = 1; // Drop through case GEOS_MULTIPOINT: // Drop through case GEOS_MULTILINESTRING: gc = dynamic_cast(geometry);; num_geometries = gc->getNumGeometries(); *xnodes = (struct osmNode **) 
malloc((num_geometries + 1) * sizeof(struct osmNode *)); *xcount = (int *) malloc(num_geometries * sizeof(int)); for (i = 0; i < num_geometries; i++) { subgeometry = gc->getGeometryN(i); coords = subgeometry->getCoordinates(); (*xcount)[i] = coords2nodes(coords, &((*xnodes)[i])); delete coords; } (*xnodes)[i] = NULL; break; default: std::cerr << std::endl << "unexpected object type while processing PostGIS data" << std::endl; delete geometry; return -1; } delete geometry; } catch (...) { std::cerr << std::endl << "Exception caught parsing PostGIS data" << std::endl; return -1; } return 0; } struct polygondata { Polygon* polygon; LinearRing* ring; double area; int iscontained; unsigned containedbyid; }; static int polygondata_comparearea(const void* vp1, const void* vp2) { const polygondata* p1 = (const polygondata*)vp1; const polygondata* p2 = (const polygondata*)vp2; if (p1->area == p2->area) return 0; if (p1->area > p2->area) return -1; return 1; } size_t build_geometry(osmid_t osm_id, struct osmNode **xnodes, int *xcount, int make_polygon, int enable_multi, double split_at) { size_t wkt_size = 0; std::auto_ptr > lines(new std::vector); GeometryFactory gf; geom_ptr geom; #ifdef HAS_PREPARED_GEOMETRIES geos::geom::prep::PreparedGeometryFactory pgf; #endif try { for (int c=0; xnodes[c]; c++) { std::auto_ptr coords(gf.getCoordinateSequenceFactory()->create((size_t)0, (size_t)2)); for (int i = 0; i < xcount[c]; i++) { struct osmNode *nodes = xnodes[c]; Coordinate c; c.x = nodes[i].lon; c.y = nodes[i].lat; coords->add(c, 0); } if (coords->getSize() > 1) { geom = geom_ptr(gf.createLineString(coords.release())); lines->push_back(geom.release()); } } //geom_ptr segment(0); geom_ptr mline (gf.createMultiLineString(lines.release())); //geom_ptr noded (segment->Union(mline.get())); LineMerger merger; //merger.add(noded.get()); merger.add(mline.get()); std::auto_ptr > merged(merger.getMergedLineStrings()); WKTWriter writer; // Procces ways into lines or simple polygon list 
polygondata* polys = new polygondata[merged->size()]; unsigned totalpolys = 0; for (unsigned i=0 ;i < merged->size(); ++i) { std::auto_ptr pline ((*merged ) [i]); if (make_polygon && pline->getNumPoints() > 3 && pline->isClosed()) { polys[totalpolys].polygon = gf.createPolygon(gf.createLinearRing(pline->getCoordinates()),0); polys[totalpolys].ring = gf.createLinearRing(pline->getCoordinates()); polys[totalpolys].area = polys[totalpolys].polygon->getArea(); polys[totalpolys].iscontained = 0; polys[totalpolys].containedbyid = 0; if (polys[totalpolys].area > 0.0) totalpolys++; else { delete(polys[totalpolys].polygon); delete(polys[totalpolys].ring); } } else { //std::cerr << "polygon(" << osm_id << ") is no good: points(" << pline->getNumPoints() << "), closed(" << pline->isClosed() << "). " << writer.write(pline.get()) << std::endl; double distance = 0; std::auto_ptr segment; segment = std::auto_ptr(gf.getCoordinateSequenceFactory()->create((size_t)0, (size_t)2)); segment->add(pline->getCoordinateN(0)); for(unsigned i=1; igetNumPoints(); i++) { segment->add(pline->getCoordinateN(i)); distance += pline->getCoordinateN(i).distance(pline->getCoordinateN(i-1)); if ((distance >= split_at) || (i == pline->getNumPoints()-1)) { geom = geom_ptr(gf.createLineString(segment.release())); std::string wkt = writer.write(geom.get()); wkts.push_back(wkt); areas.push_back(0); wkt_size++; distance=0; segment = std::auto_ptr(gf.getCoordinateSequenceFactory()->create((size_t)0, (size_t)2)); segment->add(pline->getCoordinateN(i)); } } //std::string text = writer.write(pline.get()); //wkts.push_back(text); //areas.push_back(0.0); //wkt_size++; } } if (totalpolys) { qsort(polys, totalpolys, sizeof(polygondata), polygondata_comparearea); unsigned toplevelpolygons = 0; int istoplevelafterall; for (unsigned i=0 ;i < totalpolys; ++i) { if (polys[i].iscontained != 0) continue; toplevelpolygons++; #ifdef HAS_PREPARED_GEOMETRIES const geos::geom::prep::PreparedGeometry* preparedtoplevelpolygon = 
pgf.create(polys[i].polygon); #endif for (unsigned j=i+1; j < totalpolys; ++j) { #ifdef HAS_PREPARED_GEOMETRIES // Does preparedtoplevelpolygon contain the smaller polygon[j]? if (polys[j].containedbyid == 0 && preparedtoplevelpolygon->contains(polys[j].polygon)) #else // Does polygon[i] contain the smaller polygon[j]? if (polys[j].containedbyid == 0 && polys[i].polygon->contains(polys[j].polygon)) #endif { // are we in a [i] contains [k] contains [j] situation // which would actually make j top level istoplevelafterall = 0; for (unsigned k=i+1; k < j; ++k) { if (polys[k].iscontained && polys[k].containedbyid == i && polys[k].polygon->contains(polys[j].polygon)) { istoplevelafterall = 1; break; } #if 0 else if (polys[k].polygon->intersects(polys[j].polygon) || polys[k].polygon->touches(polys[j].polygon)) { // FIXME: This code does not work as intended // It should be setting the polys[k].ring in order to update this object // but the value of polys[k].polygon calculated is normally NULL // Add polygon this polygon (j) to k since they intersect // Mark ourselfs to be dropped (2), delete the original k Geometry* polyunion = polys[k].polygon->Union(polys[j].polygon); delete(polys[k].polygon); polys[k].polygon = dynamic_cast(polyunion); polys[j].iscontained = 2; // Drop istoplevelafterall = 2; break; } #endif } if (istoplevelafterall == 0) { polys[j].iscontained = 1; polys[j].containedbyid = i; } } } #ifdef HAS_PREPARED_GEOMETRIES pgf.destroy(preparedtoplevelpolygon); #endif } // polys now is a list of ploygons tagged with which ones are inside each other // List of polygons for multipolygon std::auto_ptr > polygons(new std::vector); // For each top level polygon create a new polygon including any holes for (unsigned i=0 ;i < totalpolys; ++i) { if (polys[i].iscontained != 0) continue; // List of holes for this top level polygon std::auto_ptr > interior(new std::vector); for (unsigned j=i+1; j < totalpolys; ++j) { if (polys[j].iscontained == 1 && polys[j].containedbyid 
== i) { interior->push_back(polys[j].ring); } } Polygon* poly(gf.createPolygon(polys[i].ring, interior.release())); poly->normalize(); polygons->push_back(poly); } // Make a multipolygon if required if ((toplevelpolygons > 1) && enable_multi) { geom_ptr multipoly(gf.createMultiPolygon(polygons.release())); if (!multipoly->isValid() && (excludepoly == 0)) { multipoly = geom_ptr(multipoly->buffer(0)); } multipoly->normalize(); if ((excludepoly == 0) || (multipoly->isValid())) { std::string text = writer.write(multipoly.get()); wkts.push_back(text); areas.push_back(multipoly->getArea()); wkt_size++; } } else { for(unsigned i=0; i(polygons->at(i)); if (!poly->isValid() && (excludepoly == 0)) { poly = dynamic_cast(poly->buffer(0)); poly->normalize(); } if ((excludepoly == 0) || (poly->isValid())) { std::string text = writer.write(poly); wkts.push_back(text); areas.push_back(poly->getArea()); wkt_size++; } delete(poly); } } } for (unsigned i=0; i < totalpolys; ++i) { delete(polys[i].polygon); } delete[](polys); } catch (std::exception& e) { std::cerr << std::endl << "Standard exception processing way_id "<< osm_id << ": " << e.what() << std::endl; wkt_size = 0; } catch (...) { std::cerr << std::endl << "Exception caught processing way id=" << osm_id << std::endl; wkt_size = 0; } return wkt_size; } void exclude_broken_polygon () { excludepoly = 1; } osm2pgsql-0.82.0/build_geometry.h000066400000000000000000000031631213272333300167150ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # Part of osm2pgsql utility #----------------------------------------------------------------------------- # By Artem Pavlenko, Copyright 2007 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #ifndef BUILD_GEOMETRY_H #define BUILD_GEOMETRY_H #ifdef __cplusplus extern "C" { #endif #include "osmtypes.h" int parse_wkt(const char * wkt, struct osmNode *** xnodes, int ** xcount, int * polygon); char *get_wkt_simple(struct osmNode *, int count, int polygon); size_t get_wkt_split(struct osmNode *, int count, int polygon, double split_at); char* get_wkt(size_t index); double get_area(size_t index); size_t build_geometry(osmid_t osm_id, struct osmNode **xnodes, int *xcount, int make_polygon, int enable_multi, double split_at); void clear_wkts(); void exclude_broken_polygon (); #ifdef __cplusplus } #endif #endif osm2pgsql-0.82.0/configure.ac000066400000000000000000000057311213272333300160230ustar00rootroot00000000000000dnl Process this file with autoconf to produce a configure script. 
AC_INIT(osm2pgsql, 0.82.0) dnl Required autoconf version AC_PREREQ(2.61) AX_CONFIG_NICE dnl use automake to generate standard Makefiles AM_INIT_AUTOMAKE([1.9.6 dist-bzip2 std-options check-news]) dnl use 'silent' make rules by default (disabled for now) dnl AM_INIT_AUTOMAKE([1.11 dist-bzip2 silent-rules]) dnl AM_SILENT_RULES([yes]) dnl Additional macro definitions are in here AC_CONFIG_MACRO_DIR([m4]) dnl Generate configuration header file AC_CONFIG_HEADER(config.h) AC_GNU_SOURCE AC_HEADER_SYS_WAIT AC_FUNC_MMAP dnl Find C compiler dnl AC_PROG_CC_C99 dnl Find C++ compiler AC_PROG_CXX AC_CHECK_PROG(HAVE_CXX, $CXX, yes, no) if test "$HAVE_CXX" = "no" then AC_MSG_ERROR([Could not find a c++ compiler]); fi dnl AX_CFLAGS_WARN_ALL dnl AX_CXXFLAGS_WARN_ALL dnl Make sure we have libtool installed AM_PROG_LIBTOOL LFS_CFLAGS=`getconf LFS_CFLAGS` AC_SUBST(LFS_CFLAGS) AC_CHECK_FUNC(lseek64,[AC_DEFINE(HAVE_LSEEK64, [1], [lseek64 is present])],[AX_COMPILE_CHECK_SIZEOF(off_t)]) AC_CHECK_FUNCS([posix_fallocate posix_fadvise sync_file_range fork]) dnl legacy 32bit ID mode AC_ARG_ENABLE([64bit-ids], AS_HELP_STRING([--disable-64bit-ids], [Disable 64bit IDs for OSM IDs]), [ if test "$enableval" = "yes" then AC_DEFINE(OSMID64, [1], [Enable 64bit OSM IDs]) fi ], [ AC_DEFINE(OSMID64, [1], [Enable 64bit OSM IDs])]) dnl Check for libxml2 library AX_LIB_XML2 if test "$HAVE_XML2" = "no" then AC_MSG_ERROR([required library not found]); fi dnl check for zlib library AX_LIB_ZLIB if test "$HAVE_ZLIB" = "no" then AC_MSG_ERROR([required library not found]); fi dnl Check for bzip2 library AX_LIB_BZIP2 if test "$HAVE_BZIP2" = "no" then AC_MSG_ERROR([required library not found]); fi dnl Check for Geos library AX_LIB_GEOS if test "x$GEOS_VERSION" = "x" then AC_MSG_ERROR([geos library not found]); fi dnl Check for Proj library AX_LIB_PROJ if test "$HAVE_PROJ" = "no" then AC_MSG_ERROR([required library not found]); fi dnl Check for protobuf-c library and protoc-c binary AX_LIB_PROTOBUF_C([0.14]) dnl 
Decide whether to include PBF import support BUILD_READER_PBF=no if test "$HAVE_PROTOBUF_C" = "yes" then if test "$PROTOC_C" != "false" then BUILD_READER_PBF=yes AC_DEFINE([BUILD_READER_PBF], [1], [Requirements for building the PBF reader are met]) fi fi AM_CONDITIONAL([READER_PBF], [test "$BUILD_READER_PBF" = "yes"]) dnl Check for PostgresSQL client library AX_LIB_POSTGRESQL if test "x$POSTGRESQL_VERSION" = "x" then AC_MSG_ERROR([postgresql client library not found]) fi dnl Check for pthread library AX_PTHREAD(,[AC_MSG_ERROR([no])]) dnl Generate Makefile AC_OUTPUT(Makefile legacy/Makefile) if test "$BUILD_READER_PBF" != "yes" then AC_MSG_WARN([ protobuf libraries not found. You will NOT be able to import PBF files. To enable PBF support, the protobuf library and compiler are required. Look for packages named: libprotobuf-c0-dev protobuf-c-compiler ]) fi osm2pgsql-0.82.0/debian/000077500000000000000000000000001213272333300147515ustar00rootroot00000000000000osm2pgsql-0.82.0/debian/README000066400000000000000000000004631213272333300156340ustar00rootroot00000000000000The Debian Package osm2pgsql ---------------------------- Debian packaging stuff created to allow building a stand-alone osm2pgsql package. Unrelated to Joerg Ostertag's "openstreetmap-utils" package which also contains osm2pgsql! 
-- Frederik Ramm Fri, 13 Mar 2009 00:14:15 +0100 osm2pgsql-0.82.0/debian/changelog000066400000000000000000000052701213272333300166270ustar00rootroot00000000000000osm2pgsql (0.80.0-13~precise1) precise; urgency=low * Update from upstream to svn version 28638 - Implement a file based node lookup in slim mode - fix segfault if out of memory -- Kai Krueger Sun, 26 Aug 2012 10:00:00 +0000 osm2pgsql (0.80.0-12~precise1) precise; urgency=low * Update from upstream to svn version 28406 -- Kai Krueger Thu, 24 May 2012 10:00:00 +0000 osm2pgsql (0.80.0-11~precise1) precise; urgency=low * Be less agressive on tile expiry * Fix gazeteer -- Kai Krueger Sat, 14 Jan 2012 10:00:00 +0000 osm2pgsql (0.80.0-10~lucid1) lucid; urgency=low * Improve handling of multiple processes. Gracefully fallback if creating of helper processes fails * Improve speed when fsync is enabled * Fix expiry code when using multiple processes * Introduce a --drop option, if updating is not needed but using --slim for memory saving purposes * Create an additional index on planet_osm_polygon to improve rendering speed -- Kai Krueger Sun, 18 Dec 2011 10:00:00 +0000 osm2pgsql (0.80.0-9~lucid1) lucid; urgency=low * Bugfix: incorrectly round lat / lon to 2 s.f. in -l projection * Reduce virtual memory usage to reduce chance of out of memory errors -- Kai Krueger Thu, 3 Nov 2011 10:00:00 +0000 osm2pgsql (0.80.0-8~oneiric1) oneiric; urgency=low * Bugfix: Did not handle cache values larger than 2Gb correctly -- Kai Krueger Sat, 22 Oct 2011 10:00:00 +0000 osm2pgsql (0.80.0-7~natty2) natty; urgency=low * Memory optimize the node cache to handle larger extracts * parallelize parts of the import and diff processing -- Kai Krueger Sun, 16 Oct 2011 10:00:00 +0000 osm2pgsql (0.80.0-6~oneiric1) oneiric; urgency=low * Add a script to add permissions to a db afterwards * Handle postgresql 9.1 -- Kai Krueger Sun, 09 Oct 2011 10:00:00 +0000 osm2pgsql (0.80.0-5~oneiric1) oneiric; urgency=low * Fix debian package scripts. 
-- Kai Krueger Fri, 07 Oct 2011 10:00:00 +0000 osm2pgsql (0.80.0) lucid; urgency=low * version 0.80 does not use intarray any longer. -- Frederik Ramm Wed, 03 Aug 2011 15:03:42 +0200 osm2pgsql (0.70.5-karmic1) karmic; urgency=low * 0.70.5 build -- Frederik Ramm Sat, 29 Jan 2011 12:11:42 +0000 osm2pgsql (0.69) jaunty; urgency=low * v0.69 -- Frederik Ramm Wed, 04 Nov 2009 09:51:59 +0100 osm2pgsql (0.65) unstable; urgency=low * Initial standalone debian package -- Frederik Ramm Fri, 13 Mar 2009 00:14:15 +0100 osm2pgsql-0.82.0/debian/compat000066400000000000000000000000021213272333300161470ustar00rootroot000000000000007 osm2pgsql-0.82.0/debian/control000066400000000000000000000020721213272333300163550ustar00rootroot00000000000000Source: osm2pgsql Section: utils Priority: optional Maintainer: Frederik Ramm Build-Depends: debhelper (>= 7), libgeos-dev, libxml2-dev, libpq-dev, proj | libproj0, libbz2-dev, automake, autoconf, libtool, protobuf-c-compiler, libprotobuf-c0-dev Standards-Version: 3.9.3 Package: osm2pgsql Architecture: any Depends: ${shlibs:Depends}, ${misc:Depends} Recommends: postgresql, postgresql-8.4-postgis | postgresql-9.1-postgis, openstreetmap-postgis-db-setup Conflicts: openstreetmap-utils Description: Openstreetmap data to PostgreSQL converter osm2pgsql imports OpenStreetMap data into a PostGIS database. Package: openstreetmap-postgis-db-setup Architecture: all Depends: ${misc:Depends}, debconf, postgis, postgresql-8.4-postgis | postgresql-9.1-postgis, postgresql-contrib Recommends: osm2pgsql Suggests: libapache2-mod-tile, openstreetmap-mapnik-data Description: Provides setup scripts to create a postgis database for OpenStreetMap The database can be used with e.g. 
osm2pgsql to load data and mapnik for rendering It sets up a database named 'gis' osm2pgsql-0.82.0/debian/copyright000066400000000000000000000003011213272333300166760ustar00rootroot00000000000000Licensed under GPL v2 or later The full license text can be found at /usr/share/common-licenses/GPL-2 Various authors, as per http://svn.openstreetmap.org/applications/utils/export/osm2pgsql/ osm2pgsql-0.82.0/debian/dirs000066400000000000000000000000341213272333300156320ustar00rootroot00000000000000usr/bin usr/share/osm2pgsql osm2pgsql-0.82.0/debian/manpages000066400000000000000000000000231213272333300164620ustar00rootroot00000000000000debian/osm2pgsql.1 osm2pgsql-0.82.0/debian/openstreetmap-postgis-db-setup.config000077500000000000000000000010411213272333300242340ustar00rootroot00000000000000#!/bin/sh set -e . /usr/share/debconf/confmodule db_input high openstreetmap-postgis-db-setup/initdb || true db_input high openstreetmap-postgis-db-setup/dbname || true db_input high openstreetmap-postgis-db-setup/grant_user || true db_go || true db_get openstreetmap-postgis-db-setup/initdb || true INIT_DB=$RET db_get openstreetmap-postgis-db-setup/grant_user || true GRANT_USER=$RET cat < /etc/default/openstreetmap-postgis-db-setup # Default settings for openstreetmap-mapnik-data INIT_DB="$INIT_DB" GRANT_USER="$GRANT_USER" EOF osm2pgsql-0.82.0/debian/openstreetmap-postgis-db-setup.install000066400000000000000000000001111213272333300244270ustar00rootroot00000000000000install-postgis-osm-db.sh /usr/bin/ install-postgis-osm-user.sh /usr/bin/osm2pgsql-0.82.0/debian/openstreetmap-postgis-db-setup.postinst000077500000000000000000000023301213272333300246540ustar00rootroot00000000000000#!/bin/sh # postinst script for openstreetmap-mapnik-data # # see: dh_installdeb(1) set -e case "$1" in configure) . 
/usr/share/debconf/confmodule db_get openstreetmap-postgis-db-setup/initdb || true INIT_DB=$RET db_get openstreetmap-postgis-db-setup/dbname || true DBNAME=$RET db_get openstreetmap-postgis-db-setup/grant_user || true GRANT_USER=$RET #echo "# Default settings for openstreetmap-mapnik-data" > /etc/default/openstreetmap-mapnik-data #echo "GRANT_USER='$GRANT_USER' " >> /etc/default/openstreetmap-mapnik-data #echo "INIT_DB='$INIT_DB' " >> /etc/default/openstreetmap-mapnik-data # only call the install postgis script on the first configuration # we don't want to delete and recreate the db on upgrades of osm2pgsql if [ -z "$2" ] ; then if [ "x$INIT_DB" = "xtrue" ] ; then export DBNAME GRANT_USER /usr/bin/install-postgis-osm-db.sh || true fi fi true ;; abort-upgrade|abort-remove|abort-deconfigure) ;; *) echo "postinst called with unknown argument \`$1'" >&2 exit 1 ;; esac # dh_installdeb will replace this with shell code automatically # generated by other debhelper scripts. #DEBHELPER# exit 0 osm2pgsql-0.82.0/debian/openstreetmap-postgis-db-setup.templates000066400000000000000000000015541213272333300247730ustar00rootroot00000000000000Template: openstreetmap-postgis-db-setup/initdb Type: boolean Default: true Description: Do you want to create a postgis db? Do you want these scripts to create and setup a new postgis database ready to be used with e.g. Osm2pgsql. WARNING: This will delete an existing db Template: openstreetmap-postgis-db-setup/dbname Type: string Default: gis Description: Name of the database to create: If you don't use the default name, you might need to adapt programs and scripts to use the new name Template: openstreetmap-postgis-db-setup/grant_user Type: string Default: www-data Description: Other users that should have access to the db: Please specify which users should have access to the newly created db. You will want the user www-data for rendering and your own user name to import data into the db. The list of users is blank separated: E.g. 
"www-data peter" osm2pgsql-0.82.0/debian/osm2pgsql.install000066400000000000000000000000671213272333300202730ustar00rootroot00000000000000default.style /usr/share/osm2pgsql osm2pgsql /usr/bin/ osm2pgsql-0.82.0/debian/rules000077500000000000000000000037051213272333300160360ustar00rootroot00000000000000#!/usr/bin/make -f # -*- makefile -*- # Sample debian/rules that uses debhelper. # This file was originally written by Joey Hess and Craig Small. # As a special exception, when this file is copied by dh-make into a # dh-make output file, you may use that output file without restriction. # This special exception was added by Craig Small in version 0.37 of dh-make. # Uncomment this to turn on verbose mode. #export DH_VERBOSE=1 configure: configure-stamp configure-stamp: dh_testdir # Add here commands to configure the package. touch configure-stamp build: build-arch build-indep build-arch: build-stamp build-indep: build-stamp build-stamp: configure-stamp dh_testdir # Add here commands to compile the package. ./autogen.sh automake --add-missing aclocal autoreconf -f ./configure --prefix=/usr --datadir=/usr/share/osm2pgsql $(MAKE) #docbook-to-man debian/osm2pgsql.sgml > osm2pgsql.1 touch $@ clean: dh_testdir dh_testroot rm -f build-stamp configure-stamp # Add here commands to clean up after the build process. $(MAKE) clean || /bin/true dh_clean install: build dh_testdir dh_testroot dh_prep dh_installdirs # Add here commands to install the package into debian/osm2pgsql. $(MAKE) DESTDIR=$(CURDIR)/debian/osm2pgsql install # Build architecture-independent files here. binary-indep: build install # We have nothing to do by default. # Build architecture-dependent files here. 
binary-arch: build install dh_testdir dh_testroot dh_installchangelogs dh_installdocs dh_installexamples dh_install # dh_installmenu dh_installdebconf # dh_installlogrotate # dh_installemacsen # dh_installpam # dh_installmime # dh_python # dh_installinit # dh_installcron # dh_installinfo dh_installman dh_link dh_strip dh_compress dh_fixperms # dh_perl # dh_makeshlibs dh_installdeb dh_shlibdeps dh_gencontrol dh_md5sums dh_builddeb binary: binary-indep binary-arch .PHONY: build clean binary-indep binary-arch binary install configure osm2pgsql-0.82.0/debian/source/000077500000000000000000000000001213272333300162515ustar00rootroot00000000000000osm2pgsql-0.82.0/debian/source/format000066400000000000000000000000151213272333300174600ustar00rootroot000000000000003.0 (native) osm2pgsql-0.82.0/default.style000066400000000000000000000134771213272333300162510ustar00rootroot00000000000000# This is the style file that matches the old version of osm2pgsql, which # did not make distinctions between tags for nodes and for ways. There are a # number of optimisations that can be applied here. Firstly, certain tags # only apply to only nodes or only ways. By fixing this we reduce the amount # of useless data loaded into the DB, which is a good thing. Possible # optimisations for the future: # 1. Generate this file directly from the mapnik XML config, so it's always # optimal # 2. Extend it so it can understand that highway=tertiary is for ways and # highway=bus_stop is for nodes # Flags field isn't used much yet, expect if it contains the text "polygon" # it indicates the shape is candidate for the polygon table. In the future I # would like to be able to add directives like "nocache" which tells # osm2pgsql that it is unlikely this node will be used by a way and so it # doesn't need to be stored (eg coastline nodes). While in essence an # optimisation hack, for --slim mode it doesn't matter if you're wrong, but # in non-slim you might break something! 
# Also possibly an ignore flag, for things like "note" and "source" which # can simply be deleted. (In slim mode this is, does not apply to non-slim # obviously) # OsmType Tag DataType Flags node,way note text delete # These tags can be long but are useless for rendering node,way source text delete # This indicates that we shouldn't store them node,way created_by text delete node,way access text linear node,way addr:housename text linear node,way addr:housenumber text linear node,way addr:interpolation text linear node,way admin_level text linear node,way aerialway text linear node,way aeroway text polygon node,way amenity text nocache,polygon node,way area text # hard coded support for area=1/yes => polygon is in osm2pgsql node,way barrier text linear node,way bicycle text nocache node,way brand text linear node,way bridge text linear node,way boundary text linear node,way building text polygon node capital text linear node,way construction text linear node,way covered text linear node,way culvert text linear node,way cutting text linear node,way denomination text linear node,way disused text linear node ele text linear node,way embankment text linear node,way foot text linear node,way generator:source text linear node,way harbour text polygon node,way highway text linear node,way historic text polygon node,way horse text linear node,way intermittent text linear node,way junction text linear node,way landuse text polygon node,way layer text linear node,way leisure text polygon node,way lock text linear node,way man_made text polygon node,way military text polygon node,way motorcar text linear node,way name text linear node,way natural text polygon # natural=coastline tags are discarded by a hard coded rule in osm2pgsql node,way office text polygon node,way oneway text linear node,way operator text linear node,way place text polygon node poi text node,way population text linear node,way power text polygon node,way power_source text linear node,way public_transport 
text polygon node,way railway text linear node,way ref text linear node,way religion text nocache node,way route text linear node,way service text linear node,way shop text polygon node,way sport text polygon node,way surface text linear node,way toll text linear node,way tourism text polygon node,way tower:type text linear way tracktype text linear node,way tunnel text linear node,way water text polygon node,way waterway text polygon node,way wetland text polygon node,way width text linear node,way wood text linear node,way z_order int4 linear # This is calculated during import way way_area real # This is calculated during import # If you're interested in bicycle routes, you may want the following fields # To make these work you need slim mode or the necessary data won't be remembered. #way lcn_ref text linear #way rcn_ref text linear #way ncn_ref text linear #way lcn text linear #way rcn text linear #way ncn text linear #way lwn_ref text linear #way rwn_ref text linear #way nwn_ref text linear #way lwn text linear #way rwn text linear #way nwn text linear #way route_pref_color text linear #way route_name text linear # The following entries can be used with the --extra-attributes option # to include the username, userid, version & timstamp in the DB #node,way osm_user text #node,way osm_uid text #node,way osm_version text #node,way osm_timestamp text osm2pgsql-0.82.0/docs/000077500000000000000000000000001213272333300144575ustar00rootroot00000000000000osm2pgsql-0.82.0/docs/nodecachefilereader.1000066400000000000000000000021061213272333300204740ustar00rootroot00000000000000.TH NODECACHEFILEREADER 1 "April 06, 2013" .\" Please adjust this date whenever revising the manpage. .SH NAME nodecachefilereader \- utility to inspect osm2pgsql's node cache. 
.SH SYNOPSIS .B nodecachefilereader .RI /path/to/node.cache .br .B nodecachefilereader .RI /path/to/node.cache\ node_id1\ node_id2\ node_id3 .br .B nodecachefilereader .RI /path/to/node.cache\ node_id1,node_id2,node_id3 .br .SH DESCRIPTION This manual page documents briefly the .B nodecachefilereader command. .PP .B nodecachefilereader allows you to inspect and test osm2pgsql's custome node database. .PP .SH OPTIONS If only the filename of the node cache is given, nodecachefilereader performs some basic diagnostics on the node cache, as well as give some basic benchmark results on how quickly it can lookup nodes by id .PP If one or more node_ids are given, nodecachefilereader retrieves the information stored (latitude / longitude) about those nodes in the database .PP .SH SEE ALSO .BR osm2pgsql (1), .br .SH AUTHOR nodecachefilereader was written by Kai Krueger and other OpenStreetMap project members. .PPosm2pgsql-0.82.0/docs/osm2pgsql.1000066400000000000000000000263011213272333300164720ustar00rootroot00000000000000.TH OSM2PGSQL 1 "April 06, 2013" .\" Please adjust this date whenever revising the manpage. .SH NAME osm2pgsql \- Openstreetmap data to PostgreSQL converter. .SH SYNOPSIS .B osm2pgsql .RI [ options ] " planet.osm" .br .B osm2pgsql .RI [ options ] " planet.osm.{gz,bz2,pbf}" .br .B osm2pgsql .RI [ options ] " file1.osm file2.osm file3.osm" .br .SH DESCRIPTION This manual page documents briefly the .B osm2pgsql command. .PP .B osm2pgsql imports data from OSM file(s) into a PostgreSQL database suitable for use by the Mapnik renderer or the Nominatim geocoder. .br OSM planet snapshots can be downloaded from http://planet.openstreetmap.org/. Partial planet files ("extracts") for various countries are available, see http://wiki.openstreetmap.org/wiki/Planet.osm. .PP Extracts in PBF (ProtoBufBinary) format are also available from http://download.geofabrik.de/osm/. 
.PP When operating in "slim" mode (and on a database created in "slim" mode!), .B osm2pgsql can also process OSM change files (osc files), thereby bringing an existing database up to date. .PP .SH OPTIONS These programs follow the usual GNU command line syntax, with long options starting with two dashes (`-'). A summary of options is included below. .TP \fB\-a\fR|\-\-append Add the OSM file into the database without removing existing data. .TP \fB\-b\fR|\-\-bbox Apply a bounding box filter on the imported data Must be specified as: minlon,minlat,maxlon,maxlat e.g. \fB\-\-bbox\fR \fB\-0\fR.5,51.25,0.5,51.75 .TP \fB\-c\fR|\-\-create Remove existing data from the database. This is the default if \fB\-\-append\fR is not specified. .TP \fB\-d\fR|\-\-database name The name of the PostgreSQL database to connect to (default: gis). .TP \fB\-i\fR|\-\-tablespace\-index tablespacename Store all indices in a separate PostgreSQL tablespace named by this parameter. This allows to e.g. store the indices on faster storage like SSDs .TP \fB\ \fR\-\-tablespace\-main\-data tablespacename Store the data tables (non slim) in the given tablespace .TP \fB\ \fR\-\-tablespace\-main\-index tablespacename Store the indices of the main tables (non slim) in the given tablespace .TP \fB\ \fR\-\-tablespace\-slim\-data tablespacename Store the slim mode tables in the given tablespace .TP \fB\ \fR\-\-tablespace\-slim\-index tablespacename Store the indices of the slim mode tables in the given tablespace .TP \fB\-l\fR|\-\-latlong Store data in degrees of latitude & longitude. .TP \fB\-m\fR|\-\-merc Store data in proper spherical Mercator (the default) .TP \fB\-M\fR|\-\-oldmerc Store data in the legacy OSM Mercator format .TP \fB\-E\fR|\-\-proj num Use projection EPSG:num .TP \fB\-u\fR|\-\-utf8\-sanitize Repair bad UTF8 input data (present in planet dumps prior to August 2007). Adds about 10% overhead. 
.TP \fB\-p\fR|\-\-prefix prefix_string Prefix for table names (default planet_osm) .TP \fB\-r\fR|\-\-input\-reader format Select input format reader. Available choices are \fBlibxml2\fR (default) and \fBprimitive\fR for OSM XML format files, \fBo5m\fR for o5m formatted file and \fBpbf\fR for OSM PBF binary format (may not be available on all platforms) .TP \fB\-s\fR|\-\-slim Store temporary data in the database. Without this mode, all temporary data is stored in RAM and if you do not have enough the import will not succeed successfully. With slim mode, you should be able to import the data even on a system with limited RAM, although if you do no have enough RAM to cache at least all of the nodes, the time to import the data will likely be greatly increased. .TP \fB\ \fR\-\-drop Drop the slim mode tables from the database once the import is complete. This can greatly reduce the size of the database, as the slim mode tables typically are the same size, if not slightly bigger than the main tables. It does not, however, reduce the maximum spike of disk usage during import. It can furthermore increase the import speed, as no indices need to be created for the slim mode tables, which (depending on hardware) can nearly half import time. Slim mode tables however have to be persistent if you want to be able to update your database, as these tables are needed for diff processing. .TP \fB\-S\fR|\-\-style /path/to/style Location of the osm2pgsql style file. This specifies which tags from the data get imported into database columns and which tags get dropped. Defaults to /usr/share/osm2pgsql/default.style. .TP \fB\-C\fR|\-\-cache num Only for slim mode: Use up to num many MB of RAM for caching nodes. Giving osm2pgsql sufficient cache to store all imported nodes typically greatly increases the speed of the import. Each cached node requires 8 bytes of cache, plus about 10% - 30% overhead. 
For a current OSM full planet import with its ~ 1.9 billion nodes, a good value would be 17000 if you have enough RAM. If you don't have enough RAM, it is likely beneficial to give osm2pgsql close to the full available amount of RAM. Defaults to 800. .TP \fB\ \fR\-\-cache-strategy strategy There are a number of different modes in which osm2pgsql can organize its node cache in RAM. These are optimized for different assumptions of the data and the hardware resources available. Currently available strategies are \fBdense\fR, \fBchunked\fR, \fBsparse\fR and \fBoptimized\fR. \fBdense\fR assumes that the node id numbers are densely packed, i.e. only a few IDs in the range are missing / deleted. For planet extracts this is usually not the case, making the cache very inefficient and wasteful of RAM. \fBsparse\fR assumes node IDs in the data are not densely packed, greatly increasing caching efficiency in these cases. If node IDs are densely packed, like in the full planet, this strategy has a higher overhead for indexing the cache. \fBoptimized\fR uses both dense and sparse strategies for different ranges of the ID space. On a block by block basis it tries to determine if it is more effective to store the block of IDs in sparse or dense mode. This is the default and should be typically used. .TP \fB\-U\fR|\-\-username name Postgresql user name. .TP \fB\-W\fR|\-\-password Force password prompt. .TP \fB\-H\fR|\-\-host hostname Database server hostname or socket location. .TP \fB\-P\fR|\-\-port num Database server port. .TP \fB\-e\fR|\-\-expire-tiles [min_zoom-]max-zoom Create a tile expiry list. .TP \fB\-o\fR|\-\-expire-output /path/to/expire.list Output file name for expired tiles list. .TP \fB\-o\fR|\-\-output Specifies the output back-end or database schema to use. Currently osm2pgsql supports \fBpgsql\fR, \fBgazetteer\fR and \fBnull\fR. \fBpgsql\fR is the default output back-end / schema and is optimized for rendering with Mapnik. 
\fBgazetteer\fR is a db schema optimized for geocoding and is used by Nominatim. \fBnull\fR does not write any output and is only useful for testing. .TP \fB\-x\fR|\-\-extra\-attributes Include attributes for each object in the database. This includes the username, userid, timestamp and version. Note: this option also requires additional entries in your style file. .TP \fB\-k\fR|\-\-hstore Add tags without column to an additional hstore (key/value) column to PostgreSQL tables .TP \fB\-j\fR|\-\-hstore\-all Add all tags to an additional history (key/value) column in PostgreSQL tables .TP \fB\-z\fR|\-\-hstore\-column key_name Add an additional history (key/value) column containing all tags that start with the specified string, egg --hstore-column "name:" will produce an extra hstore column that contains all name:xx tags .TP \fB\ \fR\-\-hstore\-match\-only Only keep objects that have a value in one of the columns (normal action with --hstore is to keep all objects) .TP \fB\ \fR\-\-hstore-add-index Create indices for the hstore columns during import. .TP \fB\-G\fR|\-\-melts\-geometry Normally osm2pgsql splits multi-part geometries into separate database rows per part. A single OSM id can therefore have several rows. With this option, PostgreSQL instead generates multi-geometry features in the PostgreSQL tables. .TP \fB\-K\fR|\-\-keep-coastlines Keep coastline data rather than filtering it out. By default natural=coastline tagged data will be discarded based on the assumption that post-processed Coastline Checker shape files will be used. .TP \fB\ \fR\-\-exclude-invalid-polygon OpenStreetMap data is defined in terms of nodes, ways and relations and not in terms of actual geometric features. Osm2pgsql therefore tries to build postgis geometries out of this data representation. However not all ways and relations correspond to valid postgis geometries (e.g. self intersecting polygons). 
By default osm2pgsql tries to automatically fix these geometries using ST_Buffer(0) around the invalid polygons. With this option, invalid polygons are instead simply dropped from the database. .TP \fB\ \fR\-\-unlogged Use postgresql's unlogged tables for storing data. This requires PostgreSQL 9.1 or above. Data written to unlogged tables is not written to PostgreSQL's write-ahead log, which makes them considerably faster than ordinary tables. However, they are not crash-safe: an unlogged table is automatically truncated after a crash or unclean shutdown. .TP \fB\ \fR\-\-number\-processes num Specifies the number of parallel processes used for certain operations. If disks are fast enough e.g. if you have an SSD, then this can greatly increase speed of the "going over pending ways" and "going over pending relations" stages on a multi-core server. .TP \fB\-I\fR|\-\-disable-parallel-indexing By default osm2pgsql initiates the index building on all tables in parallel to increase performance. This can be disadvantages on slow disks, or if you don't have enough ram for PostgreSQL to perform up to 7 parallel index building processes (e.g. because maintenance_work_mem is set high) .TP \fB\ \fR\-\-flat-nodes /path/to/nodes.cache The flat-nodes mode is a separate method to store slim mode node information on disk. Instead of storing this information in the main PostgreSQL database, this mode creates its own separate custom database to store the information. As this custom database has application level knowledge about the data to store and is not general purpose, it can store the data much more efficient. Storing the node information for the full planet requires about 100GB in PostgreSQL, the same data is stored in only ~16GB using the flat-nodes mode. This can also increase the speed of applying diff files. This option activates the flat-nodes mode and specifies the location of the database file. It is a single large > 16GB file. 
This mode is only recommended for full planet imports as it doesn't work well with small extracts. The default is disabled .TP \fB\-h\fR|\-\-help Help information. .br Add \fB\-v\fR to display supported projections. .TP \fB\-v\fR|\-\-verbose Verbose output. .PP .SH SUPPORTED PROJECTIONS Latlong (-l) SRS: 4326 (none) .br WGS84 Mercator ( ) SRS: 3395 +proj=merc +datum=WGS84 +k=1.0 +units=m +over +no_defs .br Spherical Mercator (-m) SRS:900913 +proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +no_defs +over .PP .SH SEE ALSO .BR proj (1), .BR postgres (1). .br .SH AUTHOR osm2pgsql was written by Jon Burgess, Artem Pavlenko, and other OpenStreetMap project members. .PP This manual page was written by Andreas Putzo for the Debian project, and amended by OpenStreetMap authors. \" LocalWords: hstore multi Openstreetmap ProtoBufBinary osm2pgsql-0.82.0/expire-tiles.c000066400000000000000000000332261213272333300163130ustar00rootroot00000000000000/* * Dirty tile list generation * * Steve Hill * * Please refer to the OpenPisteMap expire_tiles.py script for a demonstration * of how to make use of the output: * https://subversion.nexusuk.org/trac/browser/openpistemap/trunk/scripts/expire_tiles.py */ #include #include #include #include #include #include "expire-tiles.h" #include "output.h" #include "pgsql.h" #include "build_geometry.h" #include "reprojection.h" #define EARTH_CIRCUMFERENCE 40075016.68 #define HALF_EARTH_CIRCUMFERENCE (EARTH_CIRCUMFERENCE / 2) #define TILE_EXPIRY_LEEWAY 0.1 /* How many tiles worth of space to leave either side of a changed feature */ #define EXPIRE_TILES_MAX_BBOX 20000 /* Maximum width or height of a bounding box (metres) */ struct tile { int complete[2][2]; /* Flags */ struct tile * subtiles[2][2]; }; int map_width; /* not "static" since used in reprojection.c! 
*/ static double tile_width; static const struct output_options * Options; static struct tile * dirty = NULL; static int outcount; /* * We store the dirty tiles in an in-memory tree during runtime * and dump them out to a file at the end. This allows us to easilly drop * duplicate tiles from the output. * * This data structure consists of a node, representing a tile at zoom level 0, * which contains 4 pointers to nodes representing each of the child tiles at * zoom level 1, and so on down the the zoom level specified in * Options->expire_tiles_zoom. * * The memory allowed to this structure is not capped, but daily deltas * generally produce a few hundred thousand expired tiles at zoom level 17, * which are easilly accommodated. */ static int calc_complete(struct tile * tile) { int c; c = tile->complete[0][0]; c += tile->complete[0][1]; c += tile->complete[1][0]; c += tile->complete[1][1]; return c; } static void destroy_tree(struct tile * tree) { if (! tree) return; if (tree->subtiles[0][0]) destroy_tree(tree->subtiles[0][0]); if (tree->subtiles[0][1]) destroy_tree(tree->subtiles[0][1]); if (tree->subtiles[1][0]) destroy_tree(tree->subtiles[1][0]); if (tree->subtiles[1][1]) destroy_tree(tree->subtiles[1][1]); free(tree); } /* * Mark a tile as dirty. * Returns the number of subtiles which have all their children marked as dirty. */ static int _mark_tile(struct tile ** tree, int x, int y, int zoom, int this_zoom) { int zoom_diff = zoom - this_zoom; int rel_x; int rel_y; int complete; if (! *tree) *tree = calloc(1, sizeof(**tree)); zoom_diff = (zoom - this_zoom) - 1; rel_x = (x >> zoom_diff) & 1; rel_y = (y >> zoom_diff) & 1; if (! 
(*tree)->complete[rel_x][rel_y]) { if (zoom_diff <= 0) { (*tree)->complete[rel_x][rel_y] = 1; } else { complete = _mark_tile(&((*tree)->subtiles[rel_x][rel_y]), x, y, zoom, this_zoom + 1); if (complete >= 4) { (*tree)->complete[rel_x][rel_y] = 1; /* We can destroy the subtree to save memory now all the children are dirty */ destroy_tree((*tree)->subtiles[rel_x][rel_y]); (*tree)->subtiles[rel_x][rel_y] = NULL; } } } return calc_complete(*tree); } /* * Mark a tile as dirty. * Returns the number of subtiles which have all their children marked as dirty. */ static int mark_tile(struct tile ** tree_head, int x, int y, int zoom) { return _mark_tile(tree_head, x, y, zoom, 0); } static void output_dirty_tile(FILE * outfile, int x, int y, int zoom, int min_zoom) { int y_min; int x_iter; int y_iter; int x_max; int y_max; int out_zoom; int zoom_diff; if (zoom > min_zoom) out_zoom = zoom; else out_zoom = min_zoom; zoom_diff = out_zoom - zoom; y_min = y << zoom_diff; x_max = (x + 1) << zoom_diff; y_max = (y + 1) << zoom_diff; for (x_iter = x << zoom_diff; x_iter < x_max; x_iter++) { for (y_iter = y_min; y_iter < y_max; y_iter++) { outcount++; if ((outcount <= 1) || (! (outcount % 1000))) { fprintf(stderr, "\rWriting dirty tile list (%iK)", outcount / 1000); fflush(stderr); } fprintf(outfile, "%i/%i/%i\n", out_zoom, x_iter, y_iter); } } } static void _output_and_destroy_tree(FILE * outfile, struct tile * tree, int x, int y, int this_zoom, int min_zoom) { int sub_x = x << 1; int sub_y = y << 1; FILE * ofile; if (! 
tree) return; ofile = outfile; if ((tree->complete[0][0]) && outfile) { output_dirty_tile(outfile, sub_x + 0, sub_y + 0, this_zoom + 1, min_zoom); ofile = NULL; } if (tree->subtiles[0][0]) _output_and_destroy_tree(ofile, tree->subtiles[0][0], sub_x + 0, sub_y + 0, this_zoom + 1, min_zoom); ofile = outfile; if ((tree->complete[0][1]) && outfile) { output_dirty_tile(outfile, sub_x + 0, sub_y + 1, this_zoom + 1, min_zoom); ofile = NULL; } if (tree->subtiles[0][1]) _output_and_destroy_tree(ofile, tree->subtiles[0][1], sub_x + 0, sub_y + 1, this_zoom + 1, min_zoom); ofile = outfile; if ((tree->complete[1][0]) && outfile) { output_dirty_tile(outfile, sub_x + 1, sub_y + 0, this_zoom + 1, min_zoom); ofile = NULL; } if (tree->subtiles[1][0]) _output_and_destroy_tree(ofile, tree->subtiles[1][0], sub_x + 1, sub_y + 0, this_zoom + 1, min_zoom); ofile = outfile; if ((tree->complete[1][1]) && outfile) { output_dirty_tile(outfile, sub_x + 1, sub_y + 1, this_zoom + 1, min_zoom); ofile = NULL; } if (tree->subtiles[1][1]) _output_and_destroy_tree(ofile, tree->subtiles[1][1], sub_x + 1, sub_y + 1, this_zoom + 1, min_zoom); free(tree); } static void output_and_destroy_tree(FILE * outfile, struct tile * tree) { _output_and_destroy_tree(outfile, tree, 0, 0, 0, Options->expire_tiles_zoom_min); } void expire_tiles_stop(void) { FILE * outfile; if (Options->expire_tiles_zoom < 0) return; outcount = 0; if ((outfile = fopen(Options->expire_tiles_filename, "a"))) { output_and_destroy_tree(outfile, dirty); fclose(outfile); } else { fprintf(stderr, "Failed to open expired tiles file (%s). 
Tile expiry list will not be written!\n", strerror(errno)); } dirty = NULL; } void expire_tiles_init(const struct output_options *options) { Options = options; if (Options->expire_tiles_zoom < 0) return; map_width = 1 << Options->expire_tiles_zoom; tile_width = EARTH_CIRCUMFERENCE / map_width; } static void expire_tile(int x, int y) { mark_tile(&dirty, x, y, Options->expire_tiles_zoom); } static int normalise_tile_x_coord(int x) { x %= map_width; if (x < 0) x = (map_width - x) + 1; return x; } /* * Expire tiles that a line crosses */ static void expire_tiles_from_line(double lon_a, double lat_a, double lon_b, double lat_b) { double tile_x_a; double tile_y_a; double tile_x_b; double tile_y_b; double temp; double x1; double y1; double x2; double y2; double hyp_len; double x_len; double y_len; double x_step; double y_step; double step; double next_step; int x; int y; int norm_x; coords_to_tile(&tile_x_a, &tile_y_a, lon_a, lat_a); coords_to_tile(&tile_x_b, &tile_y_b, lon_b, lat_b); if (tile_x_a > tile_x_b) { /* We always want the line to go from left to right - swap the ends if it doesn't */ temp = tile_x_b; tile_x_b = tile_x_a; tile_x_a = temp; temp = tile_y_b; tile_y_b = tile_y_a; tile_y_a = temp; } x_len = tile_x_b - tile_x_a; if (x_len > map_width / 2) { /* If the line is wider than half the map, assume it crosses the international date line. 
These coordinates get normalised again later */ tile_x_a += map_width; temp = tile_x_b; tile_x_b = tile_x_a; tile_x_a = temp; temp = tile_y_b; tile_y_b = tile_y_a; tile_y_a = temp; } y_len = tile_y_b - tile_y_a; hyp_len = sqrt(pow(x_len, 2) + pow(y_len, 2)); /* Pythagoras */ x_step = x_len / hyp_len; y_step = y_len / hyp_len; for (step = 0; step <= hyp_len; step+= 0.4) { /* Interpolate points 1 tile width apart */ next_step = step + 0.4; if (next_step > hyp_len) next_step = hyp_len; x1 = tile_x_a + ((double)step * x_step); y1 = tile_y_a + ((double)step * y_step); x2 = tile_x_a + ((double)next_step * x_step); y2 = tile_y_a + ((double)next_step * y_step); /* The line (x1,y1),(x2,y2) is up to 1 tile width long x1 will always be <= x2 We could be smart and figure out the exact tiles intersected, but for simplicity, treat the coordinates as a bounding box and expire everything within that box. */ if (y1 > y2) { temp = y2; y2 = y1; y1 = temp; } for (x = x1 - TILE_EXPIRY_LEEWAY; x <= x2 + TILE_EXPIRY_LEEWAY; x ++) { norm_x = normalise_tile_x_coord(x); for (y = y1 - TILE_EXPIRY_LEEWAY; y <= y2 + TILE_EXPIRY_LEEWAY; y ++) { expire_tile(norm_x, y); } } } } /* * Expire tiles within a bounding box */ int expire_tiles_from_bbox(double min_lon, double min_lat, double max_lon, double max_lat) { double width; double height; int min_tile_x; int min_tile_y; int max_tile_x; int max_tile_y; int iterator_x; int iterator_y; int norm_x; int ret; double tmp_x; double tmp_y; if (Options->expire_tiles_zoom < 0) return 0; width = max_lon - min_lon; height = max_lat - min_lat; if (width > HALF_EARTH_CIRCUMFERENCE + 1) { /* Over half the planet's width within the bounding box - assume the box crosses the international date line and split it into two boxes */ ret = expire_tiles_from_bbox(-HALF_EARTH_CIRCUMFERENCE, min_lat, min_lon, max_lat); ret += expire_tiles_from_bbox(max_lon, min_lat, HALF_EARTH_CIRCUMFERENCE, max_lat); return ret; } if (width > EXPIRE_TILES_MAX_BBOX) return -1; if (height 
> EXPIRE_TILES_MAX_BBOX) return -1; /* Convert the box's Mercator coordinates into tile coordinates */ coords_to_tile(&tmp_x, &tmp_y, min_lon, max_lat); min_tile_x = tmp_x - TILE_EXPIRY_LEEWAY; min_tile_y = tmp_y - TILE_EXPIRY_LEEWAY; coords_to_tile(&tmp_x, &tmp_y, max_lon, min_lat); max_tile_x = tmp_x + TILE_EXPIRY_LEEWAY; max_tile_y = tmp_y + TILE_EXPIRY_LEEWAY; if (min_tile_x < 0) min_tile_x = 0; if (min_tile_y < 0) min_tile_y = 0; if (max_tile_x > map_width) max_tile_x = map_width; if (max_tile_y > map_width) max_tile_y = map_width; for (iterator_x = min_tile_x; iterator_x <= max_tile_x; iterator_x ++) { norm_x = normalise_tile_x_coord(iterator_x); for (iterator_y = min_tile_y; iterator_y <= max_tile_y; iterator_y ++) { expire_tile(norm_x, iterator_y); } } return 0; } void expire_tiles_from_nodes_line(struct osmNode * nodes, int count) { int i; double last_lat; double last_lon; if (Options->expire_tiles_zoom < 0) return; if (count < 1) return; last_lat = nodes[0].lat; last_lon = nodes[0].lon; if (count < 2) { expire_tiles_from_bbox(last_lon, last_lat, last_lon, last_lat); return; } for (i = 1; i < count; i ++) { expire_tiles_from_line(last_lon, last_lat, nodes[i].lon, nodes[i].lat); last_lat = nodes[i].lat; last_lon = nodes[i].lon; } } /* * Calculate a bounding box from a list of nodes and expire all tiles within it */ void expire_tiles_from_nodes_poly(struct osmNode * nodes, int count, osmid_t osm_id) { int i; int got_coords = 0; double min_lon = 0.0; double min_lat = 0.0; double max_lon = 0.0; double max_lat = 0.0; if (Options->expire_tiles_zoom < 0) return; for (i = 0; i < count; i++) { if ((! got_coords) || (nodes[i].lon < min_lon)) min_lon = nodes[i].lon; if ((! got_coords) || (nodes[i].lat < min_lat)) min_lat = nodes[i].lat; if ((! got_coords) || (nodes[i].lon > max_lon)) max_lon = nodes[i].lon; if ((! 
got_coords) || (nodes[i].lat > max_lat)) max_lat = nodes[i].lat; got_coords = 1; } if (got_coords) { if (expire_tiles_from_bbox(min_lon, min_lat, max_lon, max_lat)) { /* Bounding box too big - just expire tiles on the line */ fprintf(stderr, "\rLarge polygon (%.0f x %.0f metres, OSM ID %" PRIdOSMID ") - only expiring perimeter\n", max_lon - min_lon, max_lat - min_lat, osm_id); expire_tiles_from_nodes_line(nodes, count); } } } static void expire_tiles_from_xnodes_poly(struct osmNode ** xnodes, int * xcount, osmid_t osm_id) { int i; for (i = 0; xnodes[i]; i++) expire_tiles_from_nodes_poly(xnodes[i], xcount[i], osm_id); } static void expire_tiles_from_xnodes_line(struct osmNode ** xnodes, int * xcount) { int i; for (i = 0; xnodes[i]; i++) expire_tiles_from_nodes_line(xnodes[i], xcount[i]); } void expire_tiles_from_wkt(const char * wkt, osmid_t osm_id) { struct osmNode ** xnodes; int * xcount; int polygon; int i; if (Options->expire_tiles_zoom < 0) return; if (! parse_wkt(wkt, &xnodes, &xcount, &polygon)) { if (polygon) expire_tiles_from_xnodes_poly(xnodes, xcount, osm_id); else expire_tiles_from_xnodes_line(xnodes, xcount); for (i = 0; xnodes[i]; i++) free(xnodes[i]); free(xnodes); free(xcount); } } /* * Expire tiles based on an osm element. * What type of element (node, line, polygon) osm_id refers to depends on * sql_conn. Each type of table has its own sql_conn and the prepared statement * get_wkt refers to the appropriate table. * * The function returns -1 if expiry is not enabled. Otherwise it returns the number * of elements that refer to the osm_id. 
*/ int expire_tiles_from_db(PGconn * sql_conn, osmid_t osm_id) { PGresult * res; char * wkt; int i, noElements = 0; char const *paramValues[1]; char tmp[16]; if (Options->expire_tiles_zoom < 0) return -1; snprintf(tmp, sizeof(tmp), "%" PRIdOSMID, osm_id); paramValues[0] = tmp; /* The prepared statement get_wkt will behave differently depending on the sql_conn * each table has its own sql_connection with the get_way refering to the approriate table */ res = pgsql_execPrepared(sql_conn, "get_wkt", 1, (const char * const *)paramValues, PGRES_TUPLES_OK); noElements = PQntuples(res); for (i = 0; i < noElements; i++) { wkt = PQgetvalue(res, i, 0); expire_tiles_from_wkt(wkt, osm_id); } PQclear(res); return noElements; } osm2pgsql-0.82.0/expire-tiles.h000066400000000000000000000010331213272333300163070ustar00rootroot00000000000000#ifndef EXPIRE_TILES_H #define EXPIRE_TILES_H #include "output.h" void expire_tiles_init(const struct output_options *options); void expire_tiles_stop(void); int expire_tiles_from_bbox(double min_lon, double min_lat, double max_lon, double max_lat); void expire_tiles_from_nodes_line(struct osmNode * nodes, int count); void expire_tiles_from_nodes_poly(struct osmNode * nodes, int count, osmid_t osm_id); void expire_tiles_from_wkt(const char * wkt, osmid_t osm_id); int expire_tiles_from_db(PGconn * sql_conn, osmid_t osm_id); #endif osm2pgsql-0.82.0/geos-fallback/000077500000000000000000000000001213272333300162215ustar00rootroot00000000000000osm2pgsql-0.82.0/geos-fallback/geos/000077500000000000000000000000001213272333300171565ustar00rootroot00000000000000osm2pgsql-0.82.0/geos-fallback/geos/noding/000077500000000000000000000000001213272333300204345ustar00rootroot00000000000000osm2pgsql-0.82.0/geos-fallback/geos/noding/SegmentNode.h000066400000000000000000000061201213272333300230140ustar00rootroot00000000000000/********************************************************************** * $Id: SegmentNode.h 1820 2006-09-06 16:54:23Z mloskot $ * * GEOS - 
Geometry Engine Open Source * http://geos.refractions.net * * Copyright (C) 2006 Refractions Research Inc. * * This is free software; you can redistribute and/or modify it under * the terms of the GNU Lesser General Public Licence as published * by the Free Software Foundation. * See the COPYING file for more information. * **********************************************************************/ #ifndef GEOS_NODING_SEGMENTNODE_H #define GEOS_NODING_SEGMENTNODE_H #include #include #include #include // Forward declarations namespace geos { namespace noding { class SegmentString; } } namespace geos { namespace noding { // geos.noding /// Represents an intersection point between two SegmentString. // /// Final class. /// /// Last port: noding/SegmentNode.java rev. 1.5 (JTS-1.7) /// class SegmentNode { private: const SegmentString& segString; int segmentOctant; bool isInteriorVar; public: friend std::ostream& operator<< (std::ostream& os, const SegmentNode& n); /// the point of intersection (own copy) geom::Coordinate coord; /// the index of the containing line segment in the parent edge unsigned int segmentIndex; /// Construct a node on the given SegmentString // /// @param ss the parent SegmentString /// /// @param coord the coordinate of the intersection, will be copied /// /// @param nSegmentIndex the index of the segment on parent SegmentString /// where the Node is located. /// /// @param nSegmentOctant /// SegmentNode(const SegmentString& ss, const geom::Coordinate& nCoord, unsigned int nSegmentIndex, int nSegmentOctant); ~SegmentNode() {} /// \brief /// Return true if this Node is *internal* (not on the boundary) /// of the corresponding segment. Currently only the *first* /// segment endpoint is checked, actually. 
/// bool isInterior() const { return isInteriorVar; } bool isEndPoint(unsigned int maxSegmentIndex) const; /** * @return -1 this EdgeIntersection is located before * the argument location * @return 0 this EdgeIntersection is at the argument location * @return 1 this EdgeIntersection is located after the * argument location */ int compareTo(const SegmentNode& other); //string print() const; }; std::ostream& operator<< (std::ostream& os, const SegmentNode& n); struct SegmentNodeLT { bool operator()(SegmentNode *s1, SegmentNode *s2) const { return s1->compareTo(*s2)<0; } }; } // namespace geos.noding } // namespace geos //#ifdef GEOS_INLINE //# include "geos/noding/SegmentNode.inl" //#endif #endif // GEOS_NODING_SEGMENTNODE_H /********************************************************************** * $Log$ * Revision 1.2 2006/03/24 09:52:41 strk * USE_INLINE => GEOS_INLINE * * Revision 1.1 2006/03/09 16:46:49 strk * geos::geom namespace definition, first pass at headers split * **********************************************************************/ osm2pgsql-0.82.0/geos-fallback/geos/noding/SegmentNodeList.h000066400000000000000000000133671213272333300236630ustar00rootroot00000000000000/********************************************************************** * $Id: SegmentNodeList.h 1820 2006-09-06 16:54:23Z mloskot $ * * GEOS - Geometry Engine Open Source * http://geos.refractions.net * * Copyright (C) 2006 Refractions Research Inc. * * This is free software; you can redistribute and/or modify it under * the terms of the GNU Lesser General Public Licence as published * by the Free Software Foundation. * See the COPYING file for more information. 
* **********************************************************************/ #ifndef GEOS_NODING_SEGMENTNODELIST_H #define GEOS_NODING_SEGMENTNODELIST_H #include #include #include #include #include #include // Forward declarations namespace geos { namespace geom { class CoordinateSequence; } namespace noding { class SegmentString; } } namespace geos { namespace noding { // geos::noding /** \brief * A list of the SegmentNode present along a * noded SegmentString. * * Last port: noding/SegmentNodeList.java rev. 1.7 (JTS-1.7) */ class SegmentNodeList { private: std::set nodeMap; // the parent edge const SegmentString& edge; // UNUSED //std::vector *sortedNodes; // This vector is here to keep track of created splitEdges std::vector splitEdges; // This vector is here to keep track of created Coordinates std::vector splitCoordLists; /** * Checks the correctness of the set of split edges corresponding * to this edge * * @param splitEdges the split edges for this edge (in order) */ void checkSplitEdgesCorrectness(std::vector& splitEdges); /** * Create a new "split edge" with the section of points between * (and including) the two intersections. * The label for the new edge is the same as the label for the * parent edge. */ SegmentString* createSplitEdge(SegmentNode *ei0, SegmentNode *ei1); /** * Adds nodes for any collapsed edge pairs. * Collapsed edge pairs can be caused by inserted nodes, or they * can be pre-existing in the edge vertex list. * In order to provide the correct fully noded semantics, * the vertex at the base of a collapsed pair must also be added * as a node. */ void addCollapsedNodes(); /** * Adds nodes for any collapsed edge pairs * which are pre-existing in the vertex list. */ void findCollapsesFromExistingVertices( std::vector& collapsedVertexIndexes); /** * Adds nodes for any collapsed edge pairs caused by inserted nodes * Collapsed edge pairs occur when the same coordinate is inserted * as a node both before and after an existing edge vertex. 
* To provide the correct fully noded semantics, * the vertex must be added as a node as well. */ void findCollapsesFromInsertedNodes( std::vector& collapsedVertexIndexes); bool findCollapseIndex(SegmentNode& ei0, SegmentNode& ei1, size_t& collapsedVertexIndex); public: friend std::ostream& operator<< (std::ostream& os, const SegmentNodeList& l); typedef std::set container; typedef container::iterator iterator; typedef container::const_iterator const_iterator; SegmentNodeList(const SegmentString* newEdge): edge(*newEdge) {} SegmentNodeList(const SegmentString& newEdge): edge(newEdge) {} const SegmentString& getEdge() const { return edge; } // TODO: Is this a final class ? // Should remove the virtual in that case virtual ~SegmentNodeList(); /** * Adds an intersection into the list, if it isn't already there. * The input segmentIndex is expected to be normalized. * * @return the SegmentIntersection found or added. It will be * destroyed at SegmentNodeList destruction time. * * @param intPt the intersection Coordinate, will be copied * @param segmentIndex */ SegmentNode* add(const geom::Coordinate& intPt, size_t segmentIndex); SegmentNode* add(const geom::Coordinate *intPt, size_t segmentIndex) { return add(*intPt, segmentIndex); } /* * returns the set of SegmentNodes */ //replaces iterator() // TODO: obsolete this function std::set* getNodes() { return &nodeMap; } /// Return the number of nodes in this list size_t size() const { return nodeMap.size(); } container::iterator begin() { return nodeMap.begin(); } container::const_iterator begin() const { return nodeMap.begin(); } container::iterator end() { return nodeMap.end(); } container::const_iterator end() const { return nodeMap.end(); } /** * Adds entries for the first and last points of the edge to the list */ void addEndpoints(); /** * Creates new edges for all the edges that the intersections in this * list split the parent edge into. 
* Adds the edges to the input list (this is so a single list * can be used to accumulate all split edges for a Geometry). */ void addSplitEdges(std::vector& edgeList); void addSplitEdges(std::vector* edgeList) { assert(edgeList); addSplitEdges(*edgeList); } //string print(); }; std::ostream& operator<< (std::ostream& os, const SegmentNodeList& l); } // namespace geos::noding } // namespace geos //#ifdef GEOS_INLINE //# include "geos/noding/SegmentNodeList.inl" //#endif #endif /********************************************************************** * $Log$ * Revision 1.4 2006/06/12 11:29:23 strk * unsigned int => size_t * * Revision 1.3 2006/05/04 07:41:56 strk * const-correct size() method for SegmentNodeList * * Revision 1.2 2006/03/24 09:52:41 strk * USE_INLINE => GEOS_INLINE * * Revision 1.1 2006/03/09 16:46:49 strk * geos::geom namespace definition, first pass at headers split * **********************************************************************/ osm2pgsql-0.82.0/geos-fallback/geos/noding/SegmentString.h000066400000000000000000000143311213272333300234000ustar00rootroot00000000000000/********************************************************************** * $Id: SegmentString.h 1872 2006-10-20 11:18:39Z strk $ * * GEOS - Geometry Engine Open Source * http://geos.refractions.net * * Copyright (C) 2006 Refractions Research Inc. * * This is free software; you can redistribute and/or modify it under * the terms of the GNU Lesser General Public Licence as published * by the Free Software Foundation. * See the COPYING file for more information. 
* **********************************************************************/ #ifndef GEOS_NODING_SEGMENTSTRING_H #define GEOS_NODING_SEGMENTSTRING_H #include #include // for testInvariant #include #include // Forward declarations namespace geos { namespace algorithm { class LineIntersector; } } namespace geos { namespace noding { // geos.noding /** \brief * Represents a list of contiguous line segments, * and supports noding the segments. * * The line segments are represented by a CoordinateSequence. * * TODO: * This should be changed to use a vector of Coordinate, * to optimize the noding of contiguous segments by * reducing the number of allocated objects. * * SegmentStrings can carry a context object, which is useful * for preserving topological or parentage information. * All noded substrings are initialized with the same context object. * * Final class. * * Last port: noding/SegmentString.java rev. 1.5 (JTS-1.7) */ class SegmentString { public: typedef std::vector ConstVect; typedef std::vector NonConstVect; friend std::ostream& operator<< (std::ostream& os, const SegmentString& ss); private: SegmentNodeList nodeList; geom::CoordinateSequence *pts; mutable unsigned int npts; // this is a cache const void* context; bool isIsolatedVar; public: void testInvariant() const; /// Construct a SegmentString. // /// @param newPts CoordinateSequence representing the string, /// externally owned /// /// @param newContext the context associated to this SegmentString /// SegmentString(geom::CoordinateSequence *newPts, const void* newContext); ~SegmentString(); //const void* getContext() const { return getData(); } const void* getData() const; const SegmentNodeList& getNodeList() const; SegmentNodeList& getNodeList(); unsigned int size() const; const geom::Coordinate& getCoordinate(unsigned int i) const; /// \brief /// Return a pointer to the CoordinateSequence associated /// with this SegmentString. 
// /// Note that the CoordinateSequence is not owned by /// this SegmentString! /// geom::CoordinateSequence* getCoordinates() const; /// \brief /// Notify this object that the CoordinateSequence associated /// with it might have been updated. // /// This must be called so that the SegmentString object makes /// all the necessary checks and updates to verify consistency /// void notifyCoordinatesChange() const; // Return a read-only pointer to this SegmentString CoordinateSequence //const CoordinateSequence* getCoordinatesRO() const { return pts; } void setIsolated(bool isIsolated); bool isIsolated() const; bool isClosed() const; /** \brief * Gets the octant of the segment starting at vertex * index. * * @param index the index of the vertex starting the segment. * Must not be the last index in the vertex list * @return the octant of the segment at the vertex */ int getSegmentOctant(unsigned int index) const; /** \brief * Add {SegmentNode}s for one or both * intersections found for a segment of an edge to the edge * intersection list. */ void addIntersections(algorithm::LineIntersector *li, unsigned int segmentIndex, int geomIndex); /** \brief * Add an SegmentNode for intersection intIndex. * * An intersection that falls exactly on a vertex * of the SegmentString is normalized * to use the higher of the two possible segmentIndexes */ void addIntersection(algorithm::LineIntersector *li, unsigned int segmentIndex, int geomIndex, int intIndex); /** \brief * Add an SegmentNode for intersection intIndex. 
* * An intersection that falls exactly on a vertex of the * edge is normalized * to use the higher of the two possible segmentIndexes */ void addIntersection(const geom::Coordinate& intPt, unsigned int segmentIndex); static void getNodedSubstrings( const SegmentString::NonConstVect& segStrings, SegmentString::NonConstVect* resultEdgeList); static SegmentString::NonConstVect* getNodedSubstrings( const SegmentString::NonConstVect& segStrings); }; inline void SegmentString::testInvariant() const { assert(pts); assert(pts->size() > 1); assert(pts->size() == npts); } std::ostream& operator<< (std::ostream& os, const SegmentString& ss); } // namespace geos.noding } // namespace geos #ifdef GEOS_INLINE # include "geos/noding/SegmentString.inl" #endif #endif /********************************************************************** * $Log$ * Revision 1.10 2006/05/05 14:25:05 strk * moved getSegmentOctant out of .inl into .cpp, renamed private eiList to nodeList as in JTS, added more assertion checking and fixed doxygen comments * * Revision 1.9 2006/05/05 10:19:06 strk * droppped SegmentString::getContext(), new name is getData() to reflect change in JTS * * Revision 1.8 2006/05/04 08:29:07 strk * * source/noding/ScaledNoder.cpp: removed use of SegmentString::setCoordinates(). * * source/headers/geos/noding/SegmentStrign.{h,inl}: removed new setCoordinates() interface. 
* * Revision 1.7 2006/05/04 07:43:44 strk * output operator for SegmentString class * * Revision 1.6 2006/05/03 18:04:49 strk * added SegmentString::setCoordinates() interface * * Revision 1.5 2006/05/03 16:19:39 strk * fit in 80 columns * * Revision 1.4 2006/05/03 15:26:02 strk * testInvariant made public and always inlined * * Revision 1.3 2006/03/24 09:52:41 strk * USE_INLINE => GEOS_INLINE * * Revision 1.2 2006/03/13 21:14:24 strk * Added missing forward declarations * * Revision 1.1 2006/03/09 16:46:49 strk * geos::geom namespace definition, first pass at headers split * **********************************************************************/ osm2pgsql-0.82.0/input.c000066400000000000000000000126531213272333300150410ustar00rootroot00000000000000#define _FILE_OFFSET_BITS 64 #define _LARGEFILE64_SOURCE #ifdef __MINGW_H # include #else #include #include #include #include #include #include #include #endif #include #include #include "sanitizer.h" #include "input.h" struct Input { char *name; enum { plainFile, gzipFile, bzip2File } type; void *fileHandle; /* needed by bzip2 when decompressing from multiple streams. other decompressors must ignore it. */ FILE *systemHandle; int eof; char buf[4096]; int buf_ptr, buf_fill; }; /* tries to re-open the bz stream at the next stream start. returns 0 on success, -1 on failure. */ int bzReOpen(struct Input *ctx, int *error) { /* for copying out the last unused part of the block which has an EOS token in it. needed for re-initialising the next stream. */ unsigned char unused[BZ_MAX_UNUSED]; void *unused_tmp_ptr = NULL; int nUnused, i; BZ2_bzReadGetUnused(error, (BZFILE *)(ctx->fileHandle), &unused_tmp_ptr, &nUnused); if (*error != BZ_OK) return -1; /* when bzReadClose is called the unused buffer is deallocated, so it needs to be copied somewhere safe first. 
*/ for (i = 0; i < nUnused; ++i) unused[i] = ((unsigned char *)unused_tmp_ptr)[i]; BZ2_bzReadClose(error, (BZFILE *)(ctx->fileHandle)); if (*error != BZ_OK) return -1; /* reassign the file handle */ ctx->fileHandle = BZ2_bzReadOpen(error, ctx->systemHandle, 0, 0, unused, nUnused); if (ctx->fileHandle == NULL || *error != BZ_OK) return -1; return 0; } int readFile(void *context, char * buffer, int len) { struct Input *ctx = context; void *f = ctx->fileHandle; int l = 0, error = 0; if (ctx->eof || (len == 0)) return 0; switch(ctx->type) { case plainFile: l = read(*(int *)f, buffer, len); if (l <= 0) ctx->eof = 1; break; case gzipFile: l = gzread((gzFile)f, buffer, len); if (l <= 0) ctx->eof = 1; break; case bzip2File: l = BZ2_bzRead(&error, (BZFILE *)f, buffer, len); /* error codes BZ_OK and BZ_STREAM_END are both "OK", but the stream end means the reader needs to be reset from the original handle. */ if (error != BZ_OK) { /* for stream errors, try re-opening the stream before admitting defeat. 
*/ if (error != BZ_STREAM_END || bzReOpen(ctx, &error) != 0) { l = 0; ctx->eof = 1; } } break; default: fprintf(stderr, "Bad file type\n"); break; } if (l < 0) { fprintf(stderr, "File reader received error %d (%d)\n", l, error); l = 0; } return l; } char inputGetChar(void *context) { struct Input *ctx = context; if (ctx->buf_ptr == ctx->buf_fill) { ctx->buf_fill = readFile(context, &ctx->buf[0], sizeof(ctx->buf)); ctx->buf_ptr = 0; if (ctx->buf_fill == 0) return 0; if (ctx->buf_fill < 0) { perror("Error while reading file"); exit(1); } } return ctx->buf[ctx->buf_ptr++]; } int inputEof(void *context) { return ((struct Input *)context)->eof; } void *inputOpen(const char *name) { const char *ext = strrchr(name, '.'); struct Input *ctx = malloc (sizeof(*ctx)); if (!ctx) return NULL; memset(ctx, 0, sizeof(*ctx)); ctx->name = malloc(strlen(name) + 1); if (ctx->name) strcpy(ctx->name, name); if (ext && !strcmp(ext, ".gz")) { ctx->fileHandle = (void *)gzopen(name, "rb"); ctx->type = gzipFile; } else if (ext && !strcmp(ext, ".bz2")) { int error = 0; ctx->systemHandle = fopen(name, "rb"); if (!ctx->systemHandle) { fprintf(stderr, "error while opening file %s\n", name); exit(10); } ctx->fileHandle = (void *)BZ2_bzReadOpen(&error, ctx->systemHandle, 0, 0, NULL, 0); ctx->type = bzip2File; } else { int *pfd = malloc(sizeof(int)); if (pfd) { if (!strcmp(name, "-")) { *pfd = STDIN_FILENO; } else { int flags = O_RDONLY; #ifdef O_LARGEFILE flags |= O_LARGEFILE; #endif *pfd = open(name, flags); if (*pfd < 0) { free(pfd); pfd = NULL; } } } ctx->fileHandle = (void *)pfd; ctx->type = plainFile; } if (!ctx->fileHandle) { fprintf(stderr, "error while opening file %s\n", name); exit(10); } ctx->buf_ptr = 0; ctx->buf_fill = 0; return (void *)ctx; } int inputClose(void *context) { struct Input *ctx = context; void *f = ctx->fileHandle; switch(ctx->type) { case plainFile: close(*(int *)f); free(f); break; case gzipFile: gzclose((gzFile)f); break; case bzip2File: BZ2_bzclose((BZFILE *)f); 
break; default: fprintf(stderr, "Bad file type\n"); break; } free(ctx->name); free(ctx); return 0; } xmlTextReaderPtr inputUTF8(const char *name) { void *ctx = inputOpen(name); if (!ctx) { fprintf(stderr, "Input reader create failed for: %s\n", name); return NULL; } return xmlReaderForIO(readFile, inputClose, (void *)ctx, NULL, NULL, 0); } osm2pgsql-0.82.0/input.h000066400000000000000000000004151213272333300150370ustar00rootroot00000000000000#ifndef INPUT_H #define INPUT_H int readFile(void *context, char * buffer, int len); int inputClose(void *context); void *inputOpen(const char *name); char inputGetChar(void *context); int inputEof(void *context); xmlTextReaderPtr inputUTF8(const char *name); #endif osm2pgsql-0.82.0/install-postgis-osm-db.sh000077500000000000000000000051531213272333300204050ustar00rootroot00000000000000#!/bin/sh set -e if [ -z $DBOWNER ]; then DBOWNER=gis fi if [ -z $DBNAME ]; then DBNAME=gis fi # echo "Removing Old Database" # sudo -u postgres dropdb $DBNAME >/dev/null 2>&1 || true echo "Create user $DBOWNER" sudo -u postgres createuser --no-superuser --no-createdb --no-createrole "$DBOWNER" || true echo "Creating Database" sudo -u postgres createdb -EUTF8 -O $DBOWNER $DBNAME echo "Initializing Database" sudo -u postgres createlang plpgsql $DBNAME || true if [ -e /usr/share/postgresql/9.1/contrib/postgis-1.5/postgis.sql ] ; then echo "Initializing Spatial Extentions for postgresql 9.1" file_postgis=/usr/share/postgresql/9.1/contrib/postgis-1.5/postgis.sql file_spatial_ref=/usr/share/postgresql/9.1/contrib/postgis-1.5/spatial_ref_sys.sql sudo -u postgres psql $DBNAME <$file_postgis >/dev/null 2>&1 sudo -u postgres psql $DBNAME <$file_spatial_ref >/dev/null 2>&1 echo "Spatial Extentions initialized" echo "Initializing hstore" echo "CREATE EXTENSION hstore;" | sudo -u postgres psql $DBNAME else echo "Initializing Spatial Extentions for postgresql 8.4" file_postgis=/usr/share/postgresql/8.4/contrib/postgis-1.5/postgis.sql 
file_spatial_ref=/usr/share/postgresql/8.4/contrib/postgis-1.5/spatial_ref_sys.sql sudo -u postgres psql $DBNAME <$file_postgis >/dev/null 2>&1 sudo -u postgres psql $DBNAME <$file_spatial_ref >/dev/null 2>&1 echo "Spatial Extentions initialized" echo "Initializing hstore" file_hstore=/usr/share/postgresql/8.4/contrib/hstore.sql sudo -u postgres psql $DBNAME <$file_hstore >/dev/null 2>&1 fi echo "Setting ownership to user $DBOWNER" echo 'ALTER TABLE geometry_columns OWNER TO ' $DBOWNER '; ALTER TABLE spatial_ref_sys OWNER TO ' $DBOWNER ';' | sudo -u postgres psql $DBNAME if [ -n "$GRANT_USER" ] ; then if [ "$GRANT_USER" = "*" ] ; then echo "GRANT Rights to every USER" GRANT_USER='' for user in `users` ; do GRANT_USER="$GRANT_USER $user" done fi for user in $GRANT_USER; do sudo -u postgres createuser --no-superuser --no-createdb --no-createrole "$user" || true echo "Granting rights to user '$user'" ( echo "GRANT ALL on geometry_columns TO \"$user\";" echo "GRANT ALL ON SCHEMA PUBLIC TO \"$user\";" echo "GRANT ALL on spatial_ref_sys TO \"$user\";" )| sudo -u postgres psql -U postgres $DBNAME done else echo "No extra user for postgress Database created. 
Please do so yourself" fi exit 0 osm2pgsql-0.82.0/install-postgis-osm-user.sh000077500000000000000000000024041213272333300207720ustar00rootroot00000000000000#!/bin/sh set -e if [ $# -ne 2 ] ; then echo "Usage: install-postgis-osm-user.sh DBNAME USERNAME" exit fi DBNAME=$1 GRANT_USER=$2 if [ -n "$GRANT_USER" ] ; then if [ "$GRANT_USER" = "*" ] ; then echo "GRANT Rights to every USER" GRANT_USER='' for user in `users` ; do GRANT_USER="$GRANT_USER $user" done fi for user in $GRANT_USER; do sudo -u postgres createuser --no-superuser --no-createdb --no-createrole "$user" || true echo "Granting rights to user '$user'" ( echo "GRANT ALL on geometry_columns TO \"$user\";" echo "GRANT ALL ON SCHEMA PUBLIC TO \"$user\";" echo "GRANT ALL on spatial_ref_sys TO \"$user\";" echo "GRANT ALL on planet_osm_line TO \"$user\";" echo "GRANT ALL on planet_osm_nodes TO \"$user\";" echo "GRANT ALL on planet_osm_point TO \"$user\";" echo "GRANT ALL on planet_osm_rels TO \"$user\";" echo "GRANT ALL on planet_osm_roads TO \"$user\";" echo "GRANT ALL on planet_osm_ways TO \"$user\";" echo "GRANT ALL on planet_osm_polygon TO \"$user\";" )| sudo -u postgres psql -Upostgres $DBNAME done else echo "No extra user for postgress Database created. 
Please do so yourself" fi exit 0osm2pgsql-0.82.0/keyvals.c000066400000000000000000000147271213272333300153640ustar00rootroot00000000000000/* Common key-value list processing * * Used as a small general purpose store for * tags, segment lists etc * */ #define USE_TREE #include #include #include #include #include #include "keyvals.h" #ifdef USE_TREE #include "text-tree.h" #endif void initList(struct keyval *head) { assert(head); head->next = head; head->prev = head; head->key = NULL; head->value = NULL; head->has_column = 0; } void freeItem(struct keyval *p) { if (!p) return; #ifdef USE_TREE text_release(tree_ctx, p->key); text_release(tree_ctx, p->value); #else free(p->key); free(p->value); #endif free(p); } unsigned int countList(struct keyval *head) { struct keyval *p; unsigned int count = 0; if (!head) return 0; p = head->next; while(p != head) { count++; p = p->next; } return count; } int listHasData(struct keyval *head) { if (!head) return 0; return (head->next != head); } char *getItem(struct keyval *head, const char *name) { struct keyval *p; if (!head) return NULL; p = head->next; while(p != head) { if (!strcmp(p->key, name)) return p->value; p = p->next; } return NULL; } /* unlike getItem this function gives a pointer to the whole list item which can be used to remove the tag from the linked list with the removeTag function */ struct keyval *getTag(struct keyval *head, const char *name) { struct keyval *p; if (!head) return NULL; p = head->next; while(p != head) { if (!strcmp(p->key, name)) return p; p = p->next; } return NULL; } void removeTag(struct keyval *tag) { tag->prev->next=tag->next; tag->next->prev=tag->prev; freeItem(tag); } struct keyval *firstItem(struct keyval *head) { if (head == NULL || head == head->next) return NULL; return head->next; } struct keyval *nextItem(struct keyval *head, struct keyval *item) { if (item->next == head) return NULL; return item->next; } /* Pulls all items from list which match this prefix * note: they are removed 
from the original list an returned in a new one */ struct keyval *getMatches(struct keyval *head, const char *name) { struct keyval *out = NULL; struct keyval *p; if (!head) return NULL; out = malloc(sizeof(struct keyval)); if (!out) return NULL; initList(out); p = head->next; while(p != head) { struct keyval *next = p->next; if (!strncmp(p->key, name, strlen(name))) { p->next->prev = p->prev; p->prev->next = p->next; pushItem(out, p); } p = next; } if (listHasData(out)) return out; free(out); return NULL; } void updateItem(struct keyval *head, const char *name, const char *value) { struct keyval *item; if (!head) return; item = head->next; while(item != head) { if (!strcmp(item->key, name)) { #ifdef USE_TREE text_release(tree_ctx, item->value); item->value = (char *)text_get(tree_ctx,value); #else free(item->value); item->value = strdup(value); #endif return; } item = item->next; } addItem(head, name, value, 0); } struct keyval *popItem(struct keyval *head) { struct keyval *p; if (!head) return NULL; p = head->next; if (p == head) return NULL; head->next = p->next; p->next->prev = head; p->next = NULL; p->prev = NULL; return p; } void pushItem(struct keyval *head, struct keyval *item) { assert(head); assert(item); item->next = head; item->prev = head->prev; head->prev->next = item; head->prev = item; } int addItem(struct keyval *head, const char *name, const char *value, int noDupe) { struct keyval *item; assert(head); assert(name); assert(value); if (noDupe) { item = head->next; while (item != head) { if (!strcmp(item->value, value) && !strcmp(item->key, name)) return 1; item = item->next; } } item = malloc(sizeof(struct keyval)); if (!item) { fprintf(stderr, "Error allocating keyval\n"); return 2; } #ifdef USE_TREE item->key = (char *)text_get(tree_ctx,name); item->value = (char *)text_get(tree_ctx,value); #else item->key = strdup(name); item->value = strdup(value); #endif item->has_column=0; #if 1 /* Add to head */ item->next = head->next; item->prev = head; 
head->next->prev = item; head->next = item; #else /* Add to tail */ item->prev = head->prev; item->next = head; head->prev->next = item; head->prev = item; #endif return 0; } void resetList(struct keyval *head) { struct keyval *item; while((item = popItem(head))) freeItem(item); } void cloneList( struct keyval *target, struct keyval *source ) { struct keyval *ptr; for( ptr = source->next; ptr != source; ptr=ptr->next ) addItem( target, ptr->key, ptr->value, 0 ); } /* create an escaped version of the string for hstore table insert */ /* make shure dst is 2*strlen(src) */ static void escape4hstore(char *dst, char *src) { size_t i,j; j=0; for (i=0;ikey, tags->value); } void keyval2hstore_manual(char *hstring, char *key, char *value) { static char* str=NULL; static size_t stlen=0; size_t len; len=strlen(value); if (len>stlen) { stlen=len; str=realloc(str,1+stlen*2); } len=strlen(key); if (len>stlen) { stlen=len; str=realloc(str,1+stlen*2); } escape4hstore(str,key); hstring+=sprintf(hstring,"\"%s\"=>",str); escape4hstore(str,value); sprintf(hstring,"\"%s\"",str); } osm2pgsql-0.82.0/keyvals.h000066400000000000000000000026171213272333300153640ustar00rootroot00000000000000/* Common key-value list processing * * Used as a small general purpose store for * tags, segment lists etc * */ #ifndef KEYVAL_H #define KEYVAL_H struct keyval { char *key; char *value; /* if a hstore column is requested we need a flag to store if a key has its own column because it should not be added to the hstore in this case */ int has_column; struct keyval *next; struct keyval *prev; }; void initList(struct keyval *head); void freeItem(struct keyval *p); unsigned int countList(struct keyval *head); int listHasData(struct keyval *head); char *getItem(struct keyval *head, const char *name); struct keyval *getTag(struct keyval *head, const char *name); void removeTag(struct keyval *tag); struct keyval *firstItem(struct keyval *head); struct keyval *nextItem(struct keyval *head, struct keyval *item); 
struct keyval *popItem(struct keyval *head); void pushItem(struct keyval *head, struct keyval *item); int addItem(struct keyval *head, const char *name, const char *value, int noDupe); void resetList(struct keyval *head); struct keyval *getMatches(struct keyval *head, const char *name); void updateItem(struct keyval *head, const char *name, const char *value); void cloneList( struct keyval *target, struct keyval *source ); void keyval2hstore(char *hstring, struct keyval *tags); void keyval2hstore_manual(char *hstring, char *key, char *value); #endif osm2pgsql-0.82.0/legacy/000077500000000000000000000000001213272333300147735ustar00rootroot00000000000000osm2pgsql-0.82.0/legacy/Makefile.am000066400000000000000000000004071213272333300170300ustar00rootroot00000000000000bin_PROGRAMS = osm2pgsql-legacy osm2pgsql_legacy_SOURCES = build_geometry.cpp osm2pgsql.c AM_CFLAGS=@XML2_CFLAGS@ @GEOS_CFLAGS@ AM_CPPFLAGS=@XML2_CFLAGS@ @GEOS_CFLAGS@ AM_LDFLAGS=@XML2_LDFLAGS@ @GEOS_LDFLAGS@ @GEOS_LIBS@ noinst_HEADERS = build_geometry.h osm2pgsql-0.82.0/legacy/build_geometry.cpp000066400000000000000000000103461213272333300205150ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # Part of osm2pgsql utility #----------------------------------------------------------------------------- # By Artem Pavlenko, Copyright 2007 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #include #include #if (GEOS_VERSION_MAJOR==3) /* geos trunk (3.0.0rc) */ #include #include #include #include #include #include #include #include #include #include using namespace geos::geom; using namespace geos::io; using namespace geos::operation::linemerge; #else /* geos-2.2 */ #include #include #include using namespace geos; #endif #include "build_geometry.h" struct Segment { Segment(double x0_,double y0_,double x1_,double y1_) :x0(x0_),y0(y0_),x1(x1_),y1(y1_) {} double x0; double y0; double x1; double y1; }; static std::vector segs; static std::vector wkts; typedef std::auto_ptr geom_ptr; int is_simple(const char* wkt) { GeometryFactory factory; WKTReader reader(&factory); geom_ptr geom(reader.read(wkt)); if (geom->isSimple()) return 1; return 0; } void add_segment(double x0,double y0,double x1,double y1) { segs.push_back(Segment(x0,y0,x1,y1)); } const char * get_wkt(size_t index) { return wkts[index].c_str(); } void clear_wkts() { wkts.clear(); } size_t build_geometry(int polygon) { size_t wkt_size = 0; GeometryFactory factory; geom_ptr segment(0); std::auto_ptr > lines(new std::vector); std::vector::const_iterator pos=segs.begin(); std::vector::const_iterator end=segs.end(); bool first=true; try { while (pos != end) { if (pos->x0 != pos->x1 || pos->y0 != pos->y1) { std::auto_ptr coords(factory.getCoordinateSequenceFactory()->create(0,2)); coords->add(Coordinate(pos->x0,pos->y0)); coords->add(Coordinate(pos->x1,pos->y1)); geom_ptr linestring(factory.createLineString(coords.release())); if (first) { segment = linestring; first=false; } else { lines->push_back(linestring.release()); } } ++pos; } segs.clear(); if (segment.get()) { geom_ptr mline 
(factory.createMultiLineString(lines.release())); geom_ptr noded (segment->Union(mline.get())); LineMerger merger; merger.add(noded.get()); std::auto_ptr > merged(merger.getMergedLineStrings()); WKTWriter writer; for (unsigned i=0 ;i < merged->size(); ++i) { std::auto_ptr pline ((*merged ) [i]); if (polygon == 1 && pline->getNumPoints() > 3 && pline->isClosed()) { std::auto_ptr ring(factory.createLinearRing(pline->getCoordinates())); geom_ptr poly(factory.createPolygon(ring.release(),0)); std::string text = writer.write(poly.get()); wkts.push_back(text); ++wkt_size; } else { std::string text = writer.write(pline.get()); wkts.push_back(text); ++wkt_size; } } } } catch (...) { std::cerr << "excepton caught \n"; wkt_size = 0; } return wkt_size; } osm2pgsql-0.82.0/legacy/build_geometry.h000066400000000000000000000024641213272333300201640ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # Part of osm2pgsql utility #----------------------------------------------------------------------------- # By Artem Pavlenko, Copyright 2007 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
#----------------------------------------------------------------------------- */ #ifndef BUILD_GEOMETRY_H #define BUILD_GEOMETRY_H #ifdef __cplusplus extern "C" { #endif int is_simple(const char* wkt); void add_segment(double x0,double y0,double x1, double y1); const char* get_wkt(size_t index); size_t build_geometry(int polygon); void clear_wkts(); #ifdef __cplusplus } #endif #endif osm2pgsql-0.82.0/legacy/osm2pgsql.c000066400000000000000000000435071213272333300170770ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik # Use: osm2pgsql planet.osm > planet.sql #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include "build_geometry.h" #if 0 #define DEBUG printf #else #define DEBUG(x, ...) 
#endif struct tagDesc { const char *name; const char *type; const int polygon; }; static struct tagDesc exportTags[] = { {"name", "text", 0}, {"place", "text", 0}, {"landuse", "text", 1}, {"leisure", "text", 1}, {"natural", "text", 1}, {"man_made","text", 0}, {"waterway","text", 0}, {"highway", "text", 0}, {"foot", "text", 0}, {"horse", "text", 0}, {"bicycle", "text", 0}, {"motorcar","text", 0}, {"residence","text", 0}, {"railway", "text", 0}, {"amenity", "text", 1}, {"tourism", "text", 1}, {"learning","text", 0}, {"building","text", 1}, {"bridge", "text", 0}, {"layer", "text", 0}, {"junction","text", 0}, {"sport", "text", 1}, {"route", "text", 0}, {"aeroway", "text", 0} }; static const char *table_name_point = "planet_osm_point"; static const char *table_name_line = "planet_osm_line"; static const char *table_name_polygon = "planet_osm_polygon"; #define MAX_ID_NODE (35000000) #define MAX_ID_SEGMENT (35000000) struct osmNode { double lon; double lat; }; struct osmSegment { unsigned int from; unsigned int to; }; struct osmWay { char *values; char *wkt; }; static struct osmNode nodes[MAX_ID_NODE+1]; static struct osmSegment segments[MAX_ID_SEGMENT+1]; static int count_node, count_all_node, max_node; static int count_segment, count_all_segment, max_segment; static int count_way, count_all_way, max_way; static int count_way_seg; struct keyval { char *key; char *value; struct keyval *next; struct keyval *prev; }; static struct keyval keys, tags, segs; void usage(const char *arg0) { fprintf(stderr, "Usage error:\n\t%s planet.osm > planet.sql\n", arg0); fprintf(stderr, "or\n\tgzip -dc planet.osm.gz | %s - | gzip -c > planet.sql.gz\n", arg0); } void initList(struct keyval *head) { head->next = head; head->prev = head; head->key = NULL; head->value = NULL; } void freeItem(struct keyval *p) { free(p->key); free(p->value); free(p); } unsigned int countList(struct keyval *head) { struct keyval *p = head->next; unsigned int count = 0; while(p != head) { count++; p = p->next; } 
return count; } int listHasData(struct keyval *head) { return (head->next != head); } char *getItem(struct keyval *head, const char *name) { struct keyval *p = head->next; while(p != head) { if (!strcmp(p->key, name)) return p->value; p = p->next; } return NULL; } struct keyval *popItem(struct keyval *head) { struct keyval *p = head->next; if (p == head) return NULL; head->next = p->next; p->next->prev = head; p->next = NULL; p->prev = NULL; return p; } void pushItem(struct keyval *head, struct keyval *item) { item->next = head; item->prev = head->prev; head->prev->next = item; head->prev = item; } int addItem(struct keyval *head, const char *name, const char *value, int noDupe) { struct keyval *item; if (noDupe) { item = head->next; while (item != head) { if (!strcmp(item->value, value) && !strcmp(item->key, name)) { //fprintf(stderr, "Discarded %s=%s\n", name, value); return 1; } item = item->next; } } item = malloc(sizeof(struct keyval)); if (!item) { fprintf(stderr, "Error allocating keyval\n"); return 2; } item->key = strdup(name); item->value = strdup(value); item->next = head->next; item->prev = head; head->next->prev = item; head->next = item; return 0; } void resetList(struct keyval *head) { struct keyval *item; while((item = popItem(head))) freeItem(item); } size_t WKT(int polygon) { while (listHasData(&segs)) { struct keyval *p; unsigned int id, to, from; double x0, y0, x1, y1; p = popItem(&segs); id = strtoul(p->value, NULL, 10); freeItem(p); from = segments[id].from; to = segments[id].to; x0 = nodes[from].lon; y0 = nodes[from].lat; x1 = nodes[to].lon; y1 = nodes[to].lat; add_segment(x0,y0,x1,y1); } return build_geometry(polygon); } void StartElement(xmlTextReaderPtr reader, const xmlChar *name) { xmlChar *xid, *xlat, *xlon, *xfrom, *xto, *xk, *xv; unsigned int id, to, from; double lon, lat; char *k; if (xmlStrEqual(name, BAD_CAST "node")) { struct osmNode *node; xid = xmlTextReaderGetAttribute(reader, BAD_CAST "id"); xlon = 
xmlTextReaderGetAttribute(reader, BAD_CAST "lon"); xlat = xmlTextReaderGetAttribute(reader, BAD_CAST "lat"); assert(xid); assert(xlon); assert(xlat); id = strtoul((char *)xid, NULL, 10); lon = strtod((char *)xlon, NULL); lat = strtod((char *)xlat, NULL); assert(id > 0); assert(id < MAX_ID_NODE); if (id > max_node) max_node = id; count_all_node++; if (count_all_node%10000 == 0) fprintf(stderr, "\rProcessing: Node(%dk)", count_all_node/1000); node = &nodes[id]; node->lon = lon; node->lat = lat; DEBUG("NODE(%d) %f %f\n", id, lon, lat); addItem(&keys, "id", (char *)xid, 0); xmlFree(xid); xmlFree(xlon); xmlFree(xlat); } else if (xmlStrEqual(name, BAD_CAST "segment")) { xid = xmlTextReaderGetAttribute(reader, BAD_CAST "id"); xfrom = xmlTextReaderGetAttribute(reader, BAD_CAST "from"); xto = xmlTextReaderGetAttribute(reader, BAD_CAST "to"); assert(xid); assert(xfrom); assert(xto); id = strtoul((char *)xid, NULL, 10); from = strtoul((char *)xfrom, NULL, 10); to = strtoul((char *)xto, NULL, 10); assert(id > 0); assert(id < MAX_ID_SEGMENT); if (id > max_segment) max_segment = id; if (count_all_segment == 0) fprintf(stderr, "\n"); count_all_segment++; if (count_all_segment%10000 == 0) fprintf(stderr, "\rProcessing: Segment(%dk)", count_all_segment/1000); if (!nodes[to].lat && !nodes[to].lon) { DEBUG("SEGMENT(%d), NODE(%d) is missing\n", id, to); } else if (!nodes[from].lat && !nodes[from].lon) { DEBUG("SEGMENT(%d), NODE(%d) is missing\n", id, from); } else { if (from != to) { struct osmSegment *segment; segment = &segments[id]; segment->to = to; segment->from = from; count_segment++; DEBUG("SEGMENT(%d) %d, %d\n", id, from, to); } } xmlFree(xid); xmlFree(xfrom); xmlFree(xto); } else if (xmlStrEqual(name, BAD_CAST "tag")) { char *p; xk = xmlTextReaderGetAttribute(reader, BAD_CAST "k"); xv = xmlTextReaderGetAttribute(reader, BAD_CAST "v"); assert(xk); assert(xv); k = (char *)xmlStrdup(xk); while ((p = strchr(k, ':'))) *p = '_'; while ((p = strchr(k, ' '))) *p = '_'; 
addItem(&tags, k, (char *)xv, 0); DEBUG("\t%s = %s\n", xk, xv); xmlFree(k); xmlFree(xk); xmlFree(xv); } else if (xmlStrEqual(name, BAD_CAST "way")) { xid = xmlTextReaderGetAttribute(reader, BAD_CAST "id"); assert(xid); id = strtoul((char *)xid, NULL, 10); addItem(&keys, "id", (char *)xid, 0); DEBUG("WAY(%s)\n", xid); if (id > max_way) max_way = id; if (count_all_way == 0) fprintf(stderr, "\n"); count_all_way++; if (count_all_way%1000 == 0) fprintf(stderr, "\rProcessing: Way(%dk)", count_all_way/1000); xmlFree(xid); } else if (xmlStrEqual(name, BAD_CAST "seg")) { xid = xmlTextReaderGetAttribute(reader, BAD_CAST "id"); assert(xid); id = strtoul((char *)xid, NULL, 10); if (!id || (id > MAX_ID_SEGMENT)) DEBUG("\tSEG(%s) - invalid segment ID\n", xid); else if (!segments[id].from || !segments[id].to) DEBUG("\tSEG(%s) - missing segment\n", xid); else { if (addItem(&segs, "id", (char *)xid, 1)) { const char *way_id = getItem(&keys, "id"); if (!way_id) way_id = "???"; //fprintf(stderr, "Way %s with duplicate segment id %d\n", way_id, id); count_way_seg++; } DEBUG("\tSEG(%s)\n", xid); } xmlFree(xid); } else if (xmlStrEqual(name, BAD_CAST "osm")) { /* ignore */ } else { fprintf(stderr, "%s: Unknown element name: %s\n", __FUNCTION__, name); } } void EndElement(xmlTextReaderPtr reader, const xmlChar *name) { unsigned int id; DEBUG("%s: %s\n", __FUNCTION__, name); if (xmlStrEqual(name, BAD_CAST "node")) { int i; char *values = NULL, *names = NULL; char *osm_id = getItem(&keys, "id"); if (!osm_id) { fprintf(stderr, "%s: Node ID not in keys\n", __FUNCTION__); resetList(&keys); resetList(&tags); return; } id = strtoul(osm_id, NULL, 10); //assert(nodes[id].lat && nodes[id].lon); for (i=0; i < sizeof(exportTags) / sizeof(exportTags[0]); i++) { char *v; if ((v = getItem(&tags, exportTags[i].name))) { if (values) { char *oldval = values, *oldnam = names; asprintf(&names, "%s,\"%s\"", oldnam, exportTags[i].name); asprintf(&values, "%s,$$%s$$", oldval, v); free(oldnam); free(oldval); } 
else { asprintf(&names, "\"%s\"", exportTags[i].name); asprintf(&values, "$$%s$$", v); } } } if (values) { count_node++; printf("insert into %s (osm_id,%s,way) values " "(%s,%s,GeomFromText('POINT(%.15g %.15g)',4326));\n", table_name_point,names,osm_id,values,nodes[id].lon, nodes[id].lat); } resetList(&keys); resetList(&tags); free(values); free(names); } else if (xmlStrEqual(name, BAD_CAST "segment")) { resetList(&tags); } else if (xmlStrEqual(name, BAD_CAST "tag")) { /* Separate tag list so tag stack unused */ } else if (xmlStrEqual(name, BAD_CAST "way")) { int i, polygon = 0; char *values = NULL, *names = NULL; char *osm_id = getItem(&keys, "id"); if (!osm_id) { fprintf(stderr, "%s: WAY ID not in keys\n", __FUNCTION__); resetList(&keys); resetList(&tags); resetList(&segs); return; } if (!listHasData(&segs)) { DEBUG("%s: WAY(%s) has no segments\n", __FUNCTION__, osm_id); resetList(&keys); resetList(&tags); resetList(&segs); return; } id = strtoul(osm_id, NULL, 10); for (i=0; i < sizeof(exportTags) / sizeof(exportTags[0]); i++) { char *v; if ((v = getItem(&tags, exportTags[i].name))) { if (values) { char *oldval = values, *oldnam = names; asprintf(&names, "%s,\"%s\"", oldnam, exportTags[i].name); asprintf(&values, "%s,$$%s$$", oldval, v); free(oldnam); free(oldval); } else { asprintf(&names, "\"%s\"", exportTags[i].name); asprintf(&values, "$$%s$$", v); } polygon |= exportTags[i].polygon; } } if (values) { size_t wkt_size = WKT(polygon); if (wkt_size) { unsigned i; for (i=0;i # Copyright (c) 2011 Maarten Bosmans # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 2 AC_DEFUN([AX_APPEND_FLAG], [AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX AS_VAR_PUSHDEF([FLAGS], [m4_default($2,_AC_LANG_PREFIX[FLAGS])])dnl AS_VAR_SET_IF(FLAGS, [case " AS_VAR_GET(FLAGS) " in *" $1 "*) AC_RUN_LOG([: FLAGS already contains $1]) ;; *) AC_RUN_LOG([: FLAGS="$FLAGS $1"]) AS_VAR_SET(FLAGS, ["AS_VAR_GET(FLAGS) $1"]) ;; esac], [AS_VAR_SET(FLAGS,["$1"])]) AS_VAR_POPDEF([FLAGS])dnl ])dnl AX_APPEND_FLAG osm2pgsql-0.82.0/m4/ax_cflags_warn_all.m4000066400000000000000000000116711213272333300201250ustar00rootroot00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_cflags_warn_all.html # =========================================================================== # # SYNOPSIS # # AX_CFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] # AX_CXXFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] # AX_FCFLAGS_WARN_ALL [(shellvar [,default, [A/NA]])] # # DESCRIPTION # # Try to find a compiler option that enables most reasonable warnings. 
# # For the GNU compiler it will be -Wall (and -ansi -pedantic) The result # is added to the shellvar being CFLAGS, CXXFLAGS, or FCFLAGS by default. # # Currently this macro knows about the GCC, Solaris, Digital Unix, AIX, # HP-UX, IRIX, NEC SX-5 (Super-UX 10), Cray J90 (Unicos 10.0.0.8), and # Intel compilers. For a given compiler, the Fortran flags are much more # experimental than their C equivalents. # # - $1 shell-variable-to-add-to : CFLAGS, CXXFLAGS, or FCFLAGS # - $2 add-value-if-not-found : nothing # - $3 action-if-found : add value to shellvariable # - $4 action-if-not-found : nothing # # NOTE: These macros depend on AX_APPEND_FLAG. # # LICENSE # # Copyright (c) 2008 Guido U. Draheim # Copyright (c) 2010 Rhys Ulerich # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. 
When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 14 AC_DEFUN([AX_FLAGS_WARN_ALL],[dnl AS_VAR_PUSHDEF([FLAGS],[_AC_LANG_PREFIX[]FLAGS])dnl AS_VAR_PUSHDEF([VAR],[ac_cv_[]_AC_LANG_ABBREV[]flags_warn_all])dnl AC_CACHE_CHECK([m4_ifval($1,$1,FLAGS) for maximum warnings], VAR,[VAR="no, unknown" ac_save_[]FLAGS="$[]FLAGS" for ac_arg dnl in "-warn all % -warn all" dnl Intel "-pedantic % -Wall" dnl GCC "-xstrconst % -v" dnl Solaris C "-std1 % -verbose -w0 -warnprotos" dnl Digital Unix "-qlanglvl=ansi % -qsrcmsg -qinfo=all:noppt:noppc:noobs:nocnd" dnl AIX "-ansi -ansiE % -fullwarn" dnl IRIX "+ESlit % +w1" dnl HP-UX C "-Xc % -pvctl[,]fullmsg" dnl NEC SX-5 (Super-UX 10) "-h conform % -h msglevel 2" dnl Cray C (Unicos) # do FLAGS="$ac_save_[]FLAGS "`echo $ac_arg | sed -e 's,%%.*,,' -e 's,%,,'` AC_COMPILE_IFELSE([AC_LANG_PROGRAM], [VAR=`echo $ac_arg | sed -e 's,.*% *,,'` ; break]) done FLAGS="$ac_save_[]FLAGS" ]) AS_VAR_POPDEF([FLAGS])dnl AC_REQUIRE([AX_APPEND_FLAG]) case ".$VAR" in .ok|.ok,*) m4_ifvaln($3,$3) ;; .|.no|.no,*) m4_default($4,[m4_ifval($2,[AX_APPEND_FLAG([$2], [$1])])]) ;; *) m4_default($3,[AX_APPEND_FLAG([$VAR], [$1])]) ;; esac AS_VAR_POPDEF([VAR])dnl ])dnl AX_FLAGS_WARN_ALL dnl implementation tactics: dnl the for-argument contains a list of options. The first part of dnl these does only exist to detect the compiler - usually it is dnl a global option to enable -ansi or -extrawarnings. All other dnl compilers will fail about it. That was needed since a lot of dnl compilers will give false positives for some option-syntax dnl like -Woption or -Xoption as they think of it is a pass-through dnl to later compile stages or something. The "%" is used as a dnl delimiter. A non-option comment can be given after "%%" marks dnl which will be shown but not added to the respective C/CXXFLAGS. 
AC_DEFUN([AX_CFLAGS_WARN_ALL],[dnl AC_LANG_PUSH([C]) AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) AC_LANG_POP([C]) ]) AC_DEFUN([AX_CXXFLAGS_WARN_ALL],[dnl AC_LANG_PUSH([C++]) AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) AC_LANG_POP([C++]) ]) AC_DEFUN([AX_FCFLAGS_WARN_ALL],[dnl AC_LANG_PUSH([Fortran]) AX_FLAGS_WARN_ALL([$1], [$2], [$3], [$4]) AC_LANG_POP([Fortran]) ]) osm2pgsql-0.82.0/m4/ax_compare_version.m4000066400000000000000000000146531213272333300202050ustar00rootroot00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_compare_version.html # =========================================================================== # # SYNOPSIS # # AX_COMPARE_VERSION(VERSION_A, OP, VERSION_B, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) # # DESCRIPTION # # This macro compares two version strings. Due to the various number of # minor-version numbers that can exist, and the fact that string # comparisons are not compatible with numeric comparisons, this is not # necessarily trivial to do in a autoconf script. This macro makes doing # these comparisons easy. # # The six basic comparisons are available, as well as checking equality # limited to a certain number of minor-version levels. # # The operator OP determines what type of comparison to do, and can be one # of: # # eq - equal (test A == B) # ne - not equal (test A != B) # le - less than or equal (test A <= B) # ge - greater than or equal (test A >= B) # lt - less than (test A < B) # gt - greater than (test A > B) # # Additionally, the eq and ne operator can have a number after it to limit # the test to that number of minor versions. # # eq0 - equal up to the length of the shorter version # ne0 - not equal up to the length of the shorter version # eqN - equal up to N sub-version levels # neN - not equal up to N sub-version levels # # When the condition is true, shell commands ACTION-IF-TRUE are run, # otherwise shell commands ACTION-IF-FALSE are run. 
The environment # variable 'ax_compare_version' is always set to either 'true' or 'false' # as well. # # Examples: # # AX_COMPARE_VERSION([3.15.7],[lt],[3.15.8]) # AX_COMPARE_VERSION([3.15],[lt],[3.15.8]) # # would both be true. # # AX_COMPARE_VERSION([3.15.7],[eq],[3.15.8]) # AX_COMPARE_VERSION([3.15],[gt],[3.15.8]) # # would both be false. # # AX_COMPARE_VERSION([3.15.7],[eq2],[3.15.8]) # # would be true because it is only comparing two minor versions. # # AX_COMPARE_VERSION([3.15.7],[eq0],[3.15]) # # would be true because it is only comparing the lesser number of minor # versions of the two values. # # Note: The characters that separate the version numbers do not matter. An # empty string is the same as version 0. OP is evaluated by autoconf, not # configure, so must be a string, not a variable. # # The author would like to acknowledge Guido Draheim whose advice about # the m4_case and m4_ifvaln functions make this macro only include the # portions necessary to perform the specific comparison specified by the # OP argument in the final configure script. # # LICENSE # # Copyright (c) 2008 Tim Toolan # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. This file is offered as-is, without any # warranty. #serial 11 dnl ######################################################################### AC_DEFUN([AX_COMPARE_VERSION], [ AC_REQUIRE([AC_PROG_AWK]) # Used to indicate true or false condition ax_compare_version=false # Convert the two version strings to be compared into a format that # allows a simple string comparison. The end result is that a version # string of the form 1.12.5-r617 will be converted to the form # 0001001200050617. In other words, each number is zero padded to four # digits, and non digits are removed. 
AS_VAR_PUSHDEF([A],[ax_compare_version_A]) A=`echo "$1" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \ -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \ -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \ -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \ -e 's/[[^0-9]]//g'` AS_VAR_PUSHDEF([B],[ax_compare_version_B]) B=`echo "$3" | sed -e 's/\([[0-9]]*\)/Z\1Z/g' \ -e 's/Z\([[0-9]]\)Z/Z0\1Z/g' \ -e 's/Z\([[0-9]][[0-9]]\)Z/Z0\1Z/g' \ -e 's/Z\([[0-9]][[0-9]][[0-9]]\)Z/Z0\1Z/g' \ -e 's/[[^0-9]]//g'` dnl # In the case of le, ge, lt, and gt, the strings are sorted as necessary dnl # then the first line is used to determine if the condition is true. dnl # The sed right after the echo is to remove any indented white space. m4_case(m4_tolower($2), [lt],[ ax_compare_version=`echo "x$A x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/false/;s/x${B}/true/;1q"` ], [gt],[ ax_compare_version=`echo "x$A x$B" | sed 's/^ *//' | sort | sed "s/x${A}/false/;s/x${B}/true/;1q"` ], [le],[ ax_compare_version=`echo "x$A x$B" | sed 's/^ *//' | sort | sed "s/x${A}/true/;s/x${B}/false/;1q"` ], [ge],[ ax_compare_version=`echo "x$A x$B" | sed 's/^ *//' | sort -r | sed "s/x${A}/true/;s/x${B}/false/;1q"` ],[ dnl Split the operator from the subversion count if present. m4_bmatch(m4_substr($2,2), [0],[ # A count of zero means use the length of the shorter version. # Determine the number of characters in A and B. ax_compare_version_len_A=`echo "$A" | $AWK '{print(length)}'` ax_compare_version_len_B=`echo "$B" | $AWK '{print(length)}'` # Set A to no more than B's length and B to no more than A's length. 
A=`echo "$A" | sed "s/\(.\{$ax_compare_version_len_B\}\).*/\1/"` B=`echo "$B" | sed "s/\(.\{$ax_compare_version_len_A\}\).*/\1/"` ], [[0-9]+],[ # A count greater than zero means use only that many subversions A=`echo "$A" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"` B=`echo "$B" | sed "s/\(\([[0-9]]\{4\}\)\{m4_substr($2,2)\}\).*/\1/"` ], [.+],[ AC_WARNING( [illegal OP numeric parameter: $2]) ],[]) # Pad zeros at end of numbers to make same length. ax_compare_version_tmp_A="$A`echo $B | sed 's/./0/g'`" B="$B`echo $A | sed 's/./0/g'`" A="$ax_compare_version_tmp_A" # Check for equality or inequality as necessary. m4_case(m4_tolower(m4_substr($2,0,2)), [eq],[ test "x$A" = "x$B" && ax_compare_version=true ], [ne],[ test "x$A" != "x$B" && ax_compare_version=true ],[ AC_WARNING([illegal OP parameter: $2]) ]) ]) AS_VAR_POPDEF([A])dnl AS_VAR_POPDEF([B])dnl dnl # Execute ACTION-IF-TRUE / ACTION-IF-FALSE. if test "$ax_compare_version" = "true" ; then m4_ifvaln([$4],[$4],[:])dnl m4_ifvaln([$5],[else $5])dnl fi ]) dnl AX_COMPARE_VERSION osm2pgsql-0.82.0/m4/ax_compile_check_sizeof.m4000066400000000000000000000102361213272333300211470ustar00rootroot00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_compile_check_sizeof.html # =========================================================================== # # SYNOPSIS # # AX_COMPILE_CHECK_SIZEOF(TYPE [, HEADERS [, EXTRA_SIZES...]]) # # DESCRIPTION # # This macro checks for the size of TYPE using compile checks, not run # checks. You can supply extra HEADERS to look into. the check will cycle # through 1 2 4 8 16 and any EXTRA_SIZES the user supplies. If a match is # found, it will #define SIZEOF_`TYPE' to that value. Otherwise it will # emit a configure time error indicating the size of the type could not be # determined. # # The trick is that C will not allow duplicate case labels. 
While this is # valid C code: # # switch (0) case 0: case 1:; # # The following is not: # # switch (0) case 0: case 0:; # # Thus, the AC_TRY_COMPILE will fail if the currently tried size does not # match. # # Here is an example skeleton configure.in script, demonstrating the # macro's usage: # # AC_PROG_CC # AC_CHECK_HEADERS(stddef.h unistd.h) # AC_TYPE_SIZE_T # AC_CHECK_TYPE(ssize_t, int) # # headers='#ifdef HAVE_STDDEF_H # #include # #endif # #ifdef HAVE_UNISTD_H # #include # #endif # ' # # AX_COMPILE_CHECK_SIZEOF(char) # AX_COMPILE_CHECK_SIZEOF(short) # AX_COMPILE_CHECK_SIZEOF(int) # AX_COMPILE_CHECK_SIZEOF(long) # AX_COMPILE_CHECK_SIZEOF(unsigned char *) # AX_COMPILE_CHECK_SIZEOF(void *) # AX_COMPILE_CHECK_SIZEOF(size_t, $headers) # AX_COMPILE_CHECK_SIZEOF(ssize_t, $headers) # AX_COMPILE_CHECK_SIZEOF(ptrdiff_t, $headers) # AX_COMPILE_CHECK_SIZEOF(off_t, $headers) # # LICENSE # # Copyright (c) 2008 Kaveh Ghazi # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. 
The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 5 AU_ALIAS([AC_COMPILE_CHECK_SIZEOF], [AX_COMPILE_CHECK_SIZEOF]) AC_DEFUN([AX_COMPILE_CHECK_SIZEOF], [changequote(<<, >>)dnl dnl The name to #define. define(<>, translit(sizeof_$1, [a-z *], [A-Z_P]))dnl dnl The cache variable name. define(<>, translit(ac_cv_sizeof_$1, [ *], [_p]))dnl changequote([, ])dnl AC_MSG_CHECKING(size of $1) AC_CACHE_VAL(AC_CV_NAME, [for ac_size in 4 8 1 2 16 $3 ; do # List sizes in rough order of prevalence. AC_TRY_COMPILE([#include "confdefs.h" #include $2 ], [switch (0) case 0: case (sizeof ($1) == $ac_size):;], AC_CV_NAME=$ac_size) if test x$AC_CV_NAME != x ; then break; fi done ]) if test x$AC_CV_NAME = x ; then AC_MSG_ERROR([cannot determine a size for $1]) fi AC_MSG_RESULT($AC_CV_NAME) AC_DEFINE_UNQUOTED(AC_TYPE_NAME, $AC_CV_NAME, [The number of bytes in type $1]) undefine([AC_TYPE_NAME])dnl undefine([AC_CV_NAME])dnl ])osm2pgsql-0.82.0/m4/ax_config_nice.m4000066400000000000000000000014741213272333300172520ustar00rootroot00000000000000AC_DEFUN([AX_CONFIG_NICE],[ config_nice="config.nice" test -f $config_nice && mv $config_nice $config_nice.old rm -f $config_nice.old cat >$config_nice <> $config_nice fi done echo "'[$]0' \\" >> $config_nice for arg in $ac_configure_args; do if test `expr -- $arg : "'.*"` = 0; then if test `expr -- $arg : "--.*"` = 0; then break; fi echo "'[$]arg' \\" >> $config_nice else if test `expr -- $arg : "'--.*"` = 0; then break; fi echo "[$]arg \\" >> $config_nice fi done echo '"[$]@"' >> $config_nice chmod 755 $config_nice ]) 
osm2pgsql-0.82.0/m4/ax_lib_bzip2.m4000066400000000000000000000133631213272333300166630ustar00rootroot00000000000000# SYNOPSIS # # AX_LIB_BZIP2() # # DESCRIPTION # # This macro provides tests of availability of the bzip2 # compression library. This macro checks for bzip2 # headers and libraries and defines compilation flags # # Macro supports following options and their values: # # 1) Single-option usage: # # --with-bzip2 -- yes, no, or path to bzip2 library # installation prefix # # 2) Three-options usage (all options are required): # # --with-bzip2=yes # --with-bzip2-inc -- path to base directory with bzip2 headers # --with-bzip2-lib -- linker flags for bzip2 # # This macro calls: # # AC_SUBST(BZIP2_CFLAGS) # AC_SUBST(BZIP2_LDFLAGS) # AC_SUBST(BZIP2_LiBS) # # And sets: # # HAVE_BZIP2 # # LICENSE # # Copyright (c) 2009 Hartmut Holzgraefe # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. 
AC_DEFUN([AX_LIB_BZIP2], [ AC_ARG_WITH([bzip2], AC_HELP_STRING([--with-bzip2=@<:@ARG@:>@], [use bzip2 library from given prefix (ARG=path); check standard prefixes (ARG=yes); disable (ARG=no)] ), [ if test "$withval" = "yes"; then if test -f /usr/local/include/bzlib.h ; then bzlib_prefix=/usr/local elif test -f /usr/include/bzlib.h ; then bzlib_prefix=/usr else bzlib_prefix="" fi bzlib_requested="yes" elif test -d "$withval"; then bzlib_prefix="$withval" bzlib_requested="yes" else bzlib_prefix="" bzlib_requested="no" fi ], [ dnl Default behavior is implicit yes if test -f /usr/local/include/bzlib.h ; then bzlib_prefix=/usr/local elif test -f /usr/include/bzlib.h ; then bzlib_prefix=/usr else bzlib_prefix="" fi ] ) AC_ARG_WITH([bzip2-inc], AC_HELP_STRING([--with-bzip2-inc=@<:@DIR@:>@], [path to bzip2 library headers] ), [bzlib_include_dir="$withval"], [bzlib_include_dir=""] ) AC_ARG_WITH([bzip2-lib], AC_HELP_STRING([--with-bzip2-lib=@<:@ARG@:>@], [link options for bzip2 library] ), [bzlib_lib_flags="$withval"], [bzlib_lib_flags=""] ) BZIP2_CFLAGS="" BZIP2_LDFLAGS="" BZIP2_LIBS="" dnl dnl Collect include/lib paths and flags dnl run_bzlib_test="no" if test -n "$bzlib_prefix"; then bzlib_include_dir="$bzlib_prefix/include" bzlib_lib_flags="-L$bzlib_prefix/lib" bzlib_lib_libs="-lbz2" run_bzlib_test="yes" elif test "$bzlib_requested" = "yes"; then if test -n "$bzlib_include_dir" -a -n "$bzlib_lib_flags" -a -n "$bzlib_lib_libs"; then run_bzlib_test="yes" fi else run_bzlib_test="no" fi dnl dnl Check bzip2 files dnl if test "$run_bzlib_test" = "yes"; then saved_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I$bzlib_include_dir" saved_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $bzlib_lib_flags" saved_LIBSS="$LIBS" LIBS="$LIBS $bzlib_lib_libs" dnl dnl Check bzip2 headers dnl AC_MSG_CHECKING([for bzip2 headers in $bzlib_include_dir]) AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[]] )], [ BZIP2_CFLAGS="-I$bzlib_include_dir" bzlib_header_found="yes" 
AC_MSG_RESULT([found]) ], [ bzlib_header_found="no" AC_MSG_RESULT([not found]) ] ) AC_LANG_POP([C++]) dnl dnl Check bzip2 libraries dnl if test "$bzlib_header_found" = "yes"; then AC_MSG_CHECKING([for bzip2 library]) AC_LANG_PUSH([C++]) AC_LINK_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[ const char *version; version = BZ2_bzlibVersion(); ]] )], [ BZIP2_LDFLAGS="$bzlib_lib_flags" BZIP2_LIBS="$bzlib_lib_libs" bzlib_lib_found="yes" AC_MSG_RESULT([found]) ], [ bzlib_lib_found="no" AC_MSG_RESULT([not found]) ] ) AC_LANG_POP([C++]) fi CPPFLAGS="$saved_CPPFLAGS" LDFLAGS="$saved_LDFLAGS" LIBS="$saved_LIBS" fi AC_MSG_CHECKING([for bzip2 compression library]) if test "$run_bzlib_test" = "yes"; then if test "$bzlib_header_found" = "yes" -a "$bzlib_lib_found" = "yes"; then AC_SUBST([BZIP2_CFLAGS]) AC_SUBST([BZIP2_LDFLAGS]) AC_SUBST([BZIP2_LIBS]) AC_SUBST([HAVE_BZIP2]) AC_DEFINE([HAVE_BZIP2], [1], [Define to 1 if bzip2 library is available]) HAVE_BZIP2="yes" else HAVE_BZIP2="no" fi AC_MSG_RESULT([$HAVE_BZIP2]) else HAVE_BZIP2="no" AC_MSG_RESULT([$HAVE_BZIP2]) if test "$bzlib_requested" = "yes"; then AC_MSG_WARN([bzip2 compression support requested but headers or library not found. Specify valid prefix of bzip2 using --with-bzip2=@<:@DIR@:>@ or provide include directory and linker flags using --with-bzip2-inc and --with-bzip2-lib]) fi fi ]) osm2pgsql-0.82.0/m4/ax_lib_geos.m4000066400000000000000000000114021213272333300165620ustar00rootroot00000000000000# SYNOPSIS # # AX_LIB_GEOS([MINIMUM-VERSION]) # # DESCRIPTION # # This macro provides tests of availability of geos 'libgeos' library # of particular version or newer. # # AX_LIB_GEOS macro takes only one argument which is optional. If # there is no required version passed, then macro does not run version # test. 
# # The --with-geos option takes one of three possible values: # # no - do not check for geos library # # yes - do check for geos library in standard locations (geos-config # should be in the PATH) # # path - complete path to geos-config utility, use this option if geos-config # can't be found in the PATH # # This macro calls: # # AC_SUBST(GEOS_CFLAGS) # AC_SUBST(GEOS_LDFLAGS) # AC_SUBST(GEOS_LIBS) # AC_SUBST(GEOS_VERSION) # # And sets: # # HAVE_GEOS # # LICENSE # # Copyright (c) 2009 Hartmut Holzgraefe # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. AC_DEFUN([AX_LIB_GEOS], [ AC_ARG_WITH([geos], AC_HELP_STRING([--with-geos=@<:@ARG@:>@], [use geos library @<:@default=yes@:>@, optionally specify path to geos-config] ), [ if test "$withval" = "no"; then want_geos="no" elif test "$withval" = "yes"; then want_geos="yes" else want_geos="yes" GEOS_CONFIG="$withval" fi ], [want_geos="yes"] ) GEOS_CFLAGS="" GEOS_LDFLAGS="" GEOS_LIBS="" GEOS_VERSION="" dnl dnl Check geos libraries (geos) dnl if test "$want_geos" = "yes"; then if test -z "$GEOS_CONFIG" -o test; then AC_PATH_PROG([GEOS_CONFIG], [geos-config], []) fi if test ! -x "$GEOS_CONFIG"; then AC_MSG_ERROR([${GEOS_CONFIG:-geos-config} does not exist or it is not an exectuable file]) GEOS_CONFIG="no" found_geos="no" fi if test "$GEOS_CONFIG" != "no"; then AC_MSG_CHECKING([for geos libraries]) GEOS_CFLAGS="`$GEOS_CONFIG --cflags`" GEOS_LDFLAGS="`$GEOS_CONFIG --ldflags`" GEOS_LIBS="`$GEOS_CONFIG --libs`" GEOS_VERSION=`$GEOS_CONFIG --version` dnl Headers are in a different package in Debian, so check again. 
CPPFLAGS="$CPPFLAGS $GEOS_CFLAGS" AC_CHECK_HEADER([geos/version.h], [], [AC_MSG_ERROR([development headers for geos not found])]) echo $ac_save_CPPFLAGS CPPFLAGS="$ac_save_CPPFLAGS" AC_DEFINE([HAVE_GEOS], [1], [Define to 1 if geos libraries are available]) found_geos="yes" AC_MSG_RESULT([yes]) else found_geos="no" AC_MSG_RESULT([no]) fi fi dnl dnl Check if required version of geos is available dnl geos_version_req=ifelse([$1], [], [], [$1]) if test "$found_geos" = "yes" -a -n "$geos_version_req"; then AC_MSG_CHECKING([if geos version is >= $geos_version_req]) dnl Decompose required version string of geos dnl and calculate its number representation geos_version_req_major=`expr $geos_version_req : '\([[0-9]]*\)'` geos_version_req_minor=`expr $geos_version_req : '[[0-9]]*\.\([[0-9]]*\)'` geos_version_req_micro=`expr $geos_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` if test "x$geos_version_req_micro" = "x"; then geos_version_req_micro="0" fi geos_version_req_number=`expr $geos_version_req_major \* 1000000 \ \+ $geos_version_req_minor \* 1000 \ \+ $geos_version_req_micro` dnl Decompose version string of installed PostgreSQL dnl and calculate its number representation geos_version_major=`expr $GEOS_VERSION : '\([[0-9]]*\)'` geos_version_minor=`expr $GEOS_VERSION : '[[0-9]]*\.\([[0-9]]*\)'` geos_version_micro=`expr $GEOS_VERSION : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` if test "x$geos_version_micro" = "x"; then geos_version_micro="0" fi geos_version_number=`expr $geos_version_major \* 1000000 \ \+ $geos_version_minor \* 1000 \ \+ $geos_version_micro` geos_version_check=`expr $geos_version_number \>\= $geos_version_req_number` if test "$geos_version_check" = "1"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi fi AC_SUBST([GEOS_VERSION]) AC_SUBST([GEOS_CFLAGS]) AC_SUBST([GEOS_LDFLAGS]) AC_SUBST([GEOS_LIBS]) ]) osm2pgsql-0.82.0/m4/ax_lib_postgresql.m4000066400000000000000000000124471213272333300200420ustar00rootroot00000000000000# 
=========================================================================== # http://www.nongnu.org/autoconf-archive/ax_lib_postgresql.html # =========================================================================== # # SYNOPSIS # # AX_LIB_POSTGRESQL([MINIMUM-VERSION]) # # DESCRIPTION # # This macro provides tests of availability of PostgreSQL 'libpq' library # of particular version or newer. # # AX_LIB_POSTGRESQL macro takes only one argument which is optional. If # there is no required version passed, then macro does not run version # test. # # The --with-postgresql option takes one of three possible values: # # no - do not check for PostgreSQL client library # # yes - do check for PostgreSQL library in standard locations (pg_config # should be in the PATH) # # path - complete path to pg_config utility, use this option if pg_config # can't be found in the PATH # # This macro calls: # # AC_SUBST(POSTGRESQL_CFLAGS) # AC_SUBST(POSTGRESQL_LDFLAGS) # AC_SUBST(POSTGRESQL_LIBS) # AC_SUBST(POSTGRESQL_VERSION) # # And sets: # # HAVE_POSTGRESQL # # LICENSE # # Copyright (c) 2008 Mateusz Loskot # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. AC_DEFUN([AX_LIB_POSTGRESQL], [ AC_ARG_WITH([postgresql], AC_HELP_STRING([--with-postgresql=@<:@ARG@:>@], [use PostgreSQL library @<:@default=yes@:>@, optionally specify path to pg_config] ), [ if test "$withval" = "no"; then want_postgresql="no" elif test "$withval" = "yes"; then want_postgresql="yes" else want_postgresql="yes" PG_CONFIG="$withval" fi ], [want_postgresql="yes"] ) POSTGRESQL_CFLAGS="" POSTGRESQL_LDFLAGS="" POSTGRESQL_LIBS="" POSTGRESQL_VERSION="" dnl dnl Check PostgreSQL libraries (libpq) dnl if test "$want_postgresql" = "yes"; then if test -z "$PG_CONFIG" -o test; then AC_PATH_PROG([PG_CONFIG], [pg_config], []) fi if test ! 
-x "$PG_CONFIG"; then AC_MSG_ERROR([$PG_CONFIG does not exist or it is not an exectuable file]) PG_CONFIG="no" found_postgresql="no" fi if test "$PG_CONFIG" != "no"; then AC_MSG_CHECKING([for PostgreSQL libraries]) POSTGRESQL_CFLAGS="-I`$PG_CONFIG --includedir`" POSTGRESQL_SERVER_CFLAGS="-I`$PG_CONFIG --includedir-server`" POSTGRESQL_LDFLAGS="-L`$PG_CONFIG --libdir`" POSTGRESQL_LIBS="-lpq" POSTGRESQL_VERSION=`$PG_CONFIG --version | sed -e 's#PostgreSQL ##'` AC_DEFINE([HAVE_POSTGRESQL], [1], [Define to 1 if PostgreSQL libraries are available]) POSTGRESQL_PGXS=`$PG_CONFIG --pgxs` if test -f "$POSTGRESQL_PGXS" then found_postgresql="yes" AC_MSG_RESULT([yes]) fi else found_postgresql="no" AC_MSG_RESULT([no]) fi fi dnl dnl Check if required version of PostgreSQL is available dnl postgresql_version_req=ifelse([$1], [], [], [$1]) if test "$found_postgresql" = "yes" -a -n "$postgresql_version_req"; then AC_MSG_CHECKING([if PostgreSQL version is >= $postgresql_version_req]) dnl Decompose required version string of PostgreSQL dnl and calculate its number representation postgresql_version_req_major=`expr $postgresql_version_req : '\([[0-9]]*\)'` postgresql_version_req_minor=`expr $postgresql_version_req : '[[0-9]]*\.\([[0-9]]*\)'` postgresql_version_req_micro=`expr $postgresql_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` if test "x$postgresql_version_req_micro" = "x"; then postgresql_version_req_micro="0" fi postgresql_version_req_number=`expr $postgresql_version_req_major \* 1000000 \ \+ $postgresql_version_req_minor \* 1000 \ \+ $postgresql_version_req_micro` dnl Decompose version string of installed PostgreSQL dnl and calculate its number representation postgresql_version_major=`expr $POSTGRESQL_VERSION : '\([[0-9]]*\)'` postgresql_version_minor=`expr $POSTGRESQL_VERSION : '[[0-9]]*\.\([[0-9]]*\)'` postgresql_version_micro=`expr $POSTGRESQL_VERSION : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` if test "x$postgresql_version_micro" = "x"; then postgresql_version_micro="0" fi 
postgresql_version_number=`expr $postgresql_version_major \* 1000000 \ \+ $postgresql_version_minor \* 1000 \ \+ $postgresql_version_micro` postgresql_version_check=`expr $postgresql_version_number \>\= $postgresql_version_req_number` if test "$postgresql_version_check" = "1"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi fi AC_SUBST([POSTGRESQL_PGXS]) AC_SUBST([POSTGRESQL_VERSION]) AC_SUBST([POSTGRESQL_CFLAGS]) AC_SUBST([POSTGRESQL_SERVER_CFLAGS]) AC_SUBST([POSTGRESQL_LDFLAGS]) AC_SUBST([POSTGRESQL_LIBS]) ]) osm2pgsql-0.82.0/m4/ax_lib_proj.m4000066400000000000000000000127721213272333300166120ustar00rootroot00000000000000# SYNOPSIS # # AX_LIB_PROJ() # # DESCRIPTION # # This macro provides tests of availability of the proj # projection library. This macro checks for proj # headers and libraries and defines compilation flags # # Macro supports following options and their values: # # 1) Single-option usage: # # --with-proj -- yes, no, or path to proj library # installation prefix # # 2) Three-options usage (all options are required): # # --with-proj=yes # --with-proj-inc -- path to base directory with proj headers # --with-proj-lib -- linker flags for proj # # This macro calls: # # AC_SUBST(PROJ_CFLAGS) # AC_SUBST(PROJ_LDFLAGS) # AC_SUBST(PROJ_LIBSS) # # And sets: # # HAVE_PROJ # # LICENSE # # Copyright (c) 2009 Hartmut Holzgraefe # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. 
AC_DEFUN([AX_LIB_PROJ], [ AC_ARG_WITH([proj], AC_HELP_STRING([--with-proj=@<:@ARG@:>@], [use proj library from given prefix (ARG=path); check standard prefixes (ARG=yes); disable (ARG=no)] ), [ if test "$withval" = "yes"; then if test -f /usr/local/include/proj_api.h ; then proj_prefix=/usr/local elif test -f /usr/include/proj_api.h ; then proj_prefix=/usr else proj_prefix="" fi proj_requested="yes" elif test -d "$withval"; then proj_prefix="$withval" proj_requested="yes" else proj_prefix="" proj_requested="no" fi ], [ dnl Default behavior is implicit yes if test -f /usr/local/include/proj_api.h ; then proj_prefix=/usr/local elif test -f /usr/include/proj_api.h ; then proj_prefix=/usr else proj_prefix="" fi ] ) AC_ARG_WITH([proj-inc], AC_HELP_STRING([--with-proj-inc=@<:@DIR@:>@], [path to proj library headers] ), [proj_include_dir="$withval"], [proj_include_dir=""] ) AC_ARG_WITH([proj-lib], AC_HELP_STRING([--with-proj-lib=@<:@ARG@:>@], [link options for proj library] ), [proj_lib_flags="$withval"], [proj_lib_flags=""] ) PROJ_CFLAGS="" PROJ_LDFLAGS="" PROJ_LIBS="" dnl dnl Collect include/lib paths and flags dnl run_proj_test="no" if test -n "$proj_prefix"; then proj_include_dir="$proj_prefix/include" proj_lib_flags="-L$proj_prefix/lib" proj_lib_libs="-lproj" run_proj_test="yes" elif test "$proj_requested" = "yes"; then if test -n "$proj_include_dir" -a -n "$proj_lib_flags" -a -n "$proj_lib_libs"; then run_proj_test="yes" fi else run_proj_test="no" fi dnl dnl Check proj files dnl if test "$run_proj_test" = "yes"; then saved_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I$proj_include_dir" saved_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $proj_lib_flags" saved_LIBS="$LIBS" LIBS="$LIBS $proj_lib_libs" dnl dnl Check proj headers dnl AC_MSG_CHECKING([for proj headers in $proj_include_dir]) AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[]] )], [ PROJ_CFLAGS="-I$proj_include_dir" proj_header_found="yes" AC_MSG_RESULT([found]) ], [ 
proj_header_found="no" AC_MSG_RESULT([not found]) ] ) AC_LANG_POP([C++]) dnl dnl Check proj libraries dnl if test "$proj_header_found" = "yes"; then AC_MSG_CHECKING([for proj library]) AC_LANG_PUSH([C++]) AC_LINK_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[ /* TODO add a real test */ ]] )], [ PROJ_LDFLAGS="$proj_lib_flags" PROJ_LIBS="$proj_lib_libs" proj_lib_found="yes" AC_MSG_RESULT([found]) ], [ proj_lib_found="no" AC_MSG_RESULT([not found]) ] ) AC_LANG_POP([C++]) fi CPPFLAGS="$saved_CPPFLAGS" LDFLAGS="$saved_LDFLAGS" LIBS="$saved_LIBS" fi AC_MSG_CHECKING([for proj projection library]) if test "$run_proj_test" = "yes"; then if test "$proj_header_found" = "yes" -a "$proj_lib_found" = "yes"; then AC_SUBST([PROJ_CFLAGS]) AC_SUBST([PROJ_LDFLAGS]) AC_SUBST([PROJ_LIBS]) HAVE_PROJ="yes" else HAVE_PROJ="no" fi AC_MSG_RESULT([$HAVE_PROJ]) else HAVE_PROJ="no" AC_MSG_RESULT([$HAVE_PROJ]) if test "$proj_requested" = "yes"; then AC_MSG_WARN([proj projection support requested but headers or library not found. Specify valid prefix of proj using --with-proj=@<:@DIR@:>@ or provide include directory and linker flags using --with-proj-inc and --with-proj-lib]) fi fi ]) osm2pgsql-0.82.0/m4/ax_lib_protobuf_c.m4000066400000000000000000000170341213272333300177760ustar00rootroot00000000000000# SYNOPSIS # # AX_LIB_PROTOBUF_C() # # DESCRIPTION # # This macro provides tests of availability of the Google # Protocol Buffers C library. 
This macro checks for protobufr-c # headers and libraries and defines compilation flags # # Macro supports following options and their values: # # 1) Single-option usage: # # --with-protobuf_c -- yes, no, or path to protobuf_c library # installation prefix # # 2) Three-options usage (all options are required): # # --with-protobuf_c=yes # --with-protobuf_c-inc -- path to base directory with protobuf_c headers # --with-protobuf_c-lib -- linker flags for protobuf_c # # This macro calls: # # AC_SUBST(PROTOBUF_C_CFLAGS) # AC_SUBST(PROTOBUF_C_LDFLAGS) # AC_SUBST(PROTOBUF_C_LIBS) # # And sets: # # HAVE_PROTOBUF_C # # LICENSE # # Copyright (c) 2009 Hartmut Holzgraefe # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. AC_DEFUN([AX_LIB_PROTOBUF_C], [ protobuf_c_wanted_version=$1 AC_MSG_CHECKING([for protobuf-c $protobuf_c_wanted_version]) AC_MSG_RESULT AC_ARG_WITH([protobuf-c], AC_HELP_STRING([--with-protobuf-c=@<:@ARG@:>@], [use protobuf-c library from given prefix (ARG=path); check standard prefixes (ARG=yes); disable (ARG=no)] ), [ if test "$withval" = "yes"; then if test -f /usr/local/include/google/protobuf-c/protobuf-c.h ; then protobuf_c_prefix=/usr/local elif test -f /usr/include/google/protobuf-c/protobuf-c.h ; then protobuf_c_prefix=/usr else protobuf_c_prefix="" fi protobuf_c_requested="yes" elif test -d "$withval"; then protobuf_c_prefix="$withval" protobuf_c_requested="yes" else protobuf_c_prefix="" protobuf_c_requested="no" fi ], [ dnl Default behavior is implicit yes if test -f /usr/local/include/google/protobuf-c/protobuf-c.h ; then protobuf_c_prefix=/usr/local elif test -f /usr/include/google/protobuf-c/protobuf-c.h ; then protobuf_c_prefix=/usr else protobuf_c_prefix="" fi ] ) AC_ARG_WITH([protobuf-c-inc], AC_HELP_STRING([--with-protobuf-c-inc=@<:@DIR@:>@], [path to protobuf-c library headers] ), 
[protobuf_c_include_dir="$withval"], [protobuf_c_include_dir=""] ) AC_ARG_WITH([protobuf-c-lib], AC_HELP_STRING([--with-protobuf-c-lib=@<:@ARG@:>@], [link options for protobuf-c library] ), [protobuf_c_lib_flags="$withval"], [protobuf_c_lib_flags=""] ) PROTOBUF_C_CFLAGS="" PROTOBUF_C_LDFLAGS="" PROTOBUF_C_LIBS="" dnl dnl Collect include/lib paths and flags dnl run_protobuf_c_test="no" if test -n "$protobuf_c_prefix"; then protobuf_c_include_dir="$protobuf_c_prefix/include" protobuf_c_lib_flags="-L$protobuf_c_prefix/lib" protobuf_c_lib_libs="-lprotobuf-c" run_protobuf_c_test="yes" elif test "$protobuf_c_requested" = "yes"; then if test -n "$protobuf_c_include_dir" -a -n "$protobuf_c_lib_flags" -a -n "$protobuf_c_lib_libs"; then run_protobuf_c_test="yes" fi else run_protobuf_c_test="no" fi dnl dnl Check protobuf_c files dnl if test "$run_protobuf_c_test" = "yes"; then saved_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I$protobuf_c_include_dir" saved_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $protobuf_c_lib_flags" saved_LIBS="$LIBS" LIBS="$LIBS $protobuf_c_lib_libs" dnl dnl Check protobuf_c headers dnl AC_MSG_CHECKING([for protobuf_c headers in $protobuf_c_include_dir]) AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[]] )], [ PROTOBUF_C_CFLAGS="-I$protobuf_c_include_dir" protobuf_c_header_found="yes" AC_MSG_RESULT([found]) ], [ protobuf_c_header_found="no" AC_MSG_RESULT([not found]) ] ) AC_LANG_POP([C++]) dnl dnl Check protobuf_c libraries dnl if test "$protobuf_c_header_found" = "yes"; then AC_MSG_CHECKING([for protobuf_c library]) AC_LANG_PUSH([C++]) AC_LINK_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[ protobuf_c_service_destroy((ProtobufCService *)NULL); ]] )], [ PROTOBUF_C_LDFLAGS="$protobuf_c_lib_flags" PROTOBUF_C_LIBS="$protobuf_c_lib_libs" protobuf_c_lib_found="yes" AC_MSG_RESULT([found]) ], [ protobuf_c_lib_found="no" AC_MSG_RESULT([not found]) ] ) AC_LANG_POP([C++]) fi CPPFLAGS="$saved_CPPFLAGS" LDFLAGS="$saved_LDFLAGS" 
LIBS="$saved_LIBS" fi protobuf_c_version_ok=yes if test "x$protobuf_c_wanted_version" != "x" then AC_MSG_CHECKING([for protobuf-c version >= $protobuf_c_wanted_version]) AC_MSG_RESULT dnl protobuf-c does not provide any version information in its header dnl files or from within the library itself, so we have to check dnl for availability of features here for now ... dnl protobuf-c 0.14 introduced member 'packed' in ProtobufCFieldDescriptor saved_CFLAGS=$CFLAGS CFLAGS="$CFLAGS $PROTOBUF_C_CFLAGS" AX_COMPARE_VERSION([$protobuf_c_wanted_version], [ge], [0.14], [AC_CHECK_MEMBER([ProtobufCFieldDescriptor.packed],, [protobuf_c_version_ok="no"], [[#include ] ]) ]) CFLAGS=$saved_CFLAGS AC_MSG_RESULT([protobuf-c >= $protobuf_c_wanted_version: $protobuf_c_version_ok]) fi AC_MSG_CHECKING([for protobuf-c usability]) if test "$run_protobuf_c_test" = "yes"; then if test "$protobuf_c_header_found" = "yes" -a "$protobuf_c_lib_found" = "yes" -a "$protobuf_c_version_ok" = "yes" then AC_SUBST([PROTOBUF_C_CFLAGS]) AC_SUBST([PROTOBUF_C_LDFLAGS]) AC_SUBST([PROTOBUF_C_LIBS]) AC_SUBST([HAVE_PROTOBUF_C]) AC_DEFINE([HAVE_PROTOBUF_C], [1], [Define to 1 if protobuf_c library is available]) HAVE_PROTOBUF_C="yes" AC_MSG_RESULT([yes]) protoc_path="$protobuf_c_prefix/bin:$PATH" AC_PATH_PROG(PROTOC_C, protoc-c, false, $protoc_path) else HAVE_PROTOBUF_C="no" AC_MSG_RESULT([no]) fi else HAVE_PROTOBUF_C="no" AC_MSG_RESULT([no]) if test "$protobuf_c_requested" = "yes"; then AC_MSG_WARN([protobuf-c support requested but headers or library not found. 
Specify valid prefix of protobuf-c using --with-protobuf-c=@<:@DIR@:>@ or provide include directory and linker flags using --with-protobuf-c-inc and --with-protobuf-c-lib]) fi fi ]) osm2pgsql-0.82.0/m4/ax_lib_xml2.m4000066400000000000000000000106621213272333300165160ustar00rootroot00000000000000# SYNOPSIS # # AX_LIB_XML2([MINIMUM-VERSION]) # # DESCRIPTION # # This macro provides tests of availability of xml2 'libxml2' library # of particular version or newer. # # AX_LIB_LIBXML2 macro takes only one argument which is optional. If # there is no required version passed, then macro does not run version # test. # # The --with-libxml2 option takes one of three possible values: # # no - do not check for xml2 library # # yes - do check for xml2 library in standard locations (xml2-config # should be in the PATH) # # path - complete path to xml2-config utility, use this option if xml2-config # can't be found in the PATH # # This macro calls: # # AC_SUBST(XML2_CFLAGS) # AC_SUBST(XML2_LDFLAGS) # AC_SUBST(XML2_VERSION) # # And sets: # # HAVE_XML2 # # LICENSE # # Copyright (c) 2009 Hartmut Holzgraefe # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. AC_DEFUN([AX_LIB_XML2], [ AC_ARG_WITH([libxml2], AC_HELP_STRING([--with-libxml2=@<:@ARG@:>@], [use libxml2 library @<:@default=yes@:>@, optionally specify path to xml2-config] ), [ if test "$withval" = "no"; then want_libxml2="no" elif test "$withval" = "yes"; then want_libxml2="yes" else want_libxml2="yes" XML2_CONFIG="$withval" fi ], [want_libxml2="yes"] ) XML2_CFLAGS="" XML2_LDFLAGS="" XML2_VERSION="" dnl dnl Check xml2 libraries (libxml2) dnl if test "$want_libxml2" = "yes"; then if test -z "$XML2_CONFIG" -o test; then AC_PATH_PROG([XML2_CONFIG], [xml2-config], []) fi if test ! 
-x "$XML2_CONFIG"; then AC_MSG_ERROR([$XML2_CONFIG does not exist or it is not an exectuable file]) XML2_CONFIG="no" found_libxml2="no" fi if test "$XML2_CONFIG" != "no"; then AC_MSG_CHECKING([for xml2 libraries]) XML2_CFLAGS="`$XML2_CONFIG --cflags`" XML2_LDFLAGS="`$XML2_CONFIG --libs`" XML2_VERSION=`$XML2_CONFIG --version` AC_DEFINE([HAVE_XML2], [1], [Define to 1 if xml2 libraries are available]) found_libxml2="yes" AC_MSG_RESULT([yes]) else found_libxml2="no" AC_MSG_RESULT([no]) fi fi dnl dnl Check if required version of xml2 is available dnl libxml2_version_req=ifelse([$1], [], [], [$1]) if test "$found_libxml2" = "yes" -a -n "$libxml2_version_req"; then AC_MSG_CHECKING([if libxml2 version is >= $libxml2_version_req]) dnl Decompose required version string of libxml2 dnl and calculate its number representation libxml2_version_req_major=`expr $libxml2_version_req : '\([[0-9]]*\)'` libxml2_version_req_minor=`expr $libxml2_version_req : '[[0-9]]*\.\([[0-9]]*\)'` libxml2_version_req_micro=`expr $libxml2_version_req : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` if test "x$libxml2_version_req_micro" = "x"; then libxml2_version_req_micro="0" fi libxml2_version_req_number=`expr $libxml2_version_req_major \* 1000000 \ \+ $libxml2_version_req_minor \* 1000 \ \+ $libxml2_version_req_micro` dnl Decompose version string of installed PostgreSQL dnl and calculate its number representation libxml2_version_major=`expr $XML2_VERSION : '\([[0-9]]*\)'` libxml2_version_minor=`expr $XML2_VERSION : '[[0-9]]*\.\([[0-9]]*\)'` libxml2_version_micro=`expr $XML2_VERSION : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'` if test "x$libxml2_version_micro" = "x"; then libxml2_version_micro="0" fi libxml2_version_number=`expr $libxml2_version_major \* 1000000 \ \+ $libxml2_version_minor \* 1000 \ \+ $libxml2_version_micro` libxml2_version_check=`expr $libxml2_version_number \>\= $libxml2_version_req_number` if test "$libxml2_version_check" = "1"; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) fi fi 
AC_SUBST([XML2_VERSION]) AC_SUBST([XML2_CFLAGS]) AC_SUBST([XML2_LDFLAGS]) ]) osm2pgsql-0.82.0/m4/ax_lib_zlib.m4000066400000000000000000000131621213272333300165720ustar00rootroot00000000000000# SYNOPSIS # # AX_LIB_ZLIB() # # DESCRIPTION # # This macro provides tests of availability of the zlib # compression library. This macro checks for zlib # headers and libraries and defines compilation flags # # Macro supports following options and their values: # # 1) Single-option usage: # # --with-zlib -- yes, no, or path to zlib library # installation prefix # # 2) Three-options usage (all options are required): # # --with-zlib=yes # --with-zlib-inc -- path to base directory with zlib headers # --with-zlib-lib -- linker flags for zlib # # This macro calls: # # AC_SUBST(ZLIB_CFLAGS) # AC_SUBST(ZLIB_LDFLAGS) # AC_SUBST(ZLIB_LIBS) # # And sets: # # HAVE_ZLIB # # LICENSE # # Copyright (c) 2009 Hartmut Holzgraefe # # Copying and distribution of this file, with or without modification, are # permitted in any medium without royalty provided the copyright notice # and this notice are preserved. 
AC_DEFUN([AX_LIB_ZLIB], [ AC_ARG_WITH([zlib], AC_HELP_STRING([--with-zlib=@<:@ARG@:>@], [use zlib library from given prefix (ARG=path); check standard prefixes (ARG=yes); disable (ARG=no)] ), [ if test "$withval" = "yes"; then if test -f /usr/local/include/zlib.h ; then zlib_prefix=/usr/local elif test -f /usr/include/zlib.h ; then zlib_prefix=/usr else zlib_prefix="" fi zlib_requested="yes" elif test -d "$withval"; then zlib_prefix="$withval" zlib_requested="yes" else zlib_prefix="" zlib_requested="no" fi ], [ dnl Default behavior is implicit yes if test -f /usr/local/include/zlib.h ; then zlib_prefix=/usr/local elif test -f /usr/include/zlib.h ; then zlib_prefix=/usr else zlib_prefix="" fi ] ) AC_ARG_WITH([zlib-inc], AC_HELP_STRING([--with-zlib-inc=@<:@DIR@:>@], [path to zlib library headers] ), [zlib_include_dir="$withval"], [zlib_include_dir=""] ) AC_ARG_WITH([zlib-lib], AC_HELP_STRING([--with-zlib-lib=@<:@ARG@:>@], [link options for zlib library] ), [zlib_lib_flags="$withval"], [zlib_lib_flags=""] ) ZLIB_CFLAGS="" ZLIB_LDFLAGS="" dnl dnl Collect include/lib paths and flags dnl run_zlib_test="no" if test -n "$zlib_prefix"; then zlib_include_dir="$zlib_prefix/include" zlib_lib_flags="-L$zlib_prefix/lib" zlib_lib_libs="-lz" run_zlib_test="yes" elif test "$zlib_requested" = "yes"; then if test -n "$zlib_include_dir" -a -n "$zlib_lib_flags" -a -n "$zlib_lib_libs"; then run_zlib_test="yes" fi else run_zlib_test="no" fi dnl dnl Check zlib files dnl if test "$run_zlib_test" = "yes"; then saved_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS -I$zlib_include_dir" saved_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS $zlib_lib_flags" saved_LIBS="$LIBS" LIBS="$LIBS $zlib_lib_libs" dnl dnl Check zlib headers dnl AC_MSG_CHECKING([for zlib headers in $zlib_include_dir]) AC_LANG_PUSH([C++]) AC_COMPILE_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[]] )], [ ZLIB_CFLAGS="-I$zlib_include_dir" zlib_header_found="yes" AC_MSG_RESULT([found]) ], [ zlib_header_found="no" AC_MSG_RESULT([not found]) ] ) 
AC_LANG_POP([C++]) dnl dnl Check zlib libraries dnl if test "$zlib_header_found" = "yes"; then AC_MSG_CHECKING([for zlib library]) AC_LANG_PUSH([C++]) AC_LINK_IFELSE([ AC_LANG_PROGRAM( [[ @%:@include ]], [[ const char *version; version = zlibVersion(); ]] )], [ ZLIB_LDFLAGS="$zlib_lib_flags" ZLIB_LIBS="$zlib_lib_libs" zlib_lib_found="yes" AC_MSG_RESULT([found]) ], [ zlib_lib_found="no" AC_MSG_RESULT([not found]) ] ) AC_LANG_POP([C++]) fi CPPFLAGS="$saved_CPPFLAGS" LDFLAGS="$saved_LDFLAGS" LIBS="$saved_LIBS" fi AC_MSG_CHECKING([for zlib compression library]) if test "$run_zlib_test" = "yes"; then if test "$zlib_header_found" = "yes" -a "$zlib_lib_found" = "yes"; then AC_SUBST([ZLIB_CFLAGS]) AC_SUBST([ZLIB_LDFLAGS]) AC_SUBST([ZLIB_LIBS]) AC_SUBST([HAVE_ZLIB]) AC_DEFINE([HAVE_ZLIB], [1], [Define to 1 if zlib library is available]) HAVE_ZLIB="yes" else HAVE_ZLIB="no" fi AC_MSG_RESULT([$HAVE_ZLIB]) else HAVE_ZLIB="no" AC_MSG_RESULT([$HAVE_ZLIB]) if test "$zlib_requested" = "yes"; then AC_MSG_WARN([zlib compression support requested but headers or library not found. Specify valid prefix of zlib using --with-zlib=@<:@DIR@:>@ or provide include directory and linker flags using --with-zlib-inc and --with-zlib-lib]) fi fi ]) osm2pgsql-0.82.0/m4/ax_pthread.m4000066400000000000000000000260631213272333300164370ustar00rootroot00000000000000# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_pthread.html # =========================================================================== # # SYNOPSIS # # AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) # # DESCRIPTION # # This macro figures out how to build C programs using POSIX threads. It # sets the PTHREAD_LIBS output variable to the threads library and linker # flags, and the PTHREAD_CFLAGS output variable to any special C compiler # flags that are needed. 
(The user can also force certain compiler # flags/libs to be tested by setting these environment variables.) # # Also sets PTHREAD_CC to any special C compiler that is needed for # multi-threaded programs (defaults to the value of CC otherwise). (This # is necessary on AIX to use the special cc_r compiler alias.) # # NOTE: You are assumed to not only compile your program with these flags, # but also link it with them as well. e.g. you should link with # $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS # # If you are only building threads programs, you may wish to use these # variables in your default LIBS, CFLAGS, and CC: # # LIBS="$PTHREAD_LIBS $LIBS" # CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # CC="$PTHREAD_CC" # # In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant # has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name # (e.g. PTHREAD_CREATE_UNDETACHED on AIX). # # ACTION-IF-FOUND is a list of shell commands to run if a threads library # is found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it # is not found. If ACTION-IF-FOUND is not specified, the default action # will define HAVE_PTHREAD. # # Please let the authors know if this macro fails on any platform, or if # you have any other suggestions or comments. This macro was based on work # by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help # from M. Frigo), as well as ac_pthread and hb_pthread macros posted by # Alejandro Forero Cuervo to the autoconf macro repository. We are also # grateful for the helpful feedback of numerous users. # # LICENSE # # Copyright (c) 2008 Steven G. Johnson # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. 
# # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 11 AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD]) AC_DEFUN([AX_PTHREAD], [ AC_REQUIRE([AC_CANONICAL_HOST]) AC_LANG_SAVE AC_LANG_C ax_pthread_ok=no # We used to check for pthread.h first, but this fails if pthread.h # requires special compiler flags (e.g. on True64 or Sequent). # It gets checked for in the link test anyway. 
# First of all, check if the user has set any of the PTHREAD_LIBS, # etcetera environment variables, and if threads linking works using # them: if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) AC_TRY_LINK_FUNC(pthread_join, ax_pthread_ok=yes) AC_MSG_RESULT($ax_pthread_ok) if test x"$ax_pthread_ok" = xno; then PTHREAD_LIBS="" PTHREAD_CFLAGS="" fi LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" fi # We must check for the threads library under a number of different # names; the ordering is very important because some systems # (e.g. DEC) have both -lpthread and -lpthreads, where one of the # libraries is broken (non-POSIX). # Create a list of thread flags to try. Items starting with a "-" are # C compiler flags, and other items are library names, except for "none" # which indicates that we try without any flags at all, and "pthread-config" # which is a program returning the flags for the Pth emulation library. ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" # The ordering *is* (sometimes) important. 
Some notes on the # individual items follow: # pthreads: AIX (must check this before -lpthread) # none: in case threads are in libc; should be tried before -Kthread and # other compiler flags to prevent continual compiler warnings # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) # -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) # -pthreads: Solaris/gcc # -mthreads: Mingw32/gcc, Lynx/gcc # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it # doesn't hurt to check since this sometimes defines pthreads too; # also defines -D_REENTRANT) # ... -mt is also the pthreads flag for HP/aCC # pthread: Linux, etcetera # --thread-safe: KAI C++ # pthread-config: use pthread-config program (for GNU Pth library) case "${host_cpu}-${host_os}" in *solaris*) # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based # tests will erroneously succeed. (We need to link with -pthreads/-mt/ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather # a function called by this macro, so we could check for that, but # who knows whether they'll stub that too in a future libc.) 
So, # we'll just look for -pthreads and -lpthread first: ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" ;; *-darwin*) ax_pthread_flags="-pthread $ax_pthread_flags" ;; esac if test x"$ax_pthread_ok" = xno; then for flag in $ax_pthread_flags; do case $flag in none) AC_MSG_CHECKING([whether pthreads work without any flags]) ;; -*) AC_MSG_CHECKING([whether pthreads work with $flag]) PTHREAD_CFLAGS="$flag" ;; pthread-config) AC_CHECK_PROG(ax_pthread_config, pthread-config, yes, no) if test x"$ax_pthread_config" = xno; then continue; fi PTHREAD_CFLAGS="`pthread-config --cflags`" PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" ;; *) AC_MSG_CHECKING([for the pthreads library -l$flag]) PTHREAD_LIBS="-l$flag" ;; esac save_LIBS="$LIBS" save_CFLAGS="$CFLAGS" LIBS="$PTHREAD_LIBS $LIBS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # Check for various functions. We must include pthread.h, # since some functions may be macros. (On the Sequent, we # need a special flag -Kthread to make this header compile.) # We check for pthread_join because it is in -lpthread on IRIX # while pthread_create is in libc. We check for pthread_attr_init # due to DEC craziness with -lpthreads. We check for # pthread_cleanup_push because it is one of the few pthread # functions on Solaris that doesn't have a non-functional libc stub. # We try pthread_create on general principles. 
AC_TRY_LINK([#include static void routine(void* a) {a=0;} static void* start_routine(void* a) {return a;}], [pthread_t th; pthread_attr_t attr; pthread_create(&th,0,start_routine,0); pthread_join(th, 0); pthread_attr_init(&attr); pthread_cleanup_push(routine, 0); pthread_cleanup_pop(0); ], [ax_pthread_ok=yes]) LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" AC_MSG_RESULT($ax_pthread_ok) if test "x$ax_pthread_ok" = xyes; then break; fi PTHREAD_LIBS="" PTHREAD_CFLAGS="" done fi # Various other checks: if test "x$ax_pthread_ok" = xyes; then save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. AC_MSG_CHECKING([for joinable pthread attribute]) attr_name=unknown for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do AC_TRY_LINK([#include ], [int attr=$attr; return attr;], [attr_name=$attr; break]) done AC_MSG_RESULT($attr_name) if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then AC_DEFINE_UNQUOTED(PTHREAD_CREATE_JOINABLE, $attr_name, [Define to necessary symbol if this constant uses a non-standard name on your system.]) fi AC_MSG_CHECKING([if more special flags are required for pthreads]) flag=no case "${host_cpu}-${host_os}" in *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";; *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";; esac AC_MSG_RESULT(${flag}) if test "x$flag" != xno; then PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" fi LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" # More AIX lossage: must compile with xlc_r or cc_r if test x"$GCC" != xyes; then AC_CHECK_PROGS(PTHREAD_CC, xlc_r cc_r, ${CC}) else PTHREAD_CC=$CC fi else PTHREAD_CC="$CC" fi AC_SUBST(PTHREAD_LIBS) AC_SUBST(PTHREAD_CFLAGS) AC_SUBST(PTHREAD_CC) # Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: if test x"$ax_pthread_ok" = xyes; then ifelse([$1],,AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]),[$1]) : else ax_pthread_ok=no $2 fi AC_LANG_RESTORE 
])dnl AX_PTHREAD osm2pgsql-0.82.0/mapnik-osm-updater.sh000077500000000000000000000633711213272333300176150ustar00rootroot00000000000000#!/bin/bash export osm_username="osm" export database_name="gis" export planet_dir="/home/$osm_username/osm/planet" export planet_file="$planet_dir/planet.osm.bz2" export sql_dump="$planet_dir/planet.osm.sql.bz2" export log_dir=/var/log export geoinfodb_file="/usr/share/icons/map-icons/geoinfo.db" export osmdb_file="/usr/share/gpsdrive/osm.db" export osm2pgsql_cmd=`which osm2pgsql` test -x "$osm2pgsql_cmd" || echo "Missing osm2pgsql in PATH" test -x "$osm2pgsql_cmd" || osm2pgsql_cmd="$HOME/svn.openstreetmap.org/applications/utils/export/osm2pgsql/osm2pgsql" test -x "$osm2pgsql_cmd" || echo "Missing osm2pgsql" export cmd_osm2poidb=`which osm2poidb` test -x "$cmd_osm2poidb" || echo "Missing osm2poidb in PATH" test -x "$cmd_osm2poidb" || cmd_osm2poidb="`dirname $0`/../osm2poidb/build/osm2poidb" test -x "$cmd_osm2poidb" || cmd_osm2poidb="$HOME/svn.openstreetmap.org/applications/utils/export/osm2poidb/build/osm2poidb" test -x "$cmd_osm2poidb" || echo "Missing osm2poidb" osm_planet_mirror_cmd=`which osm-planet-mirror` test -x "$osm_planet_mirror_cmd" || echo "Missing planet-mirror.pl in PATH" test -x "$osm_planet_mirror_cmd" || osm_planet_mirror_cmd="`dirname $0`/../../planet-mirror/planet-mirror.pl" test -x "$osm_planet_mirror_cmd" || osm_planet_mirror_cmd="$HOME/svn.openstreetmap.org/applications/utils/planet-mirror/planet-mirror.pl" test -x "$osm_planet_mirror_cmd" || osm_planet_mirror_cmd="`dirname ../../planet-mirror/planet-mirror.pl`" test -x "$osm_planet_mirror_cmd" || echo "Missing planet-mirror.pl" test -n "$1" || help=1 quiet=" -q " verbose=1 for arg in "$@" ; do case $arg in --all-planet) # Do all the creation steps listed below from planet file create_osm_user=1 mirror=1 check_newer_planet= drop=1 create_db=1 db_table_create=1 create_db=1 create_db_user=1 db_add_900913=1 db_add_spatial_ref_sys=1 grant_all_rights_to_user_osm=1 
planet_fill=1 db_add_gpsdrive_poitypes=1 create_db_users=${create_db_users:-*} grant_db_users=${grant_db_users:-*} ;; --all-planet-geofabrik=\?) # Use Planet Extract from Frederics GeoFabrik.de Page as planet File and import # Use ? for a list of possible files dir_country=${arg#*=} country=`basename $dir_country` planet_file="$planet_dir/${country}.osm.bz2" mirror_geofabrik=${dir_country} mirror= ;; --all-planet-geofabrik=*) # Use Planet Extract from Frederics GeoFabrik.de Page as planet File and import # Use ? for a list of possible files # Example: europe/germany/baden-wuerttemberg dir_country=${arg#*=} country=`basename $dir_country` planet_file="$planet_dir/${country}.osm.bz2" mirror_geofabrik=${dir_country} create_osm_user=1 mirror= check_newer_planet= drop=1 create_db=1 db_table_create=1 db_add_900913=1 db_add_spatial_ref_sys=1 create_db_user=1 grant_all_rights_to_user_osm=1 planet_fill=1 db_add_gpsdrive_poitypes=1 create_db_users=${create_db_users:-*} grant_db_users=${grant_db_users:-*} ;; --all-planet-update) # Do all the creation steps listed below from planet file with up to date checking create_osm_user=1 mirror=1 check_newer_planet=1 drop=1 create_db=1 db_add_900913=1 db_add_spatial_ref_sys=1 create_db_user=1 grant_all_rights_to_user_osm=1 planet_fill=1 db_add_gpsdrive_poitypes=1 create_db_users=${create_db_users:-*} grant_db_users=${grant_db_users:-*} ;; --all-from-dump) # Do all the creation steps listed below # from planet-dump file # !!! 
all-from-dump is not completely tested yet create_osm_user=1 mirror_dump=1 drop=1 create_db=1 db_add_900913=1 db_add_spatial_ref_sys=1 create_db_user=1 grant_all_rights_to_user_osm=1 create_db_users=${create_db_users:-*} fill_from_dump="$sql_dump" grant_db_users=${grant_db_users:-*} db_add_gpsdrive_poitypes=1 ;; --all-create) # Do all the creation steps listed below only no data # import and no planet mirroring create_osm_user=1 drop=1 create_db=1 db_add_900913=1 db_add_spatial_ref_sys=1 create_db_user=1 grant_all_rights_to_user_osm=1 create_db_users=${create_db_users:-*} grant_db_users=${grant_db_users:-*} ;; --create-osm-user) # create the osm-user needed # This means creating a user 'osm' and his home directory # with useradd, mkdir, chmod and chown create_osm_user=1 ;; --mirror) # mirror planet File (http://planet.openstreetmap.org/) mirror=1 ;; --no-mirror) # do not mirror planet File mirror= ;; --check-newer-planet) # Check if Planet File is newer then stampfile. # If yes: Continue check_newer_planet=1 ;; --drop) # drop the old Database (gis) and Database-user (osm) drop=1 ;; --create-db) # create the database (gis) # with this command only the database is created, # but no tables inside it create_db=1 ;; --create-db-user) # create the database-user (osm) create_db_user=1 ;; --grant-all2osm-user) # grant all rights for the database to the DB-User osm grant_all_rights_to_user_osm=1 ;; --create-db-users=*) #Create a Database user for all users specified. # To create a db-user for all available system-user # specify *. (Except root)) create_db_users=${arg#*=} ;; --grant-db-users=*) # Grant database-users all rights (including write, ...) # to the gis Database !!! 
This has to be changed in the # future, normally only the osm user needs update rights grant_db_users=${arg#*=} ;; --add-gpsdrive-types) # add GpsDrive POI-Types to points table db_add_gpsdrive_poitypes=1 ;; --planet-fill) # fill database from planet File planet_fill=1 ;; --mirror-dump) # mirror the planet.sql dump File mirror_dump=1 ;; --no-mirror-dump) # Do not mirror the planet.sql dump File mirror_dump= ;; --fill-from-dump=*) # fill database from Dump File fill_from_dump=${arg#*=} ;; --mapnik-dump=*) # Dump Content of Mapnik Database to a File (.sql|.sql.bz)) postgis_mapnik_dump=${arg#*=} ;; --db-table-create) # Create tables in Database with osm2pgsql db_table_create=1 ;; --db-add-srid-900913) # Add SRID 900913 db_add_900913=1 ;; --db-add-spatial_ref_sys) # Add SRIDs to spatial_ref_sys db_add_spatial_ref_sys=1 ;; --count-db) # Count entries in Database. This is to check # if the database really contains entries # if you set an empty user with the option osm_username='' # the current user is used count_db=1 ;; -h) help=1 ;; --help) help=1 ;; -help) help=1 ;; --debug) # switch on debugging debug=1 verbose=1 quiet="" ;; -debug) debug=1 verbose=1 quiet="" ;; --nv) # be a little bit less verbose verbose='' ;; --planet-dir=*) # define Directory for Planet-File planet_dir=${arg#*=} planet_file="$planet_dir/planet.osm.bz2" ;; --planet-file=*) # define Planet-File including Directory planet_file=${arg#*=} ;; --poi-file=*) # define POI Database file including Directory osmdb_file=${arg#*=} ;; --geoinfo-file=*) # define geoinfo database file containing poi-types geoinfodb_file=${arg#*=} ;; --osm-username=*) # Define username to use for DB creation and planet # download # !! You shouldn't use your username or root as the # !! download and install user. # This username is the download and install user. # The osm-user normally only should have the planet files # in hishome directory and nothing else. 
By default # the osm-username is 'osm' osm_username=${arg#*=} if [ "$osm_username" = "$USER" ] ; then echo echo "!!!!!! ERROR: Don't use your own login account as the osm_username!!" 1>&2 echo exit 1 fi if [ "$osm_username" = "root" ] ; then echo echo "!!!!!! ERROR: Don't use the root account as the osm_username!!" 1>&2 echo exit 1 fi planet_dir="/home/$osm_username/osm/planet" planet_file="$planet_dir/planet.osm.bz2" ;; --osm2pgsql-cmd=*) # The path to the osm2pgsql command # It can be found at # svn.openstreetmap.org/applications/utils/export/osm2pgsql/ # and has to be compiled. Alternatively you can install # the Debian Package openstreetmap-utils osm2pgsql_cmd=${arg#*=} if ! [ -x "$osm2pgsql_cmd" ]; then echo "!!!!!! ERROR: Cannot execute '$osm2pgsql_cmd'" 1>&2 exit -1 fi ;; --database-name=*) # use this name for the database default is 'gis' database_name=${arg#*=} ;; *) echo "" echo "!!!!!!!!! Unknown option $arg" echo "" help=1 ;; esac done if [ -n "$help" ] ; then # extract options from case commands above options=`grep -E -e esac -e '\s*--.*\).*#' $0 | sed '/esac/,$d;s/.*--/ [--/; s/=\*)/=val]/; s/)[\s ]/]/; s/#.*\s*//; s/[\n/]//g;'` options=`for a in $options; do echo -n " $a" ; done` echo "$0 $options" echo " !!! Warning: This Script is for now a quick hack to make setting up !!! Warning: My databases easier. Please check if it really works for you!! !!! Warning: Especially when using different Database names or username, ... !!! Warning: not every combination of values except the default is tested. This script tries to install the mapnik database. For this it first creates a new user osm on the system and mirrors the current planet to his home directory. Then this planet is imported into the postgis Database from a newly created user named osm This script uses sudo. So you either have to have sudo right or you'll have to start the script as root. 
The users needed will be postgres and osm " # extract options + description from case commands above grep -E -e esac -e '--.*\).*#' -e '^[\t\s ]+#' $0 | \ grep -v /bin/bash | sed '/esac/,$d;s/.*--/ --/;s/=\*)/=val/;s/)//;s/#//;s/\\//;' exit; fi if [ -n "$osm_username" ] ; then sudo_cmd="sudo -u $osm_username" else sudo_cmd='' fi export import_stamp_file=${log_dir}/osm2pgsql_postgis-$database_name.stamp export import_log=${log_dir}/osm2pgsql_postgis-$database_name.log if [ -n "$debug" ] ; then echo "Planet File: `ls -l $planet_file`" echo "Import Stamp : `ls -l $import_stamp_file`" fi ############################################ # Create a user on the system ############################################ if [ -n "$create_osm_user" ] ; then test -n "$verbose" && echo "----- Check if we already have an user '$osm_username'" if ! id "$osm_username" >/dev/null; then echo "create '$osm_username' User" useradd "$osm_username" fi mkdir -p "/home/$osm_username/osm/planet" # The user itself should be allowed to read/write all his own files # in the ~/osm/ Directory chown "$osm_username" "/home/$osm_username" chown -R "$osm_username" "/home/$osm_username/osm" chmod +rwX "/home/$osm_username" chmod -R +rwX "/home/$osm_username/osm" # Everyone on the system is allowed to read the planet.osm Files chmod -R a+rX "/home/$osm_username/osm" fi ############################################ # Mirror the planet-dump File for Europe ############################################ if [ -n "$mirror_geofabrik" ] ; then geofabrik_basedir="http://download.geofabrik.de/osm" if [ "$mirror_geofabrik" = "?" ]; then echo "Retreiving available planet extracts from GeoFabrik ..." 
# Find all Subdirs in the first 3 levels wget_out=`wget --no-convert-links -q --level=0 -O - "http://download.geofabrik.de/osm" | grep DIR | grep -v -i Parent ` sub_dirs=`echo "$wget_out" | perl -ne 'm,href="(.*)/",;print "$1 "'` for level in 1 2 3; do for sub_dir in $sub_dirs ; do #echo "Get dirs in Subdir: $sub_dir" wget_out=`wget -q --level=0 -O - "$geofabrik_basedir/$sub_dir" | grep 'DIR' | grep -v Parent ` new_dirs="$new_dirs `echo "$wget_out" | perl -ne 'm,href="(.*)/", && print "'$sub_dir'/$1 "'`" # echo "WGET: '$wget_out'" done sub_dirs="$sub_dirs $new_dirs" done sub_dirs=`for dir in $sub_dirs; do echo $dir; done | sort -u` # Printout content of all $sub_dirs echo "Possible Values are:" for sub_dir in "" $sub_dirs ; do wget -q --level=0 -O - "$geofabrik_basedir/$sub_dir" | grep 'OpenStreetMap data' | \ perl -ne 'm/.*href="([^"]+)\.osm.bz2"/;print " '$sub_dir/'$1\n"' done exit 1 fi planet_source_file="${geofabrik_basedir}/${mirror_geofabrik}.osm.bz2" if [ -n "$mirror" ] ; then test -n "$verbose" && echo "----- Mirroring planet File $planet_source_file" wget -v --mirror "$planet_source_file" \ --no-directories --directory-prefix=$planet_dir/ fi fi ############################################ # Mirror the newest planet File from planet.openstreetmap.org ############################################ if [ -n "$mirror" ] ; then test -n "$verbose" && echo "----- Mirroring planet File" if ! [ -x "$osm_planet_mirror_cmd" ]; then echo "!!!!!! ERROR: Cannot execute '$osm_planet_mirror_cmd'" 1>&2 exit -1 fi if ! $sudo_cmd $osm_planet_mirror_cmd -v -v --planet-dir=$planet_dir ; then echo "!!!!!! ERROR: Cannot Mirror Planet File" 1>&2 exit 1 fi if ! [ -s $planet_file ] ; then echo "!!!!!! 
ERROR: File $planet_file is missing" exit -1 fi fi ############################################ # Check if Planet File is newer than import Stamp ############################################ if [ -n "$check_newer_planet" ] ; then if [ "$planet_file" -nt "$import_stamp_file" ] ; then if [ -n "$verbose" ] ; then echo "----- New File needs updating" echo "Planet File: `ls -l $planet_file`" echo "Import Stamp : `ls -l $import_stamp_file`" fi else echo "Postgis Database already Up To Date" echo "`ls -l $import_stamp_file`" exit 0 fi fi ############################################ # Drop the old Database and Database-user ############################################ if [ -n "$drop" ] ; then test -n "$verbose" && echo "----- Drop complete Database '$database_name' and user '$osm_username'" echo "CHECKPOINT" | sudo -u postgres psql $quiet sudo -u postgres dropdb $quiet -Upostgres "$database_name" sudo -u postgres dropuser $quiet -Upostgres "$osm_username" fi ############################################ # Create db ############################################ if [ -n "$create_db" ] ; then test -n "$verbose" && echo test -n "$verbose" && echo "----- Create Database '$database_name'" if ! sudo -u postgres createdb -Upostgres $quiet -EUTF8 "$database_name"; then echo "!!!!!! ERROR: Creation of '$database_name' Failed" exit -1 fi if ! sudo -u postgres createlang plpgsql "$database_name"; then echo "!!!!!! ERROR: Creation Failed" exit -1 fi lwpostgis="/usr/share/postgresql-8.4-postgis/lwpostgis.sql" test -s $lwpostgis || lwpostgis="/usr/share/postgresql-8.3-postgis/lwpostgis.sql" test -s $lwpostgis || lwpostgis="/usr/share/postgresql-8.2-postgis/lwpostgis.sql" test -s $lwpostgis || lwpostgis="`ls /usr/share/postgresql-*-postgis/lwpostgis.sql| sort -n | head 1`" if [ ! -s $lwpostgis ] ; then echo "!!!!!! 
ERROR: Cannot find $lwpostgis" exit -1 fi if sudo -u postgres psql $quiet -Upostgres "$database_name" <${lwpostgis} ; then echo "Enabling spacial Extentions done with '$lwpostgis'" else echo "!!!!!! ERROR: Creation with '$lwpostgis' Failed" exit -1 fi fi ############################################ # Create db-user ############################################ if [ -n "$create_db_user" ] ; then test -n "$verbose" && echo "----- Create Database-user '$osm_username'" sudo -u postgres createuser -Upostgres $quiet -S -D -R "$osm_username" || exit -1 fi if [ -n "$grant_all_rights_to_user_osm" ] ; then test -n "$verbose" && echo test -n "$verbose" && echo "----- Grant rights on Database '$database_name' for '$osm_username'" ( echo "GRANT ALL ON SCHEMA PUBLIC TO \"$osm_username\";" echo "GRANT ALL on geometry_columns TO \"$osm_username\";" echo "GRANT ALL on spatial_ref_sys TO \"$osm_username\";" echo "GRANT ALL ON SCHEMA PUBLIC TO \"$osm_username\";" ) | sudo -u postgres psql $quiet -Upostgres "$database_name" fi ############################################ # Create a Database user for all users specified (*) or available on the system. 
Except root ############################################ if [ -n "$create_db_users" ] ; then if [ "$create_db_users" = "*" ] ; then echo "Create DB User for every USER" create_db_users='' # try to see if all users above uid=1000 are interesting all_users=`cat /etc/passwd | sed 's/:/ /g' | while read user pwd uid rest ; do test "$uid" -ge "1000" || continue; echo $user; done` echo "all_users: $all_users" for user in $all_users ; do echo $user | grep -q -e root && continue echo $user | grep -q -e "$osm_username" && continue echo $user | grep -q -e "nobody" && continue echo "$create_db_users" | grep -q " $user " && continue create_db_users=" $create_db_users $user " done fi # This is not good; this probably broke my postgres installation # dpkg --purge postgresql-8.2 # Stopping PostgreSQL 8.2 database server: main* Error: The cluster is owned by user id 107 which does not exist any more # apt-get -f install postgresql-8.2 # Starting PostgreSQL 8.2 database server: main* Error: The cluster is owned by user id 107 which does not exist any more #if false ; then for user in $create_db_users; do echo " Create DB User for $user" sudo -u postgres createuser $quiet -Upostgres --no-superuser --no-createdb --no-createrole "$user" done #fi fi ############################################ # Create Database tables with osm2pgsql ############################################ if [ -n "$db_table_create" ] ; then if ! [ -x "$osm2pgsql_cmd" ]; then echo "!!!!!! 
ERROR: Cannot execute '$osm2pgsql_cmd'" 1>&2 exit -1 fi echo "" echo "--------- Unpack and import $planet_file" cd /usr/share/openstreetmap/ $sudo_cmd $osm2pgsql_cmd --create "$database_name" fi ############################################ # Add SRID spatial_ref_sys ############################################ if [ -n "$db_add_spatial_ref_sys" ] ; then test -s "$srid_spatial_ref_sys" || srid_spatial_ref_sys="/usr/share/postgresql-8.4-postgis/spatial_ref_sys.sql" test -s "$srid_spatial_ref_sys" || srid_spatial_ref_sys="/usr/share/postgresql-8.3-postgis/spatial_ref_sys.sql" test -s "$srid_spatial_ref_sys" || srid_spatial_ref_sys="/usr/share/postgresql-8.2-postgis/spatial_ref_sys.sql" test -s "$srid_spatial_ref_sys" || srid_spatial_ref_sys="/usr/share/postgresql-8.*-postgis/spatial_ref_sys.sql" test -s "$srid_spatial_ref_sys" || srid_spatial_ref_sys="/usr/share/postgresql-*-postgis/spatial_ref_sys.sql" if [ ! -s $srid_spatial_ref_sys ] ; then echo "!!!!!! ERROR: Cannot find $srid_spatial_ref_sys" exit -1 fi if sudo -u postgres psql $quiet -Upostgres "$database_name" <${srid_spatial_ref_sys} ; then echo "Adding '$srid_spatial_ref_sys'" else echo "!!!!!! ERROR: Creation Failed" exit -1 fi fi ############################################ # Add SRID 900913 ############################################ if [ -n "$db_add_900913" ] ; then test -s "$srid_900913" || srid_900913="`dirname $0`/900913.sql" test -s "$srid_900913" || srid_900913="$HOME/svn.openstreetmap.org/applications/utils/export/osm2pgsql/900913.sql" test -s "$srid_900913" || srid_900913="/usr/share/mapnik/900913.sql" if [ ! -s $srid_900913 ] ; then echo "!!!!!! ERROR: Cannot find $srid_900913" exit -1 fi if sudo -u postgres psql $quiet -Upostgres "$database_name" <${srid_900913} ; then echo "Adding '$srid_900913'" else echo "!!!!!! 
ERROR: Creation Failed" exit -1 fi fi ############################################ # Grant all rights on the gis Database to all system users or selected users in the system ############################################ if [ -n "$grant_db_users" ] ; then if [ "$grant_db_users" = "*" ] ; then echo "-------- GRANT Rights to every USER" grant_db_users='' for user in `users` ; do echo "$user" | grep -q "root" && continue echo " $grant_db_users " | grep -q " $user " && continue grant_db_users="$grant_db_users $user" done fi test -n "$verbose" && echo "Granting rights to users: '$grant_db_users'" for user in $grant_db_users; do echo "Granting all rights to user '$user' for Database '$database_name'" ( echo "GRANT ALL on geometry_columns TO \"$user\";" echo "GRANT ALL ON SCHEMA PUBLIC TO \"$user\";" echo "GRANT ALL on spatial_ref_sys TO \"$user\";" echo "GRANT ALL on TABLE planet_osm_line TO \"$user\";" echo "GRANT ALL on TABLE planet_osm_point TO \"$user\";" echo "GRANT ALL on TABLE planet_osm_roads TO \"$user\";" echo "GRANT ALL on TABLE planet_osm_polygon TO \"$user\";" )| sudo -u postgres psql $quiet -Upostgres "$database_name" || true done fi ############################################ # Fill Database from planet File ############################################ if [ -n "$planet_fill" ] ; then if ! [ -x "$osm2pgsql_cmd" ]; then echo "!!!!!! ERROR: Cannot execute '$osm2pgsql_cmd'" 1>&2 exit -1 fi echo "" echo "--------- Unpack and import $planet_file" echo "Import started: `date`" >>"$import_log" cd /usr/share/openstreetmap/ $sudo_cmd $osm2pgsql_cmd --database "$database_name" $planet_file rc=$? if [ "$rc" -gt "0" ]; then echo "`date`: Import With Error $rc:" >> "$import_log" echo "`ls -l $planet_file` import --> rc($rc)" >> "$import_log" echo "!!!!!!!! 
ERROR while running '$sudo_cmd $osm2pgsql_cmd --database "$database_name" $planet_file'" echo "Creation with for Database "$database_name" from planet-file '$planet_file' with '$osm2pgsql_cmd' Failed" echo "see Logfile for more Information:" echo "less $import_log" exit -1 fi echo "`date`: Import Done: `ls -l $planet_file` import --> $rc" >> "$import_log" echo "`date`: `ls -l $planet_file` import --> $rc" >>$import_stamp_file touch --reference=$planet_file $import_stamp_file fi ############################################ # Create GpsDrive POI-Database ############################################ if [ -n "$db_add_gpsdrive_poitypes" ] ; then if ! [ -x "$cmd_osm2poidb" ]; then echo "!!!!!! ERROR: Cannot execute gpsdrive_poitypes: '$cmd_osm2poidb'" 1>&2 exit -1 fi echo "" echo "--------- Create GpsDrive POI-Database $osmdb_file" bunzip2 -c $planet_file | sudo $cmd_osm2poidb -w -f $geoinfodb_file -o $osmdb_file STDIN rc=$? if [ "$rc" -ne "0" ]; then echo "!!!!!!! ERROR: cannot create POI Database" exit -1 fi fi ############################################ # Dump the complete Database ############################################ if [ -n "$postgis_mapnik_dump" ] ; then # get Database Content with Dump postgis_mapnik_dump_dir=`dirname $postgis_mapnik_dump` mkdir -p "$postgis_mapnik_dump_dir" case "$postgis_mapnik_dump" in *.bz2) $sudo_cmd pg_dump --data-only -U "$osm_username" "$database_name" | bzip2 >"$postgis_mapnik_dump" ;; *.gz) $sudo_cmd pg_dump --data-only -U "$osm_username" "$database_name" | gzip >"$postgis_mapnik_dump" ;; *) $sudo_cmd pg_dump --data-only -U "$osm_username" "$database_name" >"$postgis_mapnik_dump" ;; esac if [ "$?" 
-gt "0" ]; then echo "Error While dumping Database" fi fi ############################################ # Mirror the planet-dump File from planet.openstreetmap.de ############################################ if [ -n "$mirror_dump" ] ; then test -n "$verbose" && echo "----- Mirroring planet-dump File" wget -v --mirror http://planet.openstreetmap.de/planet.osm.sql.bz2 \ --no-directories --directory-prefix=$planet_dir/ fi ############################################ # Fill Database from Dump File ############################################ if [ -n "$fill_from_dump" ] ; then echo "" echo "--------- Import from Dump '$fill_from_dump'" sudo -u postgres createdb -T template0 $database_name case "$fill_from_dump" in *.bz2) test -n "$verbose" && echo "Uncompress File ..." bzip2 -dc "$fill_from_dump" | $sudo_cmd psql $quiet "$database_name" ;; *.gz) test -n "$verbose" && echo "Uncompress File ..." gzip -dc "$fill_from_dump" | $sudo_cmd psql $quiet "$database_name" ;; *) test -n "$verbose" && echo "Import uncompressed File ..." $sudo_cmd psql $quiet "$database_name" <"$fill_from_dump" ;; esac if [ "$?" 
-gt "0" ]; then echo "Error While reding Dump into Database" fi fi ############################################ # Check number of entries in Database ############################################ if [ -n "$count_db" ] ; then echo "" echo "--------- Check Number of lines in Database '$database_name'" # Get the Table names if [ -n "$osm_username" ]; then table_owner=" AND tableowner ='$osm_username' "; fi table_names=`echo "SELECT tablename from pg_catalog.pg_tables where schemaname = 'public' $tableowner;" | \ $sudo_cmd psql "$database_name" -h /var/run/postgresql | grep -E -e '^ planet'` echo "Counting entries in all Tables (" $table_names ")" for table in $table_names; do echo -n "Table $table = " echo "SELECT COUNT(*) from $table;" | \ $sudo_cmd psql gis -h /var/run/postgresql | grep -v -e count -e '------' -e '1 row' | head -1 done fi osm2pgsql-0.82.0/middle-pgsql.c000066400000000000000000001703041213272333300162620ustar00rootroot00000000000000/* Implements the mid-layer processing for osm2pgsql * using several PostgreSQL tables * * This layer stores data read in from the planet.osm file * and is then read by the backend processing code to * emit the final geometry-enabled output formats */ #include "config.h" #include #include #include #include #include #include #include #ifdef HAVE_PTHREAD #include #endif #ifdef HAVE_SYS_WAIT_H #include #endif #ifdef HAVE_MMAP #include #ifndef MAP_ANONYMOUS #ifdef MAP_ANON #define MAP_ANONYMOUS MAP_ANON #endif #endif #endif #include #include "osmtypes.h" #include "middle.h" #include "middle-pgsql.h" #include "output-pgsql.h" #include "node-ram-cache.h" #include "node-persistent-cache.h" #include "pgsql.h" struct progress_info { time_t start; time_t end; int count; int finished; }; enum table_id { t_node, t_way, t_rel } ; struct table_desc { const char *name; const char *start; const char *create; const char *create_index; const char *prepare; const char *prepare_intarray; const char *copy; const char *analyze; const char *stop; 
const char *array_indexes; int copyMode; /* True if we are in copy mode */ int transactionMode; /* True if we are in an extended transaction */ PGconn *sql_conn; }; static struct table_desc tables [] = { { /*table = t_node,*/ .name = "%p_nodes", .start = "BEGIN;\n", #ifdef FIXED_POINT .create = "CREATE %m TABLE %p_nodes (id " POSTGRES_OSMID_TYPE " PRIMARY KEY {USING INDEX TABLESPACE %i}, lat int4 not null, lon int4 not null, tags text[]) {TABLESPACE %t};\n", .prepare = "PREPARE insert_node (" POSTGRES_OSMID_TYPE ", int4, int4, text[]) AS INSERT INTO %p_nodes VALUES ($1,$2,$3,$4);\n" #else .create = "CREATE %m TABLE %p_nodes (id " POSTGRES_OSMID_TYPE " PRIMARY KEY {USING INDEX TABLESPACE %i}, lat double precision not null, lon double precision not null, tags text[]) {TABLESPACE %t};\n", .prepare = "PREPARE insert_node (" POSTGRES_OSMID_TYPE ", double precision, double precision, text[]) AS INSERT INTO %p_nodes VALUES ($1,$2,$3,$4);\n" #endif "PREPARE get_node (" POSTGRES_OSMID_TYPE ") AS SELECT lat,lon,tags FROM %p_nodes WHERE id = $1 LIMIT 1;\n" "PREPARE delete_node (" POSTGRES_OSMID_TYPE ") AS DELETE FROM %p_nodes WHERE id = $1;\n", .prepare_intarray = "PREPARE get_node_list(" POSTGRES_OSMID_TYPE "[]) AS SELECT id, lat, lon FROM %p_nodes WHERE id = ANY($1::" POSTGRES_OSMID_TYPE "[])", .copy = "COPY %p_nodes FROM STDIN;\n", .analyze = "ANALYZE %p_nodes;\n", .stop = "COMMIT;\n" }, { /*table = t_way,*/ .name = "%p_ways", .start = "BEGIN;\n", .create = "CREATE %m TABLE %p_ways (id " POSTGRES_OSMID_TYPE " PRIMARY KEY {USING INDEX TABLESPACE %i}, nodes " POSTGRES_OSMID_TYPE "[] not null, tags text[], pending boolean not null) {TABLESPACE %t};\n", .create_index = "CREATE INDEX %p_ways_idx ON %p_ways (id) {TABLESPACE %i} WHERE pending;\n", .array_indexes = "CREATE INDEX %p_ways_nodes ON %p_ways USING gin (nodes) {TABLESPACE %i};\n", .prepare = "PREPARE insert_way (" POSTGRES_OSMID_TYPE ", " POSTGRES_OSMID_TYPE "[], text[], boolean) AS INSERT INTO %p_ways VALUES 
($1,$2,$3,$4);\n" "PREPARE get_way (" POSTGRES_OSMID_TYPE ") AS SELECT nodes, tags, array_upper(nodes,1) FROM %p_ways WHERE id = $1;\n" "PREPARE get_way_list (" POSTGRES_OSMID_TYPE "[]) AS SELECT id, nodes, tags, array_upper(nodes,1) FROM %p_ways WHERE id = ANY($1::" POSTGRES_OSMID_TYPE "[]);\n" "PREPARE way_done(" POSTGRES_OSMID_TYPE ") AS UPDATE %p_ways SET pending = false WHERE id = $1;\n" "PREPARE pending_ways AS SELECT id FROM %p_ways WHERE pending;\n" "PREPARE delete_way(" POSTGRES_OSMID_TYPE ") AS DELETE FROM %p_ways WHERE id = $1;\n", .prepare_intarray = "PREPARE node_changed_mark(" POSTGRES_OSMID_TYPE ") AS UPDATE %p_ways SET pending = true WHERE nodes && ARRAY[$1] AND NOT pending;\n", .copy = "COPY %p_ways FROM STDIN;\n", .analyze = "ANALYZE %p_ways;\n", .stop = "COMMIT;\n" }, { /*table = t_rel,*/ .name = "%p_rels", .start = "BEGIN;\n", .create = "CREATE %m TABLE %p_rels(id " POSTGRES_OSMID_TYPE " PRIMARY KEY {USING INDEX TABLESPACE %i}, way_off int2, rel_off int2, parts " POSTGRES_OSMID_TYPE "[], members text[], tags text[], pending boolean not null) {TABLESPACE %t};\n", .create_index = "CREATE INDEX %p_rels_idx ON %p_rels (id) {TABLESPACE %i} WHERE pending;\n", .array_indexes = "CREATE INDEX %p_rels_parts ON %p_rels USING gin (parts) {TABLESPACE %i};\n", .prepare = "PREPARE insert_rel (" POSTGRES_OSMID_TYPE ", int2, int2, " POSTGRES_OSMID_TYPE "[], text[], text[]) AS INSERT INTO %p_rels VALUES ($1,$2,$3,$4,$5,$6,false);\n" "PREPARE get_rel (" POSTGRES_OSMID_TYPE ") AS SELECT members, tags, array_upper(members,1)/2 FROM %p_rels WHERE id = $1;\n" "PREPARE rel_done(" POSTGRES_OSMID_TYPE ") AS UPDATE %p_rels SET pending = false WHERE id = $1;\n" "PREPARE pending_rels AS SELECT id FROM %p_rels WHERE pending;\n" "PREPARE delete_rel(" POSTGRES_OSMID_TYPE ") AS DELETE FROM %p_rels WHERE id = $1;\n", .prepare_intarray = "PREPARE node_changed_mark(" POSTGRES_OSMID_TYPE ") AS UPDATE %p_rels SET pending = true WHERE parts && ARRAY[$1] AND parts[1:way_off] && 
ARRAY[$1] AND NOT pending;\n" "PREPARE way_changed_mark(" POSTGRES_OSMID_TYPE ") AS UPDATE %p_rels SET pending = true WHERE parts && ARRAY[$1] AND parts[way_off+1:rel_off] && ARRAY[$1] AND NOT pending;\n" "PREPARE rel_changed_mark(" POSTGRES_OSMID_TYPE ") AS UPDATE %p_rels SET pending = true WHERE parts && ARRAY[$1] AND parts[rel_off+1:array_length(parts,1)] && ARRAY[$1] AND NOT pending;\n", .copy = "COPY %p_rels FROM STDIN;\n", .analyze = "ANALYZE %p_rels;\n", .stop = "COMMIT;\n" } }; static const int num_tables = sizeof(tables)/sizeof(tables[0]); static struct table_desc *node_table = &tables[t_node]; static struct table_desc *way_table = &tables[t_way]; static struct table_desc *rel_table = &tables[t_rel]; static int Append; const struct output_options *out_options; #define HELPER_STATE_UNINITIALIZED -1 #define HELPER_STATE_FORKED -2 #define HELPER_STATE_RUNNING 0 #define HELPER_STATE_FINISHED 1 #define HELPER_STATE_CONNECTED 2 #define HELPER_STATE_FAILED 3 static int pgsql_connect(const struct output_options *options) { int i; /* We use a connection per table to enable the use of COPY */ for (i=0; iconninfo); /* Check to see that the backend connection was successfully made */ if (PQstatus(sql_conn) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s\n", PQerrorMessage(sql_conn)); return 1; } tables[i].sql_conn = sql_conn; pgsql_exec(sql_conn, PGRES_COMMAND_OK, "SET synchronous_commit TO off;"); if (tables[i].prepare) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", tables[i].prepare); } if (tables[i].prepare_intarray) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", tables[i].prepare_intarray); } } return 0; } static void pgsql_cleanup(void) { int i; for (i=0; i (buflen-20) ) /* Almost overflowed? 
*/ { buflen <<= 1; buffer = realloc( buffer, buflen ); goto _restart; } first = 0; } *ptr++ = '}'; *ptr++ = 0; return buffer; } /* Special escape routine for escaping strings in array constants: double quote, backslash,newline, tab*/ static char *escape_tag( char *ptr, const char *in, int escape ) { while( *in ) { switch(*in) { case '"': if( escape ) *ptr++ = '\\'; *ptr++ = '\\'; *ptr++ = '"'; break; case '\\': if( escape ) *ptr++ = '\\'; if( escape ) *ptr++ = '\\'; *ptr++ = '\\'; *ptr++ = '\\'; break; case '\n': if( escape ) *ptr++ = '\\'; *ptr++ = '\\'; *ptr++ = 'n'; break; case '\r': if( escape ) *ptr++ = '\\'; *ptr++ = '\\'; *ptr++ = 'r'; break; case '\t': if( escape ) *ptr++ = '\\'; *ptr++ = '\\'; *ptr++ = 't'; break; default: *ptr++ = *in; break; } in++; } return ptr; } /* escape means we return '\N' for copy mode, otherwise we return just NULL */ char *pgsql_store_tags(struct keyval *tags, int escape) { static char *buffer; static int buflen; char *ptr; struct keyval *i; int first; int countlist = countList(tags); if( countlist == 0 ) { if( escape ) return "\\N"; else return NULL; } if( buflen <= countlist * 24 ) /* LE so 0 always matches */ { buflen = ((countlist * 24) | 4095) + 1; /* Round up to next page */ buffer = realloc( buffer, buflen ); } _restart: ptr = buffer; first = 1; *ptr++ = '{'; /* The lists are circular, exit when we reach the head again */ for( i=tags->next; i->key; i = i->next ) { int maxlen = (strlen(i->key) + strlen(i->value)) * 4; if( (ptr+maxlen-buffer) > (buflen-20) ) /* Almost overflowed? 
*/ { buflen <<= 1; buffer = realloc( buffer, buflen ); goto _restart; } if( !first ) *ptr++ = ','; *ptr++ = '"'; ptr = escape_tag( ptr, i->key, escape ); *ptr++ = '"'; *ptr++ = ','; *ptr++ = '"'; ptr = escape_tag( ptr, i->value, escape ); *ptr++ = '"'; first=0; } *ptr++ = '}'; *ptr++ = 0; return buffer; } /* Decodes a portion of an array literal from postgres */ /* Argument should point to beginning of literal, on return points to delimiter */ static const char *decode_upto( const char *src, char *dst ) { int quoted = (*src == '"'); if( quoted ) src++; while( quoted ? (*src != '"') : (*src != ',' && *src != '}') ) { if( *src == '\\' ) { switch( src[1] ) { case 'n': *dst++ = '\n'; break; case 't': *dst++ = '\t'; break; default: *dst++ = src[1]; break; } src+=2; } else *dst++ = *src++; } if( quoted ) src++; *dst = 0; return src; } static void pgsql_parse_tags( const char *string, struct keyval *tags ) { char key[1024]; char val[1024]; if( *string == '\0' ) return; if( *string++ != '{' ) return; while( *string != '}' ) { string = decode_upto( string, key ); /* String points to the comma */ string++; string = decode_upto( string, val ); /* String points to the comma or closing '}' */ addItem( tags, key, val, 0 ); if( *string == ',' ) string++; } } /* Parses an array of integers */ static void pgsql_parse_nodes(const char *src, osmid_t *nds, int nd_count ) { int count = 0; const char *string = src; if( *string++ != '{' ) return; while( *string != '}' ) { char *ptr; nds[count] = strtoosmid( string, &ptr, 10 ); string = ptr; if( *string == ',' ) string++; count++; } if( count != nd_count ) { fprintf( stderr, "parse_nodes problem: '%s' expected %d got %d\n", src, nd_count, count ); exit_nicely(); } } static int pgsql_endCopy( struct table_desc *table) { PGresult *res; PGconn *sql_conn; int stop; /* Terminate any pending COPY */ if (table->copyMode) { sql_conn = table->sql_conn; stop = PQputCopyEnd(sql_conn, NULL); if (stop != 1) { fprintf(stderr, "COPY_END for %s failed: 
%s\n", table->copy, PQerrorMessage(sql_conn)); exit_nicely(); } res = PQgetResult(sql_conn); if (PQresultStatus(res) != PGRES_COMMAND_OK) { fprintf(stderr, "COPY_END for %s failed: %s\n", table->copy, PQerrorMessage(sql_conn)); PQclear(res); exit_nicely(); } PQclear(res); table->copyMode = 0; } return 0; } static int pgsql_nodes_set(osmid_t id, double lat, double lon, struct keyval *tags) { /* Four params: id, lat, lon, tags */ char *paramValues[4]; char *buffer; if( node_table->copyMode ) { char *tag_buf = pgsql_store_tags(tags,1); int length = strlen(tag_buf) + 64; buffer = alloca( length ); #ifdef FIXED_POINT if( snprintf( buffer, length, "%" PRIdOSMID "\t%d\t%d\t%s\n", id, DOUBLE_TO_FIX(lat), DOUBLE_TO_FIX(lon), tag_buf ) > (length-10) ) { fprintf( stderr, "buffer overflow node id %" PRIdOSMID "\n", id); return 1; } #else if( snprintf( buffer, length, "%" PRIdOSMID "\t%.10f\t%.10f\t%s\n", id, lat, lon, tag_buf ) > (length-10) ) { fprintf( stderr, "buffer overflow node id %" PRIdOSMID "\n", id); return 1; } #endif return pgsql_CopyData(__FUNCTION__, node_table->sql_conn, buffer); } buffer = alloca(64); paramValues[0] = buffer; paramValues[1] = paramValues[0] + sprintf( paramValues[0], "%" PRIdOSMID, id ) + 1; #ifdef FIXED_POINT paramValues[2] = paramValues[1] + sprintf( paramValues[1], "%d", DOUBLE_TO_FIX(lat) ) + 1; sprintf( paramValues[2], "%d", DOUBLE_TO_FIX(lon) ); #else paramValues[2] = paramValues[1] + sprintf( paramValues[1], "%.10f", lat ) + 1; sprintf( paramValues[2], "%.10f", lon ); #endif paramValues[3] = pgsql_store_tags(tags,0); pgsql_execPrepared(node_table->sql_conn, "insert_node", 4, (const char * const *)paramValues, PGRES_COMMAND_OK); return 0; } static int middle_nodes_set(osmid_t id, double lat, double lon, struct keyval *tags) { ram_cache_nodes_set( id, lat, lon, tags ); return (out_options->flat_node_cache_enabled) ? 
persistent_cache_nodes_set(id, lat, lon) : pgsql_nodes_set(id, lat, lon, tags); } static int pgsql_nodes_get(struct osmNode *out, osmid_t id) { PGresult *res; char tmp[16]; char const *paramValues[1]; PGconn *sql_conn = node_table->sql_conn; /* Make sure we're out of copy mode */ pgsql_endCopy( node_table ); snprintf(tmp, sizeof(tmp), "%" PRIdOSMID, id); paramValues[0] = tmp; res = pgsql_execPrepared(sql_conn, "get_node", 1, paramValues, PGRES_TUPLES_OK); if (PQntuples(res) != 1) { PQclear(res); return 1; } #ifdef FIXED_POINT out->lat = FIX_TO_DOUBLE(strtol(PQgetvalue(res, 0, 0), NULL, 10)); out->lon = FIX_TO_DOUBLE(strtol(PQgetvalue(res, 0, 1), NULL, 10)); #else out->lat = strtod(PQgetvalue(res, 0, 0), NULL); out->lon = strtod(PQgetvalue(res, 0, 1), NULL); #endif PQclear(res); return 0; } /* Currently not used static int middle_nodes_get(struct osmNode *out, osmid_t id) { / * Check cache first * / if( ram_cache_nodes_get( out, id ) == 0 ) return 0; return (out_options->flat_node_cache_enabled) ? persistent_cache_nodes_get(out, id) : pgsql_nodes_get(out, id); }*/ /* This should be made more efficient by using an IN(ARRAY[]) construct */ static int pgsql_nodes_get_list(struct osmNode *nodes, osmid_t *ndids, int nd_count) { char tmp[16]; char *tmp2; int count, countDB, countPG, i,j; osmid_t *ndidspg; struct osmNode *nodespg; char const *paramValues[1]; PGresult *res; PGconn *sql_conn = node_table->sql_conn; count = 0; countDB = 0; tmp2 = malloc(sizeof(char)*nd_count*16); if (tmp2 == NULL) return 0; /*failed to allocate memory, return */ /* create a list of ids in tmp2 to query the database */ sprintf(tmp2, "{"); for( i=0; iflat_node_cache_enabled) ? 
persistent_cache_nodes_get_list(nodes, ndids, nd_count) : pgsql_nodes_get_list(nodes, ndids, nd_count); } static int pgsql_nodes_delete(osmid_t osm_id) { char const *paramValues[1]; char buffer[64]; /* Make sure we're out of copy mode */ pgsql_endCopy( node_table ); sprintf( buffer, "%" PRIdOSMID, osm_id ); paramValues[0] = buffer; pgsql_execPrepared(node_table->sql_conn, "delete_node", 1, paramValues, PGRES_COMMAND_OK ); return 0; } static int middle_nodes_delete(osmid_t osm_id) { return ((out_options->flat_node_cache_enabled) ? persistent_cache_nodes_set(osm_id, NAN, NAN) : pgsql_nodes_delete(osm_id)); } static int pgsql_node_changed(osmid_t osm_id) { char const *paramValues[1]; char buffer[64]; /* Make sure we're out of copy mode */ pgsql_endCopy( way_table ); pgsql_endCopy( rel_table ); sprintf( buffer, "%" PRIdOSMID, osm_id ); paramValues[0] = buffer; pgsql_execPrepared(way_table->sql_conn, "node_changed_mark", 1, paramValues, PGRES_COMMAND_OK ); pgsql_execPrepared(rel_table->sql_conn, "node_changed_mark", 1, paramValues, PGRES_COMMAND_OK ); return 0; } static int pgsql_ways_set(osmid_t way_id, osmid_t *nds, int nd_count, struct keyval *tags, int pending) { /* Three params: id, nodes, tags, pending */ char *paramValues[4]; char *buffer; if( way_table->copyMode ) { char *tag_buf = pgsql_store_tags(tags,1); char *node_buf = pgsql_store_nodes(nds, nd_count); int length = strlen(tag_buf) + strlen(node_buf) + 64; buffer = alloca(length); if( snprintf( buffer, length, "%" PRIdOSMID "\t%s\t%s\t%c\n", way_id, node_buf, tag_buf, pending?'t':'f' ) > (length-10) ) { fprintf( stderr, "buffer overflow way id %" PRIdOSMID "\n", way_id); return 1; } return pgsql_CopyData(__FUNCTION__, way_table->sql_conn, buffer); } buffer = alloca(64); paramValues[0] = buffer; paramValues[3] = paramValues[0] + sprintf( paramValues[0], "%" PRIdOSMID, way_id ) + 1; sprintf( paramValues[3], "%c", pending?'t':'f' ); paramValues[1] = pgsql_store_nodes(nds, nd_count); paramValues[2] = 
pgsql_store_tags(tags,0); pgsql_execPrepared(way_table->sql_conn, "insert_way", 4, (const char * const *)paramValues, PGRES_COMMAND_OK); return 0; } /* Caller is responsible for freeing nodesptr & resetList(tags) */ static int pgsql_ways_get(osmid_t id, struct keyval *tags, struct osmNode **nodes_ptr, int *count_ptr) { PGresult *res; char tmp[16]; char const *paramValues[1]; PGconn *sql_conn = way_table->sql_conn; int num_nodes; osmid_t *list; /* Make sure we're out of copy mode */ pgsql_endCopy( way_table ); snprintf(tmp, sizeof(tmp), "%" PRIdOSMID, id); paramValues[0] = tmp; res = pgsql_execPrepared(sql_conn, "get_way", 1, paramValues, PGRES_TUPLES_OK); if (PQntuples(res) != 1) { PQclear(res); return 1; } pgsql_parse_tags( PQgetvalue(res, 0, 1), tags ); num_nodes = strtol(PQgetvalue(res, 0, 2), NULL, 10); list = alloca(sizeof(osmid_t)*num_nodes ); *nodes_ptr = malloc(sizeof(struct osmNode) * num_nodes); pgsql_parse_nodes( PQgetvalue(res, 0, 0), list, num_nodes); *count_ptr = out_options->flat_node_cache_enabled ? persistent_cache_nodes_get_list(*nodes_ptr, list, num_nodes) : pgsql_nodes_get_list( *nodes_ptr, list, num_nodes); PQclear(res); return 0; } static int pgsql_ways_get_list(osmid_t *ids, int way_count, osmid_t **way_ids, struct keyval *tags, struct osmNode **nodes_ptr, int *count_ptr) { char tmp[16]; char *tmp2; int count, countPG, i, j; osmid_t *wayidspg; char const *paramValues[1]; int num_nodes; osmid_t *list; PGresult *res; PGconn *sql_conn = way_table->sql_conn; *way_ids = malloc( sizeof(osmid_t) * (way_count + 1)); if (way_count == 0) return 0; tmp2 = malloc(sizeof(char)*way_count*16); if (tmp2 == NULL) return 0; /*failed to allocate memory, return */ /* create a list of ids in tmp2 to query the database */ sprintf(tmp2, "{"); for( i=0; iflat_node_cache_enabled ? 
persistent_cache_nodes_get_list(nodes_ptr[count], list, num_nodes) : pgsql_nodes_get_list( nodes_ptr[count], list, num_nodes); count++; initList(&(tags[count])); } } } PQclear(res); free(tmp2); free(wayidspg); return count; } static int pgsql_ways_done(osmid_t id) { char tmp[16]; char const *paramValues[1]; PGconn *sql_conn = way_table->sql_conn; /* Make sure we're out of copy mode */ pgsql_endCopy( way_table ); snprintf(tmp, sizeof(tmp), "%" PRIdOSMID, id); paramValues[0] = tmp; pgsql_execPrepared(sql_conn, "way_done", 1, paramValues, PGRES_COMMAND_OK); return 0; } static int pgsql_ways_delete(osmid_t osm_id) { char const *paramValues[1]; char buffer[64]; /* Make sure we're out of copy mode */ pgsql_endCopy( way_table ); sprintf( buffer, "%" PRIdOSMID, osm_id ); paramValues[0] = buffer; pgsql_execPrepared(way_table->sql_conn, "delete_way", 1, paramValues, PGRES_COMMAND_OK ); return 0; } static void pgsql_iterate_ways(int (*callback)(osmid_t id, struct keyval *tags, struct osmNode *nodes, int count, int exists)) { int noProcs = out_options->num_procs; int pid = 0; PGresult *res_ways; int i, p, count = 0; /* The flag we pass to indicate that the way in question might exist already in the database */ int exists = Append; time_t start, end; time(&start); #if HAVE_MMAP struct progress_info *info = 0; if(noProcs > 1) { info = mmap(0, sizeof(struct progress_info)*noProcs, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0); info[0].finished = HELPER_STATE_CONNECTED; for (i = 1; i < noProcs; i++) { info[i].finished = HELPER_STATE_UNINITIALIZED; /* Register that the process was not yet initialised; */ } } #endif fprintf(stderr, "\nGoing over pending ways...\n"); /* Make sure we're out of copy mode */ pgsql_endCopy( way_table ); if (out_options->flat_node_cache_enabled) shutdown_node_persistent_cache(); res_ways = pgsql_execPrepared(way_table->sql_conn, "pending_ways", 0, NULL, PGRES_TUPLES_OK); fprintf(stderr, "\t%i ways are pending\n", PQntuples(res_ways)); /** * To 
speed up processing of pending ways, fork noProcs worker processes * each of which independently goes through an equal subset of the pending ways array */ fprintf(stderr, "\nUsing %i helper-processes\n", noProcs); #ifdef HAVE_FORK for (p = 1; p < noProcs; p++) { pid=fork(); if (pid==0) { #if HAVE_MMAP info[p].finished = HELPER_STATE_FORKED; #endif break; } if (pid==-1) { #if HAVE_MMAP info[p].finished = HELPER_STATE_FAILED; fprintf(stderr,"WARNING: Failed to fork helper processes %i. Trying to recover.\n", p); #else fprintf(stderr,"ERROR: Failed to fork helper processes. Can't recover! \n"); exit_nicely(); #endif } } #endif if ((pid == 0) && (noProcs > 1)) { /* After forking, need to reconnect to the postgresql db */ if ((pgsql_connect(out_options) != 0) || (out_options->out->connect(out_options, 1) != 0)) { #if HAVE_MMAP info[p].finished = HELPER_STATE_FAILED; #else fprintf(stderr,"\n\n!!!FATAL: Helper process failed, but can't compensate. Your DB will be broken and corrupt!!!!\n\n"); #endif exit_nicely(); }; } else { p = 0; } if (out_options->flat_node_cache_enabled) init_node_persistent_cache(out_options,1); /* at this point we always want to be in append mode, to not delete and recreate the node cache file */ /* Only start an extended transaction on the ways table, * which should cover the bulk of the update statements. * The nodes table should not be written to in this phase. * The relations table can't be wrapped in an extended * transaction, as with prallel processing it may deadlock. * Updating a way will trigger an update of the pending status * on connected relations. This should not be as many updates, * so in combination with the synchronous_comit = off it should be fine. 
* */ if (tables[t_way].start) { pgsql_endCopy(&tables[t_way]); pgsql_exec(tables[t_way].sql_conn, PGRES_COMMAND_OK, "%s", tables[t_way].start); tables[t_way].transactionMode = 1; } #if HAVE_MMAP if (noProcs > 1) { info[p].finished = HELPER_STATE_CONNECTED; /* Syncronize all processes to make sure they have all run through the initialisation steps */ int all_processes_initialised = 0; while (all_processes_initialised == 0) { all_processes_initialised = 1; for (i = 0; i < noProcs; i++) { if (info[i].finished < 0) { all_processes_initialised = 0; sleep(1); } } } /* As we process the pending ways in steps of noProcs, we need to make sure that all processes correctly forked and have connected to the db. Otherwise we need to readjust the step size of going through the pending ways array */ int noProcsTmp = noProcs; int pTmp = p; for (i = 0; i < noProcs; i++) { if (info[i].finished == HELPER_STATE_FAILED) { noProcsTmp--; if (i < p) pTmp--; } } info[p].finished = HELPER_STATE_RUNNING; p = pTmp; /* reset the process number to account for failed processes */ /* As we have potentially changed the process number assignment, we need to synchronize on all processes having performed the reassignment as otherwise multiple process might have the same number and overwrite the info fields incorrectly. */ all_processes_initialised = 0; while (all_processes_initialised == 0) { all_processes_initialised = 1; for (i = 0; i < noProcs; i++) { if (info[i].finished == HELPER_STATE_CONNECTED) { /* Process is connected, but hasn't performed the re-assignment of p. 
*/ all_processes_initialised = 0; sleep(1); break; } } } noProcs = noProcsTmp; } #endif /* some spaces at end, so that processings outputs get cleaned if already existing */ fprintf(stderr, "\rHelper process %i out of %i initialised \n", p, noProcs); /* Use a stride length of the number of worker processes, starting with an offset for each worker process p */ for (i = p; i < PQntuples(res_ways); i+= noProcs) { osmid_t id = strtoosmid(PQgetvalue(res_ways, i, 0), NULL, 10); struct keyval tags; struct osmNode *nodes; int nd_count; if (count++ %1000 == 0) { time(&end); #if HAVE_MMAP if(info) { double rate = 0; int n, total = 0, finished = 0; struct progress_info f; f.start = start; f.end = end; f.count = count; f.finished = HELPER_STATE_RUNNING; info[p] = f; for(n = 0; n < noProcs; ++n) { f = info[n]; total += f.count; finished += f.finished; if(f.end > f.start) rate += (double)f.count / (double)(f.end - f.start); } fprintf(stderr, "\rprocessing way (%dk) at %.2fk/s (done %d of %d)", total/1000, rate/1000.0, finished, noProcs); } else #endif { fprintf(stderr, "\rprocessing way (%dk) at %.2fk/s", count/1000, end > start ? 
((double)count / 1000.0 / (double)(end - start)) : 0); } } initList(&tags); if( pgsql_ways_get(id, &tags, &nodes, &nd_count) ) continue; callback(id, &tags, nodes, nd_count, exists); pgsql_ways_done( id ); free(nodes); resetList(&tags); } if (tables[t_way].stop && tables[t_way].transactionMode) { pgsql_exec(tables[t_way].sql_conn, PGRES_COMMAND_OK, "%s", tables[t_way].stop); tables[t_way].transactionMode = 0; } time(&end); #if HAVE_MMAP if(info) { struct progress_info f; f.start = start; f.end = end; f.count = count; f.finished = 1; info[p] = f; } #endif fprintf(stderr, "\rProcess %i finished processing %i ways in %i sec\n", p, count, (int)(end - start)); if ((pid == 0) && (noProcs > 1)) { pgsql_cleanup(); out_options->out->close(1); if (out_options->flat_node_cache_enabled) shutdown_node_persistent_cache(); exit(0); } else { for (p = 0; p < noProcs; p++) wait(NULL); fprintf(stderr, "\nAll child processes exited\n"); } #if HAVE_MMAP munmap(info, sizeof(struct progress_info)*noProcs); #endif fprintf(stderr, "\n"); time(&end); if (end - start > 0) fprintf(stderr, "%i Pending ways took %ds at a rate of %.2f/s\n",PQntuples(res_ways), (int)(end - start), ((double)PQntuples(res_ways) / (double)(end - start))); PQclear(res_ways); } static int pgsql_way_changed(osmid_t osm_id) { char const *paramValues[1]; char buffer[64]; /* Make sure we're out of copy mode */ pgsql_endCopy( rel_table ); sprintf( buffer, "%" PRIdOSMID, osm_id ); paramValues[0] = buffer; pgsql_execPrepared(rel_table->sql_conn, "way_changed_mark", 1, paramValues, PGRES_COMMAND_OK ); return 0; } static int pgsql_rels_set(osmid_t id, struct member *members, int member_count, struct keyval *tags) { /* Params: id, way_off, rel_off, parts, members, tags */ char *paramValues[6]; char *buffer; int i; struct keyval member_list; char buf[64]; osmid_t node_parts[member_count], way_parts[member_count], rel_parts[member_count]; int node_count = 0, way_count = 0, rel_count = 0; osmid_t all_parts[member_count]; int 
all_count = 0; initList( &member_list ); for( i=0; icopyMode ) { char *tag_buf = strdup(pgsql_store_tags(tags,1)); char *member_buf = pgsql_store_tags(&member_list,1); char *parts_buf = pgsql_store_nodes(all_parts, all_count); int length = strlen(member_buf) + strlen(tag_buf) + strlen(parts_buf) + 64; buffer = alloca(length); if( snprintf( buffer, length, "%" PRIdOSMID "\t%d\t%d\t%s\t%s\t%s\tf\n", id, node_count, node_count+way_count, parts_buf, member_buf, tag_buf ) > (length-10) ) { fprintf( stderr, "buffer overflow relation id %" PRIdOSMID "\n", id); return 1; } free(tag_buf); resetList(&member_list); return pgsql_CopyData(__FUNCTION__, rel_table->sql_conn, buffer); } buffer = alloca(64); paramValues[0] = buffer; paramValues[1] = paramValues[0] + sprintf( paramValues[0], "%" PRIdOSMID, id ) + 1; paramValues[2] = paramValues[1] + sprintf( paramValues[1], "%d", node_count ) + 1; sprintf( paramValues[2], "%d", node_count+way_count ); paramValues[3] = pgsql_store_nodes(all_parts, all_count); paramValues[4] = pgsql_store_tags(&member_list,0); if( paramValues[4] ) paramValues[4] = strdup(paramValues[4]); paramValues[5] = pgsql_store_tags(tags,0); pgsql_execPrepared(rel_table->sql_conn, "insert_rel", 6, (const char * const *)paramValues, PGRES_COMMAND_OK); if( paramValues[4] ) free(paramValues[4]); resetList(&member_list); return 0; } /* Caller is responsible for freeing members & resetList(tags) */ static int pgsql_rels_get(osmid_t id, struct member **members, int *member_count, struct keyval *tags) { PGresult *res; char tmp[16]; char const *paramValues[1]; PGconn *sql_conn = rel_table->sql_conn; struct keyval member_temp; char tag; int num_members; struct member *list; int i=0; struct keyval *item; /* Make sure we're out of copy mode */ pgsql_endCopy( rel_table ); snprintf(tmp, sizeof(tmp), "%" PRIdOSMID, id); paramValues[0] = tmp; res = pgsql_execPrepared(sql_conn, "get_rel", 1, paramValues, PGRES_TUPLES_OK); /* Fields are: members, tags, member_count */ if 
(PQntuples(res) != 1) { PQclear(res); return 1; } pgsql_parse_tags( PQgetvalue(res, 0, 1), tags ); initList(&member_temp); pgsql_parse_tags( PQgetvalue(res, 0, 0), &member_temp ); num_members = strtol(PQgetvalue(res, 0, 2), NULL, 10); list = malloc( sizeof(struct member)*num_members ); while( (item = popItem(&member_temp)) ) { if( i >= num_members ) { fprintf(stderr, "Unexpected member_count reading relation %" PRIdOSMID "\n", id); exit_nicely(); } tag = item->key[0]; list[i].type = (tag == 'n')?OSMTYPE_NODE:(tag == 'w')?OSMTYPE_WAY:(tag == 'r')?OSMTYPE_RELATION:-1; list[i].id = strtoosmid(item->key+1, NULL, 10 ); list[i].role = strdup( item->value ); freeItem(item); i++; } *members = list; *member_count = num_members; PQclear(res); return 0; } static int pgsql_rels_done(osmid_t id) { char tmp[16]; char const *paramValues[1]; PGconn *sql_conn = rel_table->sql_conn; /* Make sure we're out of copy mode */ pgsql_endCopy( rel_table ); snprintf(tmp, sizeof(tmp), "%" PRIdOSMID, id); paramValues[0] = tmp; pgsql_execPrepared(sql_conn, "rel_done", 1, paramValues, PGRES_COMMAND_OK); return 0; } static int pgsql_rels_delete(osmid_t osm_id) { char const *paramValues[1]; char buffer[64]; /* Make sure we're out of copy mode */ pgsql_endCopy( rel_table ); sprintf( buffer, "%" PRIdOSMID, osm_id ); paramValues[0] = buffer; pgsql_execPrepared(rel_table->sql_conn, "delete_rel", 1, paramValues, PGRES_COMMAND_OK ); return 0; } static void pgsql_iterate_relations(int (*callback)(osmid_t id, struct member *members, int member_count, struct keyval *tags, int exists)) { PGresult *res_rels; int noProcs = out_options->num_procs; int pid; int i, p, count = 0; /* The flag we pass to indicate that the way in question might exist already in the database */ int exists = Append; time_t start, end; time(&start); #if HAVE_MMAP struct progress_info *info = 0; if(noProcs > 1) { info = mmap(0, sizeof(struct progress_info)*noProcs, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0); info[0].finished 
= HELPER_STATE_CONNECTED; for (i = 1; i < noProcs; i++) { info[i].finished = HELPER_STATE_UNINITIALIZED; /* Register that the process was not yet initialised; */ } } #endif fprintf(stderr, "\nGoing over pending relations...\n"); /* Make sure we're out of copy mode */ pgsql_endCopy( rel_table ); if (out_options->flat_node_cache_enabled) shutdown_node_persistent_cache(); res_rels = pgsql_execPrepared(rel_table->sql_conn, "pending_rels", 0, NULL, PGRES_TUPLES_OK); fprintf(stderr, "\t%i relations are pending\n", PQntuples(res_rels)); fprintf(stderr, "\nUsing %i helper-processes\n", noProcs); pid = 0; #ifdef HAVE_FORK for (p = 1; p < noProcs; p++) { pid=fork(); if (pid==0) { #if HAVE_MMAP info[p].finished = HELPER_STATE_FORKED; #endif break; } if (pid==-1) { #if HAVE_MMAP info[p].finished = HELPER_STATE_FAILED; fprintf(stderr,"WARNING: Failed to fork helper processes %i. Trying to recover.\n", p); #else fprintf(stderr,"ERROR: Failed to fork helper processes. Can't recover! \n"); exit_nicely(); #endif } } #endif if ((pid == 0) && (noProcs > 1)) { if ((out_options->out->connect(out_options, 0) != 0) || (pgsql_connect(out_options) != 0)) { #if HAVE_MMAP info[p].finished = HELPER_STATE_FAILED; #endif exit_nicely(); }; } else { p = 0; } if (out_options->flat_node_cache_enabled) init_node_persistent_cache(out_options, 1); /* at this point we always want to be in append mode, to not delete and recreate the node cache file */ #if HAVE_MMAP if (noProcs > 1) { info[p].finished = HELPER_STATE_CONNECTED; /* Syncronize all processes to make sure they have all run through the initialisation steps */ int all_processes_initialised = 0; while (all_processes_initialised == 0) { all_processes_initialised = 1; for (i = 0; i < noProcs; i++) { if (info[i].finished < 0) { all_processes_initialised = 0; sleep(1); } } } /* As we process the pending ways in steps of noProcs, we need to make sure that all processes correctly forked and have connected to the db. 
Otherwise we need to readjust the step size of going through the pending ways array */ int noProcsTmp = noProcs; int pTmp = p; for (i = 0; i < noProcs; i++) { if (info[i].finished == HELPER_STATE_FAILED) { noProcsTmp--; if (i < p) pTmp--; } } info[p].finished = HELPER_STATE_RUNNING; p = pTmp; /* reset the process number to account for failed processes */ /* As we have potentially changed the process number assignment, we need to synchronize on all processes having performed the reassignment as otherwise multiple process might have the same number and overwrite the info fields incorrectly. */ all_processes_initialised = 0; while (all_processes_initialised == 0) { all_processes_initialised = 1; for (i = 0; i < noProcs; i++) { if (info[i].finished == HELPER_STATE_CONNECTED) { /* Process is connected, but hasn't performed the re-assignment of p. */ all_processes_initialised = 0; sleep(1); break; } } } noProcs = noProcsTmp; } #endif for (i = p; i < PQntuples(res_rels); i+= noProcs) { osmid_t id = strtoosmid(PQgetvalue(res_rels, i, 0), NULL, 10); struct keyval tags; struct member *members; int member_count; if (count++ %10 == 0) { time(&end); #if HAVE_MMAP if(info) { double rate = 0; int n, total = 0, finished = 0; struct progress_info f; f.start = start; f.end = end; f.count = count; f.finished = HELPER_STATE_RUNNING; info[p] = f; for(n = 0; n < noProcs; ++n) { f = info[n]; total += f.count; finished += f.finished; if(f.end > f.start) rate += (double)f.count / (double)(f.end - f.start); } fprintf(stderr, "\rprocessing relation (%d) at %.2f/s (done %d of %d)", total, rate, finished, noProcs); } else #endif { fprintf(stderr, "\rprocessing relation (%d) at %.2f/s", count, end > start ? 
((double)count / (double)(end - start)) : 0); } } initList(&tags); if( pgsql_rels_get(id, &members, &member_count, &tags) ) continue; callback(id, members, member_count, &tags, exists); pgsql_rels_done( id ); free(members); resetList(&tags); } time(&end); #if HAVE_MMAP if(info) { struct progress_info f; f.start = start; f.end = end; f.count = count; f.finished = 1; info[p] = f; } #endif fprintf(stderr, "\rProcess %i finished processing %i relations in %i sec\n", p, count, (int)(end - start)); if ((pid == 0) && (noProcs > 1)) { pgsql_cleanup(); out_options->out->close(0); if (out_options->flat_node_cache_enabled) shutdown_node_persistent_cache(); exit(0); } else { for (p = 0; p < noProcs; p++) wait(NULL); fprintf(stderr, "\nAll child processes exited\n"); } #if HAVE_MMAP munmap(info, sizeof(struct progress_info)*noProcs); #endif time(&end); if (end - start > 0) fprintf(stderr, "%i Pending relations took %ds at a rate of %.2f/s\n",PQntuples(res_rels), (int)(end - start), ((double)PQntuples(res_rels) / (double)(end - start))); PQclear(res_rels); fprintf(stderr, "\n"); } static int pgsql_rel_changed(osmid_t osm_id) { char const *paramValues[1]; char buffer[64]; /* Make sure we're out of copy mode */ pgsql_endCopy( rel_table ); sprintf( buffer, "%" PRIdOSMID, osm_id ); paramValues[0] = buffer; pgsql_execPrepared(rel_table->sql_conn, "rel_changed_mark", 1, paramValues, PGRES_COMMAND_OK ); return 0; } static void pgsql_analyze(void) { int i; for (i=0; iprefix) { strcpy(dest, options->prefix); dest += strlen(options->prefix); copied = 1; } source+=2; continue; } else if (*(source+1) == 't') { if (options->tblsslim_data) { strcpy(dest, options->tblsslim_data); dest += strlen(options->tblsslim_data); copied = 1; } source+=2; continue; } else if (*(source+1) == 'i') { if (options->tblsslim_index) { strcpy(dest, options->tblsslim_index); dest += strlen(options->tblsslim_index); copied = 1; } source+=2; continue; } else if (*(source+1) == 'm') { if (options->unlogged) { 
strcpy(dest, "UNLOGGED"); dest += 8; copied = 1; } source+=2; continue; } } *(dest++) = *(source++); } *dest = 0; *string = strdup(buffer); } static int build_indexes; static int pgsql_start(const struct output_options *options) { PGresult *res; int i; int dropcreate = !options->append; char * sql; scale = options->scale; Append = options->append; out_options = options; init_node_ram_cache( options->alloc_chunkwise | ALLOC_LOSSY, options->cache, scale); if (options->flat_node_cache_enabled) init_node_persistent_cache(options, options->append); fprintf(stderr, "Mid: pgsql, scale=%d cache=%d\n", scale, options->cache); /* We use a connection per table to enable the use of COPY */ for (i=0; iconninfo); /* Check to see that the backend connection was successfully made */ if (PQstatus(sql_conn) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s\n", PQerrorMessage(sql_conn)); exit_nicely(); } tables[i].sql_conn = sql_conn; /* * To allow for parallelisation, the second phase (iterate_ways), cannot be run * in an extended transaction and each update statement is its own transaction. * Therefore commit rate of postgresql is very important to ensure high speed. * If fsync is enabled to ensure safe transactions, the commit rate can be very low. * To compensate for this, one can set the postgresql parameter synchronous_commit * to off. This means an update statement returns to the client as success before the * transaction is saved to disk via fsync, which in return allows to bunch up multiple * transactions into a single fsync. This may result in some data loss in the case of a * database crash. However, as we don't currently have the ability to restart a full osm2pgsql * import session anyway, this is fine. Diff imports are also not effected, as the next * diff import would simply deal with all pending ways that were not previously finished. * This parameter does not effect safety from data corruption on the back-end. 
*/ pgsql_exec(sql_conn, PGRES_COMMAND_OK, "SET synchronous_commit TO off;"); /* Not really the right place for this test, but we need a live * connection that not used for anything else yet, and we'd like to * warn users *before* we start doing mountains of work */ if (i == t_node) { res = PQexec(sql_conn, "select 1 from pg_opclass where opcname='gist__intbig_ops'" ); if(PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1) { /* intarray is problematic now; causes at least postgres 8.4 * to not use the index on nodes[]/parts[] which slows diff * updates to a crawl! * If someone find a way to fix this rather than bow out here, * please do.*/ fprintf(stderr, "\n" "The target database has the intarray contrib module loaded.\n" "While required for earlier versions of osm2pgsql, intarray \n" "is now unnecessary and will interfere with osm2pgsql's array\n" "handling. Please use a database without intarray.\n\n"); exit_nicely(); } PQclear(res); if (options->append) { sql = malloc (2048); snprintf(sql, 2047, "SELECT id FROM %s LIMIT 1", tables[t_node].name); res = PQexec(sql_conn, sql ); free(sql); sql = NULL; if(PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1) { int size = PQfsize(res, 0); if (size != sizeof(osmid_t)) { fprintf(stderr, "\n" "The target database has been created with %dbit ID fields,\n" "but this version of osm2pgsql has been compiled to use %ldbit IDs.\n" "You cannot append data to this database with this program.\n" "Either re-create the database or use a matching osm2pgsql.\n\n", size * 8, sizeof(osmid_t) * 8); exit_nicely(); } } PQclear(res); } if(!options->append) build_indexes = 1; } if (dropcreate) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "DROP TABLE IF EXISTS %s", tables[i].name); } if (tables[i].start) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", tables[i].start); tables[i].transactionMode = 1; } if (dropcreate && tables[i].create) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", tables[i].create); if 
(tables[i].create_index) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", tables[i].create_index); } } if (tables[i].prepare) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", tables[i].prepare); } if (tables[i].prepare_intarray) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", tables[i].prepare_intarray); } if (tables[i].copy) { pgsql_exec(sql_conn, PGRES_COPY_IN, "%s", tables[i].copy); tables[i].copyMode = 1; } } return 0; } static void pgsql_commit(void) { int i; for (i=0; isql_conn; fprintf(stderr, "Stopping table: %s\n", table->name); pgsql_endCopy(table); time(&start); if (!out_options->droptemp) { if (build_indexes && table->array_indexes) { char *buffer = (char *) malloc(strlen(table->array_indexes) + 99); /* we need to insert before the TABLESPACE setting, if any */ char *insertpos = strstr(table->array_indexes, "TABLESPACE"); if (!insertpos) insertpos = strchr(table->array_indexes, ';'); /* automatically insert FASTUPDATE=OFF when creating, indexes for PostgreSQL 8.4 and higher see http://lists.openstreetmap.org/pipermail/dev/2011-January/021704.html */ if (insertpos && PQserverVersion(sql_conn) >= 80400) { char old = *insertpos; fprintf(stderr, "Building index on table: %s (fastupdate=off)\n", table->name); *insertpos = 0; /* temporary null byte for following strcpy operation */ strcpy(buffer, table->array_indexes); *insertpos = old; /* restore old content */ strcat(buffer, " WITH (FASTUPDATE=OFF)"); strcat(buffer, insertpos); } else { fprintf(stderr, "Building index on table: %s\n", table->name); strcpy(buffer, table->array_indexes); } pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", buffer); free(buffer); } } else { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "drop table %s", table->name); } PQfinish(sql_conn); table->sql_conn = NULL; time(&end); fprintf(stderr, "Stopped table: %s in %is\n", table->name, (int)(end - start)); return NULL; } static void pgsql_stop(void) { int i; #ifdef HAVE_PTHREAD pthread_t threads[num_tables]; #endif free_node_ram_cache(); if 
(out_options->flat_node_cache_enabled) shutdown_node_persistent_cache(); #ifdef HAVE_PTHREAD for (i=0; i #include #include #include #include #include #include "osmtypes.h" #include "middle.h" #include "middle-ram.h" #include "node-ram-cache.h" #include "output-pgsql.h" /* Store +-20,000km Mercator co-ordinates as fixed point 32bit number with maximum precision */ /* Scale is chosen such that 40,000 * SCALE < 2^32 */ #define FIXED_POINT struct ramWay { struct keyval *tags; osmid_t *ndids; int pending; }; struct ramRel { struct keyval *tags; struct member *members; int member_count; }; /* Object storage now uses 2 levels of storage arrays. * * - Low level storage of 2^16 (~65k) objects in an indexed array * These are allocated dynamically when we need to first store data with * an ID in this block * * - Fixed array of 2^(32 - 16) = 65k pointers to the dynamically allocated arrays. * * This allows memory usage to be efficient and scale dynamically without needing to * hard code maximum IDs. We now support an ID range of -2^31 to +2^31. * The negative IDs often occur in non-uploaded JOSM data or other data import scripts. 
* */ #define BLOCK_SHIFT 10 #define PER_BLOCK (1 << BLOCK_SHIFT) #define NUM_BLOCKS (1 << (32 - BLOCK_SHIFT)) static struct ramWay *ways[NUM_BLOCKS]; static struct ramRel *rels[NUM_BLOCKS]; static int node_blocks; static int way_blocks; static int way_out_count; static int rel_out_count; static osmid_t id2block(osmid_t id) { /* + NUM_BLOCKS/2 allows for negative IDs */ return (id >> BLOCK_SHIFT) + NUM_BLOCKS/2; } static osmid_t id2offset(osmid_t id) { return id & (PER_BLOCK-1); } static int block2id(int block, int offset) { return ((block - NUM_BLOCKS/2) << BLOCK_SHIFT) + offset; } #define UNUSED __attribute__ ((unused)) static int ram_ways_set(osmid_t id, osmid_t *nds, int nd_count, struct keyval *tags, int pending) { int block = id2block(id); int offset = id2offset(id); struct keyval *p; if (!ways[block]) { ways[block] = calloc(PER_BLOCK, sizeof(struct ramWay)); if (!ways[block]) { fprintf(stderr, "Error allocating ways\n"); exit_nicely(); } way_blocks++; } if (ways[block][offset].ndids) { free(ways[block][offset].ndids); ways[block][offset].ndids = NULL; } /* Copy into length prefixed array */ ways[block][offset].ndids = malloc( (nd_count+1)*sizeof(osmid_t) ); memcpy( ways[block][offset].ndids+1, nds, nd_count*sizeof(osmid_t) ); ways[block][offset].ndids[0] = nd_count; ways[block][offset].pending = pending; if (!ways[block][offset].tags) { p = malloc(sizeof(struct keyval)); if (p) { initList(p); ways[block][offset].tags = p; } else { fprintf(stderr, "%s malloc failed\n", __FUNCTION__); exit_nicely(); } } else resetList(ways[block][offset].tags); cloneList(ways[block][offset].tags, tags); return 0; } static int ram_relations_set(osmid_t id, struct member *members, int member_count, struct keyval *tags) { struct keyval *p; struct member *ptr; int block = id2block(id); int offset = id2offset(id); if (!rels[block]) { rels[block] = calloc(PER_BLOCK, sizeof(struct ramRel)); if (!rels[block]) { fprintf(stderr, "Error allocating rels\n"); exit_nicely(); } } if 
(!rels[block][offset].tags) { p = malloc(sizeof(struct keyval)); if (p) { initList(p); rels[block][offset].tags = p; } else { fprintf(stderr, "%s malloc failed\n", __FUNCTION__); exit_nicely(); } } else resetList(rels[block][offset].tags); cloneList(rels[block][offset].tags, tags); if (!rels[block][offset].members) free( rels[block][offset].members ); ptr = malloc(sizeof(struct member) * member_count); if (ptr) { memcpy( ptr, members, sizeof(struct member) * member_count ); rels[block][offset].member_count = member_count; rels[block][offset].members = ptr; } else { fprintf(stderr, "%s malloc failed\n", __FUNCTION__); exit_nicely(); } return 0; } static int ram_nodes_get_list(struct osmNode *nodes, osmid_t *ndids, int nd_count) { int i, count; count = 0; for( i=0; i=0; block--) { if (!rels[block]) continue; for (offset=0; offset < PER_BLOCK; offset++) { if (rels[block][offset].members) { osmid_t id = block2id(block, offset); rel_out_count++; if (rel_out_count % 10 == 0) fprintf(stderr, "\rWriting relation (%u)", rel_out_count); callback(id, rels[block][offset].members, rels[block][offset].member_count, rels[block][offset].tags, 0); } free(rels[block][offset].members); rels[block][offset].members = NULL; resetList(rels[block][offset].tags); free(rels[block][offset].tags); rels[block][offset].tags=NULL; } free(rels[block]); rels[block] = NULL; } fprintf(stderr, "\rWriting relation (%u)\n", rel_out_count); } static void ram_iterate_ways(int (*callback)(osmid_t id, struct keyval *tags, struct osmNode *nodes, int count, int exists)) { int block, offset, ndCount = 0; struct osmNode *nodes; fprintf(stderr, "\n"); for(block=NUM_BLOCKS-1; block>=0; block--) { if (!ways[block]) continue; for (offset=0; offset < PER_BLOCK; offset++) { if (ways[block][offset].ndids) { way_out_count++; if (way_out_count % 1000 == 0) fprintf(stderr, "\rWriting way (%uk)", way_out_count/1000); if (ways[block][offset].pending) { /* First element contains number of nodes */ nodes = malloc( 
sizeof(struct osmNode) * ways[block][offset].ndids[0]); ndCount = ram_nodes_get_list(nodes, ways[block][offset].ndids+1, ways[block][offset].ndids[0]); if (nodes) { osmid_t id = block2id(block, offset); callback(id, ways[block][offset].tags, nodes, ndCount, 0); free(nodes); } ways[block][offset].pending = 0; } if (ways[block][offset].tags) { resetList(ways[block][offset].tags); free(ways[block][offset].tags); ways[block][offset].tags = NULL; } if (ways[block][offset].ndids) { free(ways[block][offset].ndids); ways[block][offset].ndids = NULL; } } } } fprintf(stderr, "\rWriting way (%uk)\n", way_out_count/1000); } /* Caller must free nodes_ptr and resetList(tags_ptr) */ static int ram_ways_get(osmid_t id, struct keyval *tags_ptr, struct osmNode **nodes_ptr, int *count_ptr) { int block = id2block(id), offset = id2offset(id), ndCount = 0; struct osmNode *nodes; if (!ways[block]) return 1; if (ways[block][offset].ndids) { /* First element contains number of nodes */ nodes = malloc( sizeof(struct osmNode) * ways[block][offset].ndids[0]); ndCount = ram_nodes_get_list(nodes, ways[block][offset].ndids+1, ways[block][offset].ndids[0]); if (ndCount) { cloneList( tags_ptr, ways[block][offset].tags ); *nodes_ptr = nodes; *count_ptr = ndCount; return 0; } free(nodes); } return 1; } static int ram_ways_get_list(osmid_t *ids, int way_count, osmid_t **way_ids, struct keyval *tag_ptr, struct osmNode **node_ptr, int *count_ptr) { int count = 0; int i; *way_ids = malloc( sizeof(osmid_t) * (way_count + 1)); initList(&(tag_ptr[count])); for (i = 0; i < way_count; i++) { if (ram_ways_get(ids[i], &(tag_ptr[count]), &(node_ptr[count]), &(count_ptr[count])) == 0) { (*way_ids)[count] = ids[i]; count++; initList(&(tag_ptr[count])); } } return count; } /* Marks the way so that iterate ways skips it */ static int ram_ways_done(osmid_t id) { int block = id2block(id), offset = id2offset(id); if (!ways[block]) return 1; ways[block][offset].pending = 0; return 0; } static void ram_analyze(void) { 
/* No need */ } static void ram_end(void) { /* No need */ } static int ram_start(const struct output_options *options) { /* latlong has a range of +-180, mercator +-20000 The fixed poing scaling needs adjusting accordingly to be stored accurately in an int */ scale = options->scale; init_node_ram_cache( options->alloc_chunkwise, options->cache, scale); fprintf( stderr, "Mid: Ram, scale=%d\n", scale ); return 0; } static void ram_stop(void) { int i, j; free_node_ram_cache(); for (i=0; i #include #include #include #include #include #include #include #include #include #include #include #include "osmtypes.h" #include "output.h" #include "node-persistent-cache.h" #include "node-ram-cache.h" #include "binarysearcharray.h" void exit_nicely() { fprintf(stderr, "Error occurred, cleaning up\n"); exit(1); } void test_get_node_list(int itterations, int max_size, int process_number) { int i, j, node_cnt, node_cnt_total; struct osmNode *nodes; struct timeval start, stop; struct timeval start_overall, stop_overall; osmid_t *osmids; node_cnt_total = 0; gettimeofday(&start_overall, NULL); for (i = 0; i < itterations; i++) { node_cnt = random() % max_size; node_cnt_total += node_cnt; printf("Process %i: Getting %i nodes....\n", process_number, node_cnt); nodes = malloc(sizeof(struct osmNode) * node_cnt); osmids = malloc(sizeof(osmid_t) * node_cnt); for (j = 0; j < node_cnt; j++) { osmids[j] = random() % (1 << 31); } gettimeofday(&start, NULL); persistent_cache_nodes_get_list(nodes,osmids,node_cnt); gettimeofday(&stop, NULL); double duration = ((stop.tv_sec - start.tv_sec)*1000000.0 + (stop.tv_usec - start.tv_usec))/1000000.0; printf("Process %i: Got nodes in %f at a rate of %f/s\n", process_number, duration, node_cnt / duration); free(nodes); free(osmids); } gettimeofday(&stop_overall, NULL); double duration = ((stop_overall.tv_sec - start_overall.tv_sec)*1000000.0 + (stop_overall.tv_usec - start_overall.tv_usec))/1000000.0; printf("Process %i: Got a total of nodes in %f at a rate 
of %f/s\n", process_number, duration, node_cnt_total / duration); } int main(int argc, char *argv[]) { int i,p; struct output_options options; struct osmNode node; struct osmNode *nodes; struct timeval start; osmid_t *osmids; int node_cnt; options.append = 1; options.scale = 100; options.flat_node_cache_enabled = 1; options.flat_node_file = argv[1]; init_node_ram_cache(0,10,100); if (argc > 3) { init_node_persistent_cache(&options, 1); node_cnt = argc - 2; nodes = malloc(sizeof(struct osmNode) * node_cnt); osmids = malloc(sizeof(osmid_t) * node_cnt); for (i = 0; i < node_cnt; i++) { osmids[i] = atoi(argv[2 + i]); } persistent_cache_nodes_get_list(nodes,osmids,node_cnt); for (i = 0; i < node_cnt; i++) { printf("lat: %f / lon: %f\n", nodes[i].lat, nodes[i].lon); } } else if (argc == 2) { char * state = malloc(sizeof(char)* 128); gettimeofday(&start, NULL); initstate(start.tv_usec, state, 8); setstate(state); printf("Testing mode\n"); init_node_persistent_cache(&options, 1); test_get_node_list(10, 200, 0); shutdown_node_persistent_cache(); #ifdef HAVE_FORK printf("Testing using multiple processes\n"); int noProcs = 4; int pid; for (p = 1; p < noProcs; p++) { pid=fork(); if (pid==0) { break; } if (pid==-1) { fprintf(stderr,"WARNING: Failed to fork helper processes. 
Falling back to only using %i \n", p); exit(1); } } gettimeofday(&start, NULL); initstate(start.tv_usec, state, 8); setstate(state); init_node_persistent_cache(&options, 1); test_get_node_list(10,200,p); if (pid == 0) { shutdown_node_persistent_cache(); fprintf(stderr,"Exiting process %i\n", p); exit(0); } else { for (p = 0; p < noProcs; p++) wait(NULL); } free(state); fprintf(stderr, "\nAll child processes exited\n"); #endif } else { init_node_persistent_cache(&options, 1); if (strstr(argv[2],",") == NULL) { persistent_cache_nodes_get(&node, atoi(argv[2])); printf("lat: %f / lon: %f\n", node.lat, node.lon); } else { char * node_list = malloc(sizeof(char) * (strlen(argv[2]) + 1)); strcpy(node_list,argv[2]); node_cnt = 1; strtok(node_list,","); while (strtok(NULL,",") != NULL) node_cnt++; printf("Processing %i nodes\n", node_cnt); nodes = malloc(sizeof(struct osmNode) * node_cnt); osmids = malloc(sizeof(osmid_t) * node_cnt); strcpy(node_list,argv[2]); osmids[0] = atoi(strtok(node_list,",")); for (i = 1; i < node_cnt; i++) { char * tmp = strtok(NULL,","); osmids[i] = atoi(tmp); } persistent_cache_nodes_get_list(nodes,osmids,node_cnt); for (i = 0; i < node_cnt; i++) { printf("lat: %f / lon: %f\n", nodes[i].lat, nodes[i].lon); } } } shutdown_node_persistent_cache(); return 0; } osm2pgsql-0.82.0/node-persistent-cache.c000066400000000000000000000606601213272333300200670ustar00rootroot00000000000000#define _LARGEFILE64_SOURCE /* See feature_test_macrors(7) */ #include "config.h" #include #include #include #include #include #include #include #include #include #include #include "osmtypes.h" #include "output.h" #include "node-persistent-cache.h" #include "node-ram-cache.h" #include "binarysearcharray.h" #ifdef __APPLE__ #define lseek64 lseek #else #ifndef HAVE_LSEEK64 #if SIZEOF_OFF_T == 8 #define lseek64 lseek #else #error Flat nodes cache requires a 64 bit capable seek #endif #endif #endif static int node_cache_fd; static const char * node_cache_fname; static int 
append_mode; struct persistentCacheHeader cacheHeader; static struct ramNodeBlock writeNodeBlock; /* larger node block for more efficient initial sequential writing of node cache */ static struct ramNodeBlock * readNodeBlockCache; static struct binary_search_array * readNodeBlockCacheIdx; static int scale; static int cache_already_written = 0; static void writeout_dirty_nodes(osmid_t id) { int i; if (writeNodeBlock.dirty > 0) { if (lseek64(node_cache_fd, (writeNodeBlock.block_offset << WRITE_NODE_BLOCK_SHIFT) * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (write(node_cache_fd, writeNodeBlock.nodes, WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode)) < WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode)) { fprintf(stderr, "Failed to write out node cache: %s\n", strerror(errno)); exit_nicely(); } cacheHeader.max_initialised_id = ((writeNodeBlock.block_offset + 1) << WRITE_NODE_BLOCK_SHIFT) - 1; writeNodeBlock.used = 0; writeNodeBlock.dirty = 0; if (lseek64(node_cache_fd, 0, SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (write(node_cache_fd, &cacheHeader, sizeof(struct persistentCacheHeader)) != sizeof(struct persistentCacheHeader)) { fprintf(stderr, "Failed to update persistent cache header: %s\n", strerror(errno)); exit_nicely(); } if (fsync(node_cache_fd) < 0) { fprintf(stderr, "Info: Node cache could not be guaranteeded to be made durable. 
fsync failed: %s\n", strerror(errno)); }; } if (id < 0) { for (i = 0; i < READ_NODE_CACHE_SIZE; i++) { if (readNodeBlockCache[i].dirty) { if (lseek64(node_cache_fd, (readNodeBlockCache[i].block_offset << READ_NODE_BLOCK_SHIFT) * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (write(node_cache_fd, readNodeBlockCache[i].nodes, READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) < READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) { fprintf(stderr, "Failed to write out node cache: %s\n", strerror(errno)); exit_nicely(); } } readNodeBlockCache[i].dirty = 0; } } } static void ramNodes_clear(struct ramNode * nodes, int size) { int i; for (i = 0; i < size; i++) { #ifdef FIXED_POINT nodes[i].lon = INT_MIN; nodes[i].lat = INT_MIN; #else nodes[i].lon = NAN; nodes[i].lat = NAN; #endif } } /** * Find the cache block with the lowest usage count for replacement */ static int persistent_cache_replace_block() { int min_used = INT_MAX; int block_id = -1; int i; for (i = 0; i < READ_NODE_CACHE_SIZE; i++) { if (readNodeBlockCache[i].used < min_used) { min_used = readNodeBlockCache[i].used; block_id = i; } } if (min_used > 0) { for (i = 0; i < READ_NODE_CACHE_SIZE; i++) { if (readNodeBlockCache[i].used > 1) { readNodeBlockCache[i].used--; } } } return block_id; } /** * Find cache block number by block_offset */ static int persistent_cache_find_block(osmid_t block_offset) { int idx = binary_search_get(readNodeBlockCacheIdx, block_offset); return idx; } /** * Initialise the persistent cache with NaN values to identify which IDs are valid or not */ static void persistent_cache_expand_cache(osmid_t block_offset) { osmid_t i; struct ramNode * dummyNodes = malloc( READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)); if (!dummyNodes) { fprintf(stderr, "Out of memory: Could not allocate node structure during cache expansion\n"); exit_nicely(); } 
ramNodes_clear(dummyNodes, READ_NODE_BLOCK_SIZE); /* Need to expand the persistent node cache */ if (lseek64(node_cache_fd, cacheHeader.max_initialised_id * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; for (i = cacheHeader.max_initialised_id >> READ_NODE_BLOCK_SHIFT; i <= block_offset; i++) { if (write(node_cache_fd, dummyNodes, READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) < READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) { fprintf(stderr, "Failed to expand persistent node cache: %s\n", strerror(errno)); exit_nicely(); } } cacheHeader.max_initialised_id = ((block_offset + 1) << READ_NODE_BLOCK_SHIFT) - 1; if (lseek64(node_cache_fd, 0, SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (write(node_cache_fd, &cacheHeader, sizeof(struct persistentCacheHeader)) != sizeof(struct persistentCacheHeader)) { fprintf(stderr, "Failed to update persistent cache header: %s\n", strerror(errno)); exit_nicely(); } free(dummyNodes); fsync(node_cache_fd); } static void persistent_cache_nodes_prefetch_async(osmid_t id) { #ifdef HAVE_POSIX_FADVISE osmid_t block_offset = id >> READ_NODE_BLOCK_SHIFT; osmid_t block_id = persistent_cache_find_block(block_offset); if (block_id < 0) { /* The needed block isn't in cache already, so initiate loading */ writeout_dirty_nodes(id); /* Make sure the node cache is correctly initialised for the block that will be read */ if (cacheHeader.max_initialised_id < ((block_offset + 1) << READ_NODE_BLOCK_SHIFT)) persistent_cache_expand_cache(block_offset); if (posix_fadvise(node_cache_fd, (block_offset << READ_NODE_BLOCK_SHIFT) * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), READ_NODE_BLOCK_SIZE * sizeof(struct ramNode), POSIX_FADV_WILLNEED | POSIX_FADV_RANDOM) != 0) { fprintf(stderr, "Info: async prefetch of node cache 
failed. This might reduce performance\n"); }; } #endif } /** * Load block offset in a synchronous way. */ static int persistent_cache_load_block(osmid_t block_offset) { int block_id = persistent_cache_replace_block(); if (readNodeBlockCache[block_id].dirty) { if (lseek64(node_cache_fd, (readNodeBlockCache[block_id].block_offset << READ_NODE_BLOCK_SHIFT) * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (write(node_cache_fd, readNodeBlockCache[block_id].nodes, READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) < READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) { fprintf(stderr, "Failed to write out node cache: %s\n", strerror(errno)); exit_nicely(); } readNodeBlockCache[block_id].dirty = 0; } binary_search_remove(readNodeBlockCacheIdx, readNodeBlockCache[block_id].block_offset); ramNodes_clear(readNodeBlockCache[block_id].nodes, READ_NODE_BLOCK_SIZE); readNodeBlockCache[block_id].block_offset = block_offset; readNodeBlockCache[block_id].used = READ_NODE_CACHE_SIZE; /* Make sure the node cache is correctly initialised for the block that will be read */ if (cacheHeader.max_initialised_id < ((block_offset + 1) << READ_NODE_BLOCK_SHIFT)) { persistent_cache_expand_cache(block_offset); } /* Read the block into cache */ if (lseek64(node_cache_fd, (block_offset << READ_NODE_BLOCK_SHIFT) * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (read(node_cache_fd, readNodeBlockCache[block_id].nodes, READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) != READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)) { fprintf(stderr, "Failed to read from node cache: %s\n", strerror(errno)); exit(1); } binary_search_add(readNodeBlockCacheIdx, readNodeBlockCache[block_id].block_offset, block_id); return block_id; } static void 
persisten_cache_nodes_set_create_writeout_block() { if (write(node_cache_fd, writeNodeBlock.nodes, WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode)) < WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode)) { fprintf(stderr, "Failed to write out node cache: %s\n", strerror(errno)); exit_nicely(); } #ifdef HAVE_SYNC_FILE_RANGE /* writing out large files can cause trouble on some operating systems. * For one, if to much dirty data is in RAM, the whole OS can stall until * enough dirty data is written out which can take a while. It can also interfere * with outher disk caching operations and might push things out to swap. By forcing the OS to * immediately write out the data and blocking after a while, we ensure that no more * than a couple of 10s of MB are dirty in RAM at a time. * Secondly, the nodes are stored in an additional ram cache during import. Keeping the * node cache file in buffer cache therefore duplicates the data wasting 16GB of ram. * Therefore tell the OS not to cache the node-persistent-cache during initial import. * */ if (sync_file_range(node_cache_fd, writeNodeBlock.block_offset*WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode), SYNC_FILE_RANGE_WRITE) < 0) { fprintf(stderr, "Info: Sync_file_range writeout has an issue. This shouldn't be anything to worry about.: %s\n", strerror(errno)); }; if (writeNodeBlock.block_offset > 16) { if(sync_file_range(node_cache_fd, (writeNodeBlock.block_offset - 16)*WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode), SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE | SYNC_FILE_RANGE_WAIT_AFTER) < 0) { fprintf(stderr, "Info: Sync_file_range block has an issue. 
This shouldn't be anything to worry about.: %s\n", strerror(errno)); } #ifdef HAVE_POSIX_FADVISE if (posix_fadvise(node_cache_fd, (writeNodeBlock.block_offset - 16)*WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode) + sizeof(struct persistentCacheHeader), WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode), POSIX_FADV_DONTNEED) !=0 ) { fprintf(stderr, "Info: Posix_fadvise failed. This shouldn't be anything to worry about.: %s\n", strerror(errno)); }; #endif } #endif } static int persistent_cache_nodes_set_create(osmid_t id, double lat, double lon) { osmid_t block_offset = id >> WRITE_NODE_BLOCK_SHIFT; int i; if (cache_already_written) return 0; if (writeNodeBlock.block_offset != block_offset) { if (writeNodeBlock.dirty) { persisten_cache_nodes_set_create_writeout_block(); writeNodeBlock.used = 0; writeNodeBlock.dirty = 0; /* After writing out the node block, the file pointer is at the next block level */ writeNodeBlock.block_offset++; cacheHeader.max_initialised_id = (writeNodeBlock.block_offset << WRITE_NODE_BLOCK_SHIFT) - 1; } if (writeNodeBlock.block_offset > block_offset) { fprintf(stderr, "ERROR: Block_offset not in sequential order: %" PRIdOSMID "%" PRIdOSMID "\n", writeNodeBlock.block_offset, block_offset); exit_nicely(); } /* We need to fill the intermediate node cache with node nodes to identify which nodes are valid */ for (i = writeNodeBlock.block_offset; i < block_offset; i++) { ramNodes_clear(writeNodeBlock.nodes, WRITE_NODE_BLOCK_SIZE); persisten_cache_nodes_set_create_writeout_block(); } ramNodes_clear(writeNodeBlock.nodes, WRITE_NODE_BLOCK_SIZE); writeNodeBlock.used = 0; writeNodeBlock.block_offset = block_offset; } #ifdef FIXED_POINT writeNodeBlock.nodes[id & WRITE_NODE_BLOCK_MASK].lat = DOUBLE_TO_FIX(lat); writeNodeBlock.nodes[id & WRITE_NODE_BLOCK_MASK].lon = DOUBLE_TO_FIX(lon); #else writeNodeBlock.nodes[id & WRITE_NODE_BLOCK_MASK].lat = lat; writeNodeBlock.nodes[id & WRITE_NODE_BLOCK_MASK].lon = lon; #endif writeNodeBlock.used++; writeNodeBlock.dirty = 
1; return 0; } static int persistent_cache_nodes_set_append(osmid_t id, double lat, double lon) { osmid_t block_offset = id >> READ_NODE_BLOCK_SHIFT; int block_id = persistent_cache_find_block(block_offset); if (block_id < 0) block_id = persistent_cache_load_block(block_offset); #ifdef FIXED_POINT if (isnan(lat) && isnan(lon)) { readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lat = INT_MIN; readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lon = INT_MIN; } else { readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lat = DOUBLE_TO_FIX(lat); readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lon = DOUBLE_TO_FIX(lon); } #else readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lat = lat; readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lon = lon; #endif readNodeBlockCache[block_id].used++; readNodeBlockCache[block_id].dirty = 1; return 1; } int persistent_cache_nodes_set(osmid_t id, double lat, double lon) { return append_mode ? 
persistent_cache_nodes_set_append(id, lat, lon) : persistent_cache_nodes_set_create(id, lat, lon); } int persistent_cache_nodes_get(struct osmNode *out, osmid_t id) { osmid_t block_offset = id >> READ_NODE_BLOCK_SHIFT; osmid_t block_id = persistent_cache_find_block(block_offset); if (block_id < 0) { writeout_dirty_nodes(id); block_id = persistent_cache_load_block(block_offset); } readNodeBlockCache[block_id].used++; #ifdef FIXED_POINT if ((readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lat == INT_MIN) && (readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lon == INT_MIN)) { return 1; } else { out->lat = FIX_TO_DOUBLE(readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lat); out->lon = FIX_TO_DOUBLE(readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lon); return 0; } #else if ((isnan(readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lat)) && (isnan(readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lon))) { return 1; } else { out->lat = readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lat; out->lon = readNodeBlockCache[block_id].nodes[id & READ_NODE_BLOCK_MASK].lon; return 0; } #endif return 0; } int persistent_cache_nodes_get_list(struct osmNode *nodes, osmid_t *ndids, int nd_count) { int count = 0; int i; for (i = 0; i < nd_count; i++) { /* Check cache first */ if (ram_cache_nodes_get(&nodes[i], ndids[i]) == 0) { count++; } else { nodes[i].lat = NAN; nodes[i].lon = NAN; } } if (count == nd_count) return count; for (i = 0; i < nd_count; i++) { /* In order to have a higher OS level I/O queue depth issue posix_fadvise(WILLNEED) requests for all I/O */ if (isnan(nodes[i].lat) && isnan(nodes[i].lon)) persistent_cache_nodes_prefetch_async(ndids[i]); } for (i = 0; i < nd_count; i++) { if ((isnan(nodes[i].lat) && isnan(nodes[i].lon)) && (persistent_cache_nodes_get(&(nodes[i]), ndids[i]) == 0)) count++; } if (count < nd_count) { int j = 0; for (i = 0; i < nd_count; i++) { if 
(!isnan(nodes[i].lat)) { nodes[j].lat = nodes[i].lat; nodes[j].lon = nodes[i].lon; j++; } } for (i = count; i < nd_count; i++) { nodes[i].lat = NAN; nodes[i].lon = NAN; } } return count; } void init_node_persistent_cache(const struct output_options *options, int append) { int i; scale = options->scale; append_mode = append; node_cache_fname = options->flat_node_file; fprintf(stderr, "Mid: loading persistent node cache from %s\n", node_cache_fname); readNodeBlockCacheIdx = init_search_array(READ_NODE_CACHE_SIZE); /* Setup the file for the node position cache */ if (append_mode) { node_cache_fd = open(node_cache_fname, O_RDWR, S_IRUSR | S_IWUSR); if (node_cache_fd < 0) { fprintf(stderr, "Failed to open node cache file: %s\n", strerror(errno)); exit_nicely(); } } else { if (cache_already_written) { node_cache_fd = open(node_cache_fname, O_RDWR, S_IRUSR | S_IWUSR); } else { node_cache_fd = open(node_cache_fname, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR); } if (node_cache_fd < 0) { fprintf(stderr, "Failed to create node cache file: %s\n", strerror(errno)); exit_nicely(); } if (lseek64(node_cache_fd, 0, SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (cache_already_written == 0) { #ifdef HAVE_POSIX_FALLOCATE if (posix_fallocate(node_cache_fd, 0, sizeof(struct ramNode) * MAXIMUM_INITIAL_ID) != 0) { fprintf(stderr, "Failed to allocate space for node cache file: %s\n", strerror(errno)); close(node_cache_fd); exit_nicely(); } fprintf(stderr, "Allocated space for persistent node cache file\n"); #endif writeNodeBlock.nodes = malloc( WRITE_NODE_BLOCK_SIZE * sizeof(struct ramNode)); if (!writeNodeBlock.nodes) { fprintf(stderr, "Out of memory: Failed to allocate node writeout buffer\n"); exit_nicely(); } ramNodes_clear(writeNodeBlock.nodes, WRITE_NODE_BLOCK_SIZE); writeNodeBlock.block_offset = 0; writeNodeBlock.used = 0; writeNodeBlock.dirty = 0; cacheHeader.format_version = 
PERSISTENT_CACHE_FORMAT_VERSION; cacheHeader.id_size = sizeof(osmid_t); cacheHeader.max_initialised_id = 0; if (lseek64(node_cache_fd, 0, SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (write(node_cache_fd, &cacheHeader, sizeof(struct persistentCacheHeader)) != sizeof(struct persistentCacheHeader)) { fprintf(stderr, "Failed to write persistent cache header: %s\n", strerror(errno)); exit_nicely(); } } } if (lseek64(node_cache_fd, 0, SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: %s\n", strerror(errno)); exit_nicely(); }; if (read(node_cache_fd, &cacheHeader, sizeof(struct persistentCacheHeader)) != sizeof(struct persistentCacheHeader)) { fprintf(stderr, "Failed to read persistent cache header: %s\n", strerror(errno)); exit_nicely(); } if (cacheHeader.format_version != PERSISTENT_CACHE_FORMAT_VERSION) { fprintf(stderr, "Persistent cache header is wrong version\n"); exit_nicely(); } if (cacheHeader.id_size != sizeof(osmid_t)) { fprintf(stderr, "Persistent cache header is wrong id type\n"); exit_nicely(); } fprintf(stderr,"Maximum node in persistent node cache: %" PRIdOSMID "\n", cacheHeader.max_initialised_id); readNodeBlockCache = malloc( READ_NODE_CACHE_SIZE * sizeof(struct ramNodeBlock)); if (!readNodeBlockCache) { fprintf(stderr, "Out of memory: Failed to allocate node read cache\n"); exit_nicely(); } for (i = 0; i < READ_NODE_CACHE_SIZE; i++) { readNodeBlockCache[i].nodes = malloc( READ_NODE_BLOCK_SIZE * sizeof(struct ramNode)); if (!readNodeBlockCache[i].nodes) { fprintf(stderr, "Out of memory: Failed to allocate node read cache\n"); exit_nicely(); } readNodeBlockCache[i].block_offset = -1; readNodeBlockCache[i].used = 0; readNodeBlockCache[i].dirty = 0; } } void shutdown_node_persistent_cache() { int i; writeout_dirty_nodes(-1); if (lseek64(node_cache_fd, 0, SEEK_SET) < 0) { fprintf(stderr, "Failed to seek to correct position in node cache: 
%s\n", strerror(errno)); exit_nicely(); }; if (write(node_cache_fd, &cacheHeader, sizeof(struct persistentCacheHeader)) != sizeof(struct persistentCacheHeader)) { fprintf(stderr, "Failed to update persistent cache header: %s\n", strerror(errno)); exit_nicely(); } fprintf(stderr,"Maximum node in persistent node cache: %" PRIdOSMID "\n", cacheHeader.max_initialised_id); fsync(node_cache_fd); if (close(node_cache_fd) != 0) { fprintf(stderr, "Failed to close node cache file: %s\n", strerror(errno)); } for (i = 0; i < READ_NODE_CACHE_SIZE; i++) { free(readNodeBlockCache[i].nodes); } shutdown_search_array(&readNodeBlockCacheIdx); free(readNodeBlockCache); readNodeBlockCache = NULL; } osm2pgsql-0.82.0/node-persistent-cache.h000066400000000000000000000015061213272333300200660ustar00rootroot00000000000000#define MAXIMUM_INITIAL_ID (1L << 31) #define READ_NODE_CACHE_SIZE 10000 #define READ_NODE_BLOCK_SHIFT 10l #define READ_NODE_BLOCK_SIZE (1l << READ_NODE_BLOCK_SHIFT) #define READ_NODE_BLOCK_MASK 0x03FFl #define WRITE_NODE_BLOCK_SHIFT 20l #define WRITE_NODE_BLOCK_SIZE (1l << WRITE_NODE_BLOCK_SHIFT) #define WRITE_NODE_BLOCK_MASK 0x0FFFFFl #define PERSISTENT_CACHE_FORMAT_VERSION 1 struct persistentCacheHeader { int format_version; int id_size; osmid_t max_initialised_id; }; int persistent_cache_nodes_set(osmid_t id, double lat, double lon); int persistent_cache_nodes_get(struct osmNode *out, osmid_t id); int persistent_cache_nodes_get_list(struct osmNode *nodes, osmid_t *ndids, int nd_count); void init_node_persistent_cache(const struct output_options *options, const int append); void shutdown_node_persistent_cache(); osm2pgsql-0.82.0/node-ram-cache.c000066400000000000000000000430431213272333300164420ustar00rootroot00000000000000/* Implements a node cache in ram, for the middle layers to use. * It uses two different storage methods, one optimized for dense * nodes (with respect to id) and the other for sparse representations. 
*/ #include "config.h" #include #include #include #include #include #include "osmtypes.h" #include "middle.h" #include "node-ram-cache.h" /* Here we use a similar storage structure as middle-ram, except we allow * the array to be lossy so we can cap the total memory usage. Hence it is a * combination of a sparse array with a priority queue * * Like middle-ram we have a number of blocks all storing PER_BLOCK * ramNodes. However, here we also track the number of nodes in each block. * Seperately we have a priority queue like structure when maintains a list * of all the used block so we can easily find the block with the least * nodes. The cache has two phases: * * Phase 1: Loading initially, usedBlocks < maxBlocks. In this case when a * new block is needed we simply allocate it and put it in * queue[usedBlocks-1] which is the bottom of the tree. Every node added * increases it's usage. When we move onto the next block we percolate this * block up the queue until it reaches its correct position. The invariant * is that the priority tree is complete except for this last node. We do * not permit adding nodes to any other block to preserve this invariant. * * Phase 2: Once we've reached the maximum number of blocks permitted, we * change so that the block currently be inserted into is at the top of the * tree. When a new block is needed we take the one at the end of the queue, * as it is the one with the least number of nodes in it. When we move onto * the next block we first push the just completed block down to it's * correct position in the queue and then reuse the block that now at the * head. * * The result being that at any moment we have in memory the top maxBlock * blocks in terms of number of nodes in memory. This should maximize the * number of hits in lookups. 
* * Complexity: * Insert node: O(1) * Lookup node: O(1) * Add new block: O(log usedBlocks) * Reuse old block: O(log maxBlocks) */ static int allocStrategy = ALLOC_DENSE; #define BLOCK_SHIFT 10 #define PER_BLOCK (((osmid_t)1) << BLOCK_SHIFT) #define NUM_BLOCKS (((osmid_t)1) << (36 - BLOCK_SHIFT)) #define SAFETY_MARGIN 1024*PER_BLOCK*sizeof(struct ramNode) static struct ramNodeBlock *blocks; static int usedBlocks; /* Note: maxBlocks *must* be odd, to make sure the priority queue has no nodes with only one child */ static int maxBlocks = 0; static void *blockCache = NULL; static struct ramNodeBlock **queue; static struct ramNodeID *sparseBlock; static int64_t maxSparseTuples = 0; static int64_t sizeSparseTuples = 0; static int64_t cacheUsed, cacheSize; static osmid_t storedNodes, totalNodes; int nodesCacheHits, nodesCacheLookups; static int warn_node_order; static int ram_cache_nodes_get_sparse(struct osmNode *out, osmid_t id); static int id2block(osmid_t id) { /* + NUM_BLOCKS/2 allows for negative IDs */ return (id >> BLOCK_SHIFT) + NUM_BLOCKS/2; } static int id2offset(osmid_t id) { return id & (PER_BLOCK-1); } static osmid_t block2id(int block, int offset) { return (((osmid_t) block - NUM_BLOCKS/2) << BLOCK_SHIFT) + (osmid_t) offset; } #define Swap(a,b) { struct ramNodeBlock * __tmp = a; a = b; b = __tmp; } static void percolate_up( int pos ) { int i = pos; while( i > 0 ) { int parent = (i-1)>>1; if( queue[i]->used < queue[parent]->used ) { Swap( queue[i], queue[parent] ) i = parent; } else break; } } static void *next_chunk(size_t count, size_t size) { if ( (allocStrategy & ALLOC_DENSE_CHUNK) == 0 ) { static size_t pos = 0; void *result; pos += count * size; result = blockCache + cacheSize - pos + SAFETY_MARGIN; return result; } else { return calloc(PER_BLOCK, sizeof(struct ramNode)); } } static int ram_cache_nodes_set_sparse(osmid_t id, double lat, double lon, struct keyval *tags UNUSED) { if ((sizeSparseTuples > maxSparseTuples) || ( cacheUsed > cacheSize)) { if 
((allocStrategy & ALLOC_LOSSY) > 0) return 1; else { fprintf(stderr, "\nNode cache size is too small to fit all nodes. Please increase cache size\n"); exit_nicely(); } } sparseBlock[sizeSparseTuples].id = id; #ifdef FIXED_POINT sparseBlock[sizeSparseTuples].coord.lat = DOUBLE_TO_FIX(lat); sparseBlock[sizeSparseTuples].coord.lon = DOUBLE_TO_FIX(lon); #else sparseBlock[sizeSparseTuples].coord.lat = lat; sparseBlock[sizeSparseTuples].coord.lon = lon; #endif sizeSparseTuples++; cacheUsed += sizeof(struct ramNodeID); storedNodes++; return 0; } static int ram_cache_nodes_set_dense(osmid_t id, double lat, double lon, struct keyval *tags UNUSED) { int block = id2block(id); int offset = id2offset(id); int i = 0; if (!blocks[block].nodes) { if (((allocStrategy & ALLOC_SPARSE) > 0) && ( usedBlocks < maxBlocks) && ( cacheUsed > cacheSize)) { /* TODO: It is more memory efficient to drop nodes from the sparse node cache than from the dense node cache */ } if ((usedBlocks < maxBlocks ) && (cacheUsed < cacheSize)) { /* if usedBlocks > 0 then the previous block is used up. Need to correctly handle it. */ if ( usedBlocks > 0 ) { /* If sparse allocation is also set, then check if the previous block has sufficient density * to store it in dense representation. 
If not, push all elements of the block * to the sparse node cache and reuse memory of the previous block for the current block */ if ( ((allocStrategy & ALLOC_SPARSE) == 0) || ((queue[usedBlocks - 1]->used / (double)(1<< BLOCK_SHIFT)) > (sizeof(struct ramNode) / (double)sizeof(struct ramNodeID)))) { /* Block has reached the level to keep it in dense representation */ /* We've just finished with the previous block, so we need to percolate it up the queue to its correct position */ /* Upto log(usedBlocks) iterations */ percolate_up( usedBlocks-1 ); blocks[block].nodes = next_chunk(PER_BLOCK, sizeof(struct ramNode)); } else { /* previous block was not dense enough, so push it into the sparse node cache instead */ for (i = 0; i < (1 << BLOCK_SHIFT); i++) { if (queue[usedBlocks -1]->nodes[i].lat || queue[usedBlocks -1]->nodes[i].lon) { ram_cache_nodes_set_sparse(block2id(queue[usedBlocks - 1]->block_offset,i), #ifdef FIXED_POINT FIX_TO_DOUBLE(queue[usedBlocks -1]->nodes[i].lat), FIX_TO_DOUBLE(queue[usedBlocks -1]->nodes[i].lon), #else queue[usedBlocks -1]->nodes[i].lat, queue[usedBlocks -1]->nodes[i].lon, #endif NULL); } } /* reuse previous block, as it's content is now in the dense representation */ storedNodes -= queue[usedBlocks - 1]->used; blocks[block].nodes = queue[usedBlocks - 1]->nodes; blocks[queue[usedBlocks - 1]->block_offset].nodes = NULL; memset( blocks[block].nodes, 0, PER_BLOCK * sizeof(struct ramNode) ); usedBlocks--; cacheUsed -= PER_BLOCK * sizeof(struct ramNode); } } else { blocks[block].nodes = next_chunk(PER_BLOCK, sizeof(struct ramNode)); } blocks[block].used = 0; blocks[block].block_offset = block; if (!blocks[block].nodes) { fprintf(stderr, "Error allocating nodes\n"); exit_nicely(); } queue[usedBlocks] = &blocks[block]; usedBlocks++; cacheUsed += PER_BLOCK * sizeof(struct ramNode); /* If we've just used up the last possible block we enter the * transition and we change the invariant. 
To do this we percolate * the newly allocated block straight to the head */ if (( usedBlocks == maxBlocks ) || ( cacheUsed > cacheSize )) percolate_up( usedBlocks-1 ); } else { if ((allocStrategy & ALLOC_LOSSY) == 0) { fprintf(stderr, "\nNode cache size is too small to fit all nodes. Please increase cache size\n"); exit_nicely(); } /* We've reached the maximum number of blocks, so now we push the * current head of the tree down to the right level to restore the * priority queue invariant. Upto log(maxBlocks) iterations */ i=0; while( 2*i+1 < usedBlocks - 1 ) { if( queue[2*i+1]->used <= queue[2*i+2]->used ) { if( queue[i]->used > queue[2*i+1]->used ) { Swap( queue[i], queue[2*i+1] ); i = 2*i+1; } else break; } else { if( queue[i]->used > queue[2*i+2]->used ) { Swap( queue[i], queue[2*i+2] ); i = 2*i+2; } else break; } } /* Now the head of the queue is the smallest, so it becomes our replacement candidate */ blocks[block].nodes = queue[0]->nodes; blocks[block].used = 0; memset( blocks[block].nodes, 0, PER_BLOCK * sizeof(struct ramNode) ); /* Clear old head block and point to new block */ storedNodes -= queue[0]->used; queue[0]->nodes = NULL; queue[0]->used = 0; queue[0] = &blocks[block]; } } else { /* Insert into an existing block. We can't allow this in general or it * will break the invariant. 
However, it will work fine if all the * nodes come in numerical order, which is the common case */ int expectedpos; if (( usedBlocks < maxBlocks ) && (cacheUsed < cacheSize)) expectedpos = usedBlocks-1; else expectedpos = 0; if( queue[expectedpos] != &blocks[block] ) { if (!warn_node_order) { fprintf( stderr, "WARNING: Found Out of order node %" PRIdOSMID " (%d,%d) - this will impact the cache efficiency\n", id, block, offset ); warn_node_order++; } return 1; } } #ifdef FIXED_POINT blocks[block].nodes[offset].lat = DOUBLE_TO_FIX(lat); blocks[block].nodes[offset].lon = DOUBLE_TO_FIX(lon); #else blocks[block].nodes[offset].lat = lat; blocks[block].nodes[offset].lon = lon; #endif blocks[block].used++; storedNodes++; return 0; } static int ram_cache_nodes_get_sparse(struct osmNode *out, osmid_t id) { int64_t pivotPos = sizeSparseTuples >> 1; int64_t minPos = 0; int64_t maxPos = sizeSparseTuples; while (minPos <= maxPos) { if ( sparseBlock[pivotPos].id == id ) { #ifdef FIXED_POINT out->lat = FIX_TO_DOUBLE(sparseBlock[pivotPos].coord.lat); out->lon = FIX_TO_DOUBLE(sparseBlock[pivotPos].coord.lon); #else out->lat = sparseBlock[pivotPos].coord.lat; out->lon = sparseBlock[pivotPos].coord.lon; #endif return 0; } if ( (pivotPos == minPos) || (pivotPos == maxPos)) return 1; if ( sparseBlock[pivotPos].id > id ) { maxPos = pivotPos; pivotPos = minPos + ((maxPos - minPos) >> 1); } else { minPos = pivotPos; pivotPos = minPos + ((maxPos - minPos) >> 1); } } return 1; } static int ram_cache_nodes_get_dense(struct osmNode *out, osmid_t id) { int block = id2block(id); int offset = id2offset(id); if (!blocks[block].nodes) return 1; if (!blocks[block].nodes[offset].lat && !blocks[block].nodes[offset].lon) return 1; #ifdef FIXED_POINT out->lat = FIX_TO_DOUBLE(blocks[block].nodes[offset].lat); out->lon = FIX_TO_DOUBLE(blocks[block].nodes[offset].lon); #else out->lat = blocks[block].nodes[offset].lat; out->lon = blocks[block].nodes[offset].lon; #endif return 0; } void init_node_ram_cache( 
int strategy, int cacheSizeMB, int fixpointscale ) { blockCache = 0; cacheUsed = 0; cacheSize = (int64_t)cacheSizeMB*(1024*1024); /* How much we can fit, and make sure it's odd */ maxBlocks = (cacheSize/(PER_BLOCK*sizeof(struct ramNode))) | 1; maxSparseTuples = (cacheSize/sizeof(struct ramNodeID)) | 1; allocStrategy = strategy; scale = fixpointscale; if ((allocStrategy & ALLOC_DENSE) > 0 ) { fprintf(stderr, "Allocating memory for dense node cache\n"); blocks = calloc(NUM_BLOCKS,sizeof(struct ramNodeBlock)); if (!blocks) { fprintf(stderr, "Out of memory for node cache dense index, try using \"--cache-strategy sparse\" instead \n"); exit_nicely(); } queue = calloc( maxBlocks,sizeof(struct ramNodeBlock *) ); /* Use this method of allocation if virtual memory is limited, * or if OS allocs physical memory right away, rather than page by page * once it is needed. */ if( (allocStrategy & ALLOC_DENSE_CHUNK) > 0 ) { fprintf(stderr, "Allocating dense node cache in block sized chunks\n"); if (!queue) { fprintf(stderr, "Out of memory, reduce --cache size\n"); exit_nicely(); } } else { fprintf(stderr, "Allocating dense node cache in one big chunk\n"); blockCache = calloc(maxBlocks + 1024,PER_BLOCK * sizeof(struct ramNode)); if (!queue || !blockCache) { fprintf(stderr, "Out of memory for dense node cache, reduce --cache size\n"); exit_nicely(); } } } /* Allocate the full amount of memory given by --cache parameter in one go. * If both dense and sparse cache alloc is set, this will allocate up to twice * as much virtual memory as specified by --cache. This relies on the OS doing * lazy allocation of physical RAM. 
Extra accounting during setting of nodes is done * to ensure physical RAM usage should roughly be no more than --cache */ if ((allocStrategy & ALLOC_SPARSE) > 0 ) { fprintf(stderr, "Allocating memory for sparse node cache\n"); if (!blockCache) { sparseBlock = calloc(maxSparseTuples,sizeof(struct ramNodeID)); } else { fprintf(stderr, "Sharing dense sparse\n"); sparseBlock = blockCache; } if (!sparseBlock) { fprintf(stderr, "Out of memory for sparse node cache, reduce --cache size\n"); exit_nicely(); } } #ifdef __MINGW_H fprintf( stderr, "Node-cache: cache=%ldMB, maxblocks=%d*%d, allocation method=%i\n", (cacheSize >> 20), maxBlocks, PER_BLOCK*sizeof(struct ramNode), allocStrategy ); #else fprintf( stderr, "Node-cache: cache=%ldMB, maxblocks=%d*%zd, allocation method=%i\n", (cacheSize >> 20), maxBlocks, PER_BLOCK*sizeof(struct ramNode), allocStrategy ); #endif } void free_node_ram_cache() { int i; fprintf( stderr, "node cache: stored: %" PRIdOSMID "(%.2f%%), storage efficiency: %.2f%% (dense blocks: %i, sparse nodes: %li), hit rate: %.2f%%\n", storedNodes, 100.0f*storedNodes/totalNodes, 100.0f*storedNodes*sizeof(struct ramNode)/cacheUsed, usedBlocks, sizeSparseTuples, 100.0f*nodesCacheHits/nodesCacheLookups ); if ( (allocStrategy & ALLOC_DENSE) > 0 ) { if ( (allocStrategy & ALLOC_DENSE_CHUNK) > 0 ) { for( i=0; inodes); queue[i]->nodes = NULL; } } else { free(blockCache); blockCache = 0; } free(queue); } if ( ((allocStrategy & ALLOC_SPARSE) > 0) && ((allocStrategy & ALLOC_DENSE) == 0)) { free(sparseBlock); } } int ram_cache_nodes_set(osmid_t id, double lat, double lon, struct keyval *tags UNUSED) { totalNodes++; /* if ALLOC_DENSE and ALLOC_SPARSE are set, send it through * ram_nodes_set_dense. 
If a block is non dense, it will automatically * get pushed to the sparse cache if a block is sparse and ALLOC_SPARSE is set */ if ( (allocStrategy & ALLOC_DENSE) > 0 ) { return ram_cache_nodes_set_dense(id, lat, lon, tags); } if ( (allocStrategy & ALLOC_SPARSE) > 0 ) return ram_cache_nodes_set_sparse(id, lat, lon, tags); return 1; } int ram_cache_nodes_get(struct osmNode *out, osmid_t id) { nodesCacheLookups++; if ((allocStrategy & ALLOC_DENSE) > 0) { if (ram_cache_nodes_get_dense(out,id) == 0) { nodesCacheHits++; return 0; } } if ((allocStrategy & ALLOC_SPARSE) > 0) { if (ram_cache_nodes_get_sparse(out,id) == 0) { nodesCacheHits++; return 0; } } return 1; } osm2pgsql-0.82.0/node-ram-cache.h000066400000000000000000000023531213272333300164460ustar00rootroot00000000000000/* Implements the node cache in ram. * * There are two different storage strategies, either optimised * for dense storage of node ids, or for sparse storage as well as * a strategy to combine both in an optimal way. */ #ifndef NODE_RAM_CACHE_H #define NODE_RAM_CACHE_H #define ALLOC_SPARSE 1 #define ALLOC_DENSE 2 #define ALLOC_DENSE_CHUNK 4 #define ALLOC_LOSSY 8 /* Store +-20,000km Mercator co-ordinates as fixed point 32bit number with maximum precision */ /* Scale is chosen such that 40,000 * SCALE < 2^32 */ #define FIXED_POINT static int scale = 100; #define DOUBLE_TO_FIX(x) ((int)((x) * scale)) #define FIX_TO_DOUBLE(x) (((double)x) / scale) #define UNUSED __attribute__ ((unused)) struct ramNode { #ifdef FIXED_POINT int lon; int lat; #else double lon; double lat; #endif }; struct ramNodeID { osmid_t id; struct ramNode coord; }; struct ramNodeBlock { struct ramNode *nodes; osmid_t block_offset; int used; int dirty; }; void init_node_ram_cache(int strategy, int cacheSizeMB, int fixpointscale); void free_node_ram_cache(); int ram_cache_nodes_set(osmid_t id, double lat, double lon, struct keyval *tags UNUSED); int ram_cache_nodes_get(struct osmNode *out, osmid_t id); #endif 
osm2pgsql-0.82.0/osm2pgsql-svn.sh000077500000000000000000000004241213272333300166210ustar00rootroot00000000000000#!/bin/sh DATE=$(date +%Y%m%d) MODULE="$(basename $0 -svn.sh)" SVNROOT=http://svn.openstreetmap.org/applications/utils/export/osm2pgsql/ set -x rm -rf $MODULE svn export $SVNROOT $MODULE/ ## tar it up tar cjf $MODULE-${DATE}svn.tar.bz2 $MODULE ## cleanup rm -rf $MODULE osm2pgsql-0.82.0/osm2pgsql.c000066400000000000000000000730301213272333300156250ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik # Use: osm2pgsql planet.osm.bz2 #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
#----------------------------------------------------------------------------- */ #include "config.h" #include #include #include #include #include #include #include #include #include #include #include #include "osmtypes.h" #include "build_geometry.h" #include "middle-pgsql.h" #include "middle-ram.h" #include "node-ram-cache.h" #include "output-pgsql.h" #include "output-gazetteer.h" #include "output-null.h" #include "sanitizer.h" #include "reprojection.h" #include "text-tree.h" #include "input.h" #include "sprompt.h" #include "parse-xml2.h" #include "parse-primitive.h" #include "parse-o5m.h" #ifdef BUILD_READER_PBF # include "parse-pbf.h" #endif #define INIT_MAX_MEMBERS 64 #define INIT_MAX_NODES 4096 int verbose; /* Data structure carrying all parsing related variables */ static struct osmdata_t osmdata = { .filetype = FILETYPE_NONE, .action = ACTION_NONE, .bbox = NULL }; static int parse_bbox(struct osmdata_t *osmdata) { int n; if (!osmdata->bbox) return 0; n = sscanf(osmdata->bbox, "%lf,%lf,%lf,%lf", &(osmdata->minlon), &(osmdata->minlat), &(osmdata->maxlon), &(osmdata->maxlat)); if (n != 4) { fprintf(stderr, "Bounding box must be specified like: minlon,minlat,maxlon,maxlat\n"); return 1; } if (osmdata->maxlon <= osmdata->minlon) { fprintf(stderr, "Bounding box failed due to maxlon <= minlon\n"); return 1; } if (osmdata->maxlat <= osmdata->minlat) { fprintf(stderr, "Bounding box failed due to maxlat <= minlat\n"); return 1; } fprintf(stderr, "Applying Bounding box: %f,%f to %f,%f\n", osmdata->minlon, osmdata->minlat, osmdata->maxlon, osmdata->maxlat); return 0; } void exit_nicely() { fprintf(stderr, "Error occurred, cleaning up\n"); osmdata.out->cleanup(); exit(1); } static void short_usage(char *arg0) { const char *name = basename(arg0); fprintf(stderr, "Usage error. 
For further information see:\n"); fprintf(stderr, "\t%s -h|--help\n", name); } static void long_usage(char *arg0) { int i; const char *name = basename(arg0); printf("Usage:\n"); printf("\t%s [options] planet.osm\n", name); printf("\t%s [options] planet.osm.{gz,bz2}\n", name); printf("\t%s [options] file1.osm file2.osm file3.osm\n", name); printf("\nThis will import the data from the OSM file(s) into a PostgreSQL database\n"); printf("suitable for use by the Mapnik renderer\n"); printf("\nOptions:\n"); printf(" -a|--append\t\tAdd the OSM file into the database without removing\n"); printf(" \t\texisting data.\n"); printf(" -b|--bbox\t\tApply a bounding box filter on the imported data\n"); printf(" \t\tMust be specified as: minlon,minlat,maxlon,maxlat\n"); printf(" \t\te.g. --bbox -0.5,51.25,0.5,51.75\n"); printf(" -c|--create\t\tRemove existing data from the database. This is the \n"); printf(" \t\tdefault if --append is not specified.\n"); printf(" -d|--database\tThe name of the PostgreSQL database to connect\n"); printf(" \t\tto (default: gis).\n"); printf(" -i|--tablespace-index\tThe name of the PostgreSQL tablespace where\n"); printf(" \t\tall indexes will be created.\n"); printf(" \t\tThe following options allow more fine-grained control:\n"); printf(" --tablespace-main-data \ttablespace for main tables\n"); printf(" --tablespace-main-index\ttablespace for main table indexes\n"); printf(" --tablespace-slim-data \ttablespace for slim mode tables\n"); printf(" --tablespace-slim-index\ttablespace for slim mode indexes\n"); printf(" \t\t(if unset, use db's default; -i is equivalent to setting\n"); printf(" \t\t--tablespace-main-index and --tablespace-slim-index)\n"); printf(" -l|--latlong\t\tStore data in degrees of latitude & longitude.\n"); printf(" -m|--merc\t\tStore data in proper spherical mercator (default)\n"); printf(" -M|--oldmerc\t\tStore data in the legacy OSM mercator format\n"); printf(" -E|--proj num\tUse projection EPSG:num\n"); printf(" 
-u|--utf8-sanitize\tRepair bad UTF8 input data (present in planet\n"); printf(" \tdumps prior to August 2007). Adds about 10%% overhead.\n"); printf(" -p|--prefix\t\tPrefix for table names (default planet_osm)\n"); printf(" -s|--slim\t\tStore temporary data in the database. This greatly\n"); printf(" \t\treduces the RAM usage but is much slower. This switch is\n"); printf(" \t\trequired if you want to update with --append later.\n"); if (sizeof(int*) == 4) { printf(" \t\tThis program was compiled on a 32bit system, so at most\n"); printf(" \t\t3GB of RAM will be used. If you encounter problems\n"); printf(" \t\tduring import, you should try this switch.\n"); } printf(" --drop\t\tonly with --slim: drop temporary tables after import (no updates).\n"); printf(" -S|--style\t\tLocation of the style file. Defaults to " OSM2PGSQL_DATADIR "/default.style\n"); printf(" -C|--cache\t\tNow required for slim and non-slim modes: \n"); printf(" \t\tUse up to this many MB for caching nodes (default: 800)\n"); printf(" -U|--username\tPostgresql user name\n"); printf(" \t\tpassword can be given by prompt or PGPASS environment variable.\n"); printf(" -W|--password\tForce password prompt.\n"); printf(" -H|--host\t\tDatabase server hostname or socket location.\n"); printf(" -P|--port\t\tDatabase server port.\n"); printf(" -e|--expire-tiles [min_zoom-]max_zoom\tCreate a tile expiry list.\n"); printf(" -o|--expire-output filename\tOutput filename for expired tiles list.\n"); printf(" -r|--input-reader\tInput frontend.\n"); printf(" \t\tlibxml2 - Parse XML using libxml2. (default)\n"); printf(" \t\tprimitive - Primitive XML parsing.\n"); #ifdef BUILD_READER_PBF printf(" \t\tpbf - OSM binary format.\n"); #endif printf(" -O|--output\t\tOutput backend.\n"); printf(" \t\tpgsql - Output to a PostGIS database. (default)\n"); printf(" \t\tgazetteer - Output to a PostGIS database suitable for gazetteer\n"); printf(" \t\tnull - No output. 
Useful for testing.\n"); printf(" -x|--extra-attributes\n"); printf(" \t\tInclude attributes for each object in the database.\n"); printf(" \t\tThis includes the username, userid, timestamp and version.\n"); printf(" \t\tNote: this option also requires additional entries in your style file.\n"); printf(" -k|--hstore\t\tAdd tags without column to an additional hstore (key/value) column to postgresql tables\n"); printf(" --hstore-match-only\tOnly keep objects that have a value in one of the columns\n"); printf(" - \t(normal action with --hstore is to keep all objects)\n"); printf(" -j|--hstore-all\tAdd all tags to an additional hstore (key/value) column in postgresql tables\n"); printf(" -z|--hstore-column\tAdd an additional hstore (key/value) column containing all tags\n"); printf(" \tthat start with the specified string, eg --hstore-column \"name:\" will\n"); printf(" \tproduce an extra hstore column that contains all name:xx tags\n"); printf(" --hstore-add-index\tAdd index to hstore column.\n"); printf(" -G|--multi-geometry\tGenerate multi-geometry features in postgresql tables.\n"); printf(" -K|--keep-coastlines\tKeep coastline data rather than filtering it out.\n"); printf(" \t\tBy default natural=coastline tagged data will be discarded based on the\n"); printf(" \t\tassumption that post-processed Coastline Checker shapefiles will be used.\n"); printf(" --exclude-invalid-polygon\n"); #ifdef HAVE_FORK printf(" --number-processes\t\tSpecifies the number of parallel processes used for certain operations\n"); printf(" \t\tDefault is 1\n"); #endif printf(" -I|--disable-parallel-indexing\tDisable indexing all tables concurrently.\n"); printf(" --unlogged\tUse unlogged tables (lost on crash but faster). 
Requires PostgreSQL 9.1.\n"); printf(" --cache-strategy\tSpecifies the method used to cache nodes in ram.\n"); printf(" \t\tAvailable options are:\n"); printf(" \t\tdense: caching strategy optimised for full planet import\n"); printf(" \t\tchunked: caching strategy optimised for non-contigouse memory allocation\n"); printf(" \t\tsparse: caching strategy optimised for small extracts\n"); printf(" \t\toptimized: automatically combines dense and sparse strategies for optimal storage efficiency.\n"); printf(" \t\t optimized may use twice as much virtual memory, but no more physical memory\n"); #ifdef __amd64__ printf(" \t\t The default is \"optimized\"\n"); #else /* use "chunked" as a default in 32 bit compilations, as it is less wasteful of virtual memory than "optimized"*/ printf(" \t\t The default is \"sparse\"\n"); #endif printf(" --flat-nodes\tSpecifies the flat file to use to persistently store node information in slim mode instead of in pgsql\n"); printf(" \t\tThis file is a single > 16Gb large file. This method is only recomended for full planet imports\n"); printf(" \t\tas it doesn't work well with small extracts. 
The default is disabled\n"); printf(" -h|--help\t\tHelp information.\n"); printf(" -v|--verbose\t\tVerbose output.\n"); printf("\n"); if(!verbose) { printf("Add -v to display supported projections.\n"); printf("Use -E to access any espg projections (usually in /usr/share/proj/epsg)\n" ); } else { printf("Supported projections:\n" ); for(i=0; ind_max == 0 ) osmdata->nd_max = INIT_MAX_NODES; else osmdata->nd_max <<= 1; osmdata->nds = realloc( osmdata->nds, osmdata->nd_max * sizeof( osmdata->nds[0] ) ); if( !osmdata->nds ) { fprintf( stderr, "Failed to expand node list to %d\n", osmdata->nd_max ); exit_nicely(); } } void realloc_members(struct osmdata_t *osmdata) { if( osmdata->member_max == 0 ) osmdata->member_max = INIT_MAX_NODES; else osmdata->member_max <<= 1; osmdata->members = realloc( osmdata->members, osmdata->member_max * sizeof( osmdata->members[0] ) ); if( !osmdata->members ) { fprintf( stderr, "Failed to expand member list to %d\n", osmdata->member_max ); exit_nicely(); } } void resetMembers(struct osmdata_t *osmdata) { unsigned i; for(i = 0; i < osmdata->member_count; i++ ) free( osmdata->members[i].role ); } void printStatus(struct osmdata_t *osmdata) { time_t now; time_t end_nodes; time_t end_way; time_t end_rel; time(&now); end_nodes = osmdata->start_way > 0 ? osmdata->start_way : now; end_way = osmdata->start_rel > 0 ? osmdata->start_rel : now; end_rel = now; fprintf(stderr, "\rProcessing: Node(%" PRIdOSMID "k %.1fk/s) Way(%" PRIdOSMID "k %.2fk/s) Relation(%" PRIdOSMID " %.2f/s)", osmdata->count_node/1000, (double)osmdata->count_node/1000.0/((int)(end_nodes - osmdata->start_node) > 0 ? (double)(end_nodes - osmdata->start_node) : 1.0), osmdata->count_way/1000, osmdata->count_way > 0 ? (double)osmdata->count_way/1000.0/ ((double)(end_way - osmdata->start_way) > 0.0 ? (double)(end_way - osmdata->start_way) : 1.0) : 0.0, osmdata->count_rel, osmdata->count_rel > 0 ? (double)osmdata->count_rel/ ((double)(end_rel - osmdata->start_rel) > 0.0 ? 
(double)(end_rel - osmdata->start_rel) : 1.0) : 0.0); } int node_wanted(struct osmdata_t *osmdata, double lat, double lon) { if (!osmdata->bbox) return 1; if (lat < osmdata->minlat || lat > osmdata->maxlat) return 0; if (lon < osmdata->minlon || lon > osmdata->maxlon) return 0; return 1; } int main(int argc, char *argv[]) { int append=0; int create=0; int slim=0; int sanitize=0; int long_usage_bool=0; int pass_prompt=0; int projection = PROJ_SPHERE_MERC; int expire_tiles_zoom = -1; int expire_tiles_zoom_min = -1; int enable_hstore = HSTORE_NONE; int enable_hstore_index = 0; int hstore_match_only = 0; int enable_multi = 0; int parallel_indexing = 1; int flat_node_cache_enabled = 0; #ifdef __amd64__ int alloc_chunkwise = ALLOC_SPARSE | ALLOC_DENSE; #else int alloc_chunkwise = ALLOC_SPARSE; #endif int num_procs = 1; int droptemp = 0; int unlogged = 0; int excludepoly = 0; time_t start, end; time_t overall_start, overall_end; time_t now; time_t end_nodes; time_t end_way; time_t end_rel; const char *expire_tiles_filename = "dirty_tiles"; const char *db = "gis"; const char *username=NULL; const char *host=NULL; const char *password=NULL; const char *port = "5432"; const char *tblsmain_index = NULL; /* no default TABLESPACE for index on main tables */ const char *tblsmain_data = NULL; /* no default TABLESPACE for main tables */ const char *tblsslim_index = NULL; /* no default TABLESPACE for index on slim mode tables */ const char *tblsslim_data = NULL; /* no default TABLESPACE for slim mode tables */ const char *conninfo = NULL; const char *prefix = "planet_osm"; const char *style = OSM2PGSQL_DATADIR "/default.style"; const char *temparg; const char *output_backend = "pgsql"; const char *input_reader = "auto"; const char **hstore_columns = NULL; const char *flat_nodes_file = NULL; int n_hstore_columns = 0; int keep_coastlines=0; int cache = 800; struct output_options options; PGconn *sql_conn; int (*streamFile)(char *, int, struct osmdata_t *); fprintf(stderr, "osm2pgsql 
SVN version %s (%lubit id space)\n\n", VERSION, 8 * sizeof(osmid_t)); while (1) { int c, option_index = 0; static struct option long_options[] = { {"append", 0, 0, 'a'}, {"bbox", 1, 0, 'b'}, {"create", 0, 0, 'c'}, {"database", 1, 0, 'd'}, {"latlong", 0, 0, 'l'}, {"verbose", 0, 0, 'v'}, {"slim", 0, 0, 's'}, {"prefix", 1, 0, 'p'}, {"proj", 1, 0, 'E'}, {"merc", 0, 0, 'm'}, {"oldmerc", 0, 0, 'M'}, {"utf8-sanitize", 0, 0, 'u'}, {"cache", 1, 0, 'C'}, {"username", 1, 0, 'U'}, {"password", 0, 0, 'W'}, {"host", 1, 0, 'H'}, {"port", 1, 0, 'P'}, {"tablespace-index", 1, 0, 'i'}, {"tablespace-slim-data", 1, 0, 200}, {"tablespace-slim-index", 1, 0, 201}, {"tablespace-main-data", 1, 0, 202}, {"tablespace-main-index", 1, 0, 203}, {"help", 0, 0, 'h'}, {"style", 1, 0, 'S'}, {"expire-tiles", 1, 0, 'e'}, {"expire-output", 1, 0, 'o'}, {"output", 1, 0, 'O'}, {"extra-attributes", 0, 0, 'x'}, {"hstore", 0, 0, 'k'}, {"hstore-all", 0, 0, 'j'}, {"hstore-column", 1, 0, 'z'}, {"hstore-match-only", 0, 0, 208}, {"hstore-add-index",0,0,211}, {"multi-geometry", 0, 0, 'G'}, {"keep-coastlines", 0, 0, 'K'}, {"input-reader", 1, 0, 'r'}, {"version", 0, 0, 'V'}, {"disable-parallel-indexing", 0, 0, 'I'}, {"cache-strategy", 1, 0, 204}, {"number-processes", 1, 0, 205}, {"drop", 0, 0, 206}, {"unlogged", 0, 0, 207}, {"flat-nodes",1,0,209}, {"exclude-invalid-polygon",0,0,210}, {0, 0, 0, 0} }; c = getopt_long (argc, argv, "ab:cd:KhlmMp:suvU:WH:P:i:IE:C:S:e:o:O:xkjGz:r:V", long_options, &option_index); if (c == -1) break; switch (c) { case 'a': append=1; break; case 'b': osmdata.bbox=optarg; break; case 'c': create=1; break; case 'v': verbose=1; break; case 's': slim=1; break; case 'K': keep_coastlines=1; break; case 'u': sanitize=1; break; case 'l': projection=PROJ_LATLONG; break; case 'm': projection=PROJ_SPHERE_MERC; break; case 'M': projection=PROJ_MERC; break; case 'E': projection=-atoi(optarg); break; case 'p': prefix=optarg; break; case 'd': db=optarg; break; case 'C': cache = atoi(optarg); break; case 
'U': username=optarg; break; case 'W': pass_prompt=1; break; case 'H': host=optarg; break; case 'P': port=optarg; break; case 'S': style=optarg; break; case 'i': tblsmain_index=tblsslim_index=optarg; break; case 200: tblsslim_data=optarg; break; case 201: tblsslim_index=optarg; break; case 202: tblsmain_data=optarg; break; case 203: tblsmain_index=optarg; break; case 'e': expire_tiles_zoom_min = atoi(optarg); temparg = strchr(optarg, '-'); if (temparg) expire_tiles_zoom = atoi(temparg + 1); if (expire_tiles_zoom < expire_tiles_zoom_min) expire_tiles_zoom = expire_tiles_zoom_min; break; case 'o': expire_tiles_filename=optarg; break; case 'O': output_backend = optarg; break; case 'x': osmdata.extra_attributes=1; break; case 'k': enable_hstore=HSTORE_NORM; break; case 208: hstore_match_only = 1; break; case 'j': enable_hstore=HSTORE_ALL; break; case 'z': n_hstore_columns++; hstore_columns = (const char**)realloc(hstore_columns, sizeof(char *) * n_hstore_columns); hstore_columns[n_hstore_columns-1] = optarg; break; case 'G': enable_multi=1; break; case 'r': input_reader = optarg; break; case 'h': long_usage_bool=1; break; case 'I': #ifdef HAVE_PTHREAD parallel_indexing=0; #endif break; case 204: if (strcmp(optarg,"dense") == 0) alloc_chunkwise = ALLOC_DENSE; if (strcmp(optarg,"chunk") == 0) alloc_chunkwise = ALLOC_DENSE | ALLOC_DENSE_CHUNK; if (strcmp(optarg,"sparse") == 0) alloc_chunkwise = ALLOC_SPARSE; if (strcmp(optarg,"optimized") == 0) alloc_chunkwise = ALLOC_DENSE | ALLOC_SPARSE; break; case 205: #ifdef HAVE_FORK num_procs = atoi(optarg); #else fprintf(stderr, "WARNING: osm2pgsql was compiled without fork, only using one process!\n"); #endif break; case 206: droptemp = 1; break; case 207: unlogged = 1; break; case 209: flat_node_cache_enabled = 1; flat_nodes_file = optarg; break; case 210: excludepoly = 1; exclude_broken_polygon(); break; case 211: enable_hstore_index = 1; break; case 'V': exit(EXIT_SUCCESS); case '?': default: short_usage(argv[0]); 
exit(EXIT_FAILURE); } } if (long_usage_bool) { long_usage(argv[0]); exit(EXIT_SUCCESS); } if (argc == optind) { /* No non-switch arguments */ short_usage(argv[0]); exit(EXIT_FAILURE); } if (append && create) { fprintf(stderr, "Error: --append and --create options can not be used at the same time!\n"); exit(EXIT_FAILURE); } if (droptemp && !slim) { fprintf(stderr, "Error: --drop only makes sense with --slim.\n"); exit(EXIT_FAILURE); } if (unlogged && !create) { fprintf(stderr, "Warning: --unlogged only makes sense with --create; ignored.\n"); unlogged = 0; } if (enable_hstore == HSTORE_NONE && !n_hstore_columns && hstore_match_only) { fprintf(stderr, "Warning: --hstore-match-only only makes sense with --hstore, --hstore-all, or --hstore-column; ignored.\n"); hstore_match_only = 0; } if (enable_hstore_index && enable_hstore == HSTORE_NONE && !n_hstore_columns) { fprintf(stderr, "Warning: --hstore-add-index only makes sense with hstore enabled.\n"); enable_hstore_index = 0; } if (cache < 0) cache = 0; if (num_procs < 1) num_procs = 1; if (pass_prompt) password = simple_prompt("Password:", 100, 0); else { password = getenv("PGPASS"); } conninfo = build_conninfo(db, username, password, host, port); sql_conn = PQconnectdb(conninfo); if (PQstatus(sql_conn) != CONNECTION_OK) { fprintf(stderr, "Error: Connection to database failed: %s\n", PQerrorMessage(sql_conn)); exit(EXIT_FAILURE); } if (unlogged && PQserverVersion(sql_conn) < 90100) { fprintf(stderr, "Error: --unlogged works only with PostgreSQL 9.1 and above, but\n"); fprintf(stderr, "you are using PostgreSQL %d.%d.%d.\n", PQserverVersion(sql_conn) / 10000, (PQserverVersion(sql_conn) / 100) % 100, PQserverVersion(sql_conn) % 100); exit(EXIT_FAILURE); } PQfinish(sql_conn); text_init(); initList(&osmdata.tags); osmdata.count_node = osmdata.max_node = 0; osmdata.count_way = osmdata.max_way = 0; osmdata.count_rel = osmdata.max_rel = 0; LIBXML_TEST_VERSION project_init(projection); fprintf(stderr, "Using projection SRS %d 
(%s)\n", project_getprojinfo()->srs, project_getprojinfo()->descr ); if (parse_bbox(&osmdata)) return 1; options.conninfo = conninfo; options.prefix = prefix; options.append = append; options.slim = slim; options.projection = project_getprojinfo()->srs; options.scale = (projection==PROJ_LATLONG)?10000000:100; options.mid = slim ? &mid_pgsql : &mid_ram; options.cache = cache; options.style = style; options.tblsmain_index = tblsmain_index; options.tblsmain_data = tblsmain_data; options.tblsslim_index = tblsslim_index; options.tblsslim_data = tblsslim_data; options.expire_tiles_zoom = expire_tiles_zoom; options.expire_tiles_zoom_min = expire_tiles_zoom_min; options.expire_tiles_filename = expire_tiles_filename; options.enable_multi = enable_multi; options.enable_hstore = enable_hstore; options.enable_hstore_index = enable_hstore_index; options.hstore_match_only = hstore_match_only; options.hstore_columns = hstore_columns; options.n_hstore_columns = n_hstore_columns; options.keep_coastlines = keep_coastlines; options.parallel_indexing = parallel_indexing; options.alloc_chunkwise = alloc_chunkwise; options.num_procs = num_procs; options.droptemp = droptemp; options.unlogged = unlogged; options.flat_node_cache_enabled = flat_node_cache_enabled; options.flat_node_file = flat_nodes_file; options.excludepoly = excludepoly; if (strcmp("pgsql", output_backend) == 0) { osmdata.out = &out_pgsql; } else if (strcmp("gazetteer", output_backend) == 0) { osmdata.out = &out_gazetteer; } else if (strcmp("null", output_backend) == 0) { osmdata.out = &out_null; } else { fprintf(stderr, "Output backend `%s' not recognised. 
Should be one of [pgsql, gazetteer, null].\n", output_backend); exit(EXIT_FAILURE); } options.out = osmdata.out; if (strcmp("auto", input_reader) != 0) { if (strcmp("libxml2", input_reader) == 0) { streamFile = &streamFileXML2; } else if (strcmp("primitive", input_reader) == 0) { streamFile = &streamFilePrimitive; #ifdef BUILD_READER_PBF } else if (strcmp("pbf", input_reader) == 0) { streamFile = &streamFilePbf; #endif } else if (strcmp("o5m", input_reader) == 0) { streamFile = &streamFileO5m; } else { fprintf(stderr, "Input parser `%s' not recognised. Should be one of [libxml2, primitive, o5m" #ifdef BUILD_READER_PBF ", pbf" #endif "].\n", input_reader); exit(EXIT_FAILURE); } } time(&overall_start); osmdata.out->start(&options); realloc_nodes(&osmdata); realloc_members(&osmdata); if (sizeof(int*) == 4 && options.slim != 1) { fprintf(stderr, "\n!! You are running this on 32bit system, so at most\n"); fprintf(stderr, "!! 3GB of RAM can be used. If you encounter unexpected\n"); fprintf(stderr, "!! exceptions during import, you should try running in slim\n"); fprintf(stderr, "!! 
mode using parameter -s.\n"); } while (optind < argc) { /* if input_reader is not forced by -r switch try to auto-detect it by file extension */ if (strcmp("auto", input_reader) == 0) { if (strcasecmp(".pbf",argv[optind]+strlen(argv[optind])-4) == 0) { #ifdef BUILD_READER_PBF streamFile = &streamFilePbf; #else fprintf(stderr, "ERROR: PBF support has not been compiled into this version of osm2pgsql, please either compile it with pbf support or use one of the other input formats\n"); exit(EXIT_FAILURE); #endif } else if (strcasecmp(".o5m",argv[optind]+strlen(argv[optind])-4) == 0) { streamFile = &streamFileO5m; } else { streamFile = &streamFileXML2; } } fprintf(stderr, "\nReading in file: %s\n", argv[optind]); time(&start); if (streamFile(argv[optind], sanitize, &osmdata) != 0) exit_nicely(); time(&end); fprintf(stderr, " parse time: %ds\n", (int)(end - start)); optind++; } xmlCleanupParser(); xmlMemoryDump(); if (osmdata.count_node || osmdata.count_way || osmdata.count_rel) { time(&now); end_nodes = osmdata.start_way > 0 ? osmdata.start_way : now; end_way = osmdata.start_rel > 0 ? osmdata.start_rel : now; end_rel = now; fprintf(stderr, "\n"); fprintf(stderr, "Node stats: total(%" PRIdOSMID "), max(%" PRIdOSMID ") in %is\n", osmdata.count_node, osmdata.max_node, osmdata.count_node > 0 ? (int)(end_nodes - osmdata.start_node) : 0); fprintf(stderr, "Way stats: total(%" PRIdOSMID "), max(%" PRIdOSMID ") in %is\n", osmdata.count_way, osmdata.max_way, osmdata.count_way > 0 ? (int)(end_way - osmdata.start_way) : 0); fprintf(stderr, "Relation stats: total(%" PRIdOSMID "), max(%" PRIdOSMID ") in %is\n", osmdata.count_rel, osmdata.max_rel, osmdata.count_rel > 0 ? 
(int)(end_rel - osmdata.start_rel) : 0); } osmdata.out->stop(); free(osmdata.nds); free(osmdata.members); /* free the column pointer buffer */ free(hstore_columns); project_exit(); text_exit(); fprintf(stderr, "\n"); time(&overall_end); fprintf(stderr, "Osm2pgsql took %ds overall\n", (int)(overall_end - overall_start)); return 0; } osm2pgsql-0.82.0/osm2pgsql.spec.in000066400000000000000000000057651213272333300167540ustar00rootroot00000000000000 %define svn @SVN@ Summary: Imports map data from www.OpenStreetMap.org to a PostgresSQL database Name: @PACKAGE@ Group: Applications/Text Version: @VERSION@ Release: 1.%{svn}%{?dist} License: GPL URL: http://svn.openstreetmap.org/applications/utils/export/osm2pgsql Source0: %{name}-%{version}.tar.bz2 Source1: osm2pgsql-svn.sh BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildRequires: geos-devel BuildRequires: libxml2-devel BuildRequires: postgresql-devel BuildRequires: bzip2-devel BuildRequires: proj-devel %description Processes the planet file from the communtiy mapping project at http://www.openstreetmap.org. The map data is converted from XML to a database stored in PostgreSQL with PostGIS geospatial extentions. This database may then be used to render maps with Mapnik or for other geospatial analysis. 
%prep %setup -q -n %{name} %build export CFLAGS="$RPM_OPT_FLAGS" export CXXFLAGS="$RPM_OPT_FLAGS" make all %install rm -rf $RPM_BUILD_ROOT install -D -p osm2pgsql $RPM_BUILD_ROOT/usr/bin/osm2pgsql %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) %doc README %{_bindir}/osm2pgsql %changelog * Sun Nov 23 2008 Keith Sharp - 0.55-1.20081123 - Fixed case of README.txt filename in Makefile - Fixed case of README.txt filename in osm2pgsql.spec.in - Fixed make clean to remove generated osm2pgsql.spec file * Mon Sep 3 2007 Jon Burgess - 0.06-1.20070903 - Add several command line options and help text - "--append" mode which imports extra data into the DB - "--database" to specify the Postgresql DB name - "--slim" to select the middle_ram code - Multiple files can be read sequentially, e.g. lots of tiger county.osm files * Sun Aug 19 2007 Jon Burgess - 0.04-1.20070812 - Allow polygon data types to appear as linestring if not closed (was broken by previous change) - Handle exception when finding an interior point of a complex polygon geometry - Add in a text cache for key/value pairs - Switch to 65k blocks for object store - Exclude source= during import - The above changes save around 10% memory usage during a planet import - Push leisure= polygons down to improve rendering * Sun Aug 19 2007 Jon Burgess - 0.3-1.20070812 - Handle polygons with holes properly - Export a couple more keys * Sun Aug 12 2007 Jon Burgess - 0.2-1.20070812 - Added rpm build target to SVN osm2pgsql source - Enhanced middle-ram to remove hard coded maximum IDs - Support negative IDs in middle-ram - Replace centroid with interior to cope with unusual pareking polygons * Tue Jul 31 2007 Keith Sharp 0.1-2.20070728svn - Fixed BuidlRequires so that Mock builds work. 
* Sat Jul 28 2007 Keith Sharp 0.1-1.20070728svn - Updated to latest SVN, now includes UTF8Sanitize functioanlity - Building on F7 now has latest GEOS * Fri Mar 16 2007 Keith Sharp 0.1-1.20070316svn - Initial build osm2pgsql-0.82.0/osmtypes.h000066400000000000000000000036661213272333300155760ustar00rootroot00000000000000/* Data types to hold OSM node, segment, way data */ #ifndef OSMTYPES_H #define OSMTYPES_H #include #include #include /* Use ./configure --enable-64bit-ids to build a version that supports 64bit IDs. */ #ifdef OSMID64 typedef int64_t osmid_t; #define strtoosmid strtoll #define PRIdOSMID PRId64 #define POSTGRES_OSMID_TYPE "int8" #else typedef int32_t osmid_t; #define strtoosmid strtol #define PRIdOSMID PRId32 #define POSTGRES_OSMID_TYPE "int4" #endif #include "keyvals.h" enum OsmType { OSMTYPE_WAY, OSMTYPE_NODE, OSMTYPE_RELATION }; struct osmNode { double lon; double lat; }; struct member { enum OsmType type; osmid_t id; char *role; }; typedef enum { FILETYPE_NONE, FILETYPE_OSM, FILETYPE_OSMCHANGE, FILETYPE_PLANETDIFF } filetypes_t; typedef enum { ACTION_NONE, ACTION_CREATE, ACTION_MODIFY, ACTION_DELETE } actions_t; struct osmdata_t { osmid_t count_node, max_node; osmid_t count_way, max_way; osmid_t count_rel, max_rel; time_t start_node, start_way, start_rel; struct output_t *out; /* Since {node,way} elements are not nested we can guarantee the values in an end tag must match those of the corresponding start tag and can therefore be cached. 
*/ double node_lon, node_lat; struct keyval tags; osmid_t *nds; int nd_count, nd_max; struct member *members; int member_count, member_max; osmid_t osm_id; filetypes_t filetype; actions_t action; int extra_attributes; /* Bounding box to filter imported data */ const char *bbox; double minlon, minlat, maxlon, maxlat; int parallel_indexing; }; void realloc_nodes(struct osmdata_t *osmdata); void realloc_members(struct osmdata_t *osmdata); void resetMembers(struct osmdata_t *osmdata); void printStatus(struct osmdata_t *osmdata); int node_wanted(struct osmdata_t *osmdata, double lat, double lon); /* exit_nicely - called to cleanup after fatal error */ void exit_nicely(void); #endif osm2pgsql-0.82.0/output-gazetteer.c000066400000000000000000001160521213272333300172300ustar00rootroot00000000000000#include #include #include #include #include "osmtypes.h" #include "middle.h" #include "output.h" #include "output-gazetteer.h" #include "pgsql.h" #include "reprojection.h" #include "build_geometry.h" #define BUFFER_SIZE 4096 #define SRID (project_getprojinfo()->srs) #define CREATE_KEYVALUETYPE_TYPE \ "CREATE TYPE keyvalue AS (" \ " key TEXT," \ " value TEXT" \ ")" #define CREATE_WORDSCORE_TYPE \ "CREATE TYPE wordscore AS (" \ " word TEXT," \ " score FLOAT" \ ")" #define CREATE_PLACE_TABLE \ "CREATE TABLE place (" \ " osm_type CHAR(1) NOT NULL," \ " osm_id " POSTGRES_OSMID_TYPE " NOT NULL," \ " class TEXT NOT NULL," \ " type TEXT NOT NULL," \ " name HSTORE," \ " admin_level INTEGER," \ " housenumber TEXT," \ " street TEXT," \ " isin TEXT," \ " postcode TEXT," \ " country_code VARCHAR(2)," \ " extratags HSTORE" \ ") %s %s" #define ADMINLEVEL_NONE 100 #define CREATE_PLACE_ID_INDEX \ "CREATE INDEX place_id_idx ON place USING BTREE (osm_type, osm_id) %s %s" #define TAGINFO_NODE 0x1u #define TAGINFO_WAY 0x2u #define TAGINFO_AREA 0x4u static const struct output_options *Options = NULL; static PGconn *Connection = NULL; static int CopyActive = 0; static char Buffer[BUFFER_SIZE]; static 
unsigned int BufferLen = 0; static PGconn *ConnectionDelete = NULL; static PGconn *ConnectionError = NULL; static int CopyErrorActive = 0; static char BufferError[BUFFER_SIZE]; static unsigned int BufferErrorLen = 0; static FILE * hLog = NULL; static void require_slim_mode(void) { if (!Options->slim) { fprintf(stderr, "Cannot apply diffs unless in slim mode\n"); exit_nicely(); } return; } static void copy_data(const char *sql) { unsigned int sqlLen = strlen(sql); /* Make sure we have an active copy */ if (!CopyActive) { pgsql_exec(Connection, PGRES_COPY_IN, "COPY place FROM STDIN"); CopyActive = 1; } /* If the combination of old and new data is too big, flush old data */ if (BufferLen + sqlLen > BUFFER_SIZE - 10) { pgsql_CopyData("place", Connection, Buffer); BufferLen = 0; } /* * If new data by itself is too big, output it immediately, * otherwise just add it to the buffer. */ if (sqlLen > BUFFER_SIZE - 10) { pgsql_CopyData("Place", Connection, sql); sqlLen = 0; } else if (sqlLen > 0) { strcpy(Buffer + BufferLen, sql); BufferLen += sqlLen; sqlLen = 0; } /* If we have completed a line, output it */ if (BufferLen > 0 && Buffer[BufferLen-1] == '\n') { pgsql_CopyData("place", Connection, Buffer); BufferLen = 0; } return; } static void stop_copy(void) { PGresult *res; /* Do we have a copy active? 
*/ if (!CopyActive) return; /* Terminate the copy */ if (PQputCopyEnd(Connection, NULL) != 1) { fprintf(stderr, "COPY_END for place failed: %s\n", PQerrorMessage(Connection)); exit_nicely(); } /* Check the result */ res = PQgetResult(Connection); if (PQresultStatus(res) != PGRES_COMMAND_OK) { fprintf(stderr, "COPY_END for place failed: %s\n", PQerrorMessage(Connection)); PQclear(res); exit_nicely(); } /* Discard the result */ PQclear(res); /* We no longer have an active copy */ CopyActive = 0; return; } static void copy_error_data(const char *sql) { unsigned int sqlLen = strlen(sql); if (hLog) fprintf(hLog, "%s", sql); /* Make sure we have an active copy */ if (!CopyErrorActive) { pgsql_exec(ConnectionError, PGRES_COPY_IN, "COPY import_polygon_error (osm_type, osm_id, class, type, name, country_code, updated, errormessage, prevgeometry, newgeometry) FROM stdin;"); CopyErrorActive = 1; } /* If the combination of old and new data is too big, flush old data */ if (BufferErrorLen + sqlLen > BUFFER_SIZE - 10) { pgsql_CopyData("import_polygon_error", ConnectionError, BufferError); BufferErrorLen = 0; } /* * If new data by itself is too big, output it immediately, * otherwise just add it to the buffer. */ if (sqlLen > BUFFER_SIZE - 10) { pgsql_CopyData("import_polygon_error", ConnectionError, sql); sqlLen = 0; } else if (sqlLen > 0) { strcpy(BufferError + BufferErrorLen, sql); BufferErrorLen += sqlLen; sqlLen = 0; } /* If we have completed a line, output it */ if (BufferErrorLen > 0 && BufferError[BufferErrorLen-1] == '\n') { pgsql_CopyData("place", ConnectionError, BufferError); BufferErrorLen = 0; } return; } static void stop_error_copy(void) { PGresult *res; /* Do we have a copy active? 
*/ if (!CopyErrorActive) return; /* Terminate the copy */ if (PQputCopyEnd(ConnectionError, NULL) != 1) { fprintf(stderr, "COPY_END for import_polygon_error failed: %s\n", PQerrorMessage(ConnectionError)); exit_nicely(); } /* Check the result */ res = PQgetResult(ConnectionError); if (PQresultStatus(res) != PGRES_COMMAND_OK) { fprintf(stderr, "COPY_END for import_polygon_error failed: %s\n", PQerrorMessage(ConnectionError)); PQclear(res); exit_nicely(); } /* Discard the result */ PQclear(res); /* We no longer have an active copy */ CopyErrorActive = 0; return; } static int split_tags(struct keyval *tags, unsigned int flags, struct keyval *names, struct keyval *places, struct keyval *extratags, int* admin_level, struct keyval ** housenumber, struct keyval ** street, char ** isin, struct keyval ** postcode, struct keyval ** countrycode) { int placehouse = 0; int placebuilding = 0; int placeadmin = 0; struct keyval *landuse; struct keyval *place; struct keyval *item; struct keyval *conscriptionnumber; struct keyval *streetnumber; *admin_level = ADMINLEVEL_NONE; *housenumber = 0; *street = 0; *isin = 0; int isinsize = 0; *postcode = 0; *countrycode = 0; landuse = 0; place = 0; conscriptionnumber = 0; streetnumber = 0; /* Initialise the result lists */ initList(names); initList(places); initList(extratags); /* Loop over the tags */ while ((item = popItem(tags)) != NULL) { /* If this is a name tag, add it to the name list */ if (strcmp(item->key, "ref") == 0 || strcmp(item->key, "int_ref") == 0 || strcmp(item->key, "nat_ref") == 0 || strcmp(item->key, "reg_ref") == 0 || strcmp(item->key, "loc_ref") == 0 || strcmp(item->key, "old_ref") == 0 || strcmp(item->key, "ncn_ref") == 0 || strcmp(item->key, "rcn_ref") == 0 || strcmp(item->key, "lcn_ref") == 0 || strcmp(item->key, "iata") == 0 || strcmp(item->key, "icao") == 0 || strcmp(item->key, "pcode:1") == 0 || strcmp(item->key, "pcode:2") == 0 || strcmp(item->key, "pcode:3") == 0 || strcmp(item->key, "un:pcode:1") == 0 || 
strcmp(item->key, "un:pcode:2") == 0 || strcmp(item->key, "un:pcode:3") == 0 || strcmp(item->key, "name") == 0 || (strncmp(item->key, "name:", 5) == 0) || strcmp(item->key, "int_name") == 0 || (strncmp(item->key, "int_name:", 9) == 0) || strcmp(item->key, "nat_name") == 0 || (strncmp(item->key, "nat_name:", 9) == 0) || strcmp(item->key, "reg_name") == 0 || (strncmp(item->key, "reg_name:", 9) == 0) || strcmp(item->key, "loc_name") == 0 || (strncmp(item->key, "loc_name:", 9) == 0) || strcmp(item->key, "old_name") == 0 || (strncmp(item->key, "old_name:", 9) == 0) || strcmp(item->key, "alt_name") == 0 || (strncmp(item->key, "alt_name:", 9) == 0) || strcmp(item->key, "official_name") == 0 || (strncmp(item->key, "official_name:", 14) == 0) || strcmp(item->key, "commonname") == 0 || (strncmp(item->key, "commonname:", 11) == 0) || strcmp(item->key, "common_name") == 0 || (strncmp(item->key, "common_name:", 12) == 0) || strcmp(item->key, "place_name") == 0 || (strncmp(item->key, "place_name:", 11) == 0) || strcmp(item->key, "short_name") == 0 || (strncmp(item->key, "short_name:", 11) == 0) || strcmp(item->key, "operator") == 0) /* operator is a bit of an oddity */ { if (strcmp(item->key, "name:prefix") == 0) { pushItem(extratags, item); } else { pushItem(names, item); } } else if (strcmp(item->key, "aerialway") == 0 || strcmp(item->key, "aeroway") == 0 || strcmp(item->key, "amenity") == 0 || strcmp(item->key, "boundary") == 0 || strcmp(item->key, "bridge") == 0 || strcmp(item->key, "craft") == 0 || strcmp(item->key, "emergency") == 0 || strcmp(item->key, "highway") == 0 || strcmp(item->key, "historic") == 0 || strcmp(item->key, "leisure") == 0 || strcmp(item->key, "military") == 0 || strcmp(item->key, "natural") == 0 || strcmp(item->key, "office") == 0 || strcmp(item->key, "railway") == 0 || strcmp(item->key, "shop") == 0 || strcmp(item->key, "tourism") == 0 || strcmp(item->key, "tunnel") == 0 || strcmp(item->key, "waterway") == 0 ) { if (strcmp(item->value, "no")) { 
pushItem(places, item); if (strcmp(item->key, "boundary") == 0 && strcmp(item->value, "administrative") == 0) { placeadmin = 1; } } else { freeItem(item); } } else if (strcmp(item->key, "place") == 0) { place = item; } else if (strcmp(item->key, "addr:housename") == 0) { pushItem(names, item); placehouse = 1; } else if (strcmp(item->key, "landuse") == 0) { if (strcmp(item->value, "cemetery") == 0) pushItem(places, item); else landuse = item; } else if (strcmp(item->key, "postal_code") == 0 || strcmp(item->key, "post_code") == 0 || strcmp(item->key, "postcode") == 0 || strcmp(item->key, "addr:postcode") == 0 || strcmp(item->key, "tiger:zip_left") == 0 || strcmp(item->key, "tiger:zip_right") == 0) { if (*postcode) freeItem(item); else *postcode = item; } else if (strcmp(item->key, "addr:street") == 0) { *street = item; } else if ((strcmp(item->key, "country_code_iso3166_1_alpha_2") == 0 || strcmp(item->key, "country_code_iso3166_1") == 0 || strcmp(item->key, "country_code_iso3166") == 0 || strcmp(item->key, "country_code") == 0 || strcmp(item->key, "iso3166-1:alpha2") == 0 || strcmp(item->key, "iso3166-1") == 0 || strcmp(item->key, "ISO3166-1") == 0 || strcmp(item->key, "iso3166") == 0 || strcmp(item->key, "is_in:country_code") == 0 || strcmp(item->key, "addr:country") == 0 || strcmp(item->key, "addr:country_code") == 0) && strlen(item->value) == 2) { *countrycode = item; } else if (strcmp(item->key, "addr:housenumber") == 0) { /* house number can be far more complex than just a single house number - leave for postgresql to deal with */ if (*housenumber) freeItem(item); else { *housenumber = item; placehouse = 1; } } else if (strcmp(item->key, "addr:conscriptionnumber") == 0) { if (conscriptionnumber) freeItem(item); else { conscriptionnumber = item; placehouse = 1; } } else if (strcmp(item->key, "addr:streetnumber") == 0) { if (streetnumber) freeItem(item); else { streetnumber = item; placehouse = 1; } } else if (strcmp(item->key, "addr:interpolation") == 0) { /* 
house number can be far more complex than just a single house number - leave for postgresql to deal with */ if (*housenumber) { freeItem(item); } else { *housenumber = item; addItem(places, "place", "houses", 1); } } else if (strcmp(item->key, "is_in") == 0 || (strncmp(item->key, "is_in:", 5) == 0) || strcmp(item->key, "addr:country")== 0 || strcmp(item->key, "addr:county")== 0 || strcmp(item->key, "tiger:county")== 0 || strcmp(item->key, "addr:city") == 0 || strcmp(item->key, "addr:state_code") == 0 || strcmp(item->key, "addr:state") == 0) { *isin = realloc(*isin, isinsize + 2 + strlen(item->value)); *(*isin+isinsize) = ','; strcpy(*isin+1+isinsize, item->value); isinsize += 1 + strlen(item->value); freeItem(item); } else if (strcmp(item->key, "admin_level") == 0) { *admin_level = atoi(item->value); freeItem(item); } else if (strcmp(item->key, "tracktype") == 0 || strcmp(item->key, "traffic_calming") == 0 || strcmp(item->key, "service") == 0 || strcmp(item->key, "cuisine") == 0 || strcmp(item->key, "capital") == 0 || strcmp(item->key, "dispensing") == 0 || strcmp(item->key, "religion") == 0 || strcmp(item->key, "denomination") == 0 || strcmp(item->key, "sport") == 0 || strcmp(item->key, "internet_access") == 0 || strcmp(item->key, "lanes") == 0 || strcmp(item->key, "surface") == 0 || strcmp(item->key, "smoothness") == 0 || strcmp(item->key, "width") == 0 || strcmp(item->key, "est_width") == 0 || strcmp(item->key, "incline") == 0 || strcmp(item->key, "opening_hours") == 0 || strcmp(item->key, "food_hours") == 0 || strcmp(item->key, "collection_times") == 0 || strcmp(item->key, "service_times") == 0 || strcmp(item->key, "smoking_hours") == 0 || strcmp(item->key, "disused") == 0 || strcmp(item->key, "wheelchair") == 0 || strcmp(item->key, "sac_scale") == 0 || strcmp(item->key, "trail_visibility") == 0 || strcmp(item->key, "mtb:scale") == 0 || strcmp(item->key, "mtb:description") == 0 || strcmp(item->key, "wood") == 0 || strcmp(item->key, "drive_thru") == 0 || 
strcmp(item->key, "drive_in") == 0 || strcmp(item->key, "access") == 0 || strcmp(item->key, "vehicle") == 0 || strcmp(item->key, "bicyle") == 0 || strcmp(item->key, "foot") == 0 || strcmp(item->key, "goods") == 0 || strcmp(item->key, "hgv") == 0 || strcmp(item->key, "motor_vehicle") == 0 || strcmp(item->key, "motor_car") == 0 || (strncmp(item->key, "access:", 7) == 0) || (strncmp(item->key, "contact:", 8) == 0) || (strncmp(item->key, "drink:", 6) == 0) || strcmp(item->key, "oneway") == 0 || strcmp(item->key, "date_on") == 0 || strcmp(item->key, "date_off") == 0 || strcmp(item->key, "day_on") == 0 || strcmp(item->key, "day_off") == 0 || strcmp(item->key, "hour_on") == 0 || strcmp(item->key, "hour_off") == 0 || strcmp(item->key, "maxweight") == 0 || strcmp(item->key, "maxheight") == 0 || strcmp(item->key, "maxspeed") == 0 || strcmp(item->key, "disused") == 0 || strcmp(item->key, "toll") == 0 || strcmp(item->key, "charge") == 0 || strcmp(item->key, "population") == 0 || strcmp(item->key, "description") == 0 || strcmp(item->key, "image") == 0 || strcmp(item->key, "attribution") == 0 || strcmp(item->key, "fax") == 0 || strcmp(item->key, "email") == 0 || strcmp(item->key, "url") == 0 || strcmp(item->key, "website") == 0 || strcmp(item->key, "phone") == 0 || strcmp(item->key, "tel") == 0 || strcmp(item->key, "real_ale") == 0 || strcmp(item->key, "smoking") == 0 || strcmp(item->key, "food") == 0 || strcmp(item->key, "camera") == 0 || strcmp(item->key, "brewery") == 0 || strcmp(item->key, "locality") == 0 || strcmp(item->key, "wikipedia") == 0 || (strncmp(item->key, "wikipedia:", 10) == 0) ) { pushItem(extratags, item); } else if (strcmp(item->key, "building") == 0) { placebuilding = 1; freeItem(item); } else if (strcmp(item->key, "mountain_pass") == 0) { pushItem(places, item); } else { freeItem(item); } } /* Handle Czech/Slovak addresses: - if we have just a conscription number or a street number, just use the one we have as a house number - if we have both of them, 
concatenate them so users may search by any of them */ if (conscriptionnumber || streetnumber) { if (*housenumber) { freeItem(*housenumber); } if (!conscriptionnumber) { addItem(tags, "addr:housenumber", streetnumber->value, 0); freeItem(streetnumber); *housenumber = popItem(tags); } if (!streetnumber) { addItem(tags, "addr:housenumber", conscriptionnumber->value, 10); freeItem(conscriptionnumber); *housenumber = popItem(tags); } if (conscriptionnumber && streetnumber) { char * completenumber = strdup(conscriptionnumber->value); size_t completenumberlength = strlen(completenumber); completenumber = realloc(completenumber, completenumberlength + 2 + strlen(streetnumber->value)); *(completenumber + completenumberlength) = '/'; strcpy(completenumber + completenumberlength + 1, streetnumber->value); freeItem(conscriptionnumber); freeItem(streetnumber); addItem(tags, "addr:housenumber", completenumber, 0); *housenumber = popItem(tags); free(completenumber); } } if (place) { if (placeadmin) { pushItem(extratags, place); } else { pushItem(places, place); } } if (placehouse && !listHasData(places)) { addItem(places, "place", "house", 1); } /* Fallback place types - only used if we didn't create something more specific already */ if (placebuilding && !listHasData(places) && (listHasData(names) || *housenumber || *postcode)) { addItem(places, "building", "yes", 1); } if (landuse) { if (!listHasData(places)) { pushItem(places, landuse); } else { freeItem(item); } } if (*postcode && !listHasData(places)) { addItem(places, "place", "postcode", 1); } /* Try to convert everything to an area */ return 1; } void escape_array_record(char *out, int len, const char *in) { int count = 0; const char *old_in = in, *old_out = out; if (!len) return; while(*in && count < len-3) { switch(*in) { case '\\': *out++ = '\\'; *out++ = '\\'; *out++ = '\\'; *out++ = '\\'; *out++ = '\\'; *out++ = '\\'; *out++ = '\\'; *out++ = '\\'; count+= 8; break; case '\n': case '\r': case '\t': case '"': /* This 
is a bit naughty - we know that nominatim ignored these characters so just drop them now for simplicity */ *out++ = ' '; count++; break; default: *out++ = *in; count++; break; } in++; } *out = '\0'; if (*in) fprintf(stderr, "%s truncated at %d chars: %s\n%s\n", __FUNCTION__, count, old_in, old_out); } static void delete_unused_classes(char osm_type, osmid_t osm_id, struct keyval *places) { int i,sz, slen; PGresult *res; char tmp[16]; char tmp2[2]; char *cls, *clslist = 0; char const *paramValues[2]; tmp2[0] = osm_type; tmp2[1] = '\0'; paramValues[0] = tmp2; snprintf(tmp, sizeof(tmp), "%" PRIdOSMID, osm_id); paramValues[1] = tmp; res = pgsql_execPrepared(ConnectionDelete, "get_classes", 2, paramValues, PGRES_TUPLES_OK); sz = PQntuples(res); if (sz > 0 && !places) { PQclear(res); /* uncondtional delete of all places */ stop_copy(); pgsql_exec(Connection, PGRES_COMMAND_OK, "DELETE FROM place WHERE osm_type = '%c' AND osm_id = %" PRIdOSMID, osm_type, osm_id); } else { for (i = 0; i < sz; i++) { cls = PQgetvalue(res, i, 0); if (!getItem(places, cls)) { if (!clslist) { clslist = malloc(strlen(cls)+3); sprintf(clslist, "'%s'", cls); } else { slen = strlen(clslist); clslist = realloc(clslist, slen + 4 + strlen(cls)); sprintf(&(clslist[slen]), ",'%s'", cls); } } } PQclear(res); if (clslist) { /* Stop any active copy */ stop_copy(); /* Delete all places for this object */ pgsql_exec(Connection, PGRES_COMMAND_OK, "DELETE FROM place WHERE osm_type = '%c' AND osm_id = %" PRIdOSMID " and class = any(ARRAY[%s])", osm_type, osm_id, clslist); free(clslist); } } } static void add_place(char osm_type, osmid_t osm_id, const char *class, const char *type, struct keyval *names, struct keyval *extratags, int adminlevel, struct keyval *housenumber, struct keyval *street, const char *isin, struct keyval *postcode, struct keyval *countrycode, const char *wkt) { int first; struct keyval *name; char sql[2048]; /* Output a copy line for this place */ sprintf(sql, "%c\t%" PRIdOSMID "\t", 
osm_type, osm_id); copy_data(sql); escape(sql, sizeof(sql), class); copy_data(sql); copy_data("\t"); escape(sql, sizeof(sql), type); copy_data(sql); copy_data("\t"); /* start name array */ if (listHasData(names)) { first = 1; for (name = firstItem(names); name; name = nextItem(names, name)) { if (first) first = 0; else copy_data(", "); copy_data("\""); escape_array_record(sql, sizeof(sql), name->key); copy_data(sql); copy_data("\"=>\""); escape_array_record(sql, sizeof(sql), name->value); copy_data(sql); copy_data("\""); } copy_data("\t"); } else { copy_data("\\N\t"); } sprintf(sql, "%d\t", adminlevel); copy_data(sql); if (housenumber) { escape(sql, sizeof(sql), housenumber->value); copy_data(sql); copy_data("\t"); } else { copy_data("\\N\t"); } if (street) { escape(sql, sizeof(sql), street->value); copy_data(sql); copy_data("\t"); } else { copy_data("\\N\t"); } if (isin) { /* Skip the leading ',' from the contactination */ escape(sql, sizeof(sql), isin+1); copy_data(sql); copy_data("\t"); } else { copy_data("\\N\t"); } if (postcode) { escape(sql, sizeof(sql), postcode->value); copy_data(sql); copy_data("\t"); } else { copy_data("\\N\t"); } if (countrycode) { escape(sql, sizeof(sql), countrycode->value); copy_data(sql); copy_data("\t"); } else { copy_data("\\N\t"); } /* extra tags array */ if (listHasData(extratags)) { first = 1; for (name = firstItem(extratags); name; name = nextItem(extratags, name)) { if (first) first = 0; else copy_data(", "); copy_data("\""); escape_array_record(sql, sizeof(sql), name->key); copy_data(sql); copy_data("\"=>\""); escape_array_record(sql, sizeof(sql), name->value); copy_data(sql); copy_data("\""); } copy_data("\t"); } else { copy_data("\\N\t"); } sprintf(sql, "SRID=%d;", SRID); copy_data(sql); copy_data(wkt); copy_data("\n"); return; } static void add_polygon_error(char osm_type, osmid_t osm_id, const char *class, const char *type, struct keyval *names, const char *countrycode, const char *wkt) { int first; struct keyval *name; 
char sql[2048]; /* Output a copy line for this place */ sprintf(sql, "%c\t%" PRIdOSMID "\t", osm_type, osm_id); copy_error_data(sql); escape(sql, sizeof(sql), class); copy_error_data(sql); copy_error_data("\t"); escape(sql, sizeof(sql), type); copy_error_data(sql); copy_error_data("\t"); /* start name array */ if (listHasData(names)) { first = 1; for (name = firstItem(names); name; name = nextItem(names, name)) { if (first) first = 0; else copy_error_data(", "); copy_error_data("\""); escape_array_record(sql, sizeof(sql), name->key); copy_error_data(sql); copy_error_data("\"=>\""); escape_array_record(sql, sizeof(sql), name->value); copy_error_data(sql); copy_error_data("\""); } copy_error_data("\t"); } else { copy_error_data("\\N\t"); } if (countrycode) { escape(sql, sizeof(sql), countrycode); copy_error_data(sql); copy_error_data("\t"); } else { copy_error_data("\\N\t"); } copy_error_data("now\tNot a polygon\t\\N\t"); sprintf(sql, "SRID=%d;", SRID); copy_error_data(sql); copy_error_data(wkt); copy_error_data("\n"); return; } static void delete_place(char osm_type, osmid_t osm_id) { /* Stop any active copy */ stop_copy(); /* Delete all places for this object */ pgsql_exec(Connection, PGRES_COMMAND_OK, "DELETE FROM place WHERE osm_type = '%c' AND osm_id = %" PRIdOSMID, osm_type, osm_id); return; } static int gazetteer_out_start(const struct output_options *options) { /* Save option handle */ Options = options; /* Connection to the database */ Connection = PQconnectdb(options->conninfo); /* Check to see that the backend connection was successfully made */ if (PQstatus(Connection) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s\n", PQerrorMessage(Connection)); exit_nicely(); } /* Start a transaction */ pgsql_exec(Connection, PGRES_COMMAND_OK, "BEGIN"); /* (Re)create the table unless we are appending */ if (!Options->append) { /* Drop any existing table */ pgsql_exec(Connection, PGRES_COMMAND_OK, "DROP TABLE IF EXISTS place"); 
pgsql_exec(Connection, PGRES_COMMAND_OK, "DROP TYPE if exists keyvalue cascade"); pgsql_exec(Connection, PGRES_COMMAND_OK, "DROP TYPE if exists wordscore cascade"); pgsql_exec(Connection, PGRES_COMMAND_OK, "DROP TYPE if exists stringlanguagetype cascade"); pgsql_exec(Connection, PGRES_COMMAND_OK, "DROP TYPE if exists keyvaluetype cascade"); pgsql_exec(Connection, PGRES_COMMAND_OK, "DROP FUNCTION IF EXISTS get_connected_ways(integer[])"); /* Create types and functions */ pgsql_exec(Connection, PGRES_COMMAND_OK, CREATE_KEYVALUETYPE_TYPE); pgsql_exec(Connection, PGRES_COMMAND_OK, CREATE_WORDSCORE_TYPE); /* Create the new table */ if (Options->tblsmain_data) { pgsql_exec(Connection, PGRES_COMMAND_OK, CREATE_PLACE_TABLE, "TABLESPACE", Options->tblsmain_data); } else { pgsql_exec(Connection, PGRES_COMMAND_OK, CREATE_PLACE_TABLE, "", ""); } if (Options->tblsmain_index) { pgsql_exec(Connection, PGRES_COMMAND_OK, CREATE_PLACE_ID_INDEX, "TABLESPACE", Options->tblsmain_index); } else { pgsql_exec(Connection, PGRES_COMMAND_OK, CREATE_PLACE_ID_INDEX, "", ""); } pgsql_exec(Connection, PGRES_TUPLES_OK, "SELECT AddGeometryColumn('place', 'geometry', %d, 'GEOMETRY', 2)", SRID); pgsql_exec(Connection, PGRES_COMMAND_OK, "ALTER TABLE place ALTER COLUMN geometry SET NOT NULL"); } else { ConnectionDelete = PQconnectdb(options->conninfo); if (PQstatus(ConnectionDelete) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s\n", PQerrorMessage(ConnectionDelete)); exit_nicely(); } pgsql_exec(ConnectionDelete, PGRES_COMMAND_OK, "PREPARE get_classes (CHAR(1), " POSTGRES_OSMID_TYPE ") AS SELECT class FROM place WHERE osm_type = $1 and osm_id = $2"); } /* Setup middle layer */ options->mid->start(options); hLog = fopen("log", "w"); return 0; } static void gazetteer_out_stop(void) { /* Process any remaining ways and relations */ /* No longer need to access middle layer */ Options->mid->commit(); Options->mid->stop(); /* Stop any active copy */ stop_copy(); if (hLog) fclose(hLog); 
/* Commit transaction */ pgsql_exec(Connection, PGRES_COMMAND_OK, "COMMIT"); PQfinish(Connection); if (ConnectionDelete) PQfinish(ConnectionDelete); if (ConnectionError) PQfinish(ConnectionError); return; } static void gazetteer_out_cleanup(void) { return; } static int gazetteer_process_node(osmid_t id, double lat, double lon, struct keyval *tags, int delete_old) { struct keyval names; struct keyval places; struct keyval extratags; struct keyval *place; int adminlevel; struct keyval * housenumber; struct keyval * street; char * isin; struct keyval * postcode; struct keyval * countrycode; char wkt[128]; /* Split the tags */ split_tags(tags, TAGINFO_NODE, &names, &places, &extratags, &adminlevel, &housenumber, &street, &isin, &postcode, &countrycode); /* Feed this node to the middle layer */ Options->mid->nodes_set(id, lat, lon, tags); if (delete_old) delete_unused_classes('N', id, &places); /* Are we interested in this item? */ if (listHasData(&places)) { sprintf(wkt, "POINT(%.15g %.15g)", lon, lat); for (place = firstItem(&places); place; place = nextItem(&places, place)) { add_place('N', id, place->key, place->value, &names, &extratags, adminlevel, housenumber, street, isin, postcode, countrycode, wkt); } } if (housenumber) freeItem(housenumber); if (street) freeItem(street); if (isin) free(isin); if (postcode) freeItem(postcode); if (countrycode) freeItem(countrycode); /* Free tag lists */ resetList(&names); resetList(&places); resetList(&extratags); return 0; } static int gazetteer_add_node(osmid_t id, double lat, double lon, struct keyval *tags) { return gazetteer_process_node(id, lat, lon, tags, 0); } static int gazetteer_process_way(osmid_t id, osmid_t *ndv, int ndc, struct keyval *tags, int delete_old) { struct keyval names; struct keyval places; struct keyval extratags; struct keyval *place; int adminlevel; struct keyval * housenumber; struct keyval * street; char * isin; struct keyval * postcode; struct keyval * countrycode; int area; /* Split the tags */ 
area = split_tags(tags, TAGINFO_WAY, &names, &places, &extratags, &adminlevel, &housenumber, &street, &isin, &postcode, &countrycode); /* Feed this way to the middle layer */ Options->mid->ways_set(id, ndv, ndc, tags, 0); if (delete_old) delete_unused_classes('W', id, &places); /* Are we interested in this item? */ if (listHasData(&places)) { struct osmNode *nodev; int nodec; char *wkt; /* Fetch the node details */ nodev = malloc(ndc * sizeof(struct osmNode)); nodec = Options->mid->nodes_get_list(nodev, ndv, ndc); /* Get the geometry of the object */ if ((wkt = get_wkt_simple(nodev, nodec, area)) != NULL && strlen(wkt) > 0) { for (place = firstItem(&places); place; place = nextItem(&places, place)) { add_place('W', id, place->key, place->value, &names, &extratags, adminlevel, housenumber, street, isin, postcode, countrycode, wkt); } } /* Free the geometry */ free(wkt); /* Free the nodes */ free(nodev); } if (housenumber) freeItem(housenumber); if (street) freeItem(street); if (isin) free(isin); if (postcode) freeItem(postcode); if (countrycode) freeItem(countrycode); /* Free tag lists */ resetList(&names); resetList(&places); resetList(&extratags); return 0; } static int gazetteer_add_way(osmid_t id, osmid_t *ndv, int ndc, struct keyval *tags) { return gazetteer_process_way(id, ndv, ndc, tags, 0); } static int gazetteer_process_relation(osmid_t id, struct member *members, int member_count, struct keyval *tags, int delete_old) { struct keyval names; struct keyval places; struct keyval extratags; struct keyval *place; int adminlevel; struct keyval * housenumber; struct keyval * street; char * isin; struct keyval * postcode; struct keyval * countrycode; int wkt_size; const char *type; type = getItem(tags, "type"); if (!type) { if (delete_old) delete_unused_classes('R', id, 0); return 0; } if (!strcmp(type, "associatedStreet") || !strcmp(type, "relatedStreet")) { Options->mid->relations_set(id, members, member_count, tags); if (delete_old) delete_unused_classes('R', 
id, 0); return 0; } if (strcmp(type, "boundary") && strcmp(type, "multipolygon")) { if (delete_old) delete_unused_classes('R', id, 0); return 0; } Options->mid->relations_set(id, members, member_count, tags); /* Split the tags */ split_tags(tags, TAGINFO_AREA, &names, &places, &extratags, &adminlevel, &housenumber, &street, &isin, &postcode, &countrycode); if (delete_old) delete_unused_classes('R', id, &places); if (listHasData(&places)) { /* get the boundary path (ways) */ int i, count; int *xcount = malloc( (member_count+1) * sizeof(int) ); struct keyval *xtags = malloc( (member_count+1) * sizeof(struct keyval) ); struct osmNode **xnodes = malloc( (member_count+1) * sizeof(struct osmNode*) ); osmid_t *xid; osmid_t *xid2 = malloc( (member_count+1) * sizeof(osmid_t) ); count = 0; for (i=0; imid->ways_get_list(xid2, count, &xid, xtags, xnodes, xcount); xnodes[count] = NULL; xcount[count] = 0; wkt_size = build_geometry(id, xnodes, xcount, 1, 1, 1000000); for (i=0;ikey, place->value, &names, &extratags, adminlevel, housenumber, street, isin, postcode, countrycode, wkt); } } else { /* add_polygon_error('R', id, "boundary", "adminitrative", &names, countrycode, wkt); */ } free(wkt); } clear_wkts(); for( i=0; imid->nodes_delete(id); return 0; } static int gazetteer_delete_way(osmid_t id) { /* Make sure we are in slim mode */ require_slim_mode(); /* Delete all references to this way */ delete_place('W', id); /* Feed this delete to the middle layer */ Options->mid->ways_delete(id); return 0; } static int gazetteer_delete_relation(osmid_t id) { /* Make sure we are in slim mode */ require_slim_mode(); /* Delete all references to this relation */ delete_place('R', id); /* Feed this delete to the middle layer */ Options->mid->relations_delete(id); return 0; } static int gazetteer_modify_node(osmid_t id, double lat, double lon, struct keyval *tags) { require_slim_mode(); Options->mid->nodes_delete(id); return gazetteer_process_node(id, lat, lon, tags, 1); } static int 
gazetteer_modify_way(osmid_t id, osmid_t *ndv, int ndc, struct keyval *tags) { require_slim_mode(); Options->mid->ways_delete(id); return gazetteer_process_way(id, ndv, ndc, tags, 1); } static int gazetteer_modify_relation(osmid_t id, struct member *members, int member_count, struct keyval *tags) { require_slim_mode(); Options->mid->relations_delete(id); return gazetteer_process_relation(id, members, member_count, tags, 1); } struct output_t out_gazetteer = { .start = gazetteer_out_start, .stop = gazetteer_out_stop, .cleanup = gazetteer_out_cleanup, .node_add = gazetteer_add_node, .way_add = gazetteer_add_way, .relation_add = gazetteer_add_relation, .node_modify = gazetteer_modify_node, .way_modify = gazetteer_modify_way, .relation_modify = gazetteer_modify_relation, .node_delete = gazetteer_delete_node, .way_delete = gazetteer_delete_way, .relation_delete = gazetteer_delete_relation }; osm2pgsql-0.82.0/output-gazetteer.h000066400000000000000000000001631213272333300172300ustar00rootroot00000000000000#ifndef OUTPUT_GAZETTEER_H #define OUTPUT_GAZETTEER_H #include "output.h" struct output_t out_gazetteer; #endif osm2pgsql-0.82.0/output-null.c000066400000000000000000000034201213272333300162020ustar00rootroot00000000000000#include #include #include #include #include #include #include "osmtypes.h" #include "output.h" #include "output-null.h" #define UNUSED __attribute__ ((unused)) static void null_out_cleanup(void) { } static int null_out_start(const struct output_options *opt UNUSED) { return 0; } static void null_out_stop() { } static int null_add_node(osmid_t a UNUSED, double b UNUSED, double c UNUSED, struct keyval *k UNUSED) { return 0; } static int null_add_way(osmid_t a UNUSED, osmid_t *b UNUSED, int c UNUSED, struct keyval *k UNUSED) { return 0; } static int null_add_relation(osmid_t a UNUSED, struct member *b UNUSED, int c UNUSED, struct keyval *k UNUSED) { return 0; } static int null_delete_node(osmid_t i UNUSED) { return 0; } static int null_delete_way(osmid_t 
i UNUSED) { return 0; } static int null_delete_relation(osmid_t i UNUSED) { return 0; } static int null_modify_node(osmid_t a UNUSED, double b UNUSED, double c UNUSED, struct keyval * k UNUSED) { return 0; } static int null_modify_way(osmid_t a UNUSED, osmid_t * b UNUSED, int c UNUSED, struct keyval * k UNUSED) { return 0; } static int null_modify_relation(osmid_t a UNUSED, struct member * b UNUSED, int c UNUSED, struct keyval * k UNUSED) { return 0; } struct output_t out_null = { .start = null_out_start, .stop = null_out_stop, .cleanup = null_out_cleanup, .node_add = null_add_node, .way_add = null_add_way, .relation_add = null_add_relation, .node_modify = null_modify_node, .way_modify = null_modify_way, .relation_modify = null_modify_relation, .node_delete = null_delete_node, .way_delete = null_delete_way, .relation_delete = null_delete_relation }; osm2pgsql-0.82.0/output-null.h000066400000000000000000000002511213272333300162060ustar00rootroot00000000000000/* Implements dummy output-layer processing for testing. */ #ifndef OUTPUT_NULL_H #define OUTPUT_NULL_H #include "output.h" extern struct output_t out_null; #endif osm2pgsql-0.82.0/output-pgsql.c000066400000000000000000001765621213272333300164000ustar00rootroot00000000000000/* Implements the mid-layer processing for osm2pgsql * using several PostgreSQL tables * * This layer stores data read in from the planet.osm file * and is then read by the backend processing code to * emit the final geometry-enabled output formats */ #include "config.h" #include #include #include #include #include #include #include #ifdef HAVE_PTHREAD #include #endif #include #include "osmtypes.h" #include "output.h" #include "reprojection.h" #include "output-pgsql.h" #include "build_geometry.h" #include "middle.h" #include "pgsql.h" #include "expire-tiles.h" #include "wildcmp.h" #include "node-ram-cache.h" #define SRID (project_getprojinfo()->srs) /* FIXME: Shouldn't malloc this all to begin with but call realloc() as required. 
The program will most likely segfault if it reads a style file with more styles than this */ #define MAX_STYLES 1000 enum table_id { t_point, t_line, t_poly, t_roads }; static const struct output_options *Options; /* enable output of a generated way_area tag to either hstore or its own column */ static int enable_way_area=1; /* Tables to output */ static struct s_table { char *name; const char *type; PGconn *sql_conn; char buffer[1024]; unsigned int buflen; int copyMode; char *columns; } tables [] = { { .name = "%s_point", .type = "POINT" }, { .name = "%s_line", .type = "LINESTRING"}, { .name = "%s_polygon", .type = "GEOMETRY" }, /* Actually POLGYON & MULTIPOLYGON but no way to limit to just these two */ { .name = "%s_roads", .type = "LINESTRING"} }; #define NUM_TABLES ((signed)(sizeof(tables) / sizeof(tables[0]))) #define FLAG_POLYGON 1 /* For polygon table */ #define FLAG_LINEAR 2 /* For lines table */ #define FLAG_NOCACHE 4 /* Optimisation: don't bother remembering this one */ #define FLAG_DELETE 8 /* These tags should be simply deleted on sight */ #define FLAG_PHSTORE 17 /* polygons without own column but listed in hstore this implies FLAG_POLYGON */ static struct flagsname { char *name; int flag; } tagflags[] = { { .name = "polygon", .flag = FLAG_POLYGON }, { .name = "linear", .flag = FLAG_LINEAR }, { .name = "nocache", .flag = FLAG_NOCACHE }, { .name = "delete", .flag = FLAG_DELETE }, { .name = "phstore", .flag = FLAG_PHSTORE } }; #define NUM_FLAGS ((signed)(sizeof(tagflags) / sizeof(tagflags[0]))) /* Table columns, representing key= tags */ struct taginfo { char *name; char *type; int flags; int count; }; static struct taginfo *exportList[4]; /* Indexed by enum table_id */ static int exportListCount[4]; /* Data to generate z-order column and road table * The name of the roads table is misleading, this table * is used for any feature to be shown at low zoom. 
* This includes railways and administrative boundaries too */ static struct { int offset; const char *highway; int roads; } layers[] = { { 3, "minor", 0 }, { 3, "road", 0 }, { 3, "unclassified", 0 }, { 3, "residential", 0 }, { 4, "tertiary_link", 0 }, { 4, "tertiary", 0 }, { 6, "secondary_link",1 }, { 6, "secondary", 1 }, { 7, "primary_link", 1 }, { 7, "primary", 1 }, { 8, "trunk_link", 1 }, { 8, "trunk", 1 }, { 9, "motorway_link", 1 }, { 9, "motorway", 1 } }; static const unsigned int nLayers = (sizeof(layers)/sizeof(*layers)); static int pgsql_delete_way_from_output(osmid_t osm_id); static int pgsql_delete_relation_from_output(osmid_t osm_id); static int pgsql_process_relation(osmid_t id, struct member *members, int member_count, struct keyval *tags, int exists); void read_style_file( const char *filename ) { FILE *in; int lineno = 0; int num_read = 0; char osmtype[24]; char tag[64]; char datatype[24]; char flags[128]; int i; char *str; int fields; struct taginfo temp; char buffer[1024]; int flag = 0; exportList[OSMTYPE_NODE] = malloc( sizeof(struct taginfo) * MAX_STYLES ); exportList[OSMTYPE_WAY] = malloc( sizeof(struct taginfo) * MAX_STYLES ); in = fopen( filename, "rt" ); if( !in ) { fprintf( stderr, "Couldn't open style file '%s': %s\n", filename, strerror(errno) ); exit_nicely(); } while( fgets( buffer, sizeof(buffer), in) != NULL ) { lineno++; str = strchr( buffer, '#' ); if( str ) *str = '\0'; fields = sscanf( buffer, "%23s %63s %23s %127s", osmtype, tag, datatype, flags ); if( fields <= 0 ) /* Blank line */ continue; if( fields < 3 ) { fprintf( stderr, "Error reading style file line %d (fields=%d)\n", lineno, fields ); exit_nicely(); } temp.name = strdup(tag); temp.type = strdup(datatype); temp.flags = 0; for( str = strtok( flags, ",\r\n" ); str; str = strtok(NULL, ",\r\n") ) { for( i=0; ienable_hstore)) { fprintf( stderr, "Error reading style file line %d (fields=%d)\n", lineno, fields ); fprintf( stderr, "flag 'phstore' is invalid in non-hstore 
mode\n"); exit_nicely(); } } if ((temp.flags!=FLAG_DELETE) && ((strchr(temp.name,'?') != NULL) || (strchr(temp.name,'*') != NULL))) { fprintf( stderr, "wildcard '%s' in non-delete style entry\n",temp.name); exit_nicely(); } if ((0==strcmp(temp.name,"way_area")) && (temp.flags==FLAG_DELETE)) { enable_way_area=0; } temp.count = 0; /* printf("%s %s %d %d\n", temp.name, temp.type, temp.polygon, offset ); */ if( strstr( osmtype, "node" ) ) { memcpy( &exportList[ OSMTYPE_NODE ][ exportListCount[ OSMTYPE_NODE ] ], &temp, sizeof(temp) ); exportListCount[ OSMTYPE_NODE ]++; flag = 1; } if( strstr( osmtype, "way" ) ) { memcpy( &exportList[ OSMTYPE_WAY ][ exportListCount[ OSMTYPE_WAY ] ], &temp, sizeof(temp) ); exportListCount[ OSMTYPE_WAY ]++; flag = 1; } if( !flag ) { fprintf( stderr, "Weird style line %d\n", lineno ); exit_nicely(); } num_read++; } if (ferror(in)) { perror(filename); exit_nicely(); } if (num_read == 0) { fprintf(stderr, "Unable to parse any valid columns from the style file. Aborting.\n"); exit_nicely(); } fclose(in); } static void free_style_refs(const char *name, const char *type) { /* Find and remove any other references to these pointers This would be way easier if we kept a single list of styles Currently this scales with n^2 number of styles */ int i,j; for (i=0; i sizeof( tables[table].buffer )-10 ) { pgsql_CopyData(tables[table].name, sql_conn, buffer); buflen = 0; /* If new data by itself is also too big, output it immediately */ if( (unsigned)len > sizeof( tables[table].buffer )-10 ) { pgsql_CopyData(tables[table].name, sql_conn, sql); len = 0; } } /* Normal case, just append to buffer */ if( len > 0 ) { strcpy( buffer+buflen, sql ); buflen += len; len = 0; } /* If we have completed a line, output it */ if( buflen > 0 && buffer[buflen-1] == '\n' ) { pgsql_CopyData(tables[table].name, sql_conn, buffer); buflen = 0; } tables[table].buflen = buflen; } static int add_z_order(struct keyval *tags, int *roads) { const char *layer = getItem(tags, 
"layer"); const char *highway = getItem(tags, "highway"); const char *bridge = getItem(tags, "bridge"); const char *tunnel = getItem(tags, "tunnel"); const char *railway = getItem(tags, "railway"); const char *boundary= getItem(tags, "boundary"); int z_order = 0; int l; unsigned int i; char z[13]; l = layer ? strtol(layer, NULL, 10) : 0; z_order = 10 * l; *roads = 0; if (highway) { for (i=0; i * * becomes: * */ void compress_tag_name(struct keyval *tags) { const char *name = getItem(tags, "name"); struct keyval *name_ext = getMatches(tags, "name:"); struct keyval *p; char out[2048]; if (!name_ext) return; out[0] = '\0'; if (name) { strncat(out, name, sizeof(out)-1); strncat(out, " ", sizeof(out)-1); } while((p = popItem(name_ext)) != NULL) { /* Exclude name:source = "dicataphone" and duplicates */ if (strcmp(p->key, "name:source") && !strstr(out, p->value)) { strncat(out, p->value, sizeof(out)-1); strncat(out, " ", sizeof(out)-1); } freeItem(p); } free(name_ext); /* Remove trailing space */ out[strlen(out)-1] = '\0'; /* fprintf(stderr, "*** New name: %s\n", out); */ updateItem(tags, "name", out); } static void pgsql_out_cleanup(void) { int i; for (i=0; i tmplen) { tmpstr=realloc(tmpstr,len); tmplen=len; } strcpy(tmpstr,value); if ( !strcmp(type, "int4") ) { int from, to; /* For integers we take the first number, or the average if it's a-b */ items = sscanf(value, "%d-%d", &from, &to); if ( items == 1 ) { sprintf(sql, "%d", from); } else if ( items == 2 ) { sprintf(sql, "%d", (from + to) / 2); } else { sprintf(sql, "\\N"); } } else { /* try to "repair" real values as follows: * assume "," to be a decimal mark which need to be replaced by "." 
* like int4 take the first number, or the average if it's a-b * assume SI unit (meters) * convert feet to meters (1 foot = 0.3048 meters) * reject anything else */ if ( !strcmp(type, "real") ) { int i,slen; float from,to; slen=strlen(value); for (i=0;inext->key != NULL) { /* hard exclude z_order tag and keys which have their own column */ if ((xtags->next->has_column) || (strcmp("z_order",xtags->next->key)==0)) { /* update the tag-pointer to point to the next tag */ xtags = xtags->next; continue; } /* hstore ASCII representation looks like ""=>"" we need at least strlen(key)+strlen(value)+6+'\0' bytes in theory any single character could also be escaped thus we need an additional factor of 2. The maximum lenght of a single hstore element is thus calcuated as follows: */ hlen=2 * (strlen(xtags->next->key) + strlen(xtags->next->value)) + 7; /* if the sql buffer is too small */ if (hlen > sqllen) { sqllen = hlen; sql = realloc(sql, sqllen); } /* pack the tag with its value into the hstore */ keyval2hstore(sql, xtags->next); copy_to_table(table, sql); /* update the tag-pointer to point to the next tag */ xtags = xtags->next; /* if the tag has a follow up, add a comma to the end */ if (xtags->next->key != NULL) copy_to_table(table, ","); } /* finish the hstore column by placing a TAB into the data stream */ copy_to_table(table, "\t"); /* the main hstore-column has now been written */ } /* write an hstore column to the database */ static void write_hstore_columns(enum table_id table, struct keyval *tags) { static char *sql; static size_t sqllen=0; char *shortkey; /* the index of the current hstore column */ int i_hstore_column; int found; struct keyval *xtags; char *pos; size_t hlen; /* sql buffer */ if (sqllen==0) { sqllen=2048; sql=malloc(sqllen); } /* iterate over all configured hstore colums in the options */ for(i_hstore_column = 0; i_hstore_column < Options->n_hstore_columns; i_hstore_column++) { /* did this node have a tag that matched the current hstore column */ 
found = 0; /* a clone of the tags pointer */ xtags = tags; /* while this tags has a follow-up.. */ while (xtags->next->key != NULL) { /* check if the tag's key starts with the name of the hstore column */ pos = strstr(xtags->next->key, Options->hstore_columns[i_hstore_column]); /* and if it does.. */ if(pos == xtags->next->key) { /* remember we found one */ found=1; /* generate the short key name */ shortkey = xtags->next->key + strlen(Options->hstore_columns[i_hstore_column]); /* calculate the size needed for this hstore entry */ hlen=2*(strlen(shortkey)+strlen(xtags->next->value))+7; /* if the sql buffer is too small */ if (hlen > sqllen) { /* resize it */ sqllen=hlen; sql=realloc(sql,sqllen); } /* and pack the shortkey with its value into the hstore */ keyval2hstore_manual(sql, shortkey, xtags->next->value); copy_to_table(table, sql); /* update the tag-pointer to point to the next tag */ xtags=xtags->next; /* if the tag has a follow up, add a comma to the end */ if (xtags->next->key != NULL) copy_to_table(table, ","); } else { /* update the tag-pointer to point to the next tag */ xtags=xtags->next; } } /* if no matching tag has been found, write a NULL */ if(!found) copy_to_table(table, "\\N"); /* finish the hstore column by placing a TAB into the data stream */ copy_to_table(table, "\t"); } /* all hstore-columns have now been written */ } /* example from: pg_dump -F p -t planet_osm gis COPY planet_osm (osm_id, name, place, landuse, leisure, "natural", man_made, waterway, highway, railway, amenity, tourism, learning, building, bridge, layer, way) FROM stdin; 17959841 \N \N \N \N \N \N \N bus_stop \N \N \N \N \N \N -\N 0101000020E610000030CCA462B6C3D4BF92998C9B38E04940 17401934 The Horn \N \N \N \N \N \N \N \N pub \N \N \N \N -\N 0101000020E6100000C12FC937140FD5BFB4D2F4FB0CE04940 ... 
mine - 01 01000000 48424298424242424242424256427364 psql - 01 01000020 E6100000 30CCA462B6C3D4BF92998C9B38E04940 01 01000020 E6100000 48424298424242424242424256427364 0x2000_0000 = hasSRID, following 4 bytes = srid, not supported by geos WKBWriter Workaround - output SRID=4326; */ static int pgsql_out_node(osmid_t id, struct keyval *tags, double node_lat, double node_lon) { static char *sql; static size_t sqllen=0; int i; struct keyval *tag; if (sqllen==0) { sqllen=2048; sql=malloc(sqllen); } expire_tiles_from_bbox(node_lon, node_lat, node_lon, node_lat); sprintf(sql, "%" PRIdOSMID "\t", id); copy_to_table(t_point, sql); for (i=0; i < exportListCount[OSMTYPE_NODE]; i++) { if( exportList[OSMTYPE_NODE][i].flags & FLAG_DELETE ) continue; if( (exportList[OSMTYPE_NODE][i].flags & FLAG_PHSTORE) == FLAG_PHSTORE) continue; if ((tag = getTag(tags, exportList[OSMTYPE_NODE][i].name))) { escape_type(sql, sqllen, tag->value, exportList[OSMTYPE_NODE][i].type); exportList[OSMTYPE_NODE][i].count++; if (HSTORE_NORM==Options->enable_hstore) tag->has_column=1; } else sprintf(sql, "\\N"); copy_to_table(t_point, sql); copy_to_table(t_point, "\t"); } /* hstore columns */ write_hstore_columns(t_point, tags); /* check if a regular hstore is requested */ if (Options->enable_hstore) write_hstore(t_point, tags); #ifdef FIXED_POINT // guarantee that we use the same values as in the node cache scale = Options->scale; node_lon = FIX_TO_DOUBLE(DOUBLE_TO_FIX(node_lon)); node_lat = FIX_TO_DOUBLE(DOUBLE_TO_FIX(node_lat)); #endif sprintf(sql, "SRID=%d;POINT(%.15g %.15g)", SRID, node_lon, node_lat); copy_to_table(t_point, sql); copy_to_table(t_point, "\n"); return 0; } static void write_wkts(osmid_t id, struct keyval *tags, const char *wkt, enum table_id table) { static char *sql; static size_t sqllen=0; int j; struct keyval *tag; if (sqllen==0) { sqllen=2048; sql=malloc(sqllen); } sprintf(sql, "%" PRIdOSMID "\t", id); copy_to_table(table, sql); for (j=0; j < exportListCount[OSMTYPE_WAY]; j++) { if( 
exportList[OSMTYPE_WAY][j].flags & FLAG_DELETE ) continue; if( (exportList[OSMTYPE_WAY][j].flags & FLAG_PHSTORE) == FLAG_PHSTORE) continue; if ((tag = getTag(tags, exportList[OSMTYPE_WAY][j].name))) { exportList[OSMTYPE_WAY][j].count++; escape_type(sql, sqllen, tag->value, exportList[OSMTYPE_WAY][j].type); if (HSTORE_NORM==Options->enable_hstore) tag->has_column=1; } else sprintf(sql, "\\N"); copy_to_table(table, sql); copy_to_table(table, "\t"); } /* hstore columns */ write_hstore_columns(table, tags); /* check if a regular hstore is requested */ if (Options->enable_hstore) write_hstore(table, tags); sprintf(sql, "SRID=%d;", SRID); copy_to_table(table, sql); copy_to_table(table, wkt); copy_to_table(table, "\n"); } static int tag_indicates_polygon(enum OsmType type, const char *key) { int i; if (!strcmp(key, "area")) return 1; for (i=0; i < exportListCount[type]; i++) { if( strcmp( exportList[type][i].name, key ) == 0 ) return exportList[type][i].flags & FLAG_POLYGON; } return 0; } /* Go through the given tags and determine the union of flags. 
Also remove * any tags from the list that we don't know about */ unsigned int pgsql_filter_tags(enum OsmType type, struct keyval *tags, int *polygon) { int i, filter = 1; int flags = 0; int add_area_tag = 0; const char *area; struct keyval *item; struct keyval temp; initList(&temp); /* We used to only go far enough to determine if it's a polygon or not, but now we go through and filter stuff we don't need */ while( (item = popItem(tags)) != NULL ) { /* Allow named islands to appear as polygons */ if (!strcmp("natural",item->key) && !strcmp("coastline",item->value)) { add_area_tag = 1; } /* Discard natural=coastline tags (we render these from a shapefile instead) */ if (!Options->keep_coastlines && !strcmp("natural",item->key) && !strcmp("coastline",item->value)) { freeItem( item ); item = NULL; continue; } for (i=0; i < exportListCount[type]; i++) { if (wildMatch( exportList[type][i].name, item->key )) { if( exportList[type][i].flags & FLAG_DELETE ) { freeItem( item ); item = NULL; break; } filter = 0; flags |= exportList[type][i].flags; pushItem( &temp, item ); item = NULL; break; } } /** if tag not found in list of exports: */ if (i == exportListCount[type]) { if (Options->enable_hstore) { /* with hstore, copy all tags... */ pushItem(&temp, item); /* ... but if hstore_match_only is set then don't take this as a reason for keeping the object */ if ( !Options->hstore_match_only && strcmp("osm_uid",item->key) && strcmp("osm_user",item->key) && strcmp("osm_timestamp",item->key) && strcmp("osm_version",item->key) && strcmp("osm_changeset",item->key) ) filter = 0; } else if (Options->n_hstore_columns) { /* does this column match any of the hstore column prefixes? */ int j; for (j = 0; j < Options->n_hstore_columns; j++) { char *pos = strstr(item->key, Options->hstore_columns[j]); if (pos == item->key) { pushItem(&temp, item); /* ... 
but if hstore_match_only is set then don't take this as a reason for keeping the object */ if ( !Options->hstore_match_only && strcmp("osm_uid",item->key) && strcmp("osm_user",item->key) && strcmp("osm_timestamp",item->key) && strcmp("osm_version",item->key) && strcmp("osm_changeset",item->key) ) filter = 0; break; } } /* if not, skip the tag */ if (j == Options->n_hstore_columns) { freeItem(item); } } else { freeItem(item); } item = NULL; } } /* Move from temp list back to original list */ while( (item = popItem(&temp)) != NULL ) pushItem( tags, item ); *polygon = flags & FLAG_POLYGON; /* Special case allowing area= to override anything else */ if ((area = getItem(tags, "area"))) { if (!strcmp(area, "yes") || !strcmp(area, "true") ||!strcmp(area, "1")) *polygon = 1; else if (!strcmp(area, "no") || !strcmp(area, "false") || !strcmp(area, "0")) *polygon = 0; } else { /* If we need to force this as a polygon, append an area tag */ if (add_area_tag) { addItem(tags, "area", "yes", 0); *polygon = 1; } } return filter; } /* COPY planet_osm (osm_id, name, place, landuse, leisure, "natural", man_made, waterway, highway, railway, amenity, tourism, learning, bu ilding, bridge, layer, way) FROM stdin; 198497 Bedford Road \N \N \N \N \N \N residential \N \N \N \N \N \N \N 0102000020E610000004000000452BF702B342D5BF1C60E63BF8DF49406B9C4D470037D5BF5471E316F3DF4940DFA815A6EF35D5BF9AE95E27F5DF4940B41EB E4C1421D5BF24D06053E7DF4940 212696 Oswald Road \N \N \N \N \N \N minor \N \N \N \N \N \N \N 0102000020E610000004000000467D923B6C22D5BFA359D93EE4DF4940B3976DA7AD11D5BF84BBB376DBDF4940997FF44D9A06D5BF4223D8B8FEDF49404D158C4AEA04D 5BF5BB39597FCDF4940 */ static int pgsql_out_way(osmid_t id, struct keyval *tags, struct osmNode *nodes, int count, int exists) { int polygon = 0, roads = 0; int i, wkt_size; double split_at; double area; /* If the flag says this object may exist already, delete it first */ if(exists) { pgsql_delete_way_from_output(id); Options->mid->way_changed(id); } if 
(pgsql_filter_tags(OSMTYPE_WAY, tags, &polygon) || add_z_order(tags, &roads)) return 0; /* Split long ways after around 1 degree or 100km */ if (Options->projection == PROJ_LATLONG) split_at = 1; else split_at = 100 * 1000; wkt_size = get_wkt_split(nodes, count, polygon, split_at); for (i=0;i 0.0) && enable_way_area) { char tmp[32]; snprintf(tmp, sizeof(tmp), "%g", area); addItem(tags, "way_area", tmp, 0); } write_wkts(id, tags, wkt, t_poly); } else { expire_tiles_from_nodes_line(nodes, count); write_wkts(id, tags, wkt, t_line); if (roads) write_wkts(id, tags, wkt, t_roads); } } free(wkt); } clear_wkts(); return 0; } static int pgsql_out_relation(osmid_t id, struct keyval *rel_tags, struct osmNode **xnodes, struct keyval *xtags, int *xcount, osmid_t *xid, const char **xrole) { int i, wkt_size; int polygon = 0, roads = 0; int make_polygon = 0; int make_boundary = 0; struct keyval tags, *p, poly_tags; char *type; double split_at; #if 0 fprintf(stderr, "Got relation with counts:"); for (i=0; xcount[i]; i++) fprintf(stderr, " %d", xcount[i]); fprintf(stderr, "\n"); #endif /* Get the type, if there's no type we don't care */ type = getItem(rel_tags, "type"); if( !type ) return 0; initList(&tags); initList(&poly_tags); /* Clone tags from relation */ p = rel_tags->next; while (p != rel_tags) { /* For routes, we convert name to route_name */ if ((strcmp(type, "route") == 0) && (strcmp(p->key, "name") ==0)) addItem(&tags, "route_name", p->value, 1); else if (strcmp(p->key, "type")) /* drop type= */ addItem(&tags, p->key, p->value, 1); p = p->next; } if( strcmp(type, "route") == 0 ) { const char *state = getItem(rel_tags, "state"); const char *netw = getItem(rel_tags, "network"); int networknr = -1; if (state == NULL) { state = ""; } if (netw != NULL) { if (strcmp(netw, "lcn") == 0) { networknr = 10; if (strcmp(state, "alternate") == 0) { addItem(&tags, "lcn", "alternate", 1); } else if (strcmp(state, "connection") == 0) { addItem(&tags, "lcn", "connection", 1); } else { 
addItem(&tags, "lcn", "yes", 1); } } else if (strcmp(netw, "rcn") == 0) { networknr = 11; if (strcmp(state, "alternate") == 0) { addItem(&tags, "rcn", "alternate", 1); } else if (strcmp(state, "connection") == 0) { addItem(&tags, "rcn", "connection", 1); } else { addItem(&tags, "rcn", "yes", 1); } } else if (strcmp(netw, "ncn") == 0) { networknr = 12; if (strcmp(state, "alternate") == 0) { addItem(&tags, "ncn", "alternate", 1); } else if (strcmp(state, "connection") == 0) { addItem(&tags, "ncn", "connection", 1); } else { addItem(&tags, "ncn", "yes", 1); } } else if (strcmp(netw, "lwn") == 0) { networknr = 20; if (strcmp(state, "alternate") == 0) { addItem(&tags, "lwn", "alternate", 1); } else if (strcmp(state, "connection") == 0) { addItem(&tags, "lwn", "connection", 1); } else { addItem(&tags, "lwn", "yes", 1); } } else if (strcmp(netw, "rwn") == 0) { networknr = 21; if (strcmp(state, "alternate") == 0) { addItem(&tags, "rwn", "alternate", 1); } else if (strcmp(state, "connection") == 0) { addItem(&tags, "rwn", "connection", 1); } else { addItem(&tags, "rwn", "yes", 1); } } else if (strcmp(netw, "nwn") == 0) { networknr = 22; if (strcmp(state, "alternate") == 0) { addItem(&tags, "nwn", "alternate", 1); } else if (strcmp(state, "connection") == 0) { addItem(&tags, "nwn", "connection", 1); } else { addItem(&tags, "nwn", "yes", 1); } } } if (getItem(rel_tags, "preferred_color") != NULL) { const char *a = getItem(rel_tags, "preferred_color"); if (strcmp(a, "0") == 0 || strcmp(a, "1") == 0 || strcmp(a, "2") == 0 || strcmp(a, "3") == 0 || strcmp(a, "4") == 0) { addItem(&tags, "route_pref_color", a, 1); } else { addItem(&tags, "route_pref_color", "0", 1); } } else { addItem(&tags, "route_pref_color", "0", 1); } if (getItem(rel_tags, "ref") != NULL) { if (networknr == 10) { addItem(&tags, "lcn_ref", getItem(rel_tags, "ref"), 1); } else if (networknr == 11) { addItem(&tags, "rcn_ref", getItem(rel_tags, "ref"), 1); } else if (networknr == 12) { addItem(&tags, "ncn_ref", 
getItem(rel_tags, "ref"), 1); } else if (networknr == 20) { addItem(&tags, "lwn_ref", getItem(rel_tags, "ref"), 1); } else if (networknr == 21) { addItem(&tags, "rwn_ref", getItem(rel_tags, "ref"), 1); } else if (networknr == 22) { addItem(&tags, "nwn_ref", getItem(rel_tags, "ref"), 1); } } } else if( strcmp( type, "boundary" ) == 0 ) { /* Boundaries will get converted into multiple geometries: - Linear features will end up in the line and roads tables (useful for admin boundaries) - Polygon features also go into the polygon table (useful for national_forests) The edges of the polygon also get treated as linear fetaures allowing these to be rendered seperately. */ make_boundary = 1; } else if( strcmp( type, "multipolygon" ) == 0 && getItem(&tags, "boundary") ) { /* Treat type=multipolygon exactly like type=boundary if it has a boundary tag. */ make_boundary = 1; } else if( strcmp( type, "multipolygon" ) == 0 ) { make_polygon = 1; /* Copy the tags from the outer way(s) if the relation is untagged */ /* or if there is just a name tag, people seem to like naming relations */ if (!listHasData(&tags) || ((countList(&tags)==1) && getItem(&tags, "name"))) { for (i=0; xcount[i]; i++) { if (xrole[i] && !strcmp(xrole[i], "inner")) continue; p = xtags[i].next; while (p != &(xtags[i])) { addItem(&tags, p->key, p->value, 1); p = p->next; } } } /* Collect a list of polygon-like tags, these are used later to identify if an inner rings looks like it should be rendered seperately */ p = tags.next; while (p != &tags) { if (tag_indicates_polygon(OSMTYPE_WAY, p->key)) { addItem(&poly_tags, p->key, p->value, 1); } p = p->next; } } else { /* Unknown type, just exit */ resetList(&tags); resetList(&poly_tags); return 0; } if (pgsql_filter_tags(OSMTYPE_WAY, &tags, &polygon) || add_z_order(&tags, &roads)) { resetList(&tags); resetList(&poly_tags); return 0; } /* Split long linear ways after around 1 degree or 100km (polygons not effected) */ if (Options->projection == PROJ_LATLONG) split_at 
= 1; else split_at = 100 * 1000; wkt_size = build_geometry(id, xnodes, xcount, make_polygon, Options->enable_multi, split_at); if (!wkt_size) { resetList(&tags); resetList(&poly_tags); return 0; } for (i=0;i 0.0) && enable_way_area) { char tmp[32]; snprintf(tmp, sizeof(tmp), "%g", area); addItem(&tags, "way_area", tmp, 0); } write_wkts(-id, &tags, wkt, t_poly); } else { write_wkts(-id, &tags, wkt, t_line); if (roads) write_wkts(-id, &tags, wkt, t_roads); } } free(wkt); } clear_wkts(); /* If we are creating a multipolygon then we mark each member so that we can skip them during iterate_ways but only if the polygon-tags look the same as the outer ring */ if (make_polygon) { for (i=0; xcount[i]; i++) { int match = 0; struct keyval *p = poly_tags.next; while (p != &poly_tags) { const char *v = getItem(&xtags[i], p->key); if (!v || strcmp(v, p->value)) { match = 0; break; } match = 1; p = p->next; } if (match) { Options->mid->ways_done(xid[i]); pgsql_delete_way_from_output(xid[i]); } } } /* If we are making a boundary then also try adding any relations which form complete rings The linear variants will have already been processed above */ if (make_boundary) { wkt_size = build_geometry(id, xnodes, xcount, 1, Options->enable_multi, split_at); for (i=0;i 0.0) && enable_way_area) { char tmp[32]; snprintf(tmp, sizeof(tmp), "%g", area); addItem(&tags, "way_area", tmp, 0); } write_wkts(-id, &tags, wkt, t_poly); } } free(wkt); } clear_wkts(); } resetList(&tags); resetList(&poly_tags); return 0; } static int pgsql_out_connect(const struct output_options *options, int startTransaction) { int i; for (i=0; iconninfo); /* Check to see that the backend connection was successfully made */ if (PQstatus(sql_conn) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s\n", PQerrorMessage(sql_conn)); return 1; } tables[i].sql_conn = sql_conn; pgsql_exec(sql_conn, PGRES_COMMAND_OK, "SET synchronous_commit TO off;"); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "PREPARE get_wkt (" 
POSTGRES_OSMID_TYPE ") AS SELECT ST_AsText(way) FROM %s WHERE osm_id = $1;\n", tables[i].name); if (startTransaction) pgsql_exec(sql_conn, PGRES_COMMAND_OK, "BEGIN"); } return 0; } static int pgsql_out_start(const struct output_options *options) { char *sql, tmp[256]; PGresult *res; int i,j; unsigned int sql_len; int their_srid; int i_hstore_column; enum OsmType type; int numTags; struct taginfo *exportTags; Options = options; read_style_file( options->style ); sql_len = 2048; sql = malloc(sql_len); assert(sql); for (i=0; iprefix) + strlen(tables[i].name) + 1 ); sprintf( temp, tables[i].name, options->prefix ); tables[i].name = temp; } fprintf(stderr, "Setting up table: %s\n", tables[i].name); sql_conn = PQconnectdb(options->conninfo); /* Check to see that the backend connection was successfully made */ if (PQstatus(sql_conn) != CONNECTION_OK) { fprintf(stderr, "Connection to database failed: %s\n", PQerrorMessage(sql_conn)); exit_nicely(); } tables[i].sql_conn = sql_conn; pgsql_exec(sql_conn, PGRES_COMMAND_OK, "SET synchronous_commit TO off;"); if (!options->append) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "DROP TABLE IF EXISTS %s", tables[i].name); } else { sprintf(sql, "SELECT srid FROM geometry_columns WHERE f_table_name='%s';", tables[i].name); res = PQexec(sql_conn, sql); if (!((PQntuples(res) == 1) && (PQnfields(res) == 1))) { fprintf(stderr, "Problem reading geometry information for table %s - does it exist?\n", tables[i].name); exit_nicely(); } their_srid = atoi(PQgetvalue(res, 0, 0)); PQclear(res); if (their_srid != SRID) { fprintf(stderr, "SRID mismatch: cannot append to table %s (SRID %d) using selected SRID %d\n", tables[i].name, their_srid, SRID); exit_nicely(); } } /* These _tmp tables can be left behind if we run out of disk space */ pgsql_exec(sql_conn, PGRES_COMMAND_OK, "DROP TABLE IF EXISTS %s_tmp", tables[i].name); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "BEGIN"); type = (i == t_point)?OSMTYPE_NODE:OSMTYPE_WAY; numTags = exportListCount[type]; 
exportTags = exportList[type]; if (!options->append) { sprintf(sql, "CREATE TABLE %s ( osm_id " POSTGRES_OSMID_TYPE, tables[i].name ); for (j=0; j < numTags; j++) { if( exportTags[j].flags & FLAG_DELETE ) continue; if( (exportTags[j].flags & FLAG_PHSTORE ) == FLAG_PHSTORE) continue; sprintf(tmp, ",\"%s\" %s", exportTags[j].name, exportTags[j].type); if (strlen(sql) + strlen(tmp) + 1 > sql_len) { sql_len *= 2; sql = realloc(sql, sql_len); assert(sql); } strcat(sql, tmp); } for(i_hstore_column = 0; i_hstore_column < Options->n_hstore_columns; i_hstore_column++) { strcat(sql, ",\""); strcat(sql, Options->hstore_columns[i_hstore_column]); strcat(sql, "\" hstore "); } if (Options->enable_hstore) { strcat(sql, ",tags hstore"); } strcat(sql, ")"); if (Options->tblsmain_data) { sprintf(sql + strlen(sql), " TABLESPACE %s", Options->tblsmain_data); } strcat(sql, "\n"); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", sql); pgsql_exec(sql_conn, PGRES_TUPLES_OK, "SELECT AddGeometryColumn('%s', 'way', %d, '%s', 2 );\n", tables[i].name, SRID, tables[i].type ); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "ALTER TABLE %s ALTER COLUMN way SET NOT NULL;\n", tables[i].name); /* slim mode needs this to be able to apply diffs */ if (Options->slim && !Options->droptemp) { sprintf(sql, "CREATE INDEX %s_pkey ON %s USING BTREE (osm_id)", tables[i].name, tables[i].name); if (Options->tblsmain_index) { sprintf(sql + strlen(sql), " TABLESPACE %s\n", Options->tblsmain_index); } pgsql_exec(sql_conn, PGRES_COMMAND_OK, "%s", sql); } } else { /* Add any new columns referenced in the default.style */ PGresult *res; sprintf(sql, "SELECT * FROM %s LIMIT 0;\n", tables[i].name); res = PQexec(sql_conn, sql); if (PQresultStatus(res) != PGRES_TUPLES_OK) { fprintf(stderr, "Error, failed to query table %s\n%s\n", tables[i].name, sql); exit_nicely(); } for (j=0; j < numTags; j++) { if( exportTags[j].flags & FLAG_DELETE ) continue; if( (exportTags[j].flags & FLAG_PHSTORE) == FLAG_PHSTORE) continue; sprintf(tmp, 
"\"%s\"", exportTags[j].name); if (PQfnumber(res, tmp) < 0) { #if 0 fprintf(stderr, "Append failed. Column \"%s\" is missing from \"%s\"\n", exportTags[j].name, tables[i].name); exit_nicely(); #else fprintf(stderr, "Adding new column \"%s\" to \"%s\"\n", exportTags[j].name, tables[i].name); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "ALTER TABLE %s ADD COLUMN \"%s\" %s;\n", tables[i].name, exportTags[j].name, exportTags[j].type); #endif } /* Note: we do not verify the type or delete unused columns */ } PQclear(res); /* change the type of the geometry column if needed - this can only change to a more permisive type */ } pgsql_exec(sql_conn, PGRES_COMMAND_OK, "PREPARE get_wkt (" POSTGRES_OSMID_TYPE ") AS SELECT ST_AsText(way) FROM %s WHERE osm_id = $1;\n", tables[i].name); /* Generate column list for COPY */ strcpy(sql, "osm_id"); for (j=0; j < numTags; j++) { if( exportTags[j].flags & FLAG_DELETE ) continue; if( (exportTags[j].flags & FLAG_PHSTORE ) == FLAG_PHSTORE) continue; sprintf(tmp, ",\"%s\"", exportTags[j].name); if (strlen(sql) + strlen(tmp) + 1 > sql_len) { sql_len *= 2; sql = realloc(sql, sql_len); assert(sql); } strcat(sql, tmp); } for(i_hstore_column = 0; i_hstore_column < Options->n_hstore_columns; i_hstore_column++) { strcat(sql, ",\""); strcat(sql, Options->hstore_columns[i_hstore_column]); strcat(sql, "\" "); } if (Options->enable_hstore) strcat(sql,",tags"); tables[i].columns = strdup(sql); pgsql_exec(sql_conn, PGRES_COPY_IN, "COPY %s (%s,way) FROM STDIN", tables[i].name, tables[i].columns); tables[i].copyMode = 1; } free(sql); expire_tiles_init(options); options->mid->start(options); return 0; } static void pgsql_pause_copy(struct s_table *table) { PGresult *res; int stop; if( !table->copyMode ) return; /* Terminate any pending COPY */ stop = PQputCopyEnd(table->sql_conn, NULL); if (stop != 1) { fprintf(stderr, "COPY_END for %s failed: %s\n", table->name, PQerrorMessage(table->sql_conn)); exit_nicely(); } res = PQgetResult(table->sql_conn); if 
(PQresultStatus(res) != PGRES_COMMAND_OK) { fprintf(stderr, "COPY_END for %s failed: %s\n", table->name, PQerrorMessage(table->sql_conn)); PQclear(res); exit_nicely(); } PQclear(res); table->copyMode = 0; } static void pgsql_out_close(int stopTransaction) { int i; for (i=0; isql_conn; if( table->buflen != 0 ) { fprintf( stderr, "Internal error: Buffer for %s has %d bytes after end copy", table->name, table->buflen ); exit_nicely(); } pgsql_pause_copy(table); if (!Options->append) { time_t start, end; time(&start); fprintf(stderr, "Sorting data and creating indexes for %s\n", table->name); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "ANALYZE %s;\n", table->name); fprintf(stderr, "Analyzing %s finished\n", table->name); if (Options->tblsmain_data) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE TABLE %s_tmp " "TABLESPACE %s AS SELECT * FROM %s ORDER BY way;\n", table->name, Options->tblsmain_data, table->name); } else { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE TABLE %s_tmp AS SELECT * FROM %s ORDER BY way;\n", table->name, table->name); } pgsql_exec(sql_conn, PGRES_COMMAND_OK, "DROP TABLE %s;\n", table->name); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "ALTER TABLE %s_tmp RENAME TO %s;\n", table->name, table->name); fprintf(stderr, "Copying %s to cluster by geometry finished\n", table->name); fprintf(stderr, "Creating geometry index on %s\n", table->name); if (Options->tblsmain_index) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_index ON %s USING GIST (way) TABLESPACE %s;\n", table->name, table->name, Options->tblsmain_index); } else { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_index ON %s USING GIST (way);\n", table->name, table->name); } /* slim mode needs this to be able to apply diffs */ if (Options->slim && !Options->droptemp) { fprintf(stderr, "Creating osm_id index on %s\n", table->name); if (Options->tblsmain_index) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_pkey ON %s USING BTREE (osm_id) TABLESPACE %s;\n", table->name, 
table->name, Options->tblsmain_index); } else { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_pkey ON %s USING BTREE (osm_id);\n", table->name, table->name); } } /* Create hstore index if selected */ if (Options->enable_hstore_index) { fprintf(stderr, "Creating hstore indexes on %s\n", table->name); if (Options->tblsmain_index) { if (HSTORE_NONE != (Options->enable_hstore)) pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_tags_index ON %s USING GIN (tags) TABLESPACE %s;\n", table->name, table->name, Options->tblsmain_index); for(i_column = 0; i_column < Options->n_hstore_columns; i_column++) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_hstore_%i_index ON %s USING GIN (\"%s\") TABLESPACE %s;\n", table->name, i_column,table->name, Options->hstore_columns[i_column], Options->tblsmain_index); } } else { if (HSTORE_NONE != (Options->enable_hstore)) pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_tags_index ON %s USING GIN (tags);\n", table->name, table->name); for(i_column = 0; i_column < Options->n_hstore_columns; i_column++) { pgsql_exec(sql_conn, PGRES_COMMAND_OK, "CREATE INDEX %s_hstore_%i_index ON %s USING GIN (\"%s\");\n", table->name, i_column,table->name, Options->hstore_columns[i_column]); } } } fprintf(stderr, "Creating indexes on %s finished\n", table->name); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "GRANT SELECT ON %s TO PUBLIC;\n", table->name); pgsql_exec(sql_conn, PGRES_COMMAND_OK, "ANALYZE %s;\n", table->name); time(&end); fprintf(stderr, "All indexes on %s created in %ds\n", table->name, (int)(end - start)); } PQfinish(sql_conn); table->sql_conn = NULL; fprintf(stderr, "Completed %s\n", table->name); free(table->name); free(table->columns); return NULL; } static void pgsql_out_stop() { int i; #ifdef HAVE_PTHREAD pthread_t threads[NUM_TABLES]; #endif /* Commit the transactions, so that multiple processes can * access the data simultanious to process the rest in parallel * as well as see the newly created tables. 
*/ pgsql_out_commit(); Options->mid->commit(); /* To prevent deadlocks in parallel processing, the mid tables need * to stay out of a transaction. In this stage output tables are only * written to and not read, so they can be processed as several parallel * independent transactions */ for (i=0; imid->iterate_ways( pgsql_out_way ); pgsql_out_commit(); Options->mid->commit(); /* Processing any remaing to be processed relations */ /* During this stage output tables also need to stay out of * extended transactions, as the delete_way_from_output, called * from process_relation, can deadlock if using multi-processing. */ Options->mid->iterate_relations( pgsql_process_relation ); #ifdef HAVE_PTHREAD if (Options->parallel_indexing) { for (i=0; imid->stop(); for (i=0; imid->stop(); for (i=0; imid->nodes_set(id, lat, lon, tags); if( !filter ) pgsql_out_node(id, tags, lat, lon); return 0; } static int pgsql_add_way(osmid_t id, osmid_t *nds, int nd_count, struct keyval *tags) { int polygon = 0; /* Check whether the way is: (1) Exportable, (2) Maybe a polygon */ int filter = pgsql_filter_tags(OSMTYPE_WAY, tags, &polygon); /* If this isn't a polygon then it can not be part of a multipolygon Hence only polygons are "pending" */ Options->mid->ways_set(id, nds, nd_count, tags, (!filter && polygon) ? 
1 : 0); if( !polygon && !filter ) { /* Get actual node data and generate output */ struct osmNode *nodes = malloc( sizeof(struct osmNode) * nd_count ); int count = Options->mid->nodes_get_list( nodes, nds, nd_count ); pgsql_out_way(id, tags, nodes, count, 0); free(nodes); } return 0; } /* This is the workhorse of pgsql_add_relation, split out because it is used as the callback for iterate relations */ static int pgsql_process_relation(osmid_t id, struct member *members, int member_count, struct keyval *tags, int exists) { int i, j, count, count2; osmid_t *xid2 = malloc( (member_count+1) * sizeof(osmid_t) ); osmid_t *xid; const char **xrole = malloc( (member_count+1) * sizeof(const char *) ); int *xcount = malloc( (member_count+1) * sizeof(int) ); struct keyval *xtags = malloc( (member_count+1) * sizeof(struct keyval) ); struct osmNode **xnodes = malloc( (member_count+1) * sizeof(struct osmNode*) ); /* If the flag says this object may exist already, delete it first */ if(exists) pgsql_delete_relation_from_output(id); count = 0; for( i=0; imid->ways_get_list(xid2, count, &xid, xtags, xnodes, xcount); for (i = 0; i < count2; i++) { for (j = i; j < member_count; j++) { if (members[j].id == xid[i]) break; } xrole[i] = members[j].role; } xnodes[count2] = NULL; xcount[count2] = 0; xid[count2] = 0; xrole[count2] = NULL; /* At some point we might want to consider storing the retreived data in the members, rather than as seperate arrays */ pgsql_out_relation(id, tags, xnodes, xtags, xcount, xid, xrole); for( i=0; imid->relations_set) Options->mid->relations_set(id, members, member_count, tags); /* Only a limited subset of type= is supported, ignore other */ if ( (strcmp(type, "route") != 0) && (strcmp(type, "multipolygon") != 0) && (strcmp(type, "boundary") != 0)) return 0; return pgsql_process_relation(id, members, member_count, tags, 0); } #define UNUSED __attribute__ ((unused)) /* Delete is easy, just remove all traces of this object. 
We don't need to * worry about finding objects that depend on it, since the same diff must * contain the change for that also. */ static int pgsql_delete_node(osmid_t osm_id) { if( !Options->slim ) { fprintf( stderr, "Cannot apply diffs unless in slim mode\n" ); exit_nicely(); } pgsql_pause_copy(&tables[t_point]); if ( expire_tiles_from_db(tables[t_point].sql_conn, osm_id) != 0) pgsql_exec(tables[t_point].sql_conn, PGRES_COMMAND_OK, "DELETE FROM %s WHERE osm_id = %" PRIdOSMID, tables[t_point].name, osm_id ); Options->mid->nodes_delete(osm_id); return 0; } /* Seperated out because we use it elsewhere */ static int pgsql_delete_way_from_output(osmid_t osm_id) { /* Optimisation: we only need this is slim mode */ if( !Options->slim ) return 0; /* in droptemp mode we don't have indices and this takes ages. */ if (Options->droptemp) return 0; pgsql_pause_copy(&tables[t_roads]); pgsql_pause_copy(&tables[t_line]); pgsql_pause_copy(&tables[t_poly]); pgsql_exec(tables[t_roads].sql_conn, PGRES_COMMAND_OK, "DELETE FROM %s WHERE osm_id = %" PRIdOSMID, tables[t_roads].name, osm_id ); if ( expire_tiles_from_db(tables[t_line].sql_conn, osm_id) != 0) pgsql_exec(tables[t_line].sql_conn, PGRES_COMMAND_OK, "DELETE FROM %s WHERE osm_id = %" PRIdOSMID, tables[t_line].name, osm_id ); if ( expire_tiles_from_db(tables[t_poly].sql_conn, osm_id) != 0) pgsql_exec(tables[t_poly].sql_conn, PGRES_COMMAND_OK, "DELETE FROM %s WHERE osm_id = %" PRIdOSMID, tables[t_poly].name, osm_id ); return 0; } static int pgsql_delete_way(osmid_t osm_id) { if( !Options->slim ) { fprintf( stderr, "Cannot apply diffs unless in slim mode\n" ); exit_nicely(); } pgsql_delete_way_from_output(osm_id); Options->mid->ways_delete(osm_id); return 0; } /* Relations are identified by using negative IDs */ static int pgsql_delete_relation_from_output(osmid_t osm_id) { pgsql_pause_copy(&tables[t_roads]); pgsql_pause_copy(&tables[t_line]); pgsql_pause_copy(&tables[t_poly]); pgsql_exec(tables[t_roads].sql_conn, PGRES_COMMAND_OK, 
"DELETE FROM %s WHERE osm_id = %" PRIdOSMID, tables[t_roads].name, -osm_id ); if ( expire_tiles_from_db(tables[t_line].sql_conn, -osm_id) != 0) pgsql_exec(tables[t_line].sql_conn, PGRES_COMMAND_OK, "DELETE FROM %s WHERE osm_id = %" PRIdOSMID, tables[t_line].name, -osm_id ); if ( expire_tiles_from_db(tables[t_poly].sql_conn, -osm_id) != 0) pgsql_exec(tables[t_poly].sql_conn, PGRES_COMMAND_OK, "DELETE FROM %s WHERE osm_id = %" PRIdOSMID, tables[t_poly].name, -osm_id ); return 0; } static int pgsql_delete_relation(osmid_t osm_id) { if( !Options->slim ) { fprintf( stderr, "Cannot apply diffs unless in slim mode\n" ); exit_nicely(); } pgsql_delete_relation_from_output(osm_id); Options->mid->relations_delete(osm_id); return 0; } /* Modify is slightly trickier. The basic idea is we simply delete the * object and create it with the new parameters. Then we need to mark the * objects that depend on this one */ static int pgsql_modify_node(osmid_t osm_id, double lat, double lon, struct keyval *tags) { if( !Options->slim ) { fprintf( stderr, "Cannot apply diffs unless in slim mode\n" ); exit_nicely(); } pgsql_delete_node(osm_id); pgsql_add_node(osm_id, lat, lon, tags); Options->mid->node_changed(osm_id); return 0; } static int pgsql_modify_way(osmid_t osm_id, osmid_t *nodes, int node_count, struct keyval *tags) { if( !Options->slim ) { fprintf( stderr, "Cannot apply diffs unless in slim mode\n" ); exit_nicely(); } pgsql_delete_way(osm_id); pgsql_add_way(osm_id, nodes, node_count, tags); Options->mid->way_changed(osm_id); return 0; } static int pgsql_modify_relation(osmid_t osm_id, struct member *members, int member_count, struct keyval *tags) { if( !Options->slim ) { fprintf( stderr, "Cannot apply diffs unless in slim mode\n" ); exit_nicely(); } pgsql_delete_relation(osm_id); pgsql_add_relation(osm_id, members, member_count, tags); Options->mid->relation_changed(osm_id); return 0; } struct output_t out_pgsql = { .start = pgsql_out_start, .connect = pgsql_out_connect, .stop = 
pgsql_out_stop, .cleanup = pgsql_out_cleanup, .close = pgsql_out_close, .node_add = pgsql_add_node, .way_add = pgsql_add_way, .relation_add = pgsql_add_relation, .node_modify = pgsql_modify_node, .way_modify = pgsql_modify_way, .relation_modify = pgsql_modify_relation, .node_delete = pgsql_delete_node, .way_delete = pgsql_delete_way, .relation_delete = pgsql_delete_relation }; osm2pgsql-0.82.0/output-pgsql.h000066400000000000000000000004211213272333300163610ustar00rootroot00000000000000/* Implements the output-layer processing for osm2pgsql * storing the data in several PostgreSQL tables * with the final PostGIS geometries for each entity */ #ifndef OUTPUT_PGSQL_H #define OUTPUT_PGSQL_H #include "output.h" extern struct output_t out_pgsql; #endif osm2pgsql-0.82.0/output.h000066400000000000000000000064471213272333300152530ustar00rootroot00000000000000/* Common output layer interface */ /* Each output layer must provide methods for * storing: * - Nodes (Points of interest etc) * - Way geometries * Associated tags: name, type etc. 
*/ #ifndef OUTPUT_H #define OUTPUT_H #include "middle.h" #include "keyvals.h" /* Variants for generation of hstore column */ /* No hstore column */ #define HSTORE_NONE 0 /* create a hstore column for all tags which do not have an exclusive column */ #define HSTORE_NORM 1 /* create a hstore column for all tags */ #define HSTORE_ALL 2 struct output_options { const char *conninfo; /* Connection info string */ const char *prefix; /* prefix for table names */ int scale; /* scale for converting coordinates to fixed point */ int projection; /* SRS of projection */ int append; /* Append to existing data */ int slim; /* In slim mode */ int cache; /* Memory usable for cache in MB */ struct middle_t *mid; /* Mid storage to use */ struct output_t *out; /* Output type used */ const char *tblsmain_index; /* Pg Tablespace to store indexes on main tables */ const char *tblsslim_index; /* Pg Tablespace to store indexes on slim tables */ const char *tblsmain_data; /* Pg Tablespace to store main tables */ const char *tblsslim_data; /* Pg Tablespace to store slim tables */ const char *style; /* style file to use */ int expire_tiles_zoom; /* Zoom level for tile expiry list */ int expire_tiles_zoom_min; /* Minimum zoom level for tile expiry list */ const char *expire_tiles_filename; /* File name to output expired tiles list to */ int enable_hstore; /* add an additional hstore column with objects key/value pairs */ int enable_hstore_index; /* add an index on the hstore column */ int enable_multi; /* Output multi-geometries intead of several simple geometries */ const char** hstore_columns; /* list of columns that should be written into their own hstore column */ int n_hstore_columns; /* number of hstore columns */ int keep_coastlines; int parallel_indexing; int alloc_chunkwise; int num_procs; int droptemp; /* drop slim mode temp tables after act */ int unlogged; /* use unlogged tables where possible */ int hstore_match_only; /* only copy rows that match an explicitly listed key */ int 
flat_node_cache_enabled; int excludepoly; const char *flat_node_file; }; struct output_t { int (*start)(const struct output_options *options); int (*connect)(const struct output_options *options, int startTransaction); void (*stop)(); void (*cleanup)(void); void (*close)(int stopTransaction); int (*node_add)(osmid_t id, double lat, double lon, struct keyval *tags); int (*way_add)(osmid_t id, osmid_t *nodes, int node_count, struct keyval *tags); int (*relation_add)(osmid_t id, struct member *members, int member_count, struct keyval *tags); int (*node_modify)(osmid_t id, double lat, double lon, struct keyval *tags); int (*way_modify)(osmid_t id, osmid_t *nodes, int node_count, struct keyval *tags); int (*relation_modify)(osmid_t id, struct member *members, int member_count, struct keyval *tags); int (*node_delete)(osmid_t id); int (*way_delete)(osmid_t id); int (*relation_delete)(osmid_t id); }; unsigned int pgsql_filter_tags(enum OsmType type, struct keyval *tags, int *polygon); #endif osm2pgsql-0.82.0/parse-o5m.c000066400000000000000000000752511213272333300155150ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #define _GNU_SOURCE /* 2011-07-03 02:30 Markus Weber */ #include #include #include #include #include #include #include #include "osmtypes.h" #include "reprojection.h" #include "output.h" #define inline typedef enum {false= 0,true= 1} bool; typedef uint8_t byte; typedef unsigned int uint; #define isdig(x) isdigit((unsigned char)(x)) static int loglevel= 0; /* logging to stderr; */ /* 0: no logging; 1: small logging; 2: normal logging; 3: extended logging; */ #define DP(f) fprintf(stderr,"- Debug: " #f "\n"); #define DPv(f,...) fprintf(stderr,"- Debug: " #f "\n",__VA_ARGS__); #if __WIN32__ #define NL "\r\n" /* use CR/LF as new-line sequence */ #define off_t off64_t #define lseek lseek64 #else #define NL "\n" /* use LF as new-line sequence */ #define O_BINARY 0 #endif #define PERR(f) \ fprintf(stderr,"osm2pgsql Error: " f "\n"); /* print error message */ #define PERRv(f,...) \ fprintf(stderr,"osm2pgsql Error: " f "\n",__VA_ARGS__); /* print error message with value(s) */ #define WARN(f) { static int msgn= 3; if(--msgn>=0) \ fprintf(stderr,"osm2pgsql Warning: " f "\n"); } /* print a warning message, do it maximal 3 times */ #define WARNv(f,...) { static int msgn= 3; if(--msgn>=0) \ fprintf(stderr,"osm2pgsql Warning: " f "\n",__VA_ARGS__); } /* print a warning message with value(s), do it maximal 3 times */ #define PINFO(f) \ fprintf(stderr,"osm2pgsql: " f "\n"); /* print info message */ #define ONAME(i) \ (i==0? "node": i==1? "way": i==2? 
"relation": "unknown object") static inline char *stpcpy0(char *dest, const char *src) { /* redefinition of C99's stpcpy() because it's missing in MinGW, and declaration in Linux seems to be wrong; */ while(*src!=0) *dest++= *src++; *dest= 0; return dest; } /* end stpcpy0() */ static inline char* uint32toa(uint32_t v,char* s) { /* convert uint32_t integer into string; v: long integer value to convert; return: s; s[]: digit string; */ char* s1,*s2; char c; s1= s; if(v==0) *s1++= '0'; s2= s1; while(v>0) { *s2++= "0123456789"[v%10]; v/= 10; } *s2--= 0; while(s2>s1) { c= *s1; *s1= *s2; *s2= c; s1++; s2--; } return s; } /* end uint32toa() */ static inline void createtimestamp(uint64_t v,char* sp) { /* write a timestamp in OSM format, e.g.: "2010-09-30T19:23:30Z", into a string; v: value of the timestamp; sp[21]: destination string; */ time_t vtime; struct tm tm; int i; vtime= v; #if __WIN32__ memcpy(&tm,gmtime(&vtime),sizeof(tm)); #else gmtime_r(&vtime,&tm); #endif i= tm.tm_year+1900; sp+= 3; *sp--= i%10+'0'; i/=10; *sp--= i%10+'0'; i/=10; *sp--= i%10+'0'; i/=10; *sp= i%10+'0'; sp+= 4; *sp++= '-'; i= tm.tm_mon+1; *sp++= i/10+'0'; *sp++= i%10+'0'; *sp++= '-'; i= tm.tm_mday; *sp++= i/10+'0'; *sp++= i%10+'0'; *sp++= 'T'; i= tm.tm_hour; *sp++= i/10+'0'; *sp++= i%10+'0'; *sp++= ':'; i= tm.tm_min; *sp++= i/10+'0'; *sp++= i%10+'0'; *sp++= ':'; i= tm.tm_sec%60; *sp++= i/10+'0'; *sp++= i%10+'0'; *sp++= 'Z'; *sp= 0; } /* end createtimestamp() */ /*------------------------------------------------------------ Module pbf_ protobuf conversions module ------------------------------------------------------------ this module provides procedures for conversions from protobuf formats to regular numbers; as usual, all identifiers of a module have the same prefix, in this case 'pbf'; one underline will follow in case of a global accessible object, two underlines in case of objects which are not meant to be accessed from outside this module; the sections of private and public definitions are 
separated by a horizontal line: ---- many procedures have a parameter 'pp'; here, the address of a buffer pointer is expected; this pointer will be incremented by the number of bytes the converted protobuf element consumes; ------------------------------------------------------------ */ static inline uint32_t pbf_uint32(byte** pp) { /* get the value of an unsigned integer; pp: see module header; */ byte* p; uint32_t i; uint32_t fac; p= *pp; i= *p; if((*p & 0x80)==0) { /* just one byte */ (*pp)++; return i; } i&= 0x7f; fac= 0x80; while(*++p & 0x80) { /* more byte(s) will follow */ i+= (*p & 0x7f)*fac; fac<<= 7; } i+= *p++ *fac; *pp= p; return i; } /* end pbf_uint32() */ static inline int32_t pbf_sint32(byte** pp) { /* get the value of an unsigned integer; pp: see module header; */ byte* p; int32_t i; int32_t fac; int sig; p= *pp; i= *p; if((*p & 0x80)==0) { /* just one byte */ (*pp)++; if(i & 1) /* negative */ return -1-(i>>1); else return i>>1; } sig= i & 1; i= (i & 0x7e)>>1; fac= 0x40; while(*++p & 0x80) { /* more byte(s) will follow */ i+= (*p & 0x7f)*fac; fac<<= 7; } i+= *p++ *fac; *pp= p; if(sig) /* negative */ return -1-i; else return i; } /* end pbf_sint32() */ static inline uint64_t pbf_uint64(byte** pp) { /* get the value of an unsigned integer; pp: see module header; */ byte* p; uint64_t i; uint64_t fac; p= *pp; i= *p; if((*p & 0x80)==0) { /* just one byte */ (*pp)++; return i; } i&= 0x7f; fac= 0x80; while(*++p & 0x80) { /* more byte(s) will follow */ i+= (*p & 0x7f)*fac; fac<<= 7; } i+= *p++ *fac; *pp= p; return i; } /* end pbf_uint64() */ static inline int64_t pbf_sint64(byte** pp) { /* get the value of a signed integer; pp: see module header; */ byte* p; int64_t i; int64_t fac; int sig; p= *pp; i= *p; if((*p & 0x80)==0) { /* just one byte */ (*pp)++; if(i & 1) /* negative */ return -1-(i>>1); else return i>>1; } sig= i & 1; i= (i & 0x7e)>>1; fac= 0x40; while(*++p & 0x80) { /* more byte(s) will follow */ i+= (*p & 0x7f)*fac; fac<<= 7; } i+= *p++ *fac; 
*pp= p; if(sig) /* negative */ return -1-i; else return i; } /* end pbf_sint64() */ #if 0 /* not used at present */ static inline void pbf_intjump(byte** pp) { /* jump over a protobuf formatted integer; pp: see module header; we do not care about a possibly existing identifier, therefore as the start address *pp the address of the integer value is expected; */ byte* p; p= *pp; while(*p & 0x80) p++; p++; *pp= p; } /* end pbf_intjump() */ #endif /*------------------------------------------------------------ end Module pbf_ protobuf conversions module ------------------------------------------------------------ */ /*------------------------------------------------------------ Module read_ OSM file read module ------------------------------------------------------------ this module provides procedures for buffered reading of standard input; as usual, all identifiers of a module have the same prefix, in this case 'read'; one underline will follow in case of a global accessible object, two underlines in case of objects which are not meant to be accessed from outside this module; the sections of private and public definitions are separated by a horizontal line: ---- */ #define read_PREFETCH ((32+3)*1024*1024) /* number of bytes which will be available in the buffer after every call of read_input(); (important for reading .pbf files: size must be greater than pb__blockM) */ #define read__bufM (read_PREFETCH*5) /* length of the buffer; */ typedef struct { /* members may not be accessed from external */ int fd; /* file descriptor */ bool eof; /* we are at the end of input file */ byte* bufp; /* pointer in buf[] */ byte* bufe; /* pointer to the end of valid input in buf[] */ int64_t read__counter; /* byte counter to get the read position in input file; */ uint64_t bufferstart; /* dummy variable which marks the start of the read buffer concatenated with this instance of read info structure; */ } read_info_t; /*------------------------------------------------------------*/ 
static read_info_t* read_infop= NULL; /* presently used read info structure, i.e. file handle */ #define read__buf ((byte*)&read_infop->bufferstart) /* start address of the file's input buffer */ static byte* read_bufp= NULL; /* may be incremented by external */ /* up to the number of read_PREFETCH bytes before read_input() is called again; */ static byte* read_bufe= NULL; /* may not be changed from external */ static int read_open(const char* filename) { /* open an input file; filename[]: path and name of input file; ==NULL: standard input; return: 0: ok; !=0: error; read_infop: handle of the file; note that you should close every opened file with read_close() before the program ends; save status of presently processed input file (if any) */ if(read_infop!=NULL) { read_infop->bufp= read_bufp; read_infop->bufp= read_bufe; } /* get memory space for file information and input buffer */ read_infop= (read_info_t*)malloc(sizeof(read_info_t)+read__bufM); if(read_infop==NULL) { PERRv("could not get %i bytes of memory.",read__bufM) return 1; } /* initialize read info structure */ read_infop->fd= 0; /* (default) standard input */ read_infop->eof= false; /* we are at the end of input file */ read_infop->bufp= read_infop->bufe= read__buf; /* pointer in buf[] */ /* pointer to the end of valid input in buf[] */ read_infop->read__counter= 0; /* set modul-global variables which are associated with this file */ read_bufp= read_infop->bufp; read_bufe= read_infop->bufe; /* open the file */ if(loglevel>=2) fprintf(stderr,"Read-opening: %s", filename==NULL? "stdin": filename); if(filename==NULL) /* stdin shall be opened */ read_infop->fd= 0; else if(filename!=NULL) { /* a real file shall be opened */ read_infop->fd= open(filename,O_RDONLY|O_BINARY); if(read_infop->fd<0) { if(loglevel>=2) fprintf(stderr," -> failed\n"); PERRv("could not open input file: %.80s\n", filename==NULL? 
"standard input": filename) free(read_infop); read_infop= NULL; read_bufp= read_bufe= NULL; return 1; } } /* end a real file shall be opened */ if(loglevel>=2) fprintf(stderr," -> FD %i\n",read_infop->fd); return 0; } /* end read_open() */ static void read_close() { /* close an opened file; read_infop: handle of the file which is to close; */ int fd; if(read_infop==NULL) /* handle not valid; */ return; fd= read_infop->fd; if(loglevel>=1) { /* verbose */ fprintf(stderr,"osm2pgsql: Number of bytes read: %"PRIu64"\n", read_infop->read__counter); } if(loglevel>=2) { fprintf(stderr,"Read-closing FD: %i\n",fd); } if(fd>0) /* not standard input */ close(fd); free(read_infop); read_infop= NULL; read_bufp= read_bufe= NULL; } /* end read_close() */ static inline bool read_input() { /* read data from standard input file, use an internal buffer; make data available at read_bufp; read_open() must have been called before calling this procedure; return: there are no (more) bytes to read; read_bufp: start of next bytes available; may be incremented by the caller, up to read_bufe; read_bufe: end of bytes in buffer; must not be changed by the caller; after having called this procedure, the caller may rely on having available at least read_PREFETCH bytes at address read_bufp - with one exception: if there are not enough bytes left to read from standard input, every byte after the end of the reminding part of the file in the buffer will be set to 0x00 - up to read_bufp+read_PREFETCH; */ int l,r; if(read_bufp+read_PREFETCH>=read_bufe) { /* read buffer is too low */ if(!read_infop->eof) { /* still bytes in the file */ if(read_bufe>read_bufp) { /* bytes remaining in buffer */ memmove(read__buf,read_bufp,read_bufe-read_bufp); /* move remaining bytes to start of buffer */ read_bufe= read__buf+(read_bufe-read_bufp); /* protect the remaining bytes at buffer start */ } else /* no remaining bytes in buffer */ read_bufe= read__buf; /* no bytes remaining to protect */ /* add read bytes to debug 
counter */ read_bufp= read__buf; do { /* while buffer has not been filled */ l= (read__buf+read__bufM)-read_bufe-4; /* number of bytes to read */ r= read(read_infop->fd,read_bufe,l); if(r<=0) { /* no more bytes in the file */ read_infop->eof= true; /* memorize that there we are at end of file */ l= (read__buf+read__bufM)-read_bufe; /* reminding space in buffer */ if(l>read_PREFETCH) l= read_PREFETCH; memset(read_bufe,0,l); /* set reminding space up to prefetch bytes in buffer to 0 */ break; } read_infop->read__counter+= r; read_bufe+= r; /* set new mark for end of data */ read_bufe[0]= 0; read_bufe[1]= 0; /* set 4 null-terminators */ read_bufe[2]= 0; read_bufe[3]= 0; } while(reof && read_bufp>=read_bufe; } /* end read__input() */ /*------------------------------------------------------------ end Module read_ OSM file read module ------------------------------------------------------------ */ /*------------------------------------------------------------ Module str_ string read module ------------------------------------------------------------ this module provides procedures for conversions from strings which have been stored in data stream objects to c-formatted strings; as usual, all identifiers of a module have the same prefix, in this case 'str'; one underline will follow in case of a global accessible object, two underlines in case of objects which are not meant to be accessed from outside this module; the sections of private and public definitions are separated by a horizontal line: ---- */ #define str__tabM (15000+4000) /* +4000 because it might happen that an object has a lot of key/val pairs or refroles which are not stored already; */ #define str__tabstrM 250 /* must be < row size of str__rab[] */ typedef struct str__info_struct { /* members of this structure must not be accessed from outside this module; */ char tab[str__tabM][256]; /* string table; see o5m documentation; row length must be at least str__tabstrM+2; each row contains a double string; each 
of the two strings is terminated by a zero byte, the logical lengths must not exceed str__tabstrM bytes in total; the first str__tabM lines of this array are used as input buffer for strings; */ int tabi; /* index of last entered element in string table; */ int tabn; /* number of valid strings in string table; */ struct str__info_struct* prev; /* address of previous unit; */ } str_info_t; str_info_t* str__infop= NULL; static void str__end() { /* clean-up this module; */ str_info_t* p; while(str__infop!=NULL) { p= str__infop->prev; free(str__infop); str__infop= p; } } /* end str__end() */ /*------------------------------------------------------------*/ static str_info_t* str_open() { /* open an new string client unit; this will allow us to process multiple o5m input files; return: handle of the new unit; ==NULL: error; you do not need to care about closing the unit(s); */ static bool firstrun= true; str_info_t* prev; prev= str__infop; str__infop= (str_info_t*)malloc(sizeof(str_info_t)); if(str__infop==NULL) { PERR("could not get memory for string buffer.") return NULL; } str__infop->tabi= 0; str__infop->tabn= 0; str__infop->prev= prev; if(firstrun) { firstrun= false; atexit(str__end); } return str__infop; } /* end str_open() */ static void inline str_reset() { /* clear string table; must be called before any other procedure of this module and may be called every time the string processing shall be restarted; */ str__infop->tabi= str__infop->tabn= 0; } /* end str_reset() */ static void str_read(byte** pp,char** s1p,char** s2p) { /* read an o5m formatted string (pair), e.g. 
key/val, from standard input buffer; if got a string reference, resolve it, using an internal string table; no reference is used if the strings are longer than 250 characters in total (252 including terminators); pp: address of a buffer pointer; this pointer will be incremented by the number of bytes the converted protobuf element consumes; s2p: ==NULL: read not a string pair but a single string; return: *s1p,*s2p: pointers to the strings which have been read; */ char* p; int len1,len2; int ref; p= (char*)*pp; if(*p==0) { /* string (pair) given directly */ *s1p= ++p; len1= strlen(p); p+= len1+1; if(s2p==NULL) { /* single string */ /* p= strchr(p,0)+1; jump over second string (if any) */ if(len1<=str__tabstrM) { char* tmpcharp; /* single string short enough for string table */ tmpcharp= stpcpy0(str__infop->tab[str__infop->tabi],*s1p); tmpcharp[1]= 0; /* add a second terminator, just in case someone will try to read this single string as a string pair later; */ if(++str__infop->tabi>=str__tabM) str__infop->tabi= 0; if(str__infop->tabntabn++; } /* end single string short enough for string table */ } /* end single string */ else { /* string pair */ *s2p= p; len2= strlen(p); p+= len2+1; if(len1+len2<=str__tabstrM) { /* string pair short enough for string table */ memcpy(str__infop->tab[str__infop->tabi],*s1p,len1+len2+2); if(++str__infop->tabi>=str__tabM) str__infop->tabi= 0; if(str__infop->tabntabn++; } /* end string pair short enough for string table */ } /* end string pair */ *pp= (byte*)p; } /* end string (pair) given directly */ else { /* string (pair) given by reference */ ref= pbf_uint32(pp); if(ref>str__infop->tabn) { /* string reference invalid */ WARNv("invalid .o5m string reference: %i->%i", str__infop->tabn,ref) *s1p= "(invalid)"; if(s2p!=NULL) /* caller wants a string pair */ *s2p= "(invalid)"; } /* end string reference invalid */ else { /* string reference valid */ ref= str__infop->tabi-ref; if(ref<0) ref+= str__tabM; *s1p= str__infop->tab[ref]; 
if(s2p!=NULL) /* caller wants a string pair */ *s2p= strchr(str__infop->tab[ref],0)+1; } /* end string reference valid */ } /* end string (pair) given by reference */ } /* end str_read() */ /*------------------------------------------------------------ end Module str_ string read module ------------------------------------------------------------ */ int streamFileO5m(char *filename,int sanitize,struct osmdata_t *osmdata) { /* open and parse an .o5m file; */ /* return: ==0: ok; !=0: error; */ int otype; /* type of currently processed object; */ /* 0: node; 1: way; 2: relation; */ uint32_t hisver; int64_t histime; int64_t hiscset; uint32_t hisuid; char* hisuser; str_info_t* str; /* string unit handle (if o5m format) */ bool endoffile; int64_t o5id; /* for o5m delta coding */ int32_t o5lon,o5lat; /* for o5m delta coding */ int64_t o5histime; /* for o5m delta coding */ int64_t o5hiscset; /* for o5m delta coding */ int64_t o5rid[3]; /* for o5m delta coding */ byte* bufp; /* pointer in read buffer */ #define bufsp ((char*)bufp) /* for signed char */ byte* bufe; /* pointer in read buffer, end of object */ char c; /* latest character which has been read */ byte b; /* latest byte which has been read */ int l; byte* bp; /* procedure initializations */ str= str_open(); /* call some initialization of string read module */ str_reset(); o5id= 0; o5lat= o5lon= 0; o5hiscset= 0; o5histime= 0; o5rid[0]= o5rid[1]= o5rid[2]= 0; /* open the input file */ if(read_open(filename)!=0) { fprintf(stderr,"Unable to open %s\n",filename); return 1; } endoffile= false; /* determine file type */ { char* p; read_input(); if(*read_bufp!=0xff) { /* cannot be an .o5m file, nor an .o5c file */ PERR("File format neither .o5m nor .o5c") return 1; } p= strchr(filename,0)-4; /* get end of filename */ if(memcmp(read_bufp,"\xff\xe0\0x04""o5m2",7)==0) osmdata->filetype= FILETYPE_OSM; else if(memcmp(read_bufp,"\xff\xe0\0x04""o5c2",7)==0) osmdata->filetype= FILETYPE_OSMCHANGE; else if(p>=filename && 
strcmp(p,".o5m")==0) osmdata->filetype= FILETYPE_OSM; else if(p>=filename && (strcmp(p,".o5c")==0 || strcmp(p,".o5h")==0)) osmdata->filetype= FILETYPE_OSMCHANGE; else { WARN("File type not specified. Assuming .o5m") osmdata->filetype= FILETYPE_OSM; } if(osmdata->filetype==FILETYPE_OSM) PINFO("Processing .o5m file (not a change file).") else PINFO("Processing .o5c change file.") } /* process the input file */ for(;;) { /* read input file */ /* get next object */ read_input(); bufp= read_bufp; b= *bufp; c= (char)b; /* care about file end */ if(read_bufp>=read_bufe) /* at end of input file; */ break; if(endoffile) { /* after logical end of file */ fprintf(stderr,"osm2pgsql Warning: unexpected contents " "after logical end of file.\n"); break; } /* care about header and unknown objects */ if(b<0x10 || b>0x12) { /* not a regular dataset id */ if(b>=0xf0) { /* single byte dataset */ if(b==0xff) { /* file start, resp. o5m reset */ str_reset(); o5id= 0; o5lat= o5lon= 0; o5hiscset= 0; o5histime= 0; o5rid[0]= o5rid[1]= o5rid[2]= 0; } else if(b==0xfe) endoffile= true; else WARNv("unknown .o5m short dataset id: 0x%02x\n",b) read_bufp++; continue; } /* end single byte dataset */ else { /* unknown multibyte dataset */ if(b!=0xe0 && b!=0xdc) WARNv("unknown .o5m dataset id: 0x%02x\n",b) read_bufp++; l= pbf_uint32(&read_bufp); /* jump over this dataset */ read_bufp+= l; /* jump over this dataset */ continue; } /* end unknown multibyte dataset */ } /* end not a regular dataset id */ otype= b&3; /* object initialization */ hisver= 0; histime= 0; hiscset= 0; hisuid= 0; hisuser= ""; osmdata->nd_count= 0; osmdata->member_count= 0; /* read object id */ bufp++; l= pbf_uint32(&bufp); read_bufp= bufe= bufp+l; osmdata->osm_id= o5id+= pbf_sint64(&bufp); /* do statistics on object id */ switch(otype) { case 0: /* node */ if(osmdata->osm_id>osmdata->max_node) osmdata->max_node= osmdata->osm_id; if (osmdata->count_node == 0) { time(&osmdata->start_node); } osmdata->count_node++; 
if(osmdata->count_node%10000==0) printStatus(osmdata); break; case 1: /* way */ if(osmdata->osm_id>osmdata->max_way) osmdata->max_way= osmdata->osm_id; if (osmdata->count_way == 0) { time(&osmdata->start_way); } osmdata->count_way++; if(osmdata->count_way%1000==0) printStatus(osmdata); break; case 2: /* relation */ if(osmdata->osm_id>osmdata->max_rel) osmdata->max_rel= osmdata->osm_id; if (osmdata->count_rel == 0) { time(&osmdata->start_rel); } osmdata->count_rel++; if(osmdata->count_rel%10==0) printStatus(osmdata); break; default: ; } /* read history */ { char tmpstr[50]; char* sp; hisver= pbf_uint32(&bufp); uint32toa(hisver,tmpstr); addItem(&(osmdata->tags),"osm_version",tmpstr,0); if(hisver!=0) { /* history information available */ histime= o5histime+= pbf_sint64(&bufp); createtimestamp(histime,tmpstr); addItem(&(osmdata->tags),"osm_timestamp",tmpstr, 0); if(histime!=0) { hiscset= o5hiscset+= pbf_sint32(&bufp); /* (not used) */ str_read(&bufp,&sp,&hisuser); hisuid= pbf_uint64((byte**)&sp); uint32toa(hisuid,tmpstr); addItem(&(osmdata->tags),"osm_uid",tmpstr,0); addItem(&(osmdata->tags),"osm_user",hisuser,0); } } /* end history information available */ } /* end read history */ /* perform action */ if(bufp>=bufe) { /* just the id and history, i.e. 
this is a delete request */ osmdata->action= ACTION_DELETE; switch(otype) { case 0: /* node */ osmdata->out->node_delete(osmdata->osm_id); break; case 1: /* way */ osmdata->out->way_delete(osmdata->osm_id); break; case 2: /* relation */ osmdata->out->relation_delete(osmdata->osm_id); break; default: ; } resetList(&(osmdata->tags)); continue; /* end processing for this object */ } /* end delete request */ else { /* not a delete request */ /* determine action */ if(osmdata->filetype==FILETYPE_OSMCHANGE && hisver>1) osmdata->action= ACTION_MODIFY; else osmdata->action= ACTION_CREATE; /* read coordinates (for nodes only) */ if(otype==0) { /* node */ /* read node body */ osmdata->node_lon= (double)(o5lon+= pbf_sint32(&bufp))/10000000; osmdata->node_lat= (double)(o5lat+= pbf_sint32(&bufp))/10000000; if(!node_wanted(osmdata,osmdata->node_lat,osmdata->node_lon)) { resetList(&(osmdata->tags)); continue; } reproject(&(osmdata->node_lat),&(osmdata->node_lon)); } /* end node */ /* read noderefs (for ways only) */ if(otype==1) { /* way */ l= pbf_uint32(&bufp); bp= bufp+l; if(bp>bufe) bp= bufe; /* (format error) */ while(bufpnds[osmdata->nd_count++]= o5rid[0]+= pbf_sint64(&bufp); if(osmdata->nd_count>=osmdata->nd_max) realloc_nodes(osmdata); } /* end for all noderefs of this way */ } /* end way */ /* read refs (for relations only) */ else if(otype==2) { /* relation */ int64_t ri; /* temporary, refid */ int rt; /* temporary, reftype */ char* rr; /* temporary, refrole */ l= pbf_uint32(&bufp); bp= bufp+l; if(bp>bufe) bp= bufe; /* (format error) */ while(bufpmembers[osmdata->member_count].type= OSMTYPE_NODE; break; case 1: /* way */ osmdata->members[osmdata->member_count].type= OSMTYPE_WAY; break; case 2: /* relation */ osmdata->members[osmdata->member_count].type= OSMTYPE_RELATION; break; } osmdata->members[osmdata->member_count].id= o5rid[rt]+= ri; osmdata->members[osmdata->member_count].role= rr; osmdata->member_count++; if(osmdata->member_count>=osmdata->member_max) 
realloc_members(osmdata); } /* end for all references of this relation */ } /* end relation */ /* read node key/val pairs */ while(bufptags),k,v,0); } } /* end for all tags of this object */ /* write object into database */ switch(otype) { case 0: /* node */ if(osmdata->action==ACTION_CREATE) osmdata->out->node_add(osmdata->osm_id, osmdata->node_lat,osmdata->node_lon,&(osmdata->tags)); else /* ACTION_MODIFY */ osmdata->out->node_modify(osmdata->osm_id, osmdata->node_lat,osmdata->node_lon,&(osmdata->tags)); break; case 1: /* way */ if(osmdata->action==ACTION_CREATE) osmdata->out->way_add(osmdata->osm_id, osmdata->nds,osmdata->nd_count,&(osmdata->tags)); else /* ACTION_MODIFY */ osmdata->out->way_modify(osmdata->osm_id, osmdata->nds,osmdata->nd_count,&(osmdata->tags)); break; case 2: /* relation */ if(osmdata->action==ACTION_CREATE) osmdata->out->relation_add(osmdata->osm_id, osmdata->members,osmdata->member_count,&(osmdata->tags)); else /* ACTION_MODIFY */ osmdata->out->relation_modify(osmdata->osm_id, osmdata->members,osmdata->member_count,&(osmdata->tags)); break; default: ; } /* reset temporary storage lists */ resetList(&(osmdata->tags)); } /* end not a delete request */ } /* end read input file */ /* close the input file */ printStatus(osmdata); read_close(); return 0; } /* streamFileO5m() */ osm2pgsql-0.82.0/parse-o5m.h000066400000000000000000000024121213272333300155070ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the 
License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #ifndef PARSE_O5M_H #define PARSE_O5M_h int streamFileO5m(char *filename,int sanitize,struct osmdata_t *osmdata); #endif osm2pgsql-0.82.0/parse-pbf.c000066400000000000000000000415051213272333300155570ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
#----------------------------------------------------------------------------- */ #include #include #include #include #include #include #include #include "osmtypes.h" #include "output.h" #include "reprojection.h" #include "fileformat.pb-c.h" #include "osmformat.pb-c.h" #define UNUSED __attribute__ ((unused)) #define MAX_BLOCK_HEADER_SIZE 64*1024 #define MAX_BLOB_SIZE 32*1024*1024 #define NANO_DEGREE .000000001 static uint32_t get_length(FILE *input) { char buf[4]; if (1 != fread(buf, sizeof(buf), 1, input)) return 0; return ntohl(*((size_t *)buf)); } static void *realloc_or_free(void *p, size_t len) { void *new = realloc(p, len); if (new == NULL) { free(p); } return new; } static BlockHeader *read_header(FILE *input, void *buf) { BlockHeader *header_msg; size_t read, length = get_length(input); if (length < 1 || length > MAX_BLOCK_HEADER_SIZE) { if (!feof(input)) { fprintf(stderr, "Invalid blocksize %lu\n", (unsigned long)length); } return NULL; } read = fread(buf, length, 1, input); if (!read) { perror("parse-pbf: error while reading header data"); return NULL; } header_msg = block_header__unpack (NULL, length, buf); if (header_msg == NULL) { fprintf(stderr, "Error unpacking BlockHeader message\n"); return NULL; } return header_msg; } static Blob *read_blob(FILE *input, void *buf, int32_t length) { Blob *blob_msg; if (length < 1 || length > MAX_BLOB_SIZE) { fprintf(stderr, "Blob isn't present or exceeds minimum/maximum size\n"); return NULL; } if(1 != fread(buf, length, 1, input)) { fprintf(stderr, "error reading blob content\n"); return NULL; } blob_msg = blob__unpack (NULL, length, buf); if (blob_msg == NULL) { fprintf(stderr, "Error unpacking Blob message\n"); return NULL; } return blob_msg; } static size_t uncompress_blob(Blob *bmsg, void *buf, int32_t max_size) { if (bmsg->raw_size > max_size) { fprintf(stderr, "blob raw size too large\n"); return 0; } if (bmsg->has_raw) { memcpy(buf, bmsg->raw.data, bmsg->raw.len); return bmsg->raw.len; } else if 
(bmsg->has_zlib_data) { int ret; z_stream strm; strm.zalloc = Z_NULL; strm.zfree = Z_NULL; strm.opaque = Z_NULL; strm.avail_in = bmsg->zlib_data.len; strm.next_in = bmsg->zlib_data.data; strm.avail_out = bmsg->raw_size; strm.next_out = buf; ret = inflateInit(&strm); if (ret != Z_OK) { fprintf(stderr, "Zlib init failed\n"); return 0; } ret = inflate(&strm, Z_NO_FLUSH); (void)inflateEnd(&strm); if (ret != Z_STREAM_END) { fprintf(stderr, "Zlib compression failed (code %d, %s)\n", ret, strm.msg); return 0; } return bmsg->raw_size; } else if (bmsg->has_bzip2_data) { int ret; bz_stream strm; strm.bzalloc = NULL; strm.bzfree = NULL; strm.opaque = NULL; strm.avail_in = bmsg->bzip2_data.len; strm.next_in = (char *) bmsg->bzip2_data.data; strm.avail_out = bmsg->raw_size; strm.next_out = buf; ret = BZ2_bzDecompressInit(&strm, 0, 0); if (ret != BZ_OK) { fprintf(stderr, "Bzip2 init failed\n"); return 0; } (void)BZ2_bzDecompressEnd(&strm); if (ret != BZ_STREAM_END) { fprintf(stderr, "Bzip2 compression failed\n"); return 0; } return bmsg->raw_size; } else if (bmsg->has_lzma_data) { fprintf(stderr, "Can't uncompress LZMA data\n"); return 0; } else { fprintf(stderr, "We cannot handle the %d non-raw bytes yet...\n", bmsg->raw_size); return 0; } return 0; } int addProtobufItem(struct keyval *head, ProtobufCBinaryData key, ProtobufCBinaryData val, int noDupe) { char *keystr, *valstr; int retval; keystr = calloc(key.len + 1, 1); memcpy(keystr, key.data, key.len); /* drop certain keys (matching parse-xml2) */ if ((strcmp(keystr, "created_by") == 0) || (strcmp(keystr, "source") == 0)) { free(keystr); return 0; } valstr = calloc(val.len + 1, 1); memcpy(valstr, val.data, val.len); retval = addItem(head, keystr, valstr, noDupe); free(keystr); free(valstr); return retval; } int addIntItem(struct keyval *head, const char *key, int val, int noDupe) { char buf[100]; sprintf(buf, "%d", val); return addItem(head, key, buf, noDupe); } int addInfoItems(struct keyval *head, Info *info, StringTable 
*string_table) { if (info->has_version) { addIntItem(head, "osm_version", info->version, 0); } if (info->has_changeset) { addIntItem(head, "osm_changeset", info->changeset, 0); } if (info->has_uid) { addIntItem(head, "osm_uid", info->uid, 0); } if (info->has_user_sid) { ProtobufCBinaryData user = string_table->s[info->user_sid]; char *username; username = calloc(user.len + 1, 1); memcpy(username, user.data, user.len); addItem(head, "osm_user", username, 0); } /* TODO timestamp */ return 0; } int processOsmHeader(void *data, size_t length) { HeaderBlock *hmsg = header_block__unpack (NULL, length, data); if (hmsg == NULL) { fprintf(stderr, "Error unpacking HeaderBlock message\n"); return 0; } header_block__free_unpacked (hmsg, &protobuf_c_system_allocator); return 1; } int processOsmDataNodes(struct osmdata_t *osmdata, PrimitiveGroup *group, StringTable *string_table, double lat_offset, double lon_offset, double granularity) { unsigned node_id, key_id; for (node_id = 0; node_id < group->n_nodes; node_id++) { Node *node = group->nodes[node_id]; double lat, lon; resetList(&(osmdata->tags)); if (node->info && osmdata->extra_attributes) { addInfoItems(&(osmdata->tags), node->info, string_table); } for (key_id = 0; key_id < node->n_keys; key_id++) { addProtobufItem(&(osmdata->tags), string_table->s[node->keys[key_id]], string_table->s[node->vals[key_id]], 0); } lat = lat_offset + (node->lat * granularity); lon = lon_offset + (node->lon * granularity); if (node_wanted(osmdata, lat, lon)) { reproject(&lat, &lon); osmdata->out->node_add(node->id, lat, lon, &(osmdata->tags)); if (node->id > osmdata->max_node) { osmdata->max_node = node->id; } if (osmdata->count_node == 0) { time(&osmdata->start_node); } osmdata->count_node++; if (osmdata->count_node%10000 == 0) printStatus(osmdata); } } return 1; } int processOsmDataDenseNodes(struct osmdata_t *osmdata, PrimitiveGroup *group, StringTable *string_table, double lat_offset, double lon_offset, double granularity) { unsigned 
node_id; if (group->dense) { unsigned l = 0; osmid_t deltaid = 0; long int deltalat = 0; long int deltalon = 0; unsigned long int deltatimestamp = 0; unsigned long int deltachangeset = 0; long int deltauid = 0; unsigned long int deltauser_sid = 0; double lat, lon; DenseNodes *dense = group->dense; for (node_id = 0; node_id < dense->n_id; node_id++) { resetList(&(osmdata->tags)); deltaid += dense->id[node_id]; deltalat += dense->lat[node_id]; deltalon += dense->lon[node_id]; if (dense->denseinfo && osmdata->extra_attributes) { DenseInfo *denseinfo = dense->denseinfo; deltatimestamp += denseinfo->timestamp[node_id]; deltachangeset += denseinfo->changeset[node_id]; deltauid += denseinfo->uid[node_id]; deltauser_sid += denseinfo->user_sid[node_id]; addIntItem(&(osmdata->tags), "osm_version", denseinfo->version[node_id], 0); addIntItem(&(osmdata->tags), "osm_changeset", deltachangeset, 0); #if 0 /* TODO */ if (deltauid != -1) { /* osmosis devs failed to read the specs */ printuser(string_table->s[deltauser_sid]); printnumericattribute("osm_uid", deltauid); } printtimestamp("osm_timestamp", deltatimestamp); #endif } if (l < dense->n_keys_vals) { while (dense->keys_vals[l] != 0 && l < dense->n_keys_vals) { addProtobufItem(&(osmdata->tags), string_table->s[dense->keys_vals[l]], string_table->s[dense->keys_vals[l+1]], 0); l += 2; } l += 1; } lat = lat_offset + (deltalat * granularity); lon = lon_offset + (deltalon * granularity); if (node_wanted(osmdata, lat, lon)) { reproject(&lat, &lon); osmdata->out->node_add(deltaid, lat, lon, &(osmdata->tags)); if (deltaid > osmdata->max_node) { osmdata->max_node = deltaid; } if (osmdata->count_node == 0) { time(&osmdata->start_node); } osmdata->count_node++; if (osmdata->count_node%10000 == 0) printStatus(osmdata); } } } return 1; } int processOsmDataWays(struct osmdata_t *osmdata, PrimitiveGroup *group, StringTable *string_table) { unsigned way_id, key_id, ref_id; for (way_id = 0; way_id < group->n_ways; way_id++) { Way *way = 
group->ways[way_id]; osmid_t deltaref = 0; resetList(&(osmdata->tags)); if (way->info && osmdata->extra_attributes) { addInfoItems(&(osmdata->tags), way->info, string_table); } osmdata->nd_count = 0; for (ref_id = 0; ref_id < way->n_refs; ref_id++) { deltaref += way->refs[ref_id]; osmdata->nds[osmdata->nd_count++] = deltaref; if( osmdata->nd_count >= osmdata->nd_max ) realloc_nodes(osmdata); } for (key_id = 0; key_id < way->n_keys; key_id++) { addProtobufItem(&(osmdata->tags), string_table->s[way->keys[key_id]], string_table->s[way->vals[key_id]], 0); } osmdata->out->way_add(way->id, osmdata->nds, osmdata->nd_count, &(osmdata->tags) ); if (way->id > osmdata->max_way) { osmdata->max_way = way->id; } if (osmdata->count_way == 0) { time(&osmdata->start_way); } osmdata->count_way++; if (osmdata->count_way%1000 == 0) printStatus(osmdata); } return 1; } int processOsmDataRelations(struct osmdata_t *osmdata, PrimitiveGroup *group, StringTable *string_table) { unsigned rel_id, member_id, key_id; for (rel_id = 0; rel_id < group->n_relations; rel_id++) { Relation *relation = group->relations[rel_id]; osmid_t deltamemids = 0; resetList(&(osmdata->tags)); osmdata->member_count = 0; if (relation->info && osmdata->extra_attributes) { addInfoItems(&(osmdata->tags), relation->info, string_table); } for (member_id = 0; member_id < relation->n_memids; member_id++) { ProtobufCBinaryData role = string_table->s[relation->roles_sid[member_id]]; char *rolestr; deltamemids += relation->memids[member_id]; osmdata->members[osmdata->member_count].id = deltamemids; rolestr = calloc(role.len + 1, 1); memcpy(rolestr, role.data, role.len); osmdata->members[osmdata->member_count].role = rolestr; switch (relation->types[member_id]) { case RELATION__MEMBER_TYPE__NODE: osmdata->members[osmdata->member_count].type = OSMTYPE_NODE; break; case RELATION__MEMBER_TYPE__WAY: osmdata->members[osmdata->member_count].type = OSMTYPE_WAY; break; case RELATION__MEMBER_TYPE__RELATION: 
osmdata->members[osmdata->member_count].type = OSMTYPE_RELATION; break; default: fprintf(stderr, "Unsupported type: %u""\n", relation->types[member_id]); return 0; } osmdata->member_count++; if( osmdata->member_count >= osmdata->member_max ) { realloc_members(osmdata); } } for (key_id = 0; key_id < relation->n_keys; key_id++) { addProtobufItem(&(osmdata->tags), string_table->s[relation->keys[key_id]], string_table->s[relation->vals[key_id]], 0); } osmdata->out->relation_add(relation->id, osmdata->members, osmdata->member_count, &(osmdata->tags)); for (member_id = 0; member_id < osmdata->member_count; member_id++) { free(osmdata->members[member_id].role); } if (relation->id > osmdata->max_rel) { osmdata->max_rel = relation->id; } if (osmdata->count_rel == 0) { time(&osmdata->start_rel); } osmdata->count_rel++; if (osmdata->count_rel%10 == 0) printStatus(osmdata); } return 1; } int processOsmData(struct osmdata_t *osmdata, void *data, size_t length) { unsigned int j; double lat_offset, lon_offset, granularity; PrimitiveBlock *pmsg = primitive_block__unpack (NULL, length, data); if (pmsg == NULL) { fprintf(stderr, "Error unpacking PrimitiveBlock message\n"); return 0; } lat_offset = NANO_DEGREE * pmsg->lat_offset; lon_offset = NANO_DEGREE * pmsg->lon_offset; granularity = NANO_DEGREE * pmsg->granularity; for (j = 0; j < pmsg->n_primitivegroup; j++) { PrimitiveGroup *group = pmsg->primitivegroup[j]; StringTable *string_table = pmsg->stringtable; if (!processOsmDataNodes(osmdata, group, string_table, lat_offset, lon_offset, granularity)) return 0; if (!processOsmDataDenseNodes(osmdata, group, string_table, lat_offset, lon_offset, granularity)) return 0; if (!processOsmDataWays(osmdata, group, string_table)) return 0; if (!processOsmDataRelations(osmdata, group, string_table)) return 0; } primitive_block__free_unpacked (pmsg, &protobuf_c_system_allocator); return 1; } int streamFilePbf(char *filename, int sanitize UNUSED, struct osmdata_t *osmdata) { void *header = NULL; 
void *blob = NULL; char *data = NULL; FILE *input = NULL; BlockHeader *header_msg = NULL; Blob *blob_msg = NULL; size_t length; int exit_status = EXIT_FAILURE; header = malloc(MAX_BLOCK_HEADER_SIZE); if (!header) { fprintf(stderr, "parse-pbf: out of memory allocating header buffer\n"); goto err; } blob = malloc(MAX_BLOB_SIZE); if (!blob) { fprintf(stderr, "parse-pbf: out of memory allocating blob buffer\n"); goto err; } data = malloc(MAX_BLOB_SIZE); if (!data) { fprintf(stderr, "parse-pbf: out of memory allocating data buffer\n"); goto err; } input = fopen(filename, "rb"); if (!input) { fprintf(stderr, "Unable to open %s\n", filename); goto err; } do { header_msg = read_header(input, header); if (header_msg == NULL) { break; } blob_msg = read_blob(input, blob, header_msg->datasize); length = uncompress_blob(blob_msg, data, MAX_BLOB_SIZE); if (!length) { goto err; } if (strcmp(header_msg->type, "OSMHeader") == 0) { if (!processOsmHeader(data, length)) { goto err; } } else if (strcmp(header_msg->type, "OSMData") == 0) { if (!processOsmData(osmdata, data, length)) { goto err; } } blob__free_unpacked (blob_msg, &protobuf_c_system_allocator); block_header__free_unpacked (header_msg, &protobuf_c_system_allocator); } while (!feof(input)); if (!feof(input)) { goto err; } exit_status = EXIT_SUCCESS; err: if (input) fclose(input); if (header) free(header); if (blob) free(blob); if (data) free(data); return exit_status; } osm2pgsql-0.82.0/parse-pbf.h000066400000000000000000000024101213272333300155540ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under 
the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #ifndef PARSE_PBF_H #define PARSE_PBF_h int streamFilePbf(char *filename, int sanitize, struct osmdata_t *osmdata); #endif osm2pgsql-0.82.0/parse-primitive.c000066400000000000000000000371511213272333300170220ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ /* This is a version of osm2pgsql without proper XML parsing it should arrive at the same results as the normal osm2pgsql and take an hour less to process the full planet file but YMMV. This is just a proof of concept and should not be used in a production environment. */ #define _GNU_SOURCE #define UNUSED __attribute__ ((unused)) #include #include #include #include #include #include "osmtypes.h" #include "sanitizer.h" #include "reprojection.h" #include "input.h" #include "output.h" char *extractAttribute(char **token, int tokens, char *attname) { char buffer[256]; int cl; int i; char *in; char *out; sprintf(buffer, "%s=\"", attname); cl = strlen(buffer); for (i=0; ifiletype == FILETYPE_OSMCHANGE || osmdata->filetype == FILETYPE_PLANETDIFF ) return osmdata->action; new_action = ACTION_NONE; action = extractAttribute(token, tokens, "action"); if( action == NULL ) new_action = ACTION_CREATE; else if( strcmp((char *)action, "modify") == 0 ) new_action = ACTION_MODIFY; else if( strcmp((char *)action, "delete") == 0 ) new_action = ACTION_DELETE; else { fprintf( stderr, "Unknown value for action: %s\n", (char*)action ); exit_nicely(); } return new_action; } static void StartElement(char *name, char *line, struct osmdata_t *osmdata) { char *xid, *xlat, *xlon, *xk, *xv, *xrole, *xtype; char *token[255]; int tokens = 0; int quote = 0; char *i; if (osmdata->filetype == FILETYPE_NONE) { if (!strcmp(name, "?xml")) return; if (!strcmp(name, "osm")) { osmdata->filetype = FILETYPE_OSM; osmdata->action = ACTION_CREATE; } else if (!strcmp(name, "osmChange")) { osmdata->filetype = FILETYPE_OSMCHANGE; osmdata->action = ACTION_NONE; } else if (!strcmp(name, "planetdiff")) { osmdata->filetype = 
FILETYPE_PLANETDIFF; osmdata->action = ACTION_NONE; } else { fprintf( stderr, "Unknown XML document type: %s\n", name ); exit_nicely(); } return; } tokens=1; token[0] = line; for (i=line; *i; i++) { if (quote) { if (*i == '"') { quote = 0; } } else { if (*i == '"') { quote = 1; } else if (isspace(*i)) { *i = 0; token[tokens++] = i + 1; } } } if (!strcmp(name, "node")) { xid = extractAttribute(token, tokens, "id"); xlon = extractAttribute(token, tokens, "lon"); xlat = extractAttribute(token, tokens, "lat"); assert(xid); assert(xlon); assert(xlat); osmdata->osm_id = strtoosmid((char *)xid, NULL, 10); osmdata->node_lon = strtod((char *)xlon, NULL); osmdata->node_lat = strtod((char *)xlat, NULL); osmdata->action = ParseAction(token, tokens, osmdata); if (osmdata->osm_id > osmdata->max_node) osmdata->max_node = osmdata->osm_id; if (osmdata->count_node == 0) { time(&osmdata->start_node); } osmdata->count_node++; if (osmdata->count_node%10000 == 0) printStatus(osmdata); } else if (!strcmp(name, "tag")) { xk = extractAttribute(token, tokens, "k"); assert(xk); /* 'created_by' and 'source' are common and not interesting to mapnik renderer */ if (strcmp((char *)xk, "created_by") && strcmp((char *)xk, "source")) { char *p; xv = extractAttribute(token, tokens, "v"); assert(xv); while ((p = strchr(xk, ' '))) *p = '_'; addItem(&(osmdata->tags), xk, (char *)xv, 0); } } else if (!strcmp(name, "way")) { xid = extractAttribute(token, tokens, "id"); assert(xid); osmdata->osm_id = strtoosmid((char *)xid, NULL, 10); osmdata->action = ParseAction(token, tokens, osmdata); if (osmdata->osm_id > osmdata->max_way) osmdata->max_way = osmdata->osm_id; if (osmdata->count_way == 0) { time(&osmdata->start_way); } osmdata->count_way++; if (osmdata->count_way%1000 == 0) printStatus(osmdata); osmdata->nd_count = 0; } else if (!strcmp(name, "nd")) { xid = extractAttribute(token, tokens, "ref"); assert(xid); osmdata->nds[osmdata->nd_count++] = strtoosmid( (char *)xid, NULL, 10 ); if( osmdata->nd_count 
>= osmdata->nd_max ) realloc_nodes(osmdata); } else if (!strcmp(name, "relation")) { xid = extractAttribute(token, tokens, "id"); assert(xid); osmdata->osm_id = strtoosmid((char *)xid, NULL, 10); osmdata->action = ParseAction(token, tokens, osmdata); if (osmdata->osm_id > osmdata->max_rel) osmdata->max_rel = osmdata->osm_id; if (osmdata->count_rel == 0) { time(&osmdata->start_rel); } osmdata->count_rel++; if (osmdata->count_rel%10 == 0) printStatus(osmdata); osmdata->member_count = 0; } else if (!strcmp(name, "member")) { xrole = extractAttribute(token, tokens, "role"); assert(xrole); xtype = extractAttribute(token, tokens, "type"); assert(xtype); xid = extractAttribute(token, tokens, "ref"); assert(xid); osmdata->members[osmdata->member_count].id = strtoosmid( (char *)xid, NULL, 0 ); osmdata->members[osmdata->member_count].role = strdup( (char *)xrole ); /* Currently we are only interested in 'way' members since these form polygons with holes */ if (!strcmp(xtype, "way")) osmdata->members[osmdata->member_count].type = OSMTYPE_WAY; else if (!strcmp(xtype, "node")) osmdata->members[osmdata->member_count].type = OSMTYPE_NODE; else if (!strcmp(xtype, "relation")) osmdata->members[osmdata->member_count].type = OSMTYPE_RELATION; osmdata->member_count++; if( osmdata->member_count >= osmdata->member_max ) realloc_members(osmdata); } else if (!strcmp(name, "add") || !strcmp(name, "create")) { osmdata->action = ACTION_MODIFY; /* Turns all creates into modifies, makes it resiliant against inconsistant snapshots. 
*/ } else if (!strcmp(name, "modify")) { osmdata->action = ACTION_MODIFY; } else if (!strcmp(name, "delete")) { osmdata->action = ACTION_DELETE; } else if (!strcmp(name, "bound")) { /* ignore */ } else if (!strcmp(name, "bounds")) { /* ignore */ } else if (!strcmp(name, "changeset")) { /* ignore */ } else { fprintf(stderr, "%s: Unknown element name: %s\n", __FUNCTION__, name); } /* Collect extra attribute information and add as tags */ if (osmdata->extra_attributes && (!strcmp(name, "node") || !strcmp(name, "way") || !strcmp(name, "relation"))) { char *xtmp; xtmp = extractAttribute(token, tokens, "user"); if (xtmp) { addItem(&(osmdata->tags), "osm_user", (char *)xtmp, 0); } xtmp = extractAttribute(token, tokens, "uid"); if (xtmp) { addItem(&(osmdata->tags), "osm_uid", (char *)xtmp, 0); } xtmp = extractAttribute(token, tokens, "version"); if (xtmp) { addItem(&(osmdata->tags), "osm_version", (char *)xtmp, 0); } xtmp = extractAttribute(token, tokens, "timestamp"); if (xtmp) { addItem(&(osmdata->tags), "osm_timestamp", (char *)xtmp, 0); } } } static void EndElement(const char *name, struct osmdata_t *osmdata) { if (!strcmp(name, "node")) { if (node_wanted(osmdata, osmdata->node_lat, osmdata->node_lon)) { reproject(&(osmdata->node_lat), &(osmdata->node_lon)); if( osmdata->action == ACTION_CREATE ) osmdata->out->node_add(osmdata->osm_id, osmdata->node_lat, osmdata->node_lon, &(osmdata->tags)); else if( osmdata->action == ACTION_MODIFY ) osmdata->out->node_modify(osmdata->osm_id, osmdata->node_lat, osmdata->node_lon, &(osmdata->tags)); else if( osmdata->action == ACTION_DELETE ) osmdata->out->node_delete(osmdata->osm_id); else { fprintf( stderr, "Don't know action for node %" PRIdOSMID "\n", osmdata->osm_id ); exit_nicely(); } } resetList(&(osmdata->tags)); } else if (!strcmp(name, "way")) { if( osmdata->action == ACTION_CREATE ) osmdata->out->way_add(osmdata->osm_id, osmdata->nds, osmdata->nd_count, &(osmdata->tags) ); else if( osmdata->action == ACTION_MODIFY ) 
osmdata->out->way_modify(osmdata->osm_id, osmdata->nds, osmdata->nd_count, &(osmdata->tags) ); else if( osmdata->action == ACTION_DELETE ) osmdata->out->way_delete(osmdata->osm_id); else { fprintf( stderr, "Don't know action for way %" PRIdOSMID "\n", osmdata->osm_id ); exit_nicely(); } resetList(&(osmdata->tags)); } else if (!strcmp(name, "relation")) { if( osmdata->action == ACTION_CREATE ) osmdata->out->relation_add(osmdata->osm_id, osmdata->members, osmdata->member_count, &(osmdata->tags)); else if( osmdata->action == ACTION_MODIFY ) osmdata->out->relation_modify(osmdata->osm_id, osmdata->members, osmdata->member_count, &(osmdata->tags)); else if( osmdata->action == ACTION_DELETE ) osmdata->out->relation_delete(osmdata->osm_id); else { fprintf( stderr, "Don't know action for relation %" PRIdOSMID "\n", osmdata->osm_id ); exit_nicely(); } resetList(&(osmdata->tags)); resetMembers(osmdata); } else if (!strcmp(name, "tag")) { /* ignore */ } else if (!strcmp(name, "nd")) { /* ignore */ } else if (!strcmp(name, "member")) { /* ignore */ } else if (!strcmp(name, "osm")) { printStatus(osmdata); osmdata->filetype = FILETYPE_NONE; } else if (!strcmp(name, "osmChange")) { printStatus(osmdata); osmdata->filetype = FILETYPE_NONE; } else if (!strcmp(name, "planetdiff")) { printStatus(osmdata); osmdata->filetype = FILETYPE_NONE; } else if (!strcmp(name, "bound")) { /* ignore */ } else if (!strcmp(name, "bounds")) { /* ignore */ } else if (!strcmp(name, "changeset")) { /* ignore */ resetList(&(osmdata->tags)); /* We may have accumulated some tags even if we ignored the changeset */ } else if (!strcmp(name, "add")) { osmdata->action = ACTION_NONE; } else if (!strcmp(name, "create")) { osmdata->action = ACTION_NONE; } else if (!strcmp(name, "modify")) { osmdata->action = ACTION_NONE; } else if (!strcmp(name, "delete")) { osmdata->action = ACTION_NONE; } else { fprintf(stderr, "%s: Unknown element name: %s\n", __FUNCTION__, name); } } static void process(char *line, struct 
osmdata_t *osmdata) { char *lt = strchr(line, '<'); if (lt) { char *spc = strchr(lt+1, ' '); char *gt = strchr(lt+1, '>'); char *nx = spc; if (*(lt+1) == '/') { *gt = 0; EndElement(lt+2, osmdata); } else { int slash = 0; if (gt != NULL) { *gt-- = 0; if (nx == NULL || gt < nx) nx = gt; while(gt>lt) { if (*gt=='/') { slash=1; *gt=0; break; } if (!isspace(*gt)) break; gt--; } } *nx++ = 0; /* printf ("nx=%d, lt+1=#%s#\n", nx-lt,lt+1); */ StartElement(lt+1, nx, osmdata); if (slash) EndElement(lt+1, osmdata); } } } int streamFilePrimitive(char *filename, int sanitize UNUSED, struct osmdata_t *osmdata) { struct Input *i; char buffer[65536]; int bufsz = 0; int offset = 0; char *nl; i = inputOpen(filename); if (i != NULL) { while(1) { bufsz = bufsz + readFile(i, buffer + bufsz, sizeof(buffer) - bufsz - 1); buffer[bufsz] = 0; nl = strchr(buffer, '\n'); if (nl == 0) break; *nl=0; while (nl && nl < buffer + bufsz) { *nl = 0; process(buffer + offset, osmdata); offset = nl - buffer + 1; nl = strchr(buffer + offset, '\n'); } memcpy(buffer, buffer + offset, bufsz - offset); bufsz = bufsz - offset; offset = 0; } } else { fprintf(stderr, "Unable to open %s\n", filename); return 1; } inputClose(i); return 0; } osm2pgsql-0.82.0/parse-primitive.h000066400000000000000000000024321213272333300170210ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #ifndef PARSE_PRIMITIVE_H #define PARSE_PRIMITIVE_H int streamFilePrimitive(char *filename, int sanitize, struct osmdata_t *osmdata); #endif osm2pgsql-0.82.0/parse-xml2.c000066400000000000000000000340731213272333300156740ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
#----------------------------------------------------------------------------- */ #define _GNU_SOURCE #include #include #include #include #include #include #include #include "osmtypes.h" #include "sanitizer.h" #include "reprojection.h" #include "input.h" #include "output.h" #include "parse-xml2.h" /* Parses the action="foo" tags in JOSM change files. Obvisouly not useful from osmChange files */ static actions_t ParseAction( xmlTextReaderPtr reader, struct osmdata_t *osmdata ) { actions_t new_action; xmlChar *action; if( osmdata->filetype == FILETYPE_OSMCHANGE || osmdata->filetype == FILETYPE_PLANETDIFF ) return osmdata->action; new_action = ACTION_NONE; action = xmlTextReaderGetAttribute( reader, BAD_CAST "action" ); if( action == NULL ) new_action = ACTION_CREATE; else if( strcmp((char *)action, "modify") == 0 ) new_action = ACTION_MODIFY; else if( strcmp((char *)action, "delete") == 0 ) new_action = ACTION_DELETE; else { fprintf( stderr, "Unknown value for action: %s\n", (char*)action ); exit_nicely(); } return new_action; } static void StartElement(xmlTextReaderPtr reader, const xmlChar *name, struct osmdata_t *osmdata) { xmlChar *xid, *xlat, *xlon, *xk, *xv, *xrole, *xtype; char *k; if (osmdata->filetype == FILETYPE_NONE) { if (xmlStrEqual(name, BAD_CAST "osm")) { osmdata->filetype = FILETYPE_OSM; osmdata->action = ACTION_CREATE; } else if (xmlStrEqual(name, BAD_CAST "osmChange")) { osmdata->filetype = FILETYPE_OSMCHANGE; osmdata->action = ACTION_NONE; } else if (xmlStrEqual(name, BAD_CAST "planetdiff")) { osmdata->filetype = FILETYPE_PLANETDIFF; osmdata->action = ACTION_NONE; } else { fprintf( stderr, "Unknown XML document type: %s\n", name ); exit_nicely(); } return; } if (xmlStrEqual(name, BAD_CAST "node")) { xid = xmlTextReaderGetAttribute(reader, BAD_CAST "id"); xlon = xmlTextReaderGetAttribute(reader, BAD_CAST "lon"); xlat = xmlTextReaderGetAttribute(reader, BAD_CAST "lat"); assert(xid); assert(xlon); assert(xlat); osmdata->osm_id = strtoosmid((char 
        *)xid, NULL, 10);
        osmdata->node_lon = strtod((char *)xlon, NULL);
        osmdata->node_lat = strtod((char *)xlat, NULL);
        osmdata->action = ParseAction( reader , osmdata);

        if (osmdata->osm_id > osmdata->max_node)
            osmdata->max_node = osmdata->osm_id;

        /* Remember when the first node arrived so progress can be reported. */
        if (osmdata->count_node == 0) {
            time(&osmdata->start_node);
        }
        osmdata->count_node++;
        if (osmdata->count_node%10000 == 0)
            printStatus(osmdata);

        xmlFree(xid);
        xmlFree(xlon);
        xmlFree(xlat);
    } else if (xmlStrEqual(name, BAD_CAST "tag")) {
        xk = xmlTextReaderGetAttribute(reader, BAD_CAST "k");
        assert(xk);

        /* 'created_by' and 'source' are common and not interesting to mapnik renderer */
        if (strcmp((char *)xk, "created_by") && strcmp((char *)xk, "source")) {
            char *p;
            xv = xmlTextReaderGetAttribute(reader, BAD_CAST "v");
            assert(xv);
            k = (char *)xmlStrdup(xk);

            /* Fold spaces in tag keys to '_' for downstream consumers. */
            while ((p = strchr(k, ' ')))
                *p = '_';

            addItem(&(osmdata->tags), k, (char *)xv, 0);
            xmlFree(k);
            xmlFree(xv);
        }
        xmlFree(xk);
    } else if (xmlStrEqual(name, BAD_CAST "way")) {
        xid = xmlTextReaderGetAttribute(reader, BAD_CAST "id");
        assert(xid);
        osmdata->osm_id = strtoosmid((char *)xid, NULL, 10);
        osmdata->action = ParseAction( reader, osmdata );

        if (osmdata->osm_id > osmdata->max_way)
            osmdata->max_way = osmdata->osm_id;

        if (osmdata->count_way == 0) {
            time(&osmdata->start_way);
        }
        osmdata->count_way++;
        if (osmdata->count_way%1000 == 0)
            printStatus(osmdata);

        /* Node refs for this way are collected by the following <nd> children. */
        osmdata->nd_count = 0;
        xmlFree(xid);
    } else if (xmlStrEqual(name, BAD_CAST "nd")) {
        xid = xmlTextReaderGetAttribute(reader, BAD_CAST "ref");
        assert(xid);

        osmdata->nds[osmdata->nd_count++] = strtoosmid( (char *)xid, NULL, 10 );

        /* Grow the node-ref array on demand. */
        if( osmdata->nd_count >= osmdata->nd_max )
            realloc_nodes(osmdata);
        xmlFree(xid);
    } else if (xmlStrEqual(name, BAD_CAST "relation")) {
        xid = xmlTextReaderGetAttribute(reader, BAD_CAST "id");
        assert(xid);
        osmdata->osm_id = strtoosmid((char *)xid, NULL, 10);
        osmdata->action = ParseAction( reader, osmdata );

        if (osmdata->osm_id > osmdata->max_rel)
            osmdata->max_rel = osmdata->osm_id;

        if (osmdata->count_rel == 0) {
            time(&osmdata->start_rel);
        }
        osmdata->count_rel++;
        if (osmdata->count_rel%10 == 0)
            printStatus(osmdata);

        /* Members of this relation are collected by the following <member> children. */
        osmdata->member_count = 0;
        xmlFree(xid);
    } else if (xmlStrEqual(name, BAD_CAST "member")) {
        xrole = xmlTextReaderGetAttribute(reader, BAD_CAST "role");
        assert(xrole);
        xtype = xmlTextReaderGetAttribute(reader, BAD_CAST "type");
        assert(xtype);
        xid = xmlTextReaderGetAttribute(reader, BAD_CAST "ref");
        assert(xid);

        osmdata->members[osmdata->member_count].id = strtoosmid( (char *)xid, NULL, 0 );
        osmdata->members[osmdata->member_count].role = strdup( (char *)xrole );

        /* Currently we are only interested in 'way' members since these form polygons with holes */
        if (xmlStrEqual(xtype, BAD_CAST "way"))
            osmdata->members[osmdata->member_count].type = OSMTYPE_WAY;
        if (xmlStrEqual(xtype, BAD_CAST "node"))
            osmdata->members[osmdata->member_count].type = OSMTYPE_NODE;
        if (xmlStrEqual(xtype, BAD_CAST "relation"))
            osmdata->members[osmdata->member_count].type = OSMTYPE_RELATION;
        osmdata->member_count++;

        if( osmdata->member_count >= osmdata->member_max )
            realloc_members(osmdata);
        xmlFree(xid);
        xmlFree(xrole);
        xmlFree(xtype);
    } else if (xmlStrEqual(name, BAD_CAST "add") ||
               xmlStrEqual(name, BAD_CAST "create")) {
        osmdata->action = ACTION_MODIFY; /* Turns all creates into modifies, makes it resilient against inconsistent snapshots. */
    } else if (xmlStrEqual(name, BAD_CAST "modify")) {
        osmdata->action = ACTION_MODIFY;
    } else if (xmlStrEqual(name, BAD_CAST "delete")) {
        osmdata->action = ACTION_DELETE;
    } else if (xmlStrEqual(name, BAD_CAST "bound")) {
        /* ignore */
    } else if (xmlStrEqual(name, BAD_CAST "bounds")) {
        /* ignore */
    } else if (xmlStrEqual(name, BAD_CAST "changeset")) {
        /* ignore */
    } else {
        fprintf(stderr, "%s: Unknown element name: %s\n", __FUNCTION__, name);
    }

    /* Collect extra attribute information and add as tags */
    if (osmdata->extra_attributes && (xmlStrEqual(name, BAD_CAST "node") ||
                                      xmlStrEqual(name, BAD_CAST "way") ||
                                      xmlStrEqual(name, BAD_CAST "relation")))
    {
        xmlChar *xtmp;

        xtmp = xmlTextReaderGetAttribute(reader, BAD_CAST "user");
        if (xtmp) {
            addItem(&(osmdata->tags), "osm_user", (char *)xtmp, 0);
            xmlFree(xtmp);
        }

        xtmp = xmlTextReaderGetAttribute(reader, BAD_CAST "uid");
        if (xtmp) {
            addItem(&(osmdata->tags), "osm_uid", (char *)xtmp, 0);
            xmlFree(xtmp);
        }

        xtmp = xmlTextReaderGetAttribute(reader, BAD_CAST "version");
        if (xtmp) {
            addItem(&(osmdata->tags), "osm_version", (char *)xtmp, 0);
            xmlFree(xtmp);
        }

        xtmp = xmlTextReaderGetAttribute(reader, BAD_CAST "timestamp");
        if (xtmp) {
            addItem(&(osmdata->tags), "osm_timestamp", (char *)xtmp, 0);
            xmlFree(xtmp);
        }
    }
}

/* Handles an XML end element: dispatches the completed object to the active
   output backend according to the pending action, then resets the per-object
   accumulators (tag list, and for relations the member list). */
static void EndElement(const xmlChar *name, struct osmdata_t *osmdata)
{
    if (xmlStrEqual(name, BAD_CAST "node")) {
        /* Only nodes inside the configured bounding box are emitted. */
        if (node_wanted(osmdata, osmdata->node_lat, osmdata->node_lon)) {
            reproject(&(osmdata->node_lat), &(osmdata->node_lon));
            if( osmdata->action == ACTION_CREATE )
                osmdata->out->node_add(osmdata->osm_id, osmdata->node_lat, osmdata->node_lon, &(osmdata->tags));
            else if( osmdata->action == ACTION_MODIFY )
                osmdata->out->node_modify(osmdata->osm_id, osmdata->node_lat, osmdata->node_lon, &(osmdata->tags));
            else if( osmdata->action == ACTION_DELETE )
                osmdata->out->node_delete(osmdata->osm_id);
            else {
                fprintf( stderr, "Don't know action for node %" PRIdOSMID "\n", osmdata->osm_id );
                exit_nicely();
            }
        }
        resetList(&(osmdata->tags));
    } else if (xmlStrEqual(name, BAD_CAST "way")) {
        if( osmdata->action == ACTION_CREATE )
            osmdata->out->way_add(osmdata->osm_id, osmdata->nds, osmdata->nd_count, &(osmdata->tags) );
        else if( osmdata->action == ACTION_MODIFY )
            osmdata->out->way_modify(osmdata->osm_id, osmdata->nds, osmdata->nd_count, &(osmdata->tags) );
        else if( osmdata->action == ACTION_DELETE )
            osmdata->out->way_delete(osmdata->osm_id);
        else {
            fprintf( stderr, "Don't know action for way %" PRIdOSMID "\n", osmdata->osm_id );
            exit_nicely();
        }
        resetList(&(osmdata->tags));
    } else if (xmlStrEqual(name, BAD_CAST "relation")) {
        if( osmdata->action == ACTION_CREATE )
            osmdata->out->relation_add(osmdata->osm_id, osmdata->members, osmdata->member_count, &(osmdata->tags));
        else if( osmdata->action == ACTION_MODIFY )
            osmdata->out->relation_modify(osmdata->osm_id, osmdata->members, osmdata->member_count, &(osmdata->tags));
        else if( osmdata->action == ACTION_DELETE )
            osmdata->out->relation_delete(osmdata->osm_id);
        else {
            fprintf( stderr, "Don't know action for relation %" PRIdOSMID "\n", osmdata->osm_id );
            exit_nicely();
        }
        resetList(&(osmdata->tags));
        resetMembers(osmdata);
    } else if (xmlStrEqual(name, BAD_CAST "tag")) {
        /* ignore */
    } else if (xmlStrEqual(name, BAD_CAST "nd")) {
        /* ignore */
    } else if (xmlStrEqual(name, BAD_CAST "member")) {
        /* ignore */
    } else if (xmlStrEqual(name, BAD_CAST "osm")) {
        printStatus(osmdata);
        osmdata->filetype = FILETYPE_NONE;
    } else if (xmlStrEqual(name, BAD_CAST "osmChange")) {
        printStatus(osmdata);
        osmdata->filetype = FILETYPE_NONE;
    } else if (xmlStrEqual(name, BAD_CAST "planetdiff")) {
        printStatus(osmdata);
        osmdata->filetype = FILETYPE_NONE;
    } else if (xmlStrEqual(name, BAD_CAST "bound")) {
        /* ignore */
    } else if (xmlStrEqual(name, BAD_CAST "bounds")) {
        /* ignore */
    } else if (xmlStrEqual(name, BAD_CAST "changeset")) {
        /* ignore */
        resetList(&(osmdata->tags)); /* We may have accumulated some tags even if we ignored the changeset */
    } else if
(xmlStrEqual(name, BAD_CAST "add")) { osmdata->action = ACTION_NONE; } else if (xmlStrEqual(name, BAD_CAST "create")) { osmdata->action = ACTION_NONE; } else if (xmlStrEqual(name, BAD_CAST "modify")) { osmdata->action = ACTION_NONE; } else if (xmlStrEqual(name, BAD_CAST "delete")) { osmdata->action = ACTION_NONE; } else { fprintf(stderr, "%s: Unknown element name: %s\n", __FUNCTION__, name); } } static void processNode(xmlTextReaderPtr reader, struct osmdata_t *osmdata) { xmlChar *name; name = xmlTextReaderName(reader); if (name == NULL) name = xmlStrdup(BAD_CAST "--"); switch(xmlTextReaderNodeType(reader)) { case XML_READER_TYPE_ELEMENT: StartElement(reader, name, osmdata); if (xmlTextReaderIsEmptyElement(reader)) EndElement(name, osmdata); /* No end_element for self closing tags! */ break; case XML_READER_TYPE_END_ELEMENT: EndElement(name, osmdata); break; case XML_READER_TYPE_SIGNIFICANT_WHITESPACE: /* Ignore */ break; default: fprintf(stderr, "Unknown node type %d\n", xmlTextReaderNodeType(reader)); break; } xmlFree(name); } int streamFileXML2(char *filename, int sanitize, struct osmdata_t *osmdata) { xmlTextReaderPtr reader; int ret = 0; if (sanitize) reader = sanitizerOpen(filename); else reader = inputUTF8(filename); if (reader != NULL) { ret = xmlTextReaderRead(reader); while (ret == 1) { processNode(reader, osmdata); ret = xmlTextReaderRead(reader); } if (ret != 0) { fprintf(stderr, "%s : failed to parse\n", filename); return ret; } xmlFreeTextReader(reader); } else { fprintf(stderr, "Unable to open %s\n", filename); return 1; } return 0; } osm2pgsql-0.82.0/parse-xml2.h000066400000000000000000000024131213272333300156720ustar00rootroot00000000000000/* #----------------------------------------------------------------------------- # osm2pgsql - converts planet.osm file into PostgreSQL # compatible output suitable to be rendered by mapnik #----------------------------------------------------------------------------- # Original Python implementation by Artem 
Pavlenko # Re-implementation by Jon Burgess, Copyright 2006 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. #----------------------------------------------------------------------------- */ #ifndef PARSE_XML2_H #define PARSE_XML2_h int streamFileXML2(char *filename, int sanitize, struct osmdata_t *osmdata); #endif osm2pgsql-0.82.0/pgsql.c000066400000000000000000000077041213272333300150310ustar00rootroot00000000000000/* Helper functions for the postgresql connections */ #include #include #include #include #include #include "osmtypes.h" /* For exit_nicely() */ #include "pgsql.h" void escape(char *out, int len, const char *in) { /* Apply escaping of TEXT COPY data Escape: backslash itself, newline, carriage return, and the current delimiter character (tab) file:///usr/share/doc/postgresql-8.1.8/html/sql-copy.html */ int count = 0; const char *old_in = in, *old_out = out; if (!len) return; while(*in && count < len-3) { switch(*in) { case '\\': *out++ = '\\'; *out++ = '\\'; count+= 2; break; /* case 8: *out++ = '\\'; *out++ = '\b'; count+= 2; break; */ /* case 12: *out++ = '\\'; *out++ = '\f'; count+= 2; break; */ case '\n': *out++ = '\\'; *out++ = '\n'; count+= 2; break; case '\r': *out++ = '\\'; *out++ = '\r'; count+= 2; break; case '\t': *out++ = '\\'; *out++ = '\t'; count+= 2; break; /* case 11: *out++ = '\\'; *out++ = 
'\v'; count+= 2; break; */ default: *out++ = *in; count++; break; } in++; } *out = '\0'; if (*in) fprintf(stderr, "%s truncated at %d chars: %s\n%s\n", __FUNCTION__, count, old_in, old_out); } int pgsql_exec(PGconn *sql_conn, ExecStatusType expect, const char *fmt, ...) { PGresult *res; va_list ap; char *sql, *nsql; int n, size = 100; /* Based on vprintf manual page */ /* Guess we need no more than 100 bytes. */ if ((sql = malloc(size)) == NULL) { fprintf(stderr, "Memory allocation failed\n"); exit_nicely(); } while (1) { /* Try to print in the allocated space. */ va_start(ap, fmt); n = vsnprintf(sql, size, fmt, ap); va_end(ap); /* If that worked, return the string. */ if (n > -1 && n < size) break; /* Else try again with more space. */ if (n > -1) /* glibc 2.1 */ size = n+1; /* precisely what is needed */ else /* glibc 2.0 */ size *= 2; /* twice the old size */ if ((nsql = realloc (sql, size)) == NULL) { free(sql); fprintf(stderr, "Memory re-allocation failed\n"); exit_nicely(); } else { sql = nsql; } } #ifdef DEBUG_PGSQL fprintf( stderr, "Executing: %s\n", sql ); #endif res = PQexec(sql_conn, sql); if (PQresultStatus(res) != expect) { fprintf(stderr, "%s failed: %s\n", sql, PQerrorMessage(sql_conn)); free(sql); PQclear(res); exit_nicely(); } free(sql); PQclear(res); return 0; } int pgsql_CopyData(const char *context, PGconn *sql_conn, const char *sql) { #ifdef DEBUG_PGSQL fprintf( stderr, "%s>>> %s\n", context, sql ); #endif int r = PQputCopyData(sql_conn, sql, strlen(sql)); if (r != 1) { fprintf(stderr, "%s - bad result during COPY, data %s\n", context, sql); exit_nicely(); } return 0; } PGresult *pgsql_execPrepared( PGconn *sql_conn, const char *stmtName, int nParams, const char *const * paramValues, ExecStatusType expect) { #ifdef DEBUG_PGSQL fprintf( stderr, "ExecPrepared: %s\n", stmtName ); #endif PGresult *res = PQexecPrepared(sql_conn, stmtName, nParams, paramValues, NULL, NULL, 0); if (PQresultStatus(res) != expect) { fprintf(stderr, "%s failed: 
%s(%d)\n", stmtName, PQerrorMessage(sql_conn), PQresultStatus(res)); if( nParams ) { int i; fprintf( stderr, "Arguments were: " ); for( i=0; i nanodegrees. The default of granularity of 100 nanodegrees corresponds to about 1cm on the ground, and a full lat or lon fits into 32 bits. Converting an integer to a lattitude or longitude uses the formula: $OUT = IN * granularity / 10**9$. Many encoding schemes use delta coding when representing nodes and relations. */ ////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////// /* Contains the file header. */ message HeaderBlock { optional HeaderBBox bbox = 1; /* Additional tags to aid in parsing this dataset */ repeated string required_features = 4; repeated string optional_features = 5; optional string writingprogram = 16; optional string source = 17; // From the bbox field. } /** The bounding box field in the OSM header. BBOX, as used in the OSM header. Units are always in nanodegrees -- they do not obey granularity rules. */ message HeaderBBox { required sint64 left = 1; required sint64 right = 2; required sint64 top = 3; required sint64 bottom = 4; } /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// message PrimitiveBlock { required StringTable stringtable = 1; repeated PrimitiveGroup primitivegroup = 2; // Granularity, units of nanodegrees, used to store coordinates in this block optional int32 granularity = 17 [default=100]; // Offset value between the output coordinates coordinates and the granularity grid in unites of nanodegrees. optional int64 lat_offset = 19 [default=0]; optional int64 lon_offset = 20 [default=0]; // Granularity of dates, normally represented in units of milliseconds since the 1970 epoch. 
optional int32 date_granularity = 18 [default=1000]; // Proposed extension: //optional BBox bbox = 19; } // Group of OSMPrimitives. All primitives in a group must be the same type. message PrimitiveGroup { repeated Node nodes = 1; optional DenseNodes dense = 2; repeated Way ways = 3; repeated Relation relations = 4; repeated ChangeSet changesets = 5; } /** String table, contains the common strings in each block. Note that we reserve index '0' as a delimiter, so the entry at that index in the table is ALWAYS blank and unused. */ message StringTable { repeated bytes s = 1; } /* Optional metadata that may be included into each primitive. */ message Info { optional int32 version = 1 [default = -1]; optional int32 timestamp = 2; optional int64 changeset = 3; optional int32 uid = 4; optional int32 user_sid = 5; // String IDs } /** Optional metadata that may be included into each primitive. Special dense format used in DenseNodes. */ message DenseInfo { repeated int32 version = 1 [packed = true]; repeated sint64 timestamp = 2 [packed = true]; // DELTA coded repeated sint64 changeset = 3 [packed = true]; // DELTA coded repeated sint32 uid = 4 [packed = true]; // DELTA coded repeated sint32 user_sid = 5 [packed = true]; // String IDs for usernames. DELTA coded } // TODO: REMOVE THIS? NOT in osmosis schema. message ChangeSet { required int64 id = 1; // Parallel arrays. repeated uint32 keys = 2 [packed = true]; // String IDs. repeated uint32 vals = 3 [packed = true]; // String IDs. optional Info info = 4; required int64 created_at = 8; optional int64 closetime_delta = 9; required bool open = 10; optional HeaderBBox bbox = 11; } message Node { required sint64 id = 1; // Parallel arrays. repeated uint32 keys = 2 [packed = true]; // String IDs. repeated uint32 vals = 3 [packed = true]; // String IDs. optional Info info = 4; // May be omitted in omitmeta required sint64 lat = 8; required sint64 lon = 9; } /* Used to densly represent a sequence of nodes that do not have any tags. 
We represent these nodes columnwise as five columns: ID's, lats, and lons, all delta coded. When metadata is not omitted, We encode keys & vals for all nodes as a single array of integers containing key-stringid and val-stringid, using a stringid of 0 as a delimiter between nodes. ( ( )* '0' )* */ message DenseNodes { repeated sint64 id = 1 [packed = true]; // DELTA coded //repeated Info info = 4; optional DenseInfo denseinfo = 5; repeated sint64 lat = 8 [packed = true]; // DELTA coded repeated sint64 lon = 9 [packed = true]; // DELTA coded // Special packing of keys and vals into one array. May be empty if all nodes in this block are tagless. repeated int32 keys_vals = 10 [packed = true]; } message Way { required int64 id = 1; // Parallel arrays. repeated uint32 keys = 2 [packed = true]; repeated uint32 vals = 3 [packed = true]; optional Info info = 4; repeated sint64 refs = 8 [packed = true]; // DELTA coded } message Relation { enum MemberType { NODE = 0; WAY = 1; RELATION = 2; } required int64 id = 1; // Parallel arrays. repeated uint32 keys = 2 [packed = true]; repeated uint32 vals = 3 [packed = true]; optional Info info = 4; // Parallel arrays repeated int32 roles_sid = 8 [packed = true]; repeated sint64 memids = 9 [packed = true]; // DELTA encoded repeated MemberType types = 10 [packed = true]; } osm2pgsql-0.82.0/rb.c000066400000000000000000000607041213272333300143050ustar00rootroot00000000000000/* Produced by texiweb from libavl.w. */ /* libavl - library for manipulation of binary trees. Copyright (C) 1998-2002, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. 
This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The author may be contacted at on the Internet, or write to Ben Pfaff, Stanford University, Computer Science Dept., 353 Serra Mall, Stanford CA 94305, USA. */ #include #include #include #include #include "rb.h" /* Creates and returns a new table with comparison function |compare| using parameter |param| and memory allocator |allocator|. Returns |NULL| if memory allocation failed. */ struct rb_table * rb_create (rb_comparison_func *compare, void *param, struct libavl_allocator *allocator) { struct rb_table *tree; assert (compare != NULL); if (allocator == NULL) allocator = &rb_allocator_default; tree = allocator->libavl_malloc (allocator, sizeof *tree); if (tree == NULL) return NULL; tree->rb_root = NULL; tree->rb_compare = compare; tree->rb_param = param; tree->rb_alloc = allocator; tree->rb_count = 0; tree->rb_generation = 0; return tree; } /* Search |tree| for an item matching |item|, and return it if found. Otherwise return |NULL|. */ void * rb_find (const struct rb_table *tree, const void *item) { const struct rb_node *p; assert (tree != NULL && item != NULL); for (p = tree->rb_root; p != NULL; ) { int cmp = tree->rb_compare (item, p->rb_data, tree->rb_param); if (cmp < 0) p = p->rb_link[0]; else if (cmp > 0) p = p->rb_link[1]; else /* |cmp == 0| */ return p->rb_data; } return NULL; } /* Inserts |item| into |tree| and returns a pointer to |item|'s address. If a duplicate item is found in the tree, returns a pointer to the duplicate without inserting |item|. Returns |NULL| in case of memory allocation failure. 
*/ void ** rb_probe (struct rb_table *tree, void *item) { struct rb_node *pa[RB_MAX_HEIGHT]; /* Nodes on stack. */ unsigned char da[RB_MAX_HEIGHT]; /* Directions moved from stack nodes. */ int k; /* Stack height. */ struct rb_node *p; /* Traverses tree looking for insertion point. */ struct rb_node *n; /* Newly inserted node. */ assert (tree != NULL && item != NULL); pa[0] = (struct rb_node *) &tree->rb_root; da[0] = 0; k = 1; for (p = tree->rb_root; p != NULL; p = p->rb_link[da[k - 1]]) { int cmp = tree->rb_compare (item, p->rb_data, tree->rb_param); if (cmp == 0) return &p->rb_data; pa[k] = p; da[k++] = cmp > 0; } n = pa[k - 1]->rb_link[da[k - 1]] = tree->rb_alloc->libavl_malloc (tree->rb_alloc, sizeof *n); if (n == NULL) return NULL; n->rb_data = item; n->rb_link[0] = n->rb_link[1] = NULL; n->rb_color = RB_RED; tree->rb_count++; tree->rb_generation++; while (k >= 3 && pa[k - 1]->rb_color == RB_RED) { if (da[k - 2] == 0) { struct rb_node *y = pa[k - 2]->rb_link[1]; if (y != NULL && y->rb_color == RB_RED) { pa[k - 1]->rb_color = y->rb_color = RB_BLACK; pa[k - 2]->rb_color = RB_RED; k -= 2; } else { struct rb_node *x; if (da[k - 1] == 0) y = pa[k - 1]; else { x = pa[k - 1]; y = x->rb_link[1]; x->rb_link[1] = y->rb_link[0]; y->rb_link[0] = x; pa[k - 2]->rb_link[0] = y; } x = pa[k - 2]; x->rb_color = RB_RED; y->rb_color = RB_BLACK; x->rb_link[0] = y->rb_link[1]; y->rb_link[1] = x; pa[k - 3]->rb_link[da[k - 3]] = y; break; } } else { struct rb_node *y = pa[k - 2]->rb_link[0]; if (y != NULL && y->rb_color == RB_RED) { pa[k - 1]->rb_color = y->rb_color = RB_BLACK; pa[k - 2]->rb_color = RB_RED; k -= 2; } else { struct rb_node *x; if (da[k - 1] == 1) y = pa[k - 1]; else { x = pa[k - 1]; y = x->rb_link[0]; x->rb_link[0] = y->rb_link[1]; y->rb_link[1] = x; pa[k - 2]->rb_link[1] = y; } x = pa[k - 2]; x->rb_color = RB_RED; y->rb_color = RB_BLACK; x->rb_link[1] = y->rb_link[0]; y->rb_link[0] = x; pa[k - 3]->rb_link[da[k - 3]] = y; break; } } } tree->rb_root->rb_color = 
RB_BLACK; return &n->rb_data; } /* Inserts |item| into |table|. Returns |NULL| if |item| was successfully inserted or if a memory allocation error occurred. Otherwise, returns the duplicate item. */ void * rb_insert (struct rb_table *table, void *item) { void **p = rb_probe (table, item); return p == NULL || *p == item ? NULL : *p; } /* Inserts |item| into |table|, replacing any duplicate item. Returns |NULL| if |item| was inserted without replacing a duplicate, or if a memory allocation error occurred. Otherwise, returns the item that was replaced. */ void * rb_replace (struct rb_table *table, void *item) { void **p = rb_probe (table, item); if (p == NULL || *p == item) return NULL; else { void *r = *p; *p = item; return r; } } /* Deletes from |tree| and returns an item matching |item|. Returns a null pointer if no matching item found. */ void * rb_delete (struct rb_table *tree, const void *item) { struct rb_node *pa[RB_MAX_HEIGHT]; /* Nodes on stack. */ unsigned char da[RB_MAX_HEIGHT]; /* Directions moved from stack nodes. */ int k; /* Stack height. */ struct rb_node *p; /* The node to delete, or a node part way to it. */ int cmp; /* Result of comparison between |item| and |p|. 
*/ assert (tree != NULL && item != NULL); k = 0; p = (struct rb_node *) &tree->rb_root; for (cmp = -1; cmp != 0; cmp = tree->rb_compare (item, p->rb_data, tree->rb_param)) { int dir = cmp > 0; pa[k] = p; da[k++] = dir; p = p->rb_link[dir]; if (p == NULL) return NULL; } item = p->rb_data; if (p->rb_link[1] == NULL) pa[k - 1]->rb_link[da[k - 1]] = p->rb_link[0]; else { enum rb_color t; struct rb_node *r = p->rb_link[1]; if (r->rb_link[0] == NULL) { r->rb_link[0] = p->rb_link[0]; t = r->rb_color; r->rb_color = p->rb_color; p->rb_color = t; pa[k - 1]->rb_link[da[k - 1]] = r; da[k] = 1; pa[k++] = r; } else { struct rb_node *s; int j = k++; for (;;) { da[k] = 0; pa[k++] = r; s = r->rb_link[0]; if (s->rb_link[0] == NULL) break; r = s; } da[j] = 1; pa[j] = s; pa[j - 1]->rb_link[da[j - 1]] = s; s->rb_link[0] = p->rb_link[0]; r->rb_link[0] = s->rb_link[1]; s->rb_link[1] = p->rb_link[1]; t = s->rb_color; s->rb_color = p->rb_color; p->rb_color = t; } } if (p->rb_color == RB_BLACK) { for (;;) { struct rb_node *x = pa[k - 1]->rb_link[da[k - 1]]; if (x != NULL && x->rb_color == RB_RED) { x->rb_color = RB_BLACK; break; } if (k < 2) break; if (da[k - 1] == 0) { struct rb_node *w = pa[k - 1]->rb_link[1]; if (w->rb_color == RB_RED) { w->rb_color = RB_BLACK; pa[k - 1]->rb_color = RB_RED; pa[k - 1]->rb_link[1] = w->rb_link[0]; w->rb_link[0] = pa[k - 1]; pa[k - 2]->rb_link[da[k - 2]] = w; pa[k] = pa[k - 1]; da[k] = 0; pa[k - 1] = w; k++; w = pa[k - 1]->rb_link[1]; } if ((w->rb_link[0] == NULL || w->rb_link[0]->rb_color == RB_BLACK) && (w->rb_link[1] == NULL || w->rb_link[1]->rb_color == RB_BLACK)) w->rb_color = RB_RED; else { if (w->rb_link[1] == NULL || w->rb_link[1]->rb_color == RB_BLACK) { struct rb_node *y = w->rb_link[0]; y->rb_color = RB_BLACK; w->rb_color = RB_RED; w->rb_link[0] = y->rb_link[1]; y->rb_link[1] = w; w = pa[k - 1]->rb_link[1] = y; } w->rb_color = pa[k - 1]->rb_color; pa[k - 1]->rb_color = RB_BLACK; w->rb_link[1]->rb_color = RB_BLACK; pa[k - 1]->rb_link[1] = 
w->rb_link[0]; w->rb_link[0] = pa[k - 1]; pa[k - 2]->rb_link[da[k - 2]] = w; break; } } else { struct rb_node *w = pa[k - 1]->rb_link[0]; if (w->rb_color == RB_RED) { w->rb_color = RB_BLACK; pa[k - 1]->rb_color = RB_RED; pa[k - 1]->rb_link[0] = w->rb_link[1]; w->rb_link[1] = pa[k - 1]; pa[k - 2]->rb_link[da[k - 2]] = w; pa[k] = pa[k - 1]; da[k] = 1; pa[k - 1] = w; k++; w = pa[k - 1]->rb_link[0]; } if ((w->rb_link[0] == NULL || w->rb_link[0]->rb_color == RB_BLACK) && (w->rb_link[1] == NULL || w->rb_link[1]->rb_color == RB_BLACK)) w->rb_color = RB_RED; else { if (w->rb_link[0] == NULL || w->rb_link[0]->rb_color == RB_BLACK) { struct rb_node *y = w->rb_link[1]; y->rb_color = RB_BLACK; w->rb_color = RB_RED; w->rb_link[1] = y->rb_link[0]; y->rb_link[0] = w; w = pa[k - 1]->rb_link[0] = y; } w->rb_color = pa[k - 1]->rb_color; pa[k - 1]->rb_color = RB_BLACK; w->rb_link[0]->rb_color = RB_BLACK; pa[k - 1]->rb_link[0] = w->rb_link[1]; w->rb_link[1] = pa[k - 1]; pa[k - 2]->rb_link[da[k - 2]] = w; break; } } k--; } } tree->rb_alloc->libavl_free (tree->rb_alloc, p); tree->rb_count--; tree->rb_generation++; return (void *) item; } /* Refreshes the stack of parent pointers in |trav| and updates its generation number. */ static void trav_refresh (struct rb_traverser *trav) { assert (trav != NULL); trav->rb_generation = trav->rb_table->rb_generation; if (trav->rb_node != NULL) { rb_comparison_func *cmp = trav->rb_table->rb_compare; void *param = trav->rb_table->rb_param; struct rb_node *node = trav->rb_node; struct rb_node *i; trav->rb_height = 0; for (i = trav->rb_table->rb_root; i != node; ) { assert (trav->rb_height < RB_MAX_HEIGHT); assert (i != NULL); trav->rb_stack[trav->rb_height++] = i; i = i->rb_link[cmp (node->rb_data, i->rb_data, param) > 0]; } } } /* Initializes |trav| for use with |tree| and selects the null node. 
*/ void rb_t_init (struct rb_traverser *trav, struct rb_table *tree) { trav->rb_table = tree; trav->rb_node = NULL; trav->rb_height = 0; trav->rb_generation = tree->rb_generation; } /* Initializes |trav| for |tree| and selects and returns a pointer to its least-valued item. Returns |NULL| if |tree| contains no nodes. */ void * rb_t_first (struct rb_traverser *trav, struct rb_table *tree) { struct rb_node *x; assert (tree != NULL && trav != NULL); trav->rb_table = tree; trav->rb_height = 0; trav->rb_generation = tree->rb_generation; x = tree->rb_root; if (x != NULL) while (x->rb_link[0] != NULL) { assert (trav->rb_height < RB_MAX_HEIGHT); trav->rb_stack[trav->rb_height++] = x; x = x->rb_link[0]; } trav->rb_node = x; return x != NULL ? x->rb_data : NULL; } /* Initializes |trav| for |tree| and selects and returns a pointer to its greatest-valued item. Returns |NULL| if |tree| contains no nodes. */ void * rb_t_last (struct rb_traverser *trav, struct rb_table *tree) { struct rb_node *x; assert (tree != NULL && trav != NULL); trav->rb_table = tree; trav->rb_height = 0; trav->rb_generation = tree->rb_generation; x = tree->rb_root; if (x != NULL) while (x->rb_link[1] != NULL) { assert (trav->rb_height < RB_MAX_HEIGHT); trav->rb_stack[trav->rb_height++] = x; x = x->rb_link[1]; } trav->rb_node = x; return x != NULL ? x->rb_data : NULL; } /* Searches for |item| in |tree|. If found, initializes |trav| to the item found and returns the item as well. If there is no matching item, initializes |trav| to the null item and returns |NULL|. 
*/ void * rb_t_find (struct rb_traverser *trav, struct rb_table *tree, void *item) { struct rb_node *p, *q; assert (trav != NULL && tree != NULL && item != NULL); trav->rb_table = tree; trav->rb_height = 0; trav->rb_generation = tree->rb_generation; for (p = tree->rb_root; p != NULL; p = q) { int cmp = tree->rb_compare (item, p->rb_data, tree->rb_param); if (cmp < 0) q = p->rb_link[0]; else if (cmp > 0) q = p->rb_link[1]; else /* |cmp == 0| */ { trav->rb_node = p; return p->rb_data; } assert (trav->rb_height < RB_MAX_HEIGHT); trav->rb_stack[trav->rb_height++] = p; } trav->rb_height = 0; trav->rb_node = NULL; return NULL; } /* Attempts to insert |item| into |tree|. If |item| is inserted successfully, it is returned and |trav| is initialized to its location. If a duplicate is found, it is returned and |trav| is initialized to its location. No replacement of the item occurs. If a memory allocation failure occurs, |NULL| is returned and |trav| is initialized to the null item. */ void * rb_t_insert (struct rb_traverser *trav, struct rb_table *tree, void *item) { void **p; assert (trav != NULL && tree != NULL && item != NULL); p = rb_probe (tree, item); if (p != NULL) { trav->rb_table = tree; trav->rb_node = ((struct rb_node *) ((char *) p - offsetof (struct rb_node, rb_data))); trav->rb_generation = tree->rb_generation - 1; return *p; } else { rb_t_init (trav, tree); return NULL; } } /* Initializes |trav| to have the same current node as |src|. */ void * rb_t_copy (struct rb_traverser *trav, const struct rb_traverser *src) { assert (trav != NULL && src != NULL); if (trav != src) { trav->rb_table = src->rb_table; trav->rb_node = src->rb_node; trav->rb_generation = src->rb_generation; if (trav->rb_generation == trav->rb_table->rb_generation) { trav->rb_height = src->rb_height; memcpy (trav->rb_stack, (const void *) src->rb_stack, sizeof *trav->rb_stack * trav->rb_height); } } return trav->rb_node != NULL ? 
trav->rb_node->rb_data : NULL; } /* Returns the next data item in inorder within the tree being traversed with |trav|, or if there are no more data items returns |NULL|. */ void * rb_t_next (struct rb_traverser *trav) { struct rb_node *x; assert (trav != NULL); if (trav->rb_generation != trav->rb_table->rb_generation) trav_refresh (trav); x = trav->rb_node; if (x == NULL) { return rb_t_first (trav, trav->rb_table); } else if (x->rb_link[1] != NULL) { assert (trav->rb_height < RB_MAX_HEIGHT); trav->rb_stack[trav->rb_height++] = x; x = x->rb_link[1]; while (x->rb_link[0] != NULL) { assert (trav->rb_height < RB_MAX_HEIGHT); trav->rb_stack[trav->rb_height++] = x; x = x->rb_link[0]; } } else { struct rb_node *y; do { if (trav->rb_height == 0) { trav->rb_node = NULL; return NULL; } y = x; x = trav->rb_stack[--trav->rb_height]; } while (y == x->rb_link[1]); } trav->rb_node = x; return x->rb_data; } /* Returns the previous data item in inorder within the tree being traversed with |trav|, or if there are no more data items returns |NULL|. */ void * rb_t_prev (struct rb_traverser *trav) { struct rb_node *x; assert (trav != NULL); if (trav->rb_generation != trav->rb_table->rb_generation) trav_refresh (trav); x = trav->rb_node; if (x == NULL) { return rb_t_last (trav, trav->rb_table); } else if (x->rb_link[0] != NULL) { assert (trav->rb_height < RB_MAX_HEIGHT); trav->rb_stack[trav->rb_height++] = x; x = x->rb_link[0]; while (x->rb_link[1] != NULL) { assert (trav->rb_height < RB_MAX_HEIGHT); trav->rb_stack[trav->rb_height++] = x; x = x->rb_link[1]; } } else { struct rb_node *y; do { if (trav->rb_height == 0) { trav->rb_node = NULL; return NULL; } y = x; x = trav->rb_stack[--trav->rb_height]; } while (y == x->rb_link[0]); } trav->rb_node = x; return x->rb_data; } /* Returns |trav|'s current item. */ void * rb_t_cur (struct rb_traverser *trav) { assert (trav != NULL); return trav->rb_node != NULL ? 
trav->rb_node->rb_data : NULL; } /* Replaces the current item in |trav| by |new| and returns the item replaced. |trav| must not have the null item selected. The new item must not upset the ordering of the tree. */ void * rb_t_replace (struct rb_traverser *trav, void *new) { void *old; assert (trav != NULL && trav->rb_node != NULL && new != NULL); old = trav->rb_node->rb_data; trav->rb_node->rb_data = new; return old; } /* Destroys |new| with |rb_destroy (new, destroy)|, first setting right links of nodes in |stack| within |new| to null pointers to avoid touching uninitialized data. */ static void copy_error_recovery (struct rb_node **stack, int height, struct rb_table *new, rb_item_func *destroy) { assert (stack != NULL && height >= 0 && new != NULL); for (; height > 2; height -= 2) stack[height - 1]->rb_link[1] = NULL; rb_destroy (new, destroy); } /* Copies |org| to a newly created tree, which is returned. If |copy != NULL|, each data item in |org| is first passed to |copy|, and the return values are inserted into the tree, with |NULL| return values taken as indications of failure. On failure, destroys the partially created new tree, applying |destroy|, if non-null, to each item in the new tree so far, and returns |NULL|. If |allocator != NULL|, it is used for allocation in the new tree. Otherwise, the same allocator used for |org| is used. */ struct rb_table * rb_copy (const struct rb_table *org, rb_copy_func *copy, rb_item_func *destroy, struct libavl_allocator *allocator) { struct rb_node *stack[2 * (RB_MAX_HEIGHT + 1)]; int height = 0; struct rb_table *new; const struct rb_node *x; struct rb_node *y; assert (org != NULL); new = rb_create (org->rb_compare, org->rb_param, allocator != NULL ? 
allocator : org->rb_alloc); if (new == NULL) return NULL; new->rb_count = org->rb_count; if (new->rb_count == 0) return new; x = (const struct rb_node *) &org->rb_root; y = (struct rb_node *) &new->rb_root; for (;;) { while (x->rb_link[0] != NULL) { assert (height < 2 * (RB_MAX_HEIGHT + 1)); y->rb_link[0] = new->rb_alloc->libavl_malloc (new->rb_alloc, sizeof *y->rb_link[0]); if (y->rb_link[0] == NULL) { if (y != (struct rb_node *) &new->rb_root) { y->rb_data = NULL; y->rb_link[1] = NULL; } copy_error_recovery (stack, height, new, destroy); return NULL; } stack[height++] = (struct rb_node *) x; stack[height++] = y; x = x->rb_link[0]; y = y->rb_link[0]; } y->rb_link[0] = NULL; for (;;) { y->rb_color = x->rb_color; if (copy == NULL) y->rb_data = x->rb_data; else { y->rb_data = copy (x->rb_data, org->rb_param); if (y->rb_data == NULL) { y->rb_link[1] = NULL; copy_error_recovery (stack, height, new, destroy); return NULL; } } if (x->rb_link[1] != NULL) { y->rb_link[1] = new->rb_alloc->libavl_malloc (new->rb_alloc, sizeof *y->rb_link[1]); if (y->rb_link[1] == NULL) { copy_error_recovery (stack, height, new, destroy); return NULL; } x = x->rb_link[1]; y = y->rb_link[1]; break; } else y->rb_link[1] = NULL; if (height <= 2) return new; y = stack[--height]; x = stack[--height]; } } } /* Frees storage allocated for |tree|. If |destroy != NULL|, applies it to each data item in inorder. */ void rb_destroy (struct rb_table *tree, rb_item_func *destroy) { struct rb_node *p, *q; assert (tree != NULL); for (p = tree->rb_root; p != NULL; p = q) if (p->rb_link[0] == NULL) { q = p->rb_link[1]; if (destroy != NULL && p->rb_data != NULL) destroy (p->rb_data, tree->rb_param); tree->rb_alloc->libavl_free (tree->rb_alloc, p); } else { q = p->rb_link[0]; p->rb_link[0] = q->rb_link[1]; q->rb_link[1] = p; } tree->rb_alloc->libavl_free (tree->rb_alloc, tree); } /* Allocates |size| bytes of space using |malloc()|. Returns a null pointer if allocation fails. 
*/ void * rb_malloc (struct libavl_allocator *allocator, size_t size) { assert (allocator != NULL && size > 0); return malloc (size); } /* Frees |block|. */ void rb_free (struct libavl_allocator *allocator, void *block) { assert (allocator != NULL && block != NULL); free (block); } /* Default memory allocator that uses |malloc()| and |free()|. */ struct libavl_allocator rb_allocator_default = { rb_malloc, rb_free }; #undef NDEBUG #include /* Asserts that |rb_insert()| succeeds at inserting |item| into |table|. */ void (rb_assert_insert) (struct rb_table *table, void *item) { void **p = rb_probe (table, item); assert (p != NULL && *p == item); } /* Asserts that |rb_delete()| really removes |item| from |table|, and returns the removed item. */ void * (rb_assert_delete) (struct rb_table *table, void *item) { void *p = rb_delete (table, item); assert (p != NULL); return p; } osm2pgsql-0.82.0/rb.h000066400000000000000000000105661213272333300143130ustar00rootroot00000000000000/* Produced by texiweb from libavl.w. */ /* libavl - library for manipulation of binary trees. Copyright (C) 1998-2002, 2004 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The author may be contacted at on the Internet, or write to Ben Pfaff, Stanford University, Computer Science Dept., 353 Serra Mall, Stanford CA 94305, USA. 
*/ #ifndef RB_H #define RB_H 1 #include /* Function types. */ typedef int rb_comparison_func (const void *rb_a, const void *rb_b, void *rb_param); typedef void rb_item_func (void *rb_item, void *rb_param); typedef void *rb_copy_func (void *rb_item, void *rb_param); #ifndef LIBAVL_ALLOCATOR #define LIBAVL_ALLOCATOR /* Memory allocator. */ struct libavl_allocator { void *(*libavl_malloc) (struct libavl_allocator *, size_t libavl_size); void (*libavl_free) (struct libavl_allocator *, void *libavl_block); }; #endif /* Default memory allocator. */ extern struct libavl_allocator rb_allocator_default; void *rb_malloc (struct libavl_allocator *, size_t); void rb_free (struct libavl_allocator *, void *); /* Maximum RB height. */ #ifndef RB_MAX_HEIGHT #define RB_MAX_HEIGHT 48 #endif /* Tree data structure. */ struct rb_table { struct rb_node *rb_root; /* Tree's root. */ rb_comparison_func *rb_compare; /* Comparison function. */ void *rb_param; /* Extra argument to |rb_compare|. */ struct libavl_allocator *rb_alloc; /* Memory allocator. */ size_t rb_count; /* Number of items in tree. */ unsigned long rb_generation; /* Generation number. */ }; /* Color of a red-black node. */ enum rb_color { RB_BLACK, /* Black. */ RB_RED /* Red. */ }; /* A red-black tree node. */ struct rb_node { struct rb_node *rb_link[2]; /* Subtrees. */ void *rb_data; /* Pointer to data. */ unsigned char rb_color; /* Color. */ }; /* RB traverser structure. */ struct rb_traverser { struct rb_table *rb_table; /* Tree being traversed. */ struct rb_node *rb_node; /* Current node in tree. */ struct rb_node *rb_stack[RB_MAX_HEIGHT]; /* All the nodes above |rb_node|. */ size_t rb_height; /* Number of nodes in |rb_parent|. */ unsigned long rb_generation; /* Generation number. */ }; /* Table functions. 
*/ struct rb_table *rb_create (rb_comparison_func *, void *, struct libavl_allocator *); struct rb_table *rb_copy (const struct rb_table *, rb_copy_func *, rb_item_func *, struct libavl_allocator *); void rb_destroy (struct rb_table *, rb_item_func *); void **rb_probe (struct rb_table *, void *); void *rb_insert (struct rb_table *, void *); void *rb_replace (struct rb_table *, void *); void *rb_delete (struct rb_table *, const void *); void *rb_find (const struct rb_table *, const void *); void rb_assert_insert (struct rb_table *, void *); void *rb_assert_delete (struct rb_table *, void *); #define rb_count(table) ((size_t) (table)->rb_count) /* Table traverser functions. */ void rb_t_init (struct rb_traverser *, struct rb_table *); void *rb_t_first (struct rb_traverser *, struct rb_table *); void *rb_t_last (struct rb_traverser *, struct rb_table *); void *rb_t_find (struct rb_traverser *, struct rb_table *, void *); void *rb_t_insert (struct rb_traverser *, struct rb_table *, void *); void *rb_t_copy (struct rb_traverser *, const struct rb_traverser *); void *rb_t_next (struct rb_traverser *); void *rb_t_prev (struct rb_traverser *); void *rb_t_cur (struct rb_traverser *); void *rb_t_replace (struct rb_traverser *, void *); #endif /* rb.h */ osm2pgsql-0.82.0/reprojection.c000066400000000000000000000140341213272333300164000ustar00rootroot00000000000000/* reprojection.c * * Convert OSM coordinates to another coordinate system for * the database (usually convert lat/lon to Spherical Mercator * so Mapnik doesn't have to). */ #include #include #include #include #include #include "reprojection.h" #ifndef M_PI #define M_PI 3.14159265358979323846 #endif /** must match expire.tiles.c */ #define EARTH_CIRCUMFERENCE 40075016.68 /** The projection of the source data. Always lat/lon (EPSG:4326). */ static projPJ pj_source = NULL; /** The target projection (used in the PostGIS tables). Controlled by the -l/-M/-m/-E options. 
*/ static projPJ pj_target = NULL; /** The projection used for tiles. Currently this is fixed to be Spherical * Mercator. You will usually have tiles in the same projection as used * for PostGIS, but it is theoretically possible to have your PostGIS data * in, say, lat/lon but still create tiles in Spherical Mercator. */ static projPJ pj_tile = NULL; static int Proj; const struct Projection_Info Projection_Info[] = { [PROJ_LATLONG] = { .descr = "Latlong", .proj4text = "+init=epsg:4326", .srs = 4326, .option = "-l" }, [PROJ_MERC] = { .descr = "WGS84 Mercator", .proj4text = "+proj=merc +datum=WGS84 +k=1.0 +units=m +over +no_defs", .srs = 3395, .option = "-M" }, [PROJ_SPHERE_MERC] = { .descr = "Spherical Mercator", .proj4text = "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 +y_0=0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs", .srs = 900913, .option = "-m" } }; static struct Projection_Info custom_projection; /** defined in expire-tiles.c; depends on the zoom level selected for expiry. */ extern int map_width; /* Positive numbers refer the to the table above, negative numbers are assumed to refer to EPSG codes and it uses the proj4 to find those. */ void project_init(int proj) { char buffer[32]; Proj = proj; /* hard-code the source projection to be lat/lon, since OSM XML always * has coordinates in degrees. */ pj_source = pj_init_plus("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs"); /* hard-code the tile projection to be spherical mercator always. * theoretically this could be made selectable but not all projections * lend themselves well to making tiles; non-spherical mercator tiles * are uncharted waters in OSM. 
*/ pj_tile = pj_init_plus(Projection_Info[PROJ_SPHERE_MERC].proj4text); /* now set the target projection - the only one which is really variable */ if (proj >= 0 && proj < PROJ_COUNT) { pj_target = pj_init_plus(Projection_Info[proj].proj4text); } else if (proj < 0) { if (snprintf(buffer, sizeof(buffer), "+init=epsg:%d", -proj ) >= (int)sizeof(buffer)) { fprintf(stderr, "Buffer overflow computing proj4 initialisation string\n"); exit(1); } pj_target = pj_init_plus(buffer); if (!pj_target) { fprintf (stderr, "Couldn't read EPSG definition (do you have /usr/share/proj/epsg?)\n"); exit(1); } } if (!pj_source || !pj_target || !pj_tile) { fprintf(stderr, "Projection code failed to initialise\n"); exit(1); } if (proj >= 0) return; custom_projection.srs = -proj; custom_projection.proj4text = pj_get_def(pj_target, 0); if (snprintf(buffer, sizeof(buffer), "EPSG:%d", -proj) >= (int)sizeof(buffer)) { fprintf(stderr, "Buffer overflow computing projection description\n"); exit(1); } custom_projection.descr = strdup(buffer); custom_projection.option = "-E"; return; } void project_exit(void) { pj_free(pj_source); pj_source = NULL; pj_free(pj_target); pj_target = NULL; } struct Projection_Info const *project_getprojinfo(void) { if( Proj >= 0 ) return &Projection_Info[Proj]; else return &custom_projection; } void reproject(double *lat, double *lon) { double x[1], y[1], z[1]; /** Caution: This section is only correct if the source projection is lat/lon; * so even if it looks like pj_source was just a variable, things break if * pj_source is something else than lat/lon. */ if (Proj == PROJ_LATLONG) return; if (Proj == PROJ_SPHERE_MERC) { /* The latitude co-ordinate is clipped at slightly larger than the 900913 'world' * extent of +-85.0511 degrees to ensure that the points appear just outside the * edge of the map. 
*/ if (*lat > 85.07) *lat = 85.07; if (*lat < -85.07) *lat = -85.07; *lat = log(tan(M_PI/4.0 + (*lat) * DEG_TO_RAD / 2.0)) * EARTH_CIRCUMFERENCE/(M_PI*2); *lon = (*lon) * EARTH_CIRCUMFERENCE / 360.0; return; } x[0] = *lon * DEG_TO_RAD; y[0] = *lat * DEG_TO_RAD; z[0] = 0; /** end of "caution" section. */ pj_transform(pj_source, pj_target, 1, 1, x, y, z); *lat = y[0]; *lon = x[0]; } /** * Converts from (target) coordinates to tile coordinates. * * The zoom level for the coordinates is implicitly given in the global * variable map_width. */ void coords_to_tile(double *tilex, double *tiley, double lon, double lat) { double x[1], y[1], z[1]; x[0] = lon; y[0] = lat; z[0] = 0; if (Proj == PROJ_LATLONG) { x[0] *= DEG_TO_RAD; y[0] *= DEG_TO_RAD; } /* since pj_tile is always spherical merc, don't bother doing anything if * destination proj is the same. */ if (Proj != PROJ_SPHERE_MERC) { pj_transform(pj_target, pj_tile, 1, 1, x, y, z); /** FIXME: pj_transform could fail if coordinates are outside +/- 85 degrees latitude */ } /* if ever pj_tile were allowed to be PROJ_LATLONG then results would have to * be divided by DEG_TO_RAD here. 
*/ *tilex = map_width * (0.5 + x[0] / EARTH_CIRCUMFERENCE); *tiley = map_width * (0.5 - y[0] / EARTH_CIRCUMFERENCE); } osm2pgsql-0.82.0/reprojection.h000066400000000000000000000012161213272333300164030ustar00rootroot00000000000000/* reprojection.h * * Convert OSM lattitude / longitude from degrees to mercator * so that Mapnik does not have to project the data again * */ #ifndef REPROJECTION_H #define REPROJECTION_H struct Projection_Info { char *descr; char *proj4text; int srs; char *option; }; enum Projection { PROJ_LATLONG = 0, PROJ_MERC, PROJ_SPHERE_MERC, PROJ_COUNT }; void project_init(int); void project_exit(void); struct Projection_Info const* project_getprojinfo(void); void reproject(double *lat, double *lon); void coords_to_tile(double *tilex, double *tiley, double lon, double lat); extern const struct Projection_Info Projection_Info[]; #endif osm2pgsql-0.82.0/sanitizer.h000066400000000000000000000002401213272333300157040ustar00rootroot00000000000000#ifndef SANITIZER_H #define SANITIZER_H #include #include xmlTextReaderPtr sanitizerOpen(const char *name); #endif osm2pgsql-0.82.0/sprompt.c000066400000000000000000000111171213272333300154000ustar00rootroot00000000000000/*------------------------------------------------------------------------- * * sprompt.c * simple_prompt() routine * * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * * IDENTIFICATION * $PostgreSQL: pgsql/src/port/sprompt.c,v 1.18 2006/10/04 00:30:14 momjian Exp $ * *------------------------------------------------------------------------- * * PostgreSQL Database Management System * (formerly known as Postgres, then as Postgres95) * * Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group * * Portions Copyright (c) 1994, The Regents of the University of California * * Permission to use, copy, modify, and distribute this software and its * documentation for any purpose, without fee, 
and without a written agreement * is hereby granted, provided that the above copyright notice and this * paragraph and the following two paragraphs appear in all copies. * * IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR * DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS * ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * */ /* * simple_prompt * * Generalized function especially intended for reading in usernames and * password interactively. Reads from /dev/tty or stdin/stderr. * * prompt: The prompt to print * maxlen: How many characters to accept * echo: Set to false if you want to hide what is entered (for passwords) * * Returns a malloc()'ed string with the input (w/o trailing newline). */ #define DEVTTY "/dev/tty" #include #include #include #include #include #include #ifdef __MINGW_H # include #else # define HAVE_TERMIOS_H # include #endif extern char *simple_prompt(const char *prompt, int maxlen, int echo); char * simple_prompt(const char *prompt, int maxlen, int echo) { int length; char *destination; FILE *termin, *termout; #ifdef HAVE_TERMIOS_H struct termios t_orig, t; #else #ifdef WIN32 HANDLE t = NULL; LPDWORD t_orig = NULL; #endif #endif destination = (char *) malloc(maxlen + 1); if (!destination) return NULL; /* * Do not try to collapse these into one "w+" mode file. Doesn't work on * some platforms (eg, HPUX 10.20). 
*/ termin = fopen(DEVTTY, "r"); termout = fopen(DEVTTY, "w"); if (!termin || !termout #ifdef WIN32 /* See DEVTTY comment for msys */ || (getenv("OSTYPE") && strcmp(getenv("OSTYPE"), "msys") == 0) #endif ) { if (termin) fclose(termin); if (termout) fclose(termout); termin = stdin; termout = stderr; } #ifdef HAVE_TERMIOS_H if (!echo) { tcgetattr(fileno(termin), &t); t_orig = t; t.c_lflag &= ~ECHO; tcsetattr(fileno(termin), TCSAFLUSH, &t); } #else #ifdef WIN32 if (!echo) { /* get a new handle to turn echo off */ t_orig = (LPDWORD) malloc(sizeof(DWORD)); t = GetStdHandle(STD_INPUT_HANDLE); /* save the old configuration first */ GetConsoleMode(t, t_orig); /* set to the new mode */ SetConsoleMode(t, ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT); } #endif #endif if (prompt) { fputs(prompt, termout); fflush(termout); } if (fgets(destination, maxlen + 1, termin) == NULL) destination[0] = '\0'; length = strlen(destination); if (length > 0 && destination[length - 1] != '\n') { /* eat rest of the line */ char buf[128]; int buflen; do { if (fgets(buf, sizeof(buf), termin) == NULL) break; buflen = strlen(buf); } while (buflen > 0 && buf[buflen - 1] != '\n'); } if (length > 0 && destination[length - 1] == '\n') /* remove trailing newline */ destination[length - 1] = '\0'; #ifdef HAVE_TERMIOS_H if (!echo) { tcsetattr(fileno(termin), TCSAFLUSH, &t_orig); fputs("\n", termout); fflush(termout); } #else #ifdef WIN32 if (!echo) { /* reset to the original console mode */ SetConsoleMode(t, *t_orig); fputs("\n", termout); fflush(termout); free(t_orig); } #endif #endif if (termin != stdin) { fclose(termin); fclose(termout); } return destination; } osm2pgsql-0.82.0/sprompt.h000066400000000000000000000001521213272333300154020ustar00rootroot00000000000000#ifndef SPROMPT_H #define SPROMPT_H char *simple_prompt(const char *prompt, int maxlen, int echo); #endif 
osm2pgsql-0.82.0/tests/000077500000000000000000000000001213272333300146715ustar00rootroot00000000000000osm2pgsql-0.82.0/tests/regression-test.sh000077500000000000000000000145741213272333300204000ustar00rootroot00000000000000#!/bin/bash set -e trap errorhandler ERR errorhandler(){ echo "!!!!!!TEST failed, please check results!!!!!!" exit $status } planetfile=$1 planetdiff=$2 test_output=`dirname $0`/test_output_$$ function setup_db { echo "" echo "Initialising test db" dropdb osm2pgsql-test > /dev/null || true createdb -E UTF8 osm2pgsql-test psql -f /usr/share/postgresql/9.1/contrib/postgis-1.5/postgis.sql -d osm2pgsql-test > /dev/null psql -f /usr/share/postgresql/9.1/contrib/postgis-1.5/spatial_ref_sys.sql -d osm2pgsql-test > /dev/null psql -c "CREATE EXTENSION hstore;" -d osm2pgsql-test &> /dev/null sudo rm -rf /tmp/psql-tablespace || true mkdir /tmp/psql-tablespace sudo chown postgres.postgres /tmp/psql-tablespace psql -q -c "DROP TABLESPACE tablespacetest" -d osm2pgsql-test > /dev/null || true psql -c "CREATE TABLESPACE tablespacetest LOCATION '/tmp/psql-tablespace'" -d osm2pgsql-test } function teardown_db { dropdb osm2pgsql-test #To remove any objects that might still be in the table space psql -c "DROP TABLESPACE tablespacetest" -d postgres sudo rm -rf /tmp/psql-tablespace rm -f $test_output $test_output.* dropdb osm2pgsql-test } function psql_test { ( echo -n "$1"; psql -c "$2" -t -d osm2pgsql-test ) | tee -a $test_output.tmp } function reset_results { rm -f $test_output $test_output.* } function compare_results { if [ ! 
-r $test_output ]; then mv $test_output.tmp $test_output elif diff $test_output $test_output.tmp >/dev/null; then rm $test_output.tmp else errorhandler fi } function test_osm2pgsql_slim { trap errorhandler ERR echo "" echo "" echo "@@@Testing osm2pgsql in slim mode with the following parameters: \"" $1 "\"@@@" setup_db dbprefix=${2:-planet_osm} ./osm2pgsql --slim --create -d osm2pgsql-test $1 $planetfile psql_test "Number of points imported" "SELECT count(*) FROM ${dbprefix}_point;" psql_test "Number of lines imported" "SELECT count(*) FROM ${dbprefix}_line;" psql_test "Number of roads imported" "SELECT count(*) FROM ${dbprefix}_roads;" psql_test "Number of polygon imported" "SELECT count(*) FROM ${dbprefix}_polygon;" psql_test "Number of nodes imported" "SELECT count(*) FROM ${dbprefix}_nodes;" psql_test "Number of ways imported" "SELECT count(*) FROM ${dbprefix}_ways;" psql_test "Number of relations imported" "SELECT count(*) FROM ${dbprefix}_rels;" echo "***Testing osm2pgsql diff import with the following parameters: \"" $1 "\"***" ./osm2pgsql --slim --append -d osm2pgsql-test $1 $planetdiff psql_test "Number of points imported" "SELECT count(*) FROM ${dbprefix}_point;" psql_test "Number of lines imported" "SELECT count(*) FROM ${dbprefix}_line;" psql_test "Number of roads imported" "SELECT count(*) FROM ${dbprefix}_roads;" psql_test "Number of polygon imported" "SELECT count(*) FROM ${dbprefix}_polygon;" psql_test "Number of nodes imported" "SELECT count(*) FROM ${dbprefix}_nodes;" psql_test "Number of ways imported" "SELECT count(*) FROM ${dbprefix}_ways;" psql_test "Number of relations imported" "SELECT count(*) FROM ${dbprefix}_rels;" compare_results } function test_osm2pgsql_gazetteer { trap errorhandler ERR echo "" echo "" echo "@@@Testing osm2pgsql in gazetteer mode with the following parameters: \"" $1 "\"@@@" setup_db dbprefix=${2:-planet_osm} ./osm2pgsql --slim --create -l -O gazetteer -d osm2pgsql-test $1 $planetfile psql_test "Number of places 
imported" "SELECT count(*) FROM place;" psql_test "Number of nodes imported" "SELECT count(*) FROM ${dbprefix}_nodes;" psql_test "Number of ways imported" "SELECT count(*) FROM ${dbprefix}_ways;" psql_test "Number of relations imported" "SELECT count(*) FROM ${dbprefix}_rels;" echo "***Testing osm2pgsql diff import with the following parameters: \"" $1 "\"***" ./osm2pgsql --slim --append -l -O gazetteer -d osm2pgsql-test $1 $planetdiff psql_test "Number of places imported" "SELECT count(*) FROM place;" psql_test "Number of nodes imported" "SELECT count(*) FROM ${dbprefix}_nodes;" psql_test "Number of ways imported" "SELECT count(*) FROM ${dbprefix}_ways;" psql_test "Number of relations imported" "SELECT count(*) FROM ${dbprefix}_rels;" compare_results } function test_osm2pgsql_nonslim { trap errorhandler ERR echo "" echo "" echo "@@@Testing osm2pgsql with the following parameters: \"" $1 "\"@@@" setup_db ./osm2pgsql --create -d osm2pgsql-test $1 $planetfile psql_test "Number of points imported" "SELECT count(*) FROM planet_osm_point;" psql_test "Number of lines imported" "SELECT count(*) FROM planet_osm_line;" psql_test "Number of roads imported" "SELECT count(*) FROM planet_osm_roads;" psql_test "Number of polygon imported" "SELECT count(*) FROM planet_osm_polygon;" compare_results } test_osm2pgsql_nonslim "-S default.style -C 100" test_osm2pgsql_nonslim "-S default.style -C 100" echo ========== OK SO FAR ============= test_osm2pgsql_nonslim "-S default.style -l -C 100" test_osm2pgsql_nonslim "--slim --drop -S default.style -C 100" reset_results echo ========== NOW DOING SLIM ============= test_osm2pgsql_slim "-S default.style -C 100" test_osm2pgsql_slim "-S default.style -l -C 100" test_osm2pgsql_slim "-k -S default.style -C 100" test_osm2pgsql_slim "-j -S default.style -C 100" test_osm2pgsql_slim "-K -S default.style -C 100" test_osm2pgsql_slim "-x -S default.style -C 100" test_osm2pgsql_slim "-p planet_osm2 -S default.style -C 100" "planet_osm2" 
test_osm2pgsql_slim "--bbox -90.0,-180.0,90.0,180.0 -S default.style -C 100" test_osm2pgsql_slim "--number-processes 6 -S default.style -C 100" test_osm2pgsql_slim "-I -S default.style -C 100" test_osm2pgsql_slim "-e 16:16 -S default.style -C 100" test_osm2pgsql_slim "--number-processes 6 -e 16:16 -S default.style -C 100" test_osm2pgsql_slim "-S default.style -C 100 -i tablespacetest" test_osm2pgsql_slim "-S default.style -C 100 --tablespace-main-data tablespacetest" test_osm2pgsql_slim "-S default.style -C 100 --tablespace-main-index tablespacetest" test_osm2pgsql_slim "-S default.style -C 100 --tablespace-slim-data tablespacetest" test_osm2pgsql_slim "-S default.style -C 100 --tablespace-slim-index tablespacetest" reset_results #test_osm2pgsql_gazetteer "-C 100" #test_osm2pgsql_gazetteer "--bbox -90.0,-180.0,90.0,180.0 -C 100" teardown_db osm2pgsql-0.82.0/text-tree.c000066400000000000000000000045511213272333300156210ustar00rootroot00000000000000/* text-tree.c * * Storage of reference counted text strings * used by keyvals.c to store the key/value strings */ #define _GNU_SOURCE #include #include #include #include #include "text-tree.h" struct tree_context *tree_ctx = NULL; int text_compare(const void *pa, const void *pb, void *rb_param) { struct text_node *a = (struct text_node *)pa; struct text_node *b = (struct text_node *)pb; rb_param = NULL; return strcmp(a->str, b->str); } struct tree_context *text_init(void) { struct tree_context *context; struct rb_table *table = rb_create (text_compare, NULL, NULL); assert(table); context = calloc(1, sizeof(*context)); assert(context); context->table = table; tree_ctx = context; return context; } void text_free(void *pa, void *rb_param) { struct text_node *a = (struct text_node *)pa; rb_param = NULL; free(a->str); free(a); } const char *text_get(struct tree_context *context, const char *text) { struct text_node *node, *dupe; node = malloc(sizeof(*node)); assert(node); node->str = strdup(text); assert(node->str); node->ref 
= 0; dupe = rb_insert(context->table, (void *)node); if (dupe) { free(node->str); free(node); dupe->ref++; return dupe->str; } else { node->ref++; return node->str; } } void text_release(struct tree_context *context, const char *text) { struct text_node *node, find; find.str = (char *)text; find.ref = 0; node = rb_find(context->table, (void *)&find); if (!node) { fprintf(stderr, "failed to find '%s'\n", text); return; } node->ref--; if (!node->ref) { rb_delete (context->table, &find); free(node->str); free(node); } } void text_exit(void) { struct tree_context *context = tree_ctx; rb_destroy(context->table, text_free); free(context); tree_ctx = NULL; } #if 0 int main(int argc, char **argv) { struct tree_context *ctx = text_init(); printf("%1$p %1$s\n", text_get(ctx, "Hello")); printf("%1$p %1$s\n", text_get(ctx, "Hello")); printf("%1$p %1$s\n", text_get(ctx, "World")); text_release(ctx,"Hello"); text_release(ctx,"Hello"); text_release(ctx,"World"); text_release(ctx,"Hello"); text_exit(ctx); return 0; } #endif osm2pgsql-0.82.0/text-tree.h000066400000000000000000000006261213272333300156250ustar00rootroot00000000000000#ifndef TEXT_TREE_H #define TEXT_TREE_H #include "rb.h" struct tree_context { struct rb_table *table; }; extern struct tree_context *tree_ctx; struct text_node { char *str; int ref; }; struct tree_context *text_init(void); void text_exit(void); const char *text_get(struct tree_context *context, const char *text); void text_release(struct tree_context *context, const char *text); #endif osm2pgsql-0.82.0/wildcmp.c000066400000000000000000000041141213272333300153320ustar00rootroot00000000000000/* Wildcard matching. heavily based on wildcmp.c copyright 2002 Jim Kent */ #include #include "wildcmp.h" static int subMatch(char *str, char *wild) /* Returns number of characters that match between str and wild up * to the next wildcard in wild (or up to end of string.). 
*/ { int len = 0; for(;;) { if(toupper(*str++) != toupper(*wild++) ) return(0); ++len; switch(*wild) { case 0: case '?': case '*': return(len); } } } int wildMatch(char *wildCard, char *string) /* does a case sensitive wild card match with a string. * * matches any string or no character. * ? matches any single character. * anything else etc must match the character exactly. returns NO_MATCH, FULL_MATCH or WC_MATCH defined in wildcmp.h */ { int matchStar = 0; int starMatchSize; int wildmatch=0; for(;;) { NEXT_WILD: switch(*wildCard) { case 0: /* end of wildcard */ { if(matchStar) { while(*string++) ; return wildmatch ? WC_MATCH : FULL_MATCH; } else if(*string) return NO_MATCH; else { return wildmatch ? WC_MATCH : FULL_MATCH; } } case '*': wildmatch = 1; matchStar = 1; break; case '?': /* anything will do */ wildmatch = 1; { if(*string == 0) return NO_MATCH; /* out of string, no match for ? */ ++string; break; } default: { if(matchStar) { for(;;) { if(*string == 0) /* if out of string no match */ return NO_MATCH; /* note matchStar is re-used here for substring * after star match length */ if((starMatchSize = subMatch(string,wildCard)) != 0) { string += starMatchSize; wildCard += starMatchSize; matchStar = 0; goto NEXT_WILD; } ++string; } } /* default: they must be equal or no match */ if(toupper(*string) != toupper(*wildCard)) return NO_MATCH; ++string; break; } } ++wildCard; } } osm2pgsql-0.82.0/wildcmp.h000066400000000000000000000001511213272333300153340ustar00rootroot00000000000000#define NO_MATCH 0 #define FULL_MATCH 1 #define WC_MATCH 2 int wildMatch(char *wildCard, char *string);