pax_global_header00006660000000000000000000000064142215636710014521gustar00rootroot0000000000000052 comment=c5055c076bd22281c67445d1df4f3225bfbf9925 s2geometry-0.10.0/000077500000000000000000000000001422156367100136775ustar00rootroot00000000000000s2geometry-0.10.0/.travis.yml000066400000000000000000000076261422156367100160230ustar00rootroot00000000000000dist: bionic language: cpp matrix: include: - os: linux addons: apt: packages: - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=gcc && CXX=g++" - GTEST_ROOT=/usr/src/gtest - os: linux addons: apt: sources: - ubuntu-toolchain-r-test packages: - g++-8 - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=gcc-8 && CXX=g++-8" - GTEST_ROOT=/usr/src/gtest - os: linux addons: apt: sources: - ubuntu-toolchain-r-test packages: - g++-9 - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=gcc-9 && CXX=g++-9" - GTEST_ROOT=/usr/src/gtest - os: linux addons: apt: sources: - ubuntu-toolchain-r-test packages: - g++-10 - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=gcc-10 && CXX=g++-10" - GTEST_ROOT=/usr/src/gtest - os: linux addons: apt: packages: - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=clang && CXX=clang++" - GTEST_ROOT=/usr/src/gtest - os: linux addons: apt: sources: - llvm-toolchain-bionic-8 - ubuntu-toolchain-r-test packages: - clang-8 - g++-7 - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=clang-8 && CXX=clang++-8" - GTEST_ROOT=/usr/src/gtest - os: linux addons: apt: sources: - llvm-toolchain-bionic-9 - ubuntu-toolchain-r-test packages: - clang-9 - g++-7 - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=clang-9 && CXX=clang++-9" - GTEST_ROOT=/usr/src/gtest - os: linux addons: apt: sources: - llvm-toolchain-bionic-10 - ubuntu-toolchain-r-test packages: - clang-10 - g++-7 - libgflags-dev - libgoogle-glog-dev - libgtest-dev - libssl-dev - swig3.0 env: - MATRIX_EVAL="CC=clang-10 && CXX=clang++-10" - GTEST_ROOT=/usr/src/gtest - os: osx osx_image: xcode11.3 env: - MATRIX_EVAL="" - GTEST_ROOT="$( /bin/pwd )/googletest-release-1.8.0/googletest" - OPENSSL_ROOT_DIR=/usr/local/opt/openssl - os: osx osx_image: xcode12 env: - MATRIX_EVAL="" - GTEST_ROOT="$( /bin/pwd )/googletest-release-1.8.0/googletest" - OPENSSL_ROOT_DIR=/usr/local/opt/openssl before_install: - eval "${MATRIX_EVAL}" - ${CC} --version - ${CXX} --version install: - if [[ "$TRAVIS_OS_NAME" == osx ]]; then brew install gflags glog swig; fi - if [[ "$TRAVIS_OS_NAME" == osx ]]; then wget https://github.com/google/googletest/archive/release-1.8.0.tar.gz; fi - if [[ "$TRAVIS_OS_NAME" == osx ]]; then tar zxvf release-1.8.0.tar.gz; fi script: - mkdir build - cd build - cmake -DGTEST_ROOT=$GTEST_ROOT -DWITH_GFLAGS=ON -DWITH_GLOG=ON -DWITH_PYTHON=ON .. - make && make CTEST_OUTPUT_ON_FAILURE=1 test s2geometry-0.10.0/AUTHORS000066400000000000000000000006571422156367100147570ustar00rootroot00000000000000# This is the official list of glog authors for copyright purposes. # This file is distinct from the CONTRIBUTORS files. # See the latter for an explanation. # # Names should be added to this file as: # Name or Organization # The email address is not required for organizations. # # Please keep the list sorted. Dan Larkin-York Google Inc. 
Koordinates Limited Tiago Brito s2geometry-0.10.0/CMakeLists.txt000066400000000000000000000513771422156367100164540ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.12) project(s2-geometry VERSION 0.10.0) include(CMakeDependentOption) include(CheckCXXCompilerFlag) include(FeatureSummary) include(FindPackageHandleStandardArgs) include(GNUInstallDirs) set(CMAKE_EXPORT_COMPILE_COMMANDS ON) # Avoid "Policy CMP0042 is not set" warning on macOS. if (APPLE) set(CMAKE_MACOSX_RPATH TRUE) endif() # s2geometry needs to use the same C++ standard that absl used to avoid # undefined symbol errors since ABSL_HAVE_STD_STRING_VIEW etc will # end up defined differently. There is probably a better way to achieve # this than assuming what absl used. set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD_REQUIRED ON) # No compiler-specific extensions, i.e. -std=c++11, not -std=gnu++11. set(CMAKE_CXX_EXTENSIONS OFF) list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/third_party/cmake") option(WITH_GFLAGS "Use gflags to change command line flags." OFF) add_feature_info(GFLAGS WITH_GFLAGS "allows changing command line flags.") # glog option can only be turned on if gflags is on. cmake_dependent_option(WITH_GLOG "Use glog for logging." ON "WITH_GFLAGS" OFF) add_feature_info(GLOG WITH_GLOG "provides logging configurability.") option(BUILD_SHARED_LIBS "Build shared libraries instead of static." ON) add_feature_info(SHARED_LIBS BUILD_SHARED_LIBS "builds shared libraries instead of static.") option(BUILD_EXAMPLES "Build s2 documentation examples." ON) option(WITH_PYTHON "Add python interface" OFF) add_feature_info(PYTHON WITH_PYTHON "provides python interface to S2") feature_summary(WHAT ALL) if (WITH_GLOG) find_package(Glog REQUIRED) # FindGFlags.cmake and FindGlog.cmake do not seem to implement REQUIRED; # check manually. if (NOT ${GLOG_FOUND}) message(FATAL_ERROR "Glog requested but not found") endif() add_definitions(-DS2_USE_GLOG) else() # Don't output anything for LOG(INFO). add_definitions(-DABSL_MIN_LOG_LEVEL=1) endif() if (WITH_GFLAGS) find_package(GFlags REQUIRED) if (NOT ${GFLAGS_FOUND}) message(FATAL_ERROR "GFlags requested but not found") endif() add_definitions(-DS2_USE_GFLAGS) endif() find_package(absl REQUIRED) find_package(OpenSSL REQUIRED) # pthreads isn't used directly, but this is still required for std::thread. find_package(Threads REQUIRED) if (WITH_PYTHON) # Should be easy to make it work with swig3, but some args to %pythonprepend # seem to be different and were changed. find_package(SWIG 4.0) # Use Python3_ROOT_DIR to help find python3, if the correct location is not # being found by default. find_package(Python3 COMPONENTS Interpreter Development) endif() if (MSVC) # Use unsigned characters add_definitions(-J) # Make sure cmath header defines things like M_PI add_definitions(-D_USE_MATH_DEFINES) # Make sure Windows doesn't define min/max macros that interfere with STL add_definitions(-DNOMINMAX) else() # Avoid megabytes of warnings like: # util/math/vector.h:178:16: warning: optimization attribute on # ‘double sqrt(double)’ follows definition but the attribute doesn’t # match [-Wattributes] add_definitions(-Wno-attributes) add_definitions(-Wno-deprecated-declarations) endif() # If OpenSSL is installed in a non-standard location, configure with # something like: # OPENSSL_ROOT_DIR=/usr/local/opt/openssl cmake .. 
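# A minimal sketch of the equivalent cache-variable form (the path is only an
# example; point it at your own OpenSSL installation):
#   cmake -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl ..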
include_directories( ${GFLAGS_INCLUDE_DIRS} ${GLOG_INCLUDE_DIRS} ${OPENSSL_INCLUDE_DIR}) if (WITH_PYTHON) include_directories(${Python3_INCLUDE_DIRS}) endif() include_directories(src) add_library(s2 src/s2/encoded_s2cell_id_vector.cc src/s2/encoded_s2point_vector.cc src/s2/encoded_s2shape_index.cc src/s2/encoded_string_vector.cc src/s2/id_set_lexicon.cc src/s2/mutable_s2shape_index.cc src/s2/r2rect.cc src/s2/s1angle.cc src/s2/s1chord_angle.cc src/s2/s1interval.cc src/s2/s2boolean_operation.cc src/s2/s2buffer_operation.cc src/s2/s2builder.cc src/s2/s2builder_graph.cc src/s2/s2builderutil_closed_set_normalizer.cc src/s2/s2builderutil_find_polygon_degeneracies.cc src/s2/s2builderutil_get_snapped_winding_delta.cc src/s2/s2builderutil_lax_polygon_layer.cc src/s2/s2builderutil_lax_polyline_layer.cc src/s2/s2builderutil_s2point_vector_layer.cc src/s2/s2builderutil_s2polygon_layer.cc src/s2/s2builderutil_s2polyline_layer.cc src/s2/s2builderutil_s2polyline_vector_layer.cc src/s2/s2builderutil_snap_functions.cc src/s2/s2cap.cc src/s2/s2cell.cc src/s2/s2cell_id.cc src/s2/s2cell_index.cc src/s2/s2cell_union.cc src/s2/s2centroids.cc src/s2/s2closest_cell_query.cc src/s2/s2closest_edge_query.cc src/s2/s2closest_point_query.cc src/s2/s2contains_vertex_query.cc src/s2/s2convex_hull_query.cc src/s2/s2coords.cc src/s2/s2crossing_edge_query.cc src/s2/s2debug.cc src/s2/s2earth.cc src/s2/s2edge_clipping.cc src/s2/s2edge_crosser.cc src/s2/s2edge_crossings.cc src/s2/s2edge_distances.cc src/s2/s2edge_tessellator.cc src/s2/s2furthest_edge_query.cc src/s2/s2latlng.cc src/s2/s2latlng_rect.cc src/s2/s2latlng_rect_bounder.cc src/s2/s2lax_loop_shape.cc src/s2/s2lax_polygon_shape.cc src/s2/s2lax_polyline_shape.cc src/s2/s2loop.cc src/s2/s2loop_measures.cc src/s2/s2measures.cc src/s2/s2memory_tracker.cc src/s2/s2metrics.cc src/s2/s2max_distance_targets.cc src/s2/s2min_distance_targets.cc src/s2/s2padded_cell.cc src/s2/s2point_compression.cc src/s2/s2point_region.cc src/s2/s2pointutil.cc src/s2/s2polygon.cc src/s2/s2polyline.cc src/s2/s2polyline_alignment.cc src/s2/s2polyline_measures.cc src/s2/s2polyline_simplifier.cc src/s2/s2predicates.cc src/s2/s2projections.cc src/s2/s2r2rect.cc src/s2/s2region.cc src/s2/s2region_term_indexer.cc src/s2/s2region_coverer.cc src/s2/s2region_intersection.cc src/s2/s2region_union.cc src/s2/s2shape_index.cc src/s2/s2shape_index_buffered_region.cc src/s2/s2shape_index_measures.cc src/s2/s2shape_measures.cc src/s2/s2shapeutil_build_polygon_boundaries.cc src/s2/s2shapeutil_coding.cc src/s2/s2shapeutil_contains_brute_force.cc src/s2/s2shapeutil_conversion.cc src/s2/s2shapeutil_edge_iterator.cc src/s2/s2shapeutil_get_reference_point.cc src/s2/s2shapeutil_range_iterator.cc src/s2/s2shapeutil_visit_crossing_edge_pairs.cc src/s2/s2text_format.cc src/s2/s2wedge_relations.cc src/s2/s2winding_operation.cc src/s2/strings/serialize.cc src/s2/util/bits/bit-interleave.cc src/s2/util/bits/bits.cc src/s2/util/coding/coder.cc src/s2/util/coding/varint.cc src/s2/util/math/exactfloat/exactfloat.cc src/s2/util/math/mathutil.cc src/s2/util/units/length-units.cc) if (GTEST_ROOT) add_library(s2testing STATIC src/s2/s2builderutil_testing.cc src/s2/s2shapeutil_testing.cc src/s2/s2testing.cc src/s2/thread_testing.cc) endif() target_link_libraries( s2 ${GFLAGS_LIBRARIES} ${GLOG_LIBRARIES} ${OPENSSL_LIBRARIES} absl::base absl::btree absl::config absl::core_headers absl::dynamic_annotations absl::endian absl::fixed_array absl::flat_hash_map absl::flat_hash_set absl::hash absl::inlined_vector absl::int128 
absl::log_severity absl::memory absl::span absl::str_format absl::strings absl::type_traits absl::utility ${CMAKE_THREAD_LIBS_INIT}) if (GTEST_ROOT) target_link_libraries( s2testing ${GFLAGS_LIBRARIES} ${GLOG_LIBRARIES} absl::memory absl::strings) endif() # Allow other CMake projects to use this one with: # list(APPEND CMAKE_MODULE_PATH "/third_party/cmake") # add_subdirectory( s2geometry) # target_link_libraries( s2) target_include_directories(s2 PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/src) # Add version information to the target set_target_properties(s2 PROPERTIES SOVERSION ${PROJECT_VERSION_MAJOR} VERSION ${PROJECT_VERSION}) # We don't need to install all headers, only those # transitively included by s2 headers we are exporting. install(FILES src/s2/_fp_contract_off.h src/s2/encoded_s2cell_id_vector.h src/s2/encoded_s2point_vector.h src/s2/encoded_s2shape_index.h src/s2/encoded_string_vector.h src/s2/encoded_uint_vector.h src/s2/id_set_lexicon.h src/s2/mutable_s2shape_index.h src/s2/r1interval.h src/s2/r2.h src/s2/r2rect.h src/s2/s1angle.h src/s2/s1chord_angle.h src/s2/s1interval.h src/s2/s2boolean_operation.h src/s2/s2buffer_operation.h src/s2/s2builder.h src/s2/s2builder_graph.h src/s2/s2builder_layer.h src/s2/s2builderutil_closed_set_normalizer.h src/s2/s2builderutil_find_polygon_degeneracies.h src/s2/s2builderutil_get_snapped_winding_delta.h src/s2/s2builderutil_graph_shape.h src/s2/s2builderutil_lax_polygon_layer.h src/s2/s2builderutil_lax_polyline_layer.h src/s2/s2builderutil_s2point_vector_layer.h src/s2/s2builderutil_s2polygon_layer.h src/s2/s2builderutil_s2polyline_layer.h src/s2/s2builderutil_s2polyline_vector_layer.h src/s2/s2builderutil_snap_functions.h src/s2/s2builderutil_testing.h src/s2/s2cap.h src/s2/s2cell.h src/s2/s2cell_id.h src/s2/s2cell_index.h src/s2/s2cell_union.h src/s2/s2centroids.h src/s2/s2closest_cell_query.h src/s2/s2closest_cell_query_base.h src/s2/s2closest_edge_query.h src/s2/s2closest_edge_query_base.h src/s2/s2closest_point_query.h src/s2/s2closest_point_query_base.h src/s2/s2contains_point_query.h src/s2/s2contains_vertex_query.h src/s2/s2convex_hull_query.h src/s2/s2coords_internal.h src/s2/s2coords.h src/s2/s2crossing_edge_query.h src/s2/s2debug.h src/s2/s2distance_target.h src/s2/s2earth.h src/s2/s2edge_clipping.h src/s2/s2edge_crosser.h src/s2/s2edge_crossings.h src/s2/s2edge_crossings_internal.h src/s2/s2edge_distances.h src/s2/s2edge_tessellator.h src/s2/s2edge_vector_shape.h src/s2/s2error.h src/s2/s2furthest_edge_query.h src/s2/s2latlng.h src/s2/s2latlng_rect.h src/s2/s2latlng_rect_bounder.h src/s2/s2lax_loop_shape.h src/s2/s2lax_polygon_shape.h src/s2/s2lax_polyline_shape.h src/s2/s2loop.h src/s2/s2loop_measures.h src/s2/s2measures.h src/s2/s2memory_tracker.h src/s2/s2metrics.h src/s2/s2max_distance_targets.h src/s2/s2min_distance_targets.h src/s2/s2padded_cell.h src/s2/s2point.h src/s2/s2point_vector_shape.h src/s2/s2point_compression.h src/s2/s2point_index.h src/s2/s2point_region.h src/s2/s2point_span.h src/s2/s2pointutil.h src/s2/s2polygon.h src/s2/s2polyline.h src/s2/s2polyline_alignment.h src/s2/s2polyline_measures.h src/s2/s2polyline_simplifier.h src/s2/s2predicates.h src/s2/s2predicates_internal.h src/s2/s2projections.h src/s2/s2r2rect.h src/s2/s2region.h src/s2/s2region_term_indexer.h src/s2/s2region_coverer.h src/s2/s2region_intersection.h src/s2/s2region_union.h src/s2/s2shape.h src/s2/s2shape_index.h src/s2/s2shape_index_buffered_region.h src/s2/s2shape_index_region.h src/s2/s2shape_measures.h 
src/s2/s2shapeutil_build_polygon_boundaries.h src/s2/s2shapeutil_coding.h src/s2/s2shapeutil_contains_brute_force.h src/s2/s2shapeutil_conversion.h src/s2/s2shapeutil_count_edges.h src/s2/s2shapeutil_edge_iterator.h src/s2/s2shapeutil_get_reference_point.h src/s2/s2shapeutil_range_iterator.h src/s2/s2shapeutil_shape_edge.h src/s2/s2shapeutil_shape_edge_id.h src/s2/s2shapeutil_testing.h src/s2/s2shapeutil_visit_crossing_edge_pairs.h src/s2/s2testing.h src/s2/s2text_format.h src/s2/s2wedge_relations.h src/s2/s2winding_operation.h src/s2/s2wrapped_shape.h src/s2/sequence_lexicon.h src/s2/thread_testing.h src/s2/value_lexicon.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2") install(FILES src/s2/base/casts.h src/s2/base/commandlineflags.h src/s2/base/integral_types.h src/s2/base/log_severity.h src/s2/base/logging.h src/s2/base/port.h src/s2/base/spinlock.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/base") install(FILES src/s2/util/bits/bits.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/bits") install(FILES src/s2/util/coding/coder.h src/s2/util/coding/varint.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/coding") install(FILES src/s2/util/endian/endian.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/endian") install(FILES src/s2/util/gtl/compact_array.h src/s2/util/gtl/container_logging.h src/s2/util/gtl/dense_hash_set.h src/s2/util/gtl/densehashtable.h src/s2/util/gtl/hashtable_common.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/gtl") install(FILES src/s2/util/hash/mix.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/hash") install(FILES src/s2/util/math/mathutil.h src/s2/util/math/matrix3x3.h src/s2/util/math/vector.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/math") install(FILES src/s2/util/math/exactfloat/exactfloat.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/math/exactfloat") install(FILES src/s2/util/units/length-units.h src/s2/util/units/physical-units.h DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/s2/util/units") if (GTEST_ROOT) set(S2_TARGETS s2 s2testing) else() set(S2_TARGETS s2) endif() install(TARGETS ${S2_TARGETS} RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}" ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}" LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}") message("GTEST_ROOT: ${GTEST_ROOT}") if (GTEST_ROOT) add_subdirectory(${GTEST_ROOT} build_gtest) include_directories(${GTEST_ROOT}/include) set(S2TestFiles src/s2/encoded_s2cell_id_vector_test.cc src/s2/encoded_s2point_vector_test.cc src/s2/encoded_s2shape_index_test.cc src/s2/encoded_string_vector_test.cc src/s2/encoded_uint_vector_test.cc src/s2/id_set_lexicon_test.cc src/s2/mutable_s2shape_index_test.cc src/s2/r1interval_test.cc src/s2/r2rect_test.cc src/s2/s1angle_test.cc src/s2/s1chord_angle_test.cc src/s2/s1interval_test.cc src/s2/s2boolean_operation_test.cc src/s2/s2buffer_operation_test.cc src/s2/s2builder_graph_test.cc src/s2/s2builder_test.cc src/s2/s2builderutil_closed_set_normalizer_test.cc src/s2/s2builderutil_find_polygon_degeneracies_test.cc src/s2/s2builderutil_get_snapped_winding_delta_test.cc src/s2/s2builderutil_lax_polygon_layer_test.cc src/s2/s2builderutil_lax_polyline_layer_test.cc src/s2/s2builderutil_s2point_vector_layer_test.cc src/s2/s2builderutil_s2polygon_layer_test.cc src/s2/s2builderutil_s2polyline_layer_test.cc src/s2/s2builderutil_s2polyline_vector_layer_test.cc src/s2/s2builderutil_snap_functions_test.cc src/s2/s2builderutil_testing_test.cc src/s2/s2cap_test.cc src/s2/s2cell_test.cc src/s2/s2cell_id_test.cc src/s2/s2cell_index_test.cc src/s2/s2cell_union_test.cc 
src/s2/s2centroids_test.cc src/s2/s2closest_cell_query_base_test.cc src/s2/s2closest_cell_query_test.cc src/s2/s2closest_edge_query_base_test.cc src/s2/s2closest_edge_query_test.cc src/s2/s2closest_point_query_base_test.cc src/s2/s2closest_point_query_test.cc src/s2/s2contains_point_query_test.cc src/s2/s2contains_vertex_query_test.cc src/s2/s2convex_hull_query_test.cc src/s2/s2coords_test.cc src/s2/s2crossing_edge_query_test.cc src/s2/s2earth_test.cc src/s2/s2edge_clipping_test.cc src/s2/s2edge_crosser_test.cc src/s2/s2edge_crossings_test.cc src/s2/s2edge_distances_test.cc src/s2/s2edge_tessellator_test.cc src/s2/s2edge_vector_shape_test.cc src/s2/s2error_test.cc src/s2/s2furthest_edge_query_test.cc src/s2/s2latlng_test.cc src/s2/s2latlng_rect_bounder_test.cc src/s2/s2latlng_rect_test.cc src/s2/s2lax_loop_shape_test.cc src/s2/s2lax_polygon_shape_test.cc src/s2/s2lax_polyline_shape_test.cc src/s2/s2loop_measures_test.cc src/s2/s2loop_test.cc src/s2/s2measures_test.cc src/s2/s2memory_tracker_test.cc src/s2/s2metrics_test.cc src/s2/s2max_distance_targets_test.cc src/s2/s2min_distance_targets_test.cc src/s2/s2padded_cell_test.cc src/s2/s2point_test.cc src/s2/s2point_vector_shape_test.cc src/s2/s2point_compression_test.cc src/s2/s2point_index_test.cc src/s2/s2point_region_test.cc src/s2/s2pointutil_test.cc src/s2/s2polygon_test.cc src/s2/s2polyline_alignment_test.cc src/s2/s2polyline_simplifier_test.cc src/s2/s2polyline_measures_test.cc src/s2/s2polyline_test.cc src/s2/s2predicates_test.cc src/s2/s2projections_test.cc src/s2/s2r2rect_test.cc src/s2/s2region_test.cc src/s2/s2region_term_indexer_test.cc src/s2/s2region_coverer_test.cc src/s2/s2region_union_test.cc src/s2/s2shape_index_buffered_region_test.cc src/s2/s2shape_index_measures_test.cc src/s2/s2shape_index_region_test.cc src/s2/s2shape_index_test.cc src/s2/s2shape_measures_test.cc src/s2/s2shapeutil_build_polygon_boundaries_test.cc src/s2/s2shapeutil_coding_test.cc src/s2/s2shapeutil_contains_brute_force_test.cc src/s2/s2shapeutil_conversion_test.cc src/s2/s2shapeutil_count_edges_test.cc src/s2/s2shapeutil_edge_iterator_test.cc src/s2/s2shapeutil_get_reference_point_test.cc src/s2/s2shapeutil_range_iterator_test.cc src/s2/s2shapeutil_visit_crossing_edge_pairs_test.cc src/s2/s2testing_test.cc src/s2/s2text_format_test.cc src/s2/s2wedge_relations_test.cc src/s2/s2winding_operation_test.cc src/s2/s2wrapped_shape_test.cc src/s2/sequence_lexicon_test.cc src/s2/value_lexicon_test.cc) enable_testing() foreach (test_cc ${S2TestFiles}) get_filename_component(test ${test_cc} NAME_WE) add_executable(${test} ${test_cc}) target_link_libraries( ${test} s2testing s2 absl::base absl::btree absl::core_headers absl::flags_reflection absl::memory absl::span absl::strings absl::synchronization gtest_main) add_test(${test} ${test}) endforeach() endif() if (BUILD_EXAMPLES AND TARGET s2testing) add_subdirectory("doc/examples" examples) endif() if (${SWIG_FOUND} AND ${Python3_FOUND}) add_subdirectory("src/python" python) endif() s2geometry-0.10.0/CONTRIBUTING.md000066400000000000000000000051121422156367100161270ustar00rootroot00000000000000# How to contribute # We'd love to accept your patches and contributions to this project. There are a just a few small guidelines you need to follow. ## Contributor License Agreement ## Contributions to any Google project must be accompanied by a Contributor License Agreement. This is not a copyright **assignment**, it simply gives Google permission to use and redistribute your contributions as part of the project. 
* If you are an individual writing original source code and you're sure you own the intellectual property, then you'll need to sign an [individual CLA][]. * If you work for a company that wants to allow you to contribute your work, then you'll need to sign a [corporate CLA][]. You generally only need to submit a CLA once, so if you've already submitted one (even if it was for a different project), you probably don't need to do it again. [individual CLA]: https://developers.google.com/open-source/cla/individual [corporate CLA]: https://developers.google.com/open-source/cla/corporate Once your CLA is submitted (or if you already submitted one for another Google project), make a commit adding yourself to the [AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part of your first [pull request][]. [AUTHORS]: AUTHORS [CONTRIBUTORS]: CONTRIBUTORS ## Submitting a patch ## 1. It's generally best to start by opening a new issue describing the bug or feature you're intending to fix. Even if you think it's relatively minor, it's helpful to know what people are working on. Mention in the initial issue that you are planning to work on that bug or feature so that it can be assigned to you. 1. Follow the normal process of [forking][] the project, and setup a new branch to work in. It's important that each group of changes be done in separate branches in order to ensure that a pull request only includes the commits related to that bug or feature. 1. Do your best to have [well-formed commit messages][] for each change. This provides consistency throughout the project, and ensures that commit messages are able to be formatted properly by various git tools. 1. Finally, push the commits to your fork and submit a [pull request][]. [forking]: https://help.github.com/articles/fork-a-repo [well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html [pull request]: https://help.github.com/articles/creating-a-pull-request ## Style ## Contributions should follow the [Google C++ Style Guide]. [Google C++ Style Guide]: http://google.github.io/styleguide/cppguide.html s2geometry-0.10.0/CONTRIBUTORS000066400000000000000000000022371422156367100155630ustar00rootroot00000000000000# People who have agreed to one of the CLAs and can contribute patches. # The AUTHORS file lists the copyright holders; this file # lists people. For example, Google employees are listed here # but not in AUTHORS, because Google holds the copyright. # # Names should be added to this file only after verifying that # the individual or the individual's organization has agreed to # the appropriate Contributor License Agreement, found here: # # https://developers.google.com/open-source/cla/individual # https://developers.google.com/open-source/cla/corporate # # The agreement for individuals can be filled out on the web. # # When adding J Random Contributor's name to this file, # either J's name or J's organization's name should be # added to the AUTHORS file, depending on whether the # individual or corporate CLA was used. # # Names should be added to this file as: # Name # # Please keep the list sorted. Dan Larkin-York Eric Veach Jesse Rosenstock Julien Basch Phil Elson Robert Coup Tiago Brito s2geometry-0.10.0/LICENSE000066400000000000000000000261361422156367100147140ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. 
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. s2geometry-0.10.0/NOTICE000066400000000000000000000002241422156367100146010ustar00rootroot00000000000000S2 Geometry Library Copyright 2017 Google Inc. All Rights Reserved. 
This product includes software developed at Google (https://www.google.com/).

s2geometry-0.10.0/README.md

# S2 Geometry Library

[![Build Status](https://travis-ci.org/google/s2geometry.svg?branch=master)](https://travis-ci.org/google/s2geometry)

## Overview

This is a package for manipulating geometric shapes. Unlike many geometry libraries, S2 is primarily designed to work with _spherical geometry_, i.e., shapes drawn on a sphere rather than on a planar 2D map. This makes it especially suitable for working with geographic data.

If you want to learn more about the library, start by reading the [overview](http://s2geometry.io/about/overview) and [quick start document](http://s2geometry.io/devguide/cpp/quickstart), then read the introduction to the [basic types](http://s2geometry.io/devguide/basic_types).

S2 documentation can be found on [s2geometry.io](http://s2geometry.io).

## Requirements for End Users

* [CMake](http://www.cmake.org/)
* A C++ compiler with C++11 support, such as [g++ >= 4.7](https://gcc.gnu.org/)
* [Abseil](https://github.com/abseil/abseil-cpp) (standard library extensions)
* [OpenSSL](https://github.com/openssl/openssl) (for its bignum library)
* [gflags command line flags](https://github.com/gflags/gflags), optional
* [glog logging module](https://github.com/google/glog), optional
* [googletest testing framework](https://github.com/google/googletest) (to build tests and example programs, optional)

On Ubuntu, all of these other than Abseil can be installed via apt-get:

```
sudo apt-get install cmake libgflags-dev libgoogle-glog-dev libgtest-dev libssl-dev
```

Otherwise, you may need to install some from source. Currently, Abseil must always be installed from source. See the use of `-DCMAKE_PREFIX_PATH` in the [build instructions below](#building). This is likely to change.

On macOS, use [MacPorts](http://www.macports.org/) or [Homebrew](http://brew.sh/). For MacPorts:

```
sudo port install cmake gflags google-glog openssl
```

Do not install `gtest` from MacPorts; instead download [release 1.8.0](https://github.com/google/googletest/releases/tag/release-1.8.0), unpack, and substitute

```
cmake -DGTEST_ROOT=/...absolute path to.../googletest-release-1.8.0/googletest ..
```

in the build instructions below.

Thorough testing has only been done on Ubuntu 14.04.3 and macOS 10.12.

## Build and Install

You may either download the source as a ZIP archive, or [clone the git repository](https://help.github.com/articles/cloning-a-repository/).

### Via ZIP archive

Download [ZIP file](https://github.com/google/s2geometry/archive/master.zip)

```
cd [parent of directory where you want to put S2]
unzip [path to ZIP file]/s2geometry-master.zip
cd s2geometry-master
```

### Via `git clone`

```
cd [parent of directory where you want to put S2]
git clone https://github.com/google/s2geometry.git
cd s2geometry
```

### Building

First, [install Abseil](https://github.com/abseil/abseil-cpp/blob/master/CMake/README.md#traditional-cmake-set-up). It must be configured with `-DCMAKE_POSITION_INDEPENDENT_CODE=ON`. s2geometry must be configured to use the same C++ version that Abseil uses. The easiest way to achieve this is to pass `-DCMAKE_CXX_STANDARD=11` (or `-DCMAKE_CXX_STANDARD=17`) to `cmake` when compiling both Abseil and s2geometry.

From the appropriate directory depending on how you got the source:

```
mkdir build
cd build
# You can omit -DGTEST_ROOT to skip tests; see above for macOS.
# Use the same CMAKE_CXX_STANDARD value that was used with absl.
cmake -DGTEST_ROOT=/usr/src/gtest -DCMAKE_PREFIX_PATH=/path/to/absl/install -DCMAKE_CXX_STANDARD=11 ..
make -j $(nproc)
make test ARGS="-j$(nproc)"  # If GTEST_ROOT specified above.
sudo make install
```

On macOS, `sysctl -n hw.logicalcpu` is the equivalent of `nproc`.

Enable gflags and glog with `cmake -DWITH_GFLAGS=ON -DWITH_GLOG=ON ...`.

Disable building of shared libraries with `-DBUILD_SHARED_LIBS=OFF`.

Enable the python interface with `-DWITH_PYTHON=ON`.

## Installing

From `build` subdirectory:

```
make install
```

Prefix it with `sudo` if needed:

```
sudo make install
```

_NOTE_: There is no `uninstall` target, but `install_manifest.txt` may be helpful.

All files will be installed at the location specified in the `CMAKE_INSTALL_PREFIX` variable. Several suffix variables are used for some file groups:

Variable | Default | Description
-------- | ------- | -----------
`CMAKE_INSTALL_INCLUDEDIR` | `include` | For header files
`CMAKE_INSTALL_BINDIR` | `bin` | For executables and `*.dll` files on `DLL`-based platforms
`CMAKE_INSTALL_LIBDIR` | `lib` | For library files (`*.so`, `*.a`, `*.lib` etc)

If needed, set these variables on the command line as `cmake` arguments with the `-D` prefix, or edit them from the `build` subdirectory:

```
make edit_cache
```

For more info read: [The CMake Cache](https://cmake.org/cmake/help/latest/guide/user-interaction/index.html#the-cmake-cache).

## Python

If you want the Python interface, you need to run cmake using `-DWITH_PYTHON=ON`. You will also need to install the following dependencies:

* [SWIG 4](https://github.com/swig/swig) (for Python support, optional)
* python3-dev (for Python support, optional)

which can be installed via

```
sudo apt-get install swig python3-dev
```

or on macOS:

```
sudo port install swig
```

Version 4.0 is required, but it should be easy to make it work with 3.0 or probably even 2.0. Python 3 is required.

## Other S2 implementations

* [Go](https://github.com/golang/geo) (Approximately 40% complete.)
* [Java](https://github.com/google/s2-geometry-library-java) (Some newer C++ features are missing; last updated in 2021.)
* [Kotlin](https://github.com/Enovea/s2-geometry-kotlin) (Complete except binary serialization)

## Disclaimer

This is not an official Google product.

s2geometry-0.10.0/doc/
s2geometry-0.10.0/doc/examples/
s2geometry-0.10.0/doc/examples/CMakeLists.txt

add_executable(point_index point_index.cc)
target_link_libraries(point_index LINK_PUBLIC s2testing s2)

add_executable(term_index term_index.cc)
target_link_libraries(term_index LINK_PUBLIC s2testing s2)

s2geometry-0.10.0/doc/examples/point_index.cc

// Copyright 2017 Google Inc. All Rights Reserved. // Author: ericv@google.com (Eric Veach) // // This example shows how to build and query an in-memory index of points // using S2PointIndex.
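//
// A hypothetical way to build and run just this example from a fresh checkout
// (assumptions: the examples are only configured when GTEST_ROOT is set, since
// they link against the s2testing target, and the binary is placed in the
// "examples" subdirectory of the build tree):
//
//   mkdir build && cd build
//   cmake -DGTEST_ROOT=/usr/src/gtest -DCMAKE_PREFIX_PATH=/path/to/absl/install ..
//   make point_index
//   ./examples/point_index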
#include #include #include #include "s2/base/commandlineflags.h" #include "s2/s2earth.h" #include "absl/flags/flag.h" #include "s2/s1chord_angle.h" #include "s2/s2closest_point_query.h" #include "s2/s2point_index.h" #include "s2/s2testing.h" S2_DEFINE_int32(num_index_points, 10000, "Number of points to index"); S2_DEFINE_int32(num_queries, 10000, "Number of queries"); S2_DEFINE_double(query_radius_km, 100, "Query radius in kilometers"); int main(int argc, char **argv) { // Build an index containing random points anywhere on the Earth. S2PointIndex index; for (int i = 0; i < absl::GetFlag(FLAGS_num_index_points); ++i) { index.Add(S2Testing::RandomPoint(), i); } // Create a query to search within the given radius of a target point. S2ClosestPointQuery query(&index); query.mutable_options()->set_max_distance(S1Angle::Radians( S2Earth::KmToRadians(absl::GetFlag(FLAGS_query_radius_km)))); // Repeatedly choose a random target point, and count how many index points // are within the given radius of that point. int64_t num_found = 0; for (int i = 0; i < absl::GetFlag(FLAGS_num_queries); ++i) { S2ClosestPointQuery::PointTarget target(S2Testing::RandomPoint()); num_found += query.FindClosestPoints(&target).size(); } std::printf("Found %" PRId64 " points in %d queries\n", num_found, absl::GetFlag(FLAGS_num_queries)); return 0; } s2geometry-0.10.0/doc/examples/term_index.cc000066400000000000000000000102031422156367100207230ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // Author: ericv@google.com (Eric Veach) // // This example shows how to add spatial data to an information retrieval // system. Such systems work by converting documents into a collection of // "index terms" (e.g., representing words or phrases), and then building an // "inverted index" that maps each term to a list of documents (and document // positions) where that term occurs. // // This example shows how to convert spatial data into index terms, which can // then be indexed along with the other document information. #include #include #include #include #include #include #include "s2/base/commandlineflags.h" #include "s2/s2earth.h" #include "absl/container/btree_set.h" #include "absl/container/flat_hash_map.h" #include "absl/flags/flag.h" #include "s2/s2cap.h" #include "s2/s2point_index.h" #include "s2/s2region_term_indexer.h" #include "s2/s2testing.h" using std::string; S2_DEFINE_int32(num_documents, 10000, "Number of documents"); S2_DEFINE_int32(num_queries, 10000, "Number of queries"); S2_DEFINE_double(query_radius_km, 100, "Query radius in kilometers"); // A prefix added to spatial terms to distinguish them from other index terms // (e.g. representing words or phrases). static const char kPrefix[] = "s2:"; int main(int argc, char** argv) { // Create a set of "documents" to be indexed. Each document consists of a // single point. (You can easily substitute any S2Region type here, or even // index a mixture of region types using std::unique_ptr. Other // region types include polygons, polylines, rectangles, discs, buffered // geometry, etc.) std::vector documents; documents.reserve(absl::GetFlag(FLAGS_num_documents)); for (int docid = 0; docid < absl::GetFlag(FLAGS_num_documents); ++docid) { documents.push_back(S2Testing::RandomPoint()); } // We use a hash map as our inverted index. The key is an index term, and // the value is the set of "document ids" where this index term is present. absl::flat_hash_map> index; // Create an indexer suitable for an index that contains points only. 
// (You may also want to adjust min_level() or max_level() if you plan // on querying very large or very small regions.) S2RegionTermIndexer::Options options; options.set_index_contains_points_only(true); S2RegionTermIndexer indexer(options); // Add the documents to the index. for (int docid = 0; docid < documents.size(); ++docid) { S2Point index_region = documents[docid]; for (const auto& term : indexer.GetIndexTerms(index_region, kPrefix)) { index[term].push_back(docid); } } // Convert the query radius to an angle representation. S1Angle radius = S1Angle::Radians( S2Earth::KmToRadians(absl::GetFlag(FLAGS_query_radius_km))); // Count the number of documents (points) found in all queries. int64_t num_found = 0; for (int i = 0; i < absl::GetFlag(FLAGS_num_queries); ++i) { // Choose a random center for query. S2Cap query_region(S2Testing::RandomPoint(), radius); // Convert the query region to a set of terms, and compute the union of // the document ids associated with those terms. (An actual information // retrieval system would do something more sophisticated.) absl::btree_set candidates; for (const auto& term : indexer.GetQueryTerms(query_region, kPrefix)) { candidates.insert(index[term].begin(), index[term].end()); } // "candidates" now contains all documents that intersect the query // region, along with some documents that nearly intersect it. We can // prune the results by retrieving the original "document" and checking // the distance more precisely. std::vector result; for (int docid : candidates) { if (!query_region.Contains(documents[docid])) continue; result.push_back(docid); } // Now do something with the results (in this example we just count them). num_found += result.size(); } std::printf("Found %" PRId64 " points in %d queries\n", num_found, absl::GetFlag(FLAGS_num_queries)); return 0; } s2geometry-0.10.0/doc/examples/term_index.py000066400000000000000000000074561422156367100210060ustar00rootroot00000000000000""" This example shows how to add spatial data to an information retrieval system. Such systems work by converting documents into a collection of "index terms" (e.g., representing words or phrases), and then building an "inverted index" that maps each term to a list of documents (and document positions) where that term occurs. This example shows how to convert spatial data into index terms, which can then be indexed along with the other document information. This is a port of the C++ term_index.cc example for the Python API. """ import argparse from collections import defaultdict import pywraps2 as s2 def main(): parser = argparse.ArgumentParser( description=( "This example shows how to convert spatial data into index terms, " "which can then be indexed along with the other document " "information." ) ) parser.add_argument( '--num_documents', type=int, default=10000, help="Number of documents" ) parser.add_argument( '--num_queries', type=int, default=10000, help="Number of queries" ) parser.add_argument( '--query_radius_km', type=float, default=100, help="Query radius in kilometers" ) args = parser.parse_args() # A prefix added to spatial terms to distinguish them from other index terms # (e.g. representing words or phrases). PREFIX = "s2:" # Create a set of "documents" to be indexed. Each document consists of a # single point. (You can easily substitute any S2Region type here, or even # index a mixture of region types using S2Region. Other # region types include polygons, polylines, rectangles, discs, buffered # geometry, etc.) 
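    # (Illustrative note, not exercised by this script: if you index regions
    # other than points here, such as s2.S2Cap objects, skip the
    # set_index_contains_points_only(True) call below, since that option is
    # intended for indexes that contain only points.)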
documents = [] for i in range(args.num_documents): documents.append(s2.S2Testing.RandomPoint()) # We use a dict as our inverted index. The key is an index term, and # the value is the set of "document ids" where this index term is present. index = defaultdict(set) # Create an indexer suitable for an index that contains points only. # (You may also want to adjust min_level() or max_level() if you plan # on querying very large or very small regions.) indexer = s2.S2RegionTermIndexer() indexer.set_index_contains_points_only(True) # Add the documents to the index. for docid, index_region in enumerate(documents): for term in indexer.GetIndexTerms(index_region, PREFIX): index[term].add(docid) # Convert the query radius to an angle representation. radius = s2.S1Angle.Radians(s2.S2Earth.KmToRadians(args.query_radius_km)) # Count the number of documents (points) found in all queries. num_found = 0 for i in range(args.num_queries): # Choose a random center for query. query_region = s2.S2Cap(s2.S2Testing.RandomPoint(), radius) # Convert the query region to a set of terms, and compute the union of # the document ids associated with those terms. (An actual information # retrieval system would do something more sophisticated.) candidates = set() for term in indexer.GetQueryTerms(query_region, PREFIX): candidates |= index[term] # "candidates" now contains all documents that intersect the query # region, along with some documents that nearly intersect it. We can # prune the results by retrieving the original "document" and checking # the distance more precisely. result = [] for docid in candidates: if query_region.Contains(documents[docid]): result.append(docid) # Now do something with the results (in this example we just count # them). num_found += len(result) print("Found %d points in %d queries" % (num_found, args.num_queries)) if __name__ == "__main__": main() s2geometry-0.10.0/src/000077500000000000000000000000001422156367100144665ustar00rootroot00000000000000s2geometry-0.10.0/src/python/000077500000000000000000000000001422156367100160075ustar00rootroot00000000000000s2geometry-0.10.0/src/python/CMakeLists.txt000066400000000000000000000014071422156367100205510ustar00rootroot00000000000000include(${SWIG_USE_FILE}) include_directories(${Python3_INCLUDE_DIRS}) set(CMAKE_SWIG_FLAGS "") set_property(SOURCE s2.i PROPERTY SWIG_FLAGS "-module" "pywraps2") set_property(SOURCE s2.i PROPERTY CPLUSPLUS ON) swig_add_library(pywraps2 LANGUAGE python SOURCES s2.i) swig_link_libraries(pywraps2 ${Python3_LIBRARIES} s2) enable_testing() add_test(NAME pywraps2_test COMMAND ${Python3_EXECUTABLE} "${PROJECT_SOURCE_DIR}/src/python/pywraps2_test.py") set_property(TEST pywraps2_test PROPERTY ENVIRONMENT "PYTHONPATH=$ENV{PYTHONPATH}:${PROJECT_BINARY_DIR}/python") # Install the wrapper. install(TARGETS _pywraps2 DESTINATION ${Python3_SITELIB}) install(FILES "${PROJECT_BINARY_DIR}/python/pywraps2.py" DESTINATION ${Python3_SITELIB}) s2geometry-0.10.0/src/python/coder.i000066400000000000000000000067661422156367100172740ustar00rootroot00000000000000// // Exposes Encoder and Decoder a subset of functionality plus the // ability to set and get internal buffer. This allows passing them // into other SWIG'd methods, such as those in the S2 SWIG library. // For example: // // class S2Polygon { // ... // void Encode(Encoder* const encoder) const override; // // bool Decode(Decoder* const decoder) override; // }; // // Usage: // # data is a bytes object. 
// dec = pywrapcoder.Decoder(data) // polygon = s2.S2Polygon() // polygon.Decode(dec) // // enc = pywrapcoder.Encoder() // polygon.Encode(enc) // data = enc.buffer() // %{ #include "s2/util/coding/coder.h" %} // For Decoder::reset to accept a bytes or bytearray object. %typemap(in) (const void* buf, size_t maxn) { if (PyBytes_Check($input)) { $1 = (void *) PyBytes_AsString($input); $2 = PyBytes_Size($input); } else if (PyByteArray_Check($input)) { $1 = (void *) PyByteArray_AsString($input); $2 = PyByteArray_Size($input); } else { PyErr_Format(PyExc_TypeError, "bytes or bytearray needed, %s found", $input->ob_type->tp_name); return nullptr; } }; // For Encoder::reset to accept a bytearray object. %typemap(in) (void* buf, size_t maxn) { if (PyByteArray_Check($input)) { $1 = (void *) PyByteArray_AsString($input); $2 = PyByteArray_Size($input); } else { PyErr_Format(PyExc_TypeError, "bytearray needed, %s found", $input->ob_type->tp_name); return nullptr; } }; // Keep a reference to the object so that outside users don't // have to keep one, or else it could be released. // The auto-generated code passes *args to each of the methods, but the use // cases we support is only when a single arg is passed in. %pythonprepend Decoder::Decoder %{ if len(args) == 1: self._data_keepalive = args[0] %} %pythonprepend Decoder::reset %{ self._data_keepalive = args[0] %} %extend Decoder { // Allows direct construction with a bytes or bytearray objects. Decoder(PyObject *obj) { if (PyBytes_Check(obj)) { return new Decoder(PyBytes_AsString(obj), PyBytes_Size(obj)); } if (PyByteArray_Check(obj)) { return new Decoder(PyByteArray_AsString(obj), PyByteArray_Size(obj)); } PyErr_Format(PyExc_TypeError, "bytes or bytearray needed, %s found", obj->ob_type->tp_name); return nullptr; } } %extend Encoder { // Returns internal buffer as a bytearray. PyObject* buffer() { return PyByteArray_FromStringAndSize($self->base(), $self->length()); } } %ignoreall %unignore Decoder; %unignore Decoder::Decoder(); %unignore Decoder::Decoder(const void*, size_t); %unignore Decoder::~Decoder; %unignore Decoder::avail() const; %unignore Decoder::get8(); %unignore Decoder::get16(); %unignore Decoder::get32(); %unignore Decoder::get64(); %unignore Decoder::getfloat(); %unignore Decoder::getdouble(); %unignore Decoder::pos() const; %unignore Decoder::reset(const void*, size_t); %unignore Encoder; %unignore Encoder::Encoder(); %unignore Encoder::Encoder(void*, size_t); %unignore Encoder::~Encoder; %unignore Encoder::Ensure(size_t); %unignore Encoder::avail() const; %unignore Encoder::buffer(); %unignore Encoder::clear(); %unignore Encoder::length() const; %unignore Encoder::put8(unsigned char); %unignore Encoder::put16(uint16); %unignore Encoder::put32(uint32); %unignore Encoder::put64(uint64); %unignore Encoder::putdouble(double); %unignore Encoder::putfloat(float); %unignore Encoder::reset(void *, size_t); %include "s2/util/coding/coder.h" %unignoreall s2geometry-0.10.0/src/python/pywraps2_test.py000077500000000000000000001061251422156367100212170ustar00rootroot00000000000000# # Copyright 2006 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from collections import defaultdict import pywraps2 as s2 class PyWrapS2TestCase(unittest.TestCase): def testContainsIsWrappedCorrectly(self): london = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.3368602, 0.4931979), s2.S2LatLng.FromDegrees(51.7323965, 0.1495211)) e14lj = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.5213527, -0.0476026), s2.S2LatLng.FromDegrees(51.5213527, -0.0476026)) self.assertTrue(london.Contains(e14lj)) def testS2CellIdEqualsIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) cell = s2.S2CellId(london) same_cell = s2.S2CellId(london) self.assertEqual(cell, same_cell) def testS2CellIdComparsionIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) cell = s2.S2CellId(london) self.assertLess(cell, cell.next()) self.assertGreater(cell.next(), cell) def testS2CellIdFromToTokenIsWrappedCorrectly(self): cell = s2.S2CellId.FromToken("487604c489f841c3") self.assertEqual(cell.ToToken(), "487604c489f841c3") self.assertEqual(cell.id(), 0x487604c489f841c3) cell = s2.S2CellId.FromToken("487") self.assertEqual(cell.ToToken(), "487") self.assertEqual(cell.id(), 0x4870000000000000) cell = s2.S2CellId.FromToken("this is invalid") self.assertEqual(cell.ToToken(), "X") self.assertEqual(cell.id(), 0) def testS2CellIdGetEdgeNeighborsIsWrappedCorrectly(self): cell = s2.S2CellId(0x466d319000000000) expected_neighbors = [s2.S2CellId(0x466d31b000000000), s2.S2CellId(0x466d317000000000), s2.S2CellId(0x466d323000000000), s2.S2CellId(0x466d31f000000000)] neighbors = cell.GetEdgeNeighbors() self.assertCountEqual(neighbors, expected_neighbors) def testS2CellIdGetVertexNeighborsIsWrappedCorrectly(self): cell = s2.S2CellId(0x466d319000000000) expected_neighbors = [s2.S2CellId(0x466d31c000000000), s2.S2CellId(0x466d314000000000), s2.S2CellId(0x466d324000000000), s2.S2CellId(0x466d33c000000000)] self.assertEqual(cell.level(), 12) # Requires level < cell.level. neighbors = cell.GetVertexNeighbors(11) self.assertCountEqual(neighbors, expected_neighbors) def testS2CellIdGetAllNeighborsIsWrappedCorrectly(self): cell = s2.S2CellId(0x466d319000000000) expected_neighbors = [s2.S2CellId(0x466d31d000000000), s2.S2CellId(0x466d311000000000), s2.S2CellId(0x466d31b000000000), s2.S2CellId(0x466d323000000000), s2.S2CellId(0x466d31f000000000), s2.S2CellId(0x466d317000000000), s2.S2CellId(0x466d321000000000), s2.S2CellId(0x466d33d000000000)] self.assertEqual(cell.level(), 12) # Requires level >= cell.level. 
neighbors = cell.GetAllNeighbors(12) self.assertCountEqual(neighbors, expected_neighbors) def testS2CellIdChild(self): valid = s2.S2CellId(0x89c259c000000000) invalid = s2.S2CellId(0) self.assertTrue(valid.is_valid()) self.assertFalse(invalid.is_valid()) self.assertEqual(valid.child(0).parent().id(), valid.id()) with self.assertRaises(ValueError): valid.child(-1) with self.assertRaises(ValueError): valid.child(4) with self.assertRaises(ValueError): invalid.child(0) leaf = s2.S2CellId(s2.S2LatLng.FromDegrees(10.0, 20.0)) with self.assertRaises(ValueError): leaf.child(0) def testS2CellIdChildLevelIsWrappedCorrectly(self): cell = s2.S2CellId(0x876bec2688e50000) self.assertEqual(cell.child_position(3), 2) with self.assertRaises(ValueError): cell.child_position(-1) with self.assertRaises(ValueError): cell.child_position(0) with self.assertRaises(ValueError): cell.child_position(40) def testS2CellIdContainsInvalidRaises(self): valid = s2.S2CellId(0x89c259c000000000) invalid = s2.S2CellId(0) self.assertTrue(valid.is_valid()) self.assertFalse(invalid.is_valid()) self.assertTrue(valid.contains(valid)) with self.assertRaises(ValueError): valid.contains(invalid) with self.assertRaises(ValueError): invalid.contains(valid) def testS2CellIdGetAllNeighborsIsWrappedCorrectly(self): cell = s2.S2CellId(0x6aa7590000000000) expected_neighbors = (s2.S2CellId(0x2ab3530000000000), s2.S2CellId(0x2ab34b0000000000), s2.S2CellId(0x2ab34d0000000000), s2.S2CellId(0x6aa75b0000000000), s2.S2CellId(0x6aa7570000000000), s2.S2CellId(0x6aa75f0000000000), s2.S2CellId(0x6aa7510000000000), s2.S2CellId(0x6aa75d0000000000)) neighbors = cell.GetAllNeighbors(cell.level()) self.assertEqual(neighbors, expected_neighbors) def testS2CellIdIntersectsIsTrueForOverlap(self): cell1 = s2.S2CellId(0x89c259c000000000) cell2 = s2.S2CellId(0x89c2590000000000) self.assertTrue(cell1.intersects(cell2)) def testS2CellIdIntersectsIsFalseForNonOverlap(self): cell1 = s2.S2CellId(0x89c259c000000000) cell2 = s2.S2CellId(0x89e83d0000000000) self.assertFalse(cell1.intersects(cell2)) def testS2CellIdIntersectsInvalidRaises(self): valid = s2.S2CellId(0x89c259c000000000) invalid = s2.S2CellId(0) self.assertTrue(valid.is_valid()) self.assertFalse(invalid.is_valid()) with self.assertRaises(ValueError): valid.intersects(invalid) with self.assertRaises(ValueError): invalid.intersects(valid) def testS2CellIdLevel(self): leaf = s2.S2CellId(s2.S2LatLng.FromDegrees(10.0, 20.0)) self.assertEqual(leaf.level(), 30) with self.assertRaises(ValueError): s2.S2CellId(0).level() def testS2CellIdParent(self): leaf = s2.S2CellId(s2.S2LatLng.FromDegrees(10.0, 20.0)) self.assertEqual(leaf.level(), 30) self.assertEqual(leaf.parent().level(), 29) level8 = leaf.parent(8) self.assertEqual(level8.level(), 8) self.assertEqual(level8.parent(0).level(), 0) # Error to have negative level. with self.assertRaises(ValueError): level8.parent(-1) # Same level is ok. self.assertEqual(level8.parent(8).level(), 8) # Error to ask for parent with lower level. 
with self.assertRaises(ValueError): level8.parent(9) # Parent of invalid is an error with self.assertRaises(ValueError): s2.S2CellId(0).parent() def testS2HashingIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) cell = s2.S2CellId(london) same_cell = s2.S2CellId(london) self.assertEqual(hash(cell), hash(same_cell)) def testCovererIsWrappedCorrectly(self): london = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.3368602, 0.4931979), s2.S2LatLng.FromDegrees(51.7323965, 0.1495211)) e14lj = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.5213527, -0.0476026), s2.S2LatLng.FromDegrees(51.5213527, -0.0476026)) coverer = s2.S2RegionCoverer() coverer.set_max_cells(6) self.assertEqual(6, coverer.max_cells()) covering = coverer.GetCovering(e14lj) self.assertLessEqual(len(covering), 6) for cellid in covering: self.assertTrue(london.Contains(s2.S2Cell(cellid))) interior = coverer.GetInteriorCovering(e14lj) for cellid in interior: self.assertTrue(london.Contains(s2.S2Cell(cellid))) def testS2CellUnionIsWrappedCorrectly(self): cell_union = s2.S2CellUnion() cell_union.Init([0x466d319000000000, 0x466d31b000000000]) self.assertEqual(cell_union.num_cells(), 2) trondheim = s2.S2LatLng.FromDegrees(63.431052, 10.395083) self.assertTrue(cell_union.Contains(s2.S2CellId(trondheim))) # Init() calls Normalized, so cell_ids() are normalized. cell_union2 = s2.S2CellUnion.FromNormalized(cell_union.cell_ids()) # There is no S2CellUnion::Equals, and cell_ids is a non-iterable # SWIG object, so just perform the same checks again. self.assertEqual(cell_union2.num_cells(), 2) self.assertTrue(cell_union2.Contains(s2.S2CellId(trondheim))) def testS2PolygonIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london))) self.assertEqual(polygon.num_loops(), 1) point = london.ToPoint() self.assertTrue(polygon.Contains(point)) def testS2LoopIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london))) loop = polygon.loop(0) self.assertTrue(loop.IsValid()) self.assertEqual(0, loop.depth()) self.assertFalse(loop.is_hole()) self.assertEqual(4, loop.num_vertices()) self.assertTrue(loop.IsNormalized()) point = london.ToPoint() self.assertTrue(loop.Contains(point)) def testS2LoopUsesValueEquality(self): self.assertEqual(s2.S2Loop(), s2.S2Loop()) def testS2PolygonCopiesLoopInConstructorBecauseItTakesOwnership(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london))) s2.S2Polygon(loop) def testS2LoopAreaIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london))) equivalent_polygon = s2.S2Polygon(loop) self.assertAlmostEqual(loop.GetArea(), equivalent_polygon.GetArea()) def testS2PolygonInitNestedIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) small_loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london))) big_loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london).parent(1))) polygon = s2.S2Polygon() polygon.InitNested([big_loop, small_loop]) def testS2PolygonInitNestedWithIncorrectTypeIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) loop = s2.S2Loop(s2.S2Cell(s2.S2CellId(london))) polygon = s2.S2Polygon() with self.assertRaises(TypeError): polygon.InitNested([loop, s2.S2CellId()]) def testS2PolygonGetAreaIsWrappedCorrectly(self): # Cell at level 10 containing central London. 
london_level_10 = s2.S2CellId( s2.S2LatLng.FromDegrees(51.5001525, -0.1262355)).parent(10) polygon = s2.S2Polygon(s2.S2Cell(london_level_10)) # Because S2Cell.ExactArea() isn't swigged, compare S2Polygon.GetArea() with # S2CellUnion.ExactArea(). cell_union = s2.S2CellUnion() cell_union.Init([london_level_10.id()]) self.assertAlmostEqual(cell_union.ExactArea(), polygon.GetArea(), places=10) def testS2PolygonGetOverlapFractions(self): # Matches S2Polygon, OverlapFractions test from cs/s2polygon_test.cc a = s2.S2Polygon() b = s2.S2Polygon() r1, r2 = s2.S2Polygon.GetOverlapFractions(a, b) self.assertAlmostEqual(1.0, r1) self.assertAlmostEqual(1.0, r2) def verts2loop(vs): loop = s2.S2Loop() loop.Init([s2.S2LatLng.FromDegrees(*v).ToPoint() for v in vs]) return loop loop1verts = [(-10, 10), (0, 10), (0, -10), (-10, -10), (-10, 0)] b = s2.S2Polygon(verts2loop(loop1verts)) r1, r2 = s2.S2Polygon.GetOverlapFractions(a, b) self.assertAlmostEqual(1.0, r1) self.assertAlmostEqual(0.0, r2) loop2verts = [(-10, 0), (10, 0), (10, -10), (-10, -10)] a = s2.S2Polygon(verts2loop(loop2verts)) r1, r2 = s2.S2Polygon.GetOverlapFractions(a, b) self.assertAlmostEqual(0.5, r1) self.assertAlmostEqual(0.5, r2) def testGetS2LatLngVertexIsWrappedCorrectly(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london))) loop = polygon.loop(0) first_vertex = loop.GetS2LatLngVertex(0) self.assertIsInstance(first_vertex, s2.S2LatLng) self.assertEqual("51.500152,-0.126235", first_vertex.ToStringInDegrees()) second_vertex = loop.GetS2LatLngVertex(1) self.assertIsInstance(second_vertex, s2.S2LatLng) self.assertEqual("51.500153,-0.126235", second_vertex.ToStringInDegrees()) def testGetLastDescendant(self): def verts2loop(vs): loop = s2.S2Loop() loop.Init([s2.S2LatLng.FromDegrees(*v).ToPoint() for v in vs]) return loop loop1 = verts2loop([(0, 0), (0, 10), (10, 10), (10, 0)]) # Shell loop2 = verts2loop([(2, 2), (2, 5), (5, 5), (5, 2)]) # Hole loop3 = verts2loop([(0, 20), (0, 30), (10, 30), (10, 20)]) # Another shell polygon = s2.S2Polygon() polygon.InitNested([loop1, loop2, loop3]) self.assertEqual(1, polygon.GetLastDescendant(0)) self.assertEqual(1, polygon.GetLastDescendant(1)) self.assertEqual(2, polygon.GetLastDescendant(2)) def testS2PolylineInitFromS2LatLngs(self): e7_10deg = 0x5f5e100 list_ll = [] for lat, lng in [(0, 0), (0, e7_10deg), (e7_10deg, e7_10deg)]: list_ll.append(s2.S2LatLng.FromE7(lat, lng)) line = s2.S2Polyline() line.InitFromS2LatLngs(list_ll) self.assertAlmostEqual(20.0, line.GetLength().degrees()) def testS2PolylineInitFromS2Points(self): e7_10deg = 0x5f5e100 list_points = [] for lat, lng in [(0, 0), (0, e7_10deg), (e7_10deg, e7_10deg)]: list_points.append(s2.S2LatLng.FromE7(lat, lng).ToPoint()) line = s2.S2Polyline() line.InitFromS2Points(list_points) self.assertAlmostEqual(20.0, line.GetLength().degrees()) def testS2PolylineUsesValueEquality(self): self.assertEqual(s2.S2Polyline(), s2.S2Polyline()) def testS2PointsCanBeNormalized(self): line = s2.S2Polyline() line.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(37.794484, -122.394871), s2.S2LatLng.FromDegrees(37.762699, -122.435158)]) self.assertNotAlmostEqual(line.GetCentroid().Norm(), 1.0) self.assertAlmostEqual(line.GetCentroid().Normalize().Norm(), 1.0) def testS1AngleComparsionIsWrappedCorrectly(self): ten_degrees = s2.S1Angle.Degrees(10) one_hundred_degrees = s2.S1Angle.Degrees(100) self.assertLess(ten_degrees, one_hundred_degrees) self.assertGreater(one_hundred_degrees, ten_degrees) def 
testS2PolygonIntersectsWithPolyline(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london).parent(15))) line = s2.S2Polyline() line.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(51.5, -0.128), s2.S2LatLng.FromDegrees(51.5, -0.125)]) intersections = polygon.IntersectWithPolyline(line) self.assertEqual(1, len(intersections)) def testS2PolygonBoundaryNearIsSame(self): london_1 = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon_1 = s2.S2Polygon(s2.S2Loop(s2.S2Cell(s2.S2CellId(london_1)))) london_2 = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon_2 = s2.S2Polygon(s2.S2Loop(s2.S2Cell(s2.S2CellId(london_2)))) self.assertTrue(polygon_1.BoundaryNear(polygon_2)) def testS2PolygonBoundaryNearIsTotallyDifferent(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon_1 = s2.S2Polygon(s2.S2Loop(s2.S2Cell(s2.S2CellId(london)))) seattle = s2.S2LatLng.FromDegrees(47.6062, -122.3321) polygon_2 = s2.S2Polygon(s2.S2Loop(s2.S2Cell(s2.S2CellId(seattle)))) self.assertFalse(polygon_1.BoundaryNear(polygon_2)) def testS2PolygonBoundaryNearIsNear(self): def verts2loop(vs): loop = s2.S2Loop() loop.Init([s2.S2LatLng.FromDegrees(*v).ToPoint() for v in vs]) return loop vertices_1 = [(-10, 10), (0, 10), (0, -10), (-10, -10), (-10, 0)] polygon_1 = s2.S2Polygon(verts2loop(vertices_1)) vertices_2 = [(-10, 10), (0, 10), (0, -10.1), (-10, -10), (-10, 0)] polygon_2 = s2.S2Polygon(verts2loop(vertices_2)) self.assertTrue(polygon_1.BoundaryNear(polygon_2, s2.S1Angle.Degrees(1))) def testS2PolygonUsesValueEquality(self): self.assertEqual(s2.S2Polygon(), s2.S2Polygon()) def testCrossingSign(self): a = s2.S2LatLng.FromDegrees(-1, 0).ToPoint() b = s2.S2LatLng.FromDegrees(1, 0).ToPoint() c = s2.S2LatLng.FromDegrees(0, -1).ToPoint() d = s2.S2LatLng.FromDegrees(0, 1).ToPoint() # SWIG flattens namespaces, so this is just s2.CrossingSign, # not s2.S2.CrossingSign. self.assertEqual(1, s2.CrossingSign(a, b, c, d)) def testGetIntersection(self): a = s2.S2LatLng.FromDegrees(-1, 0).ToPoint() b = s2.S2LatLng.FromDegrees(1, 0).ToPoint() c = s2.S2LatLng.FromDegrees(0, -1).ToPoint() d = s2.S2LatLng.FromDegrees(0, 1).ToPoint() # SWIG namespace flattening as above. intersection = s2.GetIntersection(a, b, c, d) self.assertEqual( "0.000000,0.000000", s2.S2LatLng(intersection).ToStringInDegrees()) def testS2CellDistance(self): # Level-0 cell (i.e. face) centered at (0, 0) cell = s2.S2Cell(s2.S2CellId(0x1000000000000000)) p1 = s2.S2LatLng.FromDegrees(0, 0).ToPoint() self.assertTrue(cell.Contains(p1)) d1 = cell.GetDistance(p1).ToAngle().degrees() # Inside, so distance is 0, but boundary distance is not. self.assertEqual(0.0, d1) bd1 = cell.GetBoundaryDistance(p1).ToAngle().degrees() self.assertEqual(45.0, bd1) p2 = s2.S2LatLng.FromDegrees(0, 90).ToPoint() self.assertFalse(cell.Contains(p2)) d2 = cell.GetDistance(p2).ToAngle().degrees() self.assertAlmostEqual(45.0, d2) bd2 = cell.GetBoundaryDistance(p2).ToAngle().degrees() # Outside, so distance and boundary distance are the same. 
self.assertAlmostEqual(45.0, bd2) def testS2Rotate(self): mtv_a = s2.S2LatLng.FromDegrees(37.4402777, -121.9638888).ToPoint() mtv_b = s2.S2LatLng.FromDegrees(37.3613888, -121.9283333).ToPoint() angle = s2.S1Angle.Radians(0.039678) point = s2.Rotate(mtv_a, mtv_b, angle) self.assertEqual("37.439095,-121.967802", s2.S2LatLng(point).ToStringInDegrees()) def testS2TurnAngle(self): mtv_a = s2.S2LatLng.FromDegrees(37.4402777, -121.9638888).ToPoint() mtv_b = s2.S2LatLng.FromDegrees(37.3613888, -121.9283333).ToPoint() mtv_c = s2.S2LatLng.FromDegrees(37.3447222, -122.0308333).ToPoint() angle = s2.TurnAngle(mtv_a, mtv_b, mtv_c) self.assertAlmostEqual(-1.7132025, angle) def testEncodeDecode(self): london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) polygon = s2.S2Polygon(s2.S2Cell(s2.S2CellId(london).parent(15))) self.assertEqual(polygon.num_loops(), 1) encoder = s2.Encoder() polygon.Encode(encoder) encoded = encoder.buffer() decoder = s2.Decoder(encoded) decoded_polygon = s2.S2Polygon() self.assertTrue(decoded_polygon.Decode(decoder)) self.assertEqual(decoded_polygon.num_loops(), 1) self.assertTrue(decoded_polygon.Equals(polygon)) def testS2CapRegion(self): center = s2.S2LatLng.FromDegrees(2.0, 3.0).ToPoint() cap = s2.S2Cap(center, s2.S1Angle.Degrees(1.0)) inside = s2.S2LatLng.FromDegrees(2.1, 2.9).ToPoint() outside = s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint() self.assertTrue(cap.Contains(inside)) self.assertFalse(cap.Contains(outside)) self.assertTrue(cap.Contains(s2.S2Cell(inside))) self.assertFalse(cap.Contains(s2.S2Cell(outside))) self.assertTrue(cap.MayIntersect(s2.S2Cell(inside))) self.assertFalse(cap.MayIntersect(s2.S2Cell(outside))) self.assertTrue(cap.ApproxEquals(cap.GetCapBound())) rect_bound = cap.GetRectBound() self.assertTrue(rect_bound.Contains(inside)) self.assertFalse(rect_bound.Contains(outside)) def testS2LatLngRectRegion(self): rect = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(1.0, 2.0), s2.S2LatLng.FromDegrees(3.0, 4.0)) inside = s2.S2LatLng.FromDegrees(2.0, 3.0).ToPoint() outside = s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint() self.assertTrue(rect.Contains(inside)) self.assertFalse(rect.Contains(outside)) self.assertTrue(rect.Contains(s2.S2Cell(inside))) self.assertFalse(rect.Contains(s2.S2Cell(outside))) self.assertTrue(rect.MayIntersect(s2.S2Cell(inside))) self.assertFalse(rect.MayIntersect(s2.S2Cell(outside))) cap_bound = rect.GetCapBound() self.assertTrue(cap_bound.Contains(inside)) self.assertFalse(cap_bound.Contains(outside)) self.assertTrue(rect.ApproxEquals(rect.GetRectBound())) def testS2CellRegion(self): cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8)) inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint() outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint() self.assertTrue(cell.Contains(inside)) self.assertFalse(cell.Contains(outside)) self.assertTrue(cell.Contains(s2.S2Cell(inside))) self.assertFalse(cell.Contains(s2.S2Cell(outside))) self.assertTrue(cell.MayIntersect(s2.S2Cell(inside))) self.assertFalse(cell.MayIntersect(s2.S2Cell(outside))) cap_bound = cell.GetCapBound() self.assertTrue(cap_bound.Contains(inside)) self.assertFalse(cap_bound.Contains(outside)) rect_bound = cell.GetRectBound() self.assertTrue(rect_bound.Contains(inside)) self.assertFalse(rect_bound.Contains(outside)) def testS2CellUnionRegion(self): cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8) cell_union = s2.S2CellUnion() cell_union.Init([cell_id.id()]) inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint() outside = s2.S2LatLng.FromDegrees(30.0, 
40.0).ToPoint() self.assertTrue(cell_union.Contains(inside)) self.assertFalse(cell_union.Contains(outside)) self.assertTrue(cell_union.Contains(s2.S2Cell(inside))) self.assertFalse(cell_union.Contains(s2.S2Cell(outside))) self.assertTrue(cell_union.MayIntersect(s2.S2Cell(inside))) self.assertFalse(cell_union.MayIntersect(s2.S2Cell(outside))) cap_bound = cell_union.GetCapBound() self.assertTrue(cap_bound.Contains(inside)) self.assertFalse(cap_bound.Contains(outside)) rect_bound = cell_union.GetRectBound() self.assertTrue(rect_bound.Contains(inside)) self.assertFalse(rect_bound.Contains(outside)) def testS2CellUnionEmpty(self): empty_cell_union = s2.S2CellUnion() self.assertTrue(empty_cell_union.empty()) cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8) cell_union = s2.S2CellUnion() cell_union.Init([cell_id.id()]) self.assertFalse(cell_union.empty()) def testS2CellUnionIntersectionWithS2CellUnion(self): cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)) cell_union = s2.S2CellUnion() cell_union.Init([cell_id.id()]) # No intersection. outside_cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(5.0, 6.0)) outside_cell_union = s2.S2CellUnion() outside_cell_union.Init([outside_cell_id.id()]) empty_intersection = cell_union.Intersection(outside_cell_union) self.assertTrue(empty_intersection.empty()) # Complete overlap. self_intersection = cell_union.Intersection(cell_union) self.assertTrue(self_intersection.Contains(cell_union)) self.assertTrue(cell_union.Contains(self_intersection)) # Some intersection. joint_cell_union = s2.S2CellUnion() joint_cell_union.Init([cell_id.id(), outside_cell_id.id()]) outside_intersection = joint_cell_union.Intersection(outside_cell_union) self.assertTrue(outside_intersection.Contains(outside_cell_id)) self.assertFalse(outside_intersection.Contains(cell_id)) def testS2CellUnionIntersectionWithS2CellId(self): cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)) cell_union = s2.S2CellUnion() cell_union.Init([cell_id.id()]) # No intersection. outside_cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(4.0, 5.0)) empty_intersection = cell_union.Intersection(outside_cell_id) self.assertTrue(empty_intersection.empty()) # Complete overlap. intersection = cell_union.Intersection(cell_id) self.assertTrue(intersection.Contains(cell_id)) # Some intersection. 
joint_cell_union = s2.S2CellUnion() joint_cell_union.Init([cell_id.id(), outside_cell_id.id()]) outside_intersection = joint_cell_union.Intersection(outside_cell_id) self.assertTrue(outside_intersection.Contains(outside_cell_id)) self.assertFalse(outside_intersection.Contains(cell_id)) def testS2CellUnionIsNormalized(self): empty_cell_union = s2.S2CellUnion() self.assertTrue(empty_cell_union.IsNormalized()) london = s2.S2LatLng.FromDegrees(51.5001525, -0.1262355) london_cell_id = s2.S2CellId(london) normalized_union = s2.S2CellUnion() normalized_union.Init([london_cell_id.id()]) self.assertTrue(normalized_union.IsNormalized()) def testS2CellUnionNormalizeS2CellUnion(self): empty_cell_union = s2.S2CellUnion() empty_cell_union.NormalizeS2CellUnion() self.assertTrue(empty_cell_union.IsNormalized()) cell_id = s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8) cell_union = s2.S2CellUnion() cell_union.Init([cell_id.id()]) cell_union.NormalizeS2CellUnion() self.assertTrue(cell_union.IsNormalized()) def testS2LoopRegion(self): cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8)) loop = s2.S2Loop(cell) inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint() outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint() self.assertTrue(loop.Contains(inside)) self.assertFalse(loop.Contains(outside)) self.assertTrue(loop.Contains(s2.S2Cell(inside))) self.assertFalse(loop.Contains(s2.S2Cell(outside))) self.assertTrue(loop.MayIntersect(s2.S2Cell(inside))) self.assertFalse(loop.MayIntersect(s2.S2Cell(outside))) cap_bound = loop.GetCapBound() self.assertTrue(cap_bound.Contains(inside)) self.assertFalse(cap_bound.Contains(outside)) rect_bound = loop.GetRectBound() self.assertTrue(rect_bound.Contains(inside)) self.assertFalse(rect_bound.Contains(outside)) def testS2PolygonRegion(self): cell = s2.S2Cell(s2.S2CellId(s2.S2LatLng.FromDegrees(3.0, 4.0)).parent(8)) polygon = s2.S2Polygon(cell) inside = s2.S2LatLng.FromDegrees(3.0, 4.0).ToPoint() outside = s2.S2LatLng.FromDegrees(30.0, 40.0).ToPoint() self.assertTrue(polygon.Contains(inside)) self.assertFalse(polygon.Contains(outside)) self.assertTrue(polygon.Contains(s2.S2Cell(inside))) self.assertFalse(polygon.Contains(s2.S2Cell(outside))) self.assertTrue(polygon.MayIntersect(s2.S2Cell(inside))) self.assertFalse(polygon.MayIntersect(s2.S2Cell(outside))) cap_bound = polygon.GetCapBound() self.assertTrue(cap_bound.Contains(inside)) self.assertFalse(cap_bound.Contains(outside)) rect_bound = polygon.GetRectBound() self.assertTrue(rect_bound.Contains(inside)) self.assertFalse(rect_bound.Contains(outside)) def testS2PolylineRegion(self): polyline = s2.S2Polyline() polyline.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(0.0, 0.0), s2.S2LatLng.FromDegrees(1.0, 1.0)]) # Contains(S2Point) always return false. 
self.assertFalse( polyline.Contains(s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint())) self.assertFalse( polyline.Contains(s2.S2Cell(s2.S2LatLng.FromDegrees(0.0, 0.0)))) self.assertTrue( polyline.MayIntersect(s2.S2Cell(s2.S2LatLng.FromDegrees(0.0, 0.0)))) self.assertFalse( polyline.MayIntersect(s2.S2Cell(s2.S2LatLng.FromDegrees(3.0, 4.0)))) cap_bound = polyline.GetCapBound() self.assertTrue( cap_bound.Contains(s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint())) self.assertFalse( cap_bound.Contains(s2.S2LatLng.FromDegrees(2.0, 2.0).ToPoint())) rect_bound = polyline.GetRectBound() self.assertTrue( rect_bound.Contains(s2.S2LatLng.FromDegrees(0.0, 0.0).ToPoint())) self.assertFalse( rect_bound.Contains(s2.S2LatLng.FromDegrees(2.0, 2.0).ToPoint())) def testS2CellIdCenterSiTi(self): cell = s2.S2CellId.FromFacePosLevel(3, 0x12345678, s2.S2CellId.kMaxLevel) # Check that the (si, ti) coordinates of the center end in a # 1 followed by (30 - level) 0s. # Leaf level, 30. face, si, ti = cell.GetCenterSiTi() self.assertEqual(3, face) self.assertEqual(1 << 0, si & 1) self.assertEqual(1 << 0, ti & 1) # Level 29. face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 1).GetCenterSiTi() self.assertEqual(3, face) self.assertEqual(1 << 1, si & 3) self.assertEqual(1 << 1, ti & 3) # Level 28. face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 2).GetCenterSiTi() self.assertEqual(3, face) self.assertEqual(1 << 2, si & 7) self.assertEqual(1 << 2, ti & 7) # Level 20. face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 10).GetCenterSiTi() self.assertEqual(3, face) self.assertEqual(1 << 10, si & ((1 << 11) - 1)) self.assertEqual(1 << 10, ti & ((1 << 11) - 1)) # Level 10. face, si, ti = cell.parent(s2.S2CellId.kMaxLevel - 20).GetCenterSiTi() self.assertEqual(3, face) self.assertEqual(1 << 20, si & ((1 << 21) - 1)) self.assertEqual(1 << 20, ti & ((1 << 21) - 1)) # Level 0. face, si, ti = cell.parent(0).GetCenterSiTi() self.assertEqual(3, face) self.assertEqual(1 << 30, si & ((1 << 31) - 1)) self.assertEqual(1 << 30, ti & ((1 << 31) - 1)) def testS2CellIdToFromFaceIJ(self): cell = s2.S2CellId.FromFaceIJ(3, 1234, 5678) face, i, j, _ = cell.ToFaceIJOrientation() self.assertEqual(3, face) self.assertEqual(1234, i) self.assertEqual(5678, j) def testS2EarthMetricRadians(self): radius_rad = s2.S2Earth.KmToRadians(12.34) self.assertAlmostEqual(radius_rad, 0.0019368985451286374) angle = s2.S1Angle.Radians(radius_rad) radius_m = s2.S2Earth.RadiansToMeters(angle.radians()) self.assertEqual(radius_m, 12340.0) class RegionTermIndexerTest(unittest.TestCase): def _randomCaps(self, query_type, **indexer_options): # This function creates an index consisting either of points (if # options.index_contains_points_only() is true) or S2Caps of random size. # It then executes queries consisting of points (if query_type == POINT) # or S2Caps of random size (if query_type == CAP). # # indexer_options are set on both the indexer & coverer (if relevant) # eg. _randomCaps('cap', min_level=0) calls indexer.set_min_level(0) ITERATIONS = 400 indexer = s2.S2RegionTermIndexer() coverer = s2.S2RegionCoverer() # set indexer options for opt_key, opt_value in indexer_options.items(): setter = "set_%s" % opt_key getattr(indexer, setter)(opt_value) if hasattr(coverer, setter): getattr(coverer, setter)(opt_value) caps = [] coverings = [] index = defaultdict(set) index_terms = 0 query_terms = 0 for i in range(ITERATIONS): # Choose the region to be indexed: either a single point or a cap # of random size (up to a full sphere). 
terms = [] if indexer.index_contains_points_only(): cap = s2.S2Cap.FromPoint(s2.S2Testing.RandomPoint()) terms = indexer.GetIndexTerms(cap.center(), "") else: cap = s2.S2Testing.GetRandomCap( 0.3 * s2.S2Cell.AverageArea(indexer.max_level()), 4.0 * s2.S2Cell.AverageArea(indexer.min_level()) ) terms = indexer.GetIndexTerms(cap, "") caps.append(cap) coverings.append(s2.S2CellUnion(coverer.GetCovering(cap))) for term in terms: index[term].add(i) index_terms += len(terms) for i in range(ITERATIONS): # Choose the region to be queried: either a random point or a cap of # random size. terms = [] if query_type == 'cap': cap = s2.S2Cap.FromPoint(s2.S2Testing.RandomPoint()) terms = indexer.GetQueryTerms(cap.center(), "") else: cap = s2.S2Testing.GetRandomCap( 0.3 * s2.S2Cell.AverageArea(indexer.max_level()), 4.0 * s2.S2Cell.AverageArea(indexer.min_level()) ) terms = indexer.GetQueryTerms(cap, "") # Compute the expected results of the S2Cell query by brute force. covering = s2.S2CellUnion(coverer.GetCovering(cap)) expected, actual = set(), set() for j in range(len(caps)): if covering.Intersects(coverings[j]): expected.add(j) for term in terms: actual |= index[term] self.assertEqual(expected, actual) query_terms += len(terms) print("Index terms/doc: %0.2f, Query terms/doc: %0.2f" % ( float(index_terms) / ITERATIONS, float(query_terms) / ITERATIONS) ) # We run one test case for each combination of space vs. time optimization, # and indexing regions vs. only points. def testIndexRegionsQueryRegionsOptimizeTime(self): self._randomCaps("cap", optimize_for_space=False, min_level=0, max_level=16, max_cells=20, ) def testIndexRegionsQueryPointsOptimizeTime(self): self._randomCaps("point", optimize_for_space=False, min_level=0, max_level=16, max_cells=20, ) def testIndexRegionsQueryRegionsOptimizeTimeWithLevelMod(self): self._randomCaps("cap", optimize_for_space=False, min_level=6, max_level=12, level_mod=3, ) def testIndexRegionsQueryRegionsOptimizeSpace(self): self._randomCaps("cap", optimize_for_space=True, min_level=4, max_level=s2.S2CellId.kMaxLevel, max_cells=8, ) def testIndexPointsQueryRegionsOptimizeTime(self): self._randomCaps("cap", optimize_for_space=False, min_level=0, max_level=s2.S2CellId.kMaxLevel, level_mod=2, max_cells=20, index_contains_points_only=True, ) def testIndexPointsQueryRegionsOptimizeSpace(self): self._randomCaps("cap", optimize_for_space=True, index_contains_points_only=True, ) def testMaxLevelSetLoosely(self): # Test that correct terms are generated even when (max_level - min_level) # is not a multiple of level_mod. 
indexer1 = s2.S2RegionTermIndexer() indexer1.set_min_level(1) indexer1.set_level_mod(2) indexer1.set_max_level(19) indexer2 = s2.S2RegionTermIndexer() indexer2.set_min_level(1) indexer2.set_level_mod(2) indexer2.set_max_level(19) indexer2.set_max_level(20) point = s2.S2Testing.RandomPoint() self.assertEqual( indexer1.GetIndexTerms(point, ""), indexer2.GetIndexTerms(point, "") ) self.assertEqual( indexer1.GetQueryTerms(point, ""), indexer2.GetQueryTerms(point, "") ) cap = s2.S2Testing.GetRandomCap(0.0, 1.0) self.assertEqual( indexer1.GetIndexTerms(cap, ""), indexer2.GetIndexTerms(cap, "") ) self.assertEqual( indexer1.GetQueryTerms(cap, ""), indexer2.GetQueryTerms(cap, "") ) def testS2CellIdFromDebugString(self): cell = s2.S2CellId.FromDebugString("5/31200") self.assertTrue(cell.is_valid()) self.assertEqual("5/31200", cell.ToString()) if __name__ == "__main__": unittest.main() s2geometry-0.10.0/src/python/s2.i000066400000000000000000000032061422156367100165060ustar00rootroot00000000000000%include "std_vector.i" %include "std_string.i" %include "stdint.i" %{ #include "absl/strings/string_view.h" %} %typemap(typecheck) absl::string_view = char *; %typemap(in) absl::string_view { if ($input != Py_None) { Py_ssize_t len; if (PyUnicode_Check($input)) { const char *buf; buf = PyUnicode_AsUTF8AndSize($input, &len); if (buf == nullptr) { SWIG_fail; } $1 = absl::string_view(buf, len); } else { char *buf; if (PyBytes_AsStringAndSize($input, &buf, &len) == -1) { // Python has raised an error (likely TypeError or UnicodeEncodeError). SWIG_fail; } $1 = absl::string_view(buf, len); } } } %template() std::vector; %template() std::vector; %template() std::vector; %template() std::vector; %template() std::vector; %apply int {int32}; %apply unsigned long long {uint64}; %apply std::vector const & {std::vector const &}; // Standard Google convention is to ignore all functions and methods, and // selectively add back those for which wrapping is both required and // functional. %define %ignoreall %ignore ""; %enddef %define %unignore %rename("%s") %enddef %define %unignoreall %rename("%s") ""; %enddef %define ABSL_ATTRIBUTE_ALWAYS_INLINE %enddef %define ABSL_DEPRECATED(msg) %enddef // SWIG <3.0 does not understand these C++11 keywords (unsure of exact version). #if SWIG_VERSION < 0x030000 %define constexpr const %enddef %define override %enddef #endif // Still not supported by SWIG 3.0.12. %define final %enddef %include "coder.i" %include "s2_common.i" s2geometry-0.10.0/src/python/s2_common.i000077500000000000000000000640221422156367100200640ustar00rootroot00000000000000// Copyright 2006 Google Inc. All Rights Reserved. // These SWIG definitions are shared between the internal Google and external // open source releases of s2. 
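//
// A minimal usage sketch, assuming the generated module is imported as
// "pywraps2" (the convention used by pywraps2_test.py); see that test for
// the authoritative examples:
//
//   import pywraps2 as s2
//   rect = s2.S2LatLngRect(s2.S2LatLng.FromDegrees(51.3368602, 0.4931979),
//                          s2.S2LatLng.FromDegrees(51.7323965, 0.1495211))
//   coverer = s2.S2RegionCoverer()
//   coverer.set_max_cells(6)
//   covering = coverer.GetCovering(rect)   # list of S2CellId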
%{ #include #include #include "s2/s2cell_id.h" #include "s2/s2region.h" #include "s2/s2cap.h" #include "s2/s2edge_crossings.h" #include "s2/s2earth.h" #include "s2/s2latlng.h" #include "s2/s2latlng_rect.h" #include "s2/s2loop.h" #include "s2/s2measures.h" #include "s2/s2pointutil.h" #include "s2/s2polygon.h" #include "s2/s2polyline.h" #include "s2/s2region_coverer.h" #include "s2/s2region_term_indexer.h" #include "s2/s2cell.h" #include "s2/s2cell_union.h" %} %inline %{ static PyObject *FromS2CellId(const S2CellId &cell_id) { return SWIG_NewPointerObj(new S2CellId(cell_id), SWIGTYPE_p_S2CellId, SWIG_POINTER_OWN); } %} %apply std::vector *OUTPUT {std::vector *covering}; %apply std::vector *OUTPUT {std::vector *interior}; %apply std::vector *OUTPUT {std::vector *output}; %typemap(in) absl::string_view { if (PyUnicode_Check($input)) { $1 = absl::string_view(PyUnicode_AsUTF8($input)); } else { SWIG_exception(SWIG_TypeError, "string expected"); } } %typemap(typecheck) absl::string_view = char *; %typemap(in, numinputs=0) S2CellId *OUTPUT_ARRAY_4(S2CellId temp[4]) { $1 = temp; } // For S2Polygon::GetOverlapFractions %typemap(out) std::pair { $result = Py_BuildValue("dd", $1.first, $1.second); } %typemap(argout) S2CellId *OUTPUT_ARRAY_4 { $result = PyList_New(4); if ($result == nullptr) return nullptr; for (int i = 0; i < 4; i++) { PyObject *const o = FromS2CellId($1[i]); if (!o) { Py_DECREF($result); return nullptr; } PyList_SET_ITEM($result, i, o); } } %apply S2CellId *OUTPUT_ARRAY_4 {S2CellId neighbors[4]}; // This overload shadows the one the takes vector&, and it // does not work anyway. %ignore S2CellUnion::Init(std::vector const& cell_ids); // The SWIG code which picks between overloaded methods doesn't work // when given a list parameter. SWIG_Python_ConvertPtrAndOwn calls // SWIG_Python_GetSwigThis, doesn't find the 'this' attribute and gives up. // To avoid this problem rename the Polyline::Init methods so they aren't // overloaded. We also need to reimplement them since SWIG doesn't // seem to understand absl::Span. %extend S2Polyline { public: void InitFromS2LatLngs(const std::vector& vertices) { $self->Init(absl::MakeConstSpan(vertices)); } void InitFromS2Points(const std::vector& vertices) { $self->Init(absl::MakeConstSpan(vertices)); } }; // And similarly for the overloaded S2CellUnion::Normalize method. 
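//
// From Python the renamed entry points then look roughly like this (a
// sketch drawn from pywraps2_test.py, assuming the module is imported as
// "pywraps2"):
//
//   line = s2.S2Polyline()
//   line.InitFromS2LatLngs([s2.S2LatLng.FromDegrees(0, 0),
//                           s2.S2LatLng.FromDegrees(0, 10)])
//   cell_union = s2.S2CellUnion()
//   cell_union.Init([0x466d319000000000, 0x466d31b000000000])
//   cell_union.NormalizeS2CellUnion()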
%rename(NormalizeS2CellUnion) S2CellUnion::Normalize(); %apply int *OUTPUT {int *next_vertex}; %apply int *OUTPUT {int *psi}; %apply int *OUTPUT {int *pti}; %apply int *OUTPUT {int *pi}; %apply int *OUTPUT {int *pj}; %apply int *OUTPUT {int *orientation}; %apply SWIGTYPE *DISOWN {S2Loop *loop_disown}; %typemap(in) std::vector * (std::vector loops){ PyObject *element(nullptr); PyObject *iterator(PyObject_GetIter($input)); if (!iterator) { SWIG_fail; } int i(0); while ((element = PyIter_Next(iterator))) { i++; S2Loop *loop(nullptr); int res(SWIG_ConvertPtr(element, (void **)&loop, $descriptor(S2Loop *), 0)); if (SWIG_IsOK(res)) { loops.push_back(loop->Clone()); } else { SWIG_Python_TypeError(SWIG_TypePrettyName($descriptor(S2Loop *)), element); SWIG_Python_ArgFail(i); Py_DECREF(element); Py_DECREF(iterator); SWIG_fail; } Py_DECREF(element); } Py_DECREF(iterator); $1 = &loops; } %typemap(in, numinputs=0) std::vector *out(std::vector temp) { $1 = &temp; } %typemap(argout) std::vector *out { $result = PyList_New($1->size()); if ($result == nullptr) return nullptr; for (int i = 0; i < $1->size(); i++) { PyObject *const o = SWIG_NewPointerObj((*$1)[i], $descriptor(S2Polyline *), SWIG_POINTER_OWN); if (!o) { Py_DECREF($result); return nullptr; } PyList_SET_ITEM($result, i, o); } } %inline %{ // This isn't a constructor because it clashes with the SWIG-redefinition // below and the actual S2Point (a Vector_3d). static PyObject *S2Point_FromRaw(double x, double y, double z) { // Creates an S2Point directly, mostly useful for testing. return SWIG_NewPointerObj(new S2Point(x, y, z), SWIGTYPE_p_S2Point, SWIG_POINTER_OWN); } %} // We provide our own definition of S2Point, because the real one is too // difficult to wrap correctly. class S2Point { public: double Norm(); S2Point Normalize(); ~S2Point(); }; // The extensions below exist because of the difficulty swigging S2Point. // This alternate method of S2Loop::vertex() returns a S2LatLng instead. %extend S2Loop { public: S2LatLng GetS2LatLngVertex(int i) { return S2LatLng($self->vertex(i)); } }; // This alternate method of S2Cell::GetVertex() returns a S2LatLng instead. %extend S2Cell { public: S2LatLng GetS2LatLngVertex(int k) { return S2LatLng($self->GetVertex(k)); } }; // This alternate method of S2Cell::GetEdge() returns a S2LatLng instead. %extend S2Cell { public: S2LatLng GetS2LatLngEdge(int k) { return S2LatLng($self->GetEdge(k)); } }; // The extensions below exist to work around the use of absl::Span. %extend S2Loop { public: explicit S2Loop(const std::vector& vertices) { return new S2Loop(absl::MakeConstSpan(vertices)); } void Init(const std::vector& vertices) { $self->Init(absl::MakeConstSpan(vertices)); } }; // Add raw pointer versions of these functions because SWIG doesn't // understand unique_ptr and when std::move() must be used. // TODO(user): Make swig understand unique_ptr and vector. %extend S2Polygon { public: // Takes ownership of the loop. The _disown suffix is used to tell SWIG // that S2Polygon takes ownership of the loop. explicit S2Polygon(S2Loop* loop_disown) { // SWIG recognizes this as a constructor, but implements this // as a free function, so write it that way. 
return new S2Polygon(std::unique_ptr(loop_disown)); } void InitNested(std::vector* loops) { std::vector> unique_loops(loops->size()); for (int i = 0; i < loops->size(); ++i) { unique_loops[i].reset((*loops)[i]); } loops->clear(); $self->InitNested(std::move(unique_loops)); } void IntersectWithPolyline(S2Polyline const* in, std::vector* out) const { std::vector> polylines = $self->IntersectWithPolyline(*in); S2_DCHECK(out->empty()); out->reserve(polylines.size()); for (auto& polyline : polylines) { out->push_back(polyline.release()); } } } // Expose Options functions on S2RegionCoverer until we figure out // nested classes in SWIG. %extend S2RegionCoverer { int max_cells() const { return $self->options().max_cells(); } void set_max_cells(int max_cells) { $self->mutable_options()->set_max_cells(max_cells); } int min_level() const { return $self->options().min_level(); } void set_min_level(int min_level) { $self->mutable_options()->set_min_level(min_level); } int max_level() const { return $self->options().max_level(); } void set_max_level(int max_level) { $self->mutable_options()->set_max_level(max_level); } void set_fixed_level(int fixed_level) { $self->mutable_options()->set_fixed_level(fixed_level); } int level_mod() const { return $self->options().level_mod(); } void set_level_mod(int level_mod) { $self->mutable_options()->set_level_mod(level_mod); } int true_max_level() const { return $self->options().true_max_level(); } } // Expose Options functions on S2RegionTermIndexer until we figure out // nested classes in SWIG. %extend S2RegionTermIndexer { int max_cells() const { return $self->options().max_cells(); } void set_max_cells(int max_cells) { $self->mutable_options()->set_max_cells(max_cells); } int min_level() const { return $self->options().min_level(); } void set_min_level(int min_level) { $self->mutable_options()->set_min_level(min_level); } int max_level() const { return $self->options().max_level(); } void set_max_level(int max_level) { $self->mutable_options()->set_max_level(max_level); } void set_fixed_level(int fixed_level) { $self->mutable_options()->set_fixed_level(fixed_level); } int level_mod() const { return $self->options().level_mod(); } void set_level_mod(int level_mod) { $self->mutable_options()->set_level_mod(level_mod); } int true_max_level() const { return $self->options().true_max_level(); } bool index_contains_points_only() const { return $self->options().index_contains_points_only(); } void set_index_contains_points_only(bool value) { $self->mutable_options()->set_index_contains_points_only(value); } bool optimize_for_space() const { return $self->options().optimize_for_space(); } void set_optimize_for_space(bool value) { $self->mutable_options()->set_optimize_for_space(value); } char marker_character() const { return $self->options().marker_character(); } void set_marker_character(char ch) { $self->mutable_options()->set_marker_character(ch); } } // Raise ValueError for any functions that would trigger a S2_CHECK/S2_DCHECK. 
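//
// The observable effect from Python is roughly (a sketch based on
// pywraps2_test.py):
//
//   s2.S2CellId(0x89c259c000000000).child(0)   # OK
//   s2.S2CellId(0).child(0)                    # raises ValueError
//   s2.S2CellId(0).level()                     # raises ValueError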
%pythonprepend S2CellId::child %{ if not self.is_valid(): raise ValueError("S2CellId must be valid.") if self.is_leaf(): raise ValueError("S2CellId must be non-leaf.") if not 0 <= position < 4: raise ValueError("Position must be 0-3.") %} // TODO(jrosenstock): child_begin() // TODO(jrosenstock): child_end() %pythonprepend S2CellId::child_position(int) const %{ if not self.is_valid(): raise ValueError("S2CellId must be valid.") if level < 1 or level > self.level(): raise ValueError("level must must be in range [1, S2 cell level]") %} %pythonprepend S2CellId::contains %{ if not self.is_valid() or not other.is_valid(): raise ValueError("Both S2CellIds must be valid.") %} %pythonprepend S2CellId::intersects %{ if not self.is_valid() or not other.is_valid(): raise ValueError("Both S2CellIds must be valid.") %} %pythonprepend S2CellId::level %{ # As in the C++ version: # We can't just check is_valid() because we want level() to be # defined for end-iterators, i.e. S2CellId.End(level). However there is # no good way to define S2CellId::None().level(), so we do prohibit that. if self.id() == 0: raise ValueError("None has no level.") %} %pythonprepend S2CellId::parent %{ if not self.is_valid(): raise ValueError("S2CellId must be valid.") if len(args) == 1: level, = args if level < 0: raise ValueError("Level must be non-negative.") if level > self.level(): raise ValueError("Level must be less than or equal to cell's level.") %} %ignoreall %unignore R1Interval; %ignore R1Interval::operator[]; %unignore R1Interval::GetLength; %unignore S1Angle; %unignore S1Angle::S1Angle; %unignore S1Angle::~S1Angle; %unignore S1Angle::Degrees; %unignore S1Angle::E5; %unignore S1Angle::E6; %unignore S1Angle::E7; %unignore S1Angle::Normalize; %unignore S1Angle::Normalized; %unignore S1Angle::Radians; %unignore S1Angle::UnsignedE6; %unignore S1Angle::abs; %unignore S1Angle::degrees; %unignore S1Angle::e6; %unignore S1Angle::e7; %unignore S1Angle::radians; %unignore S1ChordAngle; %unignore S1ChordAngle::ToAngle; %unignore S1Interval; %ignore S1Interval::operator[]; %unignore S1Interval::GetLength; %unignore S2; %unignore S2::CrossingSign; %unignore S2::GetIntersection; %unignore S2::Rotate; %unignore S2::TurnAngle; %unignore S2Cap; %unignore S2Cap::S2Cap; %unignore S2Cap::~S2Cap; %unignore S2Cap::AddPoint; %unignore S2Cap::ApproxEquals; %unignore S2Cap::Clone; %unignore S2Cap::Contains; %unignore S2Cap::Decode; %unignore S2Cap::Empty; %unignore S2Cap::Encode; %unignore S2Cap::Expanded; %unignore S2Cap::FromCenterArea(const S2Point&, double); %unignore S2Cap::FromCenterHeight(const S2Point&, double); %unignore S2Cap::FromPoint; %unignore S2Cap::Full; %unignore S2Cap::GetCapBound() const; %unignore S2Cap::GetCentroid; %unignore S2Cap::GetRectBound; %unignore S2Cap::Intersects; %unignore S2Cap::MayIntersect(const S2Cell&) const; %unignore S2Cap::Union; %unignore S2Cap::center; %unignore S2Cap::height; %unignore S2Cap::is_empty; %unignore S2Cap::is_valid; %unignore S2Cell; %unignore S2Cell::S2Cell; %unignore S2Cell::~S2Cell; %unignore S2Cell::ApproxArea; %unignore S2Cell::AverageArea; %unignore S2Cell::Clone; %unignore S2Cell::Contains; %unignore S2Cell::Decode; %unignore S2Cell::Encode; %unignore S2Cell::ExactArea; %unignore S2Cell::GetBoundaryDistance; %unignore S2Cell::GetCapBound() const; %unignore S2Cell::GetCenter; %unignore S2Cell::GetDistance; %unignore S2Cell::GetRectBound; %unignore S2Cell::GetS2LatLngEdge; %unignore S2Cell::GetS2LatLngVertex; %unignore S2Cell::GetVertex; %unignore S2Cell::MayIntersect(const 
S2Cell&) const; %unignore S2Cell::face; %unignore S2Cell::id; %unignore S2Cell::level; %unignore S2CellId; %unignore S2CellId::S2CellId; %unignore S2CellId::~S2CellId; %unignore S2CellId::AppendAllNeighbors(int, std::vector*) const; %rename(GetAllNeighbors) S2CellId::AppendAllNeighbors(int, std::vector*) const; %unignore S2CellId::AppendVertexNeighbors(int, std::vector*) const; %rename(GetVertexNeighbors) S2CellId::AppendVertexNeighbors(int, std::vector*) const; %unignore S2CellId::Begin; %unignore S2CellId::End; %unignore S2CellId::FromDebugString(absl::string_view); %unignore S2CellId::FromFaceIJ(int, int, int); %unignore S2CellId::FromFacePosLevel(int, uint64, int); %unignore S2CellId::FromLatLng; %unignore S2CellId::FromPoint; %unignore S2CellId::FromToken(absl::string_view); %unignore S2CellId::GetCenterSiTi(int*, int*) const; %unignore S2CellId::GetEdgeNeighbors; %unignore S2CellId::ToFaceIJOrientation(int*, int*, int*) const; %unignore S2CellId::ToLatLng; %unignore S2CellId::ToPoint; %unignore S2CellId::ToString; %unignore S2CellId::ToToken; %unignore S2CellId::child; %unignore S2CellId::child_begin; %unignore S2CellId::child_end; %unignore S2CellId::child_position(int) const; %unignore S2CellId::contains; %unignore S2CellId::face; %unignore S2CellId::id; %unignore S2CellId::intersects; %unignore S2CellId::is_leaf; %unignore S2CellId::is_face; %unignore S2CellId::is_valid; %unignore S2CellId::kMaxLevel; %unignore S2CellId::level; %unignore S2CellId::next; %unignore S2CellId::parent; %unignore S2CellId::pos; %unignore S2CellId::prev; %unignore S2CellId::range_max; %unignore S2CellId::range_min; %unignore S2CellUnion; %ignore S2CellUnion::operator[]; // Silence the SWIG warning. %unignore S2CellUnion::S2CellUnion; %unignore S2CellUnion::~S2CellUnion; %unignore S2CellUnion::ApproxArea; %unignore S2CellUnion::Clone; %unignore S2CellUnion::Contains; %unignore S2CellUnion::Decode; %unignore S2CellUnion::Denormalize(int, int, std::vector*) const; %unignore S2CellUnion::Encode; %unignore S2CellUnion::ExactArea; %unignore S2CellUnion::FromNormalized(std::vector); %unignore S2CellUnion::GetCapBound() const; %unignore S2CellUnion::GetDifference; %unignore S2CellUnion::GetRectBound; %unignore S2CellUnion::Init(std::vector const &); %unignore S2CellUnion::Intersection; %unignore S2CellUnion::Intersects; %unignore S2CellUnion::IsNormalized() const; %unignore S2CellUnion::MayIntersect(const S2Cell&) const; // SWIG doesn't handle disambiguation of the overloaded Normalize methods, so // the Normalize() instance method is renamed to NormalizeS2CellUnion. 
%unignore S2CellUnion::Normalize(std::vector*); %unignore S2CellUnion::cell_id; %unignore S2CellUnion::cell_ids; %unignore S2CellUnion::empty; %unignore S2CellUnion::num_cells; %unignore S2Earth; %unignore S2Earth::GetDistance(const S2LatLng&, const S2LatLng&); %unignore S2Earth::GetDistance(const S2Point&, const S2Point&); %unignore S2Earth::GetDistanceKm(const S2LatLng&, const S2LatLng&); %unignore S2Earth::GetDistanceKm(const S2Point&, const S2Point&); %unignore S2Earth::GetDistanceMeters(const S2LatLng&, const S2LatLng&); %unignore S2Earth::GetDistanceMeters(const S2Point&, const S2Point&); %unignore S2Earth::GetInitialBearing(const S2LatLng&, const S2LatLng&); %unignore S2Earth::HighestAltitude(); %unignore S2Earth::HighestAltitudeKm(); %unignore S2Earth::HighestAltitudeMeters(); %unignore S2Earth::KmToRadians(double); %unignore S2Earth::LowestAltitude(); %unignore S2Earth::LowestAltitudeKm(); %unignore S2Earth::LowestAltitudeMeters(); %unignore S2Earth::MetersToRadians(double); %unignore S2Earth::RadiansToKm(double); %unignore S2Earth::RadiansToMeters(double); %unignore S2Earth::Radius(); %unignore S2Earth::RadiusKm(); %unignore S2Earth::RadiusMeters(); %unignore S2Earth::SquareKmToSteradians(double); %unignore S2Earth::SquareMetersToSteradians(double); %unignore S2Earth::SteradiansToSquareKm(double); %unignore S2Earth::SteradiansToSquareMeters(double); %unignore S2Earth::ToAngle(const util::units::Meters&); %unignore S2Earth::ToChordAngle(const util::units::Meters&); %unignore S2Earth::ToDistance(const S1Angle&); %unignore S2Earth::ToDistance(const S1ChordAngle&); %unignore S2Earth::ToKm(const S1Angle&); %unignore S2Earth::ToKm(const S1ChordAngle&); %unignore S2Earth::ToLongitudeRadians(const util::units::Meters&, double); %unignore S2Earth::ToMeters(const S1Angle&); %unignore S2Earth::ToMeters(const S1ChordAngle&); %unignore S2Earth::ToRadians(const util::units::Meters&); %unignore S2LatLng; %unignore S2LatLng::S2LatLng; %unignore S2LatLng::~S2LatLng; %unignore S2LatLng::ApproxEquals; %unignore S2LatLng::FromDegrees; %unignore S2LatLng::FromE6; %unignore S2LatLng::FromE7; %unignore S2LatLng::FromRadians; %unignore S2LatLng::FromUnsignedE6; %unignore S2LatLng::FromUnsignedE7; %unignore S2LatLng::GetDistance; %unignore S2LatLng::Normalized; %unignore S2LatLng::ToPoint; %unignore S2LatLng::ToStringInDegrees; %unignore S2LatLng::coords; %unignore S2LatLng::is_valid; %unignore S2LatLng::lat; %unignore S2LatLng::lng; %unignore S2LatLngRect; %unignore S2LatLngRect::S2LatLngRect; %unignore S2LatLngRect::~S2LatLngRect; %unignore S2LatLngRect::AddPoint; %unignore S2LatLngRect::ApproxEquals; %unignore S2LatLngRect::Area; %unignore S2LatLngRect::Clone; %unignore S2LatLngRect::Contains; %unignore S2LatLngRect::Decode; %unignore S2LatLngRect::Empty; %unignore S2LatLngRect::Encode; %unignore S2LatLngRect::ExpandedByDistance; %unignore S2LatLngRect::FromCenterSize; %unignore S2LatLngRect::FromPoint; %unignore S2LatLngRect::FromPointPair; %unignore S2LatLngRect::Full; %unignore S2LatLngRect::GetCapBound() const; %unignore S2LatLngRect::GetCenter; %unignore S2LatLngRect::GetCentroid; %unignore S2LatLngRect::GetDistance; %unignore S2LatLngRect::GetRectBound; %unignore S2LatLngRect::GetSize; %unignore S2LatLngRect::GetVertex; %unignore S2LatLngRect::Intersection; %unignore S2LatLngRect::Intersects; %unignore S2LatLngRect::MayIntersect(const S2Cell&) const; %unignore S2LatLngRect::Union; %unignore S2LatLngRect::hi; %unignore S2LatLngRect::is_empty; %unignore S2LatLngRect::is_point; %unignore 
S2LatLngRect::is_valid; %unignore S2LatLngRect::lat; %unignore S2LatLngRect::lat_hi; %unignore S2LatLngRect::lat_lo; %unignore S2LatLngRect::lng; %unignore S2LatLngRect::lng_hi; %unignore S2LatLngRect::lng_lo; %unignore S2LatLngRect::lo; %unignore S2Loop; %unignore S2Loop::~S2Loop; %unignore S2Loop::Clone; %unignore S2Loop::Contains; %unignore S2Loop::Decode; %unignore S2Loop::Encode; %unignore S2Loop::Equals; %unignore S2Loop::GetArea; %unignore S2Loop::GetCapBound() const; %unignore S2Loop::GetCentroid; %unignore S2Loop::GetDistance; %unignore S2Loop::GetRectBound; %unignore S2Loop::GetS2LatLngVertex; %unignore S2Loop::Intersects; %unignore S2Loop::IsNormalized() const; %unignore S2Loop::IsValid; %unignore S2Loop::MayIntersect(const S2Cell&) const; %unignore S2Loop::Normalize; %unignore S2Loop::Project; %unignore S2Loop::depth; %unignore S2Loop::is_empty; %unignore S2Loop::is_hole; %unignore S2Loop::num_vertices; %unignore S2Loop::sign; %unignore S2Loop::vertex; %unignore S2Polygon; %unignore S2Polygon::S2Polygon; %unignore S2Polygon::~S2Polygon; %unignore S2Polygon::BoundaryNear; %unignore S2Polygon::Clone; %unignore S2Polygon::Contains; %unignore S2Polygon::Copy; %unignore S2Polygon::Decode; %unignore S2Polygon::Encode; %unignore S2Polygon::Equals; %unignore S2Polygon::GetArea; %unignore S2Polygon::GetCapBound() const; %unignore S2Polygon::GetCentroid; %unignore S2Polygon::GetDistance; %unignore S2Polygon::GetLastDescendant(int) const; %unignore S2Polygon::GetOverlapFractions(const S2Polygon&, const S2Polygon&); %unignore S2Polygon::GetRectBound; %unignore S2Polygon::Init; %unignore S2Polygon::InitNested; %unignore S2Polygon::Intersects; %unignore S2Polygon::IntersectWithPolyline; %unignore S2Polygon::IsValid; %unignore S2Polygon::MayIntersect(const S2Cell&) const; %unignore S2Polygon::Project; %unignore S2Polygon::is_empty; %unignore S2Polygon::loop; %unignore S2Polygon::num_loops; %unignore S2Polygon::num_vertices; %unignore S2Polyline; %unignore S2Polyline::S2Polyline(); %unignore S2Polyline::S2Polyline(std::vector const &); %ignore S2Polyline::S2Polyline(std::vector const &); %ignore S2Polyline::S2Polyline(std::vector const &, S2Debug); %unignore S2Polyline::~S2Polyline; %unignore S2Polyline::ApproxEquals; %unignore S2Polyline::Clone; %unignore S2Polyline::Contains; %unignore S2Polyline::Decode; %unignore S2Polyline::Encode; %unignore S2Polyline::GetCapBound() const; %unignore S2Polyline::GetCentroid; %unignore S2Polyline::GetLength; %unignore S2Polyline::GetRectBound; %unignore S2Polyline::GetSuffix; %unignore S2Polyline::Interpolate; %unignore S2Polyline::Intersects; %unignore S2Polyline::IsOnRight; %unignore S2Polyline::IsValid; %unignore S2Polyline::MayIntersect(const S2Cell&) const; %unignore S2Polyline::Project; %unignore S2Polyline::Reverse; %unignore S2Polyline::UnInterpolate; %unignore S2Polyline::num_vertices; %unignore S2Polyline::vertex; %unignore S2RegionCoverer; %unignore S2RegionCoverer::S2RegionCoverer; %unignore S2RegionCoverer::~S2RegionCoverer; %unignore S2RegionCoverer::GetCovering(const S2Region&, std::vector*); %unignore S2RegionCoverer::GetInteriorCovering(const S2Region&, std::vector*); %unignore S2RegionTermIndexer; %unignore S2RegionTermIndexer::S2RegionTermIndexer; %unignore S2RegionTermIndexer::~S2RegionTermIndexer; %unignore S2RegionTermIndexer::GetIndexTerms(const S2Point&, absl::string_view); %unignore S2RegionTermIndexer::GetIndexTerms(const S2Region&, absl::string_view); %unignore S2RegionTermIndexer::GetIndexTermsForCanonicalCovering( const 
S2CellUnion&, absl::string_view); %unignore S2RegionTermIndexer::GetQueryTerms(const S2Point&, absl::string_view); %unignore S2RegionTermIndexer::GetQueryTerms(const S2Region&, absl::string_view); %unignore S2RegionTermIndexer::GetQueryTermsForCanonicalCovering( const S2CellUnion&, absl::string_view); %include "s2/r1interval.h" %include "s2/s1angle.h" %include "s2/s1chord_angle.h" %include "s2/s1interval.h" %include "s2/s2cell_id.h" %include "s2/s2edge_crossings.h" %include "s2/s2earth.h" %include "s2/s2region.h" %include "s2/s2cap.h" %include "s2/s2latlng.h" %include "s2/s2latlng_rect.h" %include "s2/s2loop.h" %include "s2/s2measures.h" %include "s2/s2pointutil.h" %include "s2/s2polygon.h" %include "s2/s2polyline.h" %include "s2/s2region_coverer.h" %include "s2/s2region_term_indexer.h" %include "s2/s2cell.h" %include "s2/s2cell_union.h" %unignoreall %define USE_STREAM_INSERTOR_FOR_STR(type) %extend type { std::string __str__() { std::ostringstream output; output << *$self << std::ends; return output.str(); } } %enddef %define USE_EQUALS_FN_FOR_EQ_AND_NE(type) %extend type { bool __eq__(const type& other) { return $self->Equals(&other); } bool __ne__(const type& other) { return !$self->Equals(&other); } } %enddef %define USE_EQUALS_FOR_EQ_AND_NE(type) %extend type { bool __eq__(const type& other) { return *$self == other; } bool __ne__(const type& other) { return *$self != other; } } %enddef %define USE_COMPARISON_FOR_LT_AND_GT(type) %extend type { bool __lt__(const type& other) { return *$self < other; } bool __gt__(const type& other) { return *$self > other; } } %enddef %define USE_HASH_FOR_TYPE(type, hash_type) %extend type { size_t __hash__() { return hash_type()(*$self); } } %enddef USE_STREAM_INSERTOR_FOR_STR(S1Angle) USE_STREAM_INSERTOR_FOR_STR(S1Interval) USE_STREAM_INSERTOR_FOR_STR(S2CellId) USE_STREAM_INSERTOR_FOR_STR(S2Cap) USE_STREAM_INSERTOR_FOR_STR(S2LatLng) USE_STREAM_INSERTOR_FOR_STR(S2LatLngRect) USE_EQUALS_FOR_EQ_AND_NE(S2CellId) USE_COMPARISON_FOR_LT_AND_GT(S2CellId) USE_HASH_FOR_TYPE(S2CellId, S2CellIdHash) USE_EQUALS_FOR_EQ_AND_NE(S1Angle) USE_COMPARISON_FOR_LT_AND_GT(S1Angle) USE_EQUALS_FN_FOR_EQ_AND_NE(S2Loop) USE_EQUALS_FN_FOR_EQ_AND_NE(S2Polygon) USE_EQUALS_FN_FOR_EQ_AND_NE(S2Polyline) // Simple implementation of key S2Testing methods %pythoncode %{ import random class S2Testing(object): """ Simple implementation of key S2Testing methods. """ _rnd = random.Random(1) @classmethod def RandomPoint(cls): """ Return a random unit-length vector. """ x = cls._rnd.uniform(-1, 1) y = cls._rnd.uniform(-1, 1) z = cls._rnd.uniform(-1, 1) return S2Point_FromRaw(x, y, z).Normalize() @classmethod def GetRandomCap(cls, min_area, max_area): """ Return a cap with a random axis such that the log of its area is uniformly distributed between the logs of the two given values. (The log of the cap angle is also approximately uniformly distributed.) """ cap_area = max_area * pow(min_area / max_area, cls._rnd.random()) assert cap_area >= min_area assert cap_area <= max_area # The surface area of a cap is 2*Pi times its height. return S2Cap.FromCenterArea(cls.RandomPoint(), cap_area) %} s2geometry-0.10.0/src/s2/000077500000000000000000000000001422156367100150125ustar00rootroot00000000000000s2geometry-0.10.0/src/s2/_fp_contract_off.h000066400000000000000000000047131422156367100204630ustar00rootroot00000000000000// Copyright 2015 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2__FP_CONTRACT_OFF_H_ #define S2__FP_CONTRACT_OFF_H_ // Turn off the fused multiply-add optimization ("fp-contract"). With // fp-contract on, any expression of the form "a * b + c" has two possible // results, and the compiler is free to choose either of them. Effectively // this makes it impossible to write deterministic functions that involve // floating-point math. // // S2 requires deterministic arithmetic for correctness. We need to turn off // fp-contract for the entire compilation unit, because S2 has public inline // functions, and the optimization is controlled by the setting in effect when // inline functions are instantiated (not when they are defined). // // Note that there is a standard C pragma to turn off FP contraction: // #pragma STDC FP_CONTRACT OFF // but it is not implemented in GCC because the standard pragma allows control // at the level of compound statements rather than entire functions. // // This file may be included with other files in any order, as long as it // appears before the first non-inline function definition. It is // named with an underscore so that it is included first among the S2 headers. // TODO(compiler-team): Figure out how to do this in a portable way. #if defined(HAVE_ARMEABI_V7A) // Some android builds use a buggy compiler that runs out of memory while // parsing the pragma (--cpu=armeabi-v7a). #elif defined(__ANDROID__) // Other android builds use a buggy compiler that crashes with an internal // error (Android NDK R9). #elif defined(__clang__) // Clang supports the standard C++ pragma for turning off this optimization. #pragma STDC FP_CONTRACT OFF #elif defined(__GNUC__) // GCC defines its own pragma that operates at the function level rather than // the statement level. #pragma GCC optimize("fp-contract=off") #endif #endif // S2__FP_CONTRACT_OFF_H_ s2geometry-0.10.0/src/s2/base/000077500000000000000000000000001422156367100157245ustar00rootroot00000000000000s2geometry-0.10.0/src/s2/base/casts.h000066400000000000000000000270471422156367100172240ustar00rootroot00000000000000// Copyright 2009 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // // Various Google-specific casting templates. // // This code is compiled directly on many platforms, including client // platforms like Windows, Mac, and embedded systems. Before making // any changes here, make sure that you're not breaking any platforms. 
// #ifndef S2_BASE_CASTS_H_ #define S2_BASE_CASTS_H_ #include // for use with down_cast<> #include // for enumeration casts and tests #include #include "s2/base/logging.h" #include "absl/base/casts.h" // IWYU pragma: keep // An "upcast", i.e. a conversion from a pointer to an object to a pointer to a // base subobject, always succeeds if the base is unambiguous and accessible, // and so it's fine to use implicit_cast. // // A "downcast", i.e. a conversion from a pointer to an object to a pointer // to a more-derived object that may contain the original object as a base // subobject, cannot safely be done using static_cast, because you do not // generally know whether the source object is really the base subobject of // a containing, more-derived object of the target type. Thus, when you // downcast in a polymorphic type hierarchy, you should use the following // function template. // // In debug mode, we use dynamic_cast to double-check whether the downcast is // legal (we die if it's not). In normal mode, we do the efficient static_cast // instead. Thus, it's important to test in debug mode to make sure the cast is // legal! // // This is the only place in the codebase we should use dynamic_cast. // In particular, you should NOT use dynamic_cast for RTTI, e.g. for // code like this: // if (auto* p = dynamic_cast(foo)) HandleASubclass1Object(p); // if (auto* p = dynamic_cast(foo)) HandleASubclass2Object(p); // You should design the code some other way not to need this. template // use like this: down_cast(foo); inline To down_cast(From* f) { // so we only accept pointers static_assert((std::is_base_of>::value), "target type not derived from source type"); // We skip the assert and hence the dynamic_cast if RTTI is disabled. #if !defined(__GNUC__) || defined(__GXX_RTTI) // Uses RTTI in dbg and fastbuild. asserts are disabled in opt builds. assert(f == nullptr || dynamic_cast(f) != nullptr); #endif // !defined(__GNUC__) || defined(__GXX_RTTI) return static_cast(f); } // Overload of down_cast for references. Use like this: down_cast(foo). // The code is slightly convoluted because we're still using the pointer // form of dynamic cast. (The reference form throws an exception if it // fails.) // // There's no need for a special const overload either for the pointer // or the reference form. If you call down_cast with a const T&, the // compiler will just bind From to const T. template inline To down_cast(From& f) { static_assert(std::is_lvalue_reference::value, "target type not a reference"); static_assert((std::is_base_of>::value), "target type not derived from source type"); // We skip the assert and hence the dynamic_cast if RTTI is disabled. #if !defined(__GNUC__) || defined(__GXX_RTTI) // RTTI: debug mode only assert(dynamic_cast*>(&f) != nullptr); #endif // !defined(__GNUC__) || defined(__GXX_RTTI) return static_cast(f); } // **** Enumeration Casts and Tests // // C++ requires that the value of an integer that is converted to an // enumeration be within the value bounds of the enumeration. Modern // compilers can and do take advantage of this requirement to optimize // programs. So, using a raw static_cast with enums can be bad. See // // The following templates and macros enable casting from an int to an enum // with checking against the appropriate bounds. First, when defining an // enumeration, identify the limits of the values of its enumerators. // // enum A { A_min = -18, A_max = 33 }; // MAKE_ENUM_LIMITS(A, A_min, A_max) // // Convert an int to an enum in one of two ways. 
The preferred way is a // tight conversion, which ensures that A_min <= value <= A_max. // // A var = tight_enum_cast(3); // // However, the C++ language defines the set of possible values for an // enumeration to be essentially the range of a bitfield that can represent // all the enumerators, i.e. those within the nearest containing power // of two. In the example above, the nearest positive power of two is 64, // and so the upper bound is 63. The nearest negative power of two is // -32 and so the lower bound is -32 (two's complement), which is upgraded // to match the upper bound, becoming -64. The values within this range // of -64 to 63 are valid, according to the C++ standard. You can cast // values within this range as follows. // // A var = loose_enum_cast(45); // // These casts will log a message if the value does not reside within the // specified range, and will be fatal when in debug mode. // // For those times when an assert too strong, there are test functions. // // bool var = tight_enum_test(3); // bool var = loose_enum_test(45); // // For code that needs to use the enumeration value if and only if // it is good, there is a function that both tests and casts. // // int i = ....; // A var; // if (tight_enum_test_cast(i, &var)) // .... // use valid var with value as indicated by i // else // .... // handle invalid enum cast // // The enum test/cast facility is currently limited to enumerations that // fit within an int. It is also limited to two's complement ints. // ** Implementation Description // // The enum_limits template class captures the minimum and maximum // enumerator. All uses of this template are intended to be of // specializations, so the generic has a field to identify itself as // not specialized. The test/cast templates assert specialization. template class enum_limits { public: static constexpr Enum min_enumerator = 0; static constexpr Enum max_enumerator = 0; static constexpr bool is_specialized = false; }; // Now we define the macro to define the specialization for enum_limits. // The specialization checks that the enumerators fit within an int. // This checking relies on integral promotion. #define MAKE_ENUM_LIMITS(ENUM_TYPE, ENUM_MIN, ENUM_MAX) \ template <> \ class enum_limits { \ public: \ static const ENUM_TYPE min_enumerator = ENUM_MIN; \ static const ENUM_TYPE max_enumerator = ENUM_MAX; \ static const bool is_specialized = true; \ static_assert(ENUM_MIN >= INT_MIN, "enumerator too negative for int"); \ static_assert(ENUM_MAX <= INT_MAX, "enumerator too positive for int"); \ }; // The loose enum test/cast is actually the more complicated one, // because of the problem of finding the bounds. // // The unary upper bound, ub, on a positive number is its positive // saturation, i.e. for a value v within pow(2,k-1) <= v < pow(2,k), // the upper bound is pow(2,k)-1. // // The unary lower bound, lb, on a negative number is its negative // saturation, i.e. for a value v within -pow(2,k) <= v < -pow(2,k-1), // the lower bound is -pow(2,k). // // The actual bounds are (1) the binary upper bound over the maximum // enumerator and the one's complement of a negative minimum enumerator // and (2) the binary lower bound over the minimum enumerator and the // one's complement of the positive maximum enumerator, except that if no // enumerators are negative, the lower bound is zero. 
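//
// Worked example (added commentary), using the enum A { A_min = -18,
// A_max = 33 } from the comments above: the loose bounds work out to
// [-64, 63], so
//
//   MAKE_ENUM_LIMITS(A, A_min, A_max)
//   loose_enum_test<A>(45);   // true:  -64 <= 45 <= 63
//   tight_enum_test<A>(45);   // false: 45 > A_max (33)
//   tight_enum_cast<A>(3);    // ok: returns static_cast<A>(3)
//   loose_enum_test<A>(70);   // false: 70 > 63
//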
// // The algorithm relies heavily on the observation that // // a,b>0 then ub(a,b) == ub(a) | ub(b) == ub(a|b) // a,b<0 then lb(a,b) == lb(a) & lb(b) == lb(a&b) // // Note that the compiler will boil most of this code away // because of value propagation on the constant enumerator bounds. template inline bool loose_enum_test(int e_val) { static_assert(enum_limits::is_specialized, "missing MAKE_ENUM_LIMITS"); const Enum e_min = enum_limits::min_enumerator; const Enum e_max = enum_limits::max_enumerator; static_assert(sizeof(e_val) == 4 || sizeof(e_val) == 8, "unexpected int size"); // Find the unary bounding negative number of e_min and e_max. // Find the unary bounding negative number of e_max. // This would be b_min = e_max < 0 ? e_max : ~e_max, // but we want to avoid branches to help the compiler. int e_max_sign = e_max >> (sizeof(e_val) * 8 - 1); int b_min = ~e_max_sign ^ e_max; // Find the binary bounding negative of both e_min and e_max. b_min &= e_min; // However, if e_min is positive, the result will be positive. // Now clear all bits right of the most significant clear bit, // which is a negative saturation for negative numbers. // In the case of positive numbers, this is flush to zero. b_min &= b_min >> 1; b_min &= b_min >> 2; b_min &= b_min >> 4; b_min &= b_min >> 8; b_min &= b_min >> 16; #if INT_MAX > 2147483647 b_min &= b_min >> 32; #endif // Find the unary bounding positive number of e_max. int b_max = e_max_sign ^ e_max; // Find the binary bounding positive number of that // and the unary bounding positive number of e_min. int e_min_sign = e_min >> (sizeof(e_val) * 8 - 1); b_max |= e_min_sign ^ e_min; // Now set all bits right of the most significant set bit, // which is a positive saturation for positive numbers. b_max |= b_max >> 1; b_max |= b_max >> 2; b_max |= b_max >> 4; b_max |= b_max >> 8; b_max |= b_max >> 16; #if INT_MAX > 2147483647 b_max |= b_max >> 32; #endif // Finally test the bounds. return b_min <= e_val && e_val <= b_max; } template inline bool tight_enum_test(int e_val) { static_assert(enum_limits::is_specialized, "missing MAKE_ENUM_LIMITS"); const Enum e_min = enum_limits::min_enumerator; const Enum e_max = enum_limits::max_enumerator; return e_min <= e_val && e_val <= e_max; } template inline bool loose_enum_test_cast(int e_val, Enum* e_var) { if (loose_enum_test(e_val)) { *e_var = static_cast(e_val); return true; } else { return false; } } template inline bool tight_enum_test_cast(int e_val, Enum* e_var) { if (tight_enum_test(e_val)) { *e_var = static_cast(e_val); return true; } else { return false; } } template inline Enum loose_enum_cast(int e_val) { if (!loose_enum_test(e_val)) { S2_LOG(DFATAL) << "enum_cast error for value " << e_val; } return static_cast(e_val); } template inline Enum tight_enum_cast(int e_val) { if (!tight_enum_test(e_val)) { S2_LOG(DFATAL) << "enum_cast error for value " << e_val; } return static_cast(e_val); } #endif // S2_BASE_CASTS_H_ s2geometry-0.10.0/src/s2/base/commandlineflags.h000066400000000000000000000067331422156367100214110ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef S2_BASE_COMMANDLINEFLAGS_H_ #define S2_BASE_COMMANDLINEFLAGS_H_ #include #include "s2/base/integral_types.h" #include "absl/strings/string_view.h" #ifdef S2_USE_GFLAGS #include // If the GFlags library is available, map the local macro names to // GFlags macros. #define S2_DEFINE_bool DEFINE_bool #define S2_DECLARE_bool DECLARE_bool #define S2_DEFINE_double DEFINE_double #define S2_DECLARE_double DECLARE_double #define S2_DEFINE_int32 DEFINE_int32 #define S2_DECLARE_int32 DECLARE_int32 #define S2_DEFINE_int64 DEFINE_int64 #define S2_DECLARE_int64 DECLARE_int64 #define S2_DEFINE_string DEFINE_string #define S2_DECLARE_string DECLARE_string // gflags has its own int32 definitions. namespace absl { inline int32 GetFlag(google::int32 flag) { return flag; } inline int64 GetFlag(google::int64 flag) { return flag; } inline void SetFlag(google::int32* f, google::int32 v) { *f = v; } inline void SetFlag(google::int64* f, google::int64 v) { *f = v; } } // namespace absl #else // !defined(S2_USE_GFLAGS) #include #include "s2/base/integral_types.h" // Create a set of gflags-like macros for declaring/defining flags. Use // a library-specific name to potential minimize clashes with GFlags. #define S2_DEFINE_bool(name, default_value, description) \ bool FLAGS_##name = default_value #define S2_DECLARE_bool(name) \ extern bool FLAGS_##name #define S2_DEFINE_double(name, default_value, description) \ double FLAGS_##name = default_value #define S2_DECLARE_double(name) \ extern double FLAGS_##name #define S2_DEFINE_int32(name, default_value, description) \ int32 FLAGS_##name = default_value #define S2_DECLARE_int32(name) \ extern int32 FLAGS_##name #define S2_DEFINE_int64(name, default_value, description) \ int64 FLAGS_##name = default_value #define S2_DECLARE_int64(name) extern int64 FLAGS_##name #define S2_DEFINE_string(name, default_value, description) \ std::string FLAGS_##name = default_value #define S2_DECLARE_string(name) \ extern std::string FLAGS_##name namespace absl { inline int32 GetFlag(int32 flag) { return flag; } inline int64 GetFlag(int64 flag) { return flag; } inline void SetFlag(int32* f, int32 v) { *f = v; } inline void SetFlag(int64* f, int64 v) { *f = v; } } // namespace absl #endif // !defined(S2_USE_GFLAGS) // Flags are declared with v1 flags, but accessed via absl::GetFlag/SetFlag. // TODO(user,b/205817426): Convert everything to absl flags and remove gflags. namespace absl { inline bool GetFlag(bool flag) { return flag; } inline double GetFlag(double flag) { return flag; } inline std::string GetFlag(const std::string& flag) { return flag; } inline void SetFlag(bool* f, bool v) { *f = v; } inline void SetFlag(double* f, double v) { *f = v; } inline void SetFlag(std::string* f, absl::string_view v) { *f = std::string(v); } } // namespace absl #endif // S2_BASE_COMMANDLINEFLAGS_H_ s2geometry-0.10.0/src/s2/base/integral_types.h000066400000000000000000000017761422156367100211410ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. 
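//
// Usage sketch (added commentary; the flag name is hypothetical): the
// S2_DEFINE_*/S2_DECLARE_* macros and the absl::GetFlag/SetFlag shims from
// commandlineflags.h above are used together like this, and compile the same
// way whether or not S2_USE_GFLAGS is defined:
//
//   S2_DEFINE_int32(s2_example_max_cells, 8, "Illustrative flag.");
//   ...
//   int32 max_cells = absl::GetFlag(FLAGS_s2_example_max_cells);
//   absl::SetFlag(&FLAGS_s2_example_max_cells, 16);
//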
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef S2_BASE_INTEGRAL_TYPES_H_ #define S2_BASE_INTEGRAL_TYPES_H_ // NOLINTBEGIN(runtime/int) using int8 = signed char; using int16 = short; using int32 = int; using int64 = long long; using uint8 = unsigned char; using uint16 = unsigned short; using uint32 = unsigned int; using uint64 = unsigned long long; using uword_t = unsigned long; // NOLINTEND(runtime/int) #endif // S2_BASE_INTEGRAL_TYPES_H_ s2geometry-0.10.0/src/s2/base/log_severity.h000066400000000000000000000020131422156367100206040ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef S2_BASE_LOG_SEVERITY_H_ #define S2_BASE_LOG_SEVERITY_H_ #ifdef S2_USE_GLOG #include #else // !defined(S2_USE_GLOG) #include "absl/base/log_severity.h" // Stay compatible with glog. namespace google { #ifdef NDEBUG constexpr bool DEBUG_MODE = false; #else constexpr bool DEBUG_MODE = true; #endif } // namespace google #endif // !defined(S2_USE_GLOG) #endif // S2_BASE_LOG_SEVERITY_H_ s2geometry-0.10.0/src/s2/base/logging.h000066400000000000000000000125451422156367100175320ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef S2_BASE_LOGGING_H_ #define S2_BASE_LOGGING_H_ #ifdef S2_USE_GLOG #include // The names CHECK, etc. are too common and may conflict with other // packages. We use S2_CHECK to make it easier to switch to // something other than GLOG for logging. 
#define S2_LOG LOG #define S2_LOG_IF LOG_IF #define S2_DLOG DLOG #define S2_DLOG_IF DLOG_IF #define S2_CHECK CHECK #define S2_CHECK_EQ CHECK_EQ #define S2_CHECK_NE CHECK_NE #define S2_CHECK_LT CHECK_LT #define S2_CHECK_LE CHECK_LE #define S2_CHECK_GT CHECK_GT #define S2_CHECK_GE CHECK_GE #define S2_DCHECK DCHECK #define S2_DCHECK_EQ DCHECK_EQ #define S2_DCHECK_NE DCHECK_NE #define S2_DCHECK_LT DCHECK_LT #define S2_DCHECK_LE DCHECK_LE #define S2_DCHECK_GT DCHECK_GT #define S2_DCHECK_GE DCHECK_GE #define S2_VLOG VLOG #define S2_VLOG_IS_ON VLOG_IS_ON #else // !defined(S2_USE_GLOG) #include #include "absl/base/attributes.h" #include "s2/base/log_severity.h" class S2LogMessage { public: S2LogMessage(const char* file, int line, absl::LogSeverity severity, std::ostream& stream) : severity_(severity), stream_(stream) { if (enabled()) { stream_ << file << ":" << line << " " << absl::LogSeverityName(severity) << " "; } } ~S2LogMessage() { if (enabled()) stream_ << std::endl; } std::ostream& stream() { return stream_; } private: bool enabled() const { #ifdef ABSL_MIN_LOG_LEVEL return (static_cast(severity_) >= ABSL_MIN_LOG_LEVEL || severity_ >= absl::LogSeverity::kFatal); #else return true; #endif } absl::LogSeverity severity_; std::ostream& stream_; }; // Same as S2LogMessage, but destructor is marked no-return to avoid // "no return value warnings" in functions that return non-void. class S2FatalLogMessage : public S2LogMessage { public: S2FatalLogMessage(const char* file, int line, absl::LogSeverity severity, std::ostream& stream) ABSL_ATTRIBUTE_COLD : S2LogMessage(file, line, severity, stream) {} ABSL_ATTRIBUTE_NORETURN ~S2FatalLogMessage() { abort(); } }; // Logging stream that does nothing. struct S2NullStream { template S2NullStream& operator<<(const T& v) { return *this; } }; // Used to suppress "unused value" warnings. struct S2LogMessageVoidify { // Must have precedence lower than << but higher than ?:. void operator&(std::ostream&) {} }; #define S2_LOG_MESSAGE_(LogMessageClass, log_severity) \ LogMessageClass(__FILE__, __LINE__, log_severity, std::cerr) #define S2_LOG_INFO \ S2_LOG_MESSAGE_(S2LogMessage, absl::LogSeverity::kInfo) #define S2_LOG_WARNING \ S2_LOG_MESSAGE_(S2LogMessage, absl::LogSeverity::kWarning) #define S2_LOG_ERROR \ S2_LOG_MESSAGE_(S2LogMessage, absl::LogSeverity::kError) #define S2_LOG_FATAL \ S2_LOG_MESSAGE_(S2FatalLogMessage, absl::LogSeverity::kFatal) #ifndef NDEBUG #define S2_LOG_DFATAL S2_LOG_FATAL #else #define S2_LOG_DFATAL S2_LOG_ERROR #endif #define S2_LOG(severity) S2_LOG_##severity.stream() // Implementing this as if (...) {} else S2_LOG(...) will cause dangling else // warnings when someone does if (...) S2_LOG_IF(...), so do this tricky // thing instead. #define S2_LOG_IF(severity, condition) \ !(condition) ? 
(void)0 : S2LogMessageVoidify() & S2_LOG(severity) #define S2_CHECK(condition) \ S2_LOG_IF(FATAL, ABSL_PREDICT_FALSE(!(condition))) \ << ("Check failed: " #condition " ") #ifndef NDEBUG #define S2_DLOG S2_LOG #define S2_DLOG_IF S2_LOG_IF #define S2_DCHECK S2_CHECK #else // defined(NDEBUG) #define S2_DLOG(severity) \ while (false) S2NullStream() #define S2_DLOG_IF(severity, condition) \ while (false && (condition)) S2NullStream() #define S2_DCHECK(condition) \ while (false && (condition)) S2NullStream() #endif // defined(NDEBUG) #define S2_CHECK_OP(op, val1, val2) S2_CHECK((val1) op (val2)) #define S2_CHECK_EQ(val1, val2) S2_CHECK_OP(==, val1, val2) #define S2_CHECK_NE(val1, val2) S2_CHECK_OP(!=, val1, val2) #define S2_CHECK_LT(val1, val2) S2_CHECK_OP(<, val1, val2) #define S2_CHECK_LE(val1, val2) S2_CHECK_OP(<=, val1, val2) #define S2_CHECK_GT(val1, val2) S2_CHECK_OP(>, val1, val2) #define S2_CHECK_GE(val1, val2) S2_CHECK_OP(>=, val1, val2) #define S2_DCHECK_OP(op, val1, val2) S2_DCHECK((val1) op (val2)) #define S2_DCHECK_EQ(val1, val2) S2_DCHECK_OP(==, val1, val2) #define S2_DCHECK_NE(val1, val2) S2_DCHECK_OP(!=, val1, val2) #define S2_DCHECK_LT(val1, val2) S2_DCHECK_OP(<, val1, val2) #define S2_DCHECK_LE(val1, val2) S2_DCHECK_OP(<=, val1, val2) #define S2_DCHECK_GT(val1, val2) S2_DCHECK_OP(>, val1, val2) #define S2_DCHECK_GE(val1, val2) S2_DCHECK_OP(>=, val1, val2) // We don't support VLOG. #define S2_VLOG(verbose_level) S2NullStream() #define S2_VLOG_IS_ON(verbose_level) (false) #endif // !defined(S2_USE_GLOG) #endif // S2_BASE_LOGGING_H_ s2geometry-0.10.0/src/s2/base/port.h000066400000000000000000000250471422156367100170710ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
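//
// Usage sketch (added commentary) for the macros in s2/base/logging.h above;
// they behave the same whether they map to glog or to the bundled fallback:
//
//   #include "s2/base/logging.h"
//   S2_LOG(INFO) << "building index";  // suppressed by the fallback if
//                                      // ABSL_MIN_LOG_LEVEL is above kInfo
//   S2_LOG_IF(WARNING, n > 1000) << "large n: " << n;
//   S2_CHECK_GE(n, 0) << "n must be non-negative";
//   S2_DCHECK(ptr != nullptr);         // disabled in NDEBUG builds
//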
// #ifndef S2_BASE_PORT_H_ #define S2_BASE_PORT_H_ // This file contains things that are not used in third_party/absl but needed by // - Platform specific requirement // - MSVC // - Utility macros // - Endianness // - Hash // - Global variables // - Type alias // - Predefined system/language macros // - Predefined system/language functions // - Performance optimization (alignment) // - Obsolete #include #include #include #include #include "s2/base/integral_types.h" #include "absl/base/config.h" #include "absl/base/port.h" // IWYU pragma: keep #ifdef SWIG %include "third_party/absl/base/port.h" #endif // ----------------------------------------------------------------------------- // MSVC Specific Requirements // ----------------------------------------------------------------------------- #ifdef _MSC_VER /* if Visual C++ */ #include #include // _getpid() // clang-format off #include // Must come before #include // clang-format on #undef ERROR #undef DELETE #undef DIFFERENCE #define STDIN_FILENO 0 #define STDOUT_FILENO 1 #define STDERR_FILENO 2 #define S_IRUSR 00400 #define S_IWUSR 00200 #define S_IXUSR 00100 #define S_IRGRP 00040 #define S_IWGRP 00020 #define S_IXGRP 00010 #define S_IROTH 00004 #define S_IWOTH 00002 #define S_IXOTH 00001 // This compiler flag can be easily overlooked on MSVC. // _CHAR_UNSIGNED gets set with the /J flag. #ifndef _CHAR_UNSIGNED #error chars must be unsigned! Use the /J flag on the compiler command line. // NOLINT #endif // Allow comparisons between signed and unsigned values. // // Lots of Google code uses this pattern: // for (int i = 0; i < container.size(); ++i) // Since size() returns an unsigned value, this warning would trigger // frequently. Very few of these instances are actually bugs since containers // rarely exceed MAX_INT items. Unfortunately, there are bugs related to // signed-unsigned comparisons that have been missed because we disable this // warning. For example: // const long stop_time = os::GetMilliseconds() + kWaitTimeoutMillis; // while (os::GetMilliseconds() <= stop_time) { ... } #pragma warning(disable : 4018) // level 3 #pragma warning(disable : 4267) // level 3 // Don't warn about unused local variables. // // extension to silence particular instances of this warning. There's no way // to define ABSL_ATTRIBUTE_UNUSED to quiet particular instances of this warning // in VC++, so we disable it globally. Currently, there aren't many false // positives, so perhaps we can address those in the future and re-enable these // warnings, which sometimes catch real bugs. #pragma warning(disable : 4101) // level 3 // Allow initialization and assignment to a smaller type without warnings about // possible loss of data. // // There is a distinct warning, 4267, that warns about size_t conversions to // smaller types, but we don't currently disable that warning. // // Correct code can be written in such a way as to avoid false positives // by making the conversion explicit, but Google code isn't usually that // verbose. There are too many false positives to address at this time. Note // that this warning triggers at levels 2, 3, and 4 depending on the specific // type of conversion. By disabling it, we not only silence minor narrowing // conversions but also serious ones. #pragma warning(disable : 4244) // level 2, 3, and 4 // Allow silent truncation of double to float. // // Silencing this warning has caused us to miss some subtle bugs. #pragma warning(disable : 4305) // level 1 // Allow a constant to be assigned to a type that is too small. 
// // I don't know why we allow this at all. I can't think of a case where this // wouldn't be a bug, but enabling the warning breaks many builds today. #pragma warning(disable : 4307) // level 2 // Allow passing the this pointer to an initializer even though it refers // to an uninitialized object. // // Some observer implementations rely on saving the this pointer. Those are // safe because the pointer is not dereferenced until after the object is fully // constructed. This could however, obscure other instances. In the future, we // should look into disabling this warning locally rather globally. #pragma warning(disable : 4355) // level 1 and 4 // Allow implicit coercion from an integral type to a bool. // // These could be avoided by making the code more explicit, but that's never // been the style here, so there would be many false positives. It's not // obvious if a true positive would ever help to find an actual bug. #pragma warning(disable : 4800) // level 3 #endif // _MSC_VER // ----------------------------------------------------------------------------- // Endianness // ----------------------------------------------------------------------------- // IS_LITTLE_ENDIAN, IS_BIG_ENDIAN #if defined __linux__ || defined OS_ANDROID || defined(__ANDROID__) // TODO(user): http://b/21460321; use one of OS_ANDROID or __ANDROID__. // _BIG_ENDIAN #include #elif defined(__APPLE__) // BIG_ENDIAN #include // NOLINT(build/include) /* Let's try and follow the Linux convention */ #define __BYTE_ORDER BYTE_ORDER #define __LITTLE_ENDIAN LITTLE_ENDIAN #define __BIG_ENDIAN BIG_ENDIAN #endif // defines __BYTE_ORDER #ifdef _WIN32 #define __BYTE_ORDER __LITTLE_ENDIAN #define IS_LITTLE_ENDIAN #else // _WIN32 // define the macros IS_LITTLE_ENDIAN or IS_BIG_ENDIAN // using the above endian definitions from endian.h if // endian.h was included #ifdef __BYTE_ORDER #if __BYTE_ORDER == __LITTLE_ENDIAN #define IS_LITTLE_ENDIAN #endif #if __BYTE_ORDER == __BIG_ENDIAN #define IS_BIG_ENDIAN #endif #else // __BYTE_ORDER #if defined(__LITTLE_ENDIAN__) #define IS_LITTLE_ENDIAN #elif defined(__BIG_ENDIAN__) #define IS_BIG_ENDIAN #endif #endif // __BYTE_ORDER #endif // _WIN32 // byte swap functions (bswap_16, bswap_32, bswap_64). 
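//
// Usage sketch (added commentary): together with the IS_LITTLE_ENDIAN /
// IS_BIG_ENDIAN macros defined above, the bswap functions below can be used
// to put a value into a fixed on-the-wire byte order:
//
//   uint32 host_value = 0x12345678;
//   #if defined(IS_LITTLE_ENDIAN)
//   uint32 wire_value = bswap_32(host_value);  // bytes 12 34 56 78 in memory
//   #else
//   uint32 wire_value = host_value;
//   #endif
//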
// The following guarantees declaration of the byte swap functions #ifdef _MSC_VER #include // NOLINT(build/include) #define bswap_16(x) _byteswap_ushort(x) #define bswap_32(x) _byteswap_ulong(x) #define bswap_64(x) _byteswap_uint64(x) #elif defined(__APPLE__) // Mac OS X / Darwin features #include #define bswap_16(x) OSSwapInt16(x) #define bswap_32(x) OSSwapInt32(x) #define bswap_64(x) OSSwapInt64(x) #elif defined(__GLIBC__) || defined(__BIONIC__) || defined(__ASYLO__) || \ 0 #include // IWYU pragma: export #else static inline uint16 bswap_16(uint16 x) { #ifdef __cplusplus return static_cast(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8)); #else return (uint16)(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8)); // NOLINT #endif // __cplusplus } #define bswap_16(x) bswap_16(x) static inline uint32 bswap_32(uint32 x) { return (((x & 0xFF) << 24) | ((x & 0xFF00) << 8) | ((x & 0xFF0000) >> 8) | ((x & 0xFF000000) >> 24)); } #define bswap_32(x) bswap_32(x) static inline uint64 bswap_64(uint64 x) { return (((x & (uint64)0xFF) << 56) | ((x & (uint64)0xFF00) << 40) | ((x & (uint64)0xFF0000) << 24) | ((x & (uint64)0xFF000000) << 8) | ((x & (uint64)0xFF00000000) >> 8) | ((x & (uint64)0xFF0000000000) >> 24) | ((x & (uint64)0xFF000000000000) >> 40) | ((x & (uint64)0xFF00000000000000) >> 56)); } #define bswap_64(x) bswap_64(x) #endif // printf macros // __STDC_FORMAT_MACROS must be defined before inttypes.h inclusion */ #if defined(__APPLE__) /* From MacOSX's inttypes.h: * "C++ implementations should define these macros only when * __STDC_FORMAT_MACROS is defined before is included." */ #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif /* __STDC_FORMAT_MACROS */ #endif /* __APPLE__ */ // printf macros for size_t, in the style of inttypes.h #if defined(_LP64) || defined(__APPLE__) #define __PRIS_PREFIX "z" #else #define __PRIS_PREFIX #endif // Use these macros after a % in a printf format string // to get correct 32/64 bit behavior, like this: // size_t size = records.size(); // printf("%" PRIuS "\n", size); #define PRIdS __PRIS_PREFIX "d" #define PRIxS __PRIS_PREFIX "x" #define PRIuS __PRIS_PREFIX "u" #define PRIXS __PRIS_PREFIX "X" #define PRIoS __PRIS_PREFIX "o" // ----------------------------------------------------------------------------- // Performance Optimization // ----------------------------------------------------------------------------- // Alignment // Unaligned APIs // Portable handling of unaligned loads, stores, and copies. These are simply // constant-length memcpy calls. // // TODO(user): These APIs are forked in Abseil, see // "third_party/absl/base/internal/unaligned_access.h". // // The unaligned API is C++ only. The declarations use C++ features // (namespaces, inline) which are absent or incompatible in C. #if defined(__cplusplus) inline uint16 UNALIGNED_LOAD16(const void *p) { uint16 t; memcpy(&t, p, sizeof t); return t; } inline uint32 UNALIGNED_LOAD32(const void *p) { uint32 t; memcpy(&t, p, sizeof t); return t; } inline uint64 UNALIGNED_LOAD64(const void *p) { uint64 t; memcpy(&t, p, sizeof t); return t; } inline void UNALIGNED_STORE16(void *p, uint16 v) { memcpy(p, &v, sizeof v); } inline void UNALIGNED_STORE32(void *p, uint32 v) { memcpy(p, &v, sizeof v); } inline void UNALIGNED_STORE64(void *p, uint64 v) { memcpy(p, &v, sizeof v); } // The UNALIGNED_LOADW and UNALIGNED_STOREW macros load and store values // of type uword_t. 
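//
// Usage sketch (added commentary): the UNALIGNED_LOAD/STORE helpers above
// read and write through possibly misaligned pointers by going through
// memcpy, e.g.
//
//   const char* buf = ...;                  // arbitrary, possibly unaligned
//   uint32 v = UNALIGNED_LOAD32(buf + 3);
//   UNALIGNED_STORE32(out_buf + 3, v);
//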
#ifdef _LP64 #define UNALIGNED_LOADW(_p) UNALIGNED_LOAD64(_p) #define UNALIGNED_STOREW(_p, _val) UNALIGNED_STORE64(_p, _val) #else #define UNALIGNED_LOADW(_p) UNALIGNED_LOAD32(_p) #define UNALIGNED_STOREW(_p, _val) UNALIGNED_STORE32(_p, _val) #endif inline void UnalignedCopy16(const void *src, void *dst) { memcpy(dst, src, 2); } inline void UnalignedCopy32(const void *src, void *dst) { memcpy(dst, src, 4); } inline void UnalignedCopy64(const void *src, void *dst) { memcpy(dst, src, 8); } #endif // defined(__cplusplus), end of unaligned API #endif // S2_BASE_PORT_H_ s2geometry-0.10.0/src/s2/base/spinlock.h000066400000000000000000000027721422156367100177270ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef S2_BASE_SPINLOCK_H_ #define S2_BASE_SPINLOCK_H_ #include class SpinLock { public: SpinLock() = default; ~SpinLock() = default; SpinLock(SpinLock const&) = delete; SpinLock& operator=(SpinLock const&) = delete; inline void Lock() { while (locked_.exchange(true, std::memory_order_acquire)) { // Spin. continue; } } inline void Unlock() { locked_.store(false, std::memory_order_release); } inline bool IsHeld() const { return locked_.load(std::memory_order_relaxed); } private: std::atomic_bool locked_{false}; }; class SpinLockHolder { public: inline explicit SpinLockHolder(SpinLock* l) : lock_(l) { lock_->Lock(); } inline ~SpinLockHolder() { lock_->Unlock(); } SpinLockHolder(const SpinLockHolder&) = delete; SpinLockHolder& operator=(const SpinLockHolder&) = delete; private: SpinLock* lock_; }; #endif // S2_BASE_SPINLOCK_H_ s2geometry-0.10.0/src/s2/base/timer.h000066400000000000000000000022771422156367100172250ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef S2_BASE_TIMER_H_ #define S2_BASE_TIMER_H_ #include #include "s2/base/integral_types.h" class CycleTimer { public: CycleTimer() = default; void Start() { start_ = Now(); } int64 GetInMs() const { using msec = std::chrono::milliseconds; return std::chrono::duration_cast(GetDuration()).count(); } private: using Clock = std::chrono::high_resolution_clock; static Clock::time_point Now() { return Clock::now(); } Clock::duration GetDuration() const { return Now() - start_; } Clock::time_point start_; }; #endif // S2_BASE_TIMER_H_ s2geometry-0.10.0/src/s2/encoded_s2cell_id_vector.cc000066400000000000000000000145561422156367100222370ustar00rootroot00000000000000// Copyright 2018 Google Inc. 
All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_s2cell_id_vector.h" #include #include "s2/util/bits/bits.h" using absl::Span; using std::max; using std::min; using std::vector; namespace s2coding { void EncodeS2CellIdVector(Span v, Encoder* encoder) { // v[i] is encoded as (base + (deltas[i] << shift)). // // "base" consists of 0-7 bytes, and is always shifted so that its bytes are // the most-significant bytes of a uint64. // // "deltas" is an EncodedUintVector, which means that all deltas // have a fixed-length encoding determined by the largest delta. // // "shift" is in the range 0..56. The shift value is odd only if all // S2CellIds are at the same level, in which case the bit at position // (shift - 1) is automatically set to 1 in "base". // // "base" (3 bits) and "shift" (6 bits) are encoded in either one or two // bytes as follows: // // - if (shift <= 4 or shift is even), then 1 byte // - otherwise 2 bytes // // Note that (shift == 1) means that all S2CellIds are leaf cells, and // (shift == 2) means that all S2CellIds are at level 29. // // The full encoded format is as follows: // // Byte 0, bits 0-2: base length (0-7 bytes) // Byte 0, bits 3-7: encoded shift (see below) // Byte 1: extended shift code (only needed for odd shifts >= 5) // Followed by 0-7 bytes of "base" // Followed by an EncodedUintVector of deltas. uint64 v_or = 0, v_and = ~0ULL, v_min = ~0ULL, v_max = 0; for (auto cellid : v) { v_or |= cellid.id(); v_and &= cellid.id(); v_min = min(v_min, cellid.id()); v_max = max(v_max, cellid.id()); } // These variables represent the values that will used during encoding. uint64 e_base = 0; // Base value. int e_base_len = 0; // Number of bytes to represent "base". int e_shift = 0; // Delta shift. int e_max_delta_msb = 0; // Bit position of the MSB of the largest delta. if (v_or > 0) { // We only allow even shifts, unless all values have the same low bit (in // which case the shift is odd and the preceding bit is implicitly on). // There is no point in allowing shifts > 56 since deltas are encoded in // at least 1 byte each. e_shift = min(56, Bits::FindLSBSetNonZero64(v_or) & ~1); if (v_and & (1ULL << e_shift)) ++e_shift; // All S2CellIds same level. // "base" consists of the "base_len" most significant bytes of the minimum // S2CellId. We consider all possible values of "base_len" (0..7) and // choose the one that minimizes the total encoding size. uint64 e_bytes = ~0ULL; // Best encoding size so far. for (int len = 0; len < 8; ++len) { // "t_base" is the base value being tested (first "len" bytes of v_min). // "t_max_delta_msb" is the most-significant bit position of the largest // delta (or zero if there are no deltas, i.e. if v.size() == 0). // "t_bytes" is the total size of the variable portion of the encoding. 
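      // Worked example (added commentary): the mask below keeps only the top
      // "len" bytes of v_min; e.g. for len == 2 and
      // v_min == 0x123456789abcdef0, ~(~0ULL >> 16) == 0xffff000000000000 and
      // t_base == 0x1234000000000000.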
uint64 t_base = v_min & ~(~0ULL >> (8 * len)); int t_max_delta_msb = max(0, Bits::Log2Floor64((v_max - t_base) >> e_shift)); uint64 t_bytes = len + v.size() * ((t_max_delta_msb >> 3) + 1); if (t_bytes < e_bytes) { e_base = t_base; e_base_len = len; e_max_delta_msb = t_max_delta_msb; e_bytes = t_bytes; } } // It takes one extra byte to encode odd shifts (i.e., the case where all // S2CellIds are at the same level), so check whether we can get the same // encoding size per delta using an even shift. if ((e_shift & 1) && (e_max_delta_msb & 7) != 7) --e_shift; } S2_DCHECK_LE(e_base_len, 7); S2_DCHECK_LE(e_shift, 56); encoder->Ensure(2 + e_base_len); // As described above, "shift" and "base_len" are encoded in 1 or 2 bytes. // "shift_code" is 5 bits: // values <= 28 represent even shifts in the range 0..56 // values 29, 30 represent odd shifts 1 and 3 // value 31 indicates that the shift is odd and encoded in the next byte int shift_code = e_shift >> 1; if (e_shift & 1) shift_code = min(31, shift_code + 29); encoder->put8((shift_code << 3) | e_base_len); if (shift_code == 31) { encoder->put8(e_shift >> 1); // Shift is always odd, so 3 bits unused. } // Encode the "base_len" most-significant bytes of "base". uint64 base_bytes = e_base >> (64 - 8 * max(1, e_base_len)); EncodeUintWithLength(base_bytes, e_base_len, encoder); // Finally, encode the vector of deltas. vector deltas; deltas.reserve(v.size()); for (auto cellid : v) { deltas.push_back((cellid.id() - e_base) >> e_shift); } EncodeUintVector(deltas, encoder); } bool EncodedS2CellIdVector::Init(Decoder* decoder) { // All encodings have at least 2 bytes (one for our header and one for the // EncodedUintVector header), so this is safe. if (decoder->avail() < 2) return false; // Invert the encoding of (shift_code, base_len) described above. int code_plus_len = decoder->get8(); int shift_code = code_plus_len >> 3; if (shift_code == 31) { shift_code = 29 + decoder->get8(); if (shift_code > 56) return false; // Valid range 0..56 } // Decode the "base_len" most-significant bytes of "base". int base_len = code_plus_len & 7; if (!DecodeUintWithLength(base_len, decoder, &base_)) return false; base_ <<= 64 - 8 * max(1, base_len); // Invert the encoding of "shift_code" described above. if (shift_code >= 29) { shift_ = 2 * (shift_code - 29) + 1; base_ |= 1ULL << (shift_ - 1); } else { shift_ = 2 * shift_code; } return deltas_.Init(decoder); } vector EncodedS2CellIdVector::Decode() const { vector result(size()); for (int i = 0; i < size(); ++i) { result[i] = (*this)[i]; } return result; } } // namespace s2coding s2geometry-0.10.0/src/s2/encoded_s2cell_id_vector.h000066400000000000000000000077351422156367100221020ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
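//
// Usage sketch (added commentary; mirrors the pattern used by the unit
// tests) for the interfaces declared below:
//
//   std::vector<S2CellId> ids = ...;
//   Encoder encoder;
//   s2coding::EncodeS2CellIdVector(ids, &encoder);
//
//   Decoder decoder(encoder.base(), encoder.length());
//   s2coding::EncodedS2CellIdVector encoded;
//   if (encoded.Init(&decoder)) {
//     S2CellId first = encoded[0];                 // decoded lazily on access
//     std::vector<S2CellId> all = encoded.Decode();
//   }
//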
// // Author: ericv@google.com (Eric Veach) #ifndef S2_ENCODED_S2CELL_ID_VECTOR_H_ #define S2_ENCODED_S2CELL_ID_VECTOR_H_ #include "absl/types/span.h" #include "s2/encoded_uint_vector.h" #include "s2/s2cell_id.h" namespace s2coding { // Encodes a vector of S2CellIds in a format that can later be decoded as an // EncodedS2CellIdVector. The S2CellIds do not need to be valid. // // REQUIRES: "encoder" uses the default constructor, so that its buffer // can be enlarged as necessary by calling Ensure(int). void EncodeS2CellIdVector(absl::Span v, Encoder* encoder); // This class represents an encoded vector of S2CellIds. Values are decoded // only when they are accessed. This allows for very fast initialization and // no additional memory use beyond the encoded data. The encoded data is not // owned by this class; typically it points into a large contiguous buffer // that contains other encoded data as well. // // This is one of several helper classes that allow complex data structures to // be initialized from an encoded format in constant time and then decoded on // demand. This can be a big performance advantage when only a small part of // the data structure is actually used. // // The S2CellIds do not need to be sorted or at the same level. The // implementation is biased towards minimizing decoding times rather than // space usage. // // NOTE: If your S2CellIds represent S2Points that have been snapped to // S2CellId centers, then EncodedS2PointVector is both faster and smaller. class EncodedS2CellIdVector { public: // Constructs an uninitialized object; requires Init() to be called. EncodedS2CellIdVector() {} // Initializes the EncodedS2CellIdVector. // // REQUIRES: The Decoder data buffer must outlive this object. bool Init(Decoder* decoder); // Returns the size of the original vector. size_t size() const; // Returns the element at the given index. S2CellId operator[](int i) const; // Returns the index of the first element x such that (x >= target), or // size() if no such element exists. // // REQUIRES: The vector elements are sorted in non-decreasing order. size_t lower_bound(S2CellId target) const; // Decodes and returns the entire original vector. std::vector Decode() const; private: // Values are decoded as (base_ + (deltas_[i] << shift_)). EncodedUintVector deltas_; uint64 base_; uint8 shift_; }; ////////////////// Implementation details follow //////////////////// inline size_t EncodedS2CellIdVector::size() const { return deltas_.size(); } inline S2CellId EncodedS2CellIdVector::operator[](int i) const { return S2CellId((deltas_[i] << shift_) + base_); } inline size_t EncodedS2CellIdVector::lower_bound(S2CellId target) const { // We optimize the search by converting "target" to the corresponding delta // value and then searching directly in the deltas_ vector. // // To invert operator[], we essentially compute ((target - base_) >> shift_) // except that we also need to round up when shifting. The first two cases // ensure that "target" doesn't wrap around past zero when we do this. if (target.id() <= base_) return 0; if (target >= S2CellId::End(S2CellId::kMaxLevel)) return size(); return deltas_.lower_bound( (target.id() - base_ + (uint64{1} << shift_) - 1) >> shift_); } } // namespace s2coding #endif // S2_ENCODED_S2CELL_ID_VECTOR_H_ s2geometry-0.10.0/src/s2/encoded_s2cell_id_vector_test.cc000066400000000000000000000231761422156367100232740ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. 
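//
// Worked example (added commentary; values are hypothetical) for the decoding
// formula in encoded_s2cell_id_vector.h above, where element i is
// (base_ + (deltas_[i] << shift_)): with base_ = 0x1230, shift_ = 4 and
// deltas_ = {0, 3, 7} the stored values are 0x1230, 0x1260 and 0x12a0.
// lower_bound(S2CellId(0x1265)) converts the target to a delta by rounding
// up, (0x1265 - 0x1230 + 0xf) >> 4 == 4, and returns 2, the index of the
// first delta >= 4 (i.e. of 0x12a0).
//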
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_s2cell_id_vector.h" #include #include #include "absl/memory/memory.h" #include "s2/s2cell_id.h" #include "s2/s2loop.h" #include "s2/s2pointutil.h" #include "s2/s2shape_index.h" #include "s2/s2testing.h" #include "s2/s2text_format.h" using absl::make_unique; using s2textformat::MakeCellIdOrDie; using std::vector; namespace s2coding { namespace { // Encodes the given vector and returns the corresponding // EncodedS2CellIdVector (which points into the Encoder's data buffer). EncodedS2CellIdVector MakeEncodedS2CellIdVector(const vector& input, Encoder* encoder) { EncodeS2CellIdVector(input, encoder); Decoder decoder(encoder->base(), encoder->length()); EncodedS2CellIdVector cell_ids; EXPECT_TRUE(cell_ids.Init(&decoder)); return cell_ids; } // Encodes the given vector and checks that it has the expected size and // contents. void TestEncodedS2CellIdVector(const vector& expected, size_t expected_bytes) { Encoder encoder; EncodedS2CellIdVector actual = MakeEncodedS2CellIdVector(expected, &encoder); EXPECT_EQ(expected_bytes, encoder.length()); EXPECT_EQ(actual.Decode(), expected); } // Like the above, but accepts a vector rather than a vector. void TestEncodedS2CellIdVector(const vector& raw_expected, size_t expected_bytes) { vector expected; for (uint64 raw_id : raw_expected) { expected.push_back(S2CellId(raw_id)); } TestEncodedS2CellIdVector(expected, expected_bytes); } TEST(EncodedS2CellIdVector, Empty) { TestEncodedS2CellIdVector(vector{}, 2); } TEST(EncodedS2CellIdVector, None) { TestEncodedS2CellIdVector({S2CellId::None()}, 3); } TEST(EncodedS2CellIdVector, NoneNone) { TestEncodedS2CellIdVector({S2CellId::None(), S2CellId::None()}, 4); } TEST(EncodedS2CellIdVector, Sentinel) { TestEncodedS2CellIdVector({S2CellId::Sentinel()}, 10); } TEST(EncodedS2CellIdVector, MaximumShiftCell) { // Tests the encoding of a single cell at level 2, which corresponds the // maximum encodable shift value (56). TestEncodedS2CellIdVector({MakeCellIdOrDie("0/00")}, 3); } TEST(EncodedS2CellIdVector, SentinelSentinel) { TestEncodedS2CellIdVector({S2CellId::Sentinel(), S2CellId::Sentinel()}, 11); } TEST(EncodedS2CellIdVector, NoneSentinelNone) { TestEncodedS2CellIdVector( {S2CellId::None(), S2CellId::Sentinel(), S2CellId::None()}, 26); } TEST(EncodedS2CellIdVector, InvalidCells) { // Tests that cells with an invalid LSB can be encoded. TestEncodedS2CellIdVector({0x6, 0xe, 0x7e}, 5); } TEST(EncodedS2CellIdVector, OneByteLeafCells) { // Tests that (1) if all cells are leaf cells, the low bit is not encoded, // and (2) this can be indicated using the standard 1-byte header. TestEncodedS2CellIdVector({0x3, 0x7, 0x177}, 5); } TEST(EncodedS2CellIdVector, OneByteLevel29Cells) { // Tests that (1) if all cells are at level 29, the low bit is not encoded, // and (2) this can be indicated using the standard 1-byte header. 
TestEncodedS2CellIdVector({0xc, 0x1c, 0x47c}, 5); } TEST(EncodedS2CellIdVector, OneByteLevel28Cells) { // Tests that (1) if all cells are at level 28, the low bit is not encoded, // and (2) this can be indicated using the extended 2-byte header. TestEncodedS2CellIdVector({0x30, 0x70, 0x1770}, 6); } TEST(EncodedS2CellIdVector, OneByteMixedCellLevels) { // Tests that cells at mixed levels can be encoded in one byte. TestEncodedS2CellIdVector({0x300, 0x1c00, 0x7000, 0xff00}, 6); } TEST(EncodedS2CellIdVector, OneByteMixedCellLevelsWithPrefix) { // Tests that cells at mixed levels can be encoded in one byte even when // they share a multi-byte prefix. TestEncodedS2CellIdVector({ 0x1234567800000300, 0x1234567800001c00, 0x1234567800007000, 0x123456780000ff00}, 10); } TEST(EncodedS2CellIdVector, OneByteRangeWithBaseValue) { // Tests that cells can be encoded in one byte by choosing a base value // whose bit range overlaps the delta values. // 1 byte header, 3 bytes base, 1 byte size, 4 bytes deltas TestEncodedS2CellIdVector({ 0x00ffff0000000000, 0x0100fc0000000000, 0x0100500000000000, 0x0100330000000000}, 9); } TEST(EncodedS2CellIdVector, MaxShiftRange) { const std::vector bytes = { (31 << 3) // 31 -> add 29 to bytes[1]. + 1, // Number of encoded cell IDs. 27, // 27+29 is the maximum supported shift. 1, 0 // Encoded cell ID. Not important. }; Decoder decoder(bytes.data(), bytes.size()); EncodedS2CellIdVector cell_ids; EXPECT_TRUE(cell_ids.Init(&decoder)); } TEST(EncodedS2CellIdVector, ShiftOutOfRange) { const std::vector bytes = { (31 << 3) // 31 -> add 29 to bytes[1]. + 1, // Number of encoded cell IDs. 28, // 28+29 is greater than the maximum supported shift of 56. 1, 0 // Encoded cell ID. Not important. }; Decoder decoder(bytes.data(), bytes.size()); EncodedS2CellIdVector cell_ids; EXPECT_FALSE(cell_ids.Init(&decoder)); } TEST(EncodedS2CellIdVector, SixFaceCells) { vector ids; for (int face = 0; face < 6; ++face) { ids.push_back(S2CellId::FromFace(face)); } TestEncodedS2CellIdVector(ids, 8); } TEST(EncodedS2CellIdVector, FourLevel10Children) { vector ids; S2CellId parent = MakeCellIdOrDie("3/012301230"); for (S2CellId id = parent.child_begin(); id != parent.child_end(); id = id.next()) { ids.push_back(id); } TestEncodedS2CellIdVector(ids, 8); } TEST(EncodedS2CellIdVector, FractalS2ShapeIndexCells) { S2Testing::Fractal fractal; fractal.SetLevelForApproxMaxEdges(3 * 1024); S2Point center = s2textformat::MakePointOrDie("47.677:-122.206"); MutableS2ShapeIndex index; index.Add(make_unique( fractal.MakeLoop(S2::GetFrame(center), S1Angle::Degrees(1)))); vector ids; for (MutableS2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { ids.push_back(it.id()); } EXPECT_EQ(966, ids.size()); TestEncodedS2CellIdVector(ids, 2902); } TEST(EncodedS2CellIdVector, CoveringCells) { vector ids { 0x414a617f00000000, 0x414a61c000000000, 0x414a624000000000, 0x414a63c000000000, 0x414a647000000000, 0x414a64c000000000, 0x414a653000000000, 0x414a704000000000, 0x414a70c000000000, 0x414a714000000000, 0x414a71b000000000, 0x414a7a7c00000000, 0x414a7ac000000000, 0x414a8a4000000000, 0x414a8bc000000000, 0x414a8c4000000000, 0x414a8d7000000000, 0x414a8dc000000000, 0x414a914000000000, 0x414a91c000000000, 0x414a924000000000, 0x414a942c00000000, 0x414a95c000000000, 0x414a96c000000000, 0x414ab0c000000000, 0x414ab14000000000, 0x414ab34000000000, 0x414ab3c000000000, 0x414ab44000000000, 0x414ab4c000000000, 0x414ab6c000000000, 0x414ab74000000000, 0x414ab8c000000000, 0x414ab94000000000, 0x414aba1000000000, 
0x414aba3000000000, 0x414abbc000000000, 0x414abe4000000000, 0x414abec000000000, 0x414abf4000000000, 0x46b5454000000000, 0x46b545c000000000, 0x46b5464000000000, 0x46b547c000000000, 0x46b5487000000000, 0x46b548c000000000, 0x46b5494000000000, 0x46b54a5400000000, 0x46b54ac000000000, 0x46b54b4000000000, 0x46b54bc000000000, 0x46b54c7000000000, 0x46b54c8004000000, 0x46b54ec000000000, 0x46b55ad400000000, 0x46b55b4000000000, 0x46b55bc000000000, 0x46b55c4000000000, 0x46b55c8100000000, 0x46b55dc000000000, 0x46b55e4000000000, 0x46b5604000000000, 0x46b560c000000000, 0x46b561c000000000, 0x46ca424000000000, 0x46ca42c000000000, 0x46ca43c000000000, 0x46ca444000000000, 0x46ca45c000000000, 0x46ca467000000000, 0x46ca469000000000, 0x46ca5fc000000000, 0x46ca604000000000, 0x46ca60c000000000, 0x46ca674000000000, 0x46ca679000000000, 0x46ca67f000000000, 0x46ca684000000000, 0x46ca855000000000, 0x46ca8c4000000000, 0x46ca8cc000000000, 0x46ca8e5400000000, 0x46ca8ec000000000, 0x46ca8f0100000000, 0x46ca8fc000000000, 0x46ca900400000000, 0x46ca98c000000000, 0x46ca994000000000, 0x46ca99c000000000, 0x46ca9a4000000000, 0x46ca9ac000000000, 0x46ca9bd500000000, 0x46ca9e4000000000, 0x46ca9ec000000000, 0x46caf34000000000, 0x46caf4c000000000, 0x46caf54000000000 }; EXPECT_EQ(97, ids.size()); TestEncodedS2CellIdVector(ids, 488); } TEST(EncodedS2CellIdVector, LowerBoundLimits) { // Test seeking before the beginning and past the end of the vector. S2CellId first = S2CellId::Begin(S2CellId::kMaxLevel); S2CellId last = S2CellId::End(S2CellId::kMaxLevel).prev(); Encoder encoder; EncodedS2CellIdVector cell_ids = MakeEncodedS2CellIdVector( {first, last}, &encoder); EXPECT_EQ(0, cell_ids.lower_bound(S2CellId::None())); EXPECT_EQ(0, cell_ids.lower_bound(first)); EXPECT_EQ(1, cell_ids.lower_bound(first.next())); EXPECT_EQ(1, cell_ids.lower_bound(last.prev())); EXPECT_EQ(1, cell_ids.lower_bound(last)); EXPECT_EQ(2, cell_ids.lower_bound(last.next())); EXPECT_EQ(2, cell_ids.lower_bound(S2CellId::Sentinel())); } } // namespace } // namespace s2coding s2geometry-0.10.0/src/s2/encoded_s2point_vector.cc000066400000000000000000001102441422156367100217640ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_s2point_vector.h" #include #include "absl/base/internal/unaligned_access.h" #include "s2/util/bits/bits.h" #include "s2/s2cell_id.h" #include "s2/s2coords.h" using absl::MakeSpan; using absl::Span; using std::max; using std::min; using std::vector; namespace s2coding { // Like util_bits::InterleaveUint32, but interleaves bit pairs rather than // individual bits. This format is faster to decode than the fully interleaved // format, and produces the same results for our use case. 
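// Worked example (added commentary): bit pair i of val0 lands in result bit
// pair 2*i and bit pair i of val1 lands in bit pair 2*i + 1; for instance
// InterleaveUint32BitPairs(0b0110, 0b0011) interleaves the pairs {01,10} and
// {00,11} into 0b00011110 (0x1e).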
inline uint64 InterleaveUint32BitPairs(const uint32 val0, const uint32 val1) { uint64 v0 = val0, v1 = val1; v0 = (v0 | (v0 << 16)) & 0x0000ffff0000ffff; v1 = (v1 | (v1 << 16)) & 0x0000ffff0000ffff; v0 = (v0 | (v0 << 8)) & 0x00ff00ff00ff00ff; v1 = (v1 | (v1 << 8)) & 0x00ff00ff00ff00ff; v0 = (v0 | (v0 << 4)) & 0x0f0f0f0f0f0f0f0f; v1 = (v1 | (v1 << 4)) & 0x0f0f0f0f0f0f0f0f; v0 = (v0 | (v0 << 2)) & 0x3333333333333333; v1 = (v1 | (v1 << 2)) & 0x3333333333333333; return v0 | (v1 << 2); } // This code is about 50% faster than util_bits::DeinterleaveUint32, which // uses a lookup table. The speed advantage is expected to be even larger in // code that mixes bit interleaving with other significant operations since it // doesn't require keeping a 256-byte lookup table in the L1 data cache. inline void DeinterleaveUint32BitPairs(uint64 code, uint32 *val0, uint32 *val1) { uint64 v0 = code, v1 = code >> 2; v0 &= 0x3333333333333333; v0 |= v0 >> 2; v1 &= 0x3333333333333333; v1 |= v1 >> 2; v0 &= 0x0f0f0f0f0f0f0f0f; v0 |= v0 >> 4; v1 &= 0x0f0f0f0f0f0f0f0f; v1 |= v1 >> 4; v0 &= 0x00ff00ff00ff00ff; v0 |= v0 >> 8; v1 &= 0x00ff00ff00ff00ff; v1 |= v1 >> 8; v0 &= 0x0000ffff0000ffff; v0 |= v0 >> 16; v1 &= 0x0000ffff0000ffff; v1 |= v1 >> 16; *val0 = v0; *val1 = v1; } // Forward declarations. void EncodeS2PointVectorFast(Span points, Encoder* encoder); void EncodeS2PointVectorCompact(Span points, Encoder* encoder); // To save space (especially for vectors of length 0, 1, and 2), the encoding // format is encoded in the low-order 3 bits of the vector size. Up to 7 // encoding formats are supported (only 2 are currently defined). Additional // formats could be supported by using "7" as an overflow indicator and // encoding the actual format separately, but it seems unlikely we will ever // need to do that. static const int kEncodingFormatBits = 3; static const uint8 kEncodingFormatMask = (1 << kEncodingFormatBits) - 1; void EncodeS2PointVector(Span points, CodingHint hint, Encoder* encoder) { switch (hint) { case CodingHint::FAST: return EncodeS2PointVectorFast(points, encoder); case CodingHint::COMPACT: return EncodeS2PointVectorCompact(points, encoder); default: S2_LOG(DFATAL) << "Unknown CodingHint: " << static_cast(hint); } } bool EncodedS2PointVector::Init(Decoder* decoder) { if (decoder->avail() < 1) return false; // Peek at the format but don't advance the decoder; the format-specific // Init functions will do that. format_ = static_cast(*decoder->skip(0) & kEncodingFormatMask); switch (format_) { case UNCOMPRESSED: return InitUncompressedFormat(decoder); case CELL_IDS: return InitCellIdsFormat(decoder); default: return false; } } vector EncodedS2PointVector::Decode() const { vector points; points.reserve(size_); for (int i = 0; i < size_; ++i) { points.push_back((*this)[i]); } return points; } // The encoding must be identical to EncodeS2PointVector(). void EncodedS2PointVector::Encode(Encoder* encoder) const { switch (format_) { case UNCOMPRESSED: EncodeS2PointVectorFast(MakeSpan(uncompressed_.points, size_), encoder); break; case CELL_IDS: { // This is a full decode/encode dance, and not at all efficient. EncodeS2PointVectorCompact(Decode(), encoder); break; } default: S2_LOG(FATAL) << "Unknown Format: " << static_cast(format_); } } ////////////////////////////////////////////////////////////////////////////// // UNCOMPRESSED Encoding Format ////////////////////////////////////////////////////////////////////////////// // Encodes a vector of points, optimizing for (encoding and decoding) speed. 
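//
// Usage sketch (added commentary) for the public entry points
// EncodeS2PointVector / EncodedS2PointVector declared in the corresponding
// header:
//
//   std::vector<S2Point> points = ...;
//   Encoder encoder;
//   s2coding::EncodeS2PointVector(points, s2coding::CodingHint::COMPACT,
//                                 &encoder);
//
//   Decoder decoder(encoder.base(), encoder.length());
//   s2coding::EncodedS2PointVector encoded;
//   if (encoded.Init(&decoder)) {
//     S2Point p = encoded[0];   // decoded on access
//   }
//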
void EncodeS2PointVectorFast(Span points, Encoder* encoder) { #ifndef IS_LITTLE_ENDIAN S2_LOG(FATAL) << "Not implemented on big-endian architectures"; #endif // This function always uses the UNCOMPRESSED encoding. The header consists // of a varint64 in the following format: // // bits 0-2: encoding format (UNCOMPRESSED) // bits 3-63: vector size // // This is followed by an array of S2Points in little-endian order. encoder->Ensure(Varint::kMax64 + points.size() * sizeof(S2Point)); uint64 size_format = (points.size() << kEncodingFormatBits | EncodedS2PointVector::UNCOMPRESSED); encoder->put_varint64(size_format); encoder->putn(points.data(), points.size() * sizeof(S2Point)); } bool EncodedS2PointVector::InitUncompressedFormat(Decoder* decoder) { #if !defined(IS_LITTLE_ENDIAN) || defined(__arm__) || \ defined(ABSL_INTERNAL_NEED_ALIGNED_LOADS) // TODO(ericv): Make this work on platforms that don't support unaligned // 64-bit little-endian reads, e.g. by falling back to // // bit_cast(little_endian::Load64()). // // Maybe the compiler is smart enough that we can do this all the time, // but more likely we will need two cases using the #ifdef above. // (Note that even ARMv7 does not support unaligned 64-bit loads.) S2_LOG(DFATAL) << "Needs architecture with 64-bit little-endian unaligned loads"; return false; #endif uint64 size; if (!decoder->get_varint64(&size)) return false; size >>= kEncodingFormatBits; // Note that the encoding format supports up to 2**59 vertices, but we // currently only support decoding up to 2**32 vertices. if (size > std::numeric_limits::max()) return false; size_ = size; size_t bytes = size_t{size_} * sizeof(S2Point); if (decoder->avail() < bytes) return false; uncompressed_.points = reinterpret_cast(decoder->skip(0)); decoder->skip(bytes); return true; } ////////////////////////////////////////////////////////////////////////////// // CELL_IDS Encoding Format ////////////////////////////////////////////////////////////////////////////// // Represents a point that can be encoded as an S2CellId center. // (If such an encoding is not possible then level < 0.) struct CellPoint { // Constructor necessary in order to narrow "int" arguments to "int8". CellPoint(int level, int face, uint32 si, uint32 ti) : level(level), face(face), si(si), ti(ti) {} int8 level, face; uint32 si, ti; }; // S2CellIds are represented in a special 64-bit format and are encoded in // fixed-size blocks. kBlockSize represents the number of values per block. // Block sizes of 4, 8, 16, and 32 were tested and kBlockSize == 16 seems to // offer the best compression. (Note that kBlockSize == 32 requires some code // modifications which have since been removed.) static constexpr int kBlockShift = 4; static constexpr size_t kBlockSize = 1 << kBlockShift; // Used to indicate that a point must be encoded as an exception (a 24-byte // S2Point) rather than as an S2CellId. static constexpr uint64 kException = ~0ULL; // Represents the encoding parameters to be used for a given block (consisting // of kBlockSize encodable 64-bit values). See below. struct BlockCode { int delta_bits; // Delta length in bits (multiple of 4) int offset_bits; // Offset length in bits (multiple of 8) int overlap_bits; // {Delta, Offset} overlap in bits (0 or 4) }; // Returns a bit mask with "n" low-order 1 bits, for 0 <= n <= 64. inline uint64 BitMask(int n) { return (n == 0) ? 0 : (~0ULL >> (64 - n)); } // Returns the maximum number of bits per value at the given S2CellId level. 
inline int MaxBitsForLevel(int level) { return 2 * level + 3; } // Returns the number of bits that "base" should be right-shifted in order to // encode only its leading "base_bits" bits, assuming that all points are // encoded at the given S2CellId level. inline int BaseShift(int level, int base_bits) { return max(0, MaxBitsForLevel(level) - base_bits); } // Forward declarations. int ChooseBestLevel(Span points, vector* cell_points); vector ConvertCellsToValues(const vector& cell_points, int level, bool* have_exceptions); uint64 ChooseBase(const vector& values, int level, bool have_exceptions, int* base_bits); BlockCode GetBlockCode(Span values, uint64 base, bool have_exceptions); // Encodes a vector of points, optimizing for space. void EncodeS2PointVectorCompact(Span points, Encoder* encoder) { // OVERVIEW // -------- // // We attempt to represent each S2Point as the center of an S2CellId. All // S2CellIds must be at the same level. Any points that cannot be encoded // exactly as S2CellId centers are stored as exceptions using 24 bytes each. // If there are so many exceptions that the CELL_IDS encoding does not save // significant space, we give up and use the uncompressed encoding. // // The first step is to choose the best S2CellId level. This requires // converting each point to (face, si, ti) coordinates and checking whether // the point can be represented exactly as an S2CellId center at some level. // We then build a histogram of S2CellId levels (just like the similar code // in S2Polygon::Encode) and choose the best level (or give up, if there are // not enough S2CellId-encodable points). // // The simplest approach would then be to take all the S2CellIds and // right-shift them to remove all the constant bits at the chosen level. // This would give the best spatial locality and hence the smallest deltas. // However instead we give up some spatial locality and use the similar but // faster transformation described below. // // Each encodable point is first converted to the (sj, tj) representation // defined below: // // sj = (((face & 3) << 30) | (si >> 1)) >> (30 - level); // tj = (((face & 4) << 29) | ti) >> (31 - level); // // These two values encode the (face, si, ti) tuple using (2 * level + 3) // bits. To see this, recall that "si" and "ti" are 31-bit values that all // share a common suffix consisting of a "1" bit followed by (30 - level) // "0" bits. The code above right-shifts these values to remove the // constant bits and then prepends the bits for "face", yielding a total of // (level + 2) bits for "sj" and (level + 1) bits for "tj". // // We then combine (sj, tj) into one 64-bit value by interleaving bit pairs: // // v = InterleaveBitPairs(sj, tj); // // (We could also interleave individual bits, but it is faster this way.) // The result is similar to right-shifting an S2CellId by (61 - 2 * level), // except that it is faster to decode and the spatial locality is not quite // as good. // // The 64-bit values are divided into blocks of size kBlockSize, and then // each value is encoded as the sum of a base value, a per-block offset, and // a per-value delta within that block: // // v[i,j] = base + offset[i] + delta[i, j] // // where "i" represents a block and "j" represents an entry in that block. // // The deltas for each block are encoded using a fixed number of 4-bit nibbles // (1-16 nibbles per delta). This allows any delta to be accessed in constant // time. // // The "offset" for each block is a 64-bit value encoded in 0-8 bytes. 
The // offset is left-shifted such that it overlaps the deltas by a configurable // number of bits (either 0 or 4), called the "overlap". The overlap and // offset length (0-8 bytes) are specified per block. The reason for the // overlap is that it allows fewer delta bits to be used in some cases. For // example if base == 0 and the range within a block is 0xf0 to 0x110, then // rather than using 12-bits deltas with an offset of 0, the overlap lets us // use 8-bits deltas with an offset of 0xf0 (saving 7 bytes per block). // // The global minimum value "base" is encoded using 0-7 bytes starting with // the most-significant non-zero bit possible for the chosen level. For // example, if (level == 7) then the encoded values have at most 17 bits, so // if "base" is encoded in 1 byte then it is shifted to occupy bits 9-16. // // Example: at level == 15, there are at most 33 non-zero value bits. The // following shows the bit positions covered by "base", "offset", and "delta" // assuming that "base" and "offset" are encoded in 2 bytes each, deltas are // encoded in 2 nibbles (1 byte) each, and "overlap" is 4 bits: // // Base: 1111111100000000----------------- // Offset: -------------1111111100000000---- // Delta: -------------------------00000000 // Overlap: ^^^^ // // The numbers (0 or 1) in this diagram denote the byte number of the encoded // value. Notice that "base" is shifted so that it starts at the leftmost // possible bit, "delta" always starts at the rightmost possible bit (bit 0), // and "offset" is shifted so that it overlaps "delta" by the chosen "overlap" // (either 0 or 4 bits). Also note that all of these values are summed, and // therefore each value can affect higher-order bits due to carries. // // NOTE(ericv): Encoding deltas in 4-bit rather than 8-bit length increments // reduces encoded sizes by about 7%. Allowing a 4-bit overlap between the // offset and deltas reduces encoded sizes by about 1%. Both optimizations // make the code more complex but don't affect running times significantly. // // ENCODING DETAILS // ---------------- // // Now we can move on to the actual encodings. First, there is a 2 byte // header encoded as follows: // // Byte 0, bits 0-2: encoding_format (CELL_IDS) // Byte 0, bit 3: have_exceptions // Byte 0, bits 4-7: (last_block_size - 1) // Byte 1, bits 0-2: base_bytes // Byte 1, bits 3-7: level (0-30) // // This is followed by an EncodedStringVector containing the encoded blocks. // Each block contains kBlockSize (8) values. The total size of the // EncodeS2PointVector is not stored explicity, but instead is calculated as // // num_values == kBlockSize * (num_blocks - 1) + last_block_size . // // (An empty vector has num_blocks == 0 and last_block_size == kBlockSize.) // // Each block starts with a 1 byte header containing the following: // // Byte 0, bits 0-2: (offset_bytes - overlap_nibbles) // Byte 0, bit 3: overlap_nibbles // Byte 0, bits 4-7: (delta_nibbles - 1) // // "overlap_nibbles" is either 0 or 1 (indicating an overlap of 0 or 4 bits), // while "offset_bytes" is in the range 0-8 (indicating the number of bytes // used to encode the offset for this block). Note that some combinations // cannot be encoded: in particular, offset_bytes == 0 can only be encoded // with an overlap of 0 bits, and offset_bytes == 8 can only be encoded with // an overlap of 4 bits. This allows us to encode offset lengths of 0-8 // rather than just 0-7 without using an extra bit. (Note that the // combinations that can't be encoded are not useful anyway.) 
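//
// For example (values chosen here purely for illustration): a block encoded
// with a 2-byte offset, a 4-bit overlap, and 12-bit (3-nibble) deltas would
// store its header byte as
//
//   (offset_bytes - overlap_nibbles) | (overlap_nibbles << 3) | ((delta_nibbles - 1) << 4)
//     == (2 - 1) | (1 << 3) | ((3 - 1) << 4) == 0x29.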
// // The header is followed by "offset_bytes" bytes for the offset, and then // (4 * delta_nibbles) bytes for the deltas. // // If there are any points that could not be represented as S2CellIds, then // "have_exceptions" in the header is true. In that case the delta values // within each block are encoded as (delta + kBlockSize), and values // 0...kBlockSize-1 are used to represent exceptions. If a block has // exceptions, they are encoded immediately following the array of deltas, // and are referenced by encoding the corresponding exception index // 0...kBlockSize-1 as the delta. // // TODO(ericv): A vector containing a single leaf cell is currently encoded as // 13 bytes (2 byte header, 7 byte base, 1 byte block count, 1 byte block // length, 1 byte block header, 1 byte delta). However if this case occurs // often, a better solution would be implement a separate format that encodes // the leading k bytes of an S2CellId. It would have a one-byte header // consisting of the encoding format (3 bits) and the number of bytes encoded // (3 bits), followed by the S2CellId bytes. The extra 2 header bits could be // used to store single points using other encodings, e.g. E7. // // If we had used 8-value blocks, we could have used the extra bit in the // first byte of the header to indicate that there is only one value, and // then skip the 2nd byte of header and the EncodedStringVector. But this // would be messy because it also requires special cases while decoding. // Essentially this would be a sub-format within the CELL_IDS format. // 1. Compute (level, face, si, ti) for each point, build a histogram of // levels, and determine the optimal level to use for encoding (if any). vector cell_points; int level = ChooseBestLevel(points, &cell_points); if (level < 0) { return EncodeS2PointVectorFast(points, encoder); } // 2. Convert the points into encodable 64-bit values. We don't use the // S2CellId itself because it requires a somewhat more complicated bit // interleaving operation. // // TODO(ericv): Benchmark using shifted S2CellIds instead. bool have_exceptions; vector values = ConvertCellsToValues(cell_points, level, &have_exceptions); // 3. Choose the global encoding parameter "base" (consisting of the bit // prefix shared by all values to be encoded). int base_bits; uint64 base = ChooseBase(values, level, have_exceptions, &base_bits); // Now encode the output, starting with the 2-byte header (see above). int num_blocks = (values.size() + kBlockSize - 1) >> kBlockShift; int base_bytes = base_bits >> 3; encoder->Ensure(2 + base_bytes); int last_block_count = values.size() - kBlockSize * (num_blocks - 1); S2_DCHECK_GE(last_block_count, 0); S2_DCHECK_LE(last_block_count, kBlockSize); S2_DCHECK_LE(base_bytes, 7); S2_DCHECK_LE(level, 30); encoder->put8(EncodedS2PointVector::CELL_IDS | (have_exceptions << 3) | ((last_block_count - 1) << 4)); encoder->put8(base_bytes | (level << 3)); // Next we encode 0-7 bytes of "base". int base_shift = BaseShift(level, base_bits); EncodeUintWithLength(base >> base_shift, base_bytes, encoder); // Now we encode the contents of each block. StringVectorEncoder blocks; vector exceptions; uint64 offset_bytes_sum = 0; uint64 delta_nibbles_sum = 0; uint64 exceptions_sum = 0; for (int i = 0; i < values.size(); i += kBlockSize) { int block_size = min(kBlockSize, values.size() - i); BlockCode code = GetBlockCode(MakeSpan(&values[i], block_size), base, have_exceptions); // Encode the one-byte block header (see above). 
Encoder* block = blocks.AddViaEncoder(); int offset_bytes = code.offset_bits >> 3; int delta_nibbles = code.delta_bits >> 2; int overlap_nibbles = code.overlap_bits >> 2; block->Ensure(1 + offset_bytes + (kBlockSize / 2) * delta_nibbles); S2_DCHECK_LE(offset_bytes - overlap_nibbles, 7); S2_DCHECK_LE(overlap_nibbles, 1); S2_DCHECK_LE(delta_nibbles, 16); block->put8((offset_bytes - overlap_nibbles) | (overlap_nibbles << 3) | (delta_nibbles - 1) << 4); // Determine the offset for this block, and whether there are exceptions. uint64 offset = ~0ULL; int num_exceptions = 0; for (int j = 0; j < block_size; ++j) { if (values[i + j] == kException) { num_exceptions += 1; } else { S2_DCHECK_GE(values[i + j], base); offset = min(offset, values[i + j] - base); } } if (num_exceptions == block_size) offset = 0; // Encode the offset. int offset_shift = code.delta_bits - code.overlap_bits; offset &= ~BitMask(offset_shift); S2_DCHECK_EQ(offset == 0, offset_bytes == 0); if (offset > 0) { EncodeUintWithLength(offset >> offset_shift, offset_bytes, block); } // Encode the deltas, and also gather any exceptions present. int delta_bytes = (delta_nibbles + 1) >> 1; exceptions.clear(); for (int j = 0; j < block_size; ++j) { uint64 delta; if (values[i + j] == kException) { delta = exceptions.size(); exceptions.push_back(points[i + j]); } else { S2_DCHECK_GE(values[i + j], offset + base); delta = values[i + j] - (offset + base); if (have_exceptions) { S2_DCHECK_LE(delta, ~0ULL - kBlockSize); delta += kBlockSize; } } S2_DCHECK_LE(delta, BitMask(code.delta_bits)); if ((delta_nibbles & 1) && (j & 1)) { // Combine this delta with the high-order 4 bits of the previous delta. uint8 last_byte = *(block->base() + block->length() - 1); block->RemoveLast(1); delta = (delta << 4) | (last_byte & 0xf); } EncodeUintWithLength(delta, delta_bytes, block); } // Append any exceptions to the end of the block. if (num_exceptions > 0) { int exceptions_bytes = exceptions.size() * sizeof(S2Point); block->Ensure(exceptions_bytes); block->putn(exceptions.data(), exceptions_bytes); } offset_bytes_sum += offset_bytes; delta_nibbles_sum += delta_nibbles; exceptions_sum += num_exceptions; } blocks.Encode(encoder); } // Returns the S2CellId level for which the greatest number of the given points // can be represented as the center of an S2CellId. Initializes "cell_points" // to contain the S2CellId representation of each point (if any). Returns -1 // if there is no S2CellId that would result in significant space savings. int ChooseBestLevel(Span points, vector* cell_points) { cell_points->clear(); cell_points->reserve(points.size()); // Count the number of points at each level. int level_counts[S2CellId::kMaxLevel + 1] = { 0 }; for (const S2Point& point : points) { int face; uint32 si, ti; int level = S2::XYZtoFaceSiTi(point, &face, &si, &ti); cell_points->push_back(CellPoint(level, face, si, ti)); if (level >= 0) ++level_counts[level]; } // Choose the level for which the most points can be encoded. int best_level = 0; for (int level = 1; level <= S2CellId::kMaxLevel; ++level) { if (level_counts[level] > level_counts[best_level]) { best_level = level; } } // The uncompressed encoding is smaller *and* faster when very few of the // points are encodable as S2CellIds. The CELL_IDS encoding uses about 1 // extra byte per point in this case, consisting of up to a 3 byte // EncodedStringVector offset for each block, a 1 byte block header, and 4 // bits per delta (encoding an exception number from 0-7), for a total of 8 // bytes per block. 
This represents a space overhead of about 4%, so we // require that at least 5% of the input points should be encodable as // S2CellIds in order for the CELL_IDS format to be worthwhile. constexpr double kMinEncodableFraction = 0.05; if (level_counts[best_level] <= kMinEncodableFraction * points.size()) { return -1; } return best_level; } // Given a vector of points in CellPoint format and an S2CellId level that has // been chosen for encoding, returns a vector of 64-bit values that should be // encoded in order to represent these points. Points that cannot be // represented losslessly as the center of an S2CellId at the chosen level are // indicated by the value "kException". "have_exceptions" is set to indicate // whether any exceptions were present. vector ConvertCellsToValues(const vector& cell_points, int level, bool* have_exceptions) { vector values; values.reserve(cell_points.size()); *have_exceptions = false; int shift = S2CellId::kMaxLevel - level; for (CellPoint cp : cell_points) { if (cp.level != level) { values.push_back(kException); *have_exceptions = true; } else { // Note that bit 31 of tj is always zero, and that bits are interleaved in // such a way that bit 63 of the result is always zero. // // The S2CellId version of the following code is: // uint64 v = S2CellId::FromFaceIJ(cp.face, cp.si >> 1, cp.ti >> 1). // parent(level).id() >> (2 * shift + 1); uint32 sj = (((cp.face & 3) << 30) | (cp.si >> 1)) >> shift; uint32 tj = (((cp.face & 4) << 29) | cp.ti) >> (shift + 1); uint64 v = InterleaveUint32BitPairs(sj, tj); S2_DCHECK_LE(v, BitMask(MaxBitsForLevel(level))); values.push_back(v); } } return values; } uint64 ChooseBase(const vector& values, int level, bool have_exceptions, int* base_bits) { // Find the minimum and maximum non-exception values to be represented. uint64 v_min = kException, v_max = 0; for (auto v : values) { if (v != kException) { v_min = min(v_min, v); v_max = max(v_max, v); } } if (v_min == kException) return 0; // Generally "base" is chosen as the bit prefix shared by v_min and v_max. // However there are a few adjustments we need to make. // // 1. Encodings are usually smaller if the bits represented by "base" and // "delta" do not overlap. Usually the shared prefix rule does this // automatically, but if v_min == v_max or there are special circumstances // that increase delta_bits (such as values.size() == 1) then we need to // make an adjustment. // // 2. The format only allows us to represent up to 7 bytes (56 bits) of // "base", so we need to ensure that "base" conforms to this requirement. int min_delta_bits = (have_exceptions || values.size() == 1) ? 8 : 4; int excluded_bits = max(Bits::Log2Floor64(v_min ^ v_max) + 1, max(min_delta_bits, BaseShift(level, 56))); uint64 base = v_min & ~BitMask(excluded_bits); // Determine how many bytes are needed to represent this prefix. if (base == 0) { *base_bits = 0; } else { int low_bit = Bits::FindLSBSetNonZero64(base); *base_bits = (MaxBitsForLevel(level) - low_bit + 7) & ~7; } // Since base_bits has been rounded up to a multiple of 8, we may now be // able to represent additional bits of v_min. In general this reduces the // final encoded size. // // NOTE(ericv): A different strategy for choosing "base" is to encode all // blocks under the assumption that "base" equals v_min exactly, and then // set base equal to the minimum-length prefix of "v_min" that allows these // encodings to be used. This strategy reduces the encoded sizes by // about 0.2% relative to the strategy here, but is more complicated. 
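  // Illustrative trace (numbers invented for this comment; assumes no
  // exceptions and more than one value): at level == 7, MaxBitsForLevel() is
  // 17. If v_min == 0x12345 and v_max == 0x12384, the highest differing bit
  // is bit 7, so excluded_bits == 8 and the initial "base" is 0x12300.
  // Rounding base_bits up to 16 means the value returned below keeps all but
  // the lowest bit of v_min, i.e. 0x12344, which is then encoded in 2 bytes
  // after being right-shifted by 1 bit.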
return v_min & ~BitMask(BaseShift(level, *base_bits)); } // Returns true if the range of values [d_min, d_max] can be encoded using the // specified parameters (delta_bits, overlap_bits, and have_exceptions). bool CanEncode(uint64 d_min, uint64 d_max, int delta_bits, int overlap_bits, bool have_exceptions) { // "offset" can't represent the lowest (delta_bits - overlap_bits) of d_min. d_min &= ~BitMask(delta_bits - overlap_bits); // The maximum delta is reduced by kBlockSize if any exceptions exist, since // deltas 0..kBlockSize-1 are used to indicate exceptions. uint64 max_delta = BitMask(delta_bits); if (have_exceptions) { if (max_delta < kBlockSize) return false; max_delta -= kBlockSize; } // The first test below is necessary to avoid 64-bit overflow. return (d_min > ~max_delta) || (d_min + max_delta >= d_max); } // Given a vector of 64-bit values to be encoded and an S2CellId level, returns // the optimal encoding parameters that should be used to encode each block. // Also returns the global minimum value "base" and the number of bits that // should be used to encode it ("base_bits"). BlockCode GetBlockCode(Span values, uint64 base, bool have_exceptions) { // "b_min" and "b_max"n are the minimum and maximum values within this block. uint64 b_min = kException, b_max = 0; for (uint64 v : values) { if (v != kException) { b_min = min(b_min, v); b_max = max(b_max, v); } } if (b_min == kException) { // All values in this block are exceptions. return BlockCode{4, 0, 0}; } // Adjust the min/max values so that they are relative to "base". b_min -= base; b_max -= base; // Determine the minimum possible delta length and overlap that can be used // to encode this block. The block will usually be encodable using the // number of bits in (b_max - b_min) rounded up to a multiple of 4. If this // is not possible, the preferred solution is to shift "offset" so that the // delta and offset values overlap by 4 bits (since this only costs an // average of 4 extra bits per block). Otherwise we increase the delta size // by 4 bits. Certain cases require that both of these techniques are used. // // Example 1: b_min = 0x72, b_max = 0x7e. The range is 0x0c. This can be // encoded using delta_bits = 4 and overlap_bits = 0, which allows us to // represent an offset of 0x70 and a maximum delta of 0x0f, so that we can // encode values up to 0x7f. // // Example 2: b_min = 0x78, b_max = 0x84. The range is 0x0c, but in this // case it is not sufficient to use delta_bits = 4 and overlap_bits = 0 // because we can again only represent an offset of 0x70, so the maximum // delta of 0x0f only lets us encode values up to 0x7f. However if we // increase the overlap to 4 bits then we can represent an offset of 0x78, // which lets us encode values up to 0x78 + 0x0f = 0x87. // // Example 3: b_min = 0x08, b_max = 0x104. The range is 0xfc, so we should // be able to use 8-bit deltas. But even with a 4-bit overlap, we can still // only encode offset = 0 and a maximum value of 0xff. (We don't allow // bigger overlaps because statistically they are not worthwhile.) Instead // we increase the delta size to 12 bits, which handles this case easily. // // Example 4: b_min = 0xf08, b_max = 0x1004. The range is 0xfc, so we // should be able to use 8-bit deltas. With 8-bit deltas and no overlap, we // have offset = 0xf00 and a maximum encodable value of 0xfff. With 8-bit // deltas and a 4-bit overlap, we still have offset = 0xf00 and a maximum // encodable value of 0xfff. 
Even with 12-bit deltas, we have offset = 0 // and we can still only represent 0xfff. However with delta_bits = 12 and // overlap_bits = 4, we can represent offset = 0xf00 and a maximum encodable // value of 0xf00 + 0xfff = 0x1eff. // // It is possible to show that this last example is the worst case, i.e. we // do not need to consider increasing delta_bits or overlap_bits further. int delta_bits = (max(1, Bits::Log2Floor64(b_max - b_min)) + 3) & ~3; int overlap_bits = 0; if (!CanEncode(b_min, b_max, delta_bits, 0, have_exceptions)) { if (CanEncode(b_min, b_max, delta_bits, 4, have_exceptions)) { overlap_bits = 4; } else { S2_DCHECK_LE(delta_bits, 60); delta_bits += 4; if (!CanEncode(b_min, b_max, delta_bits, 0, have_exceptions)) { S2_DCHECK(CanEncode(b_min, b_max, delta_bits, 4, have_exceptions)); overlap_bits = 4; } } } // When the block size is 1 and no exceptions exist, we have delta_bits == 4 // and overlap_bits == 0 which wastes 4 bits. We fix this below, which // among other things reduces the encoding size for single leaf cells by one // byte. (Note that when exceptions exist, delta_bits == 8 and overlap_bits // may be 0 or 4. These cases are covered by the unit tests.) if (values.size() == 1 && !have_exceptions) { S2_DCHECK(delta_bits == 4 && overlap_bits == 0); delta_bits = 8; } // Now determine the number of bytes needed to encode "offset", given the // chosen delta length. uint64 max_delta = BitMask(delta_bits) - (have_exceptions ? kBlockSize : 0); int offset_bits = 0; if (b_max > max_delta) { // At least one byte of offset is required. Round up the minimum offset // to the next encodable value, and determine how many bits it has. int offset_shift = delta_bits - overlap_bits; uint64 mask = BitMask(offset_shift); uint64 min_offset = (b_max - max_delta + mask) & ~mask; S2_DCHECK_GT(min_offset, 0); offset_bits = (Bits::FindMSBSetNonZero64(min_offset) + 1 - offset_shift + 7) & ~7; // A 64-bit offset can only be encoded with an overlap of 4 bits. if (offset_bits == 64) overlap_bits = 4; } return BlockCode{delta_bits, offset_bits, overlap_bits}; } bool EncodedS2PointVector::InitCellIdsFormat(Decoder* decoder) { // This function inverts the encodings documented above. // First we decode the two-byte header. if (decoder->avail() < 2) return false; uint8 header1 = decoder->get8(); uint8 header2 = decoder->get8(); S2_DCHECK_EQ(header1 & 7, CELL_IDS); int last_block_count, base_bytes; cell_ids_.have_exceptions = (header1 & 8) != 0; last_block_count = (header1 >> 4) + 1; base_bytes = header2 & 7; cell_ids_.level = header2 >> 3; // Decode the base value (if any). uint64 base; if (!DecodeUintWithLength(base_bytes, decoder, &base)) return false; cell_ids_.base = base << BaseShift(cell_ids_.level, base_bytes << 3); // Initialize the vector of encoded blocks. if (!cell_ids_.blocks.Init(decoder)) return false; size_ = kBlockSize * (cell_ids_.blocks.size() - 1) + last_block_count; return true; } S2Point EncodedS2PointVector::DecodeCellIdsFormat(int i) const { // This function inverts the encodings documented above. // First we decode the block header. const char* ptr = cell_ids_.blocks.GetStart(i >> kBlockShift); uint8 header = *ptr++; int overlap_nibbles = (header >> 3) & 1; int offset_bytes = (header & 7) + overlap_nibbles; int delta_nibbles = (header >> 4) + 1; // Decode the offset for this block. int offset_shift = (delta_nibbles - overlap_nibbles) << 2; uint64 offset = GetUintWithLength(ptr, offset_bytes) << offset_shift; ptr += offset_bytes; // Decode the delta for the requested value. 
int delta_nibble_offset = (i & (kBlockSize - 1)) * delta_nibbles; int delta_bytes = (delta_nibbles + 1) >> 1; const char* delta_ptr = ptr + (delta_nibble_offset >> 1); uint64 delta = GetUintWithLength(delta_ptr, delta_bytes); delta >>= (delta_nibble_offset & 1) << 2; delta &= BitMask(delta_nibbles << 2); // Test whether this point is encoded as an exception. if (cell_ids_.have_exceptions) { if (delta < kBlockSize) { int block_size = min(kBlockSize, size_ - (i & ~(kBlockSize - 1))); ptr += (block_size * delta_nibbles + 1) >> 1; ptr += delta * sizeof(S2Point); return *reinterpret_cast(ptr); } delta -= kBlockSize; } // Otherwise convert the 64-bit value back to an S2Point. uint64 value = cell_ids_.base + offset + delta; int shift = S2CellId::kMaxLevel - cell_ids_.level; // The S2CellId version of the following code is: // return S2CellId(((value << 1) | 1) << (2 * shift)).ToPoint(); uint32 sj, tj; DeinterleaveUint32BitPairs(value, &sj, &tj); int si = (((sj << 1) | 1) << shift) & 0x7fffffff; int ti = (((tj << 1) | 1) << shift) & 0x7fffffff; int face = ((sj << shift) >> 30) | (((tj << (shift + 1)) >> 29) & 4); return S2::FaceUVtoXYZ(face, S2::STtoUV(S2::SiTitoST(si)), S2::STtoUV(S2::SiTitoST(ti))).Normalize(); } } // namespace s2coding s2geometry-0.10.0/src/s2/encoded_s2point_vector.h000066400000000000000000000112161422156367100216250ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_ENCODED_S2POINT_VECTOR_H_ #define S2_ENCODED_S2POINT_VECTOR_H_ #include #include "absl/types/span.h" #include "s2/encoded_string_vector.h" #include "s2/encoded_uint_vector.h" #include "s2/s2point.h" #include "s2/s2shape.h" namespace s2coding { // Encodes a vector of S2Points in a format that can later be decoded as an // EncodedS2PointVector. // // REQUIRES: "encoder" uses the default constructor, so that its buffer // can be enlarged as necessary by calling Ensure(int). void EncodeS2PointVector(absl::Span points, CodingHint hint, Encoder* encoder); // This class represents an encoded vector of S2Points. Values are decoded // only when they are accessed. This allows for very fast initialization and // no additional memory use beyond the encoded data. The encoded data is not // owned by this class; typically it points into a large contiguous buffer // that contains other encoded data as well. // // This is one of several helper classes that allow complex data structures to // be initialized from an encoded format in constant time and then decoded on // demand. This can be a big performance advantage when only a small part of // the data structure is actually used. class EncodedS2PointVector { public: // Constructs an uninitialized object; requires Init() to be called. EncodedS2PointVector() {} // Initializes the EncodedS2PointVector. // // REQUIRES: The Decoder data buffer must outlive this object. bool Init(Decoder* decoder); // Returns the size of the original vector. 
size_t size() const; // Returns the element at the given index. S2Point operator[](int i) const; // Decodes and returns the entire original vector. std::vector Decode() const; // Copy the encoded data to the encoder. This allows for "reserialization" of // encoded shapes created through lazy decoding. void Encode(Encoder* encoder) const; // TODO(ericv): Consider adding a method that returns an adjacent pair of // points. This would save some decoding overhead. private: friend void EncodeS2PointVector(absl::Span, CodingHint, Encoder*); friend void EncodeS2PointVectorFast(absl::Span, Encoder*); friend void EncodeS2PointVectorCompact(absl::Span, Encoder*); bool InitUncompressedFormat(Decoder* decoder); bool InitCellIdsFormat(Decoder* decoder); S2Point DecodeCellIdsFormat(int i) const; // We use a tagged union to represent multiple formats, as opposed to an // abstract base class or templating. This represents the best compromise // between performance, space, and convenience. Note that the overhead of // checking the tag is trivial and will typically be branch-predicted // perfectly. // // TODO(ericv): Once additional formats have been implemented, consider // using std::variant<> instead. It's unclear whether this would have // better or worse performance than the current approach. enum Format : uint8 { UNCOMPRESSED = 0, CELL_IDS = 1, }; Format format_; uint32 size_; union { struct { const S2Point* points; } uncompressed_; struct { EncodedStringVector blocks; uint64 base; uint8 level; bool have_exceptions; // TODO(ericv): Use std::atomic_flag to cache the last point decoded in // a thread-safe way. This reduces benchmark times for actual polygon // operations (e.g. S2ClosestEdgeQuery) by about 15%. } cell_ids_; }; }; ////////////////// Implementation details follow //////////////////// inline size_t EncodedS2PointVector::size() const { return size_; } inline S2Point EncodedS2PointVector::operator[](int i) const { switch (format_) { case Format::UNCOMPRESSED: return uncompressed_.points[i]; case Format::CELL_IDS: return DecodeCellIdsFormat(i); default: S2_DLOG(FATAL) << "Unrecognized format"; return S2Point(); } } } // namespace s2coding #endif // S2_ENCODED_S2POINT_VECTOR_H_ s2geometry-0.10.0/src/s2/encoded_s2point_vector_test.cc000066400000000000000000000445761422156367100230410ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_s2point_vector.h" #include #include #include "absl/flags/flag.h" #include "absl/strings/str_cat.h" #include "s2/base/log_severity.h" #include "s2/util/bits/bit-interleave.h" #include "s2/s2loop.h" #include "s2/s2polygon.h" #include "s2/s2shape.h" #include "s2/s2testing.h" #include "s2/s2text_format.h" using absl::make_unique; using s2textformat::MakeCellIdOrDie; using s2textformat::MakePointOrDie; using std::vector; namespace s2coding { static int kBlockSize = 16; // Number of deltas per block in implementation. 
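// Illustrative usage sketch showing the intended encode / Init / operator[]
// pattern end-to-end. The test name, point values, and expectations below are
// chosen only to illustrate the API; exact roundtrip of both cell-center and
// exception points is relied on by the other tests in this file as well.
TEST(EncodedS2PointVectorTest, UsageSketch) {
  vector<S2Point> points = {S2Point(1, 0, 0),               // Face 0 center.
                            S2Point(0, 1, 0),               // Face 1 center.
                            S2Point(1, 2, 3).Normalize()};  // Stored as an exception.
  Encoder encoder;
  EncodeS2PointVector(points, CodingHint::COMPACT, &encoder);

  // The encoded buffer must outlive the EncodedS2PointVector.
  Decoder decoder(encoder.base(), encoder.length());
  EncodedS2PointVector encoded;
  ASSERT_TRUE(encoded.Init(&decoder));
  EXPECT_EQ(points.size(), encoded.size());
  EXPECT_EQ(points[2], encoded[2]);      // Decodes only this element.
  EXPECT_EQ(points, encoded.Decode());   // Full decode matches the input.
}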
size_t TestEncodedS2PointVector(const vector& expected, CodingHint hint, int64 expected_bytes) { Encoder encoder; EncodeS2PointVector(expected, hint, &encoder); if (expected_bytes >= 0) { EXPECT_EQ(expected_bytes, encoder.length()); } Decoder decoder(encoder.base(), encoder.length()); EncodedS2PointVector actual; EXPECT_TRUE(actual.Init(&decoder)); EXPECT_EQ(actual.Decode(), expected); return encoder.length(); } // In order to make it easier to construct tests that encode particular // values, this function duplicates the part of EncodedS2PointVector that // converts an encoded 64-bit value back to an S2Point. S2Point EncodedValueToPoint(uint64 value, int level) { uint32 sj, tj; util_bits::DeinterleaveUint32(value, &sj, &tj); int shift = S2CellId::kMaxLevel - level; int si = (((sj << 1) | 1) << shift) & 0x7fffffff; int ti = (((tj << 1) | 1) << shift) & 0x7fffffff; int face = ((sj << shift) >> 30) | (((tj << (shift + 1)) >> 29) & 4); return S2::FaceUVtoXYZ(face, S2::STtoUV(S2::SiTitoST(si)), S2::STtoUV(S2::SiTitoST(ti))).Normalize(); } TEST(EncodedS2PointVectorTest, Empty) { TestEncodedS2PointVector({}, CodingHint::FAST, 1); // Test that an empty vector uses the UNCOMPRESSED encoding. TestEncodedS2PointVector({}, CodingHint::COMPACT, 1); } TEST(EncodedS2PointVectorTest, OnePoint) { TestEncodedS2PointVector({S2Point(1, 0, 0)}, CodingHint::FAST, 25); // Encoding: header (2 bytes), block count (1 byte), block offsets (1 byte), // block header (1 byte), delta (1 byte). TestEncodedS2PointVector({S2Point(1, 0, 0)}, CodingHint::COMPACT, 6); } TEST(EncodedS2PointVectorTest, OnePointWithExceptionsNoOverlap) { // Test encoding a block with one point when other blocks have exceptions // (which changes the encoding for all blocks). The case below yields // delta_bits == 8 and overlap_bits == 0. // // Encoding: header (2 bytes), block count (1 byte), block offsets (2 bytes) // Block 0: block header (1 byte), 16 deltas (16 bytes), exception (24 bytes) // Block 1: block header (1 byte), delta (1 byte) S2Point a(1, 0, 0); vector points = { S2Point(1, 2, 3).Normalize(), a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, a // Second block }; TestEncodedS2PointVector(points, CodingHint::COMPACT, 48); } TEST(EncodedS2PointVectorTest, OnePointWithExceptionsWithOverlap) { // Test encoding a block with one point when other blocks have exceptions // (which changes the encoding for all blocks). The case below yields // delta_bits == 8 and overlap_bits == 4. // // Encoding: header (2 bytes), base (2 bytes), block count (1 byte), // block offsets (2 bytes) // Block 0: header (1 byte), offset (2 bytes), 16 deltas (16 bytes), // exception (24 bytes) // Block 1: header (1 byte), offset (2 bytes), delta (1 byte) S2Point a = S2CellId(0x946df618d0000000).ToPoint(); S2Point b = S2CellId(0x947209e070000000).ToPoint(); vector points = { S2Point(1, 2, 3).Normalize(), a, a, a, a, a, a, a, a, a, a, a, a, a, a, a, b // Second block }; TestEncodedS2PointVector(points, CodingHint::COMPACT, 54); } TEST(EncodedS2PointVectorTest, CellIdWithException) { // Test one point encoded as an S2CellId with one point encoded as an // exception. // // Encoding: header (2 bytes), block count (1 byte), block offsets (1 byte), // block header (1 byte), two deltas (2 bytes), exception (24 bytes). 
TestEncodedS2PointVector( {MakeCellIdOrDie("1/23").ToPoint(), S2Point(0.1, 0.2, 0.3).Normalize()}, CodingHint::COMPACT, 31); } TEST(EncodedS2PointVectorTest, PointsAtMultipleLevels) { // Test that when points at multiple levels are present, the level with the // most points is chosen (preferring the smallest level in case of ties). // (All other points are encoded as exceptions.) // In this example, the two points at level 5 (on face 1) should be encoded. // It is possible to tell which points are encoded by the length of the // encoding (since different numbers of "base" bytes are encoded). // // Encoding: header (2 bytes), base (1 byte), block count (1 byte), block // offsets (1 byte), block header (1 byte), 5 deltas (5 bytes), S2Point // exceptions (72 bytes). TestEncodedS2PointVector( {MakeCellIdOrDie("2/11001310230102").ToPoint(), MakeCellIdOrDie("1/23322").ToPoint(), MakeCellIdOrDie("3/3").ToPoint(), MakeCellIdOrDie("1/23323").ToPoint(), MakeCellIdOrDie("2/12101023022012").ToPoint()}, CodingHint::COMPACT, 83); } TEST(EncodedS2PointVectorTest, NoOverlapOrExtraDeltaBitsNeeded) { // This function tests the case in GetBlockCodes() where values can be // encoded using the minimum number delta bits and no overlap. From the // comments there: // // Example 1: d_min = 0x72, d_max = 0x7e. The range is 0x0c. This can be // encoded using delta_bits = 4 and overlap_bits = 0, which allows us to // represent an offset of 0x70 and a maximum delta of 0x0f, so that we can // encode values up to 0x7f. // // To set up this test, we need at least two blocks: one to set the global // minimum value, and the other to encode a specific range of deltas. To // make things easier, the first block has a minimum value of zero. // // Encoding: header (2 bytes), block count (1 byte), block offsets (2 bytes) // Block 0: header (1 byte), 8 deltas (8 bytes) // Block 1: header (1 byte), offset (1 byte), 4 deltas (2 bytes) const int level = 3; vector points(kBlockSize, EncodedValueToPoint(0, level)); points.push_back(EncodedValueToPoint(0x72, level)); points.push_back(EncodedValueToPoint(0x74, level)); points.push_back(EncodedValueToPoint(0x75, level)); points.push_back(EncodedValueToPoint(0x7e, level)); TestEncodedS2PointVector(points, CodingHint::COMPACT, 10 + kBlockSize / 2); } TEST(EncodedS2PointVectorTest, OverlapNeeded) { // Like the above, but tests the following case: // // Example 2: d_min = 0x78, d_max = 0x84. The range is 0x0c, but in this // case it is not sufficient to use delta_bits = 4 and overlap_bits = 0 // because we can again only represent an offset of 0x70, so the maximum // delta of 0x0f only lets us encode values up to 0x7f. However if we // increase the overlap to 4 bits then we can represent an offset of 0x78, // which lets us encode values up to 0x78 + 0x0f = 0x87. // // Encoding: header (2 bytes), block count (1 byte), block offsets (2 bytes) // Block 0: header (1 byte), 8 deltas (8 bytes) // Block 1: header (1 byte), offset (1 byte), 4 deltas (2 bytes) const int level = 3; vector points(kBlockSize, EncodedValueToPoint(0, level)); points.push_back(EncodedValueToPoint(0x78, level)); points.push_back(EncodedValueToPoint(0x7a, level)); points.push_back(EncodedValueToPoint(0x7c, level)); points.push_back(EncodedValueToPoint(0x84, level)); TestEncodedS2PointVector(points, CodingHint::COMPACT, 10 + kBlockSize / 2); } TEST(EncodedS2PointVectorTest, ExtraDeltaBitsNeeded) { // Like the above, but tests the following case: // // Example 3: d_min = 0x08, d_max = 0x104. 
The range is 0xfc, so we should // be able to use 8-bit deltas. But even with a 4-bit overlap, we can still // only encode offset = 0 and a maximum value of 0xff. (We don't allow // bigger overlaps because statistically they are not worthwhile.) Instead // we increase the delta size to 12 bits, which handles this case easily. // // Encoding: header (2 bytes), block count (1 byte), block offsets (2 bytes) // Block 0: header (1 byte), 8 deltas (8 bytes) // Block 1: header (1 byte), 4 deltas (6 bytes) const int level = 3; vector points(kBlockSize, EncodedValueToPoint(0, level)); points.push_back(EncodedValueToPoint(0x08, level)); points.push_back(EncodedValueToPoint(0x4e, level)); points.push_back(EncodedValueToPoint(0x82, level)); points.push_back(EncodedValueToPoint(0x104, level)); TestEncodedS2PointVector(points, CodingHint::COMPACT, 13 + kBlockSize / 2); } TEST(EncodedS2PointVectorTest, ExtraDeltaBitsAndOverlapNeeded) { // Like the above, but tests the following case: // // Example 4: d_min = 0xf08, d_max = 0x1004. The range is 0xfc, so we // should be able to use 8-bit deltas. With 8-bit deltas and no overlap, we // have offset = 0xf00 and a maximum encodable value of 0xfff. With 8-bit // deltas and a 4-bit overlap, we still have offset = 0xf00 and a maximum // encodable value of 0xfff. Even with 12-bit deltas, we have offset = 0 // and we can still only represent 0xfff. However with delta_bits = 12 and // overlap_bits = 4, we can represent offset = 0xf00 and a maximum encodable // value of 0xf00 + 0xfff = 0x1eff. // // Encoding: header (2 bytes), block count (1 byte), block offsets (2 bytes) // Block 0: header (1 byte), 8 deltas (8 bytes) // Block 1: header (1 byte), offset (1 byte), 4 deltas (6 bytes) const int level = 5; vector points(kBlockSize, EncodedValueToPoint(0, level)); points.push_back(EncodedValueToPoint(0xf08, level)); points.push_back(EncodedValueToPoint(0xf4e, level)); points.push_back(EncodedValueToPoint(0xf82, level)); points.push_back(EncodedValueToPoint(0x1004, level)); TestEncodedS2PointVector(points, CodingHint::COMPACT, 14 + kBlockSize / 2); } TEST(EncodedS2PointVectorTest, SixtyFourBitOffset) { // Tests a case where a 64-bit block offset is needed. // // Encoding: header (2 bytes), block count (1 byte), block offsets (2 bytes) // Block 0: header (1 byte), 8 deltas (8 bytes) // Block 1: header (1 byte), offset (8 bytes), 2 deltas (1 byte) const int level = S2CellId::kMaxLevel; vector points(kBlockSize, S2CellId::Begin(level).ToPoint()); points.push_back(S2CellId::End(level).prev().ToPoint()); points.push_back(S2CellId::End(level).prev().prev().ToPoint()); TestEncodedS2PointVector(points, CodingHint::COMPACT, 16 + kBlockSize / 2); } TEST(EncodedS2PointVectorTest, AllExceptionsBlock) { // The encoding consists of two blocks; the first contains 16 encodable // values, while the second contains two exceptions. vector points(kBlockSize, EncodedValueToPoint(0, S2CellId::kMaxLevel)); points.push_back(S2Point(0.1, 0.2, 0.3).Normalize()); points.push_back(S2Point(0.3, 0.2, 0.1).Normalize()); // Encoding: header (2 bytes), block count (1 byte), block offsets (2 bytes). // 1st block header (1 byte), 16 deltas (16 bytes). // 2nd block header (1 byte), 2 deltas (1 byte), 2 exceptions (48 bytes). TestEncodedS2PointVector(points, CodingHint::COMPACT, 72); // Encoding: header (2 bytes), 18 S2Points (432 bytes). 
TestEncodedS2PointVector(points, CodingHint::FAST, 434); } TEST(EncodedS2PointVectorTest, FirstAtAllLevels) { // Test encoding the first S2CellId at each level (which also happens to have // the maximum face, si, and ti values). All such S2CellIds can be encoded in // 6 bytes because most of the bits are zero. for (int level = 0; level <= S2CellId::kMaxLevel; ++level) { SCOPED_TRACE(absl::StrCat("Level = ", level)); TestEncodedS2PointVector({S2CellId::Begin(level).ToPoint()}, CodingHint::COMPACT, 6); } } TEST(EncodedS2PointVectorTest, LastAtAllLevels) { // Test encoding the last S2CellId at each level. It turns out that such // S2CellIds have the largest possible face and ti values, and the minimum // possible si value at that level. Such S2CellIds can be encoded in 6 to 13 // bytes depending on the level. for (int level = 0; level <= S2CellId::kMaxLevel; ++level) { SCOPED_TRACE(absl::StrCat("Level = ", level)); // Note that 8 bit deltas are used to encode blocks of size 1, which // reduces the size of "base" from ((level + 2) / 4) to (level / 4) bytes. int expected_size = 6 + level / 4; TestEncodedS2PointVector({S2CellId::End(level).prev().ToPoint()}, CodingHint::COMPACT, expected_size); } } TEST(EncodedS2PointVectorTest, MaxFaceSiTiAtAllLevels) { // Similar to the test above, but tests encoding the S2CellId at each level // whose face, si, and ti values are all maximal. This turns out to be the // S2CellId whose human-readable form is 5/222...22 (0xb555555555555555), // however for clarity we consruct it using S2CellId::FromFaceIJ. for (int level = 0; level <= S2CellId::kMaxLevel; ++level) { SCOPED_TRACE(absl::StrCat("Level = ", level)); S2CellId id = S2CellId::FromFaceIJ(5, S2::kLimitIJ - 1, S2::kLimitIJ - 1) .parent(level); // This encoding is one byte bigger than the previous test at levels 7, 11, // 15, 19, 23, and 27. This is because in the previous test, the // odd-numbered value bits are all zero (except for the face number), which // reduces the number of base bits needed by exactly 1. The encoding size // at level==3 is unaffected because for singleton blocks, the lowest 8 // value bits are encoded in the delta. int expected_size = (level < 4) ? 6 : 6 + (level + 1) / 4; TestEncodedS2PointVector({id.ToPoint()}, CodingHint::COMPACT, expected_size); } } TEST(EncodedS2PointVectorTest, LastTwoPointsAtAllLevels) { // Test encoding the last two S2CellIds at each level. for (int level = 0; level <= S2CellId::kMaxLevel; ++level) { SCOPED_TRACE(absl::StrCat("Level = ", level)); S2CellId id = S2CellId::End(level).prev(); // Notice that this costs only 4 bits more than encoding the last S2CellId // by itself (see LastAtAllLevels). This is because encoding a block of // size 1 uses 8-bit deltas (which reduces the size of "base" by 4 bits), // while this test uses two 4-bit deltas. int expected_size = 6 + (level + 2) / 4; TestEncodedS2PointVector({id.ToPoint(), id.prev().ToPoint()}, CodingHint::COMPACT, expected_size); } } TEST(EncodedS2PointVectorTest, ManyDuplicatePointsAtAllLevels) { // Test encoding 32 copies of the last S2CellId at each level. This uses // between 27 and 38 bytes depending on the level. (Note that the encoding // can use less than 1 byte per point in this situation.) 
for (int level = 0; level <= S2CellId::kMaxLevel; ++level) { SCOPED_TRACE(absl::StrCat("Level = ", level)); S2CellId id = S2CellId::End(level).prev(); // Encoding: header (2 bytes), base ((level + 2) / 4 bytes), block count // (1 byte), block offsets (2 bytes), block headers (2 bytes), 32 deltas // (16 bytes). At level 30 the encoding size goes up by 1 byte because // we can't encode an 8 byte "base" value, so instead this case uses a // base of 7 bytes plus a one-byte offset in each of the 2 blocks. int expected_size = 23 + (level + 2) / 4; if (level == 30) expected_size += 1; vector points(32, id.ToPoint()); TestEncodedS2PointVector(points, CodingHint::COMPACT, expected_size); } } TEST(EncodedS2PointVectorTest, SnappedFractalLoops) { S2Testing::rnd.Reset(absl::GetFlag(FLAGS_s2_random_seed)); int kMaxPoints = 3 << (google::DEBUG_MODE ? 10 : 14); for (int num_points = 3; num_points <= kMaxPoints; num_points *= 4) { size_t s2polygon_size = 0, lax_polygon_size = 0; for (int i = 0; i < 10; ++i) { S2Testing::Fractal fractal; fractal.SetLevelForApproxMaxEdges(num_points); auto frame = S2Testing::GetRandomFrame(); auto loop = fractal.MakeLoop(frame, S2Testing::KmToAngle(10)); std::vector points; for (int j = 0; j < loop->num_vertices(); ++j) { points.push_back(S2CellId(loop->vertex(j)).ToPoint()); } S2Polygon s2polygon(make_unique(points)); Encoder encoder; s2polygon.Encode(&encoder); s2polygon_size += encoder.length(); // S2LaxPolygonShape has 2 extra bytes of overhead to encode one loop. lax_polygon_size += TestEncodedS2PointVector(points, CodingHint::COMPACT, -1) + 2; } printf("n=%5d s2=%9" PRIuS " lax=%9" PRIuS "\n", num_points, s2polygon_size, lax_polygon_size); } } void TestRoundtripEncoding(s2coding::CodingHint hint) { // Ensures that the EncodedS2PointVector can be encoded and decoded without // loss. const int level = 3; vector points(kBlockSize, EncodedValueToPoint(0, level)); points.push_back(EncodedValueToPoint(0x78, level)); points.push_back(EncodedValueToPoint(0x7a, level)); points.push_back(EncodedValueToPoint(0x7c, level)); points.push_back(EncodedValueToPoint(0x84, level)); EncodedS2PointVector a_vector; Encoder a_encoder; EncodedS2PointVector b_vector; Encoder b_encoder; // Encode and decode from a vector. { EncodeS2PointVector(points, hint, &a_encoder); Decoder decoder(a_encoder.base(), a_encoder.length()); a_vector.Init(&decoder); } ASSERT_EQ(points, a_vector.Decode()); // Encode and decode from an EncodedS2PointVector. { a_vector.Encode(&b_encoder); Decoder decoder(b_encoder.base(), b_encoder.length()); b_vector.Init(&decoder); } EXPECT_EQ(points, b_vector.Decode()); } TEST(EncodedS2PointVectorTest, RoundtripEncodingFast) { TestRoundtripEncoding(s2coding::CodingHint::FAST); } TEST(EncodedS2PointVectorTest, RoundtripEncodingCompact) { TestRoundtripEncoding(s2coding::CodingHint::COMPACT); } } // namespace s2coding s2geometry-0.10.0/src/s2/encoded_s2shape_index.cc000066400000000000000000000173431422156367100215460ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_s2shape_index.h" #include #include "absl/memory/memory.h" #include "s2/util/bits/bits.h" #include "s2/mutable_s2shape_index.h" using absl::make_unique; using std::unique_ptr; using std::vector; bool EncodedS2ShapeIndex::Iterator::Locate(const S2Point& target) { return LocateImpl(target, this); } EncodedS2ShapeIndex::CellRelation EncodedS2ShapeIndex::Iterator::Locate( S2CellId target) { return LocateImpl(target, this); } unique_ptr EncodedS2ShapeIndex::Iterator::Clone() const { return make_unique(*this); } void EncodedS2ShapeIndex::Iterator::Copy(const IteratorBase& other) { *this = *down_cast(&other); } S2Shape* EncodedS2ShapeIndex::GetShape(int id) const { // This method is called when a shape has not been decoded yet. unique_ptr shape = (*shape_factory_)[id]; if (shape) shape->id_ = id; S2Shape* expected = kUndecodedShape(); if (shapes_[id].compare_exchange_strong(expected, shape.get(), std::memory_order_acq_rel)) { return shape.release(); // Ownership has been transferred to shapes_. } return expected; // Another thread updated shapes_[id] first. } inline const S2ShapeIndexCell* EncodedS2ShapeIndex::GetCell(int i) const { // memory_order_release ensures that no reads or writes in the current // thread can be reordered after this store, and all writes in the current // thread are visible to other threads that acquire the same atomic // variable. // // memory_order_acquire ensures that no reads or writes in the current // thread can be reordered before this load, and all writes in other threads // that release the same atomic variable are visible in this thread. // // We use this to implement lock-free synchronization on the read path as // follows: // // 1. cells_decoded(i) is updated using acquire/release semantics // 2. cells_[i] is written before cells_decoded(i) // 3. cells_[i] is read after cells_decoded(i) // // Note that we do still use a lock for the write path to ensure that // cells_[i] and cell_decoded(i) are updated together atomically. if (cell_decoded(i)) return cells_[i]; // Decode the cell before acquiring the spinlock in order to minimize the // time that the lock is held. auto cell = make_unique(); Decoder decoder = encoded_cells_.GetDecoder(i); if (!cell->Decode(num_shape_ids(), &decoder)) { return nullptr; } // Recheck cell_decoded(i) once we hold the lock in case another thread // has decoded this cell in the meantime. SpinLockHolder l(&cells_lock_); if (cell_decoded(i)) return cells_[i]; // Update the cell, setting cells_[i] before cell_decoded(i). cells_[i] = cell.get(); set_cell_decoded(i); if (cell_cache_.size() < max_cell_cache_size()) { cell_cache_.push_back(i); } return cell.release(); // Ownership has been transferred to cells_. } const S2ShapeIndexCell* EncodedS2ShapeIndex::Iterator::GetCell() const { return index_->GetCell(cell_pos_); } EncodedS2ShapeIndex::EncodedS2ShapeIndex() { } EncodedS2ShapeIndex::~EncodedS2ShapeIndex() { // Although Minimize() does slightly more than required for destruction // (i.e., it resets vector elements to their default values), this does not // affect benchmark times. 
Minimize(); } bool EncodedS2ShapeIndex::Init(Decoder* decoder, const ShapeFactory& shape_factory) { Minimize(); uint64 max_edges_version; if (!decoder->get_varint64(&max_edges_version)) return false; int version = max_edges_version & 3; if (version != MutableS2ShapeIndex::kCurrentEncodingVersionNumber) { return false; } options_.set_max_edges_per_cell(max_edges_version >> 2); // AtomicShape is a subtype of std::atomic that changes the // default constructor value to kUndecodedShape(). This saves the effort of // initializing all the elements twice. shapes_ = std::vector(shape_factory.size()); shape_factory_ = shape_factory.Clone(); if (!cell_ids_.Init(decoder)) return false; // The cells_ elements are *uninitialized memory*. Instead we have bit // vector (cells_decoded_) to indicate which elements of cells_ are valid. // This reduces constructor times by more than a factor of 50, since rather // than needing to initialize one 64-bit pointer per cell to zero, we only // need to initialize one bit per cell to zero. // // For very large S2ShapeIndexes the internal memset() call to initialize // cells_decoded_ still takes about 1.3 microseconds per million cells // (assuming an optimized implementation that writes 32 bytes per cycle), // but this seems reasonable relative to other likely costs (I/O, etc). // // NOTE(ericv): DO NOT use make_unique<> here! make_unique<> allocates memory // using "new T[n]()", which initializes all elements of the array. This // slows down some benchmarks by over 100x. // // cells_ = make_unique[](cell_ids_.size()); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // NO NO NO cells_.reset(new S2ShapeIndexCell*[cell_ids_.size()]); cells_decoded_ = vector>((cell_ids_.size() + 63) >> 6); return encoded_cells_.Init(decoder); } void EncodedS2ShapeIndex::Minimize() { if (cells_ == nullptr) return; // Not initialized yet. for (auto& atomic_shape : shapes_) { S2Shape* shape = atomic_shape.load(std::memory_order_relaxed); if (shape != kUndecodedShape() && shape != nullptr) { atomic_shape.store(kUndecodedShape(), std::memory_order_relaxed); delete shape; } } if (cell_cache_.size() < max_cell_cache_size()) { // When only a tiny fraction of the cells are decoded, we keep track of // those cells in cell_cache_ to avoid the cost of scanning the // cells_decoded_ vector. (The cost is only about 1 cycle per 64 cells, // but for a huge polygon with 1 million cells that's still 16000 cycles.) for (int pos : cell_cache_) { cells_decoded_[pos >> 6].store(0, std::memory_order_relaxed); delete cells_[pos]; } } else { // Scan the cells_decoded_ vector looking for cells that must be deleted. for (int i = cells_decoded_.size(); --i >= 0;) { uint64 bits = cells_decoded_[i].load(std::memory_order_relaxed); if (bits == 0) continue; do { int offset = Bits::FindLSBSetNonZero64(bits); delete cells_[(i << 6) + offset]; bits &= bits - 1; } while (bits != 0); cells_decoded_[i].store(0, std::memory_order_relaxed); } } cell_cache_.clear(); } size_t EncodedS2ShapeIndex::SpaceUsed() const { // TODO(ericv): Add SpaceUsed() method to S2Shape base class,and include // memory owned by the allocated S2Shapes (here and in S2ShapeIndex). 
size_t size = sizeof(*this); size += shapes_.capacity() * sizeof(std::atomic); size += cell_ids_.size() * sizeof(std::atomic); // cells_ size += cells_decoded_.capacity() * sizeof(std::atomic); size += cell_cache_.capacity() * sizeof(int); return size; } s2geometry-0.10.0/src/s2/encoded_s2shape_index.h000066400000000000000000000352021422156367100214020ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_ENCODED_S2SHAPE_INDEX_H_ #define S2_ENCODED_S2SHAPE_INDEX_H_ #include "s2/encoded_s2cell_id_vector.h" #include "s2/encoded_string_vector.h" #include "s2/mutable_s2shape_index.h" // EncodedS2ShapeIndex is an S2ShapeIndex implementation that works directly // with encoded data. Rather than decoding everything in advance, geometry is // decoded incrementally (down to individual edges) as needed. It can be // initialized from a single block of data in nearly constant time (about 1.3 // microseconds per million edges). This saves large amounts of memory and is // also much faster in the common situation where geometric data is loaded // from somewhere, decoded, and then only a single operation is performed on // it. It supports all S2ShapeIndex operations including boolean operations, // measuring distances, etc. // // The speedups can be over 1000x for large geometric objects. For example // vertices and 50,000 loops. If this geometry is represented as an // S2Polygon, then simply decoding it takes ~250ms and building its internal // S2ShapeIndex takes a further ~1500ms. These times are much longer than the // time needed for many operations, e.g. e.g. measuring the distance from the // polygon to one of its vertices takes only about 0.001ms. // // If the same geometry is represented using EncodedLaxPolygonShape and // EncodedS2ShapeIndex, initializing the index takes only 0.005ms. The // distance measuring operation itself takes slightly longer than before // (0.0013ms vs. the original 0.001ms) but the overall time is now much lower // (~0.007ms vs. 1750ms). This is possible because the new classes decode // data lazily (only when it is actually needed) and with fine granularity // (down to the level of individual edges). The overhead associated with this // incremental decoding is small; operations are typically 25% slower compared // to fully decoding the MutableS2ShapeIndex and its underlying shapes. // // EncodedS2ShapeIndex also uses less memory than MutableS2ShapeIndex. The // encoded data is contiguous block of memory that is typically between 4-20% // of the original index size (see MutableS2ShapeIndex::Encode for examples). // Constructing the EncodedS2ShapeIndex uses additional memory, but even so // the total memory usage immediately after construction is typically 25-35% // of the corresponding MutableS2ShapeIndex size. // // Note that MutableS2ShapeIndex will still be faster and use less memory if // you need to decode the entire index. 
Similarly MutableS2ShapeIndex will be // faster if you plan to execute a large number of operations on it. The main // advantage of EncodedS2ShapeIndex is that it is much faster and uses less // memory when only a small portion of the data needs to be decoded. // // Example code showing how to create an encoded index: // // Encoder encoder; // s2shapeutil::CompactEncodeTaggedShapes(index, encoder); // index.Encode(encoder); // string encoded(encoder.base(), encoder.length()); // Encoded data. // // Example code showing how to use an encoded index: // // Decoder decoder(encoded.data(), encoded.size()); // EncodedS2ShapeIndex index; // index.Init(&decoder, s2shapeutil::LazyDecodeShapeFactory(&decoder)); // S2ClosestEdgeQuery query(&index); // S2ClosestEdgeQuery::PointTarget target(test_point); // if (query.IsDistanceLessOrEqual(&target, limit)) { // ... // } // // Note that EncodedS2ShapeIndex does not make a copy of the encoded data, and // therefore the client must ensure that this data outlives the // EncodedS2ShapeIndex object. // // There are a number of built-in classes that work with S2ShapeIndex objects. // Generally these classes accept any collection of geometry that can be // represented by an S2ShapeIndex, i.e. any combination of points, polylines, // and polygons. Such classes include: // // - S2ContainsPointQuery: returns the shape(s) that contain a given point. // // - S2ClosestEdgeQuery: returns the closest edge(s) to a given point, edge, // S2CellId, or S2ShapeIndex. // // - S2CrossingEdgeQuery: returns the edge(s) that cross a given edge. // // - S2BooleanOperation: computes boolean operations such as union, // and boolean predicates such as containment. // // - S2ShapeIndexRegion: can be used together with S2RegionCoverer to // approximate geometry as a set of S2CellIds. // // - S2ShapeIndexBufferedRegion: computes approximations that have been // expanded by a given radius. // // EncodedS2ShapeIndex is thread-compatible, meaning that const methods are // thread safe, and non-const methods are not thread safe. The only non-const // method is Minimize(), so if you plan to call Minimize() while other threads // are actively using the index that you must use an external reader-writer // lock such as absl::Mutex to guard access to it. (There is no global state // and therefore each index can be guarded independently.) class EncodedS2ShapeIndex final : public S2ShapeIndex { public: using Options = MutableS2ShapeIndex::Options; using ShapeFactory = S2ShapeIndex::ShapeFactory; // Creates an index that must be initialized by calling Init(). EncodedS2ShapeIndex(); ~EncodedS2ShapeIndex() override; // Initializes the EncodedS2ShapeIndex, returning true on success. // // This method does not decode the S2Shape objects in the index; this is // the responsibility of the client-provided function "shape_factory" // (see s2shapeutil_coding.h). Example usage: // // index.Init(decoder, s2shapeutil::LazyDecodeShapeFactory(decoder)); // // Note that the encoded shape vector must *precede* the encoded S2ShapeIndex // in the Decoder's data buffer in this example. bool Init(Decoder* decoder, const ShapeFactory& shape_factory); const Options& options() const { return options_; } // The number of distinct shape ids in the index. This equals the number of // shapes in the index provided that no shapes have ever been removed. // (Shape ids are not reused.) 
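  //
  // For example, if the index was encoded from a MutableS2ShapeIndex to which
  // five shapes were added and one shape was then released before encoding,
  // num_shape_ids() still returns 5 and shape() returns nullptr for the
  // released id.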
int num_shape_ids() const override { return shapes_.size(); } // Return a pointer to the shape with the given id, or nullptr if the shape // has been removed from the index. S2Shape* shape(int id) const override; // Minimizes memory usage by requesting that any data structures that can be // rebuilt should be discarded. This method invalidates all iterators. // // Like all non-const methods, this method is not thread-safe. void Minimize() override; class Iterator final : public IteratorBase { public: // Default constructor; must be followed by a call to Init(). Iterator(); // Constructs an iterator positioned as specified. By default iterators // are unpositioned, since this avoids an extra seek in this situation // where one of the seek methods (such as Locate) is immediately called. // // If you want to position the iterator at the beginning, e.g. in order to // loop through the entire index, do this instead: // // for (EncodedS2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN); // !it.done(); it.Next()) { ... } explicit Iterator(const EncodedS2ShapeIndex* index, InitialPosition pos = UNPOSITIONED); // Initializes an iterator for the given EncodedS2ShapeIndex. void Init(const EncodedS2ShapeIndex* index, InitialPosition pos = UNPOSITIONED); // Inherited non-virtual methods: // S2CellId id() const; // const S2ShapeIndexCell& cell() const; // bool done() const; // S2Point center() const; // IteratorBase API: void Begin() override; void Finish() override; void Next() override; bool Prev() override; void Seek(S2CellId target) override; bool Locate(const S2Point& target) override; CellRelation Locate(S2CellId target) override; protected: const S2ShapeIndexCell* GetCell() const override; std::unique_ptr Clone() const override; void Copy(const IteratorBase& other) override; private: void Refresh(); // Updates the IteratorBase fields. const EncodedS2ShapeIndex* index_; int32 cell_pos_; // Current position in the vector of index cells. int32 num_cells_; }; // Returns the number of bytes currently occupied by the index (including any // unused space at the end of vectors, etc). It has the same thread safety // as the other "const" methods (see introduction). size_t SpaceUsed() const override; protected: std::unique_ptr NewIterator(InitialPosition pos) const override; private: friend class Iterator; // Returns a value indicating that a shape has not been decoded yet. inline static S2Shape* kUndecodedShape() { return reinterpret_cast(1); } // Like std::atomic, but defaults to kUndecodedShape(). class AtomicShape : public std::atomic { public: AtomicShape() : std::atomic(kUndecodedShape()) {} }; S2Shape* GetShape(int id) const; const S2ShapeIndexCell* GetCell(int i) const; bool cell_decoded(int i) const; void set_cell_decoded(int i) const; int max_cell_cache_size() const; std::unique_ptr shape_factory_; // The options specified for this index. Options options_; // A vector containing all shapes in the index. Initially all shapes are // set to kUndecodedShape(); as shapes are decoded, they are added to the // vector using std::atomic::compare_exchange_strong. mutable std::vector shapes_; // A vector containing the S2CellIds of each cell in the index. s2coding::EncodedS2CellIdVector cell_ids_; // A vector containing the encoded contents of each cell in the index. s2coding::EncodedStringVector encoded_cells_; // A raw array containing the decoded contents of each cell in the index. // Initially all values are *uninitialized memory*. 
The cells_decoded_ // field below keeps track of which elements are present. mutable std::unique_ptr cells_; // A bit vector indicating which elements of cells_ have been decoded. // All other elements of cells_ contain uninitialized (random) memory. mutable std::vector> cells_decoded_; // In order to minimize destructor time when very few cells of a large // S2ShapeIndex are needed, we keep track of the indices of the first few // cells to be decoded. This lets us avoid scanning the cells_decoded_ // vector when the number of cells decoded is very small. mutable std::vector cell_cache_; // Protects all updates to cells_ and cells_decoded_. mutable SpinLock cells_lock_; EncodedS2ShapeIndex(const EncodedS2ShapeIndex&) = delete; void operator=(const EncodedS2ShapeIndex&) = delete; }; ////////////////// Implementation details follow //////////////////// inline EncodedS2ShapeIndex::Iterator::Iterator() : index_(nullptr) { } inline EncodedS2ShapeIndex::Iterator::Iterator( const EncodedS2ShapeIndex* index, InitialPosition pos) { Init(index, pos); } inline void EncodedS2ShapeIndex::Iterator::Init( const EncodedS2ShapeIndex* index, InitialPosition pos) { index_ = index; num_cells_ = index->cell_ids_.size(); cell_pos_ = (pos == BEGIN) ? 0 : num_cells_; Refresh(); } inline void EncodedS2ShapeIndex::Iterator::Refresh() { if (cell_pos_ == num_cells_) { set_finished(); } else { // It's faster to initialize the cell to nullptr even if it has already // been decoded, since algorithms frequently don't need it (i.e., based on // the S2CellId they might not need to look at the cell contents). set_state(index_->cell_ids_[cell_pos_], nullptr); } } inline void EncodedS2ShapeIndex::Iterator::Begin() { cell_pos_ = 0; Refresh(); } inline void EncodedS2ShapeIndex::Iterator::Finish() { cell_pos_ = num_cells_; Refresh(); } inline void EncodedS2ShapeIndex::Iterator::Next() { S2_DCHECK(!done()); ++cell_pos_; Refresh(); } inline bool EncodedS2ShapeIndex::Iterator::Prev() { if (cell_pos_ == 0) return false; --cell_pos_; Refresh(); return true; } inline void EncodedS2ShapeIndex::Iterator::Seek(S2CellId target) { cell_pos_ = index_->cell_ids_.lower_bound(target); Refresh(); } inline std::unique_ptr EncodedS2ShapeIndex::NewIterator(InitialPosition pos) const { return absl::make_unique(this, pos); } inline S2Shape* EncodedS2ShapeIndex::shape(int id) const { S2Shape* shape = shapes_[id].load(std::memory_order_acquire); if (shape != kUndecodedShape()) return shape; return GetShape(id); } // Returns true if the given cell has already been decoded. inline bool EncodedS2ShapeIndex::cell_decoded(int i) const { // cell_decoded(i) uses acquire/release synchronization (see .cc file). uint64 group_bits = cells_decoded_[i >> 6].load(std::memory_order_acquire); return (group_bits & (1ULL << (i & 63))) != 0; } // Marks the given cell as having been decoded. // REQUIRES: cells_lock_ is held inline void EncodedS2ShapeIndex::set_cell_decoded(int i) const { // We use memory_order_release for the store operation below to ensure that // cells_decoded(i) sees the most recent value, however we can use // memory_order_relaxed for the load because cells_lock_ is held. std::atomic* group = &cells_decoded_[i >> 6]; uint64 bits = group->load(std::memory_order_relaxed); group->store(bits | 1ULL << (i & 63), std::memory_order_release); } inline int EncodedS2ShapeIndex::max_cell_cache_size() const { // The cell cache is sized so that scanning decoded_cells_ in the destructor // costs about 30 cycles per decoded cell in the worst case. 
(This overhead // is acceptable relative to the other costs of decoding each cell.) // // For example, if there are 65,536 cells then we won't need to scan // encoded_cells_ unless we decode at least (65536/2048) == 32 cells. It // takes about 1 cycle per 64 cells to scan encoded_cells_, so that works // out to (65536/64) == 1024 cycles. However this cost is amortized over // the 32 cells decoded, which works out to 32 cycles per cell. return cell_ids_.size() >> 11; } #endif // S2_ENCODED_S2SHAPE_INDEX_H_ s2geometry-0.10.0/src/s2/encoded_s2shape_index_test.cc000066400000000000000000000277701422156367100226120ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_s2shape_index.h" #include #include #include #include #include #include "absl/base/call_once.h" #include "absl/flags/flag.h" #include "absl/memory/memory.h" #include "absl/strings/escaping.h" #include "absl/strings/str_cat.h" #include "s2/mutable_s2shape_index.h" #include "s2/s2builder.h" #include "s2/s2builderutil_s2polyline_layer.h" #include "s2/s2builderutil_snap_functions.h" #include "s2/s2cap.h" #include "s2/s2closest_edge_query.h" #include "s2/s2contains_point_query.h" #include "s2/s2edge_distances.h" #include "s2/s2latlng.h" #include "s2/s2lax_polygon_shape.h" #include "s2/s2lax_polyline_shape.h" #include "s2/s2loop.h" #include "s2/s2point_vector_shape.h" #include "s2/s2pointutil.h" #include "s2/s2shapeutil_coding.h" #include "s2/s2shapeutil_testing.h" #include "s2/s2testing.h" #include "s2/s2text_format.h" #include "s2/thread_testing.h" using absl::make_unique; using absl::StrCat; using s2builderutil::S2CellIdSnapFunction; using s2builderutil::S2PolylineLayer; using std::max; using std::string; using std::unique_ptr; using std::vector; template bool DecodeHomegeneousShapeIndex(EncodedS2ShapeIndex* index, Decoder* decoder) { return index->Init(decoder, s2shapeutil::HomogeneousShapeFactory(decoder)); } template void TestEncodedS2ShapeIndex(const MutableS2ShapeIndex& expected, size_t expected_bytes) { Encoder encoder; s2shapeutil::EncodeHomogeneousShapes(expected, &encoder); size_t shapes_bytes = encoder.length(); expected.Encode(&encoder); EXPECT_EQ(expected_bytes, encoder.length() - shapes_bytes); Decoder decoder(encoder.base(), encoder.length()); EncodedS2ShapeIndex actual; ASSERT_TRUE(DecodeHomegeneousShapeIndex(&actual, &decoder)); EXPECT_EQ(expected.options().max_edges_per_cell(), actual.options().max_edges_per_cell()); s2testing::ExpectEqual(expected, actual); } TEST(EncodedS2ShapeIndex, Empty) { MutableS2ShapeIndex index; TestEncodedS2ShapeIndex(index, 4); } TEST(EncodedS2ShapeIndex, OneEdge) { MutableS2ShapeIndex index; index.Add(s2textformat::MakeLaxPolylineOrDie("1:1, 2:2")); TestEncodedS2ShapeIndex(index, 8); } TEST(EncodedS2ShapeIndex, RegularLoops) { struct TestCase { int num_edges; size_t expected_bytes; }; vector test_cases = { {4, 8}, {8, 8}, {16, 16}, {64, 77}, {256, 327}, 
{4096, 8813}, {65536, 168291}, }; for (const auto& test_case : test_cases) { MutableS2ShapeIndex index; S2Testing::rnd.Reset(test_case.num_edges); SCOPED_TRACE(StrCat("num_edges = ", test_case.num_edges)); S2Polygon polygon(S2Loop::MakeRegularLoop(S2Point(3, 2, 1).Normalize(), S1Angle::Degrees(0.1), test_case.num_edges)); index.Add(make_unique(polygon)); TestEncodedS2ShapeIndex( index, test_case.expected_bytes); } } TEST(EncodedS2ShapeIndex, OverlappingPointClouds) { struct TestCase { int num_shapes, num_points_per_shape; size_t expected_bytes; }; vector test_cases = { {1, 50, 83}, {2, 100, 583}, {4, 100, 1383}, }; S2Cap cap(S2Point(0.1, -0.4, 0.3).Normalize(), S1Angle::Degrees(1)); for (const auto& test_case : test_cases) { MutableS2ShapeIndex index; S2Testing::rnd.Reset(test_case.num_shapes); SCOPED_TRACE(StrCat("num_shapes = ", test_case.num_shapes)); for (int i = 0; i < test_case.num_shapes; ++i) { vector points; for (int j = 0; j < test_case.num_points_per_shape; ++j) { points.push_back(S2Testing::SamplePoint(cap)); } index.Add(make_unique(points)); } TestEncodedS2ShapeIndex( index, test_case.expected_bytes); } } TEST(EncodedS2ShapeIndex, OverlappingPolylines) { struct TestCase { int num_shapes, num_shape_edges; size_t expected_bytes; }; vector test_cases = { {2, 50, 139}, {10, 50, 777}, {20, 50, 2219}, }; S2Cap cap(S2Point(-0.2, -0.3, 0.4).Normalize(), S1Angle::Degrees(0.1)); for (const auto& test_case : test_cases) { S1Angle edge_len = 2 * cap.GetRadius() / test_case.num_shape_edges; MutableS2ShapeIndex index; S2Testing::rnd.Reset(test_case.num_shapes); SCOPED_TRACE(StrCat("num_shapes = ", test_case.num_shapes)); for (int i = 0; i < test_case.num_shapes; ++i) { S2Point a = S2Testing::SamplePoint(cap), b = S2Testing::RandomPoint(); vector vertices; int n = test_case.num_shape_edges; for (int j = 0; j <= n; ++j) { vertices.push_back(S2::GetPointOnLine(a, b, j * edge_len)); } index.Add(make_unique(vertices)); } TestEncodedS2ShapeIndex( index, test_case.expected_bytes); } } TEST(EncodedS2ShapeIndex, OverlappingLoops) { struct TestCase { int num_shapes, max_edges_per_loop; size_t expected_bytes; }; vector test_cases = { {2, 250, 138}, {5, 250, 1084}, {25, 50, 3673}, }; S2Cap cap(S2Point(-0.1, 0.25, 0.2).Normalize(), S1Angle::Degrees(3)); for (const auto& test_case : test_cases) { MutableS2ShapeIndex index; S2Testing::rnd.Reset(test_case.num_shapes); SCOPED_TRACE(StrCat("num_shapes = ", test_case.num_shapes)); for (int i = 0; i < test_case.num_shapes; ++i) { S2Point center = S2Testing::SamplePoint(cap); double radius_fraction = S2Testing::rnd.RandDouble(); // Scale the number of edges so that they are all about the same length // (similar to modeling all geometry at a similar resolution). int num_edges = max(3.0, test_case.max_edges_per_loop * radius_fraction); S2Polygon polygon(S2Loop::MakeRegularLoop( center, cap.GetRadius() * radius_fraction, num_edges)); index.Add(make_unique(polygon)); } TestEncodedS2ShapeIndex( index, test_case.expected_bytes); } } // Like S2PolylineLayer, but converts the polyline to an S2LaxPolylineShape // and adds it to an S2ShapeIndex (if the polyline is non-empty). 
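//
// A minimal usage sketch (mirroring the SnappedFractalPolylines test below;
// the snap function is an arbitrary choice, and "polyline" is assumed to be
// some S2Polyline built elsewhere):
//
//   MutableS2ShapeIndex index;
//   S2Builder builder{S2Builder::Options{S2CellIdSnapFunction()}};
//   builder.StartLayer(make_unique<IndexedLaxPolylineLayer>(&index));
//   builder.AddPolyline(polyline);
//   S2Error error;
//   S2_CHECK(builder.Build(&error)) << error.text();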
class IndexedLaxPolylineLayer : public S2Builder::Layer { public: using Options = S2PolylineLayer::Options; explicit IndexedLaxPolylineLayer(MutableS2ShapeIndex* index, const Options& options = Options()) : index_(index), polyline_(make_unique()), layer_(polyline_.get(), options) {} GraphOptions graph_options() const override { return layer_.graph_options(); } void Build(const Graph& g, S2Error* error) override { layer_.Build(g, error); if (error->ok() && polyline_->num_vertices() > 0) { index_->Add(absl::make_unique(*polyline_)); } } private: MutableS2ShapeIndex* index_; std::unique_ptr polyline_; S2PolylineLayer layer_; }; TEST(EncodedS2ShapeIndex, SnappedFractalPolylines) { MutableS2ShapeIndex index; S2Builder builder{S2Builder::Options{S2CellIdSnapFunction()}}; for (int i = 0; i < 5; ++i) { builder.StartLayer(make_unique(&index)); S2Testing::Fractal fractal; fractal.SetLevelForApproxMaxEdges(3 * 256); auto frame = S2::GetFrame(S2LatLng::FromDegrees(10, i).ToPoint()); auto loop = fractal.MakeLoop(frame, S1Angle::Degrees(0.1)); std::vector vertices; S2Testing::AppendLoopVertices(*loop, &vertices); S2Polyline polyline(vertices); builder.AddPolyline(polyline); } S2Error error; ASSERT_TRUE(builder.Build(&error)) << error.text(); TestEncodedS2ShapeIndex( index, 8698); } // A test that repeatedly minimizes "index_" in one thread and then reads the // index_ concurrently from several other threads. When all threads have // finished reading, the first thread minimizes the index again. // // Note that Minimize() is non-const and therefore does not need to be tested // concurrently with the const methods. class LazyDecodeTest : public s2testing::ReaderWriterTest { public: LazyDecodeTest() { // We generate one shape per dimension. Each shape has vertices uniformly // distributed across the sphere, and the vertices for each dimension are // different. Having fewer cells in the index is more likely to trigger // race conditions, and so shape 0 has 384 points, shape 1 is a polyline // with 96 vertices, and shape 2 is a polygon with 24 vertices. MutableS2ShapeIndex input; for (int dim = 0; dim < 3; ++dim) { int level = 3 - dim; // See comments above. vector vertices; for (auto id = S2CellId::Begin(level); id != S2CellId::End(level); id = id.next()) { vertices.push_back(id.ToPoint()); } switch (dim) { case 0: input.Add(make_unique(vertices)); break; case 1: input.Add(make_unique(vertices)); break; default: input.Add(make_unique( vector>{std::move(vertices)})); break; } } Encoder encoder; s2shapeutil::CompactEncodeTaggedShapes(input, &encoder); input.Encode(&encoder); encoded_.assign(encoder.base(), encoder.length()); Decoder decoder(encoded_.data(), encoded_.size()); index_.Init(&decoder, s2shapeutil::LazyDecodeShapeFactory(&decoder)); } void WriteOp() override { index_.Minimize(); } void ReadOp() override { S2ClosestEdgeQuery query(&index_); for (int iter = 0; iter < 10; ++iter) { S2ClosestEdgeQuery::PointTarget target(S2Testing::RandomPoint()); query.FindClosestEdge(&target); } } protected: string encoded_; EncodedS2ShapeIndex index_; }; TEST(EncodedS2ShapeIndex, LazyDecode) { // Ensure that lazy decoding is thread-safe. In other words, make sure that // nothing bad happens when multiple threads call "const" methods that cause // index and/or shape data to be decoded. LazyDecodeTest test; // The number of readers should be large enough so that it is likely that // several readers will be running at once (with a multiple-core CPU). 
const int kNumReaders = 8; const int kIters = 1000; test.Run(kNumReaders, kIters); } TEST(EncodedS2ShapeIndex, JavaByteCompatibility) { MutableS2ShapeIndex expected; expected.Add(make_unique( s2textformat::MakePolylineOrDie("0:0, 1:1"))); expected.Add(make_unique( s2textformat::MakePolylineOrDie("1:1, 2:2"))); expected.Release(0); // bytes is the encoded data of an S2ShapeIndex with a null shape and a // polyline with one edge. It was derived by base-16 encoding the buffer of // an encoder to which expected was encoded. string bytes = absl::HexStringToBytes( "100036020102000000B4825F3C81FDEF3F27DCF7C958DE913F1EDD892B0BDF913FFC7FB8" "B805F6EF3F28516A6D8FDBA13F27DCF7C958DEA13F28C809010408020010"); Decoder decoder(bytes.data(), bytes.length()); MutableS2ShapeIndex actual; actual.Init(&decoder, s2shapeutil::FullDecodeShapeFactory(&decoder)); s2testing::ExpectEqual(expected, actual); } s2geometry-0.10.0/src/s2/encoded_string_vector.cc000066400000000000000000000043371422156367100217010ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_string_vector.h" using absl::MakeSpan; using absl::Span; using absl::string_view; using std::string; using std::vector; namespace s2coding { StringVectorEncoder::StringVectorEncoder() { } void StringVectorEncoder::Encode(Encoder* encoder) { offsets_.push_back(data_.length()); // We don't encode the first element of "offsets_", which is always zero. EncodeUintVector( MakeSpan(offsets_.data() + 1, offsets_.data() + offsets_.size()), encoder); encoder->Ensure(data_.length()); encoder->putn(data_.base(), data_.length()); } void StringVectorEncoder::Encode(Span v, Encoder* encoder) { StringVectorEncoder string_vector; for (const auto& str : v) string_vector.Add(str); string_vector.Encode(encoder); } bool EncodedStringVector::Init(Decoder* decoder) { if (!offsets_.Init(decoder)) return false; data_ = decoder->skip(0); if (offsets_.size() > 0) { uint64 length = offsets_[offsets_.size() - 1]; if (decoder->avail() < length) return false; decoder->skip(length); } return true; } vector EncodedStringVector::Decode() const { size_t n = size(); vector result(n); for (int i = 0; i < n; ++i) { result[i] = (*this)[i]; } return result; } // The encoding must be identical to StringVectorEncoder::Encode(). void EncodedStringVector::Encode(Encoder* encoder) const { offsets_.Encode(encoder); if (offsets_.size() > 0) { const uint64 length = offsets_[offsets_.size() - 1]; encoder->Ensure(length); encoder->putn(data_, length); } } } // namespace s2coding s2geometry-0.10.0/src/s2/encoded_string_vector.h000066400000000000000000000127061422156367100215420ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_ENCODED_STRING_VECTOR_H_ #define S2_ENCODED_STRING_VECTOR_H_ #include #include #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "s2/encoded_uint_vector.h" namespace s2coding { // This class allows an EncodedStringVector to be created by adding strings // incrementally. It also supports adding strings that are the output of // another Encoder. For example, to create a vector of encoded S2Polygons, // you can do this: // // void EncodePolygons(const vector& polygons, Encoder* encoder) { // StringVectorEncoder encoded_polygons; // for (auto polygon : polygons) { // polygon->Encode(encoded_polygons.AddViaEncoder()); // } // encoded_polygons.Encode(encoder); // } class StringVectorEncoder { public: StringVectorEncoder(); // Adds a string to the encoded vector. void Add(const std::string& str); // Adds a string to the encoded vector by means of the given Encoder. The // string consists of all output added to the encoder before the next call // to any method of this class (after which the encoder is no longer valid). Encoder* AddViaEncoder(); // Appends the EncodedStringVector representation to the given Encoder. // // REQUIRES: "encoder" uses the default constructor, so that its buffer // can be enlarged as necessary by calling Ensure(int). void Encode(Encoder* encoder); // Encodes a vector of strings in a format that can later be decoded as an // EncodedStringVector. // // REQUIRES: "encoder" uses the default constructor, so that its buffer // can be enlarged as necessary by calling Ensure(int). static void Encode(absl::Span v, Encoder* encoder); private: // A vector consisting of the starting offset of each string in the // encoder's data buffer, plus a final entry pointing just past the end of // the last string. std::vector offsets_; Encoder data_; }; // This class represents an encoded vector of strings. Values are decoded // only when they are accessed. This allows for very fast initialization and // no additional memory use beyond the encoded data. The encoded data is not // owned by this class; typically it points into a large contiguous buffer // that contains other encoded data as well. // // This is one of several helper classes that allow complex data structures to // be initialized from an encoded format in constant time and then decoded on // demand. This can be a big performance advantage when only a small part of // the data structure is actually used. class EncodedStringVector { public: // Constructs an uninitialized object; requires Init() to be called. EncodedStringVector() {} // Initializes the EncodedStringVector. Returns false on errors, leaving // the vector in an unspecified state. // // REQUIRES: The Decoder data buffer must outlive this object. bool Init(Decoder* decoder); // Resets the vector to be empty. void Clear(); // Returns the size of the original vector. size_t size() const; // Returns the string at the given index. absl::string_view operator[](int i) const; // Returns a Decoder initialized with the string at the given index. 
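  // For example, if each element was appended via AddViaEncoder() by
  // S2Polygon::Encode (as in the StringVectorEncoder comment above), and
  // "vec" is an EncodedStringVector initialized from that data, the i-th
  // polygon can be recovered roughly as follows:
  //
  //   Decoder d = vec.GetDecoder(i);
  //   S2Polygon polygon;
  //   bool ok = polygon.Decode(&d);
  //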
Decoder GetDecoder(int i) const; // Returns a pointer to the start of the string at the given index. This is // faster than operator[] but returns an unbounded string. const char* GetStart(int i) const; // Returns the entire vector of original strings. Requires that the // data buffer passed to the constructor persists until the result vector is // no longer needed. std::vector Decode() const; void Encode(Encoder* encoder) const; private: EncodedUintVector offsets_; const char* data_; }; ////////////////// Implementation details follow //////////////////// inline void StringVectorEncoder::Add(const std::string& str) { offsets_.push_back(data_.length()); data_.Ensure(str.size()); data_.putn(str.data(), str.size()); } inline Encoder* StringVectorEncoder::AddViaEncoder() { offsets_.push_back(data_.length()); return &data_; } inline void EncodedStringVector::Clear() { offsets_.Clear(); data_ = nullptr; } inline size_t EncodedStringVector::size() const { return offsets_.size(); } inline absl::string_view EncodedStringVector::operator[](int i) const { uint64 start = (i == 0) ? 0 : offsets_[i - 1]; uint64 limit = offsets_[i]; return absl::string_view(data_ + start, limit - start); } inline Decoder EncodedStringVector::GetDecoder(int i) const { uint64 start = (i == 0) ? 0 : offsets_[i - 1]; uint64 limit = offsets_[i]; return Decoder(data_ + start, limit - start); } inline const char* EncodedStringVector::GetStart(int i) const { uint64 start = (i == 0) ? 0 : offsets_[i - 1]; return data_ + start; } } // namespace s2coding #endif // S2_ENCODED_STRING_VECTOR_H_ s2geometry-0.10.0/src/s2/encoded_string_vector_test.cc000066400000000000000000000037061422156367100227370ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // Author: ericv@google.com (Eric Veach) #include "s2/encoded_string_vector.h" #include #include #include "absl/strings/string_view.h" using absl::string_view; using std::string; using std::vector; namespace s2coding { void TestEncodedStringVector(const vector& input, size_t expected_bytes) { Encoder encoder; StringVectorEncoder::Encode(input, &encoder); EXPECT_EQ(expected_bytes, encoder.length()); Decoder decoder(encoder.base(), encoder.length()); EncodedStringVector actual; ASSERT_TRUE(actual.Init(&decoder)); vector expected; for (const auto& str : input) { expected.push_back(string_view(str)); } EXPECT_EQ(actual.Decode(), expected); } TEST(EncodedStringVectorTest, Empty) { TestEncodedStringVector({}, 1); } TEST(EncodedStringVectorTest, EmptyString) { TestEncodedStringVector({""}, 2); } TEST(EncodedStringVectorTest, RepeatedEmptyStrings) { TestEncodedStringVector({"", "", ""}, 4); } TEST(EncodedStringVectorTest, OneString) { TestEncodedStringVector({"apples"}, 8); } TEST(EncodedStringVectorTest, TwoStrings) { TestEncodedStringVector({"fuji", "mutsu"}, 12); } TEST(EncodedStringVectorTest, TwoBigStrings) { TestEncodedStringVector({string(10000, 'x'), string(100000, 'y')}, 110007); } } // namespace s2coding s2geometry-0.10.0/src/s2/encoded_uint_vector.h000066400000000000000000000243721422156367100212150ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_ENCODED_UINT_VECTOR_H_ #define S2_ENCODED_UINT_VECTOR_H_ #include #include #include "absl/base/internal/unaligned_access.h" #include "absl/types/span.h" #include "s2/util/bits/bits.h" #include "s2/util/coding/coder.h" #include "s2/util/coding/varint.h" namespace s2coding { // Encodes a vector of unsigned integers in a format that can later be // decoded as an EncodedUintVector. // // REQUIRES: T is an unsigned integer type. // REQUIRES: 2 <= sizeof(T) <= 8 // REQUIRES: "encoder" uses the default constructor, so that its buffer // can be enlarged as necessary by calling Ensure(int). template void EncodeUintVector(absl::Span v, Encoder* encoder); // This class represents an encoded vector of unsigned integers of type T. // Values are decoded only when they are accessed. This allows for very fast // initialization and no additional memory use beyond the encoded data. // The encoded data is not owned by this class; typically it points into a // large contiguous buffer that contains other encoded data as well. // // This is one of several helper classes that allow complex data structures to // be initialized from an encoded format in constant time and then decoded on // demand. This can be a big performance advantage when only a small part of // the data structure is actually used. // // Values are encoded using a fixed number of bytes per value, where the // number of bytes depends on the largest value present. // // REQUIRES: T is an unsigned integer type. 
// REQUIRES: 2 <= sizeof(T) <= 8 template class EncodedUintVector { public: static_assert(std::is_unsigned::value, "Unsupported signed integer"); static_assert(sizeof(T) & 0xe, "Unsupported integer length"); // Constructs an uninitialized object; requires Init() to be called. EncodedUintVector() {} // Initializes the EncodedUintVector. Returns false on errors, leaving the // vector in an unspecified state. // // REQUIRES: The Decoder data buffer must outlive this object. bool Init(Decoder* decoder); // Resets the vector to be empty. void Clear(); // Returns the size of the original vector. size_t size() const; // Returns the element at the given index. T operator[](int i) const; // Returns the index of the first element x such that (x >= target), or // size() if no such element exists. // // REQUIRES: The vector elements are sorted in non-decreasing order. size_t lower_bound(T target) const; // Decodes and returns the entire original vector. std::vector Decode() const; void Encode(Encoder* encoder) const; private: template size_t lower_bound(T target) const; const char* data_; uint32 size_; uint8 len_; }; // Encodes an unsigned integer in little-endian format using "length" bytes. // (The client must ensure that the encoder's buffer is large enough.) // // REQUIRES: T is an unsigned integer type. // REQUIRES: 2 <= sizeof(T) <= 8 // REQUIRES: 0 <= length <= sizeof(T) // REQUIRES: value < 256 ** length // REQUIRES: encoder->avail() >= length template void EncodeUintWithLength(T value, int length, Encoder* encoder); // Decodes a variable-length integer consisting of "length" bytes starting at // "ptr" in little-endian format. // // REQUIRES: T is an unsigned integer type. // REQUIRES: 2 <= sizeof(T) <= 8 // REQUIRES: 0 <= length <= sizeof(T) template T GetUintWithLength(const void* ptr, int length); // Decodes and consumes a variable-length integer consisting of "length" bytes // in little-endian format. Returns false if not enough bytes are available. // // REQUIRES: T is an unsigned integer type. // REQUIRES: 2 <= sizeof(T) <= 8 // REQUIRES: 0 <= length <= sizeof(T) template bool DecodeUintWithLength(int length, Decoder* decoder, T* result); ////////////////// Implementation details follow //////////////////// template inline void EncodeUintWithLength(T value, int length, Encoder* encoder) { static_assert(std::is_unsigned::value, "Unsupported signed integer"); static_assert(sizeof(T) & 0xe, "Unsupported integer length"); S2_DCHECK(length >= 0 && length <= sizeof(T)); S2_DCHECK_GE(encoder->avail(), length); while (--length >= 0) { encoder->put8(value); value >>= 8; } S2_DCHECK_EQ(value, 0); } template inline T GetUintWithLength(const char* ptr, int length) { static_assert(std::is_unsigned::value, "Unsupported signed integer"); static_assert(sizeof(T) & 0xe, "Unsupported integer length"); S2_DCHECK(length >= 0 && length <= sizeof(T)); // Note that the following code is faster than any of the following: // // - A loop that repeatedly loads and shifts one byte. // - memcpying "length" bytes to a local variable of type T. // - A switch statement that handles each length optimally. // // The following code is slightly faster: // // T mask = (length == 0) ? 0 : ~T{0} >> 8 * (sizeof(T) - length); // return *reinterpret_cast(ptr) & mask; // // However this technique is unsafe because in extremely rare cases it might // access out-of-bounds heap memory. 
(This can only happen if "ptr" is // within (sizeof(T) - length) bytes of the end of a memory page and the // following page in the address space is unmapped.) if (length & sizeof(T)) { if (sizeof(T) == 8) return ABSL_INTERNAL_UNALIGNED_LOAD64(ptr); if (sizeof(T) == 4) return ABSL_INTERNAL_UNALIGNED_LOAD32(ptr); if (sizeof(T) == 2) return ABSL_INTERNAL_UNALIGNED_LOAD16(ptr); S2_DCHECK_EQ(sizeof(T), 1); return *ptr; } T x = 0; ptr += length; if (sizeof(T) > 4 && (length & 4)) { x = ABSL_INTERNAL_UNALIGNED_LOAD32(ptr -= sizeof(uint32)); } if (sizeof(T) > 2 && (length & 2)) { x = (x << 16) + ABSL_INTERNAL_UNALIGNED_LOAD16(ptr -= sizeof(uint16)); } if (sizeof(T) > 1 && (length & 1)) { x = (x << 8) + static_cast(*--ptr); } return x; } template bool DecodeUintWithLength(int length, Decoder* decoder, T* result) { if (decoder->avail() < length) return false; const char* ptr = decoder->skip(0); *result = GetUintWithLength(ptr, length); decoder->skip(length); return true; } template void EncodeUintVector(absl::Span v, Encoder* encoder) { // The encoding is as follows: // // varint64: (v.size() * sizeof(T)) | (len - 1) // array of v.size() elements ["len" bytes each] // // Note that we don't allow (len == 0) since this would require an extra bit // to encode the length. T one_bits = 1; // Ensures len >= 1. for (auto x : v) one_bits |= x; int len = (Bits::FindMSBSetNonZero64(one_bits) >> 3) + 1; S2_DCHECK(len >= 1 && len <= 8); // Note that the multiplication is optimized into a bit shift. encoder->Ensure(Varint::kMax64 + v.size() * len); uint64 size_len = (uint64{v.size()} * sizeof(T)) | (len - 1); encoder->put_varint64(size_len); for (auto x : v) { EncodeUintWithLength(x, len, encoder); } } template bool EncodedUintVector::Init(Decoder* decoder) { uint64 size_len; if (!decoder->get_varint64(&size_len)) return false; size_ = size_len / sizeof(T); // Optimized into bit shift. len_ = (size_len & (sizeof(T) - 1)) + 1; if (size_ > std::numeric_limits::max() / sizeof(T)) return false; size_t bytes = size_ * len_; if (decoder->avail() < bytes) return false; data_ = decoder->skip(0); decoder->skip(bytes); return true; } template void EncodedUintVector::Clear() { size_ = 0; data_ = nullptr; } template inline size_t EncodedUintVector::size() const { return size_; } template inline T EncodedUintVector::operator[](int i) const { S2_DCHECK(i >= 0 && i < size_); return GetUintWithLength(data_ + i * len_, len_); } template size_t EncodedUintVector::lower_bound(T target) const { static_assert(sizeof(T) & 0xe, "Unsupported integer length"); S2_DCHECK(len_ >= 1 && len_ <= sizeof(T)); // TODO(ericv): Consider using the unused 28 bits of "len_" to store the // last result of lower_bound() to be used as a hint. This should help in // common situation where the same element is looked up repeatedly. This // would require declaring the new field (length_lower_bound_hint_) as // mutable std::atomic (accessed using std::memory_order_relaxed) // with a custom copy constructor that resets the hint component to zero. 
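  //
  // The switch below dispatches to the lower_bound<length>() helper so that
  // the byte length becomes a compile-time constant; this lets the compiler
  // fold the length checks in GetUintWithLength() down to a fixed sequence
  // of loads for each case.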
switch (len_) { case 1: return lower_bound<1>(target); case 2: return lower_bound<2>(target); case 3: return lower_bound<3>(target); case 4: return lower_bound<4>(target); case 5: return lower_bound<5>(target); case 6: return lower_bound<6>(target); case 7: return lower_bound<7>(target); default: return lower_bound<8>(target); } } template template inline size_t EncodedUintVector::lower_bound(T target) const { size_t lo = 0, hi = size_; while (lo < hi) { size_t mid = (lo + hi) >> 1; T value = GetUintWithLength(data_ + mid * length, length); if (value < target) { lo = mid + 1; } else { hi = mid; } } return lo; } template std::vector EncodedUintVector::Decode() const { std::vector result(size_); for (int i = 0; i < size_; ++i) { result[i] = (*this)[i]; } return result; } template // The encoding must be identical to StringVectorEncoder::Encode(). void EncodedUintVector::Encode(Encoder* encoder) const { uint64 size_len = (uint64{size_} * sizeof(T)) | (len_ - 1); encoder->Ensure(Varint::kMax64 + size_len); encoder->put_varint64(size_len); encoder->putn(data_, size_ * len_); } } // namespace s2coding #endif // S2_ENCODED_UINT_VECTOR_H_ s2geometry-0.10.0/src/s2/encoded_uint_vector_test.cc000066400000000000000000000103171422156367100224040ustar00rootroot00000000000000// Copyright 2018 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/encoded_uint_vector.h" #include #include using std::vector; namespace s2coding { // Make sure that this class is compact since it is extensively used. // 16 for 64-bit, 12 for 32-bit. 
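// (The expected layout is a data_ pointer plus a uint32 size_ and a uint8
// len_, rounded up to pointer alignment.)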
static_assert(sizeof(EncodedUintVector) <= 16, "too big"); template void TestEncodedUintVector(const vector& expected, size_t expected_bytes) { Encoder encoder; EncodeUintVector(expected, &encoder); EXPECT_EQ(expected_bytes, encoder.length()); Decoder decoder(encoder.base(), encoder.length()); EncodedUintVector actual; ASSERT_TRUE(actual.Init(&decoder)); EXPECT_EQ(actual.Decode(), expected); } TEST(EncodedUintVectorTest, Empty) { TestEncodedUintVector(vector{}, 1); } TEST(EncodedUintVectorTest, Zero) { TestEncodedUintVector(vector{0}, 2); } TEST(EncodedUintVectorTest, RepeatedZeros) { TestEncodedUintVector(vector{0, 0, 0}, 4); } TEST(EncodedUintVectorTest, MaxInt) { TestEncodedUintVector(vector{~0ULL}, 9); } TEST(EncodedUintVectorTest, OneByte) { TestEncodedUintVector(vector{0, 255, 1, 254}, 5); } TEST(EncodedUintVectorTest, TwoBytes) { TestEncodedUintVector(vector{0, 255, 256, 254}, 9); } TEST(EncodedUintVectorTest, ThreeBytes) { TestEncodedUintVector(vector{0xffffff, 0x0102, 0, 0x050403}, 13); } TEST(EncodedUintVectorTest, EightBytes) { TestEncodedUintVector(vector{~0ULL, 0, 0x0102030405060708}, 25); } template vector MakeSortedTestVector(int bytes_per_value, int num_values) { S2_DCHECK_LE(bytes_per_value, sizeof(T)); T limit_value = ~T{0} >> (8 * (sizeof(T) - bytes_per_value)); vector values; for (int i = 0; i + 1 < num_values; ++i) { values.push_back(limit_value * (static_cast(i) / (num_values - 1))); } // The last value needs special handling since casting it to "double" loses // precision when T == uint64. values.push_back(limit_value); S2_CHECK(std::is_sorted(values.begin(), values.end())); return values; } template EncodedUintVector MakeEncodedVector(const vector& values, Encoder* encoder) { EncodeUintVector(values, encoder); Decoder decoder(encoder->base(), encoder->length()); EncodedUintVector actual; S2_CHECK(actual.Init(&decoder)); return actual; } template void TestLowerBound(int bytes_per_value, int num_values) { auto v = MakeSortedTestVector(bytes_per_value, num_values); Encoder encoder; auto actual = MakeEncodedVector(v, &encoder); for (T x : v) { EXPECT_EQ(std::lower_bound(v.begin(), v.end(), x) - v.begin(), actual.lower_bound(x)); if (x > 0) { EXPECT_EQ(std::lower_bound(v.begin(), v.end(), x - 1) - v.begin(), actual.lower_bound(x - 1)); } } } TEST(EncodedUintVector, LowerBound) { for (int bytes_per_value = 8; bytes_per_value <= 8; ++bytes_per_value) { TestLowerBound(bytes_per_value, 10); if (bytes_per_value <= 4) { TestLowerBound(bytes_per_value, 500); if (bytes_per_value <= 2) { TestLowerBound(bytes_per_value, 100); } } } } TEST(EncodedUintVectorTest, RoundtripEncoding) { std::vector values{10, 20, 30, 40}; Encoder a_encoder; auto a = MakeEncodedVector(values, &a_encoder); ASSERT_EQ(a.Decode(), values); Encoder b_encoder; a.Encode(&b_encoder); Decoder decoder(b_encoder.base(), b_encoder.length()); EncodedUintVector v2; ASSERT_TRUE(v2.Init(&decoder)); EXPECT_EQ(v2.Decode(), values); } } // namespace s2coding s2geometry-0.10.0/src/s2/id_set_lexicon.cc000066400000000000000000000047231422156367100203170ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/id_set_lexicon.h" #include #include #include #include "s2/base/logging.h" IdSetLexicon::IdSetLexicon() { } IdSetLexicon::~IdSetLexicon() { } // We define the copy/move constructors and assignment operators explicitly // in order to avoid copying/moving the temporary storage vector "tmp_". IdSetLexicon::IdSetLexicon(const IdSetLexicon& x) : id_sets_(x.id_sets_) { } IdSetLexicon::IdSetLexicon(IdSetLexicon&& x) : id_sets_(std::move(x.id_sets_)) { } IdSetLexicon& IdSetLexicon::operator=(const IdSetLexicon& x) { id_sets_ = x.id_sets_; return *this; } IdSetLexicon& IdSetLexicon::operator=(IdSetLexicon&& x) { id_sets_ = std::move(x.id_sets_); return *this; } void IdSetLexicon::Clear() { id_sets_.Clear(); } int32 IdSetLexicon::AddInternal(std::vector* ids) { if (ids->empty()) { // Empty sets have a special id chosen not to conflict with other ids. return kEmptySetId; } else if (ids->size() == 1) { // Singleton sets are represented by their element. return (*ids)[0]; } else { // Canonicalize the set by sorting and removing duplicates. std::sort(ids->begin(), ids->end()); ids->erase(std::unique(ids->begin(), ids->end()), ids->end()); // After eliminating duplicates, we may now have a singleton. if (ids->size() == 1) return (*ids)[0]; // Non-singleton sets are represented by the bitwise complement of the id // returned by SequenceLexicon. return ~id_sets_.Add(*ids); } } IdSetLexicon::IdSet IdSetLexicon::id_set(int32 set_id) const { if (set_id >= 0) { return IdSet(set_id); } else if (set_id == kEmptySetId) { return IdSet(); } else { auto sequence = id_sets_.sequence(~set_id); S2_DCHECK_NE(0, sequence.size()); return IdSet(&*sequence.begin(), &*sequence.begin() + sequence.size()); } } s2geometry-0.10.0/src/s2/id_set_lexicon.h000066400000000000000000000152771422156367100201670ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_ID_SET_LEXICON_H_ #define S2_ID_SET_LEXICON_H_ #include #include #include "s2/base/integral_types.h" #include "s2/base/logging.h" #include "s2/sequence_lexicon.h" // IdSetLexicon is a class for compactly representing sets of non-negative // integers such as array indices ("id sets"). It is especially suitable when // either (1) there are many duplicate sets, or (2) there are many singleton // or empty sets. See also ValueLexicon and SequenceLexicon. // // Each distinct id set is mapped to a 32-bit integer. 
Empty and singleton // sets take up no additional space whatsoever; the set itself is represented // by the unique id assigned to the set. Sets of size 2 or more occupy about // 11 bytes per set plus 4 bytes per element (as compared to 24 bytes per set // plus 4 bytes per element for std::vector). Duplicate sets are // automatically eliminated. Note also that id sets are referred to using // 32-bit integers rather than 64-bit pointers. // // This class is especially useful in conjunction with ValueLexicon. For // example, suppose that you want to label objects with a set of strings. You // could use a ValueLexicon to map the strings to "label ids" (32-bit // integers), and then use IdSetLexicon to map each set of labels to a "label // set id". Each reference to that label set then takes up only 4 bytes. // // Example usage: // // ValueLexicon labels_; // IdSetLexicon label_sets_; // // int32 GetLabelSet(const vector& label_strings) { // vector label_ids; // for (const auto& str : label_strings) { // label_ids.push_back(labels_.Add(str)); // } // return label_sets_.Add(label_ids); // } // // int label_set_id = GetLabelSet(...); // for (auto id : label_sets_.id_set(label_set_id)) { // S2_LOG(INFO) << id; // } // // This class is similar to SequenceLexicon, except: // // 1. Empty and singleton sets are represented implicitly; they use no space. // 2. Sets are represented rather than sequences; the ordering of values is // not important and duplicates are removed. // 3. The values must be 32-bit non-negative integers (only). class IdSetLexicon { public: IdSetLexicon(); ~IdSetLexicon(); // IdSetLexicon is movable and copyable. IdSetLexicon(const IdSetLexicon&); IdSetLexicon& operator=(const IdSetLexicon&); IdSetLexicon(IdSetLexicon&&); IdSetLexicon& operator=(IdSetLexicon&&); // Clears all data from the lexicon. void Clear(); // Add the given set of integers to the lexicon if it is not already // present, and return the unique id for this set. "begin" and "end" are // forward iterators over a sequence of values that can be converted to // non-negative 32-bit integers. The values are automatically sorted and // duplicates are removed. Returns a signed integer representing this set. // // REQUIRES: All values in [begin, end) are non-negative 32-bit integers. template int32 Add(FwdIterator begin, FwdIterator end); // Add the given set of integers to the lexicon if it is not already // present, and return the unique id for this set. This is a convenience // method equivalent to Add(std::begin(container), std::end(container)). template int32 Add(const Container& container); // Convenience method that returns the unique id for a singleton set. // Note that because singleton sets take up no space, this method is // const. Equivalent to calling Add(&id, &id + 1). int32 AddSingleton(int32 id) const; // Convenience method that returns the unique id for the empty set. Note // that because the empty set takes up no space and has a fixed id, this // method is static. Equivalent to calling Add() with an empty container. static int32 EmptySetId(); // Iterator type; please treat this as an opaque forward iterator. using Iterator = const int32*; // This class represents a set of integers stored in the IdSetLexicon. 
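// The set_id values accepted by id_set() below reflect this representation
// (as exercised in id_set_lexicon_test.cc, with Seq = std::vector<int32>):
// singleton sets are represented by their element, so lexicon.Add(Seq{5})
// returns 5; the empty set has the fixed id EmptySetId(); and multi-element
// sets receive negative ids, e.g. lexicon.Add(Seq{2, 5}) returns ~0 and
// lexicon.id_set(~0) iterates over {2, 5} after sorting and deduplication.
//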
class IdSet { public: Iterator begin() const; Iterator end() const; size_t size() const; private: friend class IdSetLexicon; IdSet(); IdSet(Iterator begin, Iterator end); explicit IdSet(int32 singleton_id); Iterator begin_, end_; int32 singleton_id_; }; // Return the set of integers corresponding to an id returned by Add(). IdSet id_set(int32 set_id) const; private: // Choose kEmptySetId to be the last id that will ever be generated. // (Non-negative ids are reserved for singleton sets.) static constexpr int32 kEmptySetId = std::numeric_limits::min(); int32 AddInternal(std::vector* ids); SequenceLexicon id_sets_; std::vector tmp_; // temporary storage used during Add() }; ////////////////// Implementation details follow //////////////////// inline IdSetLexicon::Iterator IdSetLexicon::IdSet::begin() const { return begin_; } inline IdSetLexicon::Iterator IdSetLexicon::IdSet::end() const { return end_; } inline size_t IdSetLexicon::IdSet::size() const { return end_ - begin_; } inline IdSetLexicon::IdSet::IdSet() : begin_(&singleton_id_), end_(begin_) { } inline IdSetLexicon::IdSet::IdSet(Iterator begin, Iterator end) : begin_(begin), end_(end) { } inline IdSetLexicon::IdSet::IdSet(int32 singleton_id) : begin_(&singleton_id_), end_(&singleton_id_ + 1), singleton_id_(singleton_id) { } inline int32 IdSetLexicon::AddSingleton(int32 id) const { S2_DCHECK_GE(id, 0); S2_DCHECK_LE(id, std::numeric_limits::max()); // Singleton sets are represented by their element. return id; } /*static*/ inline int32 IdSetLexicon::EmptySetId() { return kEmptySetId; } template int32 IdSetLexicon::Add(FwdIterator begin, FwdIterator end) { tmp_.clear(); for (; begin != end; ++begin) { S2_DCHECK_GE(*begin, 0); S2_DCHECK_LE(*begin, std::numeric_limits::max()); tmp_.push_back(*begin); } return AddInternal(&tmp_); } template int32 IdSetLexicon::Add(const Container& container) { return Add(std::begin(container), std::end(container)); } #endif // S2_ID_SET_LEXICON_H_ s2geometry-0.10.0/src/s2/id_set_lexicon_test.cc000066400000000000000000000041261422156367100213530ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // Author: ericv@google.com (Eric Veach) #include "s2/id_set_lexicon.h" #include #include void ExpectIdSet(const std::vector& expected, const IdSetLexicon::IdSet& actual) { EXPECT_EQ(expected.size(), actual.size()); EXPECT_TRUE(std::equal(expected.begin(), expected.end(), actual.begin())); } using IdSet = IdSetLexicon::IdSet; using Seq = std::vector; TEST(IdSetLexicon, EmptySet) { IdSetLexicon lexicon; ExpectIdSet({}, lexicon.id_set(lexicon.Add(Seq{}))); } TEST(IdSetLexicon, SingletonSets) { IdSetLexicon lexicon; EXPECT_EQ(5, lexicon.Add(Seq{5})); EXPECT_EQ(0, lexicon.Add(Seq{0, 0})); EXPECT_EQ(1, lexicon.AddSingleton(1)); int32 m = std::numeric_limits::max(); EXPECT_EQ(m, lexicon.Add(&m, &m + 1)); ExpectIdSet({0}, lexicon.id_set(0)); ExpectIdSet({1}, lexicon.id_set(1)); ExpectIdSet({5}, lexicon.id_set(5)); ExpectIdSet({m}, lexicon.id_set(m)); } TEST(IdSetLexicon, SetsAreSorted) { IdSetLexicon lexicon; EXPECT_EQ(~0, lexicon.Add(Seq{2, 5})); EXPECT_EQ(~1, lexicon.Add(Seq{3, 2, 5})); EXPECT_EQ(~0, lexicon.Add(Seq{5, 2})); EXPECT_EQ(~1, lexicon.Add(Seq{5, 3, 2, 5})); ExpectIdSet({2, 5}, lexicon.id_set(~0)); ExpectIdSet({2, 3, 5}, lexicon.id_set(~1)); } TEST(IdSetLexicon, Clear) { IdSetLexicon lexicon; EXPECT_EQ(~0, lexicon.Add(Seq{1, 2})); EXPECT_EQ(~1, lexicon.Add(Seq{3, 4})); lexicon.Clear(); EXPECT_EQ(~0, lexicon.Add(Seq{3, 4})); EXPECT_EQ(~1, lexicon.Add(Seq{1, 2})); } s2geometry-0.10.0/src/s2/mutable_s2shape_index.cc000066400000000000000000002624311422156367100215760ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/mutable_s2shape_index.h" #include #include #include #include #include "s2/base/casts.h" #include "s2/base/commandlineflags.h" #include "s2/base/spinlock.h" #include "absl/base/attributes.h" #include "absl/flags/flag.h" #include "s2/encoded_s2cell_id_vector.h" #include "s2/encoded_string_vector.h" #include "s2/r1interval.h" #include "s2/r2.h" #include "s2/r2rect.h" #include "s2/s2cell_id.h" #include "s2/s2cell_union.h" #include "s2/s2coords.h" #include "s2/s2edge_clipping.h" #include "s2/s2edge_crosser.h" #include "s2/s2metrics.h" #include "s2/s2padded_cell.h" #include "s2/s2pointutil.h" #include "s2/s2shapeutil_contains_brute_force.h" #include "s2/util/math/mathutil.h" using std::fabs; using std::make_pair; using std::max; using std::min; using std::unique_ptr; using std::vector; // FLAGS_s2shape_index_default_max_edges_per_cell // // The default maximum number of edges per cell (not counting 'long' edges). // If a cell has more than this many edges, and it is not a leaf cell, then it // is subdivided. This flag can be overridden via MutableS2ShapeIndex::Options. // Reasonable values range from 10 to about 50 or so. S2_DEFINE_int32( s2shape_index_default_max_edges_per_cell, 10, "Default maximum number of edges per cell (not counting 'long' edges); " "reasonable values range from 10 to 50. 
Small values makes queries " "faster, while large values make construction faster and use less memory."); // FLAGS_s2shape_index_tmp_memory_budget // // Attempt to limit the amount of temporary memory allocated while building or // updating a MutableS2ShapeIndex to at most this value. This is achieved by // splitting the updates into multiple batches when necessary. (The memory // required is proportional to the number of edges being updated at once.) // // Note that this limit is not a hard guarantee, for several reasons: // (1) the memory estimates are only approximations; // (2) all edges in a given shape are added or removed at once, so shapes // with huge numbers of edges may exceed the budget; // (3) shapes being removed are always processed in a single batch. (This // could be fixed, but it seems better to keep the code simpler for now.) S2_DEFINE_int64( s2shape_index_tmp_memory_budget, int64{100} << 20 /*100 MB*/, "Attempts to limit the amount of temporary memory used by " "MutableS2ShapeIndex when creating or updating very large indexes to at " "most this number of bytes. If more memory than this is needed, updates " "will automatically be split into batches internally."); // FLAGS_s2shape_index_cell_size_to_long_edge_ratio // // The maximum cell size, relative to an edge's length, for which that edge is // considered 'long'. Cell size is defined as the average edge length of all // cells at a given level. For example, a value of 2.0 means that an edge E // is long at cell level k iff the average edge length at level k is at most // twice the length of E. Long edges edges are not counted towards the // max_edges_per_cell() limit because such edges typically need to be // propagated to several children, which increases time and memory costs // without commensurate benefits. S2_DEFINE_double( s2shape_index_cell_size_to_long_edge_ratio, 1.0, "The maximum cell size, relative to an edge's length, for which that " "edge is considered 'long'. Long edges are not counted towards the " "max_edges_per_cell() limit. The size and speed of the index are " "typically not very sensitive to this parameter. Reasonable values range " "from 0.1 to 10, with smaller values causing more aggressive subdivision " "of long edges grouped closely together."); // FLAGS_s2shape_index_min_short_edge_fraction // // The minimum fraction of 'short' edges that must be present in a cell in // order for it to be subdivided. If this parameter is non-zero then the // total index size and construction time are guaranteed to be linear in the // number of input edges; this prevents the worst-case quadratic space and // time usage that can otherwise occur with certain input configurations. // Specifically, the maximum index size is // // O((c1 + c2 * (1 - f) / f) * n) // // where n is the number of input edges, f is this parameter value, and // constant c2 is roughly 20 times larger than constant c1. (The exact values // of c1 and c2 depend on the cell_size_to_long_edge_ratio and // max_edges_per_cell parameters and certain properties of the input geometry // such as whether it consists of O(1) shapes, whether it includes polygons, // and whether the polygon interiors are disjoint.) // // Reasonable parameter values range from 0.1 up to perhaps 0.95. The main // factors to consider when choosing this parameter are: // // - For pathological geometry, larger values result in indexes that are // smaller and faster to construct but have worse query performance (due to // having more edges per cell). 
However note that even a setting of 0.1 // reduces the worst case by 100x compared with a setting of 0.001. // // - For normal geometry, values up to about 0.8 result in indexes that are // virtually unchanged except for a slight increase in index construction // time (proportional to the parameter value f) for very large inputs. // With millions of edges, indexing time increases by about (15% * f), // e.g. a parameter value of 0.5 slows down indexing for very large inputs // by about 7.5%. (Indexing time for small inputs is not affected.) // // - Values larger than about 0.8 start to affect index construction even for // normal geometry, resulting in smaller indexes and faster construction // times but gradually worse query performance. // // Essentially this parameter provides control over a space-time tradeoff that // largely affects only pathological geometry. The default value of 0.2 was // chosen to make index construction as fast as possible while still // protecting against possible quadratic space usage. S2_DEFINE_double( s2shape_index_min_short_edge_fraction, 0.2, "The minimum fraction of 'short' edges that must be present in a cell in " "order for it to be subdivided. If this parameter is non-zero then the " "total index size and construction time are guaranteed to be linear in the " "number of input edges, where the constant of proportionality has the " "form (c1 + c2 * (1 - f) / f). Reasonable values range from 0.1 to " "perhaps 0.95. Values up to about 0.8 have almost no effect on 'normal' " "geometry except for a small increase in index construction time " "(proportional to f) for very large inputs. For worst-case geometry, " "larger parameter values result in indexes that are smaller and faster " "to construct but have worse query performance (due to having more edges " "per cell). Essentially this parameter provides control over a space-time " "tradeoff that largely affects only pathological geometry."); // The total error when clipping an edge comes from two sources: // (1) Clipping the original spherical edge to a cube face (the "face edge"). // The maximum error in this step is S2::kFaceClipErrorUVCoord. // (2) Clipping the face edge to the u- or v-coordinate of a cell boundary. // The maximum error in this step is S2::kEdgeClipErrorUVCoord. // Finally, since we encounter the same errors when clipping query edges, we // double the total error so that we only need to pad edges during indexing // and not at query time. const double MutableS2ShapeIndex::kCellPadding = 2 * (S2::kFaceClipErrorUVCoord + S2::kEdgeClipErrorUVCoord); MutableS2ShapeIndex::Options::Options() : max_edges_per_cell_( absl::GetFlag(FLAGS_s2shape_index_default_max_edges_per_cell)) {} void MutableS2ShapeIndex::Options::set_max_edges_per_cell( int max_edges_per_cell) { max_edges_per_cell_ = max_edges_per_cell; } bool MutableS2ShapeIndex::Iterator::Locate(const S2Point& target) { return LocateImpl(target, this); } MutableS2ShapeIndex::CellRelation MutableS2ShapeIndex::Iterator::Locate( S2CellId target) { return LocateImpl(target, this); } const S2ShapeIndexCell* MutableS2ShapeIndex::Iterator::GetCell() const { S2_LOG(DFATAL) << "Should never be called"; return nullptr; } unique_ptr MutableS2ShapeIndex::Iterator::Clone() const { return absl::make_unique(*this); } void MutableS2ShapeIndex::Iterator::Copy(const IteratorBase& other) { *this = *down_cast(&other); } // FaceEdge and ClippedEdge store temporary edge data while the index is being // updated. 
FaceEdge represents an edge that has been projected onto a given // face, while ClippedEdge represents the portion of that edge that has been // clipped to a given S2Cell. // // While it would be possible to combine all the edge information into one // structure, there are two good reasons for separating it: // // - Memory usage. Separating the two classes means that we only need to // store one copy of the per-face data no matter how many times an edge is // subdivided, and it also lets us delay computing bounding boxes until // they are needed for processing each face (when the dataset spans // multiple faces). // // - Performance. UpdateEdges is significantly faster on large polygons when // the data is separated, because it often only needs to access the data in // ClippedEdge and this data is cached more successfully. struct MutableS2ShapeIndex::FaceEdge { int32 shape_id; // The shape that this edge belongs to int32 edge_id; // Edge id within that shape int32 max_level; // Not desirable to subdivide this edge beyond this level bool has_interior; // Belongs to a shape of dimension 2. R2Point a, b; // The edge endpoints, clipped to a given face S2Shape::Edge edge; // The edge endpoints }; struct MutableS2ShapeIndex::ClippedEdge { const FaceEdge* face_edge; // The original unclipped edge R2Rect bound; // Bounding box for the clipped portion }; // Given a set of shapes, InteriorTracker keeps track of which shapes contain // a particular point (the "focus"). It provides an efficient way to move the // focus from one point to another and incrementally update the set of shapes // which contain it. We use this to compute which shapes contain the center // of every S2CellId in the index, by advancing the focus from one cell center // to the next. // // Initially the focus is at the start of the S2CellId space-filling curve. // We then visit all the cells that are being added to the MutableS2ShapeIndex // in increasing order of S2CellId. For each cell, we draw two edges: one // from the entry vertex to the center, and another from the center to the // exit vertex (where "entry" and "exit" refer to the points where the // space-filling curve enters and exits the cell). By counting edge crossings // we can incrementally compute which shapes contain the cell center. Note // that the same set of shapes will always contain the exit point of one cell // and the entry point of the next cell in the index, because either (a) these // two points are actually the same, or (b) the intervening cells in S2CellId // order are all empty, and therefore there are no edge crossings if we follow // this path from one cell to the other. class MutableS2ShapeIndex::InteriorTracker { public: // Constructs the InteriorTracker. You must call AddShape() for each shape // that will be tracked before calling MoveTo() or DrawTo(). InteriorTracker(); // Returns the initial focus point when the InteriorTracker is constructed // (corresponding to the start of the S2CellId space-filling curve). static S2Point Origin(); // Returns the current focus point (see above). const S2Point& focus() { return b_; } // Returns true if any shapes are being tracked. bool is_active() const { return is_active_; } // Adds a shape whose interior should be tracked. "contains_focus" indicates // whether the current focus point is inside the shape. 
Alternatively, if the // focus point is in the process of being moved (via MoveTo/DrawTo), you can // also specify "contains_focus" at the old focus point and call TestEdge() // for every edge of the shape that might cross the current DrawTo() line. // This updates the state to correspond to the new focus point. // // REQUIRES: shape->dimension() == 2 void AddShape(int32 shape_id, bool contains_focus); // Moves the focus to the given point. This method should only be used when // it is known that there are no edge crossings between the old and new // focus locations; otherwise use DrawTo(). void MoveTo(const S2Point& b) { b_ = b; } // Moves the focus to the given point. After this method is called, // TestEdge() should be called with all edges that may cross the line // segment between the old and new focus locations. void DrawTo(const S2Point& b); // Indicates that the given edge of the given shape may cross the line // segment between the old and new focus locations (see DrawTo). // REQUIRES: shape->dimension() == 2 inline void TestEdge(int32 shape_id, const S2Shape::Edge& edge); // The set of shape ids that contain the current focus. const ShapeIdSet& shape_ids() const { return shape_ids_; } // Indicates that the last argument to MoveTo() or DrawTo() was the entry // vertex of the given S2CellId, i.e. the tracker is positioned at the start // of this cell. By using this method together with at_cellid(), the caller // can avoid calling MoveTo() in cases where the exit vertex of the previous // cell is the same as the entry vertex of the current cell. void set_next_cellid(S2CellId next_cellid) { next_cellid_ = next_cellid.range_min(); } // Returns true if the focus is already at the entry vertex of the given // S2CellId (provided that the caller calls set_next_cellid() as each cell // is processed). bool at_cellid(S2CellId cellid) const { return cellid.range_min() == next_cellid_; } // Makes an internal copy of the state for shape ids below the given limit, // and then clear the state for those shapes. This is used during // incremental updates to track the state of added and removed shapes // separately. void SaveAndClearStateBefore(int32 limit_shape_id); // Restores the state previously saved by SaveAndClearStateBefore(). This // only affects the state for shape_ids below "limit_shape_id". void RestoreStateBefore(int32 limit_shape_id); // Indicates that only some edges of the given shape are being added, and // therefore its interior should not be processed yet. int partial_shape_id() const { return partial_shape_id_; } void set_partial_shape_id(int shape_id) { partial_shape_id_ = shape_id; } private: // Removes "shape_id" from shape_ids_ if it exists, otherwise insert it. void ToggleShape(int shape_id); // Returns a pointer to the first entry "x" where x >= shape_id. ShapeIdSet::iterator lower_bound(int32 shape_id); bool is_active_ = false; S2Point a_, b_; S2CellId next_cellid_; S2EdgeCrosser crosser_; ShapeIdSet shape_ids_; // Shape ids saved by SaveAndClearStateBefore(). The state is never saved // recursively so we don't need to worry about maintaining a stack. ShapeIdSet saved_ids_; // As an optimization, we also save is_active_ so that RestoreStateBefore() // can deactivate the tracker again in the case where the shapes being added // and removed do not have an interior, but some existing shapes do. bool saved_is_active_; // If non-negative, indicates that only some edges of the given shape are // being added and therefore its interior should not be tracked yet. 
int partial_shape_id_ = -1; }; // As shapes are added, we compute which ones contain the start of the // S2CellId space-filling curve by drawing an edge from S2::Origin() to this // point and counting how many shape edges cross this edge. MutableS2ShapeIndex::InteriorTracker::InteriorTracker() : b_(Origin()), next_cellid_(S2CellId::Begin(S2CellId::kMaxLevel)) { } S2Point MutableS2ShapeIndex::InteriorTracker::Origin() { // The start of the S2CellId space-filling curve. return S2::FaceUVtoXYZ(0, -1, -1).Normalize(); } void MutableS2ShapeIndex::InteriorTracker::AddShape(int32 shape_id, bool contains_focus) { is_active_ = true; if (contains_focus) { ToggleShape(shape_id); } } void MutableS2ShapeIndex::InteriorTracker::ToggleShape(int shape_id) { // Since shape_ids_.size() is typically *very* small (0, 1, or 2), it turns // out to be significantly faster to maintain a sorted array rather than // using an STL set or btree_set. if (shape_ids_.empty()) { shape_ids_.push_back(shape_id); } else if (shape_ids_[0] == shape_id) { shape_ids_.erase(shape_ids_.begin()); } else { ShapeIdSet::iterator pos = shape_ids_.begin(); while (*pos < shape_id) { if (++pos == shape_ids_.end()) { shape_ids_.push_back(shape_id); return; } } if (*pos == shape_id) { shape_ids_.erase(pos); } else { shape_ids_.insert(pos, shape_id); } } } void MutableS2ShapeIndex::InteriorTracker::DrawTo(const S2Point& b) { a_ = b_; b_ = b; crosser_.Init(&a_, &b_); } ABSL_ATTRIBUTE_ALWAYS_INLINE // ~1% faster inline void MutableS2ShapeIndex::InteriorTracker::TestEdge( int32 shape_id, const S2Shape::Edge& edge) { if (crosser_.EdgeOrVertexCrossing(&edge.v0, &edge.v1)) { ToggleShape(shape_id); } } // Like std::lower_bound(shape_ids_.begin(), shape_ids_.end(), shape_id), but // implemented with linear rather than binary search because the number of // shapes being tracked is typically very small. inline MutableS2ShapeIndex::ShapeIdSet::iterator MutableS2ShapeIndex::InteriorTracker::lower_bound(int32 shape_id) { ShapeIdSet::iterator pos = shape_ids_.begin(); while (pos != shape_ids_.end() && *pos < shape_id) { ++pos; } return pos; } void MutableS2ShapeIndex::InteriorTracker::SaveAndClearStateBefore( int32 limit_shape_id) { S2_DCHECK(saved_ids_.empty()); ShapeIdSet::iterator limit = lower_bound(limit_shape_id); saved_ids_.assign(shape_ids_.begin(), limit); shape_ids_.erase(shape_ids_.begin(), limit); saved_is_active_ = is_active_; } void MutableS2ShapeIndex::InteriorTracker::RestoreStateBefore( int32 limit_shape_id) { shape_ids_.erase(shape_ids_.begin(), lower_bound(limit_shape_id)); shape_ids_.insert(shape_ids_.begin(), saved_ids_.begin(), saved_ids_.end()); saved_ids_.clear(); is_active_ = saved_is_active_; } MutableS2ShapeIndex::MutableS2ShapeIndex() { } MutableS2ShapeIndex::MutableS2ShapeIndex(const Options& options) { Init(options); } void MutableS2ShapeIndex::Init(const Options& options) { S2_DCHECK(shapes_.empty()); options_ = options; // Memory tracking is not affected by this method. } MutableS2ShapeIndex::~MutableS2ShapeIndex() { Clear(); } void MutableS2ShapeIndex::set_memory_tracker(S2MemoryTracker* tracker) { mem_tracker_.Tally(-mem_tracker_.client_usage_bytes()); mem_tracker_.Init(tracker); if (mem_tracker_.is_active()) mem_tracker_.Tally(SpaceUsed()); } // Called to set the index status when the index needs to be rebuilt. void MutableS2ShapeIndex::MarkIndexStale() { // The UPDATING status can only be changed in ApplyUpdatesThreadSafe(). 
if (index_status_.load(std::memory_order_relaxed) == UPDATING) return; // If a memory tracking error has occurred we set the index status to FRESH // in order to prevent us from attempting to rebuild it. IndexStatus status = (shapes_.empty() || !mem_tracker_.ok()) ? FRESH : STALE; index_status_.store(status, std::memory_order_relaxed); } void MutableS2ShapeIndex::Minimize() { mem_tracker_.Tally(-mem_tracker_.client_usage_bytes()); Iterator it; for (it.InitStale(this, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { delete &it.cell(); } cell_map_.clear(); pending_removals_.reset(); pending_additions_begin_ = 0; MarkIndexStale(); if (mem_tracker_.is_active()) mem_tracker_.Tally(SpaceUsed()); } int MutableS2ShapeIndex::Add(unique_ptr shape) { // Additions are processed lazily by ApplyUpdates(). Note that in order to // avoid unexpected client behavior, this method continues to add shapes // even once the specified S2MemoryTracker limit has been exceeded. const int id = shapes_.size(); shape->id_ = id; mem_tracker_.AddSpace(&shapes_, 1); shapes_.push_back(std::move(shape)); MarkIndexStale(); return id; } unique_ptr MutableS2ShapeIndex::Release(int shape_id) { // This class updates itself lazily, because it is much more efficient to // process additions and removals in batches. However this means that when // a shape is removed we need to make a copy of all its edges, since the // client is free to delete "shape" once this call is finished. S2_DCHECK(shapes_[shape_id] != nullptr); auto shape = std::move(shapes_[shape_id]); if (shape_id >= pending_additions_begin_) { // We are removing a shape that has not yet been added to the index, // so there is nothing else to do. } else { if (!pending_removals_) { if (!mem_tracker_.Tally(sizeof(*pending_removals_))) { Minimize(); return shape; } pending_removals_ = absl::make_unique>(); } RemovedShape removed; removed.shape_id = shape->id(); removed.has_interior = (shape->dimension() == 2); removed.contains_tracker_origin = s2shapeutil::ContainsBruteForce(*shape, InteriorTracker::Origin()); int num_edges = shape->num_edges(); if (!mem_tracker_.AddSpace(&removed.edges, num_edges) || !mem_tracker_.AddSpace(pending_removals_.get(), 1)) { Minimize(); return shape; } for (int e = 0; e < num_edges; ++e) { removed.edges.push_back(shape->edge(e)); } pending_removals_->push_back(std::move(removed)); } MarkIndexStale(); return shape; } vector> MutableS2ShapeIndex::ReleaseAll() { S2_DCHECK(update_state_ == nullptr); vector> result; result.swap(shapes_); Minimize(); return result; } void MutableS2ShapeIndex::Clear() { ReleaseAll(); } // Apply any pending updates in a thread-safe way. void MutableS2ShapeIndex::ApplyUpdatesThreadSafe() { lock_.Lock(); if (index_status_.load(std::memory_order_relaxed) == FRESH) { lock_.Unlock(); } else if (index_status_.load(std::memory_order_relaxed) == UPDATING) { // Wait until the updating thread is finished. We do this by attempting // to lock a mutex that is held by the updating thread. When this mutex // is unlocked the index_status_ is guaranteed to be FRESH. ++update_state_->num_waiting; lock_.Unlock(); update_state_->wait_mutex.Lock(); lock_.Lock(); --update_state_->num_waiting; UnlockAndSignal(); // Notify other waiting threads. } else { S2_DCHECK_EQ(STALE, index_status_); index_status_.store(UPDATING, std::memory_order_relaxed); // Allocate the extra state needed for thread synchronization. 
We keep // the spinlock held while doing this, because (1) memory allocation is // fast, so the chance of a context switch while holding the lock is low; // (2) by far the most common situation is that there is no contention, // and this saves an extra lock and unlock step; (3) even in the rare case // where there is contention, the main side effect is that some other // thread will burn a few CPU cycles rather than sleeping. update_state_ = absl::make_unique(); // lock_.Lock wait_mutex *before* calling Unlock() to ensure that all other // threads will block on it. update_state_->wait_mutex.Lock(); // Release the spinlock before doing any real work. lock_.Unlock(); ApplyUpdatesInternal(); lock_.Lock(); // index_status_ can be updated to FRESH only while locked *and* using // an atomic store operation, so that MaybeApplyUpdates() can check // whether the index is FRESH without acquiring the spinlock. index_status_.store(FRESH, std::memory_order_release); UnlockAndSignal(); // Notify any waiting threads. } } // Releases lock_ and wakes up any waiting threads by releasing wait_mutex. // If this was the last waiting thread, also deletes update_state_. // REQUIRES: lock_ is held. // REQUIRES: wait_mutex is held. inline void MutableS2ShapeIndex::UnlockAndSignal() { S2_DCHECK_EQ(FRESH, index_status_); int num_waiting = update_state_->num_waiting; lock_.Unlock(); // Allow another waiting thread to proceed. Note that no new threads can // start waiting because the index_status_ is now FRESH, and the caller is // required to prevent any new mutations from occurring while these const // methods are running. // // We need to unlock wait_mutex before destroying it even if there are no // waiting threads. update_state_->wait_mutex.Unlock(); if (num_waiting == 0) { update_state_.reset(); } } // This method updates the index by applying all pending additions and // removals. It does *not* update index_status_ (see ApplyUpdatesThreadSafe). void MutableS2ShapeIndex::ApplyUpdatesInternal() { // Check whether we have so many edges to process that we should process // them in multiple batches to save memory. Building the index can use up // to 20x as much memory (per edge) as the final index size. vector batches = GetUpdateBatches(); for (const BatchDescriptor& batch : batches) { if (mem_tracker_.is_active()) { S2_DCHECK_EQ(mem_tracker_.client_usage_bytes(), SpaceUsed()); // Invariant. } vector all_edges[6]; ReserveSpace(batch, all_edges); if (!mem_tracker_.ok()) return Minimize(); InteriorTracker tracker; if (pending_removals_) { // The first batch implicitly includes all shapes being removed. for (const auto& pending_removal : *pending_removals_) { RemoveShape(pending_removal, all_edges, &tracker); } pending_removals_.reset(nullptr); } // A batch consists of zero or more full shapes followed by zero or one // partial shapes. The loop below handles all such cases. for (auto begin = batch.begin; begin < batch.end; ++begin.shape_id, begin.edge_id = 0) { const S2Shape* shape = this->shape(begin.shape_id); if (shape == nullptr) continue; // Already removed. int edges_end = begin.shape_id == batch.end.shape_id ? batch.end.edge_id : shape->num_edges(); AddShape(shape, begin.edge_id, edges_end, all_edges, &tracker); } for (int face = 0; face < 6; ++face) { UpdateFaceEdges(face, all_edges[face], &tracker); // Save memory by clearing vectors after we are done with them. 
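      // (vector::clear() would keep the heap allocation alive; swapping with
      // a default-constructed temporary releases the memory immediately.)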
vector().swap(all_edges[face]); } pending_additions_begin_ = batch.end.shape_id; if (batch.begin.edge_id > 0 && batch.end.edge_id == 0) { // We have just finished adding the edges of shape that was split over // multiple batches. Now we need to mark the interior of the shape, if // any, by setting contains_center() on the appropriate index cells. FinishPartialShape(tracker.partial_shape_id()); } if (mem_tracker_.is_active()) { mem_tracker_.Tally(-mem_tracker_.client_usage_bytes()); if (!mem_tracker_.Tally(SpaceUsed())) return Minimize(); } } // It is the caller's responsibility to update index_status_. } // Count the number of edges being updated, and break them into several // batches if necessary to reduce the amount of memory needed. (See the // documentation for FLAGS_s2shape_index_tmp_memory_budget.) vector MutableS2ShapeIndex::GetUpdateBatches() const { // Count the edges being removed and added. int num_edges_removed = 0; if (pending_removals_) { for (const auto& pending_removal : *pending_removals_) { num_edges_removed += pending_removal.edges.size(); } } int num_edges_added = 0; for (int id = pending_additions_begin_; id < shapes_.size(); ++id) { const S2Shape* shape = this->shape(id); if (shape) num_edges_added += shape->num_edges(); } BatchGenerator batch_gen(num_edges_removed, num_edges_added, pending_additions_begin_); for (int id = pending_additions_begin_; id < shapes_.size(); ++id) { const S2Shape* shape = this->shape(id); if (shape) batch_gen.AddShape(id, shape->num_edges()); } return batch_gen.Finish(); } // The following memory estimates are based on heap profiling. // The batch sizes during a given update gradually decrease as the space // occupied by the index itself grows. In order to do this, we need a // conserative lower bound on how much the index grows per edge. // // The final size of a MutableS2ShapeIndex depends mainly on how finely the // index is subdivided, as controlled by Options::max_edges_per_cell() and // --s2shape_index_default_max_edges_per_cell. For realistic values of // max_edges_per_cell() and shapes with moderate numbers of edges, it is // difficult to get much below 8 bytes per edge. *The minimum possible size // is 4 bytes per edge (to store a 32-bit edge id in an S2ClippedShape) plus // 24 bytes per shape (for the S2ClippedShape itself plus a pointer in the // shapes_ vector.) Note that this value is a lower bound; a typical final // index size is closer to 24 bytes per edge. static constexpr size_t kFinalBytesPerEdge = 8; // The temporary memory consists mainly of the FaceEdge and ClippedEdge // structures plus a ClippedEdge pointer for every level of recursive // subdivision. This can be more than 220 bytes per edge even for typical // geometry. (The pathological worst case is higher, but we don't use this to // determine the batch sizes.) static constexpr size_t kTmpBytesPerEdge = 226; // We arbitrarily limit the number of batches as a safety measure. With the // current default memory budget of 100 MB, this limit is not reached even // when building an index of 350 million edges. 
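//
// As a rough illustration (approximate figures only): with the default
// 100 MB budget and kTmpBytesPerEdge == 226, an update of up to about
// (100 << 20) / 226, i.e. roughly 460,000 edges, can be processed as a
// single batch.  Larger updates are split into batches whose ideal sizes
// are multiplied by (1 - kFinalBytesPerEdge / kTmpBytesPerEdge), i.e. about
// 0.965, from one batch to the next as already-indexed edges consume part
// of the budget (see GetMaxBatchSizes() below).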
static constexpr int kMaxBatches = 100; MutableS2ShapeIndex::BatchGenerator::BatchGenerator(int num_edges_removed, int num_edges_added, int shape_id_begin) : max_batch_sizes_(GetMaxBatchSizes(num_edges_removed, num_edges_added)), batch_begin_(shape_id_begin, 0), shape_id_end_(shape_id_begin) { if (max_batch_sizes_.size() > 1) { S2_VLOG(1) << "Removing " << num_edges_removed << ", adding " << num_edges_added << " edges in " << max_batch_sizes_.size() << " batches"; } // Duplicate the last entry to simplify next_max_batch_size(). max_batch_sizes_.push_back(max_batch_sizes_.back()); // We process edge removals before additions, and edges are always removed // in a single batch. The reasons for this include: (1) removed edges use // quite a bit of memory (about 50 bytes each) and this space can be freed // immediately when we process them in one batch; (2) removed shapes are // expected to be small fraction of the index size in typical use cases // (e.g. incremental updates of large indexes), and (3) AbsorbIndexCell() // uses (shape(id) == nullptr) to detect when a shape is being removed, so // in order to split the removed shapes into multiple batches we would need // a different approach (e.g., temporarily adding fake entries to shapes_ // and restoring them back to nullptr as shapes are removed). Removing // individual shapes over multiple batches would be even more work. batch_size_ = num_edges_removed; } void MutableS2ShapeIndex::BatchGenerator::AddShape(int shape_id, int num_edges) { int batch_remaining = max_batch_size() - batch_size_; if (num_edges <= batch_remaining) { ExtendBatch(num_edges); } else if (num_edges <= next_max_batch_size()) { // Avoid splitting shapes across batches unnecessarily. FinishBatch(0, ShapeEdgeId(shape_id, 0)); ExtendBatch(num_edges); } else { // This shape must be split across at least two batches. We simply fill // each batch until the remaining edges will fit in two batches, and then // divide those edges such that both batches have the same amount of // remaining space relative to their maximum size. int e_begin = 0; while (batch_remaining + next_max_batch_size() < num_edges) { e_begin += batch_remaining; FinishBatch(batch_remaining, ShapeEdgeId(shape_id, e_begin)); num_edges -= batch_remaining; batch_remaining = max_batch_size(); } // Figure out how many edges to add to the current batch so that it will // have the same amount of remaining space as the next batch. int n = (num_edges + batch_remaining - next_max_batch_size()) / 2; FinishBatch(n, ShapeEdgeId(shape_id, e_begin + n)); FinishBatch(num_edges - n, ShapeEdgeId(shape_id + 1, 0)); } shape_id_end_ = shape_id + 1; } vector MutableS2ShapeIndex::BatchGenerator::Finish() { // We must generate at least one batch even when num_edges_removed == // num_edges_added == 0, because some shapes have an interior but no edges. // (Specifically, the full polygon has this property.) if (batches_.empty() || shape_id_end_ != batch_begin_.shape_id) { FinishBatch(0, ShapeEdgeId(shape_id_end_, 0)); } return std::move(batches_); } void MutableS2ShapeIndex::BatchGenerator::FinishBatch(int num_edges, ShapeEdgeId batch_end) { ExtendBatch(num_edges); batches_.push_back(BatchDescriptor{batch_begin_, batch_end, batch_size_}); batch_begin_ = batch_end; batch_index_edges_left_ -= batch_size_; while (batch_index_edges_left_ < 0) { batch_index_edges_left_ += max_batch_size(); batch_index_ += 1; } batch_size_ = 0; } // Divides "num_edges" edges into batches where each batch needs about the // same total amount of memory. 
(The total memory needed by a batch consists // of the temporary memory needed to process the edges in that batch plus the // final representations of the edges that have already been indexed.) It // uses the fewest number of batches (up to kMaxBatches) such that the total // memory usage does not exceed the combined final size of all the edges plus // FLAGS_s2shape_index_tmp_memory_budget. Returns a vector of sizes // indicating the desired number of edges in each batch. /* static */ vector MutableS2ShapeIndex::BatchGenerator::GetMaxBatchSizes( int num_edges_removed, int num_edges_added) { // Check whether we can update all the edges at once. int num_edges_total = num_edges_removed + num_edges_added; const double tmp_memory_budget_bytes = absl::GetFlag(FLAGS_s2shape_index_tmp_memory_budget); if (num_edges_total * kTmpBytesPerEdge <= tmp_memory_budget_bytes) { return vector{num_edges_total}; } // Each batch is allowed to use up to "total_budget_bytes". The memory // usage consists of some number of edges already added by previous batches // (at kFinalBytesPerEdge each), plus some number being updated in the // current batch (at kTmpBytesPerEdge each). The available free space is // multiplied by (1 - kFinalBytesPerEdge / kTmpBytesPerEdge) after each // batch is processed as edges are converted into their final form. const double final_bytes = num_edges_added * kFinalBytesPerEdge; constexpr double kFinalBytesRatio = 1.0 * kFinalBytesPerEdge / kTmpBytesPerEdge; constexpr double kTmpSpaceMultiplier = 1 - kFinalBytesRatio; // The total memory budget is the greater of the final size plus the allowed // temporary memory, or the minimum amount of memory required to limit the // number of batches to "kMaxBatches". const double total_budget_bytes = max( final_bytes + tmp_memory_budget_bytes, final_bytes / (1 - MathUtil::IPow(kTmpSpaceMultiplier, kMaxBatches - 1))); // "ideal_batch_size" is the number of edges in the current batch before // rounding to an integer. double ideal_batch_size = total_budget_bytes / kTmpBytesPerEdge; // Removed edges are always processed in the first batch, even if this might // use more memory than requested (see the BatchGenerator constructor). vector batch_sizes; int num_edges_left = num_edges_added; if (num_edges_removed > ideal_batch_size) { batch_sizes.push_back(num_edges_removed); } else { num_edges_left += num_edges_removed; } for (int i = 0; num_edges_left > 0; ++i) { int batch_size = static_cast(ideal_batch_size + 1); batch_sizes.push_back(batch_size); num_edges_left -= batch_size; ideal_batch_size *= kTmpSpaceMultiplier; } S2_DCHECK_LE(batch_sizes.size(), kMaxBatches); return batch_sizes; } // Reserve an appropriate amount of space for the top-level face edges in the // current batch. This data structure uses about half of the temporary memory // needed during index construction. Furthermore, if the arrays are grown via // push_back() then up to 10% of the total run time consists of copying data // as these arrays grow, so it is worthwhile to preallocate space for them. void MutableS2ShapeIndex::ReserveSpace( const BatchDescriptor& batch, vector all_edges[6]) { // The following accounts for the temporary space needed for everything // except the FaceEdge vectors (which are allocated separately below). int64 other_usage = batch.num_edges * (kTmpBytesPerEdge - sizeof(FaceEdge)); // If the number of edges is relatively small, then the fastest approach is // to simply reserve space on every face for the maximum possible number of // edges. 
(We use a different threshold for this calculation than for // deciding when to break updates into batches because the cost/benefit // ratio is different. Here the only extra expense is that we need to // sample the edges to estimate how many edges per face there are, and // therefore we generally use a lower threshold.) const size_t kMaxCheapBytes = min(absl::GetFlag(FLAGS_s2shape_index_tmp_memory_budget) / 2, int64{30} << 20 /*30 MB*/); int64 face_edge_usage = batch.num_edges * (6 * sizeof(FaceEdge)); if (face_edge_usage <= kMaxCheapBytes) { if (!mem_tracker_.TallyTemp(face_edge_usage + other_usage)) { return; } for (int face = 0; face < 6; ++face) { all_edges[face].reserve(batch.num_edges); } return; } // Otherwise we estimate the number of edges on each face by taking a random // sample. The goal is to come up with an estimate that is fast and // accurate for non-pathological geometry. If our estimates happen to be // wrong, the vector will still grow automatically - the main side effects // are that memory usage will be larger (by up to a factor of 3), and // constructing the index will be about 10% slower. // // Given a desired sample size, we choose equally spaced edges from // throughout the entire data set. We use a Bresenham-type algorithm to // choose the samples. const int kDesiredSampleSize = 10000; const int sample_interval = max(1, batch.num_edges / kDesiredSampleSize); // Initialize "edge_id" to be midway through the first sample interval. // Because samples are equally spaced the actual sample size may differ // slightly from the desired sample size. int edge_id = sample_interval / 2; const int actual_sample_size = (batch.num_edges + edge_id) / sample_interval; int face_count[6] = { 0, 0, 0, 0, 0, 0 }; if (pending_removals_) { for (const RemovedShape& removed : *pending_removals_) { edge_id += removed.edges.size(); while (edge_id >= sample_interval) { edge_id -= sample_interval; face_count[S2::GetFace(removed.edges[edge_id].v0)] += 1; } } } for (auto begin = batch.begin; begin < batch.end; ++begin.shape_id, begin.edge_id = 0) { const S2Shape* shape = this->shape(begin.shape_id); if (shape == nullptr) continue; // Already removed. int edges_end = begin.shape_id == batch.end.shape_id ? batch.end.edge_id : shape->num_edges(); edge_id += edges_end - begin.edge_id; while (edge_id >= sample_interval) { edge_id -= sample_interval; // For speed, we only count the face containing one endpoint of the // edge. In general the edge could span all 6 faces (with padding), but // it's not worth the expense to compute this more accurately. face_count[S2::GetFace(shape->edge(edge_id + begin.edge_id).v0)] += 1; } } // Now given the raw face counts, compute a confidence interval such that we // will be unlikely to allocate too little space. Computing accurate // binomial confidence intervals is expensive and not really necessary. // Instead we use a simple approximation: // - For any face with at least 1 sample, we use at least a 4-sigma // confidence interval. (The chosen width is adequate for the worst case // accuracy, which occurs when the face contains approximately 50% of the // edges.) Assuming that our sample is representative, the probability of // reserving too little space is approximately 1 in 30,000. // - For faces with no samples at all, we don't bother reserving space. // It is quite likely that such faces are truly empty, so we save time // and memory this way. If the face does contain some edges, there will // only be a few so it is fine to let the vector grow automatically. 
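// For example (hypothetical numbers): if 4,000 of the ~10,000 sampled edges
// land on face 2, the loop below reserves space for about
// (0.40 + 0.02) * batch.num_edges edges on that face.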
// On average, we reserve 2% extra space for each face that has geometry // (which could be up to 12% extra space overall, but typically 2%). // kMaxSemiWidth is the maximum semi-width over all probabilities p of a // 4-sigma binomial confidence interval with a sample size of 10,000. const double kMaxSemiWidth = 0.02; // First estimate the total amount of memory we are about to allocate. double multiplier = 1.0; for (int face = 0; face < 6; ++face) { if (face_count[face] != 0) multiplier += kMaxSemiWidth; } face_edge_usage = multiplier * batch.num_edges * sizeof(FaceEdge); if (!mem_tracker_.TallyTemp(face_edge_usage + other_usage)) { return; } const double sample_ratio = 1.0 / actual_sample_size; for (int face = 0; face < 6; ++face) { if (face_count[face] == 0) continue; double fraction = sample_ratio * face_count[face] + kMaxSemiWidth; all_edges[face].reserve(fraction * batch.num_edges); } } // Clips the edges of the given shape to the six cube faces, add the clipped // edges to "all_edges", and start tracking its interior if necessary. void MutableS2ShapeIndex::AddShape( const S2Shape* shape, int edges_begin, int edges_end, vector all_edges[6], InteriorTracker* tracker) const { // Construct a template for the edges to be added. FaceEdge edge; edge.shape_id = shape->id(); edge.has_interior = false; if (shape->dimension() == 2) { // To add a single shape with an interior over multiple batches, we first // add all the edges without tracking the interior. After all edges have // been added, the interior is updated in a separate step by setting the // contains_center() flags appropriately. if (edges_begin > 0 || edges_end < shape->num_edges()) { tracker->set_partial_shape_id(edge.shape_id); } else { edge.has_interior = true; tracker->AddShape( edge.shape_id, s2shapeutil::ContainsBruteForce(*shape, tracker->focus())); } } for (int e = edges_begin; e < edges_end; ++e) { edge.edge_id = e; edge.edge = shape->edge(e); edge.max_level = GetEdgeMaxLevel(edge.edge); AddFaceEdge(&edge, all_edges); } } void MutableS2ShapeIndex::RemoveShape(const RemovedShape& removed, vector all_edges[6], InteriorTracker* tracker) const { FaceEdge edge; edge.edge_id = -1; // Not used or needed for removed edges. edge.shape_id = removed.shape_id; edge.has_interior = removed.has_interior; if (edge.has_interior) { tracker->AddShape(edge.shape_id, removed.contains_tracker_origin); } for (const auto& removed_edge : removed.edges) { edge.edge = removed_edge; edge.max_level = GetEdgeMaxLevel(edge.edge); AddFaceEdge(&edge, all_edges); } } void MutableS2ShapeIndex::FinishPartialShape(int shape_id) { if (shape_id < 0) return; // The partial shape did not have an interior. const S2Shape* shape = this->shape(shape_id); // Filling in the interior of a partial shape can grow the cell_map_ // significantly, however the new cells have just one shape and no edges. // The following is a rough estimate of how much extra memory is needed // based on experiments. It assumes that one new cell is required for every // 10 shape edges, and that the cell map uses 50% more space than necessary // for the new entries because they are inserted between existing entries // (which means that the btree nodes are not full). 
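  // (The 0.1 and 1.5 factors in the expression below correspond to the
  // "one new cell per 10 shape edges" and "50% extra space" assumptions
  // described above.)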
if (mem_tracker_.is_active()) { const int64 new_usage = SpaceUsed() - mem_tracker_.client_usage_bytes() + 0.1 * shape->num_edges() * (1.5 * sizeof(CellMap::value_type) + sizeof(S2ShapeIndexCell) + sizeof(S2ClippedShape)); if (!mem_tracker_.TallyTemp(new_usage)) return; } // All the edges of the partial shape have already been indexed, now we just // need to set the contains_center() flags appropriately. We use a fresh // InteriorTracker for this purpose since we don't want to continue tracking // the interior state of any other shapes in this batch. // // We have implemented this below in the simplest way possible, namely by // scanning through the entire index. In theory it would be more efficient // to keep track of the set of index cells that were modified when the // partial shape's edges were added, and then visit only those cells. // However in practice any shape that is added over multiple batches is // likely to occupy most or all of the index anyway, so it is faster and // simpler to just iterate through the entire index. // // "tmp_edges" below speeds up large polygon index construction by 3-12%. vector tmp_edges; // Temporary storage. InteriorTracker tracker; tracker.AddShape(shape_id, s2shapeutil::ContainsBruteForce(*shape, tracker.focus())); S2CellId begin = S2CellId::Begin(S2CellId::kMaxLevel); for (CellMap::iterator index_it = cell_map_.begin(); ; ++index_it) { if (!tracker.shape_ids().empty()) { // Check whether we need to add new cells that are entirely contained by // the partial shape. S2CellId fill_end = (index_it != cell_map_.end()) ? index_it->first.range_min() : S2CellId::End(S2CellId::kMaxLevel); if (begin != fill_end) { for (S2CellId cellid : S2CellUnion::FromBeginEnd(begin, fill_end)) { S2ShapeIndexCell* cell = new S2ShapeIndexCell; S2ClippedShape* clipped = cell->add_shapes(1); clipped->Init(shape_id, 0); clipped->set_contains_center(true); index_it = cell_map_.insert(index_it, make_pair(cellid, cell)); ++index_it; } } } if (index_it == cell_map_.end()) break; // Now check whether the current index cell needs to be updated. S2CellId cellid = index_it->first; S2ShapeIndexCell* cell = index_it->second; int n = cell->shapes_.size(); if (n > 0 && cell->shapes_[n - 1].shape_id() == shape_id) { // This cell contains edges of the partial shape. If the partial shape // contains the center of this cell, we must update the index. S2PaddedCell pcell(cellid, kCellPadding); if (!tracker.at_cellid(cellid)) { tracker.MoveTo(pcell.GetEntryVertex()); } tracker.DrawTo(pcell.GetCenter()); S2ClippedShape* clipped = &cell->shapes_[n - 1]; int num_edges = clipped->num_edges(); S2_DCHECK_GT(num_edges, 0); for (int i = 0; i < num_edges; ++i) { tmp_edges.push_back(shape->edge(clipped->edge(i))); } for (const auto& edge : tmp_edges) { tracker.TestEdge(shape_id, edge); } if (!tracker.shape_ids().empty()) { // The partial shape contains the center of this index cell. clipped->set_contains_center(true); } tracker.DrawTo(pcell.GetExitVertex()); for (const auto& edge : tmp_edges) { tracker.TestEdge(shape_id, edge); } tracker.set_next_cellid(cellid.next()); tmp_edges.clear(); } else if (!tracker.shape_ids().empty()) { // The partial shape contains the center of an existing index cell that // does not intersect any of its edges. 
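      // (As with the cells created above, an S2ClippedShape with zero edges
      // and contains_center() set records that the cell lies entirely in the
      // shape's interior.)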
S2ClippedShape* clipped = cell->add_shapes(1); clipped->Init(shape_id, 0); clipped->set_contains_center(true); } begin = cellid.range_max().next(); } } inline void MutableS2ShapeIndex::AddFaceEdge( FaceEdge* edge, vector all_edges[6]) const { // Fast path: both endpoints are on the same face, and are far enough from // the edge of the face that don't intersect any (padded) adjacent face. int a_face = S2::GetFace(edge->edge.v0); if (a_face == S2::GetFace(edge->edge.v1)) { S2::ValidFaceXYZtoUV(a_face, edge->edge.v0, &edge->a); S2::ValidFaceXYZtoUV(a_face, edge->edge.v1, &edge->b); const double kMaxUV = 1 - kCellPadding; if (fabs(edge->a[0]) <= kMaxUV && fabs(edge->a[1]) <= kMaxUV && fabs(edge->b[0]) <= kMaxUV && fabs(edge->b[1]) <= kMaxUV) { all_edges[a_face].push_back(*edge); return; } } // Otherwise we simply clip the edge to all six faces. for (int face = 0; face < 6; ++face) { if (S2::ClipToPaddedFace(edge->edge.v0, edge->edge.v1, face, kCellPadding, &edge->a, &edge->b)) { all_edges[face].push_back(*edge); } } } // Returns the first level for which the given edge will be considered "long", // i.e. it will not count towards the max_edges_per_cell() limit. int MutableS2ShapeIndex::GetEdgeMaxLevel(const S2Shape::Edge& edge) const { // Compute the maximum cell edge length for which this edge is considered // "long". The calculation does not need to be perfectly accurate, so we // use Norm() rather than Angle() for speed. double max_cell_edge = ((edge.v0 - edge.v1).Norm() * absl::GetFlag(FLAGS_s2shape_index_cell_size_to_long_edge_ratio)); // Now return the first level encountered during subdivision where the // average cell edge length at that level is at most "max_cell_edge". return S2::kAvgEdge.GetLevelForMaxValue(max_cell_edge); } // EdgeAllocator provides temporary storage for new ClippedEdges that are // created during indexing. It is essentially a stack model, where edges are // allocated as the recursion does down and freed as it comes back up. // // It also provides a mutable vector of FaceEdges that is used when // incrementally updating the index (see AbsorbIndexCell). class MutableS2ShapeIndex::EdgeAllocator { public: EdgeAllocator() : size_(0) {} // Return a pointer to a newly allocated edge. The EdgeAllocator // retains ownership. ClippedEdge* NewClippedEdge() { if (size_ == clipped_edges_.size()) { clipped_edges_.emplace_back(new ClippedEdge); } return clipped_edges_[size_++].get(); } // Return the number of allocated edges. size_t size() const { return size_; } // Reset the allocator to only contain the first "size" allocated edges. void Reset(size_t size) { size_ = size; } vector* mutable_face_edges() { return &face_edges_; } private: // We can't use vector because edges are not allowed to move // once they have been allocated. Instead we keep a pool of allocated edges // that are all deleted together at the end. size_t size_; vector> clipped_edges_; // On the other hand, we can use vector because they are allocated // only at one level during the recursion (namely, the level at which we // absorb an existing index cell). vector face_edges_; EdgeAllocator(const EdgeAllocator&) = delete; void operator=(const EdgeAllocator&) = delete; }; // Given a face and a vector of edges that intersect that face, add or remove // all the edges from the index. (An edge is added if shapes_[id] is not // nullptr, and removed otherwise.) 
void MutableS2ShapeIndex::UpdateFaceEdges(int face, const vector& face_edges, InteriorTracker* tracker) { int num_edges = face_edges.size(); if (num_edges == 0 && tracker->shape_ids().empty()) return; // Create the initial ClippedEdge for each FaceEdge. Additional clipped // edges are created when edges are split between child cells. We create // two arrays, one containing the edge data and another containing pointers // to those edges, so that during the recursion we only need to copy // pointers in order to propagate an edge to the correct child. vector clipped_edge_storage; vector clipped_edges; clipped_edge_storage.reserve(num_edges); clipped_edges.reserve(num_edges); R2Rect bound = R2Rect::Empty(); for (int e = 0; e < num_edges; ++e) { ClippedEdge clipped; clipped.face_edge = &face_edges[e]; clipped.bound = R2Rect::FromPointPair(face_edges[e].a, face_edges[e].b); clipped_edge_storage.push_back(clipped); clipped_edges.push_back(&clipped_edge_storage.back()); bound.AddRect(clipped.bound); } // Construct the initial face cell containing all the edges, and then update // all the edges in the index recursively. EdgeAllocator alloc; S2CellId face_id = S2CellId::FromFace(face); S2PaddedCell pcell(face_id, kCellPadding); // "disjoint_from_index" means that the current cell being processed (and // all its descendants) are not already present in the index. It is set to // true during the recursion whenever we detect that the current cell is // disjoint from the index. We could save a tiny bit of work by setting // this flag to true here on the very first update, however currently there // is no easy way to check that. (It's not sufficient to test whether // cell_map_.empty() or pending_additions_begin_ == 0.) bool disjoint_from_index = false; if (num_edges > 0) { S2CellId shrunk_id = ShrinkToFit(pcell, bound); if (shrunk_id != pcell.id()) { // All the edges are contained by some descendant of the face cell. We // can save a lot of work by starting directly with that cell, but if we // are in the interior of at least one shape then we need to create // index entries for the cells we are skipping over. SkipCellRange(face_id.range_min(), shrunk_id.range_min(), tracker, &alloc, disjoint_from_index); pcell = S2PaddedCell(shrunk_id, kCellPadding); UpdateEdges(pcell, &clipped_edges, tracker, &alloc, disjoint_from_index); SkipCellRange(shrunk_id.range_max().next(), face_id.range_max().next(), tracker, &alloc, disjoint_from_index); return; } } // Otherwise (no edges, or no shrinking is possible), subdivide normally. UpdateEdges(pcell, &clipped_edges, tracker, &alloc, disjoint_from_index); } S2CellId MutableS2ShapeIndex::ShrinkToFit(const S2PaddedCell& pcell, const R2Rect& bound) const { S2CellId shrunk_id = pcell.ShrinkToFit(bound); if (shrunk_id != pcell.id()) { // Don't shrink any smaller than the existing index cells, since we need // to combine the new edges with those cells. Use InitStale() to avoid // applying updates recursively. Iterator iter; iter.InitStale(this); CellRelation r = iter.Locate(shrunk_id); if (r == INDEXED) { shrunk_id = iter.id(); } } return shrunk_id; } // Skip over the cells in the given range, creating index cells if we are // currently in the interior of at least one shape. void MutableS2ShapeIndex::SkipCellRange(S2CellId begin, S2CellId end, InteriorTracker* tracker, EdgeAllocator* alloc, bool disjoint_from_index) { // If we aren't in the interior of a shape, then skipping over cells is easy. 
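  // (If no tracked shape contains the focus, the skipped cells contain
  // neither edges nor interior points, so they simply do not appear in the
  // index at all.)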
if (tracker->shape_ids().empty()) return; // Otherwise generate the list of cell ids that we need to visit, and create // an index entry for each one. for (S2CellId skipped_id : S2CellUnion::FromBeginEnd(begin, end)) { vector clipped_edges; UpdateEdges(S2PaddedCell(skipped_id, kCellPadding), &clipped_edges, tracker, alloc, disjoint_from_index); } } // Given an edge and an interval "middle" along the v-axis, clip the edge // against the boundaries of "middle" and add the edge to the corresponding // children. /* static */ ABSL_ATTRIBUTE_ALWAYS_INLINE // ~8% faster inline void MutableS2ShapeIndex::ClipVAxis( const ClippedEdge* edge, const R1Interval& middle, vector child_edges[2], EdgeAllocator* alloc) { if (edge->bound[1].hi() <= middle.lo()) { // Edge is entirely contained in the lower child. child_edges[0].push_back(edge); } else if (edge->bound[1].lo() >= middle.hi()) { // Edge is entirely contained in the upper child. child_edges[1].push_back(edge); } else { // The edge bound spans both children. child_edges[0].push_back(ClipVBound(edge, 1, middle.hi(), alloc)); child_edges[1].push_back(ClipVBound(edge, 0, middle.lo(), alloc)); } } // Given a cell and a set of ClippedEdges whose bounding boxes intersect that // cell, add or remove all the edges from the index. Temporary space for // edges that need to be subdivided is allocated from the given EdgeAllocator. // "disjoint_from_index" is an optimization hint indicating that cell_map_ // does not contain any entries that overlap the given cell. void MutableS2ShapeIndex::UpdateEdges(const S2PaddedCell& pcell, vector* edges, InteriorTracker* tracker, EdgeAllocator* alloc, bool disjoint_from_index) { // Cases where an index cell is not needed should be detected before this. S2_DCHECK(!edges->empty() || !tracker->shape_ids().empty()); // This function is recursive with a maximum recursion depth of 30 // (S2CellId::kMaxLevel). Note that using an explicit stack does not seem // to be any faster based on profiling. // Incremental updates are handled as follows. All edges being added or // removed are combined together in "edges", and all shapes with interiors // are tracked using "tracker". We subdivide recursively as usual until we // encounter an existing index cell. At this point we "absorb" the index // cell as follows: // // - Edges and shapes that are being removed are deleted from "edges" and // "tracker". // - All remaining edges and shapes from the index cell are added to // "edges" and "tracker". // - Continue subdividing recursively, creating new index cells as needed. // - When the recursion gets back to the cell that was absorbed, we // restore "edges" and "tracker" to their previous state. // // Note that the only reason that we include removed shapes in the recursive // subdivision process is so that we can find all of the index cells that // contain those shapes efficiently, without maintaining an explicit list of // index cells for each shape (which would be expensive in terms of memory). bool index_cell_absorbed = false; if (!disjoint_from_index) { // There may be existing index cells contained inside "pcell". If we // encounter such a cell, we need to combine the edges being updated with // the existing cell contents by "absorbing" the cell. We use InitStale() // to avoid applying updates recursively. 
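    // (InitStale() positions the iterator over the index contents as they
    // currently exist, without first applying the pending updates that this
    // call is in the middle of processing.)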
Iterator iter; iter.InitStale(this); CellRelation r = iter.Locate(pcell.id()); if (r == DISJOINT) { disjoint_from_index = true; } else if (r == INDEXED) { // Absorb the index cell by transferring its contents to "edges" and // deleting it. We also start tracking the interior of any new shapes. AbsorbIndexCell(pcell, iter, edges, tracker, alloc); index_cell_absorbed = true; disjoint_from_index = true; } else { S2_DCHECK_EQ(SUBDIVIDED, r); } } // If there are existing index cells below us, then we need to keep // subdividing so that we can merge with those cells. Otherwise, // MakeIndexCell checks if the number of edges is small enough, and creates // an index cell if possible (returning true when it does so). if (!disjoint_from_index || !MakeIndexCell(pcell, *edges, tracker)) { // Reserve space for the edges that will be passed to each child. This is // important since otherwise the running time is dominated by the time // required to grow the vectors. The amount of memory involved is // relatively small, so we simply reserve the maximum space for every child. vector child_edges[2][2]; // [i][j] int num_edges = edges->size(); for (int i = 0; i < 2; ++i) { for (int j = 0; j < 2; ++j) { child_edges[i][j].reserve(num_edges); } } // Remember the current size of the EdgeAllocator so that we can free any // edges that are allocated during edge splitting. size_t alloc_size = alloc->size(); // Compute the middle of the padded cell, defined as the rectangle in // (u,v)-space that belongs to all four (padded) children. By comparing // against the four boundaries of "middle" we can determine which children // each edge needs to be propagated to. const R2Rect& middle = pcell.middle(); // Build up a vector edges to be passed to each child cell. The (i,j) // directions are left (i=0), right (i=1), lower (j=0), and upper (j=1). // Note that the vast majority of edges are propagated to a single child. // This case is very fast, consisting of between 2 and 4 floating-point // comparisons and copying one pointer. (ClipVAxis is inline.) for (int e = 0; e < num_edges; ++e) { const ClippedEdge* edge = (*edges)[e]; if (edge->bound[0].hi() <= middle[0].lo()) { // Edge is entirely contained in the two left children. ClipVAxis(edge, middle[1], child_edges[0], alloc); } else if (edge->bound[0].lo() >= middle[0].hi()) { // Edge is entirely contained in the two right children. ClipVAxis(edge, middle[1], child_edges[1], alloc); } else if (edge->bound[1].hi() <= middle[1].lo()) { // Edge is entirely contained in the two lower children. child_edges[0][0].push_back(ClipUBound(edge, 1, middle[0].hi(), alloc)); child_edges[1][0].push_back(ClipUBound(edge, 0, middle[0].lo(), alloc)); } else if (edge->bound[1].lo() >= middle[1].hi()) { // Edge is entirely contained in the two upper children. child_edges[0][1].push_back(ClipUBound(edge, 1, middle[0].hi(), alloc)); child_edges[1][1].push_back(ClipUBound(edge, 0, middle[0].lo(), alloc)); } else { // The edge bound spans all four children. The edge itself intersects // either three or four (padded) children. const ClippedEdge* left = ClipUBound(edge, 1, middle[0].hi(), alloc); ClipVAxis(left, middle[1], child_edges[0], alloc); const ClippedEdge* right = ClipUBound(edge, 0, middle[0].lo(), alloc); ClipVAxis(right, middle[1], child_edges[1], alloc); } } // Free any memory reserved for children that turned out to be empty. This // step is cheap and reduces peak memory usage by about 10% when building // large indexes (> 10M edges). 
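    // The swap with a default-constructed temporary below is the usual idiom
    // for actually releasing a vector's heap storage; clear() alone keeps the
    // old capacity.  A minimal generic sketch of the idiom (the element type
    // and variable name are hypothetical, chosen only for illustration):
    //
    //   std::vector<int> v;
    //   v.reserve(1000);             // capacity() >= 1000
    //   v.clear();                   // size() == 0, capacity unchanged
    //   std::vector<int>().swap(v);  // storage handed to the temporary and
    //                                // freed; capacity() is 0 in practice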
for (int i = 0; i < 2; ++i) { for (int j = 0; j < 2; ++j) { if (child_edges[i][j].empty()) { vector().swap(child_edges[i][j]); } } } // Now recursively update the edges in each child. We call the children in // increasing order of S2CellId so that when the index is first constructed, // all insertions into cell_map_ are at the end (which is much faster). for (int pos = 0; pos < 4; ++pos) { int i, j; pcell.GetChildIJ(pos, &i, &j); if (!child_edges[i][j].empty() || !tracker->shape_ids().empty()) { UpdateEdges(S2PaddedCell(pcell, i, j), &child_edges[i][j], tracker, alloc, disjoint_from_index); } } // Free any temporary edges that were allocated during clipping. alloc->Reset(alloc_size); } if (index_cell_absorbed) { // Restore the state for any edges being removed that we are tracking. tracker->RestoreStateBefore(pending_additions_begin_); } } // Given an edge, clip the given endpoint (lo=0, hi=1) of the u-axis so that // it does not extend past the given value. /* static */ const MutableS2ShapeIndex::ClippedEdge* MutableS2ShapeIndex::ClipUBound(const ClippedEdge* edge, int u_end, double u, EdgeAllocator* alloc) { // First check whether the edge actually requires any clipping. (Sometimes // this method is called when clipping is not necessary, e.g. when one edge // endpoint is in the overlap area between two padded child cells.) if (u_end == 0) { if (edge->bound[0].lo() >= u) return edge; } else { if (edge->bound[0].hi() <= u) return edge; } // We interpolate the new v-value from the endpoints of the original edge. // This has two advantages: (1) we don't need to store the clipped endpoints // at all, just their bounding box; and (2) it avoids the accumulation of // roundoff errors due to repeated interpolations. The result needs to be // clamped to ensure that it is in the appropriate range. const FaceEdge& e = *edge->face_edge; double v = edge->bound[1].Project( S2::InterpolateDouble(u, e.a[0], e.b[0], e.a[1], e.b[1])); // Determine which endpoint of the v-axis bound to update. If the edge // slope is positive we update the same endpoint, otherwise we update the // opposite endpoint. int v_end = u_end ^ ((e.a[0] > e.b[0]) != (e.a[1] > e.b[1])); return UpdateBound(edge, u_end, u, v_end, v, alloc); } // Given an edge, clip the given endpoint (lo=0, hi=1) of the v-axis so that // it does not extend past the given value. /* static */ const MutableS2ShapeIndex::ClippedEdge* MutableS2ShapeIndex::ClipVBound(const ClippedEdge* edge, int v_end, double v, EdgeAllocator* alloc) { // See comments in ClipUBound. if (v_end == 0) { if (edge->bound[1].lo() >= v) return edge; } else { if (edge->bound[1].hi() <= v) return edge; } const FaceEdge& e = *edge->face_edge; double u = edge->bound[0].Project( S2::InterpolateDouble(v, e.a[1], e.b[1], e.a[0], e.b[0])); int u_end = v_end ^ ((e.a[0] > e.b[0]) != (e.a[1] > e.b[1])); return UpdateBound(edge, u_end, u, v_end, v, alloc); } // Given an edge and two bound endpoints that need to be updated, allocate and // return a new edge with the updated bound. 
/* static */ inline const MutableS2ShapeIndex::ClippedEdge* MutableS2ShapeIndex::UpdateBound(const ClippedEdge* edge, int u_end, double u, int v_end, double v, EdgeAllocator* alloc) { ClippedEdge* clipped = alloc->NewClippedEdge(); clipped->face_edge = edge->face_edge; clipped->bound[0][u_end] = u; clipped->bound[1][v_end] = v; clipped->bound[0][1-u_end] = edge->bound[0][1-u_end]; clipped->bound[1][1-v_end] = edge->bound[1][1-v_end]; S2_DCHECK(!clipped->bound.is_empty()); S2_DCHECK(edge->bound.Contains(clipped->bound)); return clipped; } // Absorb an index cell by transferring its contents to "edges" and/or // "tracker", and then delete this cell from the index. If "edges" includes // any edges that are being removed, this method also updates their // InteriorTracker state to correspond to the exit vertex of this cell, and // saves the InteriorTracker state by calling SaveAndClearStateBefore(). It // is the caller's responsibility to restore this state by calling // RestoreStateBefore() when processing of this cell is finished. void MutableS2ShapeIndex::AbsorbIndexCell(const S2PaddedCell& pcell, const Iterator& iter, vector* edges, InteriorTracker* tracker, EdgeAllocator* alloc) { S2_DCHECK_EQ(pcell.id(), iter.id()); // When we absorb a cell, we erase all the edges that are being removed. // However when we are finished with this cell, we want to restore the state // of those edges (since that is how we find all the index cells that need // to be updated). The edges themselves are restored automatically when // UpdateEdges returns from its recursive call, but the InteriorTracker // state needs to be restored explicitly. // // Here we first update the InteriorTracker state for removed edges to // correspond to the exit vertex of this cell, and then save the // InteriorTracker state. This state will be restored by UpdateEdges when // it is finished processing the contents of this cell. (Note in the test // below that removed edges are always sorted before added edges.) if (tracker->is_active() && !edges->empty() && is_shape_being_removed((*edges)[0]->face_edge->shape_id)) { // We probably need to update the InteriorTracker. ("Probably" because // it's possible that all shapes being removed do not have interiors.) if (!tracker->at_cellid(pcell.id())) { tracker->MoveTo(pcell.GetEntryVertex()); } tracker->DrawTo(pcell.GetExitVertex()); tracker->set_next_cellid(pcell.id().next()); for (const ClippedEdge* edge : *edges) { const FaceEdge* face_edge = edge->face_edge; if (!is_shape_being_removed(face_edge->shape_id)) { break; // All shapes being removed come first. } if (face_edge->has_interior) { tracker->TestEdge(face_edge->shape_id, face_edge->edge); } } } // Save the state of the edges being removed so that it can be restored when // we are finished processing this cell and its children. Below we not only // remove those edges but also add new edges whose state only needs to be // tracked within this subtree. We don't need to save the state of the // edges being added because they aren't being removed from "edges" and will // therefore be updated normally as we visit this cell and its children. tracker->SaveAndClearStateBefore(pending_additions_begin_); // Create a FaceEdge for each edge in this cell that isn't being removed. 
vector* face_edges = alloc->mutable_face_edges(); face_edges->clear(); bool tracker_moved = false; const S2ShapeIndexCell& cell = iter.cell(); for (int s = 0; s < cell.num_clipped(); ++s) { const S2ClippedShape& clipped = cell.clipped(s); int shape_id = clipped.shape_id(); const S2Shape* shape = this->shape(shape_id); if (shape == nullptr) continue; // This shape is being removed. int num_edges = clipped.num_edges(); // If this shape has an interior, start tracking whether we are inside the // shape. UpdateEdges() wants to know whether the entry vertex of this // cell is inside the shape, but we only know whether the center of the // cell is inside the shape, so we need to test all the edges against the // line segment from the cell center to the entry vertex. FaceEdge edge; edge.shape_id = shape_id; edge.has_interior = (shape->dimension() == 2 && shape_id != tracker->partial_shape_id()); if (edge.has_interior) { tracker->AddShape(shape_id, clipped.contains_center()); // There might not be any edges in this entire cell (i.e., it might be // in the interior of all shapes), so we delay updating the tracker // until we see the first edge. if (!tracker_moved && num_edges > 0) { tracker->MoveTo(pcell.GetCenter()); tracker->DrawTo(pcell.GetEntryVertex()); tracker->set_next_cellid(pcell.id()); tracker_moved = true; } } for (int i = 0; i < num_edges; ++i) { int e = clipped.edge(i); edge.edge_id = e; edge.edge = shape->edge(e); edge.max_level = GetEdgeMaxLevel(edge.edge); if (edge.has_interior) tracker->TestEdge(shape_id, edge.edge); if (!S2::ClipToPaddedFace(edge.edge.v0, edge.edge.v1, pcell.id().face(), kCellPadding, &edge.a, &edge.b)) { S2_LOG(DFATAL) << "Invariant failure in MutableS2ShapeIndex"; } face_edges->push_back(edge); } } // Now create a ClippedEdge for each FaceEdge, and put them in "new_edges". vector new_edges; for (const FaceEdge& face_edge : *face_edges) { ClippedEdge* clipped = alloc->NewClippedEdge(); clipped->face_edge = &face_edge; clipped->bound = S2::GetClippedEdgeBound(face_edge.a, face_edge.b, pcell.bound()); new_edges.push_back(clipped); } // Discard any edges from "edges" that are being removed, and append the // remainder to "new_edges". (This keeps the edges sorted by shape id.) for (int i = 0; i < edges->size(); ++i) { const ClippedEdge* clipped = (*edges)[i]; if (!is_shape_being_removed(clipped->face_edge->shape_id)) { new_edges.insert(new_edges.end(), edges->begin() + i, edges->end()); break; } } // Update the edge list and delete this cell from the index. edges->swap(new_edges); cell_map_.erase(pcell.id()); delete &cell; } // Attempt to build an index cell containing the given edges, and return true // if successful. (Otherwise the edges should be subdivided further.) bool MutableS2ShapeIndex::MakeIndexCell(const S2PaddedCell& pcell, const vector& edges, InteriorTracker* tracker) { if (edges.empty() && tracker->shape_ids().empty()) { // No index cell is needed. (In most cases this situation is detected // before we get to this point, but this can happen when all shapes in a // cell are removed.) return true; } // We can show using amortized analysis that the total index size is // // O(c1 * n + c2 * (1 - f) / f * n) // // where n is the number of input edges (and where we also count an "edge" // for each shape with an interior but no edges), f is the value of // FLAGS_s2shape_index_min_short_edge_fraction, and c1 and c2 are constants // where c2 is about 20 times larger than c1. 
// // First observe that the space used by a MutableS2ShapeIndex is // proportional to the space used by all of its index cells, and the space // used by an S2ShapeIndexCell is proportional to the number of edges that // intersect that cell plus the number of shapes that contain the entire // cell ("containing shapes"). Define an "index entry" as an intersecting // edge or containing shape stored by an index cell. Our goal is then to // bound the number of index entries. // // We divide the index entries into two groups. An index entry is "short" // if it represents an edge that was considered short in that index cell's // parent, and "long" otherwise. (Note that the long index entries also // include the containing shapes mentioned above.) We then bound the // maximum number of both types of index entries by associating them with // edges that were considered short in those index cells' parents. // // First consider the short index entries for a given edge E. Let S be the // set of index cells that intersect E and where E was considered short in // those index cells' parents. Since E was short in each parent cell, the // width of those parent cells is at least some fraction "g" of E's length // (as controlled by FLAGS_s2shape_index_cell_size_to_long_edge_ratio). // Therefore the minimum width of each cell in S is also at least some // fraction of E's length (i.e., g / 2). This implies that there are at most // a constant number c1 of such cells, since they all intersect E and do not // overlap, which means that there are at most (c1 * n) short entries in // total. // // With index_cell_size_to_long_edge_ratio = 1.0 (the default value), it can // be shown that c1 = 10. In other words, it is not possible for a given // edge to intersect more than 10 index cells where it was considered short // in those cells' parents. The value of c1 can be reduced as low c1 = 4 by // increasing index_cell_size_to_long_edge_ratio to about 3.1. (The reason // the minimum value is 3.1 rather than 2.0 is that this ratio is defined in // terms of the average edge length of cells at a given level, rather than // their minimum width, and 2 * (S2::kAvgEdge / S2::kMinWidth) ~= 3.1.) // // Next we consider the long index entries. Let c2 be the maximum number of // index cells where a given edge E was considered short in those cells' // parents. (Unlike the case above, we do not require that these cells // intersect E.) Because the minimum width of each parent cell is at least // some fraction of E's length and the parent cells at a given level do not // overlap, there can be at most a small constant number of index cells at // each level where E is considered short in those cells' parents. For // example, consider a very short edge E that intersects the midpoint of a // cell edge at level 0. There are 16 cells at level 30 where E was // considered short in the parent cell, 12 cells at each of levels 29..2, and // 4 cells at levels 1 and 0 (pretending that all 6 face cells share a common // "parent"). This yields a total of c2 = 360 index cells. This is actually // the worst case for index_cell_size_to_long_edge_ratio >= 3.1; with the // default value of 1.0 it is possible to have a few more index cells at // levels 29 and 30, for a maximum of c2 = 366 index cells. 
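  // As a rough numeric illustration of the bound above (using the constants
  // quoted here, c1 = 10 and c2 = 366, and assuming purely for illustration
  // that f = 0.2), the total number of index entries is at most
  //
  //   c1 * n + c2 * (1 - f) / f * n  =  10 * n + 366 * 4 * n  =  1474 * n
  //
  // which is linear in the number of input edges n, as claimed, although the
  // worst-case constant is large.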
// // The code below subdivides a given cell only if // // s > f * (s + l) // // where "f" is the min_short_edge_fraction parameter, "s" is the number of // short edges that intersect the cell, and "l" is the number of long edges // that intersect the cell plus an upper bound on the number of shapes that // contain the entire cell. (It is an upper bound rather than an exact count // because we use the number of shapes that contain an arbitrary vertex of // the cell.) Note that the number of long index entries in each child of // this cell is at most "l" because no child intersects more edges than its // parent or is entirely contained by more shapes than its parent. // // The inequality above can be rearranged to give // // l < s * (1 - f) / f // // This says that each long index entry in a child cell can be associated // with at most (1 - f) / f edges that were considered short when the parent // cell was subdivided. Furthermore we know that there are at most c2 index // cells where a given edge was considered short in the parent cell. Since // there are only n edges in total, this means that the maximum number of // long index entries is at most // // c2 * (1 - f) / f * n // // and putting this together with the result for short index entries gives // the desired bound. // // There are a variety of ways to make this bound tighter, e.g. when "n" is // relatively small. For example when the indexed geometry satisfies the // requirements of S2BooleanOperation (i.e., shape interiors are disjoint) // and the min_short_edge_fraction parameter is not too large, then the // constant c2 above is only about half as big (i.e., c2 ~= 180). This is // because the worst case under these circumstances requires having many // shapes whose interiors overlap. // Continue subdividing if the proposed index cell would contain too many // edges that are "short" relative to its size (as controlled by the // FLAGS_s2shape_index_cell_size_to_long_edge_ratio parameter). Usually "too // many" means more than options_.max_edges_per_cell(), but this value might // be increased if the cell has a lot of long edges and/or containing shapes. // This strategy ensures that the total index size is linear (see above). if (edges.size() > options_.max_edges_per_cell()) { int max_short_edges = max(options_.max_edges_per_cell(), static_cast( absl::GetFlag(FLAGS_s2shape_index_min_short_edge_fraction) * (edges.size() + tracker->shape_ids().size()))); int count = 0; for (const ClippedEdge* edge : edges) { count += (pcell.level() < edge->face_edge->max_level); if (count > max_short_edges) return false; } } // Possible optimization: Continue subdividing as long as exactly one child // of "pcell" intersects the given edges. This can be done by finding the // bounding box of all the edges and calling ShrinkToFit(): // // S2CellId cellid = pcell.ShrinkToFit(GetRectBound(edges)); // // Currently this is not beneficial; it slows down construction by 4-25% // (mainly computing the union of the bounding rectangles) and also slows // down queries (since more recursive clipping is required to get down to // the level of a spatial index cell). But it may be worth trying again // once "contains_center" is computed and all algorithms are modified to // take advantage of it. // We update the InteriorTracker as follows. For every S2Cell in the index // we construct two edges: one edge from entry vertex of the cell to its // center, and one from the cell center to its exit vertex. Here "entry" // and "exit" refer the S2CellId ordering, i.e. 
the order in which points // are encountered along the S2 space-filling curve. The exit vertex then // becomes the entry vertex for the next cell in the index, unless there are // one or more empty intervening cells, in which case the InteriorTracker // state is unchanged because the intervening cells have no edges. // Shift the InteriorTracker focus point to the center of the current cell. if (tracker->is_active() && !edges.empty()) { if (!tracker->at_cellid(pcell.id())) { tracker->MoveTo(pcell.GetEntryVertex()); } tracker->DrawTo(pcell.GetCenter()); TestAllEdges(edges, tracker); } // Allocate and fill a new index cell. To get the total number of shapes we // need to merge the shapes associated with the intersecting edges together // with the shapes that happen to contain the cell center. const ShapeIdSet& cshape_ids = tracker->shape_ids(); int num_shapes = CountShapes(edges, cshape_ids); S2ShapeIndexCell* cell = new S2ShapeIndexCell; S2ClippedShape* base = cell->add_shapes(num_shapes); // To fill the index cell we merge the two sources of shapes: "edge shapes" // (those that have at least one edge that intersects this cell), and // "containing shapes" (those that contain the cell center). We keep track // of the index of the next intersecting edge and the next containing shape // as we go along. Both sets of shape ids are already sorted. int enext = 0; ShapeIdSet::const_iterator cnext = cshape_ids.begin(); for (int i = 0; i < num_shapes; ++i) { S2ClippedShape* clipped = base + i; int eshape_id = num_shape_ids(), cshape_id = eshape_id; // Sentinels if (enext != edges.size()) { eshape_id = edges[enext]->face_edge->shape_id; } if (cnext != cshape_ids.end()) { cshape_id = *cnext; } int ebegin = enext; if (cshape_id < eshape_id) { // The entire cell is in the shape interior. clipped->Init(cshape_id, 0); clipped->set_contains_center(true); ++cnext; } else { // Count the number of edges for this shape and allocate space for them. while (enext < edges.size() && edges[enext]->face_edge->shape_id == eshape_id) { ++enext; } clipped->Init(eshape_id, enext - ebegin); for (int e = ebegin; e < enext; ++e) { clipped->set_edge(e - ebegin, edges[e]->face_edge->edge_id); } if (cshape_id == eshape_id) { clipped->set_contains_center(true); ++cnext; } } } // UpdateEdges() visits cells in increasing order of S2CellId, so during // initial construction of the index all insertions happen at the end. It // is much faster to give an insertion hint in this case. Otherwise the // hint doesn't do much harm. With more effort we could provide a hint even // during incremental updates, but this is probably not worth the effort. cell_map_.insert(cell_map_.end(), make_pair(pcell.id(), cell)); // Shift the InteriorTracker focus point to the exit vertex of this cell. if (tracker->is_active() && !edges.empty()) { tracker->DrawTo(pcell.GetExitVertex()); TestAllEdges(edges, tracker); tracker->set_next_cellid(pcell.id().next()); } return true; } // Call tracker->TestEdge() on all edges from shapes that have interiors. /* static */ void MutableS2ShapeIndex::TestAllEdges(const vector& edges, InteriorTracker* tracker) { for (const ClippedEdge* edge : edges) { const FaceEdge* face_edge = edge->face_edge; if (face_edge->has_interior) { tracker->TestEdge(face_edge->shape_id, face_edge->edge); } } } // Return the number of distinct shapes that are either associated with the // given edges, or that are currently stored in the InteriorTracker. 
/* static */ int MutableS2ShapeIndex::CountShapes(const vector& edges, const ShapeIdSet& cshape_ids) { int count = 0; int last_shape_id = -1; ShapeIdSet::const_iterator cnext = cshape_ids.begin(); // Next shape for (const ClippedEdge* edge : edges) { if (edge->face_edge->shape_id != last_shape_id) { ++count; last_shape_id = edge->face_edge->shape_id; // Skip over any containing shapes up to and including this one, // updating "count" appropriately. for (; cnext != cshape_ids.end(); ++cnext) { if (*cnext > last_shape_id) break; if (*cnext < last_shape_id) ++count; } } } // Count any remaining containing shapes. count += (cshape_ids.end() - cnext); return count; } size_t MutableS2ShapeIndex::SpaceUsed() const { size_t size = sizeof(*this); size += shapes_.capacity() * sizeof(unique_ptr); // cell_map_ itself is already included in sizeof(*this). size += cell_map_.bytes_used() - sizeof(cell_map_); size += cell_map_.size() * sizeof(S2ShapeIndexCell); Iterator it; for (it.InitStale(this, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { const S2ShapeIndexCell& cell = it.cell(); size += cell.shapes_.capacity() * sizeof(S2ClippedShape); for (int s = 0; s < cell.num_clipped(); ++s) { const S2ClippedShape& clipped = cell.clipped(s); if (!clipped.is_inline()) { size += clipped.num_edges() * sizeof(int32); } } } if (pending_removals_ != nullptr) { size += sizeof(*pending_removals_); size += pending_removals_->capacity() * sizeof(RemovedShape); for (const RemovedShape& removed : *pending_removals_) { size += removed.edges.capacity() * sizeof(S2Shape::Edge); } } return size; } void MutableS2ShapeIndex::Encode(Encoder* encoder) const { // The version number is encoded in 2 bits, under the assumption that by the // time we need 5 versions the first version can be permanently retired. // This only saves 1 byte, but that's significant for very small indexes. encoder->Ensure(Varint::kMax64); uint64 max_edges = options_.max_edges_per_cell(); encoder->put_varint64(max_edges << 2 | kCurrentEncodingVersionNumber); // The index will be built anyway when we iterate through it, but building // it in advance lets us size the cell_ids vector correctly. 
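  // (Worked example of the packing above: with max_edges_per_cell() == 10 and
  // kCurrentEncodingVersionNumber == 0, the value written is
  // (10 << 2) | 0 == 40, which fits in a single varint byte; Init() below
  // recovers the version as 40 & 3 == 0 and the edge limit as 40 >> 2 == 10.
  // The value 10 is chosen here purely for illustration.)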
ForceBuild(); vector cell_ids; cell_ids.reserve(cell_map_.size()); s2coding::StringVectorEncoder encoded_cells; for (Iterator it(this, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { cell_ids.push_back(it.id()); it.cell().Encode(num_shape_ids(), encoded_cells.AddViaEncoder()); } s2coding::EncodeS2CellIdVector(cell_ids, encoder); encoded_cells.Encode(encoder); } bool MutableS2ShapeIndex::Init(Decoder* decoder, const ShapeFactory& shape_factory) { Clear(); uint64 max_edges_version; if (!decoder->get_varint64(&max_edges_version)) return false; int version = max_edges_version & 3; if (version != kCurrentEncodingVersionNumber) return false; options_.set_max_edges_per_cell(max_edges_version >> 2); uint32 num_shapes = shape_factory.size(); shapes_.reserve(num_shapes); for (int shape_id = 0; shape_id < num_shapes; ++shape_id) { auto shape = shape_factory[shape_id]; if (shape) shape->id_ = shape_id; shapes_.push_back(std::move(shape)); } s2coding::EncodedS2CellIdVector cell_ids; s2coding::EncodedStringVector encoded_cells; if (!cell_ids.Init(decoder)) return false; if (!encoded_cells.Init(decoder)) return false; for (int i = 0; i < cell_ids.size(); ++i) { S2CellId id = cell_ids[i]; S2ShapeIndexCell* cell = new S2ShapeIndexCell; Decoder decoder = encoded_cells.GetDecoder(i); if (!cell->Decode(num_shapes, &decoder)) return false; cell_map_.insert(cell_map_.end(), make_pair(id, cell)); } return true; } s2geometry-0.10.0/src/s2/mutable_s2shape_index.h000066400000000000000000001007061422156367100214340ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_MUTABLE_S2SHAPE_INDEX_H_ #define S2_MUTABLE_S2SHAPE_INDEX_H_ #include #include #include #include #include #include #include "absl/base/macros.h" #include "absl/base/thread_annotations.h" #include "absl/container/btree_map.h" #include "absl/memory/memory.h" #include "absl/synchronization/mutex.h" #include "s2/base/commandlineflags.h" #include "s2/base/integral_types.h" #include "s2/base/logging.h" #include "s2/base/spinlock.h" #include "s2/_fp_contract_off.h" #include "s2/s2cell_id.h" #include "s2/s2memory_tracker.h" #include "s2/s2pointutil.h" #include "s2/s2shape.h" #include "s2/s2shape_index.h" #include "s2/s2shapeutil_shape_edge_id.h" namespace s2internal { // Hack to expose bytes_used. template class BTreeMap : public absl::btree_map { public: size_t bytes_used() const { return this->tree_.bytes_used(); } }; } // namespace s2internal // MutableS2ShapeIndex is a class for in-memory indexing of polygonal geometry. // The objects in the index are known as "shapes", and may consist of points, // polylines, and/or polygons, possibly overlapping. The index makes it very // fast to answer queries such as finding nearby shapes, measuring distances, // testing for intersection and containment, etc. It is one of several // implementations of the S2ShapeIndex interface (see EncodedS2ShapeIndex). 
// // MutableS2ShapeIndex allows not only building an index, but also updating it // incrementally by adding or removing shapes (hence its name). It is designed // to be compact; usually the index is smaller than the underlying geometry. // It is capable of indexing up to hundreds of millions of edges. The index is // also fast to construct. The index size and construction time are guaranteed // to be linear in the number of input edges. // // There are a number of built-in classes that work with S2ShapeIndex objects. // Generally these classes accept any collection of geometry that can be // represented by an S2ShapeIndex, i.e. any combination of points, polylines, // and polygons. Such classes include: // // - S2ContainsPointQuery: returns the shape(s) that contain a given point. // // - S2ClosestEdgeQuery: returns the closest edge(s) to a given point, edge, // S2CellId, or S2ShapeIndex. // // - S2CrossingEdgeQuery: returns the edge(s) that cross a given edge. // // - S2BooleanOperation: computes boolean operations such as union, // and boolean predicates such as containment. // // - S2ShapeIndexRegion: can be used together with S2RegionCoverer to // approximate geometry as a set of S2CellIds. // // - S2ShapeIndexBufferedRegion: computes approximations that have been // expanded by a given radius. // // Here is an example showing how to build an index for a set of polygons, and // then then determine which polygon(s) contain each of a set of query points: // // void TestContainment(const vector& points, // const vector& polygons) { // MutableS2ShapeIndex index; // for (auto polygon : polygons) { // index.Add(absl::make_unique(polygon)); // } // auto query = MakeS2ContainsPointQuery(&index); // for (const auto& point : points) { // for (S2Shape* shape : query.GetContainingShapes(point)) { // S2Polygon* polygon = polygons[shape->id()]; // ... do something with (point, polygon) ... // } // } // } // // This example uses S2Polygon::Shape, which is one example of an S2Shape // object. S2Polyline and S2Loop also have nested Shape classes, and there are // additional S2Shape types defined in *_shape.h. // // Internally, MutableS2ShapeIndex is essentially a map from S2CellIds to the // set of shapes that intersect each S2CellId. It is adaptively refined to // ensure that no cell contains more than a small number of edges. // // For efficiency, updates are batched together and applied lazily on the // first subsequent query. Locking is used to ensure that MutableS2ShapeIndex // has the same thread-safety properties as "vector": const methods are // thread-safe, while non-const methods are not thread-safe. This means that // if one thread updates the index, you must ensure that no other thread is // reading or updating the index at the same time. // // MutableS2ShapeIndex has an Encode() method that allows the index to be // serialized. An encoded S2ShapeIndex can be decoded either into its // original form (MutableS2ShapeIndex) or into an EncodedS2ShapeIndex. The // key property of EncodedS2ShapeIndex is that it can be constructed // instantaneously, since the index is kept in its original encoded form. // Data is decoded only when an operation needs it. For example, to determine // which shapes(s) contain a given query point only requires decoding the data // in the S2ShapeIndexCell that contains that point. class MutableS2ShapeIndex final : public S2ShapeIndex { private: using CellMap = s2internal::BTreeMap; public: // Options that affect construction of the MutableS2ShapeIndex. 
class Options { public: Options(); // The maximum number of edges per cell. If a cell has more than this // many edges that are not considered "long" relative to the cell size, // then it is subdivided. (Whether an edge is considered "long" is // controlled by --s2shape_index_cell_size_to_long_edge_ratio flag.) // // Values between 10 and 50 represent a reasonable balance between memory // usage, construction time, and query time. Small values make queries // faster, while large values make construction faster and use less memory. // Values higher than 50 do not save significant additional memory, and // query times can increase substantially, especially for algorithms that // visit all pairs of potentially intersecting edges (such as polygon // validation), since this is quadratic in the number of edges per cell. // // Note that the *average* number of edges per cell is generally slightly // less than half of the maximum value defined here. // // Defaults to value given by --s2shape_index_default_max_edges_per_cell. int max_edges_per_cell() const { return max_edges_per_cell_; } void set_max_edges_per_cell(int max_edges_per_cell); private: int max_edges_per_cell_; }; // Creates a MutableS2ShapeIndex that uses the default option settings. // Option values may be changed by calling Init(). MutableS2ShapeIndex(); // Create a MutableS2ShapeIndex with the given options. explicit MutableS2ShapeIndex(const Options& options); ~MutableS2ShapeIndex() override; // Initialize a MutableS2ShapeIndex with the given options. This method may // only be called when the index is empty (i.e. newly created or Clear() has // just been called). May be called before or after set_memory_tracker(). void Init(const Options& options); const Options& options() const { return options_; } // Specifies that memory usage should be tracked and/or limited by the given // S2MemoryTracker. For example: // // S2MemoryTracker tracker; // tracker.set_limit(500 << 20); // 500 MB memory limit // MutableS2ShapeIndex index; // index.set_memory_tracker(&tracker); // // If the memory limit is exceeded, an appropriate status is returned in // memory_tracker()->error() and any partially built index is discarded // (equivalent to calling Minimize()). // // This method may be called multiple times in order to switch from one // memory tracker to another or stop memory tracking altogether (by passing // nullptr) in which case the memory usage due to this index is subtracted. // // REQUIRES: The lifetime of "tracker" must exceed the lifetime of the index // unless set_memory_tracker(nullptr) is called to stop memory // tracking before the index destructor is called. // // This implies that the S2MemoryTracker must be declared *before* // the MutableS2ShapeIndex in the example above. // // CAVEATS: // // - This method is not const and is therefore not thread-safe. // // - Does not track memory used by the S2Shapes in the index. // // - While the index representation itself is tracked very accurately, // the temporary data needed for index construction is tracked using // heuristics and may be underestimated or overestimated. // // - Temporary memory usage is typically 10x larger than the final index // size, however it can be reduced by specifying a suitable value for // FLAGS_s2shape_index_tmp_memory_budget (the default is 100 MB). If // more temporary memory than this is needed during construction, index // updates will be split into multiple batches in order to keep the // estimated temporary memory usage below this limit. 
// // - S2MemoryTracker::limit() has no effect on how much temporary memory // MutableS2ShapeIndex will attempt to use during index construction; it // simply causes an error to be returned when the limit would otherwise // be exceeded. If you set a memory limit smaller than 100MB and want to // reduce memory usage rather than simply generating an error then you // should also set FLAGS_s2shape_index_tmp_memory_budget appropriately. void set_memory_tracker(S2MemoryTracker* tracker); S2MemoryTracker* memory_tracker() const { return mem_tracker_.tracker(); } // The number of distinct shape ids that have been assigned. This equals // the number of shapes in the index provided that no shapes have ever been // removed. (Shape ids are not reused.) int num_shape_ids() const override { return static_cast(shapes_.size()); } // Returns a pointer to the shape with the given id, or nullptr if the shape // has been removed from the index. S2Shape* shape(int id) const override { return shapes_[id].get(); } // Minimizes memory usage by requesting that any data structures that can be // rebuilt should be discarded. This method invalidates all iterators. // // Like all non-const methods, this method is not thread-safe. void Minimize() override; // Appends an encoded representation of the S2ShapeIndex to "encoder". // // This method does not encode the S2Shapes in the index; it is the client's // responsibility to encode them separately. For example: // // s2shapeutil::CompactEncodeTaggedShapes(index, encoder); // index.Encode(encoder); // // The encoded size is typically much smaller than the in-memory size. // Here are a few examples: // // Number of edges In-memory space used Encoded size (%) // -------------------------------------------------------------- // 8 192 8 4% // 768 18,264 2,021 11% // 3,784,212 80,978,992 17,039,020 21% // // The encoded form also has the advantage of being a contiguous block of // memory. // // REQUIRES: "encoder" uses the default constructor, so that its buffer // can be enlarged as necessary by calling Ensure(int). void Encode(Encoder* encoder) const; // Decodes an S2ShapeIndex, returning true on success. // // This method does not decode the S2Shape objects in the index; this is // the responsibility of the client-provided function "shape_factory" // (see s2shapeutil_coding.h). Example usage: // // index.Init(decoder, s2shapeutil::LazyDecodeShapeFactory(decoder)); // // Note that the S2Shape vector must be encoded *before* the S2ShapeIndex in // this example. bool Init(Decoder* decoder, const ShapeFactory& shape_factory); class Iterator final : public IteratorBase { public: // Default constructor; must be followed by a call to Init(). Iterator(); // Constructs an iterator positioned as specified. By default iterators // are unpositioned, since this avoids an extra seek in this situation // where one of the seek methods (such as Locate) is immediately called. // // If you want to position the iterator at the beginning, e.g. in order to // loop through the entire index, do this instead: // // for (MutableS2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN); // !it.done(); it.Next()) { ... } explicit Iterator(const MutableS2ShapeIndex* index, InitialPosition pos = UNPOSITIONED); // Initializes an iterator for the given MutableS2ShapeIndex. 
This method // may also be called in order to restore an iterator to a valid state // after the underlying index has been updated (although it is usually // easier just to declare a new iterator whenever required, since iterator // construction is cheap). void Init(const MutableS2ShapeIndex* index, InitialPosition pos = UNPOSITIONED); // Initialize an iterator for the given MutableS2ShapeIndex without // applying any pending updates. This can be used to observe the actual // current state of the index without modifying it in any way. void InitStale(const MutableS2ShapeIndex* index, InitialPosition pos = UNPOSITIONED); // Inherited non-virtual methods: // S2CellId id() const; // bool done() const; // S2Point center() const; const S2ShapeIndexCell& cell() const; // IteratorBase API: void Begin() override; void Finish() override; void Next() override; bool Prev() override; void Seek(S2CellId target) override; bool Locate(const S2Point& target) override; CellRelation Locate(S2CellId target) override; protected: const S2ShapeIndexCell* GetCell() const override; std::unique_ptr Clone() const override; void Copy(const IteratorBase& other) override; private: void Refresh(); // Updates the IteratorBase fields. const MutableS2ShapeIndex* index_; CellMap::const_iterator iter_, end_; }; // Takes ownership of the given shape and adds it to the index. Also // assigns a unique id to the shape (shape->id()) and returns that id. // Shape ids are assigned sequentially starting from 0 in the order shapes // are added. Invalidates all iterators and their associated data. // // Note that this method is not affected by S2MemoryTracker, i.e. shapes can // continue to be added even once the specified limit has been reached. int Add(std::unique_ptr shape); // Removes the given shape from the index and return ownership to the caller. // Invalidates all iterators and their associated data. std::unique_ptr Release(int shape_id); // Resets the index to its original state and returns ownership of all // shapes to the caller. This method is much more efficient than removing // all shapes one at a time. std::vector> ReleaseAll(); // Resets the index to its original state and deletes all shapes. Any // options specified via Init() are preserved. void Clear(); // Returns the number of bytes currently occupied by the index (including any // unused space at the end of vectors, etc). It has the same thread safety // as the other "const" methods (see introduction). size_t SpaceUsed() const override; // Calls to Add() and Release() are normally queued and processed on the // first subsequent query (in a thread-safe way). Building the index lazily // in this way has several advantages, the most important of which is that // sometimes there *is* no subsequent query and the index doesn't need to be // built at all. // // In contrast, ForceBuild() causes any pending updates to be applied // immediately. It is thread-safe and may be called simultaneously with // other "const" methods (see notes on thread safety above). Similarly this // method is "const" since it does not modify the visible index contents. // // ForceBuild() should not normally be called since it prevents lazy index // construction (which is usually benficial). Some reasons to use it // include: // // - To exclude the cost of building the index from benchmark results. // - To ensure that the first subsequent query is as fast as possible. 
// - To ensure that the index can be built successfully without exceeding a // specified S2MemoryTracker limit (see the constructor for details). // // Note that this method is thread-safe. void ForceBuild() const; // Returns true if there are no pending updates that need to be applied. // This can be useful to avoid building the index unnecessarily, or for // choosing between two different algorithms depending on whether the index // is available. // // The returned index status may be slightly out of date if the index was // built in a different thread. This is fine for the intended use (as an // efficiency hint), but it should not be used by internal methods (see // MaybeApplyUpdates). bool is_fresh() const; protected: std::unique_ptr NewIterator(InitialPosition pos) const override; private: friend class EncodedS2ShapeIndex; friend class Iterator; friend class MutableS2ShapeIndexTest; friend class S2Stats; struct BatchDescriptor; class BatchGenerator; struct ClippedEdge; class EdgeAllocator; struct FaceEdge; class InteriorTracker; struct RemovedShape; using ShapeEdgeId = s2shapeutil::ShapeEdgeId; using ShapeIdSet = std::vector; // When adding a new encoding, be aware that old binaries will not be able // to decode it. static constexpr unsigned char kCurrentEncodingVersionNumber = 0; // Internal methods are documented with their definitions. bool is_shape_being_removed(int shape_id) const; void MarkIndexStale(); void MaybeApplyUpdates() const; void ApplyUpdatesThreadSafe(); void ApplyUpdatesInternal(); std::vector GetUpdateBatches() const; void ReserveSpace(const BatchDescriptor& batch, std::vector all_edges[6]); void AddShape(const S2Shape* shape, int edges_begin, int edges_end, std::vector all_edges[6], InteriorTracker* tracker) const; void RemoveShape(const RemovedShape& removed, std::vector all_edges[6], InteriorTracker* tracker) const; void FinishPartialShape(int shape_id); void AddFaceEdge(FaceEdge* edge, std::vector all_edges[6]) const; void UpdateFaceEdges(int face, const std::vector& face_edges, InteriorTracker* tracker); S2CellId ShrinkToFit(const S2PaddedCell& pcell, const R2Rect& bound) const; void SkipCellRange(S2CellId begin, S2CellId end, InteriorTracker* tracker, EdgeAllocator* alloc, bool disjoint_from_index); void UpdateEdges(const S2PaddedCell& pcell, std::vector* edges, InteriorTracker* tracker, EdgeAllocator* alloc, bool disjoint_from_index); void AbsorbIndexCell(const S2PaddedCell& pcell, const Iterator& iter, std::vector* edges, InteriorTracker* tracker, EdgeAllocator* alloc); int GetEdgeMaxLevel(const S2Shape::Edge& edge) const; static int CountShapes(const std::vector& edges, const ShapeIdSet& cshape_ids); bool MakeIndexCell(const S2PaddedCell& pcell, const std::vector& edges, InteriorTracker* tracker); static void TestAllEdges(const std::vector& edges, InteriorTracker* tracker); inline static const ClippedEdge* UpdateBound(const ClippedEdge* edge, int u_end, double u, int v_end, double v, EdgeAllocator* alloc); static const ClippedEdge* ClipUBound(const ClippedEdge* edge, int u_end, double u, EdgeAllocator* alloc); static const ClippedEdge* ClipVBound(const ClippedEdge* edge, int v_end, double v, EdgeAllocator* alloc); static void ClipVAxis(const ClippedEdge* edge, const R1Interval& middle, std::vector child_edges[2], EdgeAllocator* alloc); // The amount by which cells are "padded" to compensate for numerical errors // when clipping line segments to cell boundaries. static const double kCellPadding; // The shapes in the index, accessed by their shape id. 
Removed shapes are // replaced by nullptr pointers. std::vector> shapes_; // A map from S2CellId to the set of clipped shapes that intersect that // cell. The cell ids cover a set of non-overlapping regions on the // sphere. Note that this field is updated lazily (see below). Const // methods *must* call MaybeApplyUpdates() before accessing this field. // (The easiest way to achieve this is simply to use an Iterator.) CellMap cell_map_; // The options supplied for this index. Options options_; // The id of the first shape that has been queued for addition but not // processed yet. int pending_additions_begin_ = 0; // The representation of an edge that has been queued for removal. struct RemovedShape { int32 shape_id; bool has_interior; // Belongs to a shape of dimension 2. bool contains_tracker_origin; std::vector edges; }; // The set of shapes that have been queued for removal but not processed // yet. Note that we need to copy the edge data since the caller is free to // destroy the shape once Release() has been called. This field is present // only when there are removed shapes to process (to save memory). std::unique_ptr> pending_removals_; // Additions and removals are queued and processed on the first subsequent // query. There are several reasons to do this: // // - It is significantly more efficient to process updates in batches. // - Often the index will never be queried, in which case we can save both // the time and memory required to build it. Examples: // + S2Loops that are created simply to pass to an S2Polygon. (We don't // need the S2Loop index, because S2Polygon builds its own index.) // + Applications that load a database of geometry and then query only // a small fraction of it. // + Applications that only read and write geometry (Decode/Encode). // // The main drawback is that we need to go to some extra work to ensure that // "const" methods are still thread-safe. Note that the goal is *not* to // make this class thread-safe in general, but simply to hide the fact that // we defer some of the indexing work until query time. // // The textbook approach to this problem would be to use a mutex and a // condition variable. Unfortunately pthread mutexes are huge (40 bytes). // Instead we use spinlock (which is only 4 bytes) to guard a few small // fields representing the current update status, and only create additional // state while the update is actually occurring. mutable SpinLock lock_; enum IndexStatus { STALE, // There are pending updates. UPDATING, // Updates are currently being applied. FRESH, // There are no pending updates. }; // Reads and writes to this field are guarded by "lock_". std::atomic index_status_{FRESH}; // UpdateState holds temporary data related to thread synchronization. It // is only allocated while updates are being applied. struct UpdateState { // This mutex is used as a condition variable. It is locked by the // updating thread for the entire duration of the update; other threads // lock it in order to wait until the update is finished. absl::Mutex wait_mutex; // The number of threads currently waiting on "wait_mutex_". The // UpdateState can only be freed when this number reaches zero. // // Reads and writes to this field are guarded by "lock_". int num_waiting; UpdateState() : num_waiting(0) { } ~UpdateState() { S2_DCHECK_EQ(0, num_waiting); } }; std::unique_ptr update_state_; S2MemoryTracker::Client mem_tracker_; // Documented in the .cc file. 
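  // The deferred-update scheme described above gives this class the same
  // thread-safety contract as std::vector: const methods may be called
  // concurrently, non-const methods may not.  A minimal usage sketch (the
  // "shape" variable below is hypothetical):
  //
  //   MutableS2ShapeIndex index;
  //   index.Add(std::move(shape));   // mutations from a single thread only
  //   index.ForceBuild();            // optional; otherwise the index is
  //                                  // built lazily on the first query
  //   // From here on any number of threads may run const queries, e.g.:
  //   MutableS2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN);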
void UnlockAndSignal() ABSL_UNLOCK_FUNCTION(lock_) ABSL_UNLOCK_FUNCTION(update_state_->wait_mutex); MutableS2ShapeIndex(const MutableS2ShapeIndex&) = delete; void operator=(const MutableS2ShapeIndex&) = delete; }; // The following flag can be used to limit the amount of temporary memory used // when building an S2ShapeIndex. See the .cc file for details. // // DEFAULT: 100 MB S2_DECLARE_int64(s2shape_index_tmp_memory_budget); ////////////////// Implementation details follow //////////////////// // A BatchDescriptor represents a set of pending updates that will be applied // at the same time. The batch consists of all edges in (shape id, edge id) // order from "begin" (inclusive) to "end" (exclusive). Note that the last // shape in a batch may have only some of its edges added. The first batch // also implicitly includes all shapes being removed. "num_edges" is the // total number of edges that will be added or removed in this batch. struct MutableS2ShapeIndex::BatchDescriptor { // REQUIRES: If end.edge_id != 0, it must refer to a valid edge. ShapeEdgeId begin, end; int num_edges; }; // The purpose of BatchGenerator is to divide large updates into batches such // that all batches use approximately the same amount of high-water memory. // This class is defined here so that it can be tested independently. class MutableS2ShapeIndex::BatchGenerator { public: // Given the total number of edges that will be removed and added, prepares // to divide the edges into batches. "shape_id_begin" identifies the first // shape whose edges will be added. BatchGenerator(int num_edges_removed, int num_edges_added, int shape_id_begin); // Indicates that the given shape will be added to the index. Shapes with // few edges will be grouped together into a single batch, while shapes with // many edges will be split over several batches if necessary. void AddShape(int shape_id, int num_edges); // Returns a vector describing each batch. This method should be called // once all shapes have been added. std::vector Finish(); private: // Returns a vector indicating the maximum number of edges in each batch. // (The actual batch sizes are adjusted later in order to avoid splitting // shapes between batches unnecessarily.) static std::vector GetMaxBatchSizes(int num_edges_removed, int num_edges_added); // Returns the maximum number of edges in the current batch. int max_batch_size() const { return max_batch_sizes_[batch_index_]; } // Returns the maximum number of edges in the next batch. int next_max_batch_size() const { return max_batch_sizes_[batch_index_ + 1]; } // Adds the given number of edges to the current batch. void ExtendBatch(int num_edges) { batch_size_ += num_edges; } // Adds the given number of edges to the current batch, ending with the edge // just before "batch_end", and then starts a new batch. void FinishBatch(int num_edges, ShapeEdgeId batch_end); // A vector representing the ideal number of edges in each batch; the batch // sizes gradually decrease to ensure that each batch uses approximately the // same total amount of memory as the index grows. The actual batch sizes // are then adjusted based on how many edges each shape has in order to // avoid splitting shapes between batches unnecessarily. std::vector max_batch_sizes_; // The maximum size of the current batch is determined by how many edges // have been added to the index so far. For example if GetBatchSizes() // returned {100, 70, 50, 30} and we have added 0 edges, the current batch // size is 100. 
But if we have already added 90 edges then the current // batch size would be 70, and if have added 150 edges the batch size would // be 50. We keep track of (1) the current index into batch_sizes and (2) // the number of edges remaining before we increment the batch index. int batch_index_ = 0; int batch_index_edges_left_ = 0; ShapeEdgeId batch_begin_; // The start of the current batch. int shape_id_end_; // One beyond the last shape to be added. int batch_size_ = 0; // The number of edges in the current batch. std::vector batches_; // The completed batches so far. }; inline MutableS2ShapeIndex::Iterator::Iterator() : index_(nullptr) { } inline MutableS2ShapeIndex::Iterator::Iterator( const MutableS2ShapeIndex* index, InitialPosition pos) { Init(index, pos); } inline void MutableS2ShapeIndex::Iterator::Init( const MutableS2ShapeIndex* index, InitialPosition pos) { index->MaybeApplyUpdates(); InitStale(index, pos); } inline void MutableS2ShapeIndex::Iterator::InitStale( const MutableS2ShapeIndex* index, InitialPosition pos) { index_ = index; end_ = index_->cell_map_.end(); if (pos == BEGIN) { iter_ = index_->cell_map_.begin(); } else { iter_ = end_; } Refresh(); } inline const S2ShapeIndexCell& MutableS2ShapeIndex::Iterator::cell() const { // Since MutableS2ShapeIndex always sets the "cell_" field, we can skip the // logic in the base class that conditionally calls GetCell(). return *raw_cell(); } inline void MutableS2ShapeIndex::Iterator::Refresh() { if (iter_ == end_) { set_finished(); } else { set_state(iter_->first, iter_->second); } } inline void MutableS2ShapeIndex::Iterator::Begin() { // Make sure that the index has not been modified since Init() was called. S2_DCHECK(index_->is_fresh()); iter_ = index_->cell_map_.begin(); Refresh(); } inline void MutableS2ShapeIndex::Iterator::Finish() { iter_ = end_; Refresh(); } inline void MutableS2ShapeIndex::Iterator::Next() { S2_DCHECK(!done()); ++iter_; Refresh(); } inline bool MutableS2ShapeIndex::Iterator::Prev() { if (iter_ == index_->cell_map_.begin()) return false; --iter_; Refresh(); return true; } inline void MutableS2ShapeIndex::Iterator::Seek(S2CellId target) { iter_ = index_->cell_map_.lower_bound(target); Refresh(); } inline std::unique_ptr MutableS2ShapeIndex::NewIterator(InitialPosition pos) const { return absl::make_unique(this, pos); } inline void MutableS2ShapeIndex::ForceBuild() const { MaybeApplyUpdates(); } inline bool MutableS2ShapeIndex::is_fresh() const { return index_status_.load(std::memory_order_relaxed) == FRESH; } // Given that the given shape is being updated, return true if it is being // removed (as opposed to being added). inline bool MutableS2ShapeIndex::is_shape_being_removed(int shape_id) const { // All shape ids being removed are less than all shape ids being added. return shape_id < pending_additions_begin_; } // Ensure that any pending updates have been applied. This method must be // called before accessing the cell_map_ field, even if the index_status_ // appears to be FRESH, because a memory barrier is required in order to // ensure that all the index updates are visible if the updates were done in // another thread. inline void MutableS2ShapeIndex::MaybeApplyUpdates() const { // To avoid acquiring and releasing the spinlock on every query, we use // atomic operations when testing whether the status is FRESH and when // updating the status to be FRESH. This guarantees that any thread that // sees a status of FRESH will also see the corresponding index updates. 
if (index_status_.load(std::memory_order_acquire) != FRESH) { const_cast(this)->ApplyUpdatesThreadSafe(); } } #endif // S2_MUTABLE_S2SHAPE_INDEX_H_ s2geometry-0.10.0/src/s2/mutable_s2shape_index_test.cc000066400000000000000000000745071422156367100226420ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/mutable_s2shape_index.h" #include #include #include #include #include #include #include #include #include #include "absl/flags/reflection.h" #include "absl/flags/flag.h" #include "absl/memory/memory.h" #include "absl/strings/str_format.h" #include "s2/base/commandlineflags.h" #include "s2/base/logging.h" #include "s2/base/log_severity.h" #include "s2/r2.h" #include "s2/r2rect.h" #include "s2/s1angle.h" #include "s2/s2cap.h" #include "s2/s2cell.h" #include "s2/s2cell_id.h" #include "s2/s2cell_union.h" #include "s2/s2debug.h" #include "s2/s2edge_clipping.h" #include "s2/s2edge_crosser.h" #include "s2/s2edge_distances.h" #include "s2/s2edge_vector_shape.h" #include "s2/s2error.h" #include "s2/s2lax_polygon_shape.h" #include "s2/s2loop.h" #include "s2/s2point_vector_shape.h" #include "s2/s2pointutil.h" #include "s2/s2polygon.h" #include "s2/s2shapeutil_coding.h" #include "s2/s2shapeutil_contains_brute_force.h" #include "s2/s2shapeutil_testing.h" #include "s2/s2shapeutil_visit_crossing_edge_pairs.h" #include "s2/s2testing.h" #include "s2/s2text_format.h" #include "s2/thread_testing.h" using absl::make_unique; using absl::WrapUnique; using s2textformat::MakePolylineOrDie; using std::string; using std::unique_ptr; using std::vector; S2_DECLARE_double(s2shape_index_min_short_edge_fraction); S2_DECLARE_double(s2shape_index_cell_size_to_long_edge_ratio); class MutableS2ShapeIndexTest : public ::testing::Test { protected: // This test harness owns a MutableS2ShapeIndex for convenience. MutableS2ShapeIndex index_; // Verifies that that every cell of the index contains the correct edges, and // that no cells are missing from the index. The running time of this // function is quadratic in the number of edges. void QuadraticValidate(); // Given an edge and a cell id, determines whether or not the edge should be // present in that cell and verify that this matches "index_has_edge". void ValidateEdge(const S2Point& a, const S2Point& b, S2CellId id, bool index_has_edge); // Given a shape and a cell id, determines whether or not the shape contains // the cell center and verify that this matches "index_contains_center". void ValidateInterior(const S2Shape* shape, S2CellId id, bool index_contains_center); // Verifies that the index can be encoded and decoded without change. void TestEncodeDecode(); using BatchDescriptor = MutableS2ShapeIndex::BatchDescriptor; // Converts the given vector of batches to a human-readable form. 
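// For example, a batch that starts at (shape 7, edge 0), ends just before
// (shape 8, edge 0), and contains 20 edges is rendered as "(7:0, 8:0, 20)".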
static string ToString(const vector& batches); // Verifies that removing and adding the given combination of shapes with // the given memory budget yields the expected vector of batches. void TestBatchGenerator( int num_edges_removed, const vector& shape_edges_added, int64 tmp_memory_budget, int shape_id_begin, const vector& expected_batches); }; void MutableS2ShapeIndexTest::QuadraticValidate() { // Iterate through a sequence of nonoverlapping cell ids that cover the // sphere and include as a subset all the cell ids used in the index. For // each cell id, verify that the expected set of edges is present. // "min_cellid" is the first S2CellId that has not been validated yet. S2CellId min_cellid = S2CellId::Begin(S2CellId::kMaxLevel); for (MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); ; it.Next()) { // Generate a list of S2CellIds ("skipped cells") that cover the gap // between the last cell we validated and the next cell in the index. S2CellUnion skipped; if (!it.done()) { S2CellId cellid = it.id(); EXPECT_GE(cellid, min_cellid); skipped.InitFromBeginEnd(min_cellid, cellid.range_min()); min_cellid = cellid.range_max().next(); } else { // Validate the empty cells beyond the last cell in the index. skipped.InitFromBeginEnd(min_cellid, S2CellId::End(S2CellId::kMaxLevel)); } // Iterate through all the shapes, simultaneously validating the current // index cell and all the skipped cells. int num_edges = 0; // all edges in the cell int num_short_edges = 0; // "short" edges int num_containing_shapes = 0; // shapes containing cell's entry vertex for (int id = 0; id < index_.num_shape_ids(); ++id) { const S2Shape* shape = index_.shape(id); const S2ClippedShape* clipped = nullptr; if (!it.done()) clipped = it.cell().find_clipped(id); // First check that contains_center() is set correctly. for (S2CellId skipped_id : skipped) { ValidateInterior(shape, skipped_id, false); } if (!it.done()) { bool contains_center = clipped && clipped->contains_center(); ValidateInterior(shape, it.id(), contains_center); S2PaddedCell pcell(it.id(), MutableS2ShapeIndex::kCellPadding); if (shape != nullptr) { num_containing_shapes += s2shapeutil::ContainsBruteForce(*shape, pcell.GetEntryVertex()); } } // If this shape has been released, it should not be present at all. if (shape == nullptr) { EXPECT_EQ(nullptr, clipped); continue; } // Otherwise check that the appropriate edges are present. for (int e = 0; e < shape->num_edges(); ++e) { auto edge = shape->edge(e); for (int j = 0; j < skipped.num_cells(); ++j) { ValidateEdge(edge.v0, edge.v1, skipped.cell_id(j), false); } if (!it.done()) { bool has_edge = clipped && clipped->ContainsEdge(e); ValidateEdge(edge.v0, edge.v1, it.id(), has_edge); int max_level = index_.GetEdgeMaxLevel(edge); if (has_edge) { ++num_edges; if (it.id().level() < max_level) ++num_short_edges; } } } } // This mirrors the calculation in MutableS2ShapeIndex::MakeIndexCell(). // It is designed to ensure that the index size is always linear in the // number of indexed edges. int max_short_edges = std::max( index_.options().max_edges_per_cell(), static_cast( absl::GetFlag(FLAGS_s2shape_index_min_short_edge_fraction) * (num_edges + num_containing_shapes))); EXPECT_LE(num_short_edges, max_short_edges); if (it.done()) break; } } // Verify that "index_has_edge" is true if and only if the edge AB intersects // the given cell id. 
void MutableS2ShapeIndexTest::ValidateEdge(const S2Point& a, const S2Point& b, S2CellId id, bool index_has_edge) { // Expand or shrink the padding slightly to account for errors in the // function we use to test for intersection (IntersectsRect). double padding = MutableS2ShapeIndex::kCellPadding; padding += (index_has_edge ? 1 : -1) * S2::kIntersectsRectErrorUVDist; R2Rect bound = id.GetBoundUV().Expanded(padding); R2Point a_uv, b_uv; EXPECT_EQ(S2::ClipToPaddedFace(a, b, id.face(), padding, &a_uv, &b_uv) && S2::IntersectsRect(a_uv, b_uv, bound), index_has_edge); } void MutableS2ShapeIndexTest::ValidateInterior( const S2Shape* shape, S2CellId id, bool index_contains_center) { if (shape == nullptr) { EXPECT_FALSE(index_contains_center); } else { EXPECT_EQ(s2shapeutil::ContainsBruteForce(*shape, id.ToPoint()), index_contains_center); } } void MutableS2ShapeIndexTest::TestEncodeDecode() { Encoder encoder; index_.Encode(&encoder); Decoder decoder(encoder.base(), encoder.length()); MutableS2ShapeIndex index2; ASSERT_TRUE(index2.Init(&decoder, s2shapeutil::WrappedShapeFactory(&index_))); s2testing::ExpectEqual(index_, index2); } /*static*/ string MutableS2ShapeIndexTest::ToString( const vector& batches) { string result; for (const auto& batch : batches) { if (!result.empty()) result += ", "; absl::StrAppendFormat(&result, "(%d:%d, %d:%d, %d)", batch.begin.shape_id, batch.begin.edge_id, batch.end.shape_id, batch.end.edge_id, batch.num_edges); } return result; } void MutableS2ShapeIndexTest::TestBatchGenerator( int num_edges_removed, const vector& shape_edges_added, int64 tmp_memory_budget, int shape_id_begin, const vector& expected_batches) { absl::FlagSaver fs; absl::SetFlag(&FLAGS_s2shape_index_tmp_memory_budget, tmp_memory_budget); int num_edges_added = 0; for (auto n : shape_edges_added) num_edges_added += n; MutableS2ShapeIndex::BatchGenerator bgen(num_edges_removed, num_edges_added, shape_id_begin); for (int i = 0; i < shape_edges_added.size(); ++i) { bgen.AddShape(shape_id_begin + i, shape_edges_added[i]); } auto actual_batches = bgen.Finish(); EXPECT_EQ(ToString(actual_batches), ToString(expected_batches)); } namespace { void TestIteratorMethods(const MutableS2ShapeIndex& index) { MutableS2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN); EXPECT_FALSE(it.Prev()); it.Finish(); EXPECT_TRUE(it.done()); vector ids; MutableS2ShapeIndex::Iterator it2(&index); S2CellId min_cellid = S2CellId::Begin(S2CellId::kMaxLevel); for (it.Begin(); !it.done(); it.Next()) { S2CellId cellid = it.id(); auto skipped = S2CellUnion::FromBeginEnd(min_cellid, cellid.range_min()); for (S2CellId skipped_id : skipped) { EXPECT_FALSE(it2.Locate(skipped_id.ToPoint())); EXPECT_EQ(S2ShapeIndex::DISJOINT, it2.Locate(skipped_id)); it2.Begin(); it2.Seek(skipped_id); EXPECT_EQ(cellid, it2.id()); } if (!ids.empty()) { it2 = it; EXPECT_TRUE(it2.Prev()); EXPECT_EQ(ids.back(), it2.id()); it2.Next(); EXPECT_EQ(cellid, it2.id()); it2.Seek(ids.back()); EXPECT_EQ(ids.back(), it2.id()); } it2.Begin(); EXPECT_EQ(cellid.ToPoint(), it.center()); EXPECT_TRUE(it2.Locate(it.center())); EXPECT_EQ(cellid, it2.id()); it2.Begin(); EXPECT_EQ(S2ShapeIndex::INDEXED, it2.Locate(cellid)); EXPECT_EQ(cellid, it2.id()); if (!cellid.is_face()) { it2.Begin(); EXPECT_EQ(S2ShapeIndex::SUBDIVIDED, it2.Locate(cellid.parent())); EXPECT_LE(it2.id(), cellid); EXPECT_GE(it2.id(), cellid.parent().range_min()); } if (!cellid.is_leaf()) { for (int i = 0; i < 4; ++i) { it2.Begin(); EXPECT_EQ(S2ShapeIndex::INDEXED, it2.Locate(cellid.child(i))); EXPECT_EQ(cellid, 
it2.id()); } } ids.push_back(cellid); min_cellid = cellid.range_max().next(); } } // NOTE(ericv): The tests below are all somewhat fragile since they depend on // the internal BatchGenerator heuristics; if these heuristics change // (including constants) then the tests below may need to change as well. TEST_F(MutableS2ShapeIndexTest, RemoveFullPolygonBatch) { TestBatchGenerator(0, {}, 100 /*bytes*/, 7, {{{7, 0}, {7, 0}, 0}}); } TEST_F(MutableS2ShapeIndexTest, AddFullPolygonBatch) { TestBatchGenerator(0, {0}, 100 /*bytes*/, 7, {{{7, 0}, {8, 0}, 0}}); } TEST_F(MutableS2ShapeIndexTest, RemoveManyEdgesInOneBatch) { // Test removing more edges than would normally fit in a batch. For good // measure we also add two full polygons in the same batch. TestBatchGenerator(1000, {0, 0}, 100 /*bytes*/, 7, {{{7, 0}, {9, 0}, 1000}}); } TEST_F(MutableS2ShapeIndexTest, RemoveAndAddEdgesInOneBatch) { // Test removing and adding edges in one batch. TestBatchGenerator(3, {4, 5}, 10000 /*bytes*/, 7, {{{7, 0}, {9, 0}, 12}}); } TEST_F(MutableS2ShapeIndexTest, RemoveAndAddEdgesInTwoBatches) { // Test removing many edges and then adding a few. TestBatchGenerator(1000, {3}, 1000 /*bytes*/, 7, {{{7, 0}, {7, 0}, 1000}, {{7, 0}, {8, 0}, 3}}); } TEST_F(MutableS2ShapeIndexTest, RemoveAndAddEdgesAndFullPolygonsInTwoBatches) { // Like the above, but also add two full polygons such that one polygon is // processed in each batch. TestBatchGenerator(1000, {0, 3, 0}, 1000 /*bytes*/, 7, {{{7, 0}, {8, 0}, 1000}, {{8, 0}, {10, 0}, 3}}); } TEST_F(MutableS2ShapeIndexTest, SeveralShapesInOneBatch) { // Test adding several shapes in one batch. TestBatchGenerator(0, {3, 4, 5}, 10000 /*bytes*/, 7, {{{7, 0}, {10, 0}, 12}}); } TEST_F(MutableS2ShapeIndexTest, GroupSmallShapesIntoBatches) { // Test adding several small shapes that must be split into batches. // 10000 bytes ~= temporary space to process 48 edges. TestBatchGenerator(0, {20, 20, 20, 20, 20}, 10000 /*bytes*/, 7, {{{7, 0}, {9, 0}, 40}, {{9, 0}, {11, 0}, 40}, {{11, 0}, {12, 0}, 20}}); } TEST_F(MutableS2ShapeIndexTest, AvoidPartialShapeInBatch) { // Test adding a small shape followed by a large shape that won't fit in the // same batch as the small shape, but will fit in its own separate batch. // 10000 bytes ~= temporary space to process 48 edges. TestBatchGenerator(0, {20, 40, 20}, 10000 /*bytes*/, 7, {{{7, 0}, {8, 0}, 20}, {{8, 0}, {9, 0}, 40}, {{9, 0}, {10, 0}, 20}}); } TEST_F(MutableS2ShapeIndexTest, SplitShapeIntoTwoBatches) { // Test adding a few small shapes, then a large shape that can be split // across the remainder of the first batch plus the next batch. The first // two batches should have the same amount of remaining space relative to // their maximum size. (For 10000 bytes of temporary space, the ideal batch // sizes are 48, 46, 45.) // // Note that we need a separate batch for the full polygon at the end, even // though it has no edges, because partial shapes must always be the last // shape in their batch. TestBatchGenerator(0, {20, 60, 0}, 10000 /*bytes*/, 7, {{{7, 0}, {8, 21}, 41}, {{8, 21}, {9, 0}, 39}, {{9, 0}, {10, 0}, 0}}); } TEST_F(MutableS2ShapeIndexTest, RemoveEdgesAndAddPartialShapeInSameBatch) { // Test a batch that consists of removing some edges and then adding a // partial shape. We also check that the small shape at the end is put into // its own batch, since partial shapes must be the last shape in their batch. 
TestBatchGenerator(20, {60, 5}, 10000 /*bytes*/, 7, {{{7, 0}, {7, 21}, 41}, {{7, 21}, {8, 0}, 39}, {{8, 0}, {9, 0}, 5}}); } TEST_F(MutableS2ShapeIndexTest, SplitShapeIntoManyBatches) { // Like the above except that the shape is split into 10 batches. With // 10000 bytes of temporary space, the ideal batch sizes are 63, 61, 59, 57, // 55, 53, 51, 49, 48, 46. The first 8 batches are as full as possible, // while the last two batches have the same amount of remaining space // relative to their ideal size. There is also a small batch at the end. TestBatchGenerator(0, {20, 500, 5}, 10000 /*bytes*/, 7, {{{7, 0}, {8, 43}, 63}, {{8, 43}, {8, 104}, 61}, {{8, 104}, {8, 163}, 59}, {{8, 163}, {8, 220}, 57}, {{8, 220}, {8, 275}, 55}, {{8, 275}, {8, 328}, 53}, {{8, 328}, {8, 379}, 51}, {{8, 379}, {8, 428}, 49}, {{8, 428}, {8, 465}, 37}, {{8, 465}, {9, 0}, 35}, {{9, 0}, {10, 0}, 5}}); } TEST_F(MutableS2ShapeIndexTest, SpaceUsed) { index_.Add(make_unique(S2Point(1, 0, 0), S2Point(0, 1, 0))); EXPECT_FALSE(index_.is_fresh()); size_t size_before = index_.SpaceUsed(); EXPECT_FALSE(index_.is_fresh()); QuadraticValidate(); size_t size_after = index_.SpaceUsed(); EXPECT_TRUE(index_.is_fresh()); EXPECT_TRUE(size_after > size_before); } TEST_F(MutableS2ShapeIndexTest, NoEdges) { MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); EXPECT_TRUE(it.done()); TestIteratorMethods(index_); TestEncodeDecode(); } TEST_F(MutableS2ShapeIndexTest, OneEdge) { EXPECT_EQ(0, index_.Add(make_unique(S2Point(1, 0, 0), S2Point(0, 1, 0)))); QuadraticValidate(); TestIteratorMethods(index_); TestEncodeDecode(); } TEST_F(MutableS2ShapeIndexTest, ShrinkToFitOptimization) { // This used to trigger a bug in the ShrinkToFit optimization. The loop // below contains almost all of face 0 except for a small region in the // 0/00000 subcell. That subcell is the only one that contains any edges. // This caused the index to be built only in that subcell. However, all the // other cells on that face should also have index entries, in order to // indicate that they are contained by the loop. unique_ptr loop(S2Loop::MakeRegularLoop( S2Point(1, 0.5, 0.5).Normalize(), S1Angle::Degrees(89), 100)); index_.Add(make_unique(loop.get())); QuadraticValidate(); TestEncodeDecode(); } TEST_F(MutableS2ShapeIndexTest, LoopsSpanningThreeFaces) { S2Polygon polygon; const int kNumEdges = 100; // Validation is quadratic // Construct two loops consisting of kNumEdges vertices each, centered // around the cube vertex at the start of the Hilbert curve. S2Testing::ConcentricLoopsPolygon(S2Point(1, -1, -1).Normalize(), 2, kNumEdges, &polygon); vector> loops = polygon.Release(); for (auto& loop : loops) { index_.Add(make_unique(&*loop)); } QuadraticValidate(); TestIteratorMethods(index_); TestEncodeDecode(); } TEST_F(MutableS2ShapeIndexTest, ManyIdenticalEdges) { const int kNumEdges = 100; // Validation is quadratic S2Point a = S2Point(0.99, 0.99, 1).Normalize(); S2Point b = S2Point(-0.99, -0.99, 1).Normalize(); for (int i = 0; i < kNumEdges; ++i) { EXPECT_EQ(i, index_.Add(make_unique(a, b))); } QuadraticValidate(); TestIteratorMethods(index_); TestEncodeDecode(); // Since all edges span the diagonal of a face, no subdivision should // have occurred (with the default index options). for (MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { EXPECT_EQ(0, it.id().level()); } } TEST_F(MutableS2ShapeIndexTest, DegenerateEdge) { // This test verifies that degenerate edges are supported. 
The following // point is a cube face vertex, and so it should be indexed in 3 cells. S2Point a = S2Point(1, 1, 1).Normalize(); auto shape = make_unique(); shape->Add(a, a); index_.Add(std::move(shape)); QuadraticValidate(); TestEncodeDecode(); // Check that exactly 3 index cells contain the degenerate edge. int count = 0; for (MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); !it.done(); it.Next(), ++count) { EXPECT_TRUE(it.id().is_leaf()); EXPECT_EQ(1, it.cell().num_clipped()); EXPECT_EQ(1, it.cell().clipped(0).num_edges()); } EXPECT_EQ(3, count); } TEST_F(MutableS2ShapeIndexTest, ManyTinyEdges) { // This test adds many edges to a single leaf cell, to check that // subdivision stops when no further subdivision is possible. const int kNumEdges = 100; // Validation is quadratic // Construct two points in the same leaf cell. S2Point a = S2CellId(S2Point(1, 0, 0)).ToPoint(); S2Point b = (a + S2Point(0, 1e-12, 0)).Normalize(); auto shape = make_unique(); for (int i = 0; i < kNumEdges; ++i) { shape->Add(a, b); } index_.Add(std::move(shape)); QuadraticValidate(); TestEncodeDecode(); // Check that there is exactly one index cell and that it is a leaf cell. MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); ASSERT_TRUE(!it.done()); EXPECT_TRUE(it.id().is_leaf()); it.Next(); EXPECT_TRUE(it.done()); } TEST_F(MutableS2ShapeIndexTest, SimpleUpdates) { // Add 5 loops one at a time, then release them one at a time, // validating the index at each step. S2Polygon polygon; S2Testing::ConcentricLoopsPolygon(S2Point(1, 0, 0), 5, 20, &polygon); for (int i = 0; i < polygon.num_loops(); ++i) { index_.Add(make_unique(polygon.loop(i))); QuadraticValidate(); } for (int id = 0; id < polygon.num_loops(); ++id) { index_.Release(id); EXPECT_EQ(index_.shape(id), nullptr); QuadraticValidate(); TestEncodeDecode(); } } TEST_F(MutableS2ShapeIndexTest, RandomUpdates) { // Set the temporary memory budget such that at least one shape needs to be // split into multiple update batches (namely, the "5 concentric rings" // polygon below which needs ~25KB of temporary space). absl::FlagSaver fs; absl::SetFlag(&FLAGS_s2shape_index_tmp_memory_budget, 10000); // Allow the seed to be varied from the command line. S2Testing::rnd.Reset(absl::GetFlag(FLAGS_s2_random_seed)); // A few polylines. index_.Add(make_unique( MakePolylineOrDie("0:0, 2:1, 0:2, 2:3, 0:4, 2:5, 0:6"))); index_.Add(make_unique( MakePolylineOrDie("1:0, 3:1, 1:2, 3:3, 1:4, 3:5, 1:6"))); index_.Add(make_unique( MakePolylineOrDie("2:0, 4:1, 2:2, 4:3, 2:4, 4:5, 2:6"))); // A loop that used to trigger an indexing bug. index_.Add(make_unique(S2Loop::MakeRegularLoop( S2Point(1, 0.5, 0.5).Normalize(), S1Angle::Degrees(89), 20))); // Five concentric loops. S2Polygon polygon5; S2Testing::ConcentricLoopsPolygon(S2Point(1, -1, -1).Normalize(), 5, 20, &polygon5); for (int i = 0; i < polygon5.num_loops(); ++i) { index_.Add(make_unique(polygon5.loop(i))); } // Two clockwise loops around S2Cell cube vertices. index_.Add(make_unique(S2Loop::MakeRegularLoop( S2Point(-1, 1, 1).Normalize(), S1Angle::Radians(M_PI - 0.001), 10))); index_.Add(make_unique(S2Loop::MakeRegularLoop( S2Point(-1, -1, -1).Normalize(), S1Angle::Radians(M_PI - 0.001), 10))); // A shape with no edges and no interior. index_.Add(make_unique( make_unique(S2Loop::kEmpty()))); // A shape with no edges that covers the entire sphere. 
index_.Add(make_unique( make_unique(S2Loop::kFull()))); vector> released; vector added(index_.num_shape_ids()); std::iota(added.begin(), added.end(), 0); QuadraticValidate(); TestEncodeDecode(); for (int iter = 0; iter < 100; ++iter) { S2_VLOG(1) << "Iteration: " << iter; // Choose some shapes to add and release. int num_updates = 1 + S2Testing::rnd.Skewed(5); for (int n = 0; n < num_updates; ++n) { if (S2Testing::rnd.OneIn(2) && !added.empty()) { int i = S2Testing::rnd.Uniform(added.size()); S2_VLOG(1) << " Released shape " << added[i] << " (" << index_.shape(added[i]) << ")"; released.push_back(index_.Release(added[i])); added.erase(added.begin() + i); } else if (!released.empty()) { int i = S2Testing::rnd.Uniform(released.size()); S2Shape* shape = released[i].get(); index_.Add(std::move(released[i])); // Changes shape->id(). released.erase(released.begin() + i); added.push_back(shape->id()); S2_VLOG(1) << " Added shape " << shape->id() << " (" << shape << ")"; } } QuadraticValidate(); TestEncodeDecode(); } } // A test that repeatedly updates "index_" in one thread and attempts to // concurrently read the index_ from several other threads. When all threads // have finished reading, the first thread makes another update. // // Note that we only test concurrent read access, since MutableS2ShapeIndex // requires all updates to be single-threaded and not concurrent with any // reads. class LazyUpdatesTest : public s2testing::ReaderWriterTest { public: LazyUpdatesTest() {} void WriteOp() override { index_.Clear(); int num_vertices = 4 * S2Testing::rnd.Skewed(10); // Up to 4K vertices unique_ptr loop(S2Loop::MakeRegularLoop( S2Testing::RandomPoint(), S2Testing::KmToAngle(5), num_vertices)); index_.Add(make_unique(std::move(loop))); } void ReadOp() override { for (MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { continue; // NOLINT } } protected: MutableS2ShapeIndex index_; }; TEST(MutableS2ShapeIndex, ConstMethodsThreadSafe) { // Ensure that lazy updates are thread-safe. In other words, make sure that // nothing bad happens when multiple threads call "const" methods that // cause pending updates to be applied. LazyUpdatesTest test; // The number of readers should be large enough so that it is likely that // several readers will be running at once (with a multiple-core CPU). const int kNumReaders = 8; const int kIters = 100; test.Run(kNumReaders, kIters); } TEST(MutableS2ShapeIndex, MixedGeometry) { // This test used to trigger a bug where the presence of a shape with an // interior could cause shapes that don't have an interior to suddenly // acquire one. This would cause extra S2ShapeIndex cells to be created // that are outside the bounds of the given geometry. vector> polylines; polylines.push_back(MakePolylineOrDie("0:0, 2:1, 0:2, 2:3, 0:4, 2:5, 0:6")); polylines.push_back(MakePolylineOrDie("1:0, 3:1, 1:2, 3:3, 1:4, 3:5, 1:6")); polylines.push_back(MakePolylineOrDie("2:0, 4:1, 2:2, 4:3, 2:4, 4:5, 2:6")); MutableS2ShapeIndex index; for (auto& polyline : polylines) { index.Add(make_unique(std::move(polyline))); } S2Loop loop(S2Cell(S2CellId::Begin(S2CellId::kMaxLevel))); index.Add(make_unique(&loop)); MutableS2ShapeIndex::Iterator it(&index); // No geometry intersects face 1, so there should be no index cells there. 
EXPECT_EQ(S2ShapeIndex::DISJOINT, it.Locate(S2CellId::FromFace(1))); } TEST_F(MutableS2ShapeIndexTest, LinearSpace) { // Build an index that requires FLAGS_s2shape_index_min_short_edge_fraction // to be non-zero in order to use a non-quadratic amount of space. // Uncomment the following line to check whether this test works properly. // FLAGS_s2shape_index_min_short_edge_fraction = 0; // Set the maximum number of "short" edges per cell to 1 so that we can // implement this test using a smaller index. MutableS2ShapeIndex::Options options; options.set_max_edges_per_cell(1); index_.Init(options); // The idea is to create O(n) copies of a single long edge, along with O(n) // clusters of (M + 1) points equally spaced along the long edge, where "M" // is the max_edges_per_cell() parameter. The edges are divided such that // there are equal numbers of long and short edges; this maximizes the index // size when FLAGS_s2shape_index_min_short_edge_fraction is set to zero. const int kNumEdges = 100; // Validation is quadratic int edges_per_cluster = options.max_edges_per_cell() + 1; int num_clusters = (kNumEdges / 2) / edges_per_cluster; // Create the long edges. S2Point a(1, 0, 0), b(0, 1, 0); for (int i = 0; i < kNumEdges / 2; ++i) { index_.Add(make_unique(a, b)); } // Create the clusters of short edges. for (int k = 0; k < num_clusters; ++k) { S2Point p = S2::Interpolate(a, b, k / (num_clusters - 1.0)); vector points(edges_per_cluster, p); index_.Add(make_unique(points)); } QuadraticValidate(); // The number of index cells should not exceed the number of clusters. int cell_count = 0; for (MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { ++cell_count; } EXPECT_LE(cell_count, num_clusters); } TEST_F(MutableS2ShapeIndexTest, LongIndexEntriesBound) { // This test demonstrates that the c2 = 366 upper bound (using default // parameter values) mentioned in the .cc file is achievable. // Set the maximum number of "short" edges per cell to 1 so that we can test // using a smaller index. MutableS2ShapeIndex::Options options; options.set_max_edges_per_cell(1); index_.Init(options); // This is a worst-case edge AB that touches as many cells as possible at // level 30 while still being considered "short" at level 29. We create an // index consisting of two copies of this edge plus a full polygon. S2Point a = S2::FaceSiTitoXYZ(0, 0, (1 << 30) + 0).Normalize(); S2Point b = S2::FaceSiTitoXYZ(0, 0, (1 << 30) + 6).Normalize(); for (int i = 0; i < 2; ++i) { index_.Add(make_unique(a, b)); } index_.Add(make_unique(vector>{{}})); // Count the number of index cells at each level. 
vector counts(S2CellId::kMaxLevel + 1); for (MutableS2ShapeIndex::Iterator it(&index_, S2ShapeIndex::BEGIN); !it.done(); it.Next()) { ++counts[it.id().level()]; } int sum = 0; for (int i = 0; i < counts.size(); ++i) { S2_LOG(INFO) << i << ": " << counts[i]; sum += counts[i]; } EXPECT_EQ(sum, 366); } TEST(S2Shape, user_data) { struct MyData { int x, y; MyData(int _x, int _y) : x(_x), y(_y) {} }; class MyEdgeVectorShape : public S2EdgeVectorShape { public: explicit MyEdgeVectorShape(const MyData& data) : S2EdgeVectorShape(), data_(data) { } const void* user_data() const override { return &data_; } void* mutable_user_data() override { return &data_; } private: MyData data_; }; MyEdgeVectorShape shape(MyData(3, 5)); MyData* data = static_cast(shape.mutable_user_data()); S2_DCHECK_EQ(3, data->x); data->y = 10; S2_DCHECK_EQ(10, static_cast(shape.user_data())->y); } } // namespace s2geometry-0.10.0/src/s2/r1interval.h000066400000000000000000000202431422156367100172530ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_R1INTERVAL_H_ #define S2_R1INTERVAL_H_ #include #include #include #include #include #include "s2/base/logging.h" #include "s2/_fp_contract_off.h" #include "s2/util/math/vector.h" // IWYU pragma: export // An R1Interval represents a closed, bounded interval on the real line. // It is capable of representing the empty interval (containing no points) // and zero-length intervals (containing a single point). // // This class is intended to be copied by value as desired. It uses // the default copy constructor and assignment operator. class R1Interval { public: // Constructor. If lo > hi, the interval is empty. R1Interval(double lo, double hi) : bounds_(lo, hi) {} // The default constructor creates an empty interval. (Any interval where // lo > hi is considered to be empty.) // // Note: Don't construct an interval using the default constructor and // set_lo()/set_hi(), since this technique doesn't work with S1Interval and // is bad programming style anyways. If you need to set both endpoints, use // the constructor above: // // lat_bounds_ = R1Interval(lat_lo, lat_hi); R1Interval() : bounds_(1, 0) {} // Returns an empty interval. static R1Interval Empty() { return R1Interval(); } // Convenience method to construct an interval containing a single point. static R1Interval FromPoint(double p) { return R1Interval(p, p); } // Convenience method to construct the minimal interval containing // the two given points. This is equivalent to starting with an empty // interval and calling AddPoint() twice, but it is more efficient. static R1Interval FromPointPair(double p1, double p2) { if (p1 <= p2) { return R1Interval(p1, p2); } else { return R1Interval(p2, p1); } } // The low bound of the interval. double lo() const { return bounds_[0]; } // The high bound of the interval. double hi() const { return bounds_[1]; } // Methods to modify one endpoint of an existing R1Interval. 
Do not use // these methods if you want to replace both endpoints of the interval; use // a constructor instead. For example: // // *lat_bounds = R1Interval(lat_lo, lat_hi); void set_lo(double p) { bounds_[0] = p; } void set_hi(double p) { bounds_[1] = p; } // Methods that allow the R1Interval to be accessed as a vector. (The // recommended style is to use lo() and hi() whenever possible, but these // methods are useful when the endpoint to be selected is not constant.) double operator[](int i) const { return bounds_[i]; } double& operator[](int i) { return bounds_[i]; } const Vector2_d& bounds() const { return bounds_; } Vector2_d* mutable_bounds() { return &bounds_; } // Return true if the interval is empty, i.e. it contains no points. bool is_empty() const { return lo() > hi(); } // Return the center of the interval. For empty intervals, // the result is arbitrary. double GetCenter() const { return 0.5 * (lo() + hi()); } // Return the length of the interval. The length of an empty interval // is negative. double GetLength() const { return hi() - lo(); } // Returns true if the given point is in the closed interval [lo, hi]. bool Contains(double p) const { return p >= lo() && p <= hi(); } // Returns true if the given point is in the open interval (lo, hi). bool InteriorContains(double p) const { return p > lo() && p < hi(); } // Return true if this interval contains the interval 'y'. bool Contains(const R1Interval& y) const { if (y.is_empty()) return true; return y.lo() >= lo() && y.hi() <= hi(); } // Return true if the interior of this interval contains the entire // interval 'y' (including its boundary). bool InteriorContains(const R1Interval& y) const { if (y.is_empty()) return true; return y.lo() > lo() && y.hi() < hi(); } // Return true if this interval intersects the given interval, // i.e. if they have any points in common. bool Intersects(const R1Interval& y) const { if (lo() <= y.lo()) { return y.lo() <= hi() && y.lo() <= y.hi(); } else { return lo() <= y.hi() && lo() <= hi(); } } // Return true if the interior of this interval intersects // any point of the given interval (including its boundary). bool InteriorIntersects(const R1Interval& y) const { return y.lo() < hi() && lo() < y.hi() && lo() < hi() && y.lo() <= y.hi(); } // Return the Hausdorff distance to the given interval 'y'. For two // R1Intervals x and y, this distance is defined as // h(x, y) = max_{p in x} min_{q in y} d(p, q). double GetDirectedHausdorffDistance(const R1Interval& y) const { if (is_empty()) return 0.0; if (y.is_empty()) return HUGE_VAL; return std::max(0.0, std::max(hi() - y.hi(), y.lo() - lo())); } // Expand the interval so that it contains the given point "p". void AddPoint(double p) { if (is_empty()) { set_lo(p); set_hi(p); } else if (p < lo()) { set_lo(p); } else if (p > hi()) { set_hi(p); } } // Expand the interval so that it contains the given interval "y". void AddInterval(const R1Interval& y) { if (y.is_empty()) return; if (is_empty()) { *this = y; return; } if (y.lo() < lo()) set_lo(y.lo()); if (y.hi() > hi()) set_hi(y.hi()); } // Return the closest point in the interval to the given point "p". // The interval must be non-empty. double Project(double p) const { S2_DCHECK(!is_empty()); return std::max(lo(), std::min(hi(), p)); } // Return an interval that has been expanded on each side by the given // distance "margin". If "margin" is negative, then shrink the interval on // each side by "margin" instead. The resulting interval may be empty. Any // expansion of an empty interval remains empty. 
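  // For example:
  //
  //   R1Interval(1, 4).Expanded(0.5)   // == R1Interval(0.5, 4.5)
  //   R1Interval(1, 4).Expanded(-2)    // empty, since (1 + 2) > (4 - 2)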
R1Interval Expanded(double margin) const { if (is_empty()) return *this; return R1Interval(lo() - margin, hi() + margin); } // Return the smallest interval that contains this interval and the // given interval "y". R1Interval Union(const R1Interval& y) const { if (is_empty()) return y; if (y.is_empty()) return *this; return R1Interval(std::min(lo(), y.lo()), std::max(hi(), y.hi())); } // Return the intersection of this interval with the given interval. // Empty intervals do not need to be special-cased. R1Interval Intersection(const R1Interval& y) const { return R1Interval(std::max(lo(), y.lo()), std::min(hi(), y.hi())); } // Return true if two intervals contain the same set of points. bool operator==(const R1Interval& y) const { return (lo() == y.lo() && hi() == y.hi()) || (is_empty() && y.is_empty()); } // Return true if two intervals do not contain the same set of points. bool operator!=(const R1Interval& y) const { return !operator==(y); } // Return true if this interval can be transformed into the given interval // by moving each endpoint by at most "max_error". The empty interval is // considered to be positioned arbitrarily on the real line, thus any // interval with (length <= 2*max_error) matches the empty interval. bool ApproxEquals(const R1Interval& y, double max_error = 1e-15) const { if (is_empty()) return y.GetLength() <= 2 * max_error; if (y.is_empty()) return GetLength() <= 2 * max_error; return (std::fabs(y.lo() - lo()) <= max_error && std::fabs(y.hi() - hi()) <= max_error); } private: Vector2_d bounds_; }; inline std::ostream& operator<<(std::ostream& os, const R1Interval& x) { return os << "[" << x.lo() << ", " << x.hi() << "]"; } #endif // S2_R1INTERVAL_H_ s2geometry-0.10.0/src/s2/r1interval_test.cc000066400000000000000000000160401422156367100204500ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/r1interval.h" #include #include static void TestIntervalOps(const R1Interval& x, const R1Interval& y, const char* expected) { // Test all of the interval operations on the given pair of intervals. // "expected" is a sequence of "T" and "F" characters corresponding to // the expected results of Contains(), InteriorContains(), Intersects(), // and InteriorIntersects() respectively. EXPECT_EQ(expected[0] == 'T', x.Contains(y)); EXPECT_EQ(expected[1] == 'T', x.InteriorContains(y)); EXPECT_EQ(expected[2] == 'T', x.Intersects(y)); EXPECT_EQ(expected[3] == 'T', x.InteriorIntersects(y)); EXPECT_EQ(x.Contains(y), x.Union(y) == x); EXPECT_EQ(x.Intersects(y), !x.Intersection(y).is_empty()); R1Interval z = x; z.AddInterval(y); EXPECT_EQ(x.Union(y), z); } TEST(R1Interval, TestBasic) { // Constructors and accessors. 
R1Interval unit(0, 1); R1Interval negunit(-1, 0); EXPECT_EQ(0, unit.lo()); EXPECT_EQ(1, unit.hi()); EXPECT_EQ(-1, negunit[0]); EXPECT_EQ(0, negunit[1]); R1Interval ten(0, 0); ten.set_hi(10); EXPECT_EQ(R1Interval(0, 10), ten); ten[0] = -10; EXPECT_EQ(R1Interval(-10, 10), ten); ten[1] = 0; EXPECT_EQ(Vector2_d(-10, 0), ten.bounds()); *ten.mutable_bounds() = Vector2_d(0, 10); EXPECT_EQ(R1Interval(0, 10), ten); // is_empty() R1Interval half(0.5, 0.5); EXPECT_FALSE(unit.is_empty()); EXPECT_FALSE(half.is_empty()); R1Interval empty = R1Interval::Empty(); EXPECT_TRUE(empty.is_empty()); // == and != EXPECT_TRUE(empty == empty); EXPECT_TRUE(unit == unit); EXPECT_TRUE(unit != empty); EXPECT_TRUE(R1Interval(1, 2) != R1Interval(1, 3)); // Check that the default R1Interval is identical to Empty(). R1Interval default_empty; EXPECT_TRUE(default_empty.is_empty()); EXPECT_EQ(empty.lo(), default_empty.lo()); EXPECT_EQ(empty.hi(), default_empty.hi()); // GetCenter(), GetLength() EXPECT_EQ(unit.GetCenter(), 0.5); EXPECT_EQ(half.GetCenter(), 0.5); EXPECT_EQ(negunit.GetLength(), 1.0); EXPECT_EQ(half.GetLength(), 0); EXPECT_LT(empty.GetLength(), 0); // Contains(double), InteriorContains(double) EXPECT_TRUE(unit.Contains(0.5)); EXPECT_TRUE(unit.InteriorContains(0.5)); EXPECT_TRUE(unit.Contains(0)); EXPECT_FALSE(unit.InteriorContains(0)); EXPECT_TRUE(unit.Contains(1)); EXPECT_FALSE(unit.InteriorContains(1)); // Contains(R1Interval), InteriorContains(R1Interval) // Intersects(R1Interval), InteriorIntersects(R1Interval) { SCOPED_TRACE(""); TestIntervalOps(empty, empty, "TTFF"); } { SCOPED_TRACE(""); TestIntervalOps(empty, unit, "FFFF"); } { SCOPED_TRACE(""); TestIntervalOps(unit, half, "TTTT"); } { SCOPED_TRACE(""); TestIntervalOps(unit, unit, "TFTT"); } { SCOPED_TRACE(""); TestIntervalOps(unit, empty, "TTFF"); } { SCOPED_TRACE(""); TestIntervalOps(unit, negunit, "FFTF"); } { SCOPED_TRACE(""); TestIntervalOps(unit, R1Interval(0, 0.5), "TFTT"); } { SCOPED_TRACE(""); TestIntervalOps(half, R1Interval(0, 0.5), "FFTF"); } // AddPoint() R1Interval r = empty; r.AddPoint(5); EXPECT_EQ(5, r.lo()); EXPECT_EQ(5, r.hi()); r.AddPoint(-1); EXPECT_EQ(-1, r.lo()); EXPECT_EQ(5, r.hi()); r.AddPoint(0); EXPECT_EQ(-1, r.lo()); EXPECT_EQ(5, r.hi()); // Project() EXPECT_EQ(0.3, R1Interval(0.1, 0.4).Project(0.3)); EXPECT_EQ(0.1, R1Interval(0.1, 0.4).Project(-7.0)); EXPECT_EQ(0.4, R1Interval(0.1, 0.4).Project(0.6)); // FromPointPair() EXPECT_EQ(R1Interval(4, 4), R1Interval::FromPointPair(4, 4)); EXPECT_EQ(R1Interval(-2, -1), R1Interval::FromPointPair(-1, -2)); EXPECT_EQ(R1Interval(-5, 3), R1Interval::FromPointPair(-5, 3)); // Expanded() EXPECT_EQ(empty, empty.Expanded(0.45)); EXPECT_EQ(R1Interval(-0.5, 1.5), unit.Expanded(0.5)); EXPECT_EQ(R1Interval(0.5, 0.5), unit.Expanded(-0.5)); EXPECT_TRUE(unit.Expanded(-0.51).is_empty()); EXPECT_TRUE(unit.Expanded(-0.51).Expanded(0.51).is_empty()); // Union(), Intersection() EXPECT_EQ(R1Interval(99, 100), R1Interval(99, 100).Union(empty)); EXPECT_EQ(R1Interval(99, 100), empty.Union(R1Interval(99, 100))); EXPECT_TRUE(R1Interval(5, 3).Union(R1Interval(0, -2)).is_empty()); EXPECT_TRUE(R1Interval(0, -2).Union(R1Interval(5, 3)).is_empty()); EXPECT_EQ(unit, unit.Union(unit)); EXPECT_EQ(R1Interval(-1, 1), unit.Union(negunit)); EXPECT_EQ(R1Interval(-1, 1), negunit.Union(unit)); EXPECT_EQ(unit, half.Union(unit)); EXPECT_EQ(half, unit.Intersection(half)); EXPECT_EQ(R1Interval(0, 0), unit.Intersection(negunit)); EXPECT_TRUE(negunit.Intersection(half).is_empty()); EXPECT_TRUE(unit.Intersection(empty).is_empty()); 
EXPECT_TRUE(empty.Intersection(unit).is_empty()); } TEST(R1Interval, ApproxEquals) { // Choose two values kLo and kHi such that it's okay to shift an endpoint by // kLo (i.e., the resulting interval is equivalent) but not by kHi. static const double kLo = 4 * DBL_EPSILON; // < max_error default static const double kHi = 6 * DBL_EPSILON; // > max_error default // Empty intervals. R1Interval empty = R1Interval::Empty(); EXPECT_TRUE(empty.ApproxEquals(empty)); EXPECT_TRUE(R1Interval(0, 0).ApproxEquals(empty)); EXPECT_TRUE(empty.ApproxEquals(R1Interval(0, 0))); EXPECT_TRUE(R1Interval(1, 1).ApproxEquals(empty)); EXPECT_TRUE(empty.ApproxEquals(R1Interval(1, 1))); EXPECT_FALSE(empty.ApproxEquals(R1Interval(0, 1))); EXPECT_TRUE(empty.ApproxEquals(R1Interval(1, 1 + 2*kLo))); EXPECT_FALSE(empty.ApproxEquals(R1Interval(1, 1 + 2*kHi))); // Singleton intervals. EXPECT_TRUE(R1Interval(1, 1).ApproxEquals(R1Interval(1, 1))); EXPECT_TRUE(R1Interval(1, 1).ApproxEquals(R1Interval(1 - kLo, 1 - kLo))); EXPECT_TRUE(R1Interval(1, 1).ApproxEquals(R1Interval(1 + kLo, 1 + kLo))); EXPECT_FALSE(R1Interval(1, 1).ApproxEquals(R1Interval(1 - kHi, 1))); EXPECT_FALSE(R1Interval(1, 1).ApproxEquals(R1Interval(1, 1 + kHi))); EXPECT_TRUE(R1Interval(1, 1).ApproxEquals(R1Interval(1 - kLo, 1 + kLo))); EXPECT_FALSE(R1Interval(0, 0).ApproxEquals(R1Interval(1, 1))); // Other intervals. EXPECT_TRUE(R1Interval(1 - kLo, 2 + kLo).ApproxEquals(R1Interval(1, 2))); EXPECT_TRUE(R1Interval(1 + kLo, 2 - kLo).ApproxEquals(R1Interval(1, 2))); EXPECT_FALSE(R1Interval(1 - kHi, 2 + kLo).ApproxEquals(R1Interval(1, 2))); EXPECT_FALSE(R1Interval(1 + kHi, 2 - kLo).ApproxEquals(R1Interval(1, 2))); EXPECT_FALSE(R1Interval(1 - kLo, 2 + kHi).ApproxEquals(R1Interval(1, 2))); EXPECT_FALSE(R1Interval(1 + kLo, 2 - kHi).ApproxEquals(R1Interval(1, 2))); } s2geometry-0.10.0/src/s2/r2.h000066400000000000000000000014751422156367100155150ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_R2_H_ #define S2_R2_H_ #include "s2/_fp_contract_off.h" #include "s2/util/math/vector.h" // IWYU pragma: export using R2Point = Vector2_d; #endif // S2_R2_H_ s2geometry-0.10.0/src/s2/r2rect.cc000066400000000000000000000053671422156367100165350ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
// // Author: ericv@google.com (Eric Veach) #include "s2/r2rect.h" #include #include "s2/base/logging.h" #include "s2/r1interval.h" #include "s2/r2.h" R2Rect R2Rect::FromCenterSize(const R2Point& center, const R2Point& size) { return R2Rect(R1Interval(center.x() - 0.5 * size.x(), center.x() + 0.5 * size.x()), R1Interval(center.y() - 0.5 * size.y(), center.y() + 0.5 * size.y())); } bool R2Rect::Contains(const R2Rect& other) const { return x().Contains(other.x()) && y().Contains(other.y()); } bool R2Rect::InteriorContains(const R2Rect& other) const { return x().InteriorContains(other.x()) && y().InteriorContains(other.y()); } bool R2Rect::Intersects(const R2Rect& other) const { return x().Intersects(other.x()) && y().Intersects(other.y()); } bool R2Rect::InteriorIntersects(const R2Rect& other) const { return x().InteriorIntersects(other.x()) && y().InteriorIntersects(other.y()); } void R2Rect::AddPoint(const R2Point& p) { bounds_[0].AddPoint(p[0]); bounds_[1].AddPoint(p[1]); } void R2Rect::AddRect(const R2Rect& other) { bounds_[0].AddInterval(other[0]); bounds_[1].AddInterval(other[1]); } R2Point R2Rect::Project(const R2Point& p) const { return R2Point(x().Project(p.x()), y().Project(p.y())); } R2Rect R2Rect::Expanded(const R2Point& margin) const { R1Interval xx = x().Expanded(margin.x()); R1Interval yy = y().Expanded(margin.y()); if (xx.is_empty() || yy.is_empty()) return Empty(); return R2Rect(xx, yy); } R2Rect R2Rect::Union(const R2Rect& other) const { return R2Rect(x().Union(other.x()), y().Union(other.y())); } R2Rect R2Rect::Intersection(const R2Rect& other) const { R1Interval xx = x().Intersection(other.x()); R1Interval yy = y().Intersection(other.y()); if (xx.is_empty() || yy.is_empty()) return Empty(); return R2Rect(xx, yy); } bool R2Rect::ApproxEquals(const R2Rect& other, double max_error) const { return (x().ApproxEquals(other.x(), max_error) && y().ApproxEquals(other.y(), max_error)); } std::ostream& operator<<(std::ostream& os, const R2Rect& r) { return os << "[Lo" << r.lo() << ", Hi" << r.hi() << "]"; } s2geometry-0.10.0/src/s2/r2rect.h000066400000000000000000000214411422156367100163660ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_R2RECT_H_ #define S2_R2RECT_H_ #include #include #include "s2/base/logging.h" #include "s2/_fp_contract_off.h" #include "s2/r1interval.h" #include "s2/r2.h" // An R2Rect represents a closed axis-aligned rectangle in the (x,y) plane. // // This class is intended to be copied by value as desired. It uses // the default copy constructor and assignment operator, however it is // not a "plain old datatype" (POD) because it has virtual functions. class R2Rect { public: // Construct a rectangle from the given lower-left and upper-right points. R2Rect(const R2Point& lo, const R2Point& hi); // Construct a rectangle from the given intervals in x and y. The two // intervals must either be both empty or both non-empty. 
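  // For example, the unit square with its lower-left corner at the origin:
  //
  //   R2Rect rect(R1Interval(0, 1), R1Interval(0, 1));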
R2Rect(const R1Interval& x, const R1Interval& y); // The default constructor creates an empty R2Rect. R2Rect(); // The canonical empty rectangle. Use is_empty() to test for empty // rectangles, since they have more than one representation. static R2Rect Empty(); // Construct a rectangle from a center point and size in each dimension. // Both components of size should be non-negative, i.e. this method cannot // be used to create an empty rectangle. static R2Rect FromCenterSize(const R2Point& center, const R2Point& size); // Convenience method to construct a rectangle containing a single point. static R2Rect FromPoint(const R2Point& p); // Convenience method to construct the minimal bounding rectangle containing // the two given points. This is equivalent to starting with an empty // rectangle and calling AddPoint() twice. Note that it is different than // the R2Rect(lo, hi) constructor, where the first point is always // used as the lower-left corner of the resulting rectangle. static R2Rect FromPointPair(const R2Point& p1, const R2Point& p2); // Accessor methods. const R1Interval& x() const { return bounds_[0]; } const R1Interval& y() const { return bounds_[1]; } R2Point lo() const { return R2Point(x().lo(), y().lo()); } R2Point hi() const { return R2Point(x().hi(), y().hi()); } // Methods that allow the R2Rect to be accessed as a vector. const R1Interval& operator[](int i) const { return bounds_[i]; } R1Interval& operator[](int i) { return bounds_[i]; } // Return true if the rectangle is valid, which essentially just means // that if the bound for either axis is empty then both must be. bool is_valid() const; // Return true if the rectangle is empty, i.e. it contains no points at all. bool is_empty() const; // Return the k-th vertex of the rectangle (k = 0,1,2,3) in CCW order. // Vertex 0 is in the lower-left corner. For convenience, the argument is // reduced modulo 4 to the range [0..3]. R2Point GetVertex(int k) const; // Return the vertex in direction "i" along the x-axis (0=left, 1=right) and // direction "j" along the y-axis (0=down, 1=up). Equivalently, return the // vertex constructed by selecting endpoint "i" of the x-interval (0=lo, // 1=hi) and vertex "j" of the y-interval. R2Point GetVertex(int i, int j) const; // Return the center of the rectangle in (x,y)-space. R2Point GetCenter() const; // Return the width and height of this rectangle in (x,y)-space. Empty // rectangles have a negative width and height. R2Point GetSize() const; // Return true if the rectangle contains the given point. Note that // rectangles are closed regions, i.e. they contain their boundary. bool Contains(const R2Point& p) const; // Return true if and only if the given point is contained in the interior // of the region (i.e. the region excluding its boundary). bool InteriorContains(const R2Point& p) const; // Return true if and only if the rectangle contains the given other // rectangle. bool Contains(const R2Rect& other) const; // Return true if and only if the interior of this rectangle contains all // points of the given other rectangle (including its boundary). bool InteriorContains(const R2Rect& other) const; // Return true if this rectangle and the given other rectangle have any // points in common. bool Intersects(const R2Rect& other) const; // Return true if and only if the interior of this rectangle intersects // any point (including the boundary) of the given other rectangle. bool InteriorIntersects(const R2Rect& other) const; // Expand the rectangle to include the given point. 
The rectangle is // expanded by the minimum amount possible. void AddPoint(const R2Point& p); // Expand the rectangle to include the given other rectangle. This is the // same as replacing the rectangle by the union of the two rectangles, but // is somewhat more efficient. void AddRect(const R2Rect& other); // Return the closest point in the rectangle to the given point "p". // The rectangle must be non-empty. R2Point Project(const R2Point& p) const; // Return a rectangle that has been expanded on each side in the x-direction // by margin.x(), and on each side in the y-direction by margin.y(). If // either margin is empty, then shrink the interval on the corresponding // sides instead. The resulting rectangle may be empty. Any expansion of // an empty rectangle remains empty. R2Rect Expanded(const R2Point& margin) const; R2Rect Expanded(double margin) const; // Return the smallest rectangle containing the union of this rectangle and // the given rectangle. R2Rect Union(const R2Rect& other) const; // Return the smallest rectangle containing the intersection of this // rectangle and the given rectangle. R2Rect Intersection(const R2Rect& other) const; // Return true if two rectangles contains the same set of points. bool operator==(const R2Rect& other) const; // Return true if two rectangles do not contain the same set of points. bool operator!=(const R2Rect& other) const; // Return true if the x- and y-intervals of the two rectangles are the same // up to the given tolerance (see r1interval.h for details). bool ApproxEquals(const R2Rect& other, double max_error = 1e-15) const; private: R1Interval bounds_[2]; }; inline R2Rect::R2Rect(const R2Point& lo, const R2Point& hi) { bounds_[0] = R1Interval(lo.x(), hi.x()); bounds_[1] = R1Interval(lo.y(), hi.y()); S2_DCHECK(is_valid()); } inline R2Rect::R2Rect(const R1Interval& x, const R1Interval& y) { bounds_[0] = x; bounds_[1] = y; S2_DCHECK(is_valid()); } inline R2Rect::R2Rect() { // The default R1Interval constructor creates an empty interval. S2_DCHECK(is_valid()); } inline R2Rect R2Rect::Empty() { return R2Rect(R1Interval::Empty(), R1Interval::Empty()); } inline R2Rect R2Rect::FromPointPair(const R2Point& p1, const R2Point& p2) { return R2Rect(R1Interval::FromPointPair(p1.x(), p2.x()), R1Interval::FromPointPair(p1.y(), p2.y())); } inline bool R2Rect::is_valid() const { // The x/y ranges must either be both empty or both non-empty. return x().is_empty() == y().is_empty(); } inline bool R2Rect::is_empty() const { return x().is_empty(); } inline R2Rect R2Rect::FromPoint(const R2Point& p) { return R2Rect(p, p); } inline R2Point R2Rect::GetVertex(int k) const { // Twiddle bits to return the points in CCW order (lower left, lower right, // upper right, upper left). 
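  // Concretely, with j = (k >> 1) & 1 and i = j ^ (k & 1):
  //   k = 0  ->  (i, j) = (0, 0)   lower left
  //   k = 1  ->  (i, j) = (1, 0)   lower right
  //   k = 2  ->  (i, j) = (1, 1)   upper right
  //   k = 3  ->  (i, j) = (0, 1)   upper left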
int j = (k >> 1) & 1; return GetVertex(j ^ (k & 1), j); } inline R2Point R2Rect::GetVertex(int i, int j) const { return R2Point(bounds_[0][i], bounds_[1][j]); } inline R2Point R2Rect::GetCenter() const { return R2Point(x().GetCenter(), y().GetCenter()); } inline R2Point R2Rect::GetSize() const { return R2Point(x().GetLength(), y().GetLength()); } inline bool R2Rect::Contains(const R2Point& p) const { return x().Contains(p.x()) && y().Contains(p.y()); } inline bool R2Rect::InteriorContains(const R2Point& p) const { return x().InteriorContains(p.x()) && y().InteriorContains(p.y()); } inline R2Rect R2Rect::Expanded(double margin) const { return Expanded(R2Point(margin, margin)); } inline bool R2Rect::operator==(const R2Rect& other) const { return x() == other.x() && y() == other.y(); } inline bool R2Rect::operator!=(const R2Rect& other) const { return !operator==(other); } std::ostream& operator<<(std::ostream& os, const R2Rect& r); #endif // S2_R2RECT_H_ s2geometry-0.10.0/src/s2/r2rect_test.cc000066400000000000000000000212721422156367100175650ustar00rootroot00000000000000// Copyright 2012 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) // // Most of the R2Rect methods have trivial implementations in terms of the // R1Interval class, so most of the testing is done in that unit test. #include "s2/r2rect.h" #include #include "s2/r2.h" static void TestIntervalOps(const R2Rect& x, const R2Rect& y, const char* expected_rexion, const R2Rect& expected_union, const R2Rect& expected_intersection) { // Test all of the interval operations on the given pair of intervals. // "expected_rexion" is a sequence of "T" and "F" characters corresponding // to the expected results of Contains(), InteriorContains(), Intersects(), // and InteriorIntersects() respectively. EXPECT_EQ(expected_rexion[0] == 'T', x.Contains(y)); EXPECT_EQ(expected_rexion[1] == 'T', x.InteriorContains(y)); EXPECT_EQ(expected_rexion[2] == 'T', x.Intersects(y)); EXPECT_EQ(expected_rexion[3] == 'T', x.InteriorIntersects(y)); EXPECT_EQ(x.Union(y) == x, x.Contains(y)); EXPECT_EQ(!x.Intersection(y).is_empty(), x.Intersects(y)); EXPECT_EQ(expected_union, x.Union(y)); EXPECT_EQ(expected_intersection, x.Intersection(y)); R2Rect r = x; r.AddRect(y); EXPECT_EQ(expected_union, r); if (y.GetSize() == R2Point(0, 0)) { r = x; r.AddPoint(y.lo()); EXPECT_EQ(expected_union, r); } } TEST(R2Rect, EmptyRectangles) { // Test basic properties of empty rectangles. R2Rect empty = R2Rect::Empty(); EXPECT_TRUE(empty.is_valid()); EXPECT_TRUE(empty.is_empty()); EXPECT_EQ(empty, empty); } TEST(R2Rect, ConstructorsAndAccessors) { // Check various constructors and accessor methods. 
R2Rect r = R2Rect(R2Point(0.1, 0), R2Point(0.25, 1)); EXPECT_EQ(0.1, r.x().lo()); EXPECT_EQ(0.25, r.x().hi()); EXPECT_EQ(0.0, r.y().lo()); EXPECT_EQ(1.0, r.y().hi()); EXPECT_EQ(0.1, r[0][0]); EXPECT_EQ(0.25, r[0][1]); EXPECT_EQ(0.0, r[1][0]); EXPECT_EQ(1.0, r[1][1]); EXPECT_EQ(R1Interval(0.1, 0.25), r.x()); EXPECT_EQ(R1Interval(0, 1), r.y()); EXPECT_EQ(R1Interval(0.1, 0.25), r[0]); EXPECT_EQ(R1Interval(0, 1), r[1]); r[0] = R1Interval(3, 4); r[1][0] = 5; r[1][1] = 6; EXPECT_EQ(R1Interval(3, 4), r[0]); EXPECT_EQ(R1Interval(5, 6), r[1]); EXPECT_EQ(r, r); EXPECT_NE(r, R2Rect::Empty()); R2Rect r2; EXPECT_TRUE(r2.is_empty()); EXPECT_EQ(r2, R2Rect::Empty()); } TEST(R2Rect, FromCenterSize) { // FromCenterSize() EXPECT_TRUE(R2Rect::FromCenterSize(R2Point(0.3, 0.5), R2Point(0.2, 0.4)). ApproxEquals(R2Rect(R2Point(0.2, 0.3), R2Point(0.4, 0.7)))); EXPECT_TRUE(R2Rect::FromCenterSize(R2Point(1, 0.1), R2Point(0, 2)). ApproxEquals(R2Rect(R2Point(1, -0.9), R2Point(1, 1.1)))); } TEST(R2Rect, FromPoint) { // FromPoint(), FromPointPair() R2Rect d1 = R2Rect(R2Point(0.1, 0), R2Point(0.25, 1)); EXPECT_EQ(R2Rect(d1.lo(), d1.lo()), R2Rect::FromPoint(d1.lo())); EXPECT_EQ(R2Rect(R2Point(0.15, 0.3), R2Point(0.35, 0.9)), R2Rect::FromPointPair(R2Point(0.15, 0.9), R2Point(0.35, 0.3))); EXPECT_EQ(R2Rect(R2Point(0.12, 0), R2Point(0.83, 0.5)), R2Rect::FromPointPair(R2Point(0.83, 0), R2Point(0.12, 0.5))); } TEST(R2Rect, SimplePredicates) { // GetCenter(), GetVertex(), Contains(R2Point), InteriorContains(R2Point). R2Point sw1 = R2Point(0, 0.25); R2Point ne1 = R2Point(0.5, 0.75); R2Rect r1(sw1, ne1); EXPECT_EQ(R2Point(0.25, 0.5), r1.GetCenter()); EXPECT_EQ(R2Point(0, 0.25), r1.GetVertex(0)); EXPECT_EQ(R2Point(0.5, 0.25), r1.GetVertex(1)); EXPECT_EQ(R2Point(0.5, 0.75), r1.GetVertex(2)); EXPECT_EQ(R2Point(0, 0.75), r1.GetVertex(3)); EXPECT_TRUE(r1.Contains(R2Point(0.2, 0.4))); EXPECT_FALSE(r1.Contains(R2Point(0.2, 0.8))); EXPECT_FALSE(r1.Contains(R2Point(-0.1, 0.4))); EXPECT_FALSE(r1.Contains(R2Point(0.6, 0.1))); EXPECT_TRUE(r1.Contains(sw1)); EXPECT_TRUE(r1.Contains(ne1)); EXPECT_FALSE(r1.InteriorContains(sw1)); EXPECT_FALSE(r1.InteriorContains(ne1)); // Make sure that GetVertex() returns vertices in CCW order. for (int k = 0; k < 4; ++k) { R2Point a = r1.GetVertex(k - 1); R2Point b = r1.GetVertex(k); R2Point c = r1.GetVertex(k + 1); EXPECT_GT((b - a).Ortho().DotProd(c - a), 0); } } TEST(R2Rect, IntervalOperations) { // Contains(R2Rect), InteriorContains(R2Rect), // Intersects(), InteriorIntersects(), Union(), Intersection(). // // Much more testing of these methods is done in s1interval_test // and r1interval_test. 
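  // Each 4-character string passed to TestIntervalOps() encodes the expected
  // results of Contains(), InteriorContains(), Intersects(), and
  // InteriorIntersects(), in that order ('T' = true, 'F' = false).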
R2Rect empty = R2Rect::Empty(); R2Point sw1 = R2Point(0, 0.25); R2Point ne1 = R2Point(0.5, 0.75); R2Rect r1(sw1, ne1); R2Rect r1_mid = R2Rect(R2Point(0.25, 0.5), R2Point(0.25, 0.5)); R2Rect r_sw1(sw1, sw1); R2Rect r_ne1(ne1, ne1); TestIntervalOps(r1, r1_mid, "TTTT", r1, r1_mid); TestIntervalOps(r1, r_sw1, "TFTF", r1, r_sw1); TestIntervalOps(r1, r_ne1, "TFTF", r1, r_ne1); EXPECT_EQ(R2Rect(R2Point(0, 0.25), R2Point(0.5, 0.75)), r1); TestIntervalOps(r1, R2Rect(R2Point(0.45, 0.1), R2Point(0.75, 0.3)), "FFTT", R2Rect(R2Point(0, 0.1), R2Point(0.75, 0.75)), R2Rect(R2Point(0.45, 0.25), R2Point(0.5, 0.3))); TestIntervalOps(r1, R2Rect(R2Point(0.5, 0.1), R2Point(0.7, 0.3)), "FFTF", R2Rect(R2Point(0, 0.1), R2Point(0.7, 0.75)), R2Rect(R2Point(0.5, 0.25), R2Point(0.5, 0.3))); TestIntervalOps(r1, R2Rect(R2Point(0.45, 0.1), R2Point(0.7, 0.25)), "FFTF", R2Rect(R2Point(0, 0.1), R2Point(0.7, 0.75)), R2Rect(R2Point(0.45, 0.25), R2Point(0.5, 0.25))); TestIntervalOps(R2Rect(R2Point(0.1, 0.2), R2Point(0.1, 0.3)), R2Rect(R2Point(0.15, 0.7), R2Point(0.2, 0.8)), "FFFF", R2Rect(R2Point(0.1, 0.2), R2Point(0.2, 0.8)), empty); // Check that the intersection of two rectangles that overlap in x but not y // is valid, and vice versa. TestIntervalOps(R2Rect(R2Point(0.1, 0.2), R2Point(0.4, 0.5)), R2Rect(R2Point(0, 0), R2Point(0.2, 0.1)), "FFFF", R2Rect(R2Point(0, 0), R2Point(0.4, 0.5)), empty); TestIntervalOps(R2Rect(R2Point(0, 0), R2Point(0.1, 0.3)), R2Rect(R2Point(0.2, 0.1), R2Point(0.3, 0.4)), "FFFF", R2Rect(R2Point(0, 0), R2Point(0.3, 0.4)), empty); } TEST(R2Rect, AddPoint) { // AddPoint() R2Point sw1 = R2Point(0, 0.25); R2Point ne1 = R2Point(0.5, 0.75); R2Rect r1(sw1, ne1); R2Rect r2 = R2Rect::Empty(); r2.AddPoint(R2Point(0, 0.25)); r2.AddPoint(R2Point(0.5, 0.25)); r2.AddPoint(R2Point(0, 0.75)); r2.AddPoint(R2Point(0.1, 0.4)); EXPECT_EQ(r1, r2); } TEST(R2Rect, Project) { R2Rect r1(R1Interval(0, 0.5), R1Interval(0.25, 0.75)); EXPECT_EQ(R2Point(0, 0.25), r1.Project(R2Point(-0.01, 0.24))); EXPECT_EQ(R2Point(0, 0.48), r1.Project(R2Point(-5.0, 0.48))); EXPECT_EQ(R2Point(0, 0.75), r1.Project(R2Point(-5.0, 2.48))); EXPECT_EQ(R2Point(0.19, 0.75), r1.Project(R2Point(0.19, 2.48))); EXPECT_EQ(R2Point(0.5, 0.75), r1.Project(R2Point(6.19, 2.48))); EXPECT_EQ(R2Point(0.5, 0.53), r1.Project(R2Point(6.19, 0.53))); EXPECT_EQ(R2Point(0.5, 0.25), r1.Project(R2Point(6.19, -2.53))); EXPECT_EQ(R2Point(0.33, 0.25), r1.Project(R2Point(0.33, -2.53))); EXPECT_EQ(R2Point(0.33, 0.37), r1.Project(R2Point(0.33, 0.37))); } TEST(R2Rect, Expanded) { // Expanded() EXPECT_TRUE(R2Rect::Empty().Expanded(R2Point(0.1, 0.3)).is_empty()); EXPECT_TRUE(R2Rect::Empty().Expanded(R2Point(-0.1, -0.3)).is_empty()); EXPECT_TRUE(R2Rect(R2Point(0.2, 0.4), R2Point(0.3, 0.7)). Expanded(R2Point(0.1, 0.3)). ApproxEquals(R2Rect(R2Point(0.1, 0.1), R2Point(0.4, 1.0)))); EXPECT_TRUE(R2Rect(R2Point(0.2, 0.4), R2Point(0.3, 0.7)). Expanded(R2Point(-0.1, 0.3)).is_empty()); EXPECT_TRUE(R2Rect(R2Point(0.2, 0.4), R2Point(0.3, 0.7)). Expanded(R2Point(0.1, -0.2)).is_empty()); EXPECT_TRUE(R2Rect(R2Point(0.2, 0.4), R2Point(0.3, 0.7)). Expanded(R2Point(0.1, -0.1)). ApproxEquals(R2Rect(R2Point(0.1, 0.5), R2Point(0.4, 0.6)))); EXPECT_TRUE(R2Rect(R2Point(0.2, 0.4), R2Point(0.3, 0.7)).Expanded(0.1). ApproxEquals(R2Rect(R2Point(0.1, 0.3), R2Point(0.4, 0.8)))); } s2geometry-0.10.0/src/s2/s1angle.cc000066400000000000000000000026361422156367100166620ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/s1angle.h" #include #include #include #include "s2/s2latlng.h" S1Angle::S1Angle(const S2Point& x, const S2Point& y) : radians_(x.Angle(y)) { } S1Angle::S1Angle(const S2LatLng& x, const S2LatLng& y) : radians_(x.GetDistance(y).radians()) { } S1Angle S1Angle::Normalized() const { S1Angle a(radians_); a.Normalize(); return a; } void S1Angle::Normalize() { radians_ = remainder(radians_, 2.0 * M_PI); if (radians_ <= -M_PI) radians_ = M_PI; } std::ostream& operator<<(std::ostream& os, S1Angle a) { double degrees = a.degrees(); char buffer[13]; int sz = snprintf(buffer, sizeof(buffer), "%.7f", degrees); if (sz >= 0 && sz < sizeof(buffer)) { return os << buffer; } else { return os << degrees; } } s2geometry-0.10.0/src/s2/s1angle.h000066400000000000000000000252721422156367100165250ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_S1ANGLE_H_ #define S2_S1ANGLE_H_ #include #include #include #include #include "s2/base/integral_types.h" #include "s2/_fp_contract_off.h" #include "s2/s2point.h" #include "s2/util/math/mathutil.h" #include "s2/util/math/vector.h" class S2LatLng; #ifndef SWIG #define IFNDEF_SWIG(x) x #else #define IFNDEF_SWIG(x) #endif // This class represents a one-dimensional angle (as opposed to a // two-dimensional solid angle). It has methods for converting angles to // or from radians, degrees, and the E5/E6/E7 representations (i.e. degrees // multiplied by 1e5/1e6/1e7 and rounded to the nearest integer). // // The internal representation is a double-precision value in radians, so // conversion to and from radians is exact. Conversions between E5, E6, E7, // and Degrees are not always exact; for example, Degrees(3.1) is different // from E6(3100000) or E7(310000000). However, the following properties are // guaranteed for any integer "n", provided that "n" is in the input range of // both functions: // // Degrees(n) == E6(1000000 * n) // Degrees(n) == E7(10000000 * n) // E6(n) == E7(10 * n) // // The corresponding properties are *not* true for E5, so if you use E5 then // don't test for exact equality when comparing to other formats such as // Degrees or E7. 
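// // For example, the following comparisons are guaranteed to be exact:
// //   S1Angle::Degrees(3) == S1Angle::E6(3000000)
// //   S1Angle::Degrees(3) == S1Angle::E7(30000000)
// //   S1Angle::E6(3100000) == S1Angle::E7(31000000)
// // but no such identity is guaranteed for E5, so S1Angle::E5(300000) should
// // not be assumed to compare exactly equal to S1Angle::Degrees(3).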
// // The following conversions between degrees and radians are exact: // // Degrees(180) == Radians(M_PI) // Degrees(45 * k) == Radians(k * M_PI / 4) for k == 0..8 // // These identities also hold when the arguments are scaled up or down by any // power of 2. Some similar identities are also true, for example, // Degrees(60) == Radians(M_PI / 3), but be aware that this type of identity // does not hold in general. For example, Degrees(3) != Radians(M_PI / 60). // // Similarly, the conversion to radians means that Angle::Degrees(x).degrees() // does not always equal "x". For example, // // S1Angle::Degrees(45 * k).degrees() == 45 * k for k == 0..8 // but S1Angle::Degrees(60).degrees() != 60. // // This means that when testing for equality, you should allow for numerical // errors (EXPECT_DOUBLE_EQ) or convert to discrete E5/E6/E7 values first. // // CAVEAT: All of the above properties depend on "double" being the usual // 64-bit IEEE 754 type (which is true on almost all modern platforms). // // This class is intended to be copied by value as desired. It uses // the default copy constructor and assignment operator. class S1Angle { public: // These methods construct S1Angle objects from their measure in radians // or degrees. static constexpr S1Angle Radians(double radians); static constexpr S1Angle Degrees(double degrees); static constexpr S1Angle E5(int32 e5); static constexpr S1Angle E6(int32 e6); static constexpr S1Angle E7(int32 e7); // Convenience functions -- to use when args have been fixed32s in protos. // // The arguments are static_cast into int32, so very large unsigned values // are treated as negative numbers. static constexpr S1Angle UnsignedE6(uint32 e6); static constexpr S1Angle UnsignedE7(uint32 e7); // The default constructor yields a zero angle. This is useful for STL // containers and class methods with output arguments. IFNDEF_SWIG(constexpr) S1Angle() : radians_(0) {} // Return an angle larger than any finite angle. static constexpr S1Angle Infinity(); // A explicit shorthand for the default constructor. static constexpr S1Angle Zero(); // Return the angle between two points, which is also equal to the distance // between these points on the unit sphere. The points do not need to be // normalized. This function has a maximum error of 3.25 * DBL_EPSILON (or // 2.5 * DBL_EPSILON for angles up to 1 radian). If either point is // zero-length (e.g. an uninitialized S2Point), or almost zero-length, the // resulting angle will be zero. S1Angle(const S2Point& x, const S2Point& y); // Like the constructor above, but return the angle (i.e., distance) between // two S2LatLng points. This function has about 15 digits of accuracy for // small distances but only about 8 digits of accuracy as the distance // approaches 180 degrees (i.e., nearly-antipodal points). S1Angle(const S2LatLng& x, const S2LatLng& y); constexpr double radians() const; constexpr double degrees() const; int32 e5() const; int32 e6() const; int32 e7() const; // Return the absolute value of an angle. S1Angle abs() const; friend S1Angle abs(S1Angle a); // Comparison operators. 
friend IFNDEF_SWIG(constexpr) bool operator==(S1Angle x, S1Angle y); friend IFNDEF_SWIG(constexpr) bool operator!=(S1Angle x, S1Angle y); friend IFNDEF_SWIG(constexpr) bool operator<(S1Angle x, S1Angle y); friend IFNDEF_SWIG(constexpr) bool operator>(S1Angle x, S1Angle y); friend IFNDEF_SWIG(constexpr) bool operator<=(S1Angle x, S1Angle y); friend IFNDEF_SWIG(constexpr) bool operator>=(S1Angle x, S1Angle y); // Simple arithmetic operators for manipulating S1Angles. friend IFNDEF_SWIG(constexpr) S1Angle operator-(S1Angle a); friend IFNDEF_SWIG(constexpr) S1Angle operator+(S1Angle a, S1Angle b); friend IFNDEF_SWIG(constexpr) S1Angle operator-(S1Angle a, S1Angle b); friend IFNDEF_SWIG(constexpr) S1Angle operator*(double m, S1Angle a); friend IFNDEF_SWIG(constexpr) S1Angle operator*(S1Angle a, double m); friend IFNDEF_SWIG(constexpr) S1Angle operator/(S1Angle a, double m); friend IFNDEF_SWIG(constexpr) double operator/(S1Angle a, S1Angle b); S1Angle& operator+=(S1Angle a); S1Angle& operator-=(S1Angle a); S1Angle& operator*=(double m); S1Angle& operator/=(double m); // Trigonmetric functions (not necessary but slightly more convenient). friend double sin(S1Angle a); friend double cos(S1Angle a); friend double tan(S1Angle a); // Return the angle normalized to the range (-180, 180] degrees. S1Angle Normalized() const; // Normalize this angle to the range (-180, 180] degrees. void Normalize(); // When S1Angle is used as a key in one of the absl::btree container types, // indicate that linear rather than binary search should be used. This is // much faster when the comparison function is cheap. typedef std::true_type absl_btree_prefer_linear_node_search; private: explicit IFNDEF_SWIG(constexpr) S1Angle(double radians) : radians_(radians) {} double radians_; }; ////////////////// Implementation details follow //////////////////// inline constexpr S1Angle S1Angle::Infinity() { return S1Angle(std::numeric_limits::infinity()); } inline constexpr S1Angle S1Angle::Zero() { return S1Angle(0); } inline constexpr double S1Angle::radians() const { return radians_; } inline constexpr double S1Angle::degrees() const { return (180 / M_PI) * radians_; } // Note that the E5, E6, and E7 conversion involve two multiplications rather // than one. This is mainly for backwards compatibility (changing this would // break many tests), but it does have the nice side effect that conversions // between Degrees, E6, and E7 are exact when the arguments are integers. 
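// In other words, e6() below is computed as FastIntRound(1e6 * ((180 / M_PI)
// * radians_)) rather than as FastIntRound((1e6 * 180 / M_PI) * radians_),
// and likewise for e5() and e7().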
inline int32 S1Angle::e5() const { return MathUtil::FastIntRound(1e5 * degrees()); } inline int32 S1Angle::e6() const { return MathUtil::FastIntRound(1e6 * degrees()); } inline int32 S1Angle::e7() const { return MathUtil::FastIntRound(1e7 * degrees()); } inline S1Angle S1Angle::abs() const { return S1Angle(std::fabs(radians_)); } inline S1Angle abs(S1Angle a) { return S1Angle(std::fabs(a.radians_)); } inline constexpr bool operator==(S1Angle x, S1Angle y) { return x.radians() == y.radians(); } inline constexpr bool operator!=(S1Angle x, S1Angle y) { return x.radians() != y.radians(); } inline constexpr bool operator<(S1Angle x, S1Angle y) { return x.radians() < y.radians(); } inline constexpr bool operator>(S1Angle x, S1Angle y) { return x.radians() > y.radians(); } inline constexpr bool operator<=(S1Angle x, S1Angle y) { return x.radians() <= y.radians(); } inline constexpr bool operator>=(S1Angle x, S1Angle y) { return x.radians() >= y.radians(); } inline constexpr S1Angle operator-(S1Angle a) { return S1Angle::Radians(-a.radians()); } inline constexpr S1Angle operator+(S1Angle a, S1Angle b) { return S1Angle::Radians(a.radians() + b.radians()); } inline constexpr S1Angle operator-(S1Angle a, S1Angle b) { return S1Angle::Radians(a.radians() - b.radians()); } inline constexpr S1Angle operator*(double m, S1Angle a) { return S1Angle::Radians(m * a.radians()); } inline constexpr S1Angle operator*(S1Angle a, double m) { return S1Angle::Radians(m * a.radians()); } inline constexpr S1Angle operator/(S1Angle a, double m) { return S1Angle::Radians(a.radians() / m); } inline constexpr double operator/(S1Angle a, S1Angle b) { return a.radians() / b.radians(); } inline S1Angle& S1Angle::operator+=(S1Angle a) { radians_ += a.radians(); return *this; } inline S1Angle& S1Angle::operator-=(S1Angle a) { radians_ -= a.radians(); return *this; } inline S1Angle& S1Angle::operator*=(double m) { radians_ *= m; return *this; } inline S1Angle& S1Angle::operator/=(double m) { radians_ /= m; return *this; } inline double sin(S1Angle a) { return sin(a.radians()); } inline double cos(S1Angle a) { return cos(a.radians()); } inline double tan(S1Angle a) { return tan(a.radians()); } inline constexpr S1Angle S1Angle::Radians(double radians) { return S1Angle(radians); } inline constexpr S1Angle S1Angle::Degrees(double degrees) { return S1Angle((M_PI / 180) * degrees); } inline constexpr S1Angle S1Angle::E5(int32 e5) { return Degrees(1e-5 * e5); } inline constexpr S1Angle S1Angle::E6(int32 e6) { return Degrees(1e-6 * e6); } inline constexpr S1Angle S1Angle::E7(int32 e7) { return Degrees(1e-7 * e7); } inline constexpr S1Angle S1Angle::UnsignedE6(uint32 e6) { return E6(static_cast(e6)); } inline constexpr S1Angle S1Angle::UnsignedE7(uint32 e7) { return E7(static_cast(e7)); } // Writes the angle in degrees with 7 digits of precision after the // decimal point, e.g. "17.3745904". std::ostream& operator<<(std::ostream& os, S1Angle a); #undef IFNDEF_SWIG #endif // S2_S1ANGLE_H_ s2geometry-0.10.0/src/s2/s1angle_test.cc000066400000000000000000000157421422156367100177230ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/s1angle.h" #include #include "absl/flags/flag.h" #include "s2/base/integral_types.h" #include "s2/base/logging.h" #include "s2/s2latlng.h" #include "s2/s2testing.h" TEST(S1Angle, DefaultConstructor) { // Check that the default constructor returns an angle of 0. S1Angle a; EXPECT_EQ(0, a.radians()); } TEST(S1Angle, Infinity) { EXPECT_LT(S1Angle::Radians(1e30), S1Angle::Infinity()); EXPECT_LT(-S1Angle::Infinity(), S1Angle::Zero()); EXPECT_EQ(S1Angle::Infinity(), S1Angle::Infinity()); } TEST(S1Angle, Zero) { EXPECT_EQ(S1Angle::Radians(0), S1Angle::Zero()); } TEST(S1Angle, PiRadiansExactly180Degrees) { // Check that the conversion between Pi radians and 180 degrees is exact. EXPECT_EQ(M_PI, S1Angle::Radians(M_PI).radians()); EXPECT_EQ(180.0, S1Angle::Radians(M_PI).degrees()); EXPECT_EQ(M_PI, S1Angle::Degrees(180).radians()); EXPECT_EQ(180.0, S1Angle::Degrees(180).degrees()); EXPECT_EQ(90.0, S1Angle::Radians(M_PI_2).degrees()); // Check negative angles. EXPECT_EQ(-90.0, S1Angle::Radians(-M_PI_2).degrees()); EXPECT_EQ(-M_PI_4, S1Angle::Degrees(-45).radians()); } TEST(S1Angle, E5E6E7Representations) { // Check that E5/E6/E7 representations work as expected. EXPECT_DOUBLE_EQ(S1Angle::Degrees(-45).radians(), S1Angle::E5(-4500000).radians()); EXPECT_DOUBLE_EQ(S1Angle::Degrees(-60).radians(), S1Angle::E6(-60000000).radians()); EXPECT_DOUBLE_EQ(S1Angle::Degrees(75).radians(), S1Angle::E7(750000000).radians()); EXPECT_EQ(-17256123, S1Angle::Degrees(-172.56123).e5()); EXPECT_EQ(12345678, S1Angle::Degrees(12.345678).e6()); EXPECT_EQ(-123456789, S1Angle::Degrees(-12.3456789).e7()); } TEST(S1Angle, E6E7RepresentationsUnsigned) { // Check that unsigned E6/E7 representations work as expected. 
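  // UnsignedE6() and UnsignedE7() static_cast their argument to int32, so a
  // very large unsigned value such as static_cast<uint32>(-60000000) is
  // interpreted as the corresponding negative angle (see s1angle.h).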
EXPECT_DOUBLE_EQ( S1Angle::Degrees(60).radians(), S1Angle::UnsignedE6(static_cast(60000000)).radians()); EXPECT_DOUBLE_EQ( S1Angle::Degrees(-60).radians(), S1Angle::UnsignedE6(static_cast(-60000000)).radians()); EXPECT_DOUBLE_EQ( S1Angle::Degrees(75).radians(), S1Angle::UnsignedE7(static_cast(750000000)).radians()); EXPECT_DOUBLE_EQ( S1Angle::Degrees(-75).radians(), S1Angle::UnsignedE7(static_cast(-750000000)).radians()); } TEST(S1Angle, NormalizeCorrectlyCanonicalizesAngles) { EXPECT_DOUBLE_EQ(0.0, S1Angle::Degrees(360.0).Normalized().degrees()); EXPECT_DOUBLE_EQ(-90.0, S1Angle::Degrees(-90.0).Normalized().degrees()); EXPECT_DOUBLE_EQ(180.0, S1Angle::Degrees(-180.0).Normalized().degrees()); EXPECT_DOUBLE_EQ(180.0, S1Angle::Degrees(180.0).Normalized().degrees()); EXPECT_DOUBLE_EQ(180.0, S1Angle::Degrees(540.0).Normalized().degrees()); EXPECT_DOUBLE_EQ(90.0, S1Angle::Degrees(-270.0).Normalized().degrees()); } TEST(S1Angle, ArithmeticOperationsOnAngles) { EXPECT_DOUBLE_EQ(0.3, S1Angle::Radians(-0.3).abs().radians()); EXPECT_DOUBLE_EQ(0.3, abs(S1Angle::Radians(-0.3)).radians()); EXPECT_DOUBLE_EQ(-0.1, (-S1Angle::Radians(0.1)).radians()); EXPECT_DOUBLE_EQ(0.4, (S1Angle::Radians(0.1) + S1Angle::Radians(0.3)).radians()); EXPECT_DOUBLE_EQ(-0.2, (S1Angle::Radians(0.1) - S1Angle::Radians(0.3)).radians()); EXPECT_DOUBLE_EQ(0.6, (2 * S1Angle::Radians(0.3)).radians()); EXPECT_DOUBLE_EQ(0.6, (S1Angle::Radians(0.3) * 2).radians()); EXPECT_DOUBLE_EQ(0.15, (S1Angle::Radians(0.3) / 2).radians()); EXPECT_DOUBLE_EQ(0.5, (S1Angle::Radians(0.3) / S1Angle::Radians(0.6))); S1Angle tmp = S1Angle::Radians(1.0); tmp += S1Angle::Radians(0.5); EXPECT_DOUBLE_EQ(1.5, tmp.radians()); tmp -= S1Angle::Radians(1.0); EXPECT_DOUBLE_EQ(0.5, tmp.radians()); tmp *= 5; EXPECT_DOUBLE_EQ(2.5, tmp.radians()); tmp /= 2; EXPECT_DOUBLE_EQ(1.25, tmp.radians()); } TEST(S1Angle, Trigonometry) { // Spot check a few angles to ensure that the correct function is called. EXPECT_DOUBLE_EQ(1, cos(S1Angle::Degrees(0))); EXPECT_DOUBLE_EQ(1, sin(S1Angle::Degrees(90))); EXPECT_DOUBLE_EQ(1, tan(S1Angle::Degrees(45))); } TEST(S1Angle, ConstructorsThatMeasureAngles) { EXPECT_DOUBLE_EQ(M_PI_2, S1Angle(S2Point(1, 0, 0), S2Point(0, 0, 2)).radians()); EXPECT_DOUBLE_EQ(0.0, S1Angle(S2Point(1, 0, 0), S2Point(1, 0, 0)).radians()); EXPECT_NEAR(50.0, S1Angle(S2LatLng::FromDegrees(20, 20), S2LatLng::FromDegrees(70, 20)).degrees(), 1e-13); } TEST(S1Angle, TestFormatting) { std::ostringstream ss; ss << S1Angle::Degrees(180.0); EXPECT_EQ("180.0000000", ss.str()); } // The current implementation guarantees exact conversions between // Degrees() and E6() when the Degrees() argument is an integer. TEST(S1Angle, DegreesVsE6) { for (int i = 0; i <= 180; ++i) { EXPECT_EQ(S1Angle::Degrees(i), S1Angle::E6(1000000 * i)); } } // The current implementation guarantees exact conversions between // Degrees() and E7() when the Degrees() argument is an integer. TEST(S1Angle, DegreesVsE7) { for (int i = 0; i <= 180; ++i) { EXPECT_EQ(S1Angle::Degrees(i), S1Angle::E7(10000000 * i)); } } // The current implementation guarantees exact conversions between // E6() and E7() when the E6() argument is an integer. TEST(S1Angle, E6VsE7) { S2Testing::rnd.Reset(absl::GetFlag(FLAGS_s2_random_seed)); for (int iter = 0; iter < 1000; ++iter) { int i = S2Testing::rnd.Uniform(180000000); EXPECT_EQ(S1Angle::E6(i), S1Angle::E7(10 * i)); } } // The current implementation guarantees certain exact conversions between // degrees and radians (see the header file for details). 
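// For example, S1Angle::Degrees(45 * k) == S1Angle::Radians(k * M_PI / 4)
// for k = 0..8, and these identities continue to hold when both arguments
// are scaled up or down by any power of 2.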
TEST(S1Angle, DegreesVsRadians) { for (int k = -8; k <= 8; ++k) { EXPECT_EQ(S1Angle::Degrees(45 * k), S1Angle::Radians(k * M_PI / 4)); EXPECT_EQ(45 * k, S1Angle::Degrees(45 * k).degrees()); } for (int k = 0; k <= 30; ++k) { int n = 1 << k; EXPECT_EQ(S1Angle::Degrees(180. / n), S1Angle::Radians(M_PI / n)); EXPECT_EQ(S1Angle::Degrees(60. / n), S1Angle::Radians(M_PI / (3. * n))); EXPECT_EQ(S1Angle::Degrees(36. / n), S1Angle::Radians(M_PI / (5. * n))); EXPECT_EQ(S1Angle::Degrees(20. / n), S1Angle::Radians(M_PI / (9. * n))); EXPECT_EQ(S1Angle::Degrees(4. / n), S1Angle::Radians(M_PI / (45. * n))); } // We also spot check a couple of non-identities. EXPECT_NE(S1Angle::Degrees(3), S1Angle::Radians(M_PI / 60)); EXPECT_NE(60, S1Angle::Degrees(60).degrees()); } s2geometry-0.10.0/src/s2/s1chord_angle.cc000066400000000000000000000132041422156367100200320ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/s1chord_angle.h" #include #include #include #include "s2/s1angle.h" #include "s2/s2pointutil.h" using std::max; using std::min; // Android with gnustl has ::nextafter but not std::nextafter. // https://github.com/android-ndk/ndk/issues/82 // Check for gnustl with _GLIBCXX_CMATH, which is its cmath include // guard. #if !defined(__ANDROID__) || !defined(_GLIBCXX_CMATH) using std::nextafter; #endif static constexpr double kMaxLength2 = 4.0; S1ChordAngle::S1ChordAngle(S1Angle angle) { if (angle.radians() < 0) { *this = Negative(); } else if (angle == S1Angle::Infinity()) { *this = Infinity(); } else { // The chord length is 2 * sin(angle / 2). double length = 2 * sin(0.5 * min(M_PI, angle.radians())); length2_ = length * length; } S2_DCHECK(is_valid()); } S1Angle S1ChordAngle::ToAngle() const { if (is_negative()) return S1Angle::Radians(-1); if (is_infinity()) return S1Angle::Infinity(); return S1Angle::Radians(2 * asin(0.5 * sqrt(length2_))); } bool S1ChordAngle::is_valid() const { return (length2_ >= 0 && length2_ <= kMaxLength2) || is_special(); } S1ChordAngle S1ChordAngle::Successor() const { if (length2_ >= kMaxLength2) return Infinity(); if (length2_ < 0.0) return Zero(); return S1ChordAngle(nextafter(length2_, 10.0)); } S1ChordAngle S1ChordAngle::Predecessor() const { if (length2_ <= 0.0) return Negative(); if (length2_ > kMaxLength2) return Straight(); return S1ChordAngle(nextafter(length2_, -10.0)); } S1ChordAngle S1ChordAngle::PlusError(double error) const { // If angle is Negative() or Infinity(), don't change it. // Otherwise clamp it to the valid range. 
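  // Clamping the squared length to [0, kMaxLength2] keeps the result a valid,
  // non-special chord angle even when "error" is negative or very large.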
if (is_special()) return *this; return S1ChordAngle(max(0.0, min(kMaxLength2, length2_ + error))); } double S1ChordAngle::GetS2PointConstructorMaxError() const { // There is a relative error of 2.5 * DBL_EPSILON when computing the squared // distance, plus a relative error of 2 * DBL_EPSILON and an absolute error // of (16 * DBL_EPSILON**2) because the lengths of the input points may // differ from 1 by up to (2 * DBL_EPSILON) each. (This is the maximum // length error in S2Point::Normalize.) return 4.5 * DBL_EPSILON * length2_ + 16 * DBL_EPSILON * DBL_EPSILON; } double S1ChordAngle::GetS1AngleConstructorMaxError() const { // Assuming that an accurate math library is being used, the sin() call and // the multiply each have a relative error of 0.5 * DBL_EPSILON. However // the sin() error is squared. return 1.5 * DBL_EPSILON * length2_; } S1ChordAngle operator+(S1ChordAngle a, S1ChordAngle b) { // Note that this method is much more efficient than converting the chord // angles to S1Angles and adding those. It requires only one square root // plus a few additions and multiplications. S2_DCHECK(!a.is_special()) << a; S2_DCHECK(!b.is_special()) << b; // Optimization for the common case where "b" is an error tolerance // parameter that happens to be set to zero. double a2 = a.length2(), b2 = b.length2(); if (b2 == 0) return a; // Clamp the angle sum to at most 180 degrees. if (a2 + b2 >= kMaxLength2) return S1ChordAngle::Straight(); // Let "a" and "b" be the (non-squared) chord lengths, and let c = a+b. // Let A, B, and C be the corresponding half-angles (a = 2*sin(A), etc). // Then the formula below can be derived from c = 2 * sin(A+B) and the // relationships sin(A+B) = sin(A)*cos(B) + sin(B)*cos(A) // cos(X) = sqrt(1 - sin^2(X)) . double x = a2 * (1 - 0.25 * b2); // is_valid() => non-negative double y = b2 * (1 - 0.25 * a2); // is_valid() => non-negative return S1ChordAngle(min(kMaxLength2, x + y + 2 * sqrt(x * y))); } S1ChordAngle operator-(S1ChordAngle a, S1ChordAngle b) { // See comments in operator+(). S2_DCHECK(!a.is_special()) << a; S2_DCHECK(!b.is_special()) << b; double a2 = a.length2(), b2 = b.length2(); if (b2 == 0) return a; if (a2 <= b2) return S1ChordAngle::Zero(); double x = a2 * (1 - 0.25 * b2); double y = b2 * (1 - 0.25 * a2); // The calculation below is formulated differently (with two square roots // rather than one) to avoid excessive cancellation error when two nearly // equal values are subtracted. double c = max(0.0, sqrt(x) - sqrt(y)); return S1ChordAngle(c * c); } double sin2(S1ChordAngle a) { S2_DCHECK(!a.is_special()); // Let "a" be the (non-squared) chord length, and let A be the corresponding // half-angle (a = 2*sin(A)). The formula below can be derived from: // sin(2*A) = 2 * sin(A) * cos(A) // cos^2(A) = 1 - sin^2(A) // This is much faster than converting to an angle and computing its sine. return a.length2() * (1 - 0.25 * a.length2()); } double sin(S1ChordAngle a) { return sqrt(sin2(a)); } double cos(S1ChordAngle a) { // cos(2*A) = cos^2(A) - sin^2(A) = 1 - 2*sin^2(A) S2_DCHECK(!a.is_special()); return 1 - 0.5 * a.length2(); } double tan(S1ChordAngle a) { return sin(a) / cos(a); } std::ostream& operator<<(std::ostream& os, S1ChordAngle a) { return os << a.ToAngle(); } s2geometry-0.10.0/src/s2/s1chord_angle.h000066400000000000000000000367501422156367100177070ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_S1CHORD_ANGLE_H_ #define S2_S1CHORD_ANGLE_H_ #include #include #include #include #include #include "s2/_fp_contract_off.h" #include "s2/s1angle.h" #include "s2/s2pointutil.h" // S1ChordAngle represents the angle subtended by a chord (i.e., the straight // line segment connecting two points on the sphere). Its representation // makes it very efficient for computing and comparing distances, but unlike // S1Angle it is only capable of representing angles between 0 and Pi radians. // S1ChordAngle is intended for applications where many angles need to be // computed and compared, otherwise it is simpler to use S1Angle. // // S1ChordAngle also loses some accuracy as the angle approaches Pi radians. // There are several different ways to measure this error, including the // representational error (i.e., how accurately S1ChordAngle can represent // angles near Pi radians), the conversion error (i.e., how much precision is // lost when an S1Angle is converted to an S1ChordAngle), and the measurement // error (i.e., how accurate the S1ChordAngle(a, b) constructor is when the // points A and B are separated by angles close to Pi radians). All of these // errors differ by a small constant factor. // // For the measurement error (which is the largest of these errors and also // the most important in practice), let the angle between A and B be (Pi - x) // radians, i.e. A and B are within "x" radians of being antipodal. The // corresponding chord length is // // r = 2 * sin((Pi - x) / 2) = 2 * cos(x / 2) . // // For values of x not close to Pi the relative error in the squared chord // length is at most 4.5 * DBL_EPSILON (see GetS2PointConstructorMaxError). // The relative error in "r" is thus at most 2.25 * DBL_EPSILON ~= 5e-16. To // convert this error into an equivalent angle, we have // // |dr / dx| = sin(x / 2) // // and therefore // // |dx| = dr / sin(x / 2) // = 5e-16 * (2 * cos(x / 2)) / sin(x / 2) // = 1e-15 / tan(x / 2) // // The maximum error is attained when // // x = |dx| // = 1e-15 / tan(x / 2) // ~= 1e-15 / (x / 2) // ~= sqrt(2e-15) // // In summary, the measurement error for an angle (Pi - x) is at most // // dx = min(1e-15 / tan(x / 2), sqrt(2e-15)) // (~= min(2e-15 / x, sqrt(2e-15)) when x is small). // // On the Earth's surface (assuming a radius of 6371km), this corresponds to // the following worst-case measurement errors: // // Accuracy: Unless antipodal to within: // --------- --------------------------- // 6.4 nanometers 10,000 km (90 degrees) // 1 micrometer 81.2 kilometers // 1 millimeter 81.2 meters // 1 centimeter 8.12 meters // 28.5 centimeters 28.5 centimeters // // The representational and conversion errors referred to earlier are somewhat // smaller than this. For example, maximum distance between adjacent // representable S1ChordAngle values is only 13.5 cm rather than 28.5 cm. 
To // see this, observe that the closest representable value to r^2 = 4 is // r^2 = 4 * (1 - DBL_EPSILON / 2). Thus r = 2 * (1 - DBL_EPSILON / 4) and // the angle between these two representable values is // // x = 2 * acos(r / 2) // = 2 * acos(1 - DBL_EPSILON / 4) // ~= 2 * asin(sqrt(DBL_EPSILON / 2) // ~= sqrt(2 * DBL_EPSILON) // ~= 2.1e-8 // // which is 13.5 cm on the Earth's surface. // // The worst case rounding error occurs when the value halfway between these // two representable values is rounded up to 4. This halfway value is // r^2 = (4 * (1 - DBL_EPSILON / 4)), thus r = 2 * (1 - DBL_EPSILON / 8) and // the worst case rounding error is // // x = 2 * acos(r / 2) // = 2 * acos(1 - DBL_EPSILON / 8) // ~= 2 * asin(sqrt(DBL_EPSILON / 4) // ~= sqrt(DBL_EPSILON) // ~= 1.5e-8 // // which is 9.5 cm on the Earth's surface. // // This class is intended to be copied by value as desired. It uses // the default copy constructor and assignment operator. class S1ChordAngle { public: // The default constructor yields a zero angle. This is useful for STL // containers and class methods with output arguments. S1ChordAngle() : length2_(0) {} // Construct the S1ChordAngle corresponding to the distance between the two // given points. The points must be unit length. S1ChordAngle(const S2Point& x, const S2Point& y); // Return the zero chord angle. static S1ChordAngle Zero(); // Return a chord angle of 90 degrees (a "right angle"). static S1ChordAngle Right(); // Return a chord angle of 180 degrees (a "straight angle"). This is the // maximum finite chord angle. static S1ChordAngle Straight(); // Return a chord angle larger than any finite chord angle. The only valid // operations on Infinity() are comparisons, S1Angle conversions, and // Successor() / Predecessor(). static S1ChordAngle Infinity(); // Return a chord angle smaller than Zero(). The only valid operations on // Negative() are comparisons, S1Angle conversions, and Successor() / // Predecessor(). static S1ChordAngle Negative(); // Conversion from an S1Angle. Angles outside the range [0, Pi] are handled // as follows: Infinity() is mapped to Infinity(), negative angles are // mapped to Negative(), and finite angles larger than Pi are mapped to // Straight(). // // Note that this operation is relatively expensive and should be avoided. // To use S1ChordAngle effectively, you should structure your code so that // input arguments are converted to S1ChordAngles at the beginning of your // algorithm, and results are converted back to S1Angles only at the end. explicit S1ChordAngle(S1Angle angle); // Convenience methods implemented by converting from an S1Angle. static S1ChordAngle Radians(double radians); static S1ChordAngle Degrees(double degrees); static S1ChordAngle E5(int32 e5); static S1ChordAngle E6(int32 e6); static S1ChordAngle E7(int32 e7); // Construct an S1ChordAngle that is an upper bound on the given S1Angle, // i.e. such that FastUpperBoundFrom(x).ToAngle() >= x. Unlike the S1Angle // constructor above, this method is very fast, and the bound is accurate to // within 1% for distances up to about 3100km on the Earth's surface. static S1ChordAngle FastUpperBoundFrom(S1Angle angle); // Construct an S1ChordAngle from the squared chord length. Note that the // argument is automatically clamped to a maximum of 4.0 to handle possible // roundoff errors. The argument must be non-negative. static S1ChordAngle FromLength2(double length2); // Converts to an S1Angle. 
Can be used just like an S1Angle constructor: // // S1ChordAngle x = ...; // return S1Angle(x); // // Infinity() is converted to S1Angle::Infinity(), and Negative() is // converted to an unspecified negative S1Angle. // // Note that the conversion uses trigonometric functions and therefore // should be avoided in inner loops. explicit operator S1Angle() const; // Converts to an S1Angle (equivalent to the operator above). S1Angle ToAngle() const; // Convenience methods implemented by calling ToAngle() first. Note that // because of the S1Angle conversion these methods are relatively expensive // (despite their lowercase names), so the results should be cached if they // are needed inside loops. double radians() const; double degrees() const; int32 e5() const; int32 e6() const; int32 e7() const; // All operators and functions are declared here so that we can put them all // in one place. (The compound assignment operators must be put here.) // Comparison operators. friend bool operator==(S1ChordAngle x, S1ChordAngle y); friend bool operator!=(S1ChordAngle x, S1ChordAngle y); friend bool operator<(S1ChordAngle x, S1ChordAngle y); friend bool operator>(S1ChordAngle x, S1ChordAngle y); friend bool operator<=(S1ChordAngle x, S1ChordAngle y); friend bool operator>=(S1ChordAngle x, S1ChordAngle y); // Comparison predicates. bool is_zero() const; bool is_negative() const; bool is_infinity() const; bool is_special() const; // Negative or infinity. // Only addition and subtraction of S1ChordAngles is supported. These // methods add or subtract the corresponding S1Angles, and clamp the result // to the range [0, Pi]. Both arguments must be non-negative and // non-infinite. // // REQUIRES: !a.is_special() && !b.is_special() friend S1ChordAngle operator+(S1ChordAngle a, S1ChordAngle b); friend S1ChordAngle operator-(S1ChordAngle a, S1ChordAngle b); S1ChordAngle& operator+=(S1ChordAngle a); S1ChordAngle& operator-=(S1ChordAngle a); // Trigonmetric functions. It is more accurate and efficient to call these // rather than first converting to an S1Angle. friend double sin(S1ChordAngle a); friend double cos(S1ChordAngle a); friend double tan(S1ChordAngle a); // Returns sin(a)**2, but computed more efficiently. friend double sin2(S1ChordAngle a); // The squared length of the chord. (Most clients will not need this.) double length2() const { return length2_; } // Returns the smallest representable S1ChordAngle larger than this object. // This can be used to convert a "<" comparison to a "<=" comparison. For // example: // // S2ClosestEdgeQuery query(...); // S1ChordAngle limit = ...; // if (query.IsDistanceLess(target, limit.Successor())) { // // Distance to "target" is less than or equal to "limit". // } // // Note the following special cases: // Negative().Successor() == Zero() // Straight().Successor() == Infinity() // Infinity().Successor() == Infinity() S1ChordAngle Successor() const; // Like Successor(), but returns the largest representable S1ChordAngle less // than this object. // // Note the following special cases: // Infinity().Predecessor() == Straight() // Zero().Predecessor() == Negative() // Negative().Predecessor() == Negative() S1ChordAngle Predecessor() const; // Returns a new S1ChordAngle that has been adjusted by the given error // bound (which can be positive or negative). "error" should be the value // returned by one of the error bound methods below. 
For example: // S1ChordAngle a(x, y); // S1ChordAngle a1 = a.PlusError(a.GetS2PointConstructorMaxError()); S1ChordAngle PlusError(double error) const; // Return the maximum error in length2() for the S1ChordAngle(x, y) // constructor, assuming that "x" and "y" are normalized to within the // bounds guaranteed by S2Point::Normalize(). (The error is defined with // respect to the true distance after the points are projected to lie // exactly on the sphere.) double GetS2PointConstructorMaxError() const; // Return the maximum error in length2() for the S1Angle constructor. double GetS1AngleConstructorMaxError() const; // Return true if the internal representation is valid. Negative() and // Infinity() are both considered valid. bool is_valid() const; // When S1ChordAngle is used as a key in one of the absl::btree container // types, indicate that linear rather than binary search should be used. // This is much faster when the comparison function is cheap. typedef std::true_type absl_btree_prefer_linear_node_search; private: // S1ChordAngles are represented by the squared chord length, which can // range from 0 to 4. Infinity() uses an infinite squared length. explicit S1ChordAngle(double length2) : length2_(length2) { S2_DCHECK(is_valid()); } double length2_; }; ////////////////// Implementation details follow //////////////////// inline S1ChordAngle::S1ChordAngle(const S2Point& x, const S2Point& y) { S2_DCHECK(S2::IsUnitLength(x)); S2_DCHECK(S2::IsUnitLength(y)); // The squared distance may slightly exceed 4.0 due to roundoff errors. // The maximum error in the result is 2 * DBL_EPSILON * length2_. length2_ = std::min(4.0, (x - y).Norm2()); S2_DCHECK(is_valid()); } inline S1ChordAngle S1ChordAngle::FromLength2(double length2) { return S1ChordAngle(std::min(4.0, length2)); } inline S1ChordAngle S1ChordAngle::Zero() { return S1ChordAngle(0); } inline S1ChordAngle S1ChordAngle::Right() { return S1ChordAngle(2); } inline S1ChordAngle S1ChordAngle::Straight() { return S1ChordAngle(4); } inline S1ChordAngle S1ChordAngle::Infinity() { return S1ChordAngle(std::numeric_limits::infinity()); } inline S1ChordAngle S1ChordAngle::Negative() { return S1ChordAngle(-1); } inline S1ChordAngle S1ChordAngle::Radians(double radians) { return S1ChordAngle(S1Angle::Radians(radians)); } inline S1ChordAngle S1ChordAngle::Degrees(double degrees) { return S1ChordAngle(S1Angle::Degrees(degrees)); } inline S1ChordAngle S1ChordAngle::E5(int32 e5) { return S1ChordAngle(S1Angle::E5(e5)); } inline S1ChordAngle S1ChordAngle::E6(int32 e6) { return S1ChordAngle(S1Angle::E6(e6)); } inline S1ChordAngle S1ChordAngle::E7(int32 e7) { return S1ChordAngle(S1Angle::E7(e7)); } inline S1ChordAngle S1ChordAngle::FastUpperBoundFrom(S1Angle angle) { // This method uses the distance along the surface of the sphere as an upper // bound on the distance through the sphere's interior. return S1ChordAngle::FromLength2(angle.radians() * angle.radians()); } inline S1ChordAngle::operator S1Angle() const { return ToAngle(); } inline double S1ChordAngle::radians() const { return ToAngle().radians(); } inline double S1ChordAngle::degrees() const { return ToAngle().degrees(); } inline int32 S1ChordAngle::e5() const { return ToAngle().e5(); } inline int32 S1ChordAngle::e6() const { return ToAngle().e6(); } inline int32 S1ChordAngle::e7() const { return ToAngle().e7(); } inline bool S1ChordAngle::is_zero() const { return length2_ == 0; } inline bool S1ChordAngle::is_negative() const { // TODO(ericv): Consider stricter check here -- only allow Negative(). 
return length2_ < 0; } inline bool S1ChordAngle::is_infinity() const { return length2_ == std::numeric_limits::infinity(); } inline bool S1ChordAngle::is_special() const { return is_negative() || is_infinity(); } inline bool operator==(S1ChordAngle x, S1ChordAngle y) { return x.length2() == y.length2(); } inline bool operator!=(S1ChordAngle x, S1ChordAngle y) { return x.length2() != y.length2(); } inline bool operator<(S1ChordAngle x, S1ChordAngle y) { return x.length2() < y.length2(); } inline bool operator>(S1ChordAngle x, S1ChordAngle y) { return x.length2() > y.length2(); } inline bool operator<=(S1ChordAngle x, S1ChordAngle y) { return x.length2() <= y.length2(); } inline bool operator>=(S1ChordAngle x, S1ChordAngle y) { return x.length2() >= y.length2(); } inline S1ChordAngle& S1ChordAngle::operator+=(S1ChordAngle a) { return (*this = *this + a); } inline S1ChordAngle& S1ChordAngle::operator-=(S1ChordAngle a) { return (*this = *this - a); } // Outputs the chord angle as the equivalent S1Angle. std::ostream& operator<<(std::ostream& os, S1ChordAngle a); #endif // S2_S1CHORD_ANGLE_H_ s2geometry-0.10.0/src/s2/s1chord_angle_test.cc000066400000000000000000000214011422156367100210670ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #include "s2/s1chord_angle.h" #include #include #include #include "s2/s1angle.h" #include "s2/s2edge_distances.h" #include "s2/s2predicates.h" #include "s2/s2testing.h" using std::numeric_limits; TEST(S1ChordAngle, DefaultConstructor) { // Check that the default constructor returns an angle of 0. 
S1ChordAngle a; EXPECT_EQ(S1ChordAngle::Zero(), a); } TEST(S1ChordAngle, TwoPointConstructor) { for (int iter = 0; iter < 100; ++iter) { S2Point x, y, z; S2Testing::GetRandomFrame(&x, &y, &z); EXPECT_EQ(S1Angle::Zero(), S1Angle(S1ChordAngle(z, z))); EXPECT_NEAR(M_PI, S1ChordAngle(-z, z).radians(), 1e-7); EXPECT_DOUBLE_EQ(M_PI_2, S1ChordAngle(x, z).radians()); S2Point w = (y + z).Normalize(); EXPECT_DOUBLE_EQ(M_PI_4, S1ChordAngle(w, z).radians()); } } TEST(S1ChordAngle, FromLength2) { EXPECT_EQ(0, S1ChordAngle::FromLength2(0).degrees()); EXPECT_DOUBLE_EQ(60, S1ChordAngle::FromLength2(1).degrees()); EXPECT_DOUBLE_EQ(90, S1ChordAngle::FromLength2(2).degrees()); EXPECT_EQ(180, S1ChordAngle::FromLength2(4).degrees()); EXPECT_EQ(180, S1ChordAngle::FromLength2(5).degrees()); } TEST(S1ChordAngle, Zero) { EXPECT_EQ(S1Angle::Zero(), S1Angle(S1ChordAngle::Zero())); } TEST(S1ChordAngle, Right) { EXPECT_DOUBLE_EQ(90, S1ChordAngle::Right().degrees()); } TEST(S1ChordAngle, Straight) { EXPECT_EQ(S1Angle::Degrees(180), S1Angle(S1ChordAngle::Straight())); } TEST(S1ChordAngle, Infinity) { EXPECT_LT(S1ChordAngle::Straight(), S1ChordAngle::Infinity()); EXPECT_EQ(S1ChordAngle::Infinity(), S1ChordAngle::Infinity()); EXPECT_EQ(S1Angle::Infinity(), S1Angle(S1ChordAngle::Infinity())); } TEST(S1ChordAngle, Negative) { EXPECT_LT(S1ChordAngle::Negative(), S1ChordAngle::Zero()); EXPECT_EQ(S1ChordAngle::Negative(), S1ChordAngle::Negative()); EXPECT_LT(S1ChordAngle::Negative().ToAngle(), S1Angle::Zero()); } TEST(S1ChordAngle, Predicates) { EXPECT_TRUE(S1ChordAngle::Zero().is_zero()); EXPECT_FALSE(S1ChordAngle::Zero().is_negative()); EXPECT_FALSE(S1ChordAngle::Zero().is_special()); EXPECT_FALSE(S1ChordAngle::Straight().is_special()); EXPECT_TRUE(S1ChordAngle::Negative().is_negative()); EXPECT_TRUE(S1ChordAngle::Negative().is_special()); EXPECT_TRUE(S1ChordAngle::Infinity().is_infinity()); EXPECT_TRUE(S1ChordAngle::Infinity().is_special()); } TEST(S1ChordAngle, ToFromS1Angle) { EXPECT_EQ(0, S1ChordAngle(S1Angle::Zero()).radians()); EXPECT_EQ(4, S1ChordAngle(S1Angle::Radians(M_PI)).length2()); EXPECT_EQ(M_PI, S1ChordAngle(S1Angle::Radians(M_PI)).radians()); EXPECT_EQ(S1Angle::Infinity(), S1Angle(S1ChordAngle(S1Angle::Infinity()))); EXPECT_LT(S1ChordAngle(S1Angle::Radians(-1)).radians(), 0); EXPECT_DOUBLE_EQ(1.0, S1ChordAngle(S1Angle::Radians(1.0)).radians()); } TEST(S1ChordAngle, Successor) { EXPECT_EQ(S1ChordAngle::Zero(), S1ChordAngle::Negative().Successor()); EXPECT_EQ(S1ChordAngle::Infinity(), S1ChordAngle::Straight().Successor()); EXPECT_EQ(S1ChordAngle::Infinity(), S1ChordAngle::Infinity().Successor()); S1ChordAngle x = S1ChordAngle::Negative(); for (int i = 0; i < 10; ++i) { EXPECT_LT(x, x.Successor()); x = x.Successor(); } } TEST(S1ChordAngle, Predecessor) { EXPECT_EQ(S1ChordAngle::Straight(), S1ChordAngle::Infinity().Predecessor()); EXPECT_EQ(S1ChordAngle::Negative(), S1ChordAngle::Zero().Predecessor()); EXPECT_EQ(S1ChordAngle::Negative(), S1ChordAngle::Negative().Predecessor()); S1ChordAngle x = S1ChordAngle::Infinity(); for (int i = 0; i < 10; ++i) { EXPECT_GT(x, x.Predecessor()); x = x.Predecessor(); } } TEST(S1ChordAngle, Arithmetic) { S1ChordAngle zero = S1ChordAngle::Zero(); S1ChordAngle degree30 = S1ChordAngle::Degrees(30); S1ChordAngle degree60 = S1ChordAngle::Degrees(60); S1ChordAngle degree90 = S1ChordAngle::Degrees(90); S1ChordAngle degree120 = S1ChordAngle::Degrees(120); S1ChordAngle degree180 = S1ChordAngle::Straight(); EXPECT_EQ(0, (zero + zero).degrees()); EXPECT_EQ(0, (zero - zero).degrees()); 
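  // Differences that would otherwise be negative are clamped to zero, and
  // sums exceeding 180 degrees are clamped to 180, per the contract of
  // operator+ and operator- in s1chord_angle.h.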
EXPECT_EQ(0, (degree60 - degree60).degrees()); EXPECT_EQ(0, (degree180 - degree180).degrees()); EXPECT_EQ(0, (zero - degree60).degrees()); EXPECT_EQ(0, (degree30 - degree90).degrees()); EXPECT_DOUBLE_EQ(60, (degree60 + zero).degrees()); EXPECT_DOUBLE_EQ(60, (degree60 - zero).degrees()); EXPECT_DOUBLE_EQ(60, (zero + degree60).degrees()); EXPECT_DOUBLE_EQ(90, (degree30 + degree60).degrees()); EXPECT_DOUBLE_EQ(90, (degree60 + degree30).degrees()); EXPECT_DOUBLE_EQ(60, (degree90 - degree30).degrees()); EXPECT_DOUBLE_EQ(30, (degree90 - degree60).degrees()); EXPECT_EQ(180, (degree180 + zero).degrees()); EXPECT_EQ(180, (degree180 - zero).degrees()); EXPECT_EQ(180, (degree90 + degree90).degrees()); EXPECT_EQ(180, (degree120 + degree90).degrees()); EXPECT_EQ(180, (degree120 + degree120).degrees()); EXPECT_EQ(180, (degree30 + degree180).degrees()); EXPECT_EQ(180, (degree180 + degree180).degrees()); } TEST(S1ChordAngle, ArithmeticPrecision) { // Verifies that S1ChordAngle is capable of adding and subtracting angles // extremely accurately up to Pi/2 radians. (Accuracy continues to be good // well beyond this value but degrades as angles approach Pi.) S1ChordAngle kEps = S1ChordAngle::Radians(1e-15); S1ChordAngle k90 = S1ChordAngle::Right(); S1ChordAngle k90MinusEps = k90 - kEps; S1ChordAngle k90PlusEps = k90 + kEps; double kMaxError = 2 * DBL_EPSILON; EXPECT_NEAR(k90MinusEps.radians(), M_PI_2 - kEps.radians(), kMaxError); EXPECT_NEAR(k90PlusEps.radians(), M_PI_2 + kEps.radians(), kMaxError); EXPECT_NEAR((k90 - k90MinusEps).radians(), kEps.radians(), kMaxError); EXPECT_NEAR((k90PlusEps - k90).radians(), kEps.radians(), kMaxError); EXPECT_NEAR((k90MinusEps + kEps).radians(), M_PI_2, kMaxError); } TEST(S1ChordAngle, Trigonometry) { static const int kIters = 20; for (int iter = 0; iter <= kIters; ++iter) { double radians = M_PI * iter / kIters; S1ChordAngle angle(S1Angle::Radians(radians)); EXPECT_NEAR(sin(radians), sin(angle), 1e-15); EXPECT_NEAR(cos(radians), cos(angle), 1e-15); // Since the tan(x) is unbounded near Pi/4, we map the result back to an // angle before comparing. (The assertion is that the result is equal to // the tangent of a nearby angle.) EXPECT_NEAR(atan(tan(radians)), atan(tan(angle)), 1e-15); } // Unlike S1Angle, S1ChordAngle can represent 90 and 180 degrees exactly. S1ChordAngle angle90 = S1ChordAngle::FromLength2(2); S1ChordAngle angle180 = S1ChordAngle::FromLength2(4); EXPECT_EQ(1, sin(angle90)); EXPECT_EQ(0, cos(angle90)); EXPECT_EQ(numeric_limits::infinity(), tan(angle90)); EXPECT_EQ(0, sin(angle180)); EXPECT_EQ(-1, cos(angle180)); EXPECT_EQ(0, tan(angle180)); } TEST(S1ChordAngle, PlusError) { EXPECT_EQ(S1ChordAngle::Negative(), S1ChordAngle::Negative().PlusError(5)); EXPECT_EQ(S1ChordAngle::Infinity(), S1ChordAngle::Infinity().PlusError(-5)); EXPECT_EQ(S1ChordAngle::Straight(), S1ChordAngle::Straight().PlusError(5)); EXPECT_EQ(S1ChordAngle::Zero(), S1ChordAngle::Zero().PlusError(-5)); EXPECT_EQ(S1ChordAngle::FromLength2(1.25), S1ChordAngle::FromLength2(1).PlusError(0.25)); EXPECT_EQ(S1ChordAngle::FromLength2(0.75), S1ChordAngle::FromLength2(1).PlusError(-0.25)); } TEST(S1ChordAngle, GetS2PointConstructorMaxError) { // Check that the error bound returned by GetS2PointConstructorMaxError() is // large enough. auto& rnd = S2Testing::rnd; for (int iter = 0; iter < 100000; ++iter) { rnd.Reset(iter); // Easier to reproduce a specific case. 
S2Point x = S2Testing::RandomPoint(); S2Point y = S2Testing::RandomPoint(); if (rnd.OneIn(10)) { // Occasionally test a point pair that is nearly identical or antipodal. S1Angle r = S1Angle::Radians(1e-15 * rnd.RandDouble()); y = S2::GetPointOnLine(x, y, r); if (rnd.OneIn(2)) y = -y; } S1ChordAngle dist = S1ChordAngle(x, y); double error = dist.GetS2PointConstructorMaxError(); EXPECT_LE(s2pred::CompareDistance(x, y, dist.PlusError(error)), 0) << "angle=" << S1Angle(dist) << ", iter=" << iter; EXPECT_GE(s2pred::CompareDistance(x, y, dist.PlusError(-error)), 0) << "angle=" << S1Angle(dist) << ", iter=" << iter; } } s2geometry-0.10.0/src/s2/s1interval.cc000066400000000000000000000240051422156367100174120ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/s1interval.h" #include #include #include #include "s2/base/logging.h" using std::fabs; using std::max; S1Interval S1Interval::FromPoint(double p) { if (p == -M_PI) p = M_PI; return S1Interval(p, p, ARGS_CHECKED); } double S1Interval::GetCenter() const { double center = 0.5 * (lo() + hi()); if (!is_inverted()) return center; // Return the center in the range (-Pi, Pi]. return (center <= 0) ? (center + M_PI) : (center - M_PI); } double S1Interval::GetLength() const { double length = hi() - lo(); if (length >= 0) return length; length += 2 * M_PI; // Empty intervals have a negative length. return (length > 0) ? length : -1; } S1Interval S1Interval::Complement() const { if (lo() == hi()) return Full(); // Singleton. return S1Interval(hi(), lo(), ARGS_CHECKED); // Handles empty and full. } double S1Interval::GetComplementCenter() const { if (lo() != hi()) { return Complement().GetCenter(); } else { // Singleton. return (hi() <= 0) ? (hi() + M_PI) : (hi() - M_PI); } } bool S1Interval::FastContains(double p) const { if (is_inverted()) { return (p >= lo() || p <= hi()) && !is_empty(); } else { return p >= lo() && p <= hi(); } } bool S1Interval::Contains(double p) const { // Works for empty, full, and singleton intervals. S2_DCHECK_LE(fabs(p), M_PI); if (p == -M_PI) p = M_PI; return FastContains(p); } bool S1Interval::InteriorContains(double p) const { // Works for empty, full, and singleton intervals. S2_DCHECK_LE(fabs(p), M_PI); if (p == -M_PI) p = M_PI; if (is_inverted()) { return p > lo() || p < hi(); } else { return (p > lo() && p < hi()) || is_full(); } } bool S1Interval::Contains(const S1Interval& y) const { // It might be helpful to compare the structure of these tests to // the simpler Contains(double) method above. 
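  // A non-empty inverted interval contains every point except those in the
  // open gap (hi(), lo()), so a non-inverted "y" is contained iff it lies
  // entirely within [lo(), Pi] or entirely within [-Pi, hi()].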
if (is_inverted()) { if (y.is_inverted()) return y.lo() >= lo() && y.hi() <= hi(); return (y.lo() >= lo() || y.hi() <= hi()) && !is_empty(); } else { if (y.is_inverted()) return is_full() || y.is_empty(); return y.lo() >= lo() && y.hi() <= hi(); } } bool S1Interval::InteriorContains(const S1Interval& y) const { if (is_inverted()) { if (!y.is_inverted()) return y.lo() > lo() || y.hi() < hi(); return (y.lo() > lo() && y.hi() < hi()) || y.is_empty(); } else { if (y.is_inverted()) return is_full() || y.is_empty(); return (y.lo() > lo() && y.hi() < hi()) || is_full(); } } bool S1Interval::Intersects(const S1Interval& y) const { if (is_empty() || y.is_empty()) return false; if (is_inverted()) { // Every non-empty inverted interval contains Pi. return y.is_inverted() || y.lo() <= hi() || y.hi() >= lo(); } else { if (y.is_inverted()) return y.lo() <= hi() || y.hi() >= lo(); return y.lo() <= hi() && y.hi() >= lo(); } } bool S1Interval::InteriorIntersects(const S1Interval& y) const { if (is_empty() || y.is_empty() || lo() == hi()) return false; if (is_inverted()) { return y.is_inverted() || y.lo() < hi() || y.hi() > lo(); } else { if (y.is_inverted()) return y.lo() < hi() || y.hi() > lo(); return (y.lo() < hi() && y.hi() > lo()) || is_full(); } } inline static double PositiveDistance(double a, double b) { // Compute the distance from "a" to "b" in the range [0, 2*Pi). // This is equivalent to (remainder(b - a - M_PI, 2 * M_PI) + M_PI), // except that it is more numerically stable (it does not lose // precision for very small positive distances). double d = b - a; if (d >= 0) return d; // We want to ensure that if b == Pi and a == (-Pi + eps), // the return result is approximately 2*Pi and not zero. return (b + M_PI) - (a - M_PI); } double S1Interval::GetDirectedHausdorffDistance(const S1Interval& y) const { if (y.Contains(*this)) return 0.0; // this includes the case *this is empty if (y.is_empty()) return M_PI; // maximum possible distance on S1 double y_complement_center = y.GetComplementCenter(); if (Contains(y_complement_center)) { return PositiveDistance(y.hi(), y_complement_center); } else { // The Hausdorff distance is realized by either two hi() endpoints or two // lo() endpoints, whichever is farther apart. double hi_hi = S1Interval(y.hi(), y_complement_center).Contains(hi()) ? PositiveDistance(y.hi(), hi()) : 0; double lo_lo = S1Interval(y_complement_center, y.lo()).Contains(lo()) ? PositiveDistance(lo(), y.lo()) : 0; S2_DCHECK(hi_hi > 0 || lo_lo > 0); return max(hi_hi, lo_lo); } } void S1Interval::AddPoint(double p) { S2_DCHECK_LE(fabs(p), M_PI); if (p == -M_PI) p = M_PI; if (FastContains(p)) return; if (is_empty()) { set_hi(p); set_lo(p); } else { // Compute distance from p to each endpoint. double dlo = PositiveDistance(p, lo()); double dhi = PositiveDistance(hi(), p); if (dlo < dhi) { set_lo(p); } else { set_hi(p); } // Adding a point can never turn a non-full interval into a full one. } } double S1Interval::Project(double p) const { S2_DCHECK(!is_empty()); S2_DCHECK_LE(fabs(p), M_PI); if (p == -M_PI) p = M_PI; if (FastContains(p)) return p; // Compute distance from p to each endpoint. double dlo = PositiveDistance(p, lo()); double dhi = PositiveDistance(hi(), p); return (dlo < dhi) ? 
lo() : hi(); } S1Interval S1Interval::FromPointPair(double p1, double p2) { S2_DCHECK_LE(fabs(p1), M_PI); S2_DCHECK_LE(fabs(p2), M_PI); if (p1 == -M_PI) p1 = M_PI; if (p2 == -M_PI) p2 = M_PI; if (PositiveDistance(p1, p2) <= M_PI) { return S1Interval(p1, p2, ARGS_CHECKED); } else { return S1Interval(p2, p1, ARGS_CHECKED); } } S1Interval S1Interval::Expanded(double margin) const { if (margin >= 0) { if (is_empty()) return *this; // Check whether this interval will be full after expansion, allowing // for a 1-bit rounding error when computing each endpoint. if (GetLength() + 2 * margin + 2 * DBL_EPSILON >= 2 * M_PI) return Full(); } else { if (is_full()) return *this; // Check whether this interval will be empty after expansion, allowing // for a 1-bit rounding error when computing each endpoint. if (GetLength() + 2 * margin - 2 * DBL_EPSILON <= 0) return Empty(); } S1Interval result(remainder(lo() - margin, 2*M_PI), remainder(hi() + margin, 2*M_PI)); if (result.lo() <= -M_PI) result.set_lo(M_PI); return result; } S1Interval S1Interval::Union(const S1Interval& y) const { // The y.is_full() case is handled correctly in all cases by the code // below, but can follow three separate code paths depending on whether // this interval is inverted, is non-inverted but contains Pi, or neither. if (y.is_empty()) return *this; if (FastContains(y.lo())) { if (FastContains(y.hi())) { // Either this interval contains y, or the union of the two // intervals is the Full() interval. if (Contains(y)) return *this; // is_full() code path return Full(); } return S1Interval(lo(), y.hi(), ARGS_CHECKED); } if (FastContains(y.hi())) return S1Interval(y.lo(), hi(), ARGS_CHECKED); // This interval contains neither endpoint of y. This means that either y // contains all of this interval, or the two intervals are disjoint. if (is_empty() || y.FastContains(lo())) return y; // Check which pair of endpoints are closer together. double dlo = PositiveDistance(y.hi(), lo()); double dhi = PositiveDistance(hi(), y.lo()); if (dlo < dhi) { return S1Interval(y.lo(), hi(), ARGS_CHECKED); } else { return S1Interval(lo(), y.hi(), ARGS_CHECKED); } } S1Interval S1Interval::Intersection(const S1Interval& y) const { // The y.is_full() case is handled correctly in all cases by the code // below, but can follow three separate code paths depending on whether // this interval is inverted, is non-inverted but contains Pi, or neither. if (y.is_empty()) return Empty(); if (FastContains(y.lo())) { if (FastContains(y.hi())) { // Either this interval contains y, or the region of intersection // consists of two disjoint subintervals. In either case, we want // to return the shorter of the two original intervals. if (y.GetLength() < GetLength()) return y; // is_full() code path return *this; } return S1Interval(y.lo(), hi(), ARGS_CHECKED); } if (FastContains(y.hi())) return S1Interval(lo(), y.hi(), ARGS_CHECKED); // This interval contains neither endpoint of y. This means that either y // contains all of this interval, or the two intervals are disjoint. if (y.FastContains(lo())) return *this; // is_empty() okay here S2_DCHECK(!Intersects(y)); return Empty(); } bool S1Interval::ApproxEquals(const S1Interval& y, double max_error) const { // Full and empty intervals require special cases because the "endpoints" // are considered to be positioned arbitrarily. 
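// For example, a singleton interval is approximately equal to the empty
// interval (in both directions), since its length (zero) is at most
// 2 * max_error:
//
//   S1Interval(1, 1).ApproxEquals(S1Interval::Empty());  // true
//   S1Interval::Empty().ApproxEquals(S1Interval(1, 1));  // true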
if (is_empty()) return y.GetLength() <= 2 * max_error; if (y.is_empty()) return GetLength() <= 2 * max_error; if (is_full()) return y.GetLength() >= 2 * (M_PI - max_error); if (y.is_full()) return GetLength() >= 2 * (M_PI - max_error); // The purpose of the last test below is to verify that moving the endpoints // does not invert the interval, e.g. [-1e20, 1e20] vs. [1e20, -1e20]. return (fabs(remainder(y.lo() - lo(), 2 * M_PI)) <= max_error && fabs(remainder(y.hi() - hi(), 2 * M_PI)) <= max_error && fabs(GetLength() - y.GetLength()) <= 2 * max_error); } s2geometry-0.10.0/src/s2/s1interval.h000066400000000000000000000252071422156367100172610ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_S1INTERVAL_H_ #define S2_S1INTERVAL_H_ #include #include #include #include #include "s2/base/logging.h" #include "s2/_fp_contract_off.h" #include "s2/util/math/vector.h" // IWYU pragma: export // An S1Interval represents a closed interval on a unit circle (also known // as a 1-dimensional sphere). It is capable of representing the empty // interval (containing no points), the full interval (containing all // points), and zero-length intervals (containing a single point). // // Points are represented by the angle they make with the positive x-axis in // the range [-Pi, Pi]. An interval is represented by its lower and upper // bounds (both inclusive, since the interval is closed). The lower bound may // be greater than the upper bound, in which case the interval is "inverted" // (i.e. it passes through the point (-1, 0)). // // Note that the point (-1, 0) has two valid representations, Pi and -Pi. // The normalized representation of this point internally is Pi, so that // endpoints of normal intervals are in the range (-Pi, Pi]. However, we // take advantage of the point -Pi to construct two special intervals: // the Full() interval is [-Pi, Pi], and the Empty() interval is [Pi, -Pi]. // // This class is intended to be copied by value as desired. It uses // the default copy constructor and assignment operator. class S1Interval { public: // Constructor. Both endpoints must be in the range -Pi to Pi inclusive. // The value -Pi is converted internally to Pi except for the Full() // and Empty() intervals. S1Interval(double lo, double hi); // The default constructor creates an empty interval. // // Note: Don't construct an interval using the default constructor and // set_lo()/set_hi(). If you need to set both endpoints, use the // constructor above: // // lng_bounds_ = S1Interval(lng_lo, lng_hi); S1Interval(); // Returns the empty interval. static S1Interval Empty(); // Returns the full interval. static S1Interval Full(); // Convenience method to construct an interval containing a single point. static S1Interval FromPoint(double p); // Convenience method to construct the minimal interval containing // the two given points. 
This is equivalent to starting with an empty // interval and calling AddPoint() twice, but it is more efficient. static S1Interval FromPointPair(double p1, double p2); // Accessors methods. double lo() const { return bounds_[0]; } double hi() const { return bounds_[1]; } // Methods that allow the S1Interval to be accessed as a vector. (The // recommended style is to use lo() and hi() whenever possible, but these // methods are useful when the endpoint to be selected is not constant.) // // Only const versions of these methods are provided, since S1Interval // has invariants that must be maintained after each update. double operator[](int i) const { return bounds_[i]; } const Vector2_d& bounds() const { return bounds_; } // An interval is valid if neither bound exceeds Pi in absolute value, // and the value -Pi appears only in the Empty() and Full() intervals. bool is_valid() const; // Return true if the interval contains all points on the unit circle. bool is_full() const { return lo() == -M_PI && hi() == M_PI; } // Return true if the interval is empty, i.e. it contains no points. bool is_empty() const { return lo() == M_PI && hi() == -M_PI; } // Return true if lo() > hi(). (This is true for empty intervals.) bool is_inverted() const { return lo() > hi(); } // Return the midpoint of the interval. For full and empty intervals, // the result is arbitrary. double GetCenter() const; // Return the length of the interval. The length of an empty interval // is negative. double GetLength() const; // Return the complement of the interior of the interval. An interval and // its complement have the same boundary but do not share any interior // values. The complement operator is not a bijection, since the complement // of a singleton interval (containing a single value) is the same as the // complement of an empty interval. S1Interval Complement() const; // Return the midpoint of the complement of the interval. For full and empty // intervals, the result is arbitrary. For a singleton interval (containing a // single point), the result is its antipodal point on S1. double GetComplementCenter() const; // Return true if the interval (which is closed) contains the point 'p'. bool Contains(double p) const; // Return true if the interior of the interval contains the point 'p'. bool InteriorContains(double p) const; // Return true if the interval contains the given interval 'y'. // Works for empty, full, and singleton intervals. bool Contains(const S1Interval& y) const; // Returns true if the interior of this interval contains the entire // interval 'y'. Note that x.InteriorContains(x) is true only when // x is the empty or full interval, and x.InteriorContains(S1Interval(p,p)) // is equivalent to x.InteriorContains(p). bool InteriorContains(const S1Interval& y) const; // Return true if the two intervals contain any points in common. // Note that the point +/-Pi has two representations, so the intervals // [-Pi,-3] and [2,Pi] intersect, for example. bool Intersects(const S1Interval& y) const; // Return true if the interior of this interval contains any point of the // interval 'y' (including its boundary). Works for empty, full, and // singleton intervals. bool InteriorIntersects(const S1Interval& y) const; // Return the Hausdorff distance to the given interval 'y'. For two // S1Intervals x and y, this distance is defined by // h(x, y) = max_{p in x} min_{q in y} d(p, q), // where d(.,.) is measured along S1. 
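//
// For example (an illustrative sketch, with angles in radians):
//   S1Interval(0, 1).GetDirectedHausdorffDistance(S1Interval(0, 3));  // == 0
//   S1Interval(0, 3).GetDirectedHausdorffDistance(S1Interval(0, 1));  // == 2
// The second distance is attained at p == 3, whose closest point in [0, 1]
// is q == 1.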
double GetDirectedHausdorffDistance(const S1Interval& y) const; // Expand the interval by the minimum amount necessary so that it // contains the given point "p" (an angle in the range [-Pi, Pi]). void AddPoint(double p); // Return the closest point in the interval to the given point "p". // The interval must be non-empty. double Project(double p) const; // Return an interval that has been expanded on each side by the given // distance "margin". If "margin" is negative, then shrink the interval on // each side by "margin" instead. The resulting interval may be empty or // full. Any expansion (positive or negative) of a full interval remains // full, and any expansion of an empty interval remains empty. S1Interval Expanded(double margin) const; // Return the smallest interval that contains this interval and the // given interval "y". S1Interval Union(const S1Interval& y) const; // Return the smallest interval that contains the intersection of this // interval with "y". Note that the region of intersection may // consist of two disjoint intervals. S1Interval Intersection(const S1Interval& y) const; // Return true if two intervals contains the same set of points. bool operator==(const S1Interval& y) const; // Return true if two intervals do not contain the same set of points. bool operator!=(const S1Interval& y) const; // Return true if this interval can be transformed into the given interval by // moving each endpoint by at most "max_error" (and without the endpoints // crossing, which would invert the interval). Empty and full intervals are // considered to start at an arbitrary point on the unit circle, thus any // interval with (length <= 2*max_error) matches the empty interval, and any // interval with (length >= 2*Pi - 2*max_error) matches the full interval. bool ApproxEquals(const S1Interval& y, double max_error = 1e-15) const; // Low-level methods to modify one endpoint of an existing S1Interval. // These methods should really be private because setting just one endpoint // can violate the invariants maintained by S1Interval. In particular: // // - It is not valid to call these methods on an Empty() or Full() // interval, since these intervals do not have any endpoints. // // - It is not allowed to set an endpoint to -Pi. (When these methods are // used internally, values of -Pi have already been normalized to Pi.) // // The preferred way to modify both endpoints of an interval is to use a // constructor, e.g. lng = S1Interval(lng_lo, lng_hi). void set_lo(double p); void set_hi(double p); private: enum ArgsChecked { ARGS_CHECKED }; // Internal constructor that assumes that both arguments are in the // correct range, i.e. normalization from -Pi to Pi is already done. S1Interval(double lo, double hi, ArgsChecked dummy); // Return true if the interval (which is closed) contains the point 'p'. // Skips the normalization of 'p' from -Pi to Pi. 
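// For example, for the singleton interval [Pi, Pi], Contains(-M_PI) returns
// true because -Pi is first normalized to Pi, whereas FastContains(-M_PI)
// would return false since no normalization is performed.  (This is an
// internal helper, so callers of the public API never see this difference.)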
bool FastContains(double p) const; Vector2_d bounds_; }; inline S1Interval::S1Interval(double lo, double hi) : bounds_(lo, hi) { if (lo == -M_PI && hi != M_PI) set_lo(M_PI); if (hi == -M_PI && lo != M_PI) set_hi(M_PI); S2_DCHECK(is_valid()); } inline S1Interval::S1Interval(double lo, double hi, ArgsChecked dummy) : bounds_(lo, hi) { S2_DCHECK(is_valid()); } inline S1Interval::S1Interval() : bounds_(M_PI, -M_PI) { } inline S1Interval S1Interval::Empty() { return S1Interval(); } inline S1Interval S1Interval::Full() { return S1Interval(-M_PI, M_PI, ARGS_CHECKED); } inline bool S1Interval::is_valid() const { return (std::fabs(lo()) <= M_PI && std::fabs(hi()) <= M_PI && !(lo() == -M_PI && hi() != M_PI) && !(hi() == -M_PI && lo() != M_PI)); } inline bool S1Interval::operator==(const S1Interval& y) const { return lo() == y.lo() && hi() == y.hi(); } inline bool S1Interval::operator!=(const S1Interval& y) const { return !operator==(y); } inline void S1Interval::set_lo(double p) { bounds_[0] = p; S2_DCHECK(is_valid()); } inline void S1Interval::set_hi(double p) { bounds_[1] = p; S2_DCHECK(is_valid()); } inline std::ostream& operator<<(std::ostream& os, const S1Interval& x) { return os << "[" << x.lo() << ", " << x.hi() << "]"; } #endif // S2_S1INTERVAL_H_ s2geometry-0.10.0/src/s2/s1interval_test.cc000066400000000000000000000502151422156367100204530ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/s1interval.h" #include #include #include using std::fabs; class S1IntervalTestBase : public testing::Test { public: // Create some standard intervals to use in the tests. These include the // empty and full intervals, intervals containing a single point, and // intervals spanning one or more "quadrants" which are numbered as follows: // quad1 == [0, Pi/2] // quad2 == [Pi/2, Pi] // quad3 == [-Pi, -Pi/2] // quad4 == [-Pi/2, 0] S1IntervalTestBase() : empty(S1Interval::Empty()), full(S1Interval::Full()), // Single-point intervals: zero(0, 0), pi2(M_PI_2, M_PI_2), pi(M_PI, M_PI), mipi(-M_PI, -M_PI), // Same as "pi" after normalization. mipi2(-M_PI_2, -M_PI_2), // Single quadrants: quad1(0, M_PI_2), quad2(M_PI_2, -M_PI), quad3(M_PI, -M_PI_2), quad4(-M_PI_2, 0), // Quadrant pairs: quad12(0, -M_PI), quad23(M_PI_2, -M_PI_2), quad34(-M_PI, 0), quad41(-M_PI_2, M_PI_2), // Quadrant triples: quad123(0, -M_PI_2), quad234(M_PI_2, 0), quad341(M_PI, M_PI_2), quad412(-M_PI_2, -M_PI), // Small intervals around the midpoints between quadrants, such that // the center of each interval is offset slightly CCW from the midpoint. 
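// For example, mid12 below spans [Pi/2 - 0.01, Pi/2 + 0.02], so its center
// (Pi/2 + 0.005) lies just CCW of the boundary between quadrants 1 and 2.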
mid12(M_PI_2 - 0.01, M_PI_2 + 0.02), mid23(M_PI - 0.01, -M_PI + 0.02), mid34(-M_PI_2 - 0.01, -M_PI_2 + 0.02), mid41(-0.01, 0.02) { } protected: const S1Interval empty, full; const S1Interval zero, pi2, pi, mipi, mipi2; const S1Interval quad1, quad2, quad3, quad4; const S1Interval quad12, quad23, quad34, quad41; const S1Interval quad123, quad234, quad341, quad412; const S1Interval mid12, mid23, mid34, mid41; }; TEST_F(S1IntervalTestBase, ConstructorsAndAccessors) { // Spot-check the constructors and accessors. EXPECT_EQ(quad12.lo(), 0); EXPECT_EQ(quad12.hi(), M_PI); EXPECT_EQ(quad34[0], M_PI); EXPECT_EQ(quad34[1], 0); EXPECT_EQ(quad34.bounds(), Vector2_d(M_PI, 0)); EXPECT_EQ(pi.lo(), M_PI); EXPECT_EQ(pi.hi(), M_PI); // Check that [-Pi, -Pi] is normalized to [Pi, Pi]. EXPECT_EQ(mipi.lo(), M_PI); EXPECT_EQ(mipi.hi(), M_PI); EXPECT_EQ(quad23.lo(), M_PI_2); EXPECT_EQ(quad23.hi(), -M_PI_2); // Check that the default S1Interval is identical to Empty(). S1Interval default_empty; EXPECT_TRUE(default_empty.is_valid()); EXPECT_TRUE(default_empty.is_empty()); EXPECT_EQ(empty.lo(), default_empty.lo()); EXPECT_EQ(empty.hi(), default_empty.hi()); } TEST_F(S1IntervalTestBase, SimplePredicates) { // is_valid(), is_empty(), is_full(), is_inverted() EXPECT_TRUE(zero.is_valid() && !zero.is_empty() && !zero.is_full()); EXPECT_TRUE(empty.is_valid() && empty.is_empty() && !empty.is_full()); EXPECT_TRUE(empty.is_inverted()); EXPECT_TRUE(full.is_valid() && !full.is_empty() && full.is_full()); EXPECT_TRUE(!quad12.is_empty() && !quad12.is_full() && !quad12.is_inverted()); EXPECT_TRUE(!quad23.is_empty() && !quad23.is_full() && quad23.is_inverted()); EXPECT_TRUE(pi.is_valid() && !pi.is_empty() && !pi.is_inverted()); EXPECT_TRUE(mipi.is_valid() && !mipi.is_empty() && !mipi.is_inverted()); } TEST_F(S1IntervalTestBase, AlmostEmptyOrFull) { // Test that rounding errors don't cause intervals that are almost empty or // full to be considered empty or full. The following value is the greatest // representable value less than Pi. 
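// (In IEEE double precision the ulp of values in [2, 4) is 2 * DBL_EPSILON,
// so M_PI - 2 * DBL_EPSILON is the representable value immediately below
// M_PI.)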
const double kAlmostPi = M_PI - 2 * DBL_EPSILON; EXPECT_FALSE(S1Interval(-kAlmostPi, M_PI).is_full()); EXPECT_FALSE(S1Interval(-M_PI, kAlmostPi).is_full()); EXPECT_FALSE(S1Interval(M_PI, -kAlmostPi).is_empty()); EXPECT_FALSE(S1Interval(kAlmostPi, -M_PI).is_empty()); } TEST_F(S1IntervalTestBase, GetCenter) { EXPECT_EQ(quad12.GetCenter(), M_PI_2); EXPECT_DOUBLE_EQ(S1Interval(3.1, 2.9).GetCenter(), 3.0 - M_PI); EXPECT_DOUBLE_EQ(S1Interval(-2.9, -3.1).GetCenter(), M_PI - 3.0); EXPECT_DOUBLE_EQ(S1Interval(2.1, -2.1).GetCenter(), M_PI); EXPECT_EQ(pi.GetCenter(), M_PI); EXPECT_EQ(mipi.GetCenter(), M_PI); EXPECT_EQ(fabs(quad23.GetCenter()), M_PI); EXPECT_DOUBLE_EQ(quad123.GetCenter(), 0.75 * M_PI); } TEST_F(S1IntervalTestBase, GetLength) { EXPECT_EQ(quad12.GetLength(), M_PI); EXPECT_EQ(pi.GetLength(), 0); EXPECT_EQ(mipi.GetLength(), 0); EXPECT_DOUBLE_EQ(quad123.GetLength(), 1.5 * M_PI); EXPECT_EQ(fabs(quad23.GetLength()), M_PI); EXPECT_EQ(full.GetLength(), 2 * M_PI); EXPECT_LT(empty.GetLength(), 0); } TEST_F(S1IntervalTestBase, Complement) { EXPECT_TRUE(empty.Complement().is_full()); EXPECT_TRUE(full.Complement().is_empty()); EXPECT_TRUE(pi.Complement().is_full()); EXPECT_TRUE(mipi.Complement().is_full()); EXPECT_TRUE(zero.Complement().is_full()); EXPECT_TRUE(quad12.Complement().ApproxEquals(quad34)); EXPECT_TRUE(quad34.Complement().ApproxEquals(quad12)); EXPECT_TRUE(quad123.Complement().ApproxEquals(quad4)); } TEST_F(S1IntervalTestBase, Contains) { // Contains(double), InteriorContains(double) EXPECT_TRUE(!empty.Contains(0) && !empty.Contains(M_PI) && !empty.Contains(-M_PI)); EXPECT_TRUE(!empty.InteriorContains(M_PI) && !empty.InteriorContains(-M_PI)); EXPECT_TRUE(full.Contains(0) && full.Contains(M_PI) && full.Contains(-M_PI)); EXPECT_TRUE(full.InteriorContains(M_PI) && full.InteriorContains(-M_PI)); EXPECT_TRUE(quad12.Contains(0) && quad12.Contains(M_PI) && quad12.Contains(-M_PI)); EXPECT_TRUE(quad12.InteriorContains(M_PI_2) && !quad12.InteriorContains(0)); EXPECT_TRUE(!quad12.InteriorContains(M_PI) && !quad12.InteriorContains(-M_PI)); EXPECT_TRUE(quad23.Contains(M_PI_2) && quad23.Contains(-M_PI_2)); EXPECT_TRUE(quad23.Contains(M_PI) && quad23.Contains(-M_PI)); EXPECT_TRUE(!quad23.Contains(0)); EXPECT_TRUE(!quad23.InteriorContains(M_PI_2) && !quad23.InteriorContains(-M_PI_2)); EXPECT_TRUE(quad23.InteriorContains(M_PI) && quad23.InteriorContains(-M_PI)); EXPECT_TRUE(!quad23.InteriorContains(0)); EXPECT_TRUE(pi.Contains(M_PI) && pi.Contains(-M_PI) && !pi.Contains(0)); EXPECT_TRUE(!pi.InteriorContains(M_PI) && !pi.InteriorContains(-M_PI)); EXPECT_TRUE(mipi.Contains(M_PI) && mipi.Contains(-M_PI) && !mipi.Contains(0)); EXPECT_TRUE(!mipi.InteriorContains(M_PI) && !mipi.InteriorContains(-M_PI)); EXPECT_TRUE(zero.Contains(0) && !zero.InteriorContains(0)); } static void TestIntervalOps(const S1Interval& x, const S1Interval& y, const char* expected_relation, const S1Interval& expected_union, const S1Interval& expected_intersection) { // Test all of the interval operations on the given pair of intervals. // "expected_relation" is a sequence of "T" and "F" characters corresponding // to the expected results of Contains(), InteriorContains(), Intersects(), // and InteriorIntersects() respectively. 
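// For example, expected_relation == "FFTT" means that x does not contain y
// (neither fully nor in its interior), but the two intervals intersect and
// the interior of x intersects y.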
EXPECT_EQ(x.Contains(y), expected_relation[0] == 'T'); EXPECT_EQ(x.InteriorContains(y), expected_relation[1] == 'T'); EXPECT_EQ(x.Intersects(y), expected_relation[2] == 'T'); EXPECT_EQ(x.InteriorIntersects(y), expected_relation[3] == 'T'); // bounds() returns a const reference to a member variable, so we need to // make a copy when invoking it on a temporary object. EXPECT_EQ(Vector2_d(x.Union(y).bounds()), expected_union.bounds()); EXPECT_EQ(Vector2_d(x.Intersection(y).bounds()), expected_intersection.bounds()); EXPECT_EQ(x.Contains(y), x.Union(y) == x); EXPECT_EQ(x.Intersects(y), !x.Intersection(y).is_empty()); if (y.lo() == y.hi()) { S1Interval r = x; r.AddPoint(y.lo()); EXPECT_EQ(r.bounds(), expected_union.bounds()); } } TEST_F(S1IntervalTestBase, IntervalOps) { // Contains(S1Interval), InteriorContains(S1Interval), // Intersects(), InteriorIntersects(), Union(), Intersection() TestIntervalOps(empty, empty, "TTFF", empty, empty); TestIntervalOps(empty, full, "FFFF", full, empty); TestIntervalOps(empty, zero, "FFFF", zero, empty); TestIntervalOps(empty, pi, "FFFF", pi, empty); TestIntervalOps(empty, mipi, "FFFF", mipi, empty); TestIntervalOps(full, empty, "TTFF", full, empty); TestIntervalOps(full, full, "TTTT", full, full); TestIntervalOps(full, zero, "TTTT", full, zero); TestIntervalOps(full, pi, "TTTT", full, pi); TestIntervalOps(full, mipi, "TTTT", full, mipi); TestIntervalOps(full, quad12, "TTTT", full, quad12); TestIntervalOps(full, quad23, "TTTT", full, quad23); TestIntervalOps(zero, empty, "TTFF", zero, empty); TestIntervalOps(zero, full, "FFTF", full, zero); TestIntervalOps(zero, zero, "TFTF", zero, zero); TestIntervalOps(zero, pi, "FFFF", S1Interval(0, M_PI), empty); TestIntervalOps(zero, pi2, "FFFF", quad1, empty); TestIntervalOps(zero, mipi, "FFFF", quad12, empty); TestIntervalOps(zero, mipi2, "FFFF", quad4, empty); TestIntervalOps(zero, quad12, "FFTF", quad12, zero); TestIntervalOps(zero, quad23, "FFFF", quad123, empty); TestIntervalOps(pi2, empty, "TTFF", pi2, empty); TestIntervalOps(pi2, full, "FFTF", full, pi2); TestIntervalOps(pi2, zero, "FFFF", quad1, empty); TestIntervalOps(pi2, pi, "FFFF", S1Interval(M_PI_2, M_PI), empty); TestIntervalOps(pi2, pi2, "TFTF", pi2, pi2); TestIntervalOps(pi2, mipi, "FFFF", quad2, empty); TestIntervalOps(pi2, mipi2, "FFFF", quad23, empty); TestIntervalOps(pi2, quad12, "FFTF", quad12, pi2); TestIntervalOps(pi2, quad23, "FFTF", quad23, pi2); TestIntervalOps(pi, empty, "TTFF", pi, empty); TestIntervalOps(pi, full, "FFTF", full, pi); TestIntervalOps(pi, zero, "FFFF", S1Interval(M_PI, 0), empty); TestIntervalOps(pi, pi, "TFTF", pi, pi); TestIntervalOps(pi, pi2, "FFFF", S1Interval(M_PI_2, M_PI), empty); TestIntervalOps(pi, mipi, "TFTF", pi, pi); TestIntervalOps(pi, mipi2, "FFFF", quad3, empty); TestIntervalOps(pi, quad12, "FFTF", S1Interval(0, M_PI), pi); TestIntervalOps(pi, quad23, "FFTF", quad23, pi); TestIntervalOps(mipi, empty, "TTFF", mipi, empty); TestIntervalOps(mipi, full, "FFTF", full, mipi); TestIntervalOps(mipi, zero, "FFFF", quad34, empty); TestIntervalOps(mipi, pi, "TFTF", mipi, mipi); TestIntervalOps(mipi, pi2, "FFFF", quad2, empty); TestIntervalOps(mipi, mipi, "TFTF", mipi, mipi); TestIntervalOps(mipi, mipi2, "FFFF", S1Interval(-M_PI, -M_PI_2), empty); TestIntervalOps(mipi, quad12, "FFTF", quad12, mipi); TestIntervalOps(mipi, quad23, "FFTF", quad23, mipi); TestIntervalOps(quad12, empty, "TTFF", quad12, empty); TestIntervalOps(quad12, full, "FFTT", full, quad12); TestIntervalOps(quad12, zero, "TFTF", quad12, zero); 
TestIntervalOps(quad12, pi, "TFTF", quad12, pi); TestIntervalOps(quad12, mipi, "TFTF", quad12, mipi); TestIntervalOps(quad12, quad12, "TFTT", quad12, quad12); TestIntervalOps(quad12, quad23, "FFTT", quad123, quad2); TestIntervalOps(quad12, quad34, "FFTF", full, quad12); TestIntervalOps(quad23, empty, "TTFF", quad23, empty); TestIntervalOps(quad23, full, "FFTT", full, quad23); TestIntervalOps(quad23, zero, "FFFF", quad234, empty); TestIntervalOps(quad23, pi, "TTTT", quad23, pi); TestIntervalOps(quad23, mipi, "TTTT", quad23, mipi); TestIntervalOps(quad23, quad12, "FFTT", quad123, quad2); TestIntervalOps(quad23, quad23, "TFTT", quad23, quad23); TestIntervalOps(quad23, quad34, "FFTT", quad234, S1Interval(-M_PI, -M_PI_2)); TestIntervalOps(quad1, quad23, "FFTF", quad123, S1Interval(M_PI_2, M_PI_2)); TestIntervalOps(quad2, quad3, "FFTF", quad23, mipi); TestIntervalOps(quad3, quad2, "FFTF", quad23, pi); TestIntervalOps(quad2, pi, "TFTF", quad2, pi); TestIntervalOps(quad2, mipi, "TFTF", quad2, mipi); TestIntervalOps(quad3, pi, "TFTF", quad3, pi); TestIntervalOps(quad3, mipi, "TFTF", quad3, mipi); TestIntervalOps(quad12, mid12, "TTTT", quad12, mid12); TestIntervalOps(mid12, quad12, "FFTT", quad12, mid12); S1Interval quad12eps(quad12.lo(), mid23.hi()); S1Interval quad2hi(mid23.lo(), quad12.hi()); TestIntervalOps(quad12, mid23, "FFTT", quad12eps, quad2hi); TestIntervalOps(mid23, quad12, "FFTT", quad12eps, quad2hi); // This test checks that the union of two disjoint intervals is the smallest // interval that contains both of them. Note that the center of "mid34" is // slightly CCW of -Pi/2 so that there is no ambiguity about the result. S1Interval quad412eps(mid34.lo(), quad12.hi()); TestIntervalOps(quad12, mid34, "FFFF", quad412eps, empty); TestIntervalOps(mid34, quad12, "FFFF", quad412eps, empty); S1Interval quadeps12(mid41.lo(), quad12.hi()); S1Interval quad1lo(quad12.lo(), mid41.hi()); TestIntervalOps(quad12, mid41, "FFTT", quadeps12, quad1lo); TestIntervalOps(mid41, quad12, "FFTT", quadeps12, quad1lo); S1Interval quad2lo(quad23.lo(), mid12.hi()); S1Interval quad3hi(mid34.lo(), quad23.hi()); S1Interval quadeps23(mid12.lo(), quad23.hi()); S1Interval quad23eps(quad23.lo(), mid34.hi()); S1Interval quadeps123(mid41.lo(), quad23.hi()); TestIntervalOps(quad23, mid12, "FFTT", quadeps23, quad2lo); TestIntervalOps(mid12, quad23, "FFTT", quadeps23, quad2lo); TestIntervalOps(quad23, mid23, "TTTT", quad23, mid23); TestIntervalOps(mid23, quad23, "FFTT", quad23, mid23); TestIntervalOps(quad23, mid34, "FFTT", quad23eps, quad3hi); TestIntervalOps(mid34, quad23, "FFTT", quad23eps, quad3hi); TestIntervalOps(quad23, mid41, "FFFF", quadeps123, empty); TestIntervalOps(mid41, quad23, "FFFF", quadeps123, empty); } TEST_F(S1IntervalTestBase, AddPoint) { S1Interval r = empty; r.AddPoint(0); EXPECT_EQ(r, zero); r = empty; r.AddPoint(M_PI); EXPECT_EQ(r, pi); r = empty; r.AddPoint(-M_PI); EXPECT_EQ(r, mipi); r = empty; r.AddPoint(M_PI); r.AddPoint(-M_PI); EXPECT_EQ(r, pi); r = empty; r.AddPoint(-M_PI); r.AddPoint(M_PI); EXPECT_EQ(r, mipi); r = empty; r.AddPoint(mid12.lo()); r.AddPoint(mid12.hi()); EXPECT_EQ(r, mid12); r = empty; r.AddPoint(mid23.lo()); r.AddPoint(mid23.hi()); EXPECT_EQ(r, mid23); r = quad1; r.AddPoint(-0.9*M_PI); r.AddPoint(-M_PI_2); EXPECT_EQ(r, quad123); r = full; r.AddPoint(0); EXPECT_TRUE(r.is_full()); r = full; r.AddPoint(M_PI); EXPECT_TRUE(r.is_full()); r = full; r.AddPoint(-M_PI); EXPECT_TRUE(r.is_full()); } TEST_F(S1IntervalTestBase, Project) { S1Interval r(-M_PI, -M_PI); EXPECT_EQ(M_PI, 
r.Project(-M_PI)); EXPECT_EQ(M_PI, r.Project(0)); r = S1Interval(0, M_PI); EXPECT_EQ(0.1, r.Project(0.1)); EXPECT_EQ(0, r.Project(-M_PI_2 + 1e-15)); EXPECT_EQ(M_PI, r.Project(-M_PI_2 - 1e-15)); r = S1Interval(M_PI - 0.1, -M_PI + 0.1); EXPECT_EQ(M_PI, r.Project(M_PI)); EXPECT_EQ(M_PI - 0.1, r.Project(1e-15)); EXPECT_EQ(-M_PI + 0.1, r.Project(-1e-15)); EXPECT_EQ(0, S1Interval::Full().Project(0)); EXPECT_EQ(M_PI, S1Interval::Full().Project(M_PI)); EXPECT_EQ(M_PI, S1Interval::Full().Project(-M_PI)); } TEST_F(S1IntervalTestBase, FromPointPair) { EXPECT_EQ(S1Interval::FromPointPair(-M_PI, M_PI), pi); EXPECT_EQ(S1Interval::FromPointPair(M_PI, -M_PI), pi); EXPECT_EQ(S1Interval::FromPointPair(mid34.hi(), mid34.lo()), mid34); EXPECT_EQ(S1Interval::FromPointPair(mid23.lo(), mid23.hi()), mid23); } TEST_F(S1IntervalTestBase, Expanded) { EXPECT_EQ(empty.Expanded(1), empty); EXPECT_EQ(full.Expanded(1), full); EXPECT_EQ(zero.Expanded(1), S1Interval(-1, 1)); EXPECT_EQ(mipi.Expanded(0.01), S1Interval(M_PI - 0.01, -M_PI + 0.01)); EXPECT_EQ(pi.Expanded(27), full); EXPECT_EQ(pi.Expanded(M_PI_2), quad23); EXPECT_EQ(pi2.Expanded(M_PI_2), quad12); EXPECT_EQ(mipi2.Expanded(M_PI_2), quad34); EXPECT_EQ(empty.Expanded(-1), empty); EXPECT_EQ(full.Expanded(-1), full); EXPECT_EQ(quad123.Expanded(-27), empty); EXPECT_EQ(quad234.Expanded(-27), empty); EXPECT_EQ(quad123.Expanded(-M_PI_2), quad2); EXPECT_EQ(quad341.Expanded(-M_PI_2), quad4); EXPECT_EQ(quad412.Expanded(-M_PI_2), quad1); } TEST_F(S1IntervalTestBase, ApproxEquals) { // Choose two values kLo and kHi such that it's okay to shift an endpoint by // kLo (i.e., the resulting interval is equivalent) but not by kHi. static const double kLo = 4 * DBL_EPSILON; // < max_error default static const double kHi = 6 * DBL_EPSILON; // > max_error default // Empty intervals. EXPECT_TRUE(empty.ApproxEquals(empty)); EXPECT_TRUE(zero.ApproxEquals(empty) && empty.ApproxEquals(zero)); EXPECT_TRUE(pi.ApproxEquals(empty) && empty.ApproxEquals(pi)); EXPECT_TRUE(mipi.ApproxEquals(empty) && empty.ApproxEquals(mipi)); EXPECT_FALSE(empty.ApproxEquals(full)); EXPECT_TRUE(empty.ApproxEquals(S1Interval(1, 1 + 2*kLo))); EXPECT_FALSE(empty.ApproxEquals(S1Interval(1, 1 + 2*kHi))); EXPECT_TRUE(S1Interval(M_PI - kLo, -M_PI + kLo).ApproxEquals(empty)); // Full intervals. EXPECT_TRUE(full.ApproxEquals(full)); EXPECT_FALSE(full.ApproxEquals(empty)); EXPECT_FALSE(full.ApproxEquals(zero)); EXPECT_FALSE(full.ApproxEquals(pi)); EXPECT_TRUE(full.ApproxEquals(S1Interval(kLo, -kLo))); EXPECT_FALSE(full.ApproxEquals(S1Interval(2*kHi, 0))); EXPECT_TRUE(S1Interval(-M_PI + kLo, M_PI - kLo).ApproxEquals(full)); EXPECT_FALSE(S1Interval(-M_PI, M_PI - 2*kHi).ApproxEquals(full)); // Singleton intervals. EXPECT_TRUE(pi.ApproxEquals(pi) && mipi.ApproxEquals(pi)); EXPECT_TRUE(pi.ApproxEquals(S1Interval(M_PI - kLo, M_PI - kLo))); EXPECT_FALSE(pi.ApproxEquals(S1Interval(M_PI - kHi, M_PI - kHi))); EXPECT_TRUE(pi.ApproxEquals(S1Interval(M_PI - kLo, -M_PI + kLo))); EXPECT_FALSE(pi.ApproxEquals(S1Interval(M_PI - kHi, -M_PI))); EXPECT_FALSE(zero.ApproxEquals(pi)); EXPECT_TRUE(pi.Union(mid12).Union(zero).ApproxEquals(quad12)); EXPECT_TRUE(quad2.Intersection(quad3).ApproxEquals(pi)); EXPECT_TRUE(quad3.Intersection(quad2).ApproxEquals(pi)); // Intervals whose corresponding endpoints are nearly the same but where the // endpoints are in opposite order (i.e., inverted intervals). EXPECT_FALSE(S1Interval(0, kLo).ApproxEquals(S1Interval(kLo, 0))); EXPECT_FALSE(S1Interval(M_PI - 0.5 * kLo, -M_PI + 0.5 * kLo). 
ApproxEquals(S1Interval(-M_PI + 0.5 * kLo, M_PI - 0.5 * kLo))); // Other intervals. EXPECT_TRUE(S1Interval(1 - kLo, 2 + kLo).ApproxEquals(S1Interval(1, 2))); EXPECT_TRUE(S1Interval(1 + kLo, 2 - kLo).ApproxEquals(S1Interval(1, 2))); EXPECT_TRUE(S1Interval(2 - kLo, 1 + kLo).ApproxEquals(S1Interval(2, 1))); EXPECT_TRUE(S1Interval(2 + kLo, 1 - kLo).ApproxEquals(S1Interval(2, 1))); EXPECT_FALSE(S1Interval(1 - kHi, 2 + kLo).ApproxEquals(S1Interval(1, 2))); EXPECT_FALSE(S1Interval(1 + kHi, 2 - kLo).ApproxEquals(S1Interval(1, 2))); EXPECT_FALSE(S1Interval(2 - kHi, 1 + kLo).ApproxEquals(S1Interval(2, 1))); EXPECT_FALSE(S1Interval(2 + kHi, 1 - kLo).ApproxEquals(S1Interval(2, 1))); EXPECT_FALSE(S1Interval(1 - kLo, 2 + kHi).ApproxEquals(S1Interval(1, 2))); EXPECT_FALSE(S1Interval(1 + kLo, 2 - kHi).ApproxEquals(S1Interval(1, 2))); EXPECT_FALSE(S1Interval(2 - kLo, 1 + kHi).ApproxEquals(S1Interval(2, 1))); EXPECT_FALSE(S1Interval(2 + kLo, 1 - kHi).ApproxEquals(S1Interval(2, 1))); } TEST_F(S1IntervalTestBase, OperatorEquals) { EXPECT_EQ(empty, empty); EXPECT_EQ(full, full); EXPECT_NE(full, empty); } TEST_F(S1IntervalTestBase, GetDirectedHausdorffDistance) { EXPECT_FLOAT_EQ(0.0, empty.GetDirectedHausdorffDistance(empty)); EXPECT_FLOAT_EQ(0.0, empty.GetDirectedHausdorffDistance(mid12)); EXPECT_FLOAT_EQ(M_PI, mid12.GetDirectedHausdorffDistance(empty)); EXPECT_EQ(0.0, quad12.GetDirectedHausdorffDistance(quad123)); S1Interval in(3.0, -3.0); // an interval whose complement center is 0. EXPECT_FLOAT_EQ(3.0, S1Interval(-0.1, 0.2).GetDirectedHausdorffDistance(in)); EXPECT_FLOAT_EQ(3.0 - 0.1, S1Interval(0.1, 0.2).GetDirectedHausdorffDistance(in)); EXPECT_FLOAT_EQ(3.0 - 0.1, S1Interval(-0.2, -0.1).GetDirectedHausdorffDistance(in)); } s2geometry-0.10.0/src/s2/s2boolean_operation.cc000066400000000000000000003567451422156367100213110ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) // // Boolean operations are implemented by constructing the boundary of the // result and then using S2Builder to assemble the edges. The boundary is // obtained by clipping each of the two input regions to the interior or // exterior of the other region. For example, to compute the union of A and // B, we clip the boundary of A to the exterior of B and the boundary of B to // the exterior of A; the resulting set of edges defines the union of the two // regions. // // We use exact predicates, but inexact constructions (e.g. computing the // intersection point of two edges). Nevertheless, the following algorithm is // guaranteed to be 100% robust, in that the computed boundary stays within a // small tolerance (snap_radius + S2::kIntersectionError) of the exact // result, and also preserves the correct topology (i.e., no crossing edges). // // Unfortunately this robustness cannot quite be achieved using the strategy // outlined above (clipping the two input regions and assembling the // resulting edges). 
Since computed intersection points are not exact, the // input geometry passed to S2Builder might contain self-intersections, and // these self-intersections cannot be eliminated reliably by snap rounding. // // So instead, we pass S2Builder the entire set of input edges where at least // some portion of each edge belongs to the output boundary. We allow // S2Builder to compute the intersection points and snap round the edges // (which it does in a way that is guaranteed to preserve the input topology). // Then once this is finished, we remove the portions of each edge that would // have been clipped if we had done the clipping first. This step only // involves deciding whether to keep or discard each edge in the output, since // all intersection points have already been resolved, and therefore there is // no risk of creating new self-intersections. // // This is implemented using the following classes: // // - S2BooleanOperation::Impl: the top-level class that clips each of // the two regions to the other region. // // - CrossingProcessor: a class that processes edge crossings and maintains // the necessary state in order to clip the boundary // of one region to the interior or exterior of the // other region. // // - EdgeClippingLayer: an S2Builder::Layer that removes graph edges that // correspond to clipped portions of input edges, and // passes the result to another layer for assembly. // // - GraphEdgeClipper: a helper class that does the actual work of the // EdgeClippingLayer. #include "s2/s2boolean_operation.h" #include #include #include #include #include "absl/cleanup/cleanup.h" #include "absl/container/btree_map.h" #include "absl/container/flat_hash_map.h" #include "absl/memory/memory.h" #include "s2/s2builder.h" #include "s2/s2builder_layer.h" #include "s2/s2builderutil_snap_functions.h" #include "s2/s2contains_point_query.h" #include "s2/s2crossing_edge_query.h" #include "s2/s2edge_crosser.h" #include "s2/s2edge_crossings.h" #include "s2/s2measures.h" #include "s2/s2predicates.h" #include "s2/s2shape_index_measures.h" #include "s2/s2shapeutil_visit_crossing_edge_pairs.h" // TODO(ericv): Remove this debugging output at some point. extern bool s2builder_verbose; namespace { // Anonymous namespace for helper classes. using absl::flat_hash_map; using absl::make_unique; using std::lower_bound; using std::make_pair; using std::max; using std::min; using std::pair; using std::swap; using std::unique_ptr; using std::vector; using EdgeType = S2Builder::EdgeType; using SnapFunction = S2Builder::SnapFunction; using GraphOptions = S2Builder::GraphOptions; using DegenerateEdges = GraphOptions::DegenerateEdges; using DuplicateEdges = GraphOptions::DuplicateEdges; using SiblingPairs = GraphOptions::SiblingPairs; using Graph = S2Builder::Graph; using EdgeId = Graph::EdgeId; using VertexId = Graph::VertexId; using InputEdgeId = Graph::InputEdgeId; using InputEdgeIdSetId = Graph::InputEdgeIdSetId; using PolygonModel = S2BooleanOperation::PolygonModel; using PolylineModel = S2BooleanOperation::PolylineModel; using Precision = S2BooleanOperation::Precision; // A collection of special InputEdgeIds that allow the GraphEdgeClipper state // modifications to be inserted into the list of edge crossings. static const InputEdgeId kSetInside = -1; static const InputEdgeId kSetInvertB = -2; static const InputEdgeId kSetReverseA = -3; // CrossingInputEdge represents an input edge B that crosses some other input // edge A. 
It stores the input edge id of edge B and also whether it crosses // edge A from left to right (or vice versa). class CrossingInputEdge { public: // Indicates that input edge "input_id" crosses another edge (from left to // right if "left_to_right" is true). CrossingInputEdge(InputEdgeId input_id, bool left_to_right) : left_to_right_(left_to_right), input_id_(input_id) { } InputEdgeId input_id() const { return input_id_; } bool left_to_right() const { return left_to_right_; } bool operator<(const CrossingInputEdge& other) const { return input_id_ < other.input_id_; } bool operator<(const InputEdgeId& other) const { return input_id_ < other; } private: bool left_to_right_ : 1; InputEdgeId input_id_ : 31; }; // InputEdgeCrossings represents all pairs of intersecting input edges and // also certain GraphEdgeClipper state modifications (kSetInside, etc). // It is sorted lexicographically except for entries representing state // modifications, which are sorted by the first InputEdgeId only. using InputEdgeCrossings = vector>; // Given two input edges A and B that intersect, suppose that A maps to a // chain of snapped edges A_0, A_1, ..., A_m and B maps to a chain of snapped // edges B_0, B_1, ..., B_n. CrossingGraphEdge represents an edge from chain // B that shares a vertex with chain A. It is used as a temporary data // representation while processing chain A. The arguments are: // // "id" - the Graph::EdgeId of an edge from chain B. // "a_index" - the index of the vertex (A_i) that is shared with chain A. // "outgoing" - true if the shared vertex is the first vertex of the B edge. // "dst" - the Graph::VertexId of the vertex that is not shared with chain A. // // Note that if an edge from the B chain shares both vertices with the A // chain, there will be two entries: an outgoing edge that treats its first // vertex as being shared, and an incoming edge that treats its second vertex // as being shared. struct CrossingGraphEdge { CrossingGraphEdge(EdgeId _id, int _a_index, bool _outgoing, VertexId _dst) : id(_id), a_index(_a_index), outgoing(_outgoing), dst(_dst) { } EdgeId id; int a_index; bool outgoing; VertexId dst; }; using CrossingGraphEdgeVector = absl::InlinedVector; // Returns a vector of EdgeIds sorted by input edge id. When more than one // output edge has the same input edge id (i.e., the input edge snapped to a // chain of edges), the edges are sorted so that they form a directed edge // chain. // // This function could possibily be moved to S2Builder::Graph, but note that // it has special requirements. Namely, duplicate edges and sibling pairs // must be kept in order to ensure that every output edge corresponds to // exactly one input edge. (See also S2Builder::Graph::GetInputEdgeOrder.) static vector GetInputEdgeChainOrder( const Graph& g, const vector& input_ids) { S2_DCHECK(g.options().edge_type() == EdgeType::DIRECTED); S2_DCHECK(g.options().duplicate_edges() == DuplicateEdges::KEEP); S2_DCHECK(g.options().sibling_pairs() == SiblingPairs::KEEP); // First, sort the edges so that the edges corresponding to each input edge // are consecutive. (Each input edge was snapped to a chain of output // edges, or two chains in the case of undirected input edges.) vector order = g.GetInputEdgeOrder(input_ids); // Now sort the group of edges corresponding to each input edge in edge // chain order (e.g. AB, BC, CD). vector> vmap; // Map from source vertex to edge id. vector indegree(g.num_vertices()); // Restricted to current input edge. 
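// As an illustration of the loop below: if one input edge snapped to the
// output edges (B,C), (A,B), (C,D) (listed here in arbitrary order), the
// group is rearranged into chain order (A,B), (B,C), (C,D).  The chain's
// first edge is the one whose source vertex has indegree 0 within the
// group, and each following edge is found by looking up the previous
// edge's destination vertex in "vmap".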
for (int end, begin = 0; begin < order.size(); begin = end) { // Gather the edges that came from a single input edge. InputEdgeId input_id = input_ids[order[begin]]; for (end = begin; end < order.size(); ++end) { if (input_ids[order[end]] != input_id) break; } if (end - begin == 1) continue; // Build a map from the source vertex of each edge to its edge id, // and also compute the indegree at each vertex considering only the edges // that came from the current input edge. for (int i = begin; i < end; ++i) { EdgeId e = order[i]; vmap.push_back(make_pair(g.edge(e).first, e)); indegree[g.edge(e).second] += 1; } std::sort(vmap.begin(), vmap.end()); // Find the starting edge for building the edge chain. EdgeId next = g.num_edges(); for (int i = begin; i < end; ++i) { EdgeId e = order[i]; if (indegree[g.edge(e).first] == 0) next = e; } // Build the edge chain. for (int i = begin; ;) { order[i] = next; VertexId v = g.edge(next).second; indegree[v] = 0; // Clear as we go along. if (++i == end) break; auto out = lower_bound(vmap.begin(), vmap.end(), make_pair(v, 0)); S2_DCHECK_EQ(v, out->first); next = out->second; } vmap.clear(); } return order; } // Given a set of clipping instructions encoded as a set of InputEdgeCrossings, // GraphEdgeClipper determines which graph edges correspond to clipped // portions of input edges and removes them. // // The clipping model is as follows. The input consists of edge chains. The // clipper maintains an "inside" boolean state as it clips each chain, and // toggles this state whenever an input edge is crossed. Any edges that are // deemed to be "outside" after clipping are removed. // // The "inside" state can be reset when necessary (e.g., when jumping to the // start of a new chain) by adding a special crossing marked kSetInside. // There are also two other special "crossings" that modify the clipping // parameters: kSetInvertB specifies that edges should be clipped to the // exterior of the other region, and kSetReverseA specifies that edges should // be reversed before emitting them (which is needed to implement difference // operations). class GraphEdgeClipper { public: // "input_dimensions" is a vector specifying the dimension of each input // edge (0, 1, or 2). "input_crossings" is the set of all crossings to be // used when clipping the edges of "g", sorted in lexicographic order. // // The clipped set of edges and their corresponding set of input edge ids // are returned in "new_edges" and "new_input_edge_ids". (These can be used // to construct a new S2Builder::Graph.) GraphEdgeClipper(const Graph& g, const vector& input_dimensions, const InputEdgeCrossings& input_crossings, vector* new_edges, vector* new_input_edge_ids); void Run(); private: void AddEdge(Graph::Edge edge, InputEdgeId input_edge_id); void GatherIncidentEdges( const vector& a, int ai, const vector& b_input_edges, vector* b_edges) const; int GetCrossedVertexIndex( const vector& a, const CrossingGraphEdgeVector& b, bool left_to_right) const; int GetVertexRank(const CrossingGraphEdge& e) const; bool EdgeChainOnLeft(const vector& a, EdgeId b_first, EdgeId b_last) const; const Graph& g_; Graph::VertexInMap in_; Graph::VertexOutMap out_; const vector& input_dimensions_; const InputEdgeCrossings& input_crossings_; vector* new_edges_; vector* new_input_edge_ids_; // Every graph edge is associated with exactly one input edge in our case, // which means that we can declare g_.input_edge_id_set_ids() as a vector of // InputEdgeIds rather than a vector of InputEdgeIdSetIds. 
(This also takes // advantage of the fact that IdSetLexicon represents a singleton set as the // value of its single element.) const vector& input_ids_; vector order_; // Graph edges sorted in input edge id order. vector rank_; // The rank of each graph edge within order_. }; GraphEdgeClipper::GraphEdgeClipper( const Graph& g, const vector& input_dimensions, const InputEdgeCrossings& input_crossings, vector* new_edges, vector* new_input_edge_ids) : g_(g), in_(g), out_(g), input_dimensions_(input_dimensions), input_crossings_(input_crossings), new_edges_(new_edges), new_input_edge_ids_(new_input_edge_ids), input_ids_(g.input_edge_id_set_ids()), order_(GetInputEdgeChainOrder(g_, input_ids_)), rank_(order_.size()) { for (int i = 0; i < order_.size(); ++i) { rank_[order_[i]] = i; } // new_edges_ is obtained by filtering the graph edges and therefore the // number of graph edges is an upper bound on its size. new_edges_->reserve(g_.num_edges()); new_input_edge_ids_->reserve(g_.num_edges()); } inline void GraphEdgeClipper::AddEdge(Graph::Edge edge, InputEdgeId input_edge_id) { new_edges_->push_back(edge); new_input_edge_ids_->push_back(input_edge_id); } void GraphEdgeClipper::Run() { // Declare vectors here and reuse them to avoid reallocation. vector a_vertices; vector a_num_crossings; vector a_isolated; vector b_input_edges; vector b_edges; bool inside = false; bool invert_b = false; bool reverse_a = false; auto next = input_crossings_.begin(); for (int i = 0; i < order_.size(); ++i) { // For each input edge (the "A" input edge), gather all the input edges // that cross it (the "B" input edges). InputEdgeId a_input_id = input_ids_[order_[i]]; const Graph::Edge& edge0 = g_.edge(order_[i]); b_input_edges.clear(); for (; next != input_crossings_.end(); ++next) { if (next->first != a_input_id) break; if (next->second.input_id() >= 0) { b_input_edges.push_back(next->second); } else if (next->second.input_id() == kSetInside) { inside = next->second.left_to_right(); } else if (next->second.input_id() == kSetInvertB) { invert_b = next->second.left_to_right(); } else { S2_DCHECK_EQ(next->second.input_id(), kSetReverseA); reverse_a = next->second.left_to_right(); } } // Optimization for degenerate edges. // TODO(ericv): If the output layer for this edge dimension specifies // DegenerateEdges::DISCARD, then remove the edge here. if (edge0.first == edge0.second) { inside ^= (b_input_edges.size() & 1); AddEdge(edge0, a_input_id); continue; } // Optimization for the case where there are no crossings. if (b_input_edges.empty()) { // In general the caller only passes edges that are part of the output // (i.e., we could S2_DCHECK(inside) here). The one exception is for // polyline/polygon operations, where the polygon edges are needed to // compute the polyline output but are not emitted themselves. if (inside) { AddEdge(reverse_a ? Graph::reverse(edge0) : edge0, a_input_id); } continue; } // Walk along the chain of snapped edges for input edge A, and at each // vertex collect all the incident edges that belong to one of the // crossing edge chains (the "B" input edges). 
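// For example, if input edge A snapped to the vertex chain (v0, v1, v2),
// then a_vertices becomes [v0, v1, v2] and b_edges[i] collects every
// snapped edge of the i-th crossing B input edge that is incident to one
// of those three vertices.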
a_vertices.clear(); a_vertices.push_back(edge0.first); b_edges.clear(); b_edges.resize(b_input_edges.size()); GatherIncidentEdges(a_vertices, 0, b_input_edges, &b_edges); for (; i < order_.size() && input_ids_[order_[i]] == a_input_id; ++i) { a_vertices.push_back(g_.edge(order_[i]).second); GatherIncidentEdges(a_vertices, a_vertices.size() - 1, b_input_edges, &b_edges); } --i; if (s2builder_verbose) { std::cout << "input edge " << a_input_id << " (inside=" << inside << "):"; for (VertexId id : a_vertices) std::cout << " " << id; } // Now for each B edge chain, decide which vertex of the A chain it // crosses, and keep track of the number of signed crossings at each A // vertex. The sign of a crossing depends on whether the other edge // crosses from left to right or right to left. // // This would not be necessary if all calculations were done in exact // arithmetic, because crossings would have strictly alternating signs. // But because we have already snapped the result, some crossing locations // are ambiguous, and GetCrossedVertexIndex() handles this by choosing a // candidate vertex arbitrarily. The end result is that rarely, we may // see two crossings in a row with the same sign. We correct for this by // adding extra output edges that essentially link up the crossings in the // correct (alternating sign) order. Compared to the "correct" behavior, // the only difference is that we have added some extra sibling pairs // (consisting of an edge and its corresponding reverse edge) which do not // affect the result. a_num_crossings.clear(); a_num_crossings.resize(a_vertices.size()); a_isolated.clear(); a_isolated.resize(a_vertices.size()); for (int bi = 0; bi < b_input_edges.size(); ++bi) { bool left_to_right = b_input_edges[bi].left_to_right(); int a_index = GetCrossedVertexIndex(a_vertices, b_edges[bi], left_to_right); if (a_index >= 0) { if (s2builder_verbose) { std::cout << std::endl << " " << "b input edge " << b_input_edges[bi].input_id() << " (l2r=" << left_to_right << ", crossing=" << a_vertices[a_index] << ")"; for (const auto& x : b_edges[bi]) { const Graph::Edge& e = g_.edge(x.id); std::cout << " (" << e.first << ", " << e.second << ")"; } } // Keep track of the number of signed crossings (see above). bool is_line = input_dimensions_[b_input_edges[bi].input_id()] == 1; int sign = is_line ? 0 : (left_to_right == invert_b) ? -1 : 1; a_num_crossings[a_index] += sign; // Any polyline or polygon vertex that has at least one crossing but no // adjacent emitted edge may be emitted as an isolated vertex. a_isolated[a_index] = true; } else { // TODO(b/112043775): fix this condition. S2_LOG(DFATAL) << "Failed to get crossed vertex index."; } } if (s2builder_verbose) std::cout << std::endl; // Finally, we iterate through the A edge chain, keeping track of the // number of signed crossings as we go along. The "multiplicity" is // defined as the cumulative number of signed crossings, and indicates how // many edges should be output (and in which direction) in order to link // up the edge crossings in the correct order. (The multiplicity is // almost always either 0 or 1 except in very rare cases.) int multiplicity = inside + a_num_crossings[0]; for (int ai = 1; ai < a_vertices.size(); ++ai) { if (multiplicity != 0) { a_isolated[ai - 1] = a_isolated[ai] = false; } int edge_count = reverse_a ? -multiplicity : multiplicity; // Output any forward edges required. 
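// (With multiplicity == 1 and reverse_a == false this emits exactly one
// forward copy of the edge (a_vertices[ai - 1], a_vertices[ai]); a negative
// edge_count instead emits reverse edges in the second loop below.)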
for (int i = 0; i < edge_count; ++i) { AddEdge(Graph::Edge(a_vertices[ai - 1], a_vertices[ai]), a_input_id); } // Output any reverse edges required. for (int i = edge_count; i < 0; ++i) { AddEdge(Graph::Edge(a_vertices[ai], a_vertices[ai - 1]), a_input_id); } multiplicity += a_num_crossings[ai]; } // Multiplicities other than 0 or 1 can only occur in the edge interior. S2_DCHECK(multiplicity == 0 || multiplicity == 1); inside = (multiplicity != 0); // Output any isolated polyline vertices. // TODO(ericv): Only do this if an output layer wants degenerate edges. if (input_dimensions_[a_input_id] != 0) { for (int ai = 0; ai < a_vertices.size(); ++ai) { if (a_isolated[ai]) { AddEdge(Graph::Edge(a_vertices[ai], a_vertices[ai]), a_input_id); } } } } } // Given the vertices of the snapped edge chain for an input edge A and the // set of input edges B that cross input edge A, this method gathers all of // the snapped edges of B that are incident to a given snapped vertex of A. // The incident edges for each input edge of B are appended to a separate // output vector. (A and B can refer to either the input edge or the // corresponding snapped edge chain.) void GraphEdgeClipper::GatherIncidentEdges( const vector& a, int ai, const vector& b_input_edges, vector* b_edges) const { // Examine all of the edges incident to the given vertex of A. If any edge // comes from a B input edge, append it to the appropriate vector. S2_DCHECK_EQ(b_input_edges.size(), b_edges->size()); for (EdgeId e : in_.edge_ids(a[ai])) { InputEdgeId id = input_ids_[e]; auto it = lower_bound(b_input_edges.begin(), b_input_edges.end(), id); if (it != b_input_edges.end() && it->input_id() == id) { auto& edges = (*b_edges)[it - b_input_edges.begin()]; edges.push_back(CrossingGraphEdge(e, ai, false, g_.edge(e).first)); } } for (EdgeId e : out_.edge_ids(a[ai])) { InputEdgeId id = input_ids_[e]; auto it = lower_bound(b_input_edges.begin(), b_input_edges.end(), id); if (it != b_input_edges.end() && it->input_id() == id) { auto& edges = (*b_edges)[it - b_input_edges.begin()]; edges.push_back(CrossingGraphEdge(e, ai, true, g_.edge(e).second)); } } } // Returns the "vertex rank" of the shared vertex associated with the given // CrossingGraphEdge. Recall that graph edges are sorted in input edge order, // and that the rank of an edge is its position in this order (rank_[e]). // VertexRank(e) is defined such that VertexRank(e.src) == rank_[e] and // VertexRank(e.dst) == rank_[e] + 1. Note that the concept of "vertex rank" // is only defined within a single edge chain (since different edge chains can // have overlapping vertex ranks). int GraphEdgeClipper::GetVertexRank(const CrossingGraphEdge& e) const { return rank_[e.id] + !e.outgoing; } // Given an edge chain A that is crossed by another edge chain B (where // "left_to_right" indicates whether B crosses A from left to right), this // method decides which vertex of A the crossing takes place at. The // parameters are the vertices of the A chain ("a") and the set of edges in // the B chain ("b") that are incident to vertices of A. The B chain edges // are sorted in increasing order of (a_index, outgoing) tuple. 
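// In the simplest case the B chain is incident to only one vertex of the A
// chain and that vertex index is returned immediately; the subtle cases
// arise when snapping makes the two chains touch at several vertices (see
// the diagram in the implementation below).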
int GraphEdgeClipper::GetCrossedVertexIndex( const vector& a, const CrossingGraphEdgeVector& b, bool left_to_right) const { if (a.empty() || b.empty()) { S2_LOG(DFATAL) << "GraphEdgeClipper::GetCrossedVertexIndex called with " << a.size() << " vertex ids and " << b.size() << " crossing graph edges."; return -1; } // The reason this calculation is tricky is that after snapping, the A and B // chains may meet and separate several times. For example, if B crosses A // from left to right, then B may touch A, make an excursion to the left of // A, come back to A, then make an excursion to the right of A and come back // to A again, like this: // // *--B--*-\ /-*-\ // B-\ /-B B-\ 6 7 8 9 // *--A--*--A--*-A,B-*--A--*--A--*-A,B-*--A--*--A--*-A,B-* // 0 1 2 3 4 5 \-B B-/ // \-*-/ // // (where "*" is a vertex, and "A" and "B" are edge labels). Note that B // may also follow A for one or more edges whenever they touch (e.g. between // vertices 2 and 3). In this case the only vertices of A where the // crossing could take place are 5 and 6, i.e. after all excursions of B to // the left of A, and before all excursions of B to the right of A. // // Other factors to consider are that the portion of B before and/or after // the crossing may be degenerate, and some or all of the B edges may be // reversed relative to the A edges. // First, check whether edge A is degenerate. int n = a.size(); if (n == 1) return 0; // If edge chain B is incident to only one vertex of A, we're done. if (b[0].a_index == b.back().a_index) return b[0].a_index; // Determine whether the B chain visits the first and last vertices that it // shares with the A chain in the same order or the reverse order. This is // only needed to implement one special case (see below). bool b_reversed = GetVertexRank(b[0]) > GetVertexRank(b.back()); // Examine each incident B edge and use it to narrow the range of positions // where the crossing could occur in the B chain. Vertex positions are // represented as a range [lo, hi] of vertex ranks in the B chain (see // GetVertexRank). // // Note that if an edge of B is incident to the first or last vertex of A, // we can't test which side of the A chain it is on. (An s2pred::Sign test // doesn't work; e.g. if the B edge is XY and the first edge of A is YZ, // then snapping can change the sign of XYZ while maintaining topological // guarantees.) There can be up to 4 such edges (one incoming and one // outgoing edge at each endpoint of A). Two of these edges logically // extend past the end of the A chain and place no restrictions on the // crossing vertex. The other two edges define the ends of the subchain // where B shares vertices with A. We save these edges in order to handle a // special case (see below). int lo = -1, hi = order_.size(); // Vertex ranks of acceptable crossings EdgeId b_first = -1, b_last = -1; // "b" subchain connecting "a" endpoints for (const auto& e : b) { int ai = e.a_index; if (ai == 0) { if (e.outgoing != b_reversed && e.dst != a[1]) b_first = e.id; } else if (ai == n - 1) { if (e.outgoing == b_reversed && e.dst != a[n - 2]) b_last = e.id; } else { // This B edge is incident to an interior vertex of the A chain. First // check whether this edge is identical (or reversed) to an edge in the // A chain, in which case it does not create any restrictions. if (e.dst == a[ai - 1] || e.dst == a[ai + 1]) continue; // Otherwise we can test which side of the A chain the edge lies on. 
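// Concretely, e.dst is considered to be on the left of the A chain at
// vertex a[ai] if, sweeping CCW around a[ai] starting from the direction
// of a[ai + 1], e.dst is encountered before a[ai - 1]; this is what the
// s2pred::OrderedCCW() test below checks.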
bool on_left = s2pred::OrderedCCW(g_.vertex(a[ai + 1]), g_.vertex(e.dst), g_.vertex(a[ai - 1]), g_.vertex(a[ai])); // Every B edge that is incident to an interior vertex of the A chain // places some restriction on where the crossing vertex could be. if (left_to_right == on_left) { // This is a pre-crossing edge, so the crossing cannot be before the // destination vertex of this edge. (For example, the input B edge // crosses the input A edge from left to right and this edge of the B // chain is to the left of the A chain.) lo = max(lo, rank_[e.id] + 1); } else { // This is a post-crossing edge, so the crossing cannot be after the // source vertex of this edge. hi = min(hi, rank_[e.id]); } } } // There is one special case. If a subchain of B connects the first and // last vertices of A, then together with the edges of A this forms a loop // whose orientation can be tested to determine whether B is on the left or // right side of A. This is only possible (and only necessary) if the B // subchain does not include any interior vertices of A, since otherwise the // B chain might cross from one side of A to the other. // // Note that it would be possible to avoid this test in some situations by // checking whether either endpoint of the A chain has two incident B edges, // in which case we could check which side of the B chain the A edge is on // and use this to limit the possible crossing locations. if (b_first >= 0 && b_last >= 0) { // Swap the edges if necessary so that they are in B chain order. if (b_reversed) swap(b_first, b_last); // The B subchain connects the first and last vertices of A. We test // whether the chain includes any interior vertices of A by iterating // through the incident B edges again, looking for ones that belong to // the B subchain and are not incident to the first or last vertex of A. bool has_interior_vertex = false; for (const auto& e : b) { if (e.a_index > 0 && e.a_index < n - 1 && rank_[e.id] >= rank_[b_first] && rank_[e.id] <= rank_[b_last]) { has_interior_vertex = true; break; } } if (!has_interior_vertex) { // The B subchain is not incident to any interior vertex of A. bool on_left = EdgeChainOnLeft(a, b_first, b_last); if (left_to_right == on_left) { lo = max(lo, rank_[b_last] + 1); } else { hi = min(hi, rank_[b_first]); } } } // Otherwise we choose the smallest shared VertexId in the acceptable range, // in order to ensure that both chains choose the same crossing vertex. int best = -1; S2_DCHECK_LE(lo, hi); for (const auto& e : b) { int ai = e.a_index; int vrank = GetVertexRank(e); if (vrank >= lo && vrank <= hi && (best < 0 || a[ai] < a[best])) { best = ai; } } return best; } // Given edge chains A and B that form a loop (after possibly reversing the // direction of chain B), returns true if chain B is to the left of chain A. // Chain A is given as a sequence of vertices, while chain B is specified as // the first and last edges of the chain. bool GraphEdgeClipper::EdgeChainOnLeft( const vector& a, EdgeId b_first, EdgeId b_last) const { // Gather all the interior vertices of the B subchain. vector loop; for (int i = rank_[b_first]; i < rank_[b_last]; ++i) { loop.push_back(g_.edge(order_[i]).second); } // Possibly reverse the chain so that it forms a loop when "a" is appended. if (g_.edge(b_last).second != a[0]) std::reverse(loop.begin(), loop.end()); loop.insert(loop.end(), a.begin(), a.end()); // Duplicate the first two vertices to simplify vertex indexing. 
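  // (Summing S2::TurnAngle() around a closed loop yields a positive total for
  // a counterclockwise loop and a negative total for a clockwise loop, which
  // is what the orientation test below relies on. Duplicating the first two
  // vertices lets the loop evaluate the turn at every vertex exactly once,
  // including the two turns that wrap around the end of the vector.)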
for (int j = 0; j < 2; j++) { loop.insert(loop.end(), *(loop.begin() + j)); } // Now B is to the left of A if and only if the loop is counterclockwise. double sum = 0; for (int i = 2; i < loop.size(); ++i) { sum += S2::TurnAngle(g_.vertex(loop[i - 2]), g_.vertex(loop[i - 1]), g_.vertex(loop[i])); } return sum > 0; } // Given a set of clipping instructions encoded as a set of intersections // between input edges, EdgeClippingLayer determines which graph edges // correspond to clipped portions of input edges and removes them. It // assembles the remaining edges into a new S2Builder::Graph and passes the // result to the given output layer for assembly. class EdgeClippingLayer : public S2Builder::Layer { public: EdgeClippingLayer(const vector>* layers, const vector* input_dimensions, const InputEdgeCrossings* input_crossings, S2MemoryTracker::Client* tracker) : layers_(*layers), input_dimensions_(*input_dimensions), input_crossings_(*input_crossings), tracker_(tracker) { } // Layer interface: GraphOptions graph_options() const override; void Build(const Graph& g, S2Error* error) override; private: const vector>& layers_; const vector& input_dimensions_; const InputEdgeCrossings& input_crossings_; S2MemoryTracker::Client* tracker_; }; GraphOptions EdgeClippingLayer::graph_options() const { // We keep all edges, including degenerate ones, so that we can figure out // the correspondence between input edge crossings and output edge // crossings. return GraphOptions(EdgeType::DIRECTED, DegenerateEdges::KEEP, DuplicateEdges::KEEP, SiblingPairs::KEEP); } void EdgeClippingLayer::Build(const Graph& g, S2Error* error) { // Data per graph edge: // vector order_; // vector rank_; // vector new_edges; // vector new_input_edge_ids; // Data per graph vertex: // Graph::VertexInMap in_; // Graph::VertexOutMap out_; // // The first and last two vectors above are freed upon GraphEdgeClipper // destruction. There is also a temporary vector "indegree" in // GetInputEdgeChainOrder() but this does not affect peak memory usage. int64 tmp_bytes = g.num_edges() * (sizeof(EdgeId) + sizeof(int)) + g.num_vertices() * (2 * sizeof(EdgeId)); int64 final_bytes = g.num_edges() * (sizeof(Graph::Edge) + sizeof(InputEdgeIdSetId)); // The order of the calls below is important. Note that all memory tracked // through this client is automatically untallied upon object destruction. if (!tracker_->Tally(final_bytes) || !tracker_->TallyTemp(tmp_bytes)) { // We don't need to copy memory tracking errors to "error" because this // is already done for us in S2BooleanOperation::Impl::Build(). return; } // The bulk of the work is handled by GraphEdgeClipper. vector new_edges; vector new_input_edge_ids; // Destroy the GraphEdgeClipper immediately to save memory. GraphEdgeClipper(g, input_dimensions_, input_crossings_, &new_edges, &new_input_edge_ids).Run(); if (s2builder_verbose) { std::cout << "Edges after clipping: " << std::endl; for (int i = 0; i < new_edges.size(); ++i) { std::cout << " " << new_input_edge_ids[i] << " (" << new_edges[i].first << ", " << new_edges[i].second << ")" << std::endl; } } // Construct one or more subgraphs from the clipped edges and pass them to // the given output layer(s). We start with a copy of the input graph's // IdSetLexicon because this is necessary in general, even though in this // case it is guaranteed to be empty because no edges have been merged. 
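  // (When more than one output layer is present there must be exactly three
  // of them, one per dimension; in that case the clipped edges are
  // partitioned by dimension below and each resulting subgraph is passed to
  // the corresponding layer.)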
IdSetLexicon new_input_edge_id_set_lexicon = g.input_edge_id_set_lexicon(); if (layers_.size() == 1) { Graph new_graph = g.MakeSubgraph( layers_[0]->graph_options(), &new_edges, &new_input_edge_ids, &new_input_edge_id_set_lexicon, g.is_full_polygon_predicate(), error, tracker_); if (tracker_->ok()) layers_[0]->Build(new_graph, error); tracker_->Untally(new_edges); tracker_->Untally(new_input_edge_ids); } else { // The Graph objects must be valid until the last Build() call completes, // so we store all of the graph data in arrays with 3 elements. S2_DCHECK_EQ(3, layers_.size()); vector layer_edges[3]; vector layer_input_edge_ids[3]; // Separate the edges according to their dimension. for (int i = 0; i < new_edges.size(); ++i) { int d = input_dimensions_[new_input_edge_ids[i]]; if (!tracker_->AddSpace(&layer_edges[d], 1)) return; if (!tracker_->AddSpace(&layer_input_edge_ids[d], 1)) return; layer_edges[d].push_back(new_edges[i]); layer_input_edge_ids[d].push_back(new_input_edge_ids[i]); } // Clear variables to save space. if (!tracker_->Clear(&new_edges)) return; if (!tracker_->Clear(&new_input_edge_ids)) return; vector layer_graphs; // No default constructor. layer_graphs.reserve(3); for (int d = 0; d < 3; ++d) { layer_graphs.push_back(g.MakeSubgraph( layers_[d]->graph_options(), &layer_edges[d], &layer_input_edge_ids[d], &new_input_edge_id_set_lexicon, g.is_full_polygon_predicate(), error, tracker_)); if (tracker_->ok()) layers_[d]->Build(layer_graphs[d], error); } for (int d = 0; d < 3; ++d) { tracker_->Untally(layer_edges[d]); tracker_->Untally(layer_input_edge_ids[d]); } } } } // namespace class S2BooleanOperation::Impl { public: explicit Impl(S2BooleanOperation* op) : op_(op), index_crossings_first_region_id_(-1), tracker_(op->options_.memory_tracker()) { } bool Build(S2Error* error); private: class CrossingIterator; class CrossingProcessor; using ShapeEdge = s2shapeutil::ShapeEdge; using ShapeEdgeId = s2shapeutil::ShapeEdgeId; using ShapeEdgeIdHash = s2shapeutil::ShapeEdgeIdHash; // An IndexCrossing represents a pair of intersecting S2ShapeIndex edges // ("a_edge" and "b_edge"). We store all such intersections because the // algorithm needs them twice, once when processing the boundary of region A // and once when processing the boundary of region B. struct IndexCrossing { ShapeEdgeId a, b; // True if S2::CrossingSign(a_edge, b_edge) > 0. uint32 is_interior_crossing : 1; // True if "a_edge" crosses "b_edge" from left to right. Undefined if // is_interior_crossing is false. uint32 left_to_right: 1; // Equal to S2::VertexCrossing(a_edge, b_edge). Undefined if "a_edge" and // "b_edge" do not share exactly one vertex or either edge is degenerate. uint32 is_vertex_crossing : 1; // All flags are "false" by default. 
IndexCrossing(ShapeEdgeId _a, ShapeEdgeId _b) : a(_a), b(_b), is_interior_crossing(false), left_to_right(false), is_vertex_crossing(false) { } friend bool operator==(const IndexCrossing& x, const IndexCrossing& y) { return x.a == y.a && x.b == y.b; } friend bool operator<(const IndexCrossing& x, const IndexCrossing& y) { // The compiler (2017) doesn't optimize the following as well: // return x.a < y.a || (x.a == y.a && x.b < y.b); if (x.a.shape_id < y.a.shape_id) return true; if (y.a.shape_id < x.a.shape_id) return false; if (x.a.edge_id < y.a.edge_id) return true; if (y.a.edge_id < x.a.edge_id) return false; if (x.b.shape_id < y.b.shape_id) return true; if (y.b.shape_id < x.b.shape_id) return false; return x.b.edge_id < y.b.edge_id; } }; using IndexCrossings = vector; class MemoryTracker : public S2MemoryTracker::Client { public: using S2MemoryTracker::Client::Client; // Used to track memory used by CrossingProcessor::source_id_map_. (The // type is a template parameter so that SourceIdMap can be private.) template bool TallySourceIdMap(int num_entries) { int64 delta_bytes = num_entries * GetBtreeMinBytesPerEntry(); source_id_map_bytes_ += delta_bytes; return Tally(delta_bytes); } // Used to clear CrossingProcessor::source_id_map_ and update the tracked // memory usage accordingly. template bool ClearSourceIdMap(T* source_id_map) { source_id_map->clear(); Tally(-source_id_map_bytes_); source_id_map_bytes_ = 0; return ok(); } private: // The amount of memory used by CrossingProcessor::source_id_map_. int64 source_id_map_bytes_ = 0; }; bool is_boolean_output() const { return op_->result_empty_ != nullptr; } // All of the methods below support "early exit" in the case of boolean // results by returning "false" as soon as the result is known to be // non-empty. bool AddBoundary(int a_region_id, bool invert_a, bool invert_b, bool invert_result, const vector& a_chain_starts, CrossingProcessor* cp); bool GetChainStarts(int a_region_id, bool invert_a, bool invert_b, bool invert_result, CrossingProcessor* cp, vector* chain_starts); bool ProcessIncidentEdges(const ShapeEdge& a, S2ContainsPointQuery* query, CrossingProcessor* cp); static bool HasInterior(const S2ShapeIndex& index); bool AddIndexCrossing(const ShapeEdge& a, const ShapeEdge& b, bool is_interior, IndexCrossings* crossings); bool GetIndexCrossings(int region_id); bool AddBoundaryPair(bool invert_a, bool invert_b, bool invert_result, CrossingProcessor* cp); bool AreRegionsIdentical() const; bool BuildOpType(OpType op_type); bool IsFullPolygonResult(const S2Builder::Graph& g, S2Error* error) const; bool IsFullPolygonUnion(const S2ShapeIndex& a, const S2ShapeIndex& b) const; bool IsFullPolygonIntersection(const S2ShapeIndex& a, const S2ShapeIndex& b) const; bool IsFullPolygonDifference(const S2ShapeIndex& a, const S2ShapeIndex& b) const; bool IsFullPolygonSymmetricDifference(const S2ShapeIndex& a, const S2ShapeIndex& b) const; void DoBuild(S2Error* error); // A bit mask representing all six faces of the S2 cube. static constexpr uint8 kAllFacesMask = 0x3f; S2BooleanOperation* op_; // The S2Builder options used to construct the output. S2Builder::Options builder_options_; // The S2Builder used to construct the output. Note that the S2Builder // object is created only when is_boolean_output() is false. unique_ptr builder_; // A vector specifying the dimension of each edge added to S2Builder. vector input_dimensions_; // The set of all input edge crossings, which is used by EdgeClippingLayer // to construct the clipped output polygon. 
InputEdgeCrossings input_crossings_; // kSentinel is a sentinel value used to mark the end of vectors. static const ShapeEdgeId kSentinel; // A vector containing all pairs of crossing edges from the two input // regions (including edge pairs that share a common vertex). The first // element of each pair is an edge from "index_crossings_first_region_id_", // while the second element of each pair is an edge from the other region. IndexCrossings index_crossings_; // Indicates that the first element of each crossing edge pair in // "index_crossings_" corresponds to an edge from the given region. // This field is negative if index_crossings_ has not been computed yet. int index_crossings_first_region_id_; // Temporary storage used in GetChainStarts(), declared here to avoid // repeatedly allocating memory. IndexCrossings tmp_crossings_; // An object to track the memory usage of this class. MemoryTracker tracker_; }; const s2shapeutil::ShapeEdgeId S2BooleanOperation::Impl::kSentinel( std::numeric_limits::max(), 0); // A helper class for iterating through the edges from region B that cross a // particular edge from region A. It caches information from the current // shape, chain, and edge so that it doesn't need to be looked up repeatedly. // Typical usage: // // void SomeFunction(ShapeEdgeId a_id, CrossingIterator *it) { // // Iterate through the edges that cross edge "a_id". // for (; !it->Done(a_id); it->Next()) { // ... use it->b_shape(), it->b_edge(), etc ... // } class S2BooleanOperation::Impl::CrossingIterator { public: // Creates an iterator over crossing edge pairs (a, b) where "b" is an edge // from "b_index". "crossings_complete" indicates that "crossings" contains // all edge crossings between the two regions (rather than a subset). CrossingIterator(const S2ShapeIndex* b_index, const IndexCrossings* crossings, bool crossings_complete) : b_index_(*b_index), it_(crossings->begin()), b_shape_id_(-1), crossings_complete_(crossings_complete) { Update(); } void Next() { ++it_; Update(); } bool Done(ShapeEdgeId id) const { return a_id() != id; } // True if all edge crossings are available (see above). bool crossings_complete() const { return crossings_complete_; } // True if this crossing occurs at a point interior to both edges. bool is_interior_crossing() const { return it_->is_interior_crossing; } // Equal to S2::VertexCrossing(a_edge, b_edge), provided that a_edge and // b_edge have exactly one vertex in common and neither edge is degenerate. bool is_vertex_crossing() const { return it_->is_vertex_crossing; } // True if a_edge crosses b_edge from left to right (for interior crossings). bool left_to_right() const { return it_->left_to_right; } ShapeEdgeId a_id() const { return it_->a; } ShapeEdgeId b_id() const { return it_->b; } const S2ShapeIndex& b_index() const { return b_index_; } const S2Shape& b_shape() const { return *b_shape_; } int b_dimension() const { return b_dimension_; } int b_shape_id() const { return b_shape_id_; } int b_edge_id() const { return b_id().edge_id; } S2Shape::Edge b_edge() const { return b_shape_->edge(b_edge_id()); // Opportunity to cache this. } // Information about the chain to which an edge belongs. struct ChainInfo { int chain_id; // chain id int start; // starting edge id int limit; // limit edge id }; // Returns a description of the chain to which the current B edge belongs. 
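  // (The chain information is computed lazily: Update() resets the cached
  // chain_id to -1 whenever the B shape changes, and the lookup below is
  // repeated on the next call.)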
const ChainInfo& b_chain_info() const { if (b_info_.chain_id < 0) { b_info_.chain_id = b_shape().chain_position(b_edge_id()).chain_id; auto chain = b_shape().chain(b_info_.chain_id); b_info_.start = chain.start; b_info_.limit = chain.start + chain.length; } return b_info_; } private: // Updates information about the B shape whenever it changes. void Update() { if (a_id() != kSentinel && b_id().shape_id != b_shape_id_) { b_shape_id_ = b_id().shape_id; b_shape_ = b_index_.shape(b_shape_id_); b_dimension_ = b_shape_->dimension(); b_info_.chain_id = -1; // Computed on demand. } } const S2ShapeIndex& b_index_; IndexCrossings::const_iterator it_; const S2Shape* b_shape_; int b_shape_id_; int b_dimension_; mutable ChainInfo b_info_; // Computed on demand. bool crossings_complete_; }; // CrossingProcessor is a helper class that processes all the edges from one // region that cross a specific edge of the other region. It outputs the // appropriate edges to an S2Builder, and outputs other information required // by GraphEdgeClipper to the given vectors. class S2BooleanOperation::Impl::CrossingProcessor { public: // Prepares to build output for the given polygon and polyline boundary // models. Edges are emitted to "builder", while other auxiliary data is // appended to the given vectors. // // If a predicate is being evaluated (i.e., we do not need to construct the // actual result), then "builder" and the various output vectors should all // be nullptr. CrossingProcessor(const PolygonModel& polygon_model, const PolylineModel& polyline_model, bool polyline_loops_have_boundaries, S2Builder* builder, vector* input_dimensions, InputEdgeCrossings* input_crossings, MemoryTracker* tracker) : polygon_model_(polygon_model), polyline_model_(polyline_model), polyline_loops_have_boundaries_(polyline_loops_have_boundaries), builder_(builder), input_dimensions_(input_dimensions), input_crossings_(input_crossings), tracker_(tracker), prev_inside_(false) { } // Starts processing edges from the given region. "invert_a", "invert_b", // and "invert_result" indicate whether region A, region B, and/or the // result should be inverted, which allows operations such as union and // difference to be implemented. For example, union is ~(~A & ~B). // // This method should be called in pairs, once to process the edges from // region A and once to process the edges from region B. void StartBoundary(int a_region_id, bool invert_a, bool invert_b, bool invert_result); // Starts processing edges from the given shape. void StartShape(const S2Shape* a_shape); // Starts processing edges from the given chain. void StartChain(int chain_id, S2Shape::Chain chain, bool inside); // Processes the given edge "a_id". "it" should be positioned to the set of // edges from the other region that cross "a_id" (if any). // // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool ProcessEdge(ShapeEdgeId a_id, CrossingIterator* it); // This method should be called after each pair of calls to StartBoundary. // (The only operation that processes more than one pair of boundaries is // SYMMETRIC_DIFFERENCE, which computes the union of A-B and B-A.) // // Resets the state of the CrossingProcessor. void DoneBoundaryPair(); // Indicates whether the point being processed along the current edge chain // is in the polygonal interior of the opposite region, using semi-open // boundaries. If "invert_b_" is true then this field is inverted. 
// // This value along with the set of incident edges can be used to compute // whether the opposite region contains this point under any of the // supported boundary models (PolylineModel::CLOSED, etc). bool inside() const { return inside_; } private: // SourceEdgeCrossing represents an input edge that crosses some other // edge; it crosses the edge from left to right iff the second parameter // is "true". using SourceEdgeCrossing = pair; struct PointCrossingResult; struct EdgeCrossingResult; InputEdgeId input_edge_id() const { return input_dimensions_->size(); } // Returns true if the edges on either side of the first vertex of the // current edge have not been emitted. // // REQUIRES: This method is called just after updating "inside_" for "v0". bool is_v0_isolated(ShapeEdgeId a_id) const { return !inside_ && v0_emitted_max_edge_id_ < a_id.edge_id; } // Returns true if "a_id" is the last edge of the current chain, and the // edges on either side of the last vertex have not been emitted (including // the possibility that the chain forms a loop). bool is_chain_last_vertex_isolated(ShapeEdgeId a_id) const { return (a_id.edge_id == chain_limit_ - 1 && !chain_v0_emitted_ && v0_emitted_max_edge_id_ <= a_id.edge_id); } // Returns true if the given polyline edge contains "v0", taking into // account the specified PolylineModel. bool polyline_contains_v0(int edge_id, int chain_start) const { return (polyline_model_ != PolylineModel::OPEN || edge_id > chain_start); } bool is_degenerate(ShapeEdgeId a_id) const { return is_degenerate_hole_.contains(a_id); } void AddCrossing(const SourceEdgeCrossing& crossing) { if (!tracker_->AddSpace(&source_edge_crossings_, 1)) return; source_edge_crossings_.push_back(make_pair(input_edge_id(), crossing)); } void AddInteriorCrossing(const SourceEdgeCrossing& crossing) { // Crossing edges are queued until the S2Builder edge that they are // supposed to be associated with is created (see AddEdge() and // pending_source_edge_crossings_ for details). pending_source_edge_crossings_.push_back(crossing); } void SetClippingState(InputEdgeId parameter, bool state) { AddCrossing(SourceEdgeCrossing(SourceId(parameter), state)); } // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool AddEdge(ShapeEdgeId a_id, const S2Shape::Edge& a, int dimension, int interior_crossings) { if (builder_ == nullptr) return false; // Boolean output. if (interior_crossings > 0) { // Add the edges that cross this edge to the output so that // GraphEdgeClipper can find them. if (!tracker_->AddSpace(&source_edge_crossings_, pending_source_edge_crossings_.size())) { return false; } for (const auto& crossing : pending_source_edge_crossings_) { source_edge_crossings_.push_back(make_pair(input_edge_id(), crossing)); } // Build a map that translates temporary edge ids (SourceId) to // the representation used by EdgeClippingLayer (InputEdgeId). if (!tracker_->TallySourceIdMap(1)) { return false; } SourceId src_id(a_region_id_, a_id.shape_id, a_id.edge_id); source_id_map_[src_id] = input_edge_id(); } // Set the GraphEdgeClipper's "inside" state to match ours. 
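    // (kSetInside is a special sentinel InputEdgeId; emitting it as a
    // crossing tells GraphEdgeClipper to update its "inside" state when it
    // reaches the corresponding input edge. See DoneBoundaryPair(), which
    // passes these special ids through unchanged.)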
if (inside_ != prev_inside_) SetClippingState(kSetInside, inside_); if (!tracker_->AddSpace(input_dimensions_, 1)) return false; input_dimensions_->push_back(dimension); builder_->AddEdge(a.v0, a.v1); inside_ ^= (interior_crossings & 1); prev_inside_ = inside_; return tracker_->ok(); } // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool AddPointEdge(const S2Point& p, int dimension) { if (builder_ == nullptr) return false; // Boolean output. if (!prev_inside_) SetClippingState(kSetInside, true); if (!tracker_->AddSpace(input_dimensions_, 1)) return false; input_dimensions_->push_back(dimension); builder_->AddEdge(p, p); prev_inside_ = true; return tracker_->ok(); } bool ProcessEdge0(ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it); bool ProcessEdge1(ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it); bool ProcessEdge2(ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it); void SkipCrossings(ShapeEdgeId a_id, CrossingIterator* it); PointCrossingResult ProcessPointCrossings( ShapeEdgeId a_id, const S2Point& a0, CrossingIterator* it) const; EdgeCrossingResult ProcessEdgeCrossings( ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it); bool IsPolylineVertexInside(bool matches_polyline, bool matches_polygon) const; bool IsPolylineEdgeInside(const EdgeCrossingResult& r, bool is_degenerate) const; bool PolylineEdgeContainsVertex(const S2Point& v, const CrossingIterator& it, int dimension) const; // Constructor parameters: PolygonModel polygon_model_; PolylineModel polyline_model_; bool polyline_loops_have_boundaries_; // The output of the CrossingProcessor consists of a subset of the input // edges that are emitted to "builder_", and some auxiliary information // that allows GraphEdgeClipper to determine which segments of those input // edges belong to the output. The auxiliary information consists of the // dimension of each input edge, and set of input edges from the other // region that cross each input input edge. S2Builder* builder_; // (nullptr if boolean output was requested) vector* input_dimensions_; InputEdgeCrossings* input_crossings_; MemoryTracker* tracker_; // Fields set by StartBoundary: int a_region_id_, b_region_id_; bool invert_a_, invert_b_, invert_result_; bool is_union_; // True if this is a UNION operation. // Fields set by StartShape: const S2Shape* a_shape_; int a_dimension_; // Fields set by StartChain: int chain_id_; int chain_start_; int chain_limit_; // Fields updated by ProcessEdge: // A temporary representation of input_crossings_ that is used internally // until all necessary edges from *both* polygons have been emitted to the // S2Builder. This field is then converted by DoneBoundaryPair() into // the InputEdgeCrossings format expected by GraphEdgeClipper. // // The reason that we can't construct input_crossings_ directly is that it // uses InputEdgeIds to identify the edges from both polygons, and when we // are processing edges from the first polygon, InputEdgeIds have not yet // been assigned to the second polygon. So instead this field identifies // edges from the first polygon using an InputEdgeId, and edges from the // second polygon using a (region_id, shape_id, edge_id) tuple (i.e., a // SourceId). // // All crossings are represented twice, once to indicate that an edge from // polygon 0 is crossed by an edge from polygon 1, and once to indicate that // an edge from polygon 1 is crossed by an edge from polygon 0. 
The entries // are sorted lexicographically by their eventual InputEdgeIds except for // GraphEdgeClipper state modifications, which are sorted by the first // InputEdgeId only. using SourceEdgeCrossings = vector>; SourceEdgeCrossings source_edge_crossings_; // A set of edges that cross the current edge being processed by // ProcessEdge() but that have not yet been associated with a particular // S2Builder edge. This is necessary because ProcessEdge can create up to // three S2Builder edges per input edge: one to represent the edge interior, // and up to two more to represent an isolated start and/or end vertex. The // crossing edges must be associated with the S2Builder edge that represents // the edge interior, and they are stored here until that edge is created. vector pending_source_edge_crossings_; // A map that translates from SourceId (the (region_id, shape_id, edge_id) // triple that identifies an S2ShapeIndex edge) to InputEdgeId (the // sequentially increasing numbers assigned to input edges by S2Builder). using SourceIdMap = absl::btree_map; SourceIdMap source_id_map_; // For each edge in region B that defines a degenerate loop (either a point // loop or a sibling pair), indicates whether that loop represents a shell // or a hole. This information is used during the second pass of // AddBoundaryPair() to determine the output for degenerate edges. flat_hash_map is_degenerate_hole_; // Indicates whether the point being processed along the current edge chain // is in the polygonal interior of the opposite region, using semi-open // boundaries. If "invert_b_" is true then this field is inverted. // // Equal to: b_index_.Contains(current point) ^ invert_b_ bool inside_; // The value of that "inside_" would have just before the end of the // previous edge added to S2Builder. This value is used to determine // whether the GraphEdgeClipper state needs to be updated when jumping from // one edge chain to another. bool prev_inside_; // The maximum edge id of any edge in the current chain whose v0 vertex has // already been emitted. This is used to determine when an isolated vertex // needs to be emitted, e.g. when two closed polygons share only a vertex. int v0_emitted_max_edge_id_; // True if the first vertex of the current chain has been emitted. This is // used when processing loops in order to determine whether the first/last // vertex of the loop should be emitted as an isolated vertex. bool chain_v0_emitted_; }; // See documentation above. void S2BooleanOperation::Impl::CrossingProcessor::StartBoundary( int a_region_id, bool invert_a, bool invert_b, bool invert_result) { a_region_id_ = a_region_id; b_region_id_ = 1 - a_region_id; invert_a_ = invert_a; invert_b_ = invert_b; invert_result_ = invert_result; is_union_ = invert_b && invert_result; // Specify to GraphEdgeClipper how these edges should be clipped. SetClippingState(kSetReverseA, invert_a != invert_result); SetClippingState(kSetInvertB, invert_b); } // See documentation above. inline void S2BooleanOperation::Impl::CrossingProcessor::StartShape( const S2Shape* a_shape) { a_shape_ = a_shape; a_dimension_ = a_shape->dimension(); } // See documentation above. inline void S2BooleanOperation::Impl::CrossingProcessor::StartChain( int chain_id, S2Shape::Chain chain, bool inside) { chain_id_ = chain_id; chain_start_ = chain.start; chain_limit_ = chain.start + chain.length; inside_ = inside; v0_emitted_max_edge_id_ = chain.start - 1; // No edges emitted yet. chain_v0_emitted_ = false; } // See documentation above. 
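// (ProcessEdge() dispatches on the dimension of the current A shape:
// ProcessEdge0() handles points, ProcessEdge1() handles polyline edges, and
// ProcessEdge2() handles polygon edges.)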
bool S2BooleanOperation::Impl::CrossingProcessor::ProcessEdge( ShapeEdgeId a_id, CrossingIterator* it) { // chain_edge() is faster than edge() when there are multiple chains. auto a = a_shape_->chain_edge(chain_id_, a_id.edge_id - chain_start_); if (a_dimension_ == 0) { return ProcessEdge0(a_id, a, it); } else if (a_dimension_ == 1) { return ProcessEdge1(a_id, a, it); } else { S2_DCHECK_EQ(2, a_dimension_); return ProcessEdge2(a_id, a, it); } } // PointCrossingResult describes the relationship between a point from region A // and a set of crossing edges from region B. For example, "matches_polygon" // indicates whether a polygon vertex from region B matches the given point. struct S2BooleanOperation::Impl::CrossingProcessor::PointCrossingResult { PointCrossingResult() : matches_point(false), matches_polyline(false), matches_polygon(false) { } // Note that "matches_polyline" is true only if the point matches a polyline // vertex of B *and* the polyline contains that vertex, whereas // "matches_polygon" is true if the point matches any polygon vertex. bool matches_point; // Matches point. bool matches_polyline; // Matches contained polyline vertex. bool matches_polygon; // Matches polygon vertex. }; // Processes an edge of dimension 0 (i.e., a point) from region A. // // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool S2BooleanOperation::Impl::CrossingProcessor::ProcessEdge0( ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it) { S2_DCHECK_EQ(a.v0, a.v1); // When a region is inverted, all points and polylines are discarded. if (invert_a_ != invert_result_) { SkipCrossings(a_id, it); return true; } PointCrossingResult r = ProcessPointCrossings(a_id, a.v0, it); // "contained" indicates whether the current point is inside the polygonal // interior of the opposite region, using semi-open boundaries. bool contained = inside_ ^ invert_b_; if (r.matches_polygon && polygon_model_ != PolygonModel::SEMI_OPEN) { contained = (polygon_model_ == PolygonModel::CLOSED); } if (r.matches_polyline) contained = true; // The output of UNION includes duplicate values, so ensure that points are // not suppressed by other points. if (r.matches_point && !is_union_) contained = true; // Test whether the point is contained after region B is inverted. if (contained == invert_b_) return true; // Don't exit early. return AddPointEdge(a.v0, 0); } // Skip any crossings that were not needed to determine the result. inline void S2BooleanOperation::Impl::CrossingProcessor::SkipCrossings( ShapeEdgeId a_id, CrossingIterator* it) { while (!it->Done(a_id)) it->Next(); } // Returns a summary of the relationship between a point from region A and // a set of crossing edges from region B (see PointCrossingResult). S2BooleanOperation::Impl::CrossingProcessor::PointCrossingResult S2BooleanOperation::Impl::CrossingProcessor::ProcessPointCrossings( ShapeEdgeId a_id, const S2Point& a0, CrossingIterator* it) const { PointCrossingResult r; for (; !it->Done(a_id); it->Next()) { if (it->b_dimension() == 0) { r.matches_point = true; } else if (it->b_dimension() == 1) { if (PolylineEdgeContainsVertex(a0, *it, 0)) { r.matches_polyline = true; } } else { r.matches_polygon = true; } } return r; } // EdgeCrossingResult describes the relationship between an edge (a0, a1) from // region A and a set of crossing edges from region B. For example, // "matches_polygon" indicates whether (a0, a1) matches a polygon edge from // region B. 
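// For example, if a polygon edge of B coincides exactly with (a0, a1) then
// polygon_match_id records that B edge and matches_polygon() returns true,
// whereas a B edge that coincides with the reversed edge (a1, a0) is recorded
// in sibling_match_id instead.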
struct S2BooleanOperation::Impl::CrossingProcessor::EdgeCrossingResult { // These fields indicate that (a0, a1) exactly matches an edge of B. bool matches_polyline = false; // Matches polyline edge (either direction). // These fields indicate that a B polyline contains the degenerate polyline // (a0, a0) or (a1, a1). (This is identical to whether the B polyline // contains the point a0 or a1 except when the B polyline is degenerate, // since a degenerate polyline VV contains itself in all boundary models but // contains the point V only in the CLOSED polyline model.) bool a0_matches_polyline = false; // B polyline contains (a0, a0) bool a1_matches_polyline = false; // B polyline contains (a1, a1) // These fields indicate that a vertex of (a0, a1) matches a polygon vertex // of B. (Unlike with polylines, the polygon may not contain that vertex.) bool a0_matches_polygon = false; // a0 matches polygon vertex. bool a1_matches_polygon = false; // a1 matches polygon vertex. // When a0 != a1, the first two fields identify any B polygon edge that // exactly matches (a0, a1) or the sibling edge (a1, a0). The third field // identifies any B polygon edge that exactly matches (a0, a0). ShapeEdgeId polygon_match_id; // B polygon edge that matches (a0, a1). ShapeEdgeId sibling_match_id; // B polygon edge that matches (a1, a0). ShapeEdgeId a0_loop_match_id; // B polygon edge that matches (a0, a0). // Convenience functions to test whether a matching edge was found. bool matches_polygon() const { return polygon_match_id.edge_id >= 0; } bool matches_sibling() const { return sibling_match_id.edge_id >= 0; } bool loop_matches_a0() const { return a0_loop_match_id.edge_id >= 0; } // These fields count the number of edge crossings at a0, a1, and the // interior of (a0, a1). int a0_crossings = 0; // Count of polygon crossings at a0. int a1_crossings = 0; // Count of polygon crossings at a1. int interior_crossings = 0; // Count of polygon crossings in edge interior. }; // Processes an edge of dimension 1 (i.e., a polyline edge) from region A. // // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool S2BooleanOperation::Impl::CrossingProcessor::ProcessEdge1( ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it) { // When a region is inverted, all points and polylines are discarded. if (invert_a_ != invert_result_) { SkipCrossings(a_id, it); return true; } // Evaluate whether the start vertex should belong to the output, in case it // needs to be emitted as an isolated vertex. EdgeCrossingResult r = ProcessEdgeCrossings(a_id, a, it); bool a0_inside = IsPolylineVertexInside(r.a0_matches_polyline, r.a0_matches_polygon); // Test whether the entire polyline edge should be emitted (or not emitted) // because it matches a polyline or polygon edge. bool is_degenerate = (a.v0 == a.v1); inside_ ^= (r.a0_crossings & 1); if (inside_ != IsPolylineEdgeInside(r, is_degenerate)) { inside_ ^= true; // Invert the inside_ state. ++r.a1_crossings; // Restore the correct (semi-open) state later. } // If neither edge adjacent to v0 was emitted, and this polyline contains // v0, and the other region contains v0, then emit an isolated vertex. if (!polyline_loops_have_boundaries_ && a_id.edge_id == chain_start_ && a.v0 == a_shape_->chain_edge(chain_id_, chain_limit_ - chain_start_ - 1).v1) { // This is the first vertex of a polyline loop, so we can't decide if it // is isolated until we process the last polyline edge. 
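    // (Roughly, we record whether the edge leaving this shared vertex will be
    // emitted; the last-vertex check near the end of this method may still
    // emit the loop vertex as an isolated vertex.)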
chain_v0_emitted_ = inside_; } else if (is_v0_isolated(a_id) && !is_degenerate && polyline_contains_v0(a_id.edge_id, chain_start_) && a0_inside) { if (!AddPointEdge(a.v0, 1)) return false; } // Test whether the entire edge or any part of it belongs to the output. if (inside_ || r.interior_crossings > 0) { // Note: updates "inside_" to correspond to the state just before a1. if (!AddEdge(a_id, a, 1 /*dimension*/, r.interior_crossings)) { return false; } } // Remember whether the edge portion just before "a1" was emitted, so that // we can decide whether "a1" need to be emitted as an isolated vertex. if (inside_) v0_emitted_max_edge_id_ = a_id.edge_id + 1; // Verify that edge crossings are being counted correctly. inside_ ^= (r.a1_crossings & 1); if (it->crossings_complete()) { S2_DCHECK_EQ(MakeS2ContainsPointQuery(&it->b_index()).Contains(a.v1), inside_ ^ invert_b_); } // Special case to test whether the last vertex of a polyline should be // emitted as an isolated vertex. if (it->crossings_complete() && !is_degenerate && is_chain_last_vertex_isolated(a_id) && (polyline_model_ == PolylineModel::CLOSED || (!polyline_loops_have_boundaries_ && a.v1 == a_shape_->chain_edge(chain_id_, chain_start_).v0)) && IsPolylineVertexInside(r.a1_matches_polyline, r.a1_matches_polygon)) { if (!AddPointEdge(a.v1, 1)) return false; } return true; } // Returns true if the current point being processed (which must be a polyline // vertex) is contained by the opposite region (after inversion if "invert_b_" // is true). "matches_polyline" and "matches_polygon" indicate whether the // vertex matches a polyline/polygon vertex of the opposite region. bool S2BooleanOperation::Impl::CrossingProcessor::IsPolylineVertexInside( bool matches_polyline, bool matches_polygon) const { // Initially "contained" indicates whether the current point is inside the // polygonal interior of region B using semi-open boundaries. bool contained = inside_ ^ invert_b_; // For UNION the output includes duplicate polylines. The test below // ensures that isolated polyline vertices are not suppressed by other // polyline vertices in the output. if (matches_polyline && !is_union_) { contained = true; } else if (matches_polygon && polygon_model_ != PolygonModel::SEMI_OPEN) { contained = (polygon_model_ == PolygonModel::CLOSED); } // Finally, invert the result if the opposite region should be inverted. return contained ^ invert_b_; } // Returns true if the current polyline edge is contained by the opposite // region (after inversion if "invert_b_" is true). inline bool S2BooleanOperation::Impl::CrossingProcessor::IsPolylineEdgeInside( const EdgeCrossingResult& r, bool is_degenerate) const { // Initially "contained" indicates whether the current point (just past a0) // is inside the polygonal interior of region B using semi-open boundaries. bool contained = inside_ ^ invert_b_; // Note that if r.matches_polyline and is_union_ is true, then "contained" // will be false (unless there is also a matching polygon edge) since // polyline edges are not allowed in the interior of B. In this case we // leave "contained" as false since it causes both matching edges to be // emitted. if (r.matches_polyline && !is_union_) { contained = true; } else if (is_degenerate) { // First allow the polygon boundary model to override the semi-open rules. // Note that a polygon vertex (dimension 2) is considered to completely // contain degenerate OPEN and SEMI_OPEN polylines (dimension 1) even // though the latter do not contain any points. 
This is because dimension // 2 points are considered to be a strict superset of dimension 1 points. if (polygon_model_ != PolygonModel::SEMI_OPEN && r.a0_matches_polygon) { contained = (polygon_model_ == PolygonModel::CLOSED); } // Note that r.a0_matches_polyline is true if and only if some B polyline // contains the degenerate polyline (a0, a0). if (r.a0_matches_polyline && !is_union_) contained = true; } else if (r.matches_polygon()) { // In the SEMI_OPEN model, polygon sibling pairs cancel each other and // have no effect on point or edge containment. if (!(polygon_model_ == PolygonModel::SEMI_OPEN && r.matches_sibling())) { contained = (polygon_model_ != PolygonModel::OPEN); } } else if (r.matches_sibling()) { contained = (polygon_model_ == PolygonModel::CLOSED); } // Finally, invert the result if the opposite region should be inverted. return contained ^ invert_b_; } // Processes an edge of dimension 2 (i.e., a polygon edge) from region A. // // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool S2BooleanOperation::Impl::CrossingProcessor::ProcessEdge2( ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it) { // Whenever the two regions contain the same edge, or opposite edges of a // sibling pair, or one region contains a point loop while the other // contains a matching vertex, then in general the result depends on whether // one or both sides represent a degenerate shell or hole. // // In each pass it is easy to determine whether edges in region B represent // degenerate geometry, and if so whether they represent a shell or hole, // since this can be determined from the inside_ state and the // matches_polygon() / matches_sibling() methods of EdgeCrossingResult. // However this information is not readily available for region A. // // We handle this by saving the shell/hole status of each degenerate loop in // region B during the first pass, and deferring the processing of any edges // that meet the criteria above until the second pass. (Note that regions // A,B correspond to regions 0,1 respectively in the first pass whereas they // refer to regions 1,0 respectively in the second pass.) // // The first pass ignores: // - degenerate edges of A that are incident to any edge of B // - non-degenerate edges of A that match or are siblings to an edge of B // // The first pass also records the shell/hole status of: // - degenerate edges of B that are incident to any edge of A // - sibling pairs of B where either edge matches an edge of A // // The second pass processes and perhaps outputs: // - degenerate edges of B that are incident to any edge of A // - non-degenerate edges of B that match or are siblings to an edge of A // // The following flag indicates that we are in the second pass described // above, i.e. that we are emitting any necessary edges that were ignored by // the first pass. bool emit_shared = (a_region_id_ == 1); // Degeneracies such as isolated vertices and sibling pairs can only be // created by intersecting CLOSED polygons or unioning OPEN polygons. bool create_degen = (polygon_model_ == PolygonModel::CLOSED && !invert_a_ && !invert_b_) || (polygon_model_ == PolygonModel::OPEN && invert_a_ && invert_b_); // In addition, existing degeneracies are kept when an open boundary is // subtracted. Note that "keep_degen_b" is only defined for completeness. 
// It is needed to ensure that the "reverse subtraction operator" (B - A) // preserves degeneracies correctly, however in practice this operator is // only used internally to implement symmetric difference, and in that // situation the preserved degeneracy is always removed from the final // result because it overlaps other geometry. bool keep_degen_a = (polygon_model_ == PolygonModel::OPEN && invert_b_); bool keep_degen_b = (polygon_model_ == PolygonModel::OPEN && invert_a_); EdgeCrossingResult r = ProcessEdgeCrossings(a_id, a, it); S2_DCHECK(!r.matches_polyline); // If only one region is inverted, matching/sibling relations are reversed. if (invert_a_ != invert_b_) swap(r.polygon_match_id, r.sibling_match_id); bool is_point = (a.v0 == a.v1); if (!emit_shared) { // Remember the shell/hole status of degenerate B edges that are incident // to any edge of A. (We don't need to do this for vertex a1 since it is // the same as vertex a0 of the following A loop edge.) if (r.loop_matches_a0()) { is_degenerate_hole_[r.a0_loop_match_id] = inside_; if (is_point) return true; } // Point loops are handled identically to points in the semi-open model, // and are easier to process in the first pass (since otherwise in the // r.a0_matches_polygon case we would need to remember the containment // status of the matching vertex). Otherwise we defer processing such // loops to the second pass so that we can distinguish whether the // degenerate edge represents a hole or shell. if (polygon_model_ != PolygonModel::SEMI_OPEN) { if (is_point && r.a0_matches_polygon) return true; } } inside_ ^= (r.a0_crossings & 1); if (!emit_shared) { // Defer processing A edges that match or are siblings to an edge of B. if (r.matches_polygon() || r.matches_sibling()) { // For sibling pairs, also remember their shell/hole status. if (r.matches_polygon() && r.matches_sibling()) { is_degenerate_hole_[r.polygon_match_id] = inside_; is_degenerate_hole_[r.sibling_match_id] = inside_; } S2_DCHECK_EQ(r.interior_crossings, 0); inside_ ^= (r.a1_crossings & 1); return true; } } // Remember whether the B geometry represents a sibling pair hole. bool is_b_hole = r.matches_polygon() && r.matches_sibling() && inside_; // At this point, "inside_" indicates whether the initial part of the A edge // is contained by the B geometry using semi-open rules. The following code // implements the various other polygon boundary rules by changing the value // of "inside_" when necessary to indicate whether the current A edge should // be emitted to the output or not. "semi_open_inside" remembers the true // value of "inside_" so that it can be restored later. bool semi_open_inside = inside_; if (is_point) { if (r.loop_matches_a0()) { // Both sides are point loops. The edge is kept only: // - for closed intersection, open union, and open difference; // - if A and B are both holes or both shells. inside_ = create_degen || keep_degen_a || (inside_ == is_degenerate_hole_[r.a0_loop_match_id]); } else if (r.a0_matches_polygon) { // A point loop in A matches a polygon vertex in B. Note that this code // can emit an extra isolated vertex if A represents a point hole, but // this doesn't matter (see comments on the call to AddPointEdge below). if (polygon_model_ != PolygonModel::SEMI_OPEN) { inside_ = create_degen || keep_degen_a; } } } else if (r.matches_polygon()) { if (is_degenerate(a_id)) { // The A edge has a sibling. 
The edge is kept only: // - for closed intersection, open union, and open difference; // - if the A sibling pair is a hole and the B edge has no sibling; or // - if the B geometry is also a sibling pair and A and B are both // holes or both shells. inside_ = create_degen || keep_degen_a || (!r.matches_sibling() || inside_) == is_degenerate_hole_[a_id]; } else { // Matching edges are kept unless the B geometry is a sibling pair, in // which case it is kept only for closed intersection, open union, and // open difference. if (!r.matches_sibling() || create_degen || keep_degen_b) inside_ = true; } } else if (r.matches_sibling()) { if (is_degenerate(a_id)) { // The A edge has a sibling. The edge is kept only if A is a sibling // pair shell and the operation is closed intersection, open union, or // open difference. inside_ = (create_degen || keep_degen_a) && !is_degenerate_hole_[a_id]; } else { inside_ = create_degen; } } if (inside_ != semi_open_inside) { ++r.a1_crossings; // Restores the correct (semi-open) state later. } // Test whether the first vertex of this edge should be emitted as an // isolated degenerate vertex. This is only needed in the second pass when: // - a0 matches a vertex of the B polygon; // - the initial part of the A edge will not be emitted; and // - the operation is closed intersection or open union, or open difference // and the B geometry is a point loop. // // The logic does not attempt to avoid redundant extra vertices (e.g. the // extra code in ProcessEdge1() that checks whether the vertex is the // endpoint of the preceding emitted edge) since these these will be removed // during S2Builder::Graph creation by DegenerateEdges::DISCARD or // DISCARD_EXCESS (which are necessary in any case due to snapping). if (emit_shared && r.a0_matches_polygon && !inside_ && (create_degen || (keep_degen_b && r.loop_matches_a0()))) { if (!AddPointEdge(a.v0, 2)) return false; } // Since we skipped edges in the first pass that only had a sibling pair // match in the B geometry, we sometimes need to emit the sibling pair of an // edge in the second pass. This happens only if: // - the operation is closed intersection, open union, or open difference; // - the A geometry is not a sibling pair (since otherwise we will process // that edge as well); and // - the B geometry is not a sibling pair hole (since then only one edge // should be emitted). if (r.matches_sibling() && (create_degen || keep_degen_b) && !is_degenerate(a_id) && !is_b_hole) { S2Shape::Edge sibling(a.v1, a.v0); if (!AddEdge(r.sibling_match_id, sibling, 2 /*dimension*/, 0)) { return false; } } // Test whether the entire edge or any part of it belongs to the output. if (inside_ || r.interior_crossings > 0) { // Note: updates "inside_" to correspond to the state just before a1. if (!AddEdge(a_id, a, 2 /*dimension*/, r.interior_crossings)) { return false; } } inside_ ^= (r.a1_crossings & 1); // Verify that edge crossings are being counted correctly. if (it->crossings_complete()) { S2_DCHECK_EQ(MakeS2ContainsPointQuery(&it->b_index()).Contains(a.v1), inside_ ^ invert_b_); } return true; } // Returns a summary of the relationship between a test edge from region A and // a set of crossing edges from region B (see EdgeCrossingResult). // // NOTE(ericv): We could save a bit of work when matching polygon vertices by // passing in a flag saying whether this information is needed. For example // it is only needed in ProcessEdge2 when (emit_shared && create_degenerate). 
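// Note that interior crossings with polygon edges of B are counted once each
// (so that they toggle the polygon containment parity), while interior
// crossings with polyline edges of B are counted twice (so that intersection
// vertices are still created but the parity is unchanged).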
S2BooleanOperation::Impl::CrossingProcessor::EdgeCrossingResult S2BooleanOperation::Impl::CrossingProcessor::ProcessEdgeCrossings( ShapeEdgeId a_id, const S2Shape::Edge& a, CrossingIterator* it) { pending_source_edge_crossings_.clear(); EdgeCrossingResult r; if (it->Done(a_id)) return r; for (; !it->Done(a_id); it->Next()) { // Polyline and polygon "inside" states are not affected by point geometry. if (it->b_dimension() == 0) continue; S2Shape::Edge b = it->b_edge(); if (it->is_interior_crossing()) { // The crossing occurs in the edge interior. The condition below says // that (1) polyline crossings don't affect the polygon "inside" state, // and (2) subtracting a crossing polyline from a polyline does not // affect its "inside" state. (Note that vertices are still created at // the intersection points.) if (a_dimension_ <= it->b_dimension() && !(invert_b_ != invert_result_ && it->b_dimension() == 1)) { SourceId src_id(b_region_id_, it->b_shape_id(), it->b_edge_id()); AddInteriorCrossing(make_pair(src_id, it->left_to_right())); } r.interior_crossings += (it->b_dimension() == 1) ? 2 : 1; } else if (it->b_dimension() == 1) { // The polygon "inside" state is not affected by polyline geometry. if (a_dimension_ == 2) continue; if ((a.v0 == b.v0 && a.v1 == b.v1) || (a.v0 == b.v1 && a.v1 == b.v0)) { r.matches_polyline = true; } if ((a.v0 == b.v0 || a.v0 == b.v1) && PolylineEdgeContainsVertex(a.v0, *it, 1)) { r.a0_matches_polyline = true; } if ((a.v1 == b.v0 || a.v1 == b.v1) && PolylineEdgeContainsVertex(a.v1, *it, 1)) { r.a1_matches_polyline = true; } } else { S2_DCHECK_EQ(2, it->b_dimension()); if (a.v0 == a.v1 || b.v0 == b.v1) { // There are no edge crossings since at least one edge is degenerate. if (a.v0 == b.v0 && a.v0 == b.v1) { r.a0_loop_match_id = it->b_id(); } } else if (a.v0 == b.v0 && a.v1 == b.v1) { ++r.a0_crossings; r.polygon_match_id = it->b_id(); } else if (a.v0 == b.v1 && a.v1 == b.v0) { ++r.a0_crossings; r.sibling_match_id = it->b_id(); } else if (it->is_vertex_crossing()) { if (a.v0 == b.v0 || a.v0 == b.v1) { ++r.a0_crossings; } else { ++r.a1_crossings; } } if (a.v0 == b.v0 || a.v0 == b.v1) { r.a0_matches_polygon = true; } if (a.v1 == b.v0 || a.v1 == b.v1) { r.a1_matches_polygon = true; } } } return r; } // Returns true if the vertex "v" is contained by the polyline edge referred // to by the CrossingIterator "it", taking into account the PolylineModel. // "dimension" is 0 or 1 according to whether "v" should be modeled as a point // or as a degenerate polyline. (This only makes a difference when the // containing polyline is degenerate, since the polyline AA contains itself in // all boundary models but contains the point A only in the CLOSED model.) // // REQUIRES: it.b_dimension() == 1 // REQUIRES: "v" is an endpoint of it.b_edge() bool S2BooleanOperation::Impl::CrossingProcessor::PolylineEdgeContainsVertex( const S2Point& v, const CrossingIterator& it, int dimension) const { S2_DCHECK_EQ(1, it.b_dimension()); S2_DCHECK(it.b_edge().v0 == v || it.b_edge().v1 == v); S2_DCHECK(dimension == 0 || dimension == 1); // Closed polylines contain all their vertices. if (polyline_model_ == PolylineModel::CLOSED) return true; // Note that the code below is structured so that it.b_edge() is not usually // needed (since accessing the edge can be relatively expensive). const auto& b_chain = it.b_chain_info(); int b_edge_id = it.b_edge_id(); // A polyline contains its last vertex only when the polyline is degenerate // (v0 == v1) and "v" is modeled as a degenerate polyline (dimension == 1). 
// This corresponds to the fact that the polyline AA contains itself in all // boundary models, but contains the point A only in the CLOSED model. if (b_edge_id == b_chain.limit - 1 && v == it.b_edge().v1 && (dimension == 0 || b_edge_id > 0 || v != it.b_edge().v0)) { return false; } // Otherwise all interior vertices are contained. The first polyline // vertex is contained if either the polyline model is not OPEN, or the // polyline forms a loop and polyline_loops_have_boundaries_ is false. if (polyline_contains_v0(b_edge_id, b_chain.start)) return true; if (v != it.b_edge().v0) return true; if (polyline_loops_have_boundaries_) return false; return v == it.b_shape().chain_edge(b_chain.chain_id, b_chain.limit - b_chain.start - 1).v1; } // Translates the temporary representation of crossing edges (SourceId) into // the format expected by EdgeClippingLayer (InputEdgeId). void S2BooleanOperation::Impl::CrossingProcessor::DoneBoundaryPair() { tracker_->AddSpaceExact(input_crossings_, source_edge_crossings_.size()); if (!tracker_->TallySourceIdMap(3)) return; // Add entries that translate the "special" crossings. source_id_map_[SourceId(kSetInside)] = kSetInside; source_id_map_[SourceId(kSetInvertB)] = kSetInvertB; source_id_map_[SourceId(kSetReverseA)] = kSetReverseA; for (const auto& tmp : source_edge_crossings_) { auto it = source_id_map_.find(tmp.second.first); S2_DCHECK(it != source_id_map_.end()); input_crossings_->push_back(make_pair( tmp.first, CrossingInputEdge(it->second, tmp.second.second))); } tracker_->Clear(&source_edge_crossings_); tracker_->ClearSourceIdMap(&source_id_map_); } // Clips the boundary of A to the interior of the opposite region B and adds // the resulting edges to the output. Optionally, any combination of region // A, region B, and the result may be inverted, which allows operations such // as union and difference to be implemented. // // Note that when an input region is inverted with respect to the output // (e.g., invert_a != invert_result), all polygon edges are reversed and all // points and polylines are discarded, since the complement of such objects // cannot be represented. (If you want to compute the complement of points // or polylines, you can use S2LaxPolygonShape to represent your geometry as // degenerate polygons instead.) // // This method must be called an even number of times (first to clip A to B // and then to clip B to A), calling DoneBoundaryPair() after each pair. // // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool S2BooleanOperation::Impl::AddBoundary( int a_region_id, bool invert_a, bool invert_b, bool invert_result, const vector& a_chain_starts, CrossingProcessor* cp) { const S2ShapeIndex& a_index = *op_->regions_[a_region_id]; const S2ShapeIndex& b_index = *op_->regions_[1 - a_region_id]; if (!GetIndexCrossings(a_region_id)) return false; cp->StartBoundary(a_region_id, invert_a, invert_b, invert_result); // Walk the boundary of region A and build a list of all edge crossings. // We also keep track of whether the current vertex is inside region B. 
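  // The traversal below is output-sensitive: while the current vertex is
  // outside region B, edges of the current chain that have no crossings are
  // skipped in bulk by jumping directly to the next crossing edge (or to the
  // next chain start).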
  auto next_start = a_chain_starts.begin();
  CrossingIterator next_crossing(&b_index, &index_crossings_,
                                 true /*crossings_complete*/);
  ShapeEdgeId next_id = min(*next_start, next_crossing.a_id());
  while (next_id != kSentinel) {
    int a_shape_id = next_id.shape_id;
    const S2Shape& a_shape = *a_index.shape(a_shape_id);
    cp->StartShape(&a_shape);
    while (next_id.shape_id == a_shape_id) {
      // TODO(ericv): Special handling of dimension 0?  Can omit most of this
      // code, including the loop, since all chains are of length 1.
      int edge_id = next_id.edge_id;
      S2Shape::ChainPosition chain_position = a_shape.chain_position(edge_id);
      int chain_id = chain_position.chain_id;
      S2Shape::Chain chain = a_shape.chain(chain_id);
      bool start_inside = (next_id == *next_start);
      if (start_inside) ++next_start;
      cp->StartChain(chain_id, chain, start_inside);
      int chain_limit = chain.start + chain.length;
      while (edge_id < chain_limit) {
        ShapeEdgeId a_id(a_shape_id, edge_id);
        S2_DCHECK(cp->inside() || next_crossing.a_id() == a_id);
        if (!cp->ProcessEdge(a_id, &next_crossing)) {
          return false;
        }
        if (cp->inside()) {
          ++edge_id;
        } else if (next_crossing.a_id().shape_id == a_shape_id &&
                   next_crossing.a_id().edge_id < chain_limit) {
          edge_id = next_crossing.a_id().edge_id;
        } else {
          break;
        }
      }
      next_id = min(*next_start, next_crossing.a_id());
    }
  }
  return true;
}

// Returns the first edge of each edge chain from "a_region_id" whose first
// vertex is contained by the opposite region's polygons (using the semi-open
// boundary model).  Each input region and the result region are inverted as
// specified (invert_a, invert_b, and invert_result) before testing for
// containment.  The algorithm uses these "chain starts" in order to clip the
// boundary of A to the interior of B in an output-sensitive way.
//
// This method supports "early exit" in the case where a boolean predicate is
// being evaluated and the algorithm discovers that the result region will be
// non-empty.
bool S2BooleanOperation::Impl::GetChainStarts(
    int a_region_id, bool invert_a, bool invert_b, bool invert_result,
    CrossingProcessor* cp, vector<ShapeEdgeId>* chain_starts) {
  const S2ShapeIndex& a_index = *op_->regions_[a_region_id];
  const S2ShapeIndex& b_index = *op_->regions_[1 - a_region_id];

  if (is_boolean_output()) {
    // If boolean output is requested, then we use the CrossingProcessor to
    // determine whether the first edge of each chain will be emitted to the
    // output region.  This lets us terminate the operation early in many
    // cases.
    cp->StartBoundary(a_region_id, invert_a, invert_b, invert_result);
  }

  // If region B has no two-dimensional shapes and is not inverted, then by
  // definition no chain starts are contained.  However if boolean output is
  // requested then we check for containment anyway, since as a side effect we
  // may discover that the result region is non-empty and terminate the entire
  // operation early.
  bool b_has_interior = HasInterior(b_index);
  if (b_has_interior || invert_b || is_boolean_output()) {
    auto query = MakeS2ContainsPointQuery(&b_index);
    int num_shape_ids = a_index.num_shape_ids();
    for (int shape_id = 0; shape_id < num_shape_ids; ++shape_id) {
      S2Shape* a_shape = a_index.shape(shape_id);
      if (a_shape == nullptr) continue;
      // If region A is being subtracted from region B, points and polylines
      // in region A can be ignored since these shapes never contribute to the
      // output (they can only remove edges from region B).
if (invert_a != invert_result && a_shape->dimension() < 2) continue; if (is_boolean_output()) cp->StartShape(a_shape); int num_chains = a_shape->num_chains(); for (int chain_id = 0; chain_id < num_chains; ++chain_id) { S2Shape::Chain chain = a_shape->chain(chain_id); if (chain.length == 0) continue; ShapeEdge a(shape_id, chain.start, a_shape->chain_edge(chain_id, 0)); bool inside = (b_has_interior && query.Contains(a.v0())) != invert_b; if (inside) { if (!tracker_.AddSpace(chain_starts, 1)) return false; chain_starts->push_back(ShapeEdgeId(shape_id, chain.start)); } if (is_boolean_output()) { cp->StartChain(chain_id, chain, inside); if (!ProcessIncidentEdges(a, &query, cp)) return false; } } } } if (!tracker_.AddSpace(chain_starts, 1)) return false; chain_starts->push_back(kSentinel); return true; } bool S2BooleanOperation::Impl::ProcessIncidentEdges( const ShapeEdge& a, S2ContainsPointQuery* query, CrossingProcessor* cp) { tmp_crossings_.clear(); query->VisitIncidentEdges(a.v0(), [&a, this](const ShapeEdge& b) { return AddIndexCrossing(a, b, false /*is_interior*/, &tmp_crossings_); }); // Fast path for the common case where there are no incident edges. We // return false (terminating early) if the first chain edge will be emitted. if (tmp_crossings_.empty()) { return !cp->inside(); } // Otherwise we invoke the full CrossingProcessor logic to determine whether // the first chain edge will be emitted. if (tmp_crossings_.size() > 1) { std::sort(tmp_crossings_.begin(), tmp_crossings_.end()); // VisitIncidentEdges() should not generate any duplicate values. S2_DCHECK(std::adjacent_find(tmp_crossings_.begin(), tmp_crossings_.end()) == tmp_crossings_.end()); } tmp_crossings_.push_back(IndexCrossing(kSentinel, kSentinel)); CrossingIterator next_crossing(&query->index(), &tmp_crossings_, false /*crossings_complete*/); return cp->ProcessEdge(a.id(), &next_crossing); } bool S2BooleanOperation::Impl::HasInterior(const S2ShapeIndex& index) { for (int s = index.num_shape_ids(); --s >= 0; ) { S2Shape* shape = index.shape(s); if (shape && shape->dimension() == 2) return true; } return false; } inline bool S2BooleanOperation::Impl::AddIndexCrossing( const ShapeEdge& a, const ShapeEdge& b, bool is_interior, IndexCrossings* crossings) { if (!tracker_.AddSpace(crossings, 1)) return false; crossings->push_back(IndexCrossing(a.id(), b.id())); IndexCrossing* crossing = &crossings->back(); if (is_interior) { crossing->is_interior_crossing = true; if (s2pred::Sign(a.v0(), a.v1(), b.v0()) > 0) { crossing->left_to_right = true; } builder_->AddIntersection( S2::GetIntersection(a.v0(), a.v1(), b.v0(), b.v1())); } else { // TODO(ericv): This field isn't used unless one shape is a polygon and // the other is a polyline or polygon, but we don't have the shape // dimension information readily available here. if (S2::VertexCrossing(a.v0(), a.v1(), b.v0(), b.v1())) { crossing->is_vertex_crossing = true; } } return true; // Continue visiting. } // Initialize index_crossings_ to the set of crossing edge pairs such that the // first element of each pair is an edge from "region_id". // // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool S2BooleanOperation::Impl::GetIndexCrossings(int region_id) { if (region_id == index_crossings_first_region_id_) return true; if (index_crossings_first_region_id_ < 0) { S2_DCHECK_EQ(region_id, 0); // For efficiency, not correctness. 
// TODO(ericv): This would be more efficient if VisitCrossingEdgePairs() // returned the sign (+1 or -1) of the interior crossing, i.e. // "int interior_crossing_sign" rather than "bool is_interior". if (!s2shapeutil::VisitCrossingEdgePairs( *op_->regions_[0], *op_->regions_[1], s2shapeutil::CrossingType::ALL, [this](const ShapeEdge& a, const ShapeEdge& b, bool is_interior) { // For all supported operations (union, intersection, and // difference), if the input edges have an interior crossing // then the output is guaranteed to have at least one edge. if (is_interior && is_boolean_output()) return false; return AddIndexCrossing(a, b, is_interior, &index_crossings_); })) { return false; } if (index_crossings_.size() > 1) { std::sort(index_crossings_.begin(), index_crossings_.end()); index_crossings_.erase( std::unique(index_crossings_.begin(), index_crossings_.end()), index_crossings_.end()); } // Add a sentinel value to simplify the loop logic. tracker_.AddSpace(&index_crossings_, 1); index_crossings_.push_back(IndexCrossing(kSentinel, kSentinel)); index_crossings_first_region_id_ = 0; } if (region_id != index_crossings_first_region_id_) { for (auto& crossing : index_crossings_) { swap(crossing.a, crossing.b); // The following predicates get inverted when the edges are swapped. crossing.left_to_right ^= true; crossing.is_vertex_crossing ^= true; } std::sort(index_crossings_.begin(), index_crossings_.end()); index_crossings_first_region_id_ = region_id; } return tracker_.ok(); } // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool S2BooleanOperation::Impl::AddBoundaryPair( bool invert_a, bool invert_b, bool invert_result, CrossingProcessor* cp) { // Optimization: if the operation is DIFFERENCE or SYMMETRIC_DIFFERENCE, // it is worthwhile checking whether the two regions are identical (in which // case the output is empty). auto type = op_->op_type_; if (type == OpType::DIFFERENCE || type == OpType::SYMMETRIC_DIFFERENCE) { if (AreRegionsIdentical()) return true; } else if (is_boolean_output()) { // TODO(ericv): When boolean output is requested there are other quick // checks that could be done here, such as checking whether a full cell from // one S2ShapeIndex intersects a non-empty cell of the other S2ShapeIndex. } vector a_starts, b_starts; auto _ = absl::MakeCleanup([&]() { tracker_.Untally(a_starts); tracker_.Untally(b_starts); }); if (!GetChainStarts(0, invert_a, invert_b, invert_result, cp, &a_starts) || !GetChainStarts(1, invert_b, invert_a, invert_result, cp, &b_starts) || !AddBoundary(0, invert_a, invert_b, invert_result, a_starts, cp) || !AddBoundary(1, invert_b, invert_a, invert_result, b_starts, cp)) { return false; } if (!is_boolean_output()) cp->DoneBoundaryPair(); return tracker_.ok(); } // Supports "early exit" in the case of boolean results by returning false // as soon as the result is known to be non-empty. bool S2BooleanOperation::Impl::BuildOpType(OpType op_type) { // CrossingProcessor does the real work of emitting the output edges. 
CrossingProcessor cp(op_->options_.polygon_model(), op_->options_.polyline_model(), op_->options_.polyline_loops_have_boundaries(), builder_.get(), &input_dimensions_, &input_crossings_, &tracker_); switch (op_type) { case OpType::UNION: // A | B == ~(~A & ~B) return AddBoundaryPair(true, true, true, &cp); case OpType::INTERSECTION: // A & B return AddBoundaryPair(false, false, false, &cp); case OpType::DIFFERENCE: // A - B = A & ~B // // Note that degeneracies are implemented such that the symmetric // operation (-B + A) also produces correct results. This can be tested // by swapping op_->regions[0, 1] and calling AddBoundaryPair(true, // false, false), which computes (~B & A). return AddBoundaryPair(false, true, false, &cp); case OpType::SYMMETRIC_DIFFERENCE: // Compute the union of (A - B) and (B - A). return (AddBoundaryPair(false, true, false, &cp) && AddBoundaryPair(true, false, false, &cp)); } S2_LOG(FATAL) << "Invalid S2BooleanOperation::OpType"; return false; } // Returns a bit mask indicating which of the 6 S2 cube faces intersect the // index contents. uint8 GetFaceMask(const S2ShapeIndex& index) { uint8 mask = 0; S2ShapeIndex::Iterator it(&index, S2ShapeIndex::BEGIN); while (!it.done()) { int face = it.id().face(); mask |= 1 << face; it.Seek(S2CellId::FromFace(face + 1).range_min()); } return mask; } // Given a polygon edge graph containing only degenerate edges and sibling edge // pairs, the purpose of this function is to decide whether the polygon is empty // or full except for the degeneracies, i.e. whether the degeneracies represent // shells or holes. bool S2BooleanOperation::Impl::IsFullPolygonResult( const S2Builder::Graph& g, S2Error* error) const { // If there are no edges of dimension 2, the result could be either the // empty polygon or the full polygon. Note that this is harder to determine // than you might think due to snapping. For example, the union of two // non-empty polygons can be empty, because both polygons consist of tiny // loops that are eliminated by snapping. Similarly, even if two polygons // both contain a common point their intersection can still be empty. // // We distinguish empty from full results using two heuristics: // // 1. We compute a bit mask representing the subset of the six S2 cube faces // intersected by each input geometry, and use this to determine if only // one of the two results is possible. (This test is very fast.) Note // that snapping will never cause the result to cover an entire extra // cube face because the maximum allowed snap radius is too small. S2_DCHECK_LE(S2Builder::SnapFunction::kMaxSnapRadius().degrees(), 70); // // 2. We compute the area of each input geometry, and use this to bound the // minimum and maximum area of the result. If only one of {0, 4*Pi} is // possible then we are done. If neither is possible then we choose the // one that is closest to being possible (since snapping can change the // result area). Both results are possible only when computing the // symmetric difference of two regions of area 2*Pi each, in which case we // must resort to additional heuristics (see below). // // TODO(ericv): Implement a predicate that uses the results of edge snapping // directly, rather than computing areas. This would not only be much faster // but would also allows all cases to be handled 100% robustly. 
const S2ShapeIndex& a = *op_->regions_[0]; const S2ShapeIndex& b = *op_->regions_[1]; switch (op_->op_type_) { case OpType::UNION: return IsFullPolygonUnion(a, b); case OpType::INTERSECTION: return IsFullPolygonIntersection(a, b); case OpType::DIFFERENCE: return IsFullPolygonDifference(a, b); case OpType::SYMMETRIC_DIFFERENCE: return IsFullPolygonSymmetricDifference(a, b); default: S2_LOG(FATAL) << "Invalid S2BooleanOperation::OpType"; return false; } } bool S2BooleanOperation::Impl::IsFullPolygonUnion( const S2ShapeIndex& a, const S2ShapeIndex& b) const { // See comments in IsFullPolygonResult(). The most common case is that // neither input polygon is empty but the result is empty due to snapping. // The result can be full only if the union of the two input geometries // intersects all six faces of the S2 cube. This test is fast. if ((GetFaceMask(a) | GetFaceMask(b)) != kAllFacesMask) return false; // The union area satisfies: // // max(A, B) <= Union(A, B) <= min(4*Pi, A + B) // // where A, B can refer to a polygon or its area. We then choose the result // that assumes the smallest amount of error. double a_area = S2::GetArea(a), b_area = S2::GetArea(b); double min_area = max(a_area, b_area); double max_area = min(4 * M_PI, a_area + b_area); return min_area > 4 * M_PI - max_area; } bool S2BooleanOperation::Impl::IsFullPolygonIntersection( const S2ShapeIndex& a, const S2ShapeIndex& b) const { // See comments in IsFullPolygonResult(). By far the most common case is // that the result is empty. // The result can be full only if each of the two input geometries // intersects all six faces of the S2 cube. This test is fast. if ((GetFaceMask(a) & GetFaceMask(b)) != kAllFacesMask) return false; // The intersection area satisfies: // // max(0, A + B - 4*Pi) <= Intersection(A, B) <= min(A, B) // // where A, B can refer to a polygon or its area. We then choose the result // that assumes the smallest amount of error. double a_area = S2::GetArea(a), b_area = S2::GetArea(b); double min_area = max(0.0, a_area + b_area - 4 * M_PI); double max_area = min(a_area, b_area); return min_area > 4 * M_PI - max_area; } bool S2BooleanOperation::Impl::IsFullPolygonDifference( const S2ShapeIndex& a, const S2ShapeIndex& b) const { // See comments in IsFullPolygonResult(). By far the most common case is // that the result is empty. // The result can be full only if each cube face is intersected by the first // geometry. (The second geometry is irrelevant, since for example it could // consist of a tiny loop on each S2 cube face.) This test is fast. if (GetFaceMask(a) != kAllFacesMask) return false; // The difference area satisfies: // // max(0, A - B) <= Difference(A, B) <= min(A, 4*Pi - B) // // where A, B can refer to a polygon or its area. We then choose the result // that assumes the smallest amount of error. double a_area = S2::GetArea(a), b_area = S2::GetArea(b); double min_area = max(0.0, a_area - b_area); double max_area = min(a_area, 4 * M_PI - b_area); return min_area > 4 * M_PI - max_area; } bool S2BooleanOperation::Impl::IsFullPolygonSymmetricDifference( const S2ShapeIndex& a, const S2ShapeIndex& b) const { // See comments in IsFullPolygonResult(). By far the most common case is // that the result is empty. // The result can be full only if the union of the two input geometries // intersects all six faces of the S2 cube. This test is fast. 
uint8 a_mask = GetFaceMask(a); uint8 b_mask = GetFaceMask(b); if ((a_mask | b_mask) != kAllFacesMask) return false; // The symmetric difference area satisfies: // // |A - B| <= SymmetricDifference(A, B) <= 4*Pi - |4*Pi - (A + B)| // // where A, B can refer to a polygon or its area. double a_area = S2::GetArea(a), b_area = S2::GetArea(b); double min_area = fabs(a_area - b_area); double max_area = 4 * M_PI - fabs(4 * M_PI - (a_area + b_area)); // Now we choose the result that assumes the smallest amount of error // (min_area in the empty case, and (4*Pi - max_area) in the full case). // However in the case of symmetric difference these two errors may be equal, // meaning that the result is ambiguous. This happens when both polygons have // area 2*Pi. Furthermore, this can happen even when the areas are not // exactly 2*Pi due to snapping and area calculation errors. // // To determine whether the result is ambiguous, we compute a rough estimate // of the maximum expected area error (including errors due to snapping), // using the worst-case error bound for a hemisphere defined by 4 vertices. auto edge_snap_radius = builder_options_.edge_snap_radius(); double hemisphere_area_error = 2 * M_PI * edge_snap_radius.radians() + 40 * DBL_EPSILON; // GetCurvatureMaxError // The following sign is the difference between the error needed for an empty // result and the error needed for a full result. It is negative if an // empty result is possible, positive if a full result is possible, and zero // if both results are possible. double error_sign = min_area - (4 * M_PI - max_area); if (fabs(error_sign) <= hemisphere_area_error) { // Handling the ambiguous case correctly requires a more sophisticated // algorithm (see below), but we can at least handle the simple cases by // testing whether both input geometries intersect all 6 cube faces. If // not, then the result is definitely full. if ((a_mask & b_mask) != kAllFacesMask) return true; // Otherwise both regions have area 2*Pi and intersect all 6 cube faces. // We choose "empty" in this case under the assumption that it is more // likely that the user is computing the difference between two nearly // identical polygons. // // TODO(ericv): Implement a robust algorithm based on examining the edge // snapping results directly, or alternatively add another heuristic (such // as testing containment of random points, or using a larger bit mask in // the tests above, e.g. a 24-bit mask representing all level 1 cells). return false; } return error_sign > 0; } // When subtracting regions, we can save a lot of work by detecting the // relatively common case where the two regions are identical. bool S2BooleanOperation::Impl::AreRegionsIdentical() const { const S2ShapeIndex* a = op_->regions_[0]; const S2ShapeIndex* b = op_->regions_[1]; if (a == b) return true; // If the regions are not identical, we would like to detect that fact as // quickly as possible. In particular we would like to avoid fully decoding // both shapes if they are represented as encoded shape types. // // First we test whether the two geometries have the same dimensions and // chain structure. This can be done without decoding any S2Points. 
int num_shape_ids = a->num_shape_ids(); if (num_shape_ids != b->num_shape_ids()) return false; for (int s = 0; s < num_shape_ids; ++s) { const S2Shape* a_shape = a->shape(s); const S2Shape* b_shape = b->shape(s); int dimension = a_shape->dimension(); if (dimension != b_shape->dimension()) return false; int num_chains = a_shape->num_chains(); if (num_chains != b_shape->num_chains()) return false; int num_edges = a_shape->num_edges(); if (num_edges != b_shape->num_edges()) return false; if (dimension == 0) { S2_DCHECK_EQ(num_edges, num_chains); // All chains are of length 1. continue; } for (int c = 0; c < num_chains; ++c) { S2Shape::Chain a_chain = a_shape->chain(c); S2Shape::Chain b_chain = b_shape->chain(c); S2_DCHECK_EQ(a_chain.start, b_chain.start); if (a_chain.length != b_chain.length) return false; } } // Next we test whether both geometries have the same vertex positions. for (int s = 0; s < num_shape_ids; ++s) { const S2Shape* a_shape = a->shape(s); const S2Shape* b_shape = b->shape(s); int num_chains = a_shape->num_chains(); for (int c = 0; c < num_chains; ++c) { S2Shape::Chain a_chain = a_shape->chain(c); for (int i = 0; i < a_chain.length; ++i) { S2Shape::Edge a_edge = a_shape->chain_edge(c, i); S2Shape::Edge b_edge = b_shape->chain_edge(c, i); if (a_edge.v0 != b_edge.v0) return false; if (a_edge.v1 != b_edge.v1) return false; } } // Note that we don't need to test whether both shapes have the same // GetReferencePoint(), because S2Shape requires that the full geometry of // the shape (including its interior) must be derivable from its chains // and edges. This is why the "full loop" exists; see s2shape.h. } return true; } void S2BooleanOperation::Impl::DoBuild(S2Error* error) { if (!tracker_.ok()) return; builder_options_ = S2Builder::Options(op_->options_.snap_function()); builder_options_.set_intersection_tolerance(S2::kIntersectionError); builder_options_.set_memory_tracker(tracker_.tracker()); if (op_->options_.split_all_crossing_polyline_edges()) { builder_options_.set_split_crossing_edges(true); } // TODO(ericv): Ideally idempotent() should be true, but existing clients // expect vertices closer than the full "snap_radius" to be snapped. builder_options_.set_idempotent(false); if (is_boolean_output()) { // BuildOpType() returns true if and only if the result has no edges. S2Builder::Graph g; // Unused by IsFullPolygonResult() implementation. *op_->result_empty_ = BuildOpType(op_->op_type_) && !IsFullPolygonResult(g, error); return; } builder_ = make_unique(builder_options_); builder_->StartLayer(make_unique( &op_->layers_, &input_dimensions_, &input_crossings_, &tracker_)); // Add a predicate that decides whether a result with no polygon edges should // be interpreted as the empty polygon or the full polygon. builder_->AddIsFullPolygonPredicate( [this](const S2Builder::Graph& g, S2Error* error) { return IsFullPolygonResult(g, error); }); (void) BuildOpType(op_->op_type_); // Release memory that is no longer needed. if (!tracker_.Clear(&index_crossings_)) return; builder_->Build(error); } bool S2BooleanOperation::Impl::Build(S2Error* error) { // This wrapper ensures that memory tracking errors are reported. 
error->Clear(); DoBuild(error); if (!tracker_.ok()) *error = tracker_.error(); return error->ok(); } S2BooleanOperation::Options::Options() : snap_function_(make_unique( S1Angle::Zero())) { } S2BooleanOperation::Options::Options(const SnapFunction& snap_function) : snap_function_(snap_function.Clone()) { } S2BooleanOperation::Options::Options(const Options& options) : snap_function_(options.snap_function_->Clone()), polygon_model_(options.polygon_model_), polyline_model_(options.polyline_model_), polyline_loops_have_boundaries_(options.polyline_loops_have_boundaries_), split_all_crossing_polyline_edges_( options.split_all_crossing_polyline_edges_), precision_(options.precision_), conservative_output_(options.conservative_output_), source_id_lexicon_(options.source_id_lexicon_), memory_tracker_(options.memory_tracker_) { } S2BooleanOperation::Options& S2BooleanOperation::Options::operator=( const Options& options) { snap_function_ = options.snap_function_->Clone(); polygon_model_ = options.polygon_model_; polyline_model_ = options.polyline_model_; polyline_loops_have_boundaries_ = options.polyline_loops_have_boundaries_; split_all_crossing_polyline_edges_ = options.split_all_crossing_polyline_edges_; precision_ = options.precision_; conservative_output_ = options.conservative_output_; source_id_lexicon_ = options.source_id_lexicon_; memory_tracker_ = options.memory_tracker_; return *this; } const SnapFunction& S2BooleanOperation::Options::snap_function() const { return *snap_function_; } void S2BooleanOperation::Options::set_snap_function( const SnapFunction& snap_function) { snap_function_ = snap_function.Clone(); } PolygonModel S2BooleanOperation::Options::polygon_model() const { return polygon_model_; } void S2BooleanOperation::Options::set_polygon_model(PolygonModel model) { polygon_model_ = model; } PolylineModel S2BooleanOperation::Options::polyline_model() const { return polyline_model_; } void S2BooleanOperation::Options::set_polyline_model(PolylineModel model) { polyline_model_ = model; } bool S2BooleanOperation::Options::polyline_loops_have_boundaries() const { return polyline_loops_have_boundaries_; } void S2BooleanOperation::Options::set_polyline_loops_have_boundaries( bool value) { polyline_loops_have_boundaries_ = value; } bool S2BooleanOperation::Options::split_all_crossing_polyline_edges() const { return split_all_crossing_polyline_edges_; } void S2BooleanOperation::Options::set_split_all_crossing_polyline_edges( bool value) { split_all_crossing_polyline_edges_ = value; } Precision S2BooleanOperation::Options::precision() const { return precision_; } bool S2BooleanOperation::Options::conservative_output() const { return conservative_output_; } ValueLexicon* S2BooleanOperation::Options::source_id_lexicon() const { return source_id_lexicon_; } S2MemoryTracker* S2BooleanOperation::Options::memory_tracker() const { return memory_tracker_; } void S2BooleanOperation::Options::set_memory_tracker(S2MemoryTracker* tracker) { memory_tracker_ = tracker; } const char* S2BooleanOperation::OpTypeToString(OpType op_type) { switch (op_type) { case OpType::UNION: return "UNION"; case OpType::INTERSECTION: return "INTERSECTION"; case OpType::DIFFERENCE: return "DIFFERENCE"; case OpType::SYMMETRIC_DIFFERENCE: return "SYMMETRIC DIFFERENCE"; default: return "Unknown OpType"; } } const char* S2BooleanOperation::PolygonModelToString(PolygonModel model) { switch (model) { case PolygonModel::OPEN: return "OPEN"; case PolygonModel::SEMI_OPEN: return "SEMI_OPEN"; case PolygonModel::CLOSED: return 
"CLOSED"; default: return "Unknown PolygonModel"; } } const char* S2BooleanOperation::PolylineModelToString(PolylineModel model) { switch (model) { case PolylineModel::OPEN: return "OPEN"; case PolylineModel::SEMI_OPEN: return "SEMI_OPEN"; case PolylineModel::CLOSED: return "CLOSED"; default: return "Unknown PolylineModel"; } } S2BooleanOperation::S2BooleanOperation(OpType op_type, const Options& options) : options_(options), op_type_(op_type), result_empty_(nullptr) { } S2BooleanOperation::S2BooleanOperation(OpType op_type, bool* result_empty, const Options& options) : options_(options), op_type_(op_type), result_empty_(result_empty) { } S2BooleanOperation::S2BooleanOperation( OpType op_type, unique_ptr layer, const Options& options) : options_(options), op_type_(op_type), result_empty_(nullptr) { layers_.push_back(std::move(layer)); } S2BooleanOperation::S2BooleanOperation( OpType op_type, vector> layers, const Options& options) : options_(options), op_type_(op_type), layers_(std::move(layers)), result_empty_(nullptr) { } bool S2BooleanOperation::Build(const S2ShapeIndex& a, const S2ShapeIndex& b, S2Error* error) { regions_[0] = &a; regions_[1] = &b; return Impl(this).Build(error); } bool S2BooleanOperation::IsEmpty( OpType op_type, const S2ShapeIndex& a, const S2ShapeIndex& b, const Options& options) { bool result_empty; S2BooleanOperation op(op_type, &result_empty, options); S2Error error; op.Build(a, b, &error); S2_DCHECK(error.ok()); return result_empty; } s2geometry-0.10.0/src/s2/s2boolean_operation.h000066400000000000000000000667421422156367100211460ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_S2BOOLEAN_OPERATION_H_ #define S2_S2BOOLEAN_OPERATION_H_ #include #include #include #include "s2/s2builder.h" #include "s2/s2builder_graph.h" #include "s2/s2builder_layer.h" #include "s2/value_lexicon.h" // This class implements boolean operations (intersection, union, difference, // and symmetric difference) for regions whose boundaries are defined by // geodesic edges. // // S2BooleanOperation operates on exactly two input regions at a time. Each // region is represented as an S2ShapeIndex and may contain any number of // points, polylines, and polygons. The region is essentially the union of // these objects, except that polygon interiors must be disjoint from all // other geometry (including other polygon interiors). If the input geometry // for a region does not meet this condition, it can be normalized by // computing its union first. Duplicate polygon edges are not allowed (even // among different polygons), however polylines may have duplicate edges and // may even be self-intersecting. Note that points or polylines are allowed // to coincide with the boundaries of polygons. // // Degeneracies are fully supported. Supported degeneracy types include the // following: // // - Point polylines consisting of a single degenerate edge AA. 
// // - Point loops consisting of a single vertex A. Such loops may represent // either shells or holes according to whether the loop adds to or // subtracts from the surrounding region of the polygon. // // - Sibling edge pairs of the form {AB, BA}. Such sibling pairs may // represent either shells or holes according to whether they add to or // subtract from the surrounding region. The edges of a sibling pair may // belong to the same polygon loop (e.g. a loop AB) or to different polygon // loops or polygons (e.g. the polygons {ABC, CBD}). // // A big advantage of degeneracy support is that geometry may be simplified // without completely losing small details. For example, if a polygon // representing a land area with many lakes and rivers is simplified using a // tolerance of 1 kilometer, every water feature in the input is guaranteed to // be within 1 kilometer of some water feature in the input (even if some // lakes and rivers are merged and/or reduced to degenerate point or sibling // edge pair holes). Mathematically speaking, degeneracy support allows // geometry to be simplified while guaranteeing that the Hausdorff distance // betweeen the boundaries of the original and simplified geometries is at // most the simplification tolerance. It also allows geometry to be // simplified without changing its dimension, thus preserving boundary // semantics. (Note that the boundary of a polyline ABCD is {A,D}, whereas // the boundary of a degenerate shell ABCDCB is its entire set of vertices and // edges.) // // Points and polyline edges are treated as multisets: if the same point or // polyline edge appears multiple times in the input, it will appear multiple // times in the output. For example, the union of a point with an identical // point consists of two points. This feature is useful for modeling large // sets of points or polylines as a single region while maintaining their // distinct identities, even when the points or polylines intersect each // other. It is also useful for reconstructing polylines that loop back on // themselves (e.g., time series such as GPS tracks). If duplicate geometry // is not desired, it can easily be removed by choosing the appropriate // S2Builder output layer options. // // Self-intersecting polylines can be manipulated without materializing new // vertices at the self-intersection points. This feature is important when // processing polylines with large numbers of self-intersections such as GPS // tracks (e.g., consider the path of a race car in the Indy 500). // // Polylines are always considered to be directed. Polyline edges between the // same pair of vertices are defined to intersect even if the two edges are in // opposite directions. (Undirected polylines can be modeled by specifying // GraphOptions::EdgeType::UNDIRECTED in the S2Builder output layer.) // // The output of each operation is sent to an S2Builder::Layer provided by the // client. This allows clients to build any representation of the geometry // they choose. It also allows the client to do additional postprocessing of // the output before building data structures; for example, the client can // easily discard degeneracies or convert them to another data type. // // The boundaries of polygons and polylines can be modeled as open, semi-open, // or closed. 
Polyline boundaries are controlled by the PolylineModel class, // whose options are as follows: // // - In the OPEN model, polylines do not contain their first or last vertex // except for one special case: namely, if the polyline forms a loop and // the polyline_loops_have_boundaries() option is set to false, then the // first/last vertex is contained. // // - In the SEMI_OPEN model, polylines contain all vertices except the last. // Therefore if one polyline starts where another polyline stops, the two // polylines do not intersect. // // - In the CLOSED model, polylines contain all of their vertices. // // When multiple polylines are present, they are processed independently and // have no effect on each other. For example, in the OPEN boundary model the // polyline ABC contains the vertex B, while set of polylines {AB, BC} does // not. (If you want to treat the polylines as a union instead, with // boundaries merged according to the "mod 2" rule, this can be achieved by // reassembling the edges into maximal polylines using S2PolylineVectorLayer // with EdgeType::UNDIRECTED, DuplicateEdges::MERGE, and PolylineType::WALK.) // // Polygon boundaries are controlled by the PolygonModel class, which has the // following options: // // - In the OPEN model, polygons do not contain their vertices or edges. // This implies that a polyline that follows the boundary of a polygon will // not intersect it. // // - In the SEMI_OPEN model, polygon point containment is defined such that // if several polygons tile the region around a vertex, then exactly one of // those polygons contains that vertex. Similarly polygons contain all of // their edges, but none of their reversed edges. This implies that a // polyline and polygon edge with the same endpoints intersect if and only // if they are in the same direction. (This rule ensures that if a // polyline is intersected with a polygon and its complement, the two // resulting polylines do not have any edges in common.) // // - In the CLOSED model, polygons contain all their vertices, edges, and // reversed edges. This implies that a polyline that shares an edge (in // either direction) with a polygon is defined to intersect it. Similarly, // this is the only model where polygons that touch at a vertex or along an // edge intersect. // // Note that PolylineModel and PolygonModel are defined as separate classes in // order to allow for possible future extensions. // // Operations between geometry of different dimensions are defined as follows: // // - For UNION, the higher-dimensional shape always wins. For example the // union of a closed polygon A with a polyline B that coincides with the // boundary of A consists only of the polygon A. // // - For INTERSECTION, the lower-dimensional shape always wins. For example, // the intersection of a closed polygon A with a point B that coincides // with a vertex of A consists only of the point B. // // - For DIFFERENCE, higher-dimensional shapes are not affected by // subtracting lower-dimensional shapes. For example, subtracting a point // or polyline from a polygon A yields the original polygon A. This rule // exists because in general, it is impossible to represent the output // using the specified boundary model(s). (Consider subtracting one vertex // from a PolylineModel::CLOSED polyline, or subtracting one edge from a // PolygonModel::CLOSED polygon.) If you want to perform operations like // this, consider representing all boundaries explicitly (topological // boundaries) using OPEN boundary models. 
Another option for polygons is // to subtract a degenerate loop, which yields a polygon with a degenerate // hole (see S2LaxPolygonShape). // // Note that in the case of Precision::EXACT operations, the above remarks // only apply to the output before snapping. Snapping may cause nearby // distinct edges to become coincident, e.g. a polyline may become coincident // with a polygon boundary. However also note that S2BooleanOperation is // perfectly happy to accept such geometry as input. // // Note the following differences between S2BooleanOperation and the similar // S2MultiBooleanOperation class: // // - S2BooleanOperation operates on exactly two regions at a time, whereas // S2MultiBooleanOperation operates on any number of regions. // // - S2BooleanOperation is potentially much faster when the input is already // represented as S2ShapeIndexes. The algorithm is output sensitive and is // often sublinear in the input size. This can be a big advantage if, say, // // - S2BooleanOperation supports exact predicates and the corresponding // exact operations (i.e., operations that are equivalent to computing the // exact result and then snap rounding it). // // - S2MultiBooleanOperation has better error guarantees when there are many // regions, since it requires only one snapping operation for any number of // input regions. // // Example usage: // S2ShapeIndex a, b; // Input geometry, e.g. containing polygons. // S2Polygon polygon; // Output geometry. // S2BooleanOperation::Options options; // options.set_snap_function(snap_function); // S2BooleanOperation op(S2BooleanOperation::OpType::INTERSECTION, // absl::make_unique(&polygon), // options); // S2Error error; // if (!op.Build(a, b, &error)) { // S2_LOG(ERROR) << error; // ... // } // // If the output includes objects of different dimensions, they can be // assembled into different layers with code like this: // // vector points; // vector> polylines; // S2Polygon polygon; // S2BooleanOperation op( // S2BooleanOperation::OpType::UNION, // absl::make_unique(&points), // absl::make_unique(&polylines), // absl::make_unique(&polygon)); class S2BooleanOperation { public: // The supported operation types. enum class OpType : uint8 { UNION, // Contained by either region. INTERSECTION, // Contained by both regions. DIFFERENCE, // Contained by the first region but not the second. SYMMETRIC_DIFFERENCE // Contained by one region but not the other. }; // Translates OpType to one of the strings above. static const char* OpTypeToString(OpType op_type); // Defines whether polygons are considered to contain their vertices and/or // edges (see definitions above). enum class PolygonModel : uint8 { OPEN, SEMI_OPEN, CLOSED }; // Translates PolygonModel to one of the strings above. static const char* PolygonModelToString(PolygonModel model); // Defines whether polylines are considered to contain their endpoints // (see definitions above). enum class PolylineModel : uint8 { OPEN, SEMI_OPEN, CLOSED }; // Translates PolylineModel to one of the strings above. static const char* PolylineModelToString(PolylineModel model); // With Precision::EXACT, the operation is evaluated using the exact input // geometry. Predicates that use this option will produce exact results; // for example, they can distinguish between a polyline that barely // intersects a polygon from one that barely misses it. 
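  // As a concrete sketch (assuming "a_index" and "b_index" are the two input
  // S2ShapeIndexes), a predicate call such as
  //
  //   bool contains = S2BooleanOperation::Contains(a_index, b_index);
  //
  // is therefore evaluated against the exact input geometry when the default
  // Options are used, since precision() defaults to Precision::EXACT.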
Constructive // operations (ones that yield new geometry, as opposed to predicates) are // implemented by computing the exact result and then snap rounding it // according to the given snap_function() (see below). This is as close as // it is possible to get to the exact result while requiring that vertex // coordinates have type "double". // // With Precision::SNAPPED, the input regions are snapped together *before* // the operation is evaluated. So for example, two polygons that overlap // slightly will be treated as though they share a common boundary, and // similarly two polygons that are slightly separated from each other will // be treated as though they share a common boundary. Snapped results are // useful for dealing with points, since in S2 the only points that lie // exactly on a polyline or polygon edge are the endpoints of that edge. // // Conceptually, the difference between these two options is that with // Precision::SNAPPED, the inputs are snap rounded (together), whereas with // Precision::EXACT only the result is snap rounded. enum class Precision : uint8 { EXACT, SNAPPED }; // SourceId identifies an edge from one of the two input S2ShapeIndexes. // It consists of a region id (0 or 1), a shape id within that region's // S2ShapeIndex, and an edge id within that shape. class SourceId { public: SourceId(); SourceId(int region_id, int32 shape_id, int32 edge_id); explicit SourceId(int32 special_edge_id); int region_id() const { return region_id_; } int32 shape_id() const { return shape_id_; } int32 edge_id() const { return edge_id_; } // TODO(ericv): Convert to functions, define all 6 comparisons. bool operator==(SourceId other) const; bool operator<(SourceId other) const; private: uint32 region_id_ : 1; uint32 shape_id_ : 31; int32 edge_id_; }; class Options { public: Options(); // Convenience constructor that calls set_snap_function(). explicit Options(const S2Builder::SnapFunction& snap_function); // Specifies the function to be used for snap rounding. // // DEFAULT: s2builderutil::IdentitySnapFunction(S1Angle::Zero()) // - This does no snapping and preserves all input vertices exactly unless // there are crossing edges, in which case the snap radius is increased // to the maximum intersection point error (S2::kIntersectionError). const S2Builder::SnapFunction& snap_function() const; void set_snap_function(const S2Builder::SnapFunction& snap_function); // Defines whether polygons are considered to contain their vertices // and/or edges (see comments above). // // DEFAULT: PolygonModel::SEMI_OPEN PolygonModel polygon_model() const; void set_polygon_model(PolygonModel model); // Defines whether polylines are considered to contain their vertices (see // comments above). // // DEFAULT: PolylineModel::CLOSED PolylineModel polyline_model() const; void set_polyline_model(PolylineModel model); // Specifies whether a polyline loop is considered to have a non-empty // boundary. By default this option is true, meaning that even if the // first and last vertices of a polyline are the same, the polyline is // considered to have a well-defined "start" and "end". For example, if // the polyline boundary model is OPEN then the polyline loop would not // include the start/end vertices. These are the best semantics for most // applications, such as GPS tracks or road network segments. // // If the polyline forms a loop and this option is set to false, then // instead the first and last vertices are considered to represent a // single vertex in the interior of the polyline. 
In this case the // boundary of the polyline is empty, meaning that the first/last vertex // will be contained by the polyline even if the boundary model is OPEN. // (Note that this option also has a small effect on the CLOSED boundary // model, because the first/last vertices of a polyline loop are // considered to represent one vertex rather than two.) // // The main reason for this option is to implement the "mod 2 union" // boundary semantics of the OpenGIS Simple Features spec. This can be // achieved by making sure that all polylines are constructed using // S2Builder::Graph::PolylineType::WALK (which ensures that all polylines // are as long as possible), and then setting this option to false. // // DEFAULT: true bool polyline_loops_have_boundaries() const; void set_polyline_loops_have_boundaries(bool value); // Specifies that a new vertex should be added whenever a polyline edge // crosses another polyline edge. Note that this can cause the size of // polylines with many self-intersections to increase quadratically. // // If false, new vertices are added only when a polyline from one input // region cross a polyline from the other input region. This allows // self-intersecting input polylines to be modified as little as possible. // // DEFAULT: false bool split_all_crossing_polyline_edges() const; void set_split_all_crossing_polyline_edges(bool value); // Specifies whether the operation should use the exact input geometry // (Precision::EXACT), or whether the two input regions should be snapped // together first (Precision::SNAPPED). // // DEFAULT: Precision::EXACT Precision precision() const; // void set_precision(Precision precision); // If true, the input geometry is interpreted as representing nearby // geometry that has been snapped or simplified. It then outputs a // conservative result based on the value of polygon_model() and // polyline_model(). For the most part, this only affects the handling of // degeneracies. // // - If the model is OPEN, the result is as open as possible. For // example, the intersection of two identical degenerate shells is empty // under PolygonModel::OPEN because they could have been disjoint before // snapping. Similarly, two identical degenerate polylines have an // empty intersection under PolylineModel::OPEN. // // - If the model is CLOSED, the result is as closed as possible. In the // case of the DIFFERENCE operation, this is equivalent to evaluating // A - B as Closure(A) - Interior(B). For other operations, it affects // only the handling of degeneracies. For example, the union of two // identical degenerate holes is empty under PolygonModel::CLOSED // (i.e., the hole disappears) because the holes could have been // disjoint before snapping. // // - If the model is SEMI_OPEN, the result is as degenerate as possible. // New degeneracies will not be created, but all degeneracies that // coincide with the opposite region's boundary are retained unless this // would cause a duplicate polygon edge to be created. This model is // is very useful for working with input data that has both positive and // negative degeneracies (i.e., degenerate shells and holes). // // DEFAULT: false bool conservative_output() const; // void set_conservative_output(bool conservative); // If specified, then each output edge will be labelled with one or more // SourceIds indicating which input edge(s) it corresponds to. This // can be useful if your input geometry has additional data that needs to // be propagated from the input to the output (e.g., elevations). 
    //
    // You can access the labels by using an S2Builder::Layer type that
    // supports labels, such as S2PolygonLayer.  The layer outputs a
    // "label_set_lexicon" and a "label_set_id" for each edge.  You can then
    // look up the source information for each edge like this:
    //
    // for (int32 label : label_set_lexicon.id_set(label_set_id)) {
    //   const SourceId& src = source_id_lexicon.value(label);
    //   // region_id() specifies which S2ShapeIndex the edge is from (0 or 1).
    //   DoSomething(src.region_id(), src.shape_id(), src.edge_id());
    // }
    //
    // DEFAULT: nullptr
    ValueLexicon<SourceId>* source_id_lexicon() const;
    // void set_source_id_lexicon(ValueLexicon<SourceId>* source_id_lexicon);

    // Specifies that internal memory usage should be tracked using the given
    // S2MemoryTracker.  If a memory limit is specified and more memory than
    // this is required then an error will be returned.  Example usage:
    //
    //   S2MemoryTracker tracker;
    //   tracker.set_limit(500 << 20);  // 500 MB
    //   S2BooleanOperation::Options options;
    //   options.set_memory_tracker(&tracker);
    //   S2BooleanOperation op(..., options);
    //   ...
    //   S2Error error;
    //   if (!op.Build(..., &error)) {
    //     if (error.code() == S2Error::RESOURCE_EXHAUSTED) {
    //       S2_LOG(ERROR) << error;  // Memory limit exceeded
    //     }
    //   }
    //
    // CAVEATS:
    //
    //  - Memory used by the input S2ShapeIndexes and the output S2Builder
    //    layers is not counted towards the total.
    //
    //  - While memory tracking is reasonably complete and accurate, it does
    //    not account for every last byte.  It is intended only for the
    //    purpose of preventing clients from running out of memory.
    //
    // DEFAULT: nullptr (memory tracking disabled)
    S2MemoryTracker* memory_tracker() const;
    void set_memory_tracker(S2MemoryTracker* tracker);

    // Options may be assigned and copied.
    Options(const Options& options);
    Options& operator=(const Options& options);

   private:
    std::unique_ptr<S2Builder::SnapFunction> snap_function_;
    PolygonModel polygon_model_ = PolygonModel::SEMI_OPEN;
    PolylineModel polyline_model_ = PolylineModel::CLOSED;
    bool polyline_loops_have_boundaries_ = true;
    bool split_all_crossing_polyline_edges_ = false;
    Precision precision_ = Precision::EXACT;
    bool conservative_output_ = false;
    ValueLexicon<SourceId>* source_id_lexicon_ = nullptr;
    S2MemoryTracker* memory_tracker_ = nullptr;
  };

  // Specifies that the output boundary edges should be sent to a single
  // S2Builder layer.  This version can be used when the dimension of the
  // output geometry is known (e.g., intersecting two polygons to yield a
  // third polygon).
  S2BooleanOperation(OpType op_type,
                     std::unique_ptr<S2Builder::Layer> layer,
                     const Options& options = Options());

  // Specifies that the output boundary edges should be sent to three
  // different layers according to their dimension.  Points (represented by
  // degenerate edges) are sent to layer 0, polyline edges are sent to
  // layer 1, and polygon edges are sent to layer 2.
  //
  // The dimension of an edge is defined as the minimum dimension of the two
  // input edges that produced it.  For example, the intersection of two
  // crossing polyline edges is considered to be a degenerate polyline
  // rather than a point, so it is sent to layer 1.  Clients can easily
  // reclassify such polylines as points if desired, but this rule makes it
  // easier for clients that want to process point, polyline, and polygon
  // inputs differently.
  //
  // The layers are always built in the order 0, 1, 2, and all arguments to
  // the Build() calls are guaranteed to be valid until the last call returns.
// All Graph objects have the same set of vertices and the same lexicon // objects, in order to make it easier to write classes that process all the // edges in parallel. S2BooleanOperation(OpType op_type, std::vector> layers, const Options& options = Options()); OpType op_type() const { return op_type_; } const Options& options() const { return options_; } // Executes the given operation. Returns true on success, and otherwise // sets "error" appropriately. (This class does not generate any errors // itself, but the S2Builder::Layer might.) bool Build(const S2ShapeIndex& a, const S2ShapeIndex& b, S2Error* error); // Convenience method that returns true if the result of the given operation // is empty. static bool IsEmpty(OpType op_type, const S2ShapeIndex& a, const S2ShapeIndex& b, const Options& options = Options()); // Convenience method that returns true if A intersects B. static bool Intersects(const S2ShapeIndex& a, const S2ShapeIndex& b, const Options& options = Options()) { return !IsEmpty(OpType::INTERSECTION, b, a, options); } // Convenience method that returns true if A contains B, i.e., if the // difference (B - A) is empty. static bool Contains(const S2ShapeIndex& a, const S2ShapeIndex& b, const Options& options = Options()) { return IsEmpty(OpType::DIFFERENCE, b, a, options); } // Convenience method that returns true if the symmetric difference of A and // B is empty. (Note that A and B may still not be identical, e.g. A may // contain two copies of a polyline while B contains one.) static bool Equals(const S2ShapeIndex& a, const S2ShapeIndex& b, const Options& options = Options()) { return IsEmpty(OpType::SYMMETRIC_DIFFERENCE, b, a, options); } private: class Impl; // The actual implementation. // Internal constructor to reduce code duplication. S2BooleanOperation(OpType op_type, const Options& options); // Specifies that "result_empty" should be set to indicate whether the exact // result of the operation is empty. This constructor is used to efficiently // test boolean relationships (see IsEmpty above). S2BooleanOperation(OpType op_type, bool* result_empty, const Options& options = Options()); Options options_; OpType op_type_; // The input regions. const S2ShapeIndex* regions_[2]; // The output consists either of zero layers, one layer, or three layers. std::vector> layers_; // The following field is set if and only if there are no output layers. 
bool* result_empty_; }; ////////////////// Implementation details follow //////////////////// inline S2BooleanOperation::SourceId::SourceId() : region_id_(0), shape_id_(0), edge_id_(-1) { } inline S2BooleanOperation::SourceId::SourceId( int region_id, int32 shape_id, int32 edge_id) : region_id_(region_id), shape_id_(shape_id), edge_id_(edge_id) { } inline S2BooleanOperation::SourceId::SourceId(int special_edge_id) : region_id_(0), shape_id_(0), edge_id_(special_edge_id) { } inline bool S2BooleanOperation::SourceId::operator==(SourceId other) const { return (region_id_ == other.region_id_ && shape_id_ == other.shape_id_ && edge_id_ == other.edge_id_); } inline bool S2BooleanOperation::SourceId::operator<(SourceId other) const { if (region_id_ < other.region_id_) return true; if (region_id_ > other.region_id_) return false; if (shape_id_ < other.shape_id_) return true; if (shape_id_ > other.shape_id_) return false; return edge_id_ < other.edge_id_; } #endif // S2_S2BOOLEAN_OPERATION_H_ s2geometry-0.10.0/src/s2/s2boolean_operation_test.cc000066400000000000000000003273141422156367100223360ustar00rootroot00000000000000// Copyright 2017 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/s2boolean_operation.h" #include #include #include #include #include #include "absl/flags/reflection.h" #include "absl/flags/flag.h" #include "absl/memory/memory.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/strings/strip.h" #include "s2/mutable_s2shape_index.h" #include "s2/s2builder.h" #include "s2/s2builder_graph.h" #include "s2/s2builder_layer.h" #include "s2/s2builderutil_lax_polygon_layer.h" #include "s2/s2builderutil_s2point_vector_layer.h" #include "s2/s2builderutil_s2polyline_vector_layer.h" #include "s2/s2builderutil_snap_functions.h" #include "s2/s2builderutil_testing.h" #include "s2/s2lax_polygon_shape.h" #include "s2/s2lax_polyline_shape.h" #include "s2/s2point_vector_shape.h" #include "s2/s2polygon.h" #include "s2/s2polyline.h" #include "s2/s2shapeutil_contains_brute_force.h" #include "s2/s2testing.h" #include "s2/s2text_format.h" S2_DECLARE_int64(s2shape_index_tmp_memory_budget); namespace { using absl::ByAnyChar; using absl::SkipEmpty; using absl::StrContains; using absl::StrSplit; using absl::make_unique; using absl::string_view; using s2builderutil::IndexMatchingLayer; using s2builderutil::LaxPolygonLayer; using s2shapeutil::ContainsBruteForce; using std::pair; using std::unique_ptr; using std::string; using std::vector; using EdgeType = S2Builder::EdgeType; using Graph = S2Builder::Graph; using GraphOptions = S2Builder::GraphOptions; using DegenerateEdges = GraphOptions::DegenerateEdges; using DuplicateEdges = GraphOptions::DuplicateEdges; using SiblingPairs = GraphOptions::SiblingPairs; using OpType = S2BooleanOperation::OpType; using PolygonModel = S2BooleanOperation::PolygonModel; using PolylineModel 
= S2BooleanOperation::PolylineModel; using DegenerateBoundaries = LaxPolygonLayer::Options::DegenerateBoundaries; void ExpectResult(S2BooleanOperation::OpType op_type, const S2BooleanOperation::Options& options, const S2ShapeIndex& a, const S2ShapeIndex& b, const S2ShapeIndex& expected) { vector> layers; for (int dim = 0; dim < 3; ++dim) { // Since all S2Builder polygon layers require DISCARD or DISCARD_EXCESS // for degenerate edges, we intentionally do not require any specific // multiplicity for degenerate edges and sibling pairs of dimension 2. GraphOptions graph_options( EdgeType::DIRECTED, (dim == 2) ? DegenerateEdges::DISCARD_EXCESS : DegenerateEdges::KEEP, DuplicateEdges::KEEP, (dim == 2) ? SiblingPairs::DISCARD_EXCESS : SiblingPairs::KEEP); layers.push_back(make_unique( graph_options, &expected, dim)); } S2BooleanOperation op(op_type, std::move(layers), options); S2Error error; ASSERT_TRUE(op.Build(a, b, &error)) << S2BooleanOperation::OpTypeToString(op_type) << " failed:\n" << "Expected result: " << s2textformat::ToString(expected) << "\n" << error; // Now try the same thing with boolean output. EXPECT_EQ(expected.num_shape_ids() == 0, S2BooleanOperation::IsEmpty(op_type, a, b, options)); } void ExpectResult(S2BooleanOperation::OpType op_type, const S2BooleanOperation::Options& options, const string& a_str, const string& b_str, const string& expected_str) { auto a = s2textformat::MakeIndexOrDie(a_str); auto b = s2textformat::MakeIndexOrDie(b_str); auto expected = s2textformat::MakeIndexOrDie(expected_str); ExpectResult(op_type, options, *a, *b, *expected); } // The intersections in the "expected" data below were computed in lat-lng // space (i.e., the rectangular projection), while the actual intersections // are computed using geodesics. We can compensate for this by rounding the // intersection points to a fixed precision in degrees (e.g., 2 decimals). static S2BooleanOperation::Options RoundToE(int exp) { S2BooleanOperation::Options options; options.set_snap_function(s2builderutil::IntLatLngSnapFunction(exp)); return options; } // TODO(ericv): Clean up or remove these notes. // // Options to test: // polygon_model: OPEN, SEMI_OPEN, CLOSED // polyline_model: OPEN, SEMI_OPEN, CLOSED // polyline_loops_have_boundaries: true, false // conservative: true, false // // Geometry combinations to test: // // Point/point: // - disjoint, coincident // Point/polyline: // - Start vertex, end vertex, interior vertex, degenerate polyline // - With polyline_loops_have_boundaries: start/end vertex, degenerate polyline // Point/polygon: // - Polygon interior, exterior, vertex // - Vertex of degenerate sibling pair shell, hole // - Vertex of degenerate single point shell, hole // Polyline/polyline: // - Vertex intersection: // - Start, end, interior, degenerate, loop start/end, degenerate loop // - Test cases where vertex is not emitted because an incident edge is. // - Edge/edge: interior crossing, duplicate, reversed, degenerate // - Test that degenerate edges are ignored unless polyline has a single edge. // (For example, AA has one edge but AAA has no edges.) // Polyline/polygon: // - Vertex intersection: polyline vertex cases already covered, but test // polygon normal vertex, sibling pair shell/hole, single vertex shell/hole // - Also test cases where vertex is not emitted because an edge is. 
// - Edge/edge: interior crossing, duplicate, reversed // - Edge/interior: polyline edge in polygon interior, exterior // Polygon/polygon: // - Vertex intersection: // - normal vertex, sibling pair shell/hole, single vertex shell/hole // - Also test cases where vertex is not emitted because an edge is. // - Test that polygons take priority when there is a polygon vertex and // also isolated polyline vertices. (There should not be any points.) // - Edge/edge: interior crossing, duplicate, reversed // - Interior/interior: polygons in interior/exterior of other polygons TEST(S2BooleanOperation, DegeneratePolylines) { // Verify that degenerate polylines are preserved under all boundary models. S2BooleanOperation::Options options; auto a = "# 0:0, 0:0 #"; auto b = "# #"; options.set_polyline_model(PolylineModel::OPEN); ExpectResult(OpType::UNION, options, a, b, a); options.set_polyline_model(PolylineModel::SEMI_OPEN); ExpectResult(OpType::UNION, options, a, b, a); options.set_polyline_model(PolylineModel::CLOSED); ExpectResult(OpType::UNION, options, a, b, a); } TEST(S2BooleanOperation, DegeneratePolygons) { // Verify that degenerate polygon features (single-vertex and sibling pair // shells and holes) are preserved under all boundary models. S2BooleanOperation::Options options; auto a = "# # 0:0, 0:5, 5:5, 5:0; 1:1; 2:2, 3:3; 6:6; 7:7, 8:8"; auto b = "# #"; options.set_polygon_model(PolygonModel::OPEN); ExpectResult(OpType::UNION, options, a, b, a); options.set_polygon_model(PolygonModel::SEMI_OPEN); ExpectResult(OpType::UNION, options, a, b, a); options.set_polygon_model(PolygonModel::CLOSED); ExpectResult(OpType::UNION, options, a, b, a); } TEST(S2BooleanOperation, PointPoint) { S2BooleanOperation::Options options; auto a = "0:0 | 1:0 # #"; auto b = "0:0 | 2:0 # #"; // Note that these results have duplicates, which is correct. Clients can // eliminated the duplicates with the appropriate GraphOptions. ExpectResult(OpType::UNION, options, a, b, "0:0 | 0:0 | 1:0 | 2:0 # #"); ExpectResult(OpType::INTERSECTION, options, a, b, "0:0 | 0:0 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "1:0 # #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "1:0 | 2:0 # #"); } TEST(S2BooleanOperation, PointOpenPolyline) { // Tests operations between an open polyline and its vertices. // // The polyline "3:0, 3:0" consists of a single degenerate edge and contains // no points (since polyline_model() is OPEN). Since S2BooleanOperation // preserves degeneracies, this means that the union includes *both* the // point 3:0 and the degenerate polyline 3:0, since they do not intersect. // // This test uses Options::polyline_loops_have_boundaries() == true, which // means that the loop "4:0, 5:0, 4:0" does not contain the vertex "4:0". 
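  // A minimal direct check of the claim above (a sketch that relies on
  // S2BooleanOperation::Contains, the same predicate used by
  // DegeneracyCoverageTest further below): under the OPEN model the
  // degenerate polyline "3:0, 3:0" does not contain the point 3:0.
  {
    S2BooleanOperation::Options open_options;
    open_options.set_polyline_model(PolylineModel::OPEN);
    EXPECT_FALSE(S2BooleanOperation::Contains(
        *s2textformat::MakeIndexOrDie("# 3:0, 3:0 #"),
        *s2textformat::MakeIndexOrDie("3:0 # #"), open_options));
  }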
S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::OPEN); auto a = "0:0 | 1:0 | 2:0 | 3:0 | 4:0 | 5:0 # #"; auto b = "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"; ExpectResult(OpType::UNION, options, a, b, "0:0 | 2:0 | 3:0 | 4:0 " "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "1:0 | 5:0 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "0:0 | 2:0 | 3:0 | 4:0 # #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "0:0 | 2:0 | 3:0 | 4:0" "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); } TEST(S2BooleanOperation, PointOpenPolylineLoopBoundariesFalse) { // With Options::polyline_loops_have_boundaries() == false, the loop // "4:0, 5:0, 4:0" has two vertices, both of which are contained. S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::OPEN); options.set_polyline_loops_have_boundaries(false); auto a = "0:0 | 1:0 | 2:0 | 3:0 | 4:0 | 5:0 # #"; auto b = "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"; ExpectResult(OpType::UNION, options, a, b, "0:0 | 2:0 | 3:0 " "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "1:0 | 4:0 | 5:0 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "0:0 | 2:0 | 3:0 # #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "0:0 | 2:0 | 3:0 " "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); } TEST(S2BooleanOperation, PointSemiOpenPolyline) { // Degenerate polylines are defined not contain any points under the // SEMI_OPEN model either, so again the point 3:0 and the degenerate // polyline "3:0, 3:0" do not intersect. // // The result does not depend on Options::polyline_loops_have_boundaries(). S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::SEMI_OPEN); for (bool bool_value : {false, true}) { options.set_polyline_loops_have_boundaries(bool_value); auto a = "0:0 | 1:0 | 2:0 | 3:0 | 4:0 | 5:0 # #"; auto b = "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"; ExpectResult(OpType::UNION, options, a, b, "2:0 | 3:0 # 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "0:0 | 1:0 | 4:0 | 5:0 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "2:0 | 3:0 # #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "2:0 | 3:0 # 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); } } TEST(S2BooleanOperation, PointClosedPolyline) { // Under the CLOSED model, the degenerate polyline 3:0 does contain its // vertex. Since polylines take precedence over points, the union of the // point 3:0 and the polyline 3:0 is the polyline only. Similarly, since // subtracting a point from a polyline has no effect, the symmetric // difference includes only the polyline objects. // // The result does not depend on Options::polyline_loops_have_boundaries(). 
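  // A minimal direct check of the precedence claim above (a sketch using
  // S2BooleanOperation::Contains, also used further below): under the CLOSED
  // model the degenerate polyline "3:0, 3:0" does contain the point 3:0.
  {
    S2BooleanOperation::Options closed_options;
    closed_options.set_polyline_model(PolylineModel::CLOSED);
    EXPECT_TRUE(S2BooleanOperation::Contains(
        *s2textformat::MakeIndexOrDie("# 3:0, 3:0 #"),
        *s2textformat::MakeIndexOrDie("3:0 # #"), closed_options));
  }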
S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::CLOSED); for (bool bool_value : {false, true}) { options.set_polyline_loops_have_boundaries(bool_value); auto a = "0:0 | 1:0 | 2:0 | 3:0 | 4:0 | 5:0 # #"; auto b = "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "0:0 | 1:0 | 2:0 | 3:0 | 4:0 | 5:0 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0, 4:0 #"); } } TEST(S2BooleanOperation, PointPolygonInterior) { S2BooleanOperation::Options options; // PolygonModel is irrelevant. // One interior point and one exterior point. auto a = "1:1 | 4:4 # #"; auto b = "# # 0:0, 0:3, 3:0"; ExpectResult(OpType::UNION, options, a, b, "4:4 # # 0:0, 0:3, 3:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "1:1 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "4:4 # #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "4:4 # # 0:0, 0:3, 3:0"); } TEST(S2BooleanOperation, PointOpenPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::OPEN); // See notes about the two vertices below. auto a = "0:1 | 1:0 # #"; auto b = "# # 0:0, 0:1, 1:0"; ExpectResult(OpType::UNION, options, a, b, "0:1 | 1:0 # # 0:0, 0:1, 1:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "# #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "0:1 | 1:0 # #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "0:1 | 1:0 # # 0:0, 0:1, 1:0"); } TEST(S2BooleanOperation, PointSemiOpenPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::SEMI_OPEN); // The two vertices are chosen such that the polygon contains one vertex but // not the other under PolygonModel::SEMI_OPEN. (The same vertices are used // for all three PolygonModel options.) auto polygon = s2textformat::MakePolygonOrDie("0:0, 0:1, 1:0"); ASSERT_TRUE(polygon->Contains(s2textformat::MakePointOrDie("0:1"))); ASSERT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("1:0"))); auto a = "0:1 | 1:0 # #"; auto b = "# # 0:0, 0:1, 1:0"; ExpectResult(OpType::UNION, options, a, b, "1:0 # # 0:0, 0:1, 1:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "0:1 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "1:0 # #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "1:0 # # 0:0, 0:1, 1:0"); } TEST(S2BooleanOperation, PointClosedPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::CLOSED); // See notes about the two vertices above. auto a = "0:1 | 1:0 # #"; auto b = "# # 0:0, 0:1, 1:0"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:1, 1:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "0:1 | 1:0 # #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:1, 1:0"); } TEST(S2BooleanOperation, PolylineVertexOpenPolylineVertex) { // Test first, last, and middle vertices of both polylines. Also test // first/last and middle vertices of two polyline loops. // // Degenerate polylines are tested in PolylineEdgePolylineEdgeOverlap below. 
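  // Layout of the inputs below: the polyline "0:0, 0:1, 0:2" in 'a' is touched
  // by the 'b' polylines at its start vertex (0:0), an interior vertex (0:1)
  // and its end vertex (0:2); the loop "0:3, 0:4, 0:3" in 'a' is touched at
  // its start/end vertex (0:3) and its interior vertex (0:4).  Under the OPEN
  // model only interior vertices are contained, which is why the intersection
  // below consists solely of the vertex 0:1 (emitted once for each input).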
S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::OPEN); auto a = "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"; auto b = "# 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); // The output consists of the portion of each input polyline that intersects // the opposite region, so the intersection vertex is present twice. This // allows reassembling the individual polylins that intersect, if desired. // (Otherwise duplicates can be removed using DuplicateEdges::MERGE.) ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:1, 0:1 | 0:1, 0:1 #"); // Note that all operations are defined such that subtracting a // lower-dimensional subset of an object has no effect. In this case, // subtracting the middle vertex of a polyline has no effect. ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); } TEST(S2BooleanOperation, PolylineVertexOpenPolylineVertexLoopBoundariesFalse) { // With Options::polyline_loops_have_boundaries() == false, the 3 polyline // loops each have two vertices, both of which are contained. S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::OPEN); options.set_polyline_loops_have_boundaries(false); auto a = "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"; auto b = "# 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); // Note that the polyline "0:3, 0:4, 0:3" only has two vertices, not three. // This means that 0:3 is emitted only once for that polyline, plus once for // the other polyline, for a total of twice. ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:1, 0:1 | 0:1, 0:1 " "| 0:3, 0:3 | 0:3, 0:3 | 0:4, 0:4 | 0:4, 0:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); } TEST(S2BooleanOperation, PolylineVertexSemiOpenPolylineVertex) { // The result does not depend on Options::polyline_loops_have_boundaries(). 
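  // Under the SEMI_OPEN model a polyline contains its start vertex but not
  // its end vertex.  In the inputs below this means that 0:0, 0:3 and 0:4 are
  // contained by both regions (each is a start or interior vertex in each
  // input), while 0:2 is an end vertex in both inputs and is therefore
  // contained by neither, which explains the intersection results.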
S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::SEMI_OPEN); for (bool bool_value : {false, true}) { options.set_polyline_loops_have_boundaries(bool_value); auto a = "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"; auto b = "# 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 0:0 | 0:0, 0:0 | 0:1, 0:1 | 0:1, 0:1 " "| 0:3, 0:3 | 0:3, 0:3 | 0:4, 0:4 | 0:4, 0:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); } } TEST(S2BooleanOperation, PolylineVertexClosedPolylineVertex) { S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::CLOSED); auto a = "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"; auto b = "# 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); // Since Options::polyline_loops_have_boundaries() == true, the polyline // "0:3, 0:4, 0:3" has three vertices. Therefore 0:3 is emitted twice for // that polyline, plus once for the other polyline, for a total of thrice. ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 0:0 | 0:0, 0:0 | 0:1, 0:1 | 0:1, 0:1 " "| 0:2, 0:2 | 0:2, 0:2 " "| 0:3, 0:3 | 0:3, 0:3 | 0:3, 0:3 " "| 0:4, 0:4 | 0:4, 0:4 | 0:4, 0:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); } TEST(S2BooleanOperation, PolylineVertexClosedPolylineVertexLoopBoundariesFalse) { S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::CLOSED); options.set_polyline_loops_have_boundaries(false); auto a = "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"; auto b = "# 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); // Since Options::polyline_loops_have_boundaries() == false, the polyline // "0:3, 0:4, 0:3" has two vertices. Therefore 0:3 is emitted once for // that polyline, plus once for the other polyline, for a total of twice. ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 0:0 | 0:0, 0:0 | 0:1, 0:1 | 0:1, 0:1 " "| 0:2, 0:2 | 0:2, 0:2 " "| 0:3, 0:3 | 0:3, 0:3 | 0:4, 0:4 | 0:4, 0:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:3, 0:4, 0:3 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:0, 0:1, 0:2 | 0:0, 1:0 | -1:1, 0:1, 1:1 | -1:2, 0:2 " "| 0:3, 0:4, 0:3 | 1:3, 0:3, 1:3 | 0:4, 1:4, 0:4 #"); } // The polygon used in the polyline/polygon vertex tests below. static string kVertexTestPolygonStr() { return "0:0, 0:1, 0:2, 0:3, 0:4, 0:5, 5:5, 5:4, 5:3, 5:2, 5:1, 5:0"; } TEST(S2BooleanOperation, TestSemiOpenPolygonVerticesContained) { // Verify whether certain vertices of the test polygon are contained under // the semi-open boundary model (for use in the tests below). 
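  // These assertions document the particular vertex assignment produced by
  // the semi-open boundary model for this polygon (vertices at latitude 0
  // contained, vertices at latitude 5 not contained); the polyline/polygon
  // vertex tests below rely on exactly this assignment.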
auto polygon = s2textformat::MakePolygonOrDie(kVertexTestPolygonStr()); EXPECT_TRUE(polygon->Contains(s2textformat::MakePointOrDie("0:1"))); EXPECT_TRUE(polygon->Contains(s2textformat::MakePointOrDie("0:2"))); EXPECT_TRUE(polygon->Contains(s2textformat::MakePointOrDie("0:3"))); EXPECT_TRUE(polygon->Contains(s2textformat::MakePointOrDie("0:4"))); EXPECT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("5:1"))); EXPECT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("5:2"))); EXPECT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("5:3"))); EXPECT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("5:4"))); } // Don't bother testing every PolylineModel with every PolygonModel for vertex // intersection, since we have already tested the PolylineModels individually // above. It is sufficient to use PolylineModel::CLOSED with the various // PolygonModel options. TEST(S2BooleanOperation, PolylineVertexOpenPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::OPEN); // Define some constants to reduce code duplication. // Test all combinations of polylines that start or end on a polygon vertex, // where the polygon vertex is open or closed using semi-open boundaries, // and where the incident edge is inside or outside the polygon. auto a = ("# 1:1, 0:1 | 0:2, 1:2 | -1:3, 0:3 | 0:4, -1:4 " "| 6:1, 5:1 | 5:2, 6:2 | 4:3, 5:3 | 5:4, 4:4 #"); auto b = "# # " + kVertexTestPolygonStr(); const string kDifferenceResult = "# 0:1, 0:1 | 0:2, 0:2 | -1:3, 0:3 | 0:4, -1:4" "| 6:1, 5:1 | 5:2, 6:2 | 5:3, 5:3 | 5:4, 5:4 #"; ExpectResult(OpType::UNION, options, a, b, kDifferenceResult + kVertexTestPolygonStr()); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:1, 0:1 | 0:2, 1:2 | 4:3, 5:3 | 5:4, 4:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, kDifferenceResult); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, kDifferenceResult + kVertexTestPolygonStr()); } // Like the test above, except that every polygon vertex is also incident to a // closed polyline vertex. This tests that when an open vertex and a closed // vertex coincide with each other, the result is considered closed. 
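// (In the geometry below, each of the polygon vertices exercised by the
// polylines in 'a', i.e. 0:1..0:4 and 5:1..5:4, is also the endpoint of one
// of the polylines in kTestGeometrySuffix, and those endpoints are closed
// under the default polyline model.)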
TEST(S2BooleanOperation, PolylineVertexOpenPolygonClosedPolylineVertex) { const string kTestGeometrySuffix = "-2:0, 0:1 | -2:1, 0:2 | -2:2, 0:3 | -2:3, 0:4 | " "7:0, 5:1 | 7:1, 5:2 | 7:2, 5:3 | 7:3, 5:4 # " + kVertexTestPolygonStr(); S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::OPEN); auto a = ("# 1:1, 0:1 | 0:2, 1:2 | -1:3, 0:3 | 0:4, -1:4 " "| 6:1, 5:1 | 5:2, 6:2 | 4:3, 5:3 | 5:4, 4:4 #"); auto b = ("# " + kTestGeometrySuffix); const string kDifferencePrefix = "# -1:3, 0:3 | 0:4, -1:4 | 6:1, 5:1 | 5:2, 6:2"; ExpectResult(OpType::UNION, options, a, b, kDifferencePrefix + " | 0:1, 0:1 | 0:2, 0:2 | 5:3, 5:3 | 5:4, 5:4 | " + kTestGeometrySuffix); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:1, 0:1 | 0:2, 1:2 | 0:3, 0:3 | 0:4, 0:4" "| 5:1, 5:1 | 5:2, 5:2 | 4:3, 5:3 | 5:4, 4:4" "| 0:1, 0:1 | 0:2, 0:2 | 0:3, 0:3 | 0:4, 0:4" "| 5:1, 5:1 | 5:2, 5:2 | 5:3, 5:3 | 5:4, 5:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, kDifferencePrefix + " #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, kDifferencePrefix + " | " + kTestGeometrySuffix); } TEST(S2BooleanOperation, PolylineVertexSemiOpenPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::SEMI_OPEN); // Test all combinations of polylines that start or end on a polygon vertex, // where the polygon vertex is open or closed using semi-open boundaries, // and where the incident edge is inside or outside the polygon. // // The vertices at latitude 0 used below are all closed while the vertices // at latitude 5 are all open (see TestSemiOpenPolygonVerticesContained). auto a = ("# 1:1, 0:1 | 0:2, 1:2 | -1:3, 0:3 | 0:4, -1:4 " "| 6:1, 5:1 | 5:2, 6:2 | 4:3, 5:3 | 5:4, 4:4 #"); auto b = "# # " + kVertexTestPolygonStr(); const string kDifferenceResult = "# -1:3, 0:3 | 0:4, -1:4 | 6:1, 5:1 | 5:2, 6:2 | 5:3, 5:3 | 5:4, 5:4 #"; ExpectResult(OpType::UNION, options, a, b, kDifferenceResult + kVertexTestPolygonStr()); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:1, 0:1 | 0:2, 1:2 | 0:3, 0:3 | 0:4, 0:4 " "| 4:3, 5:3 | 5:4, 4:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, kDifferenceResult); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, kDifferenceResult + kVertexTestPolygonStr()); } TEST(S2BooleanOperation, PolylineVertexClosedPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::CLOSED); // Test all combinations of polylines that start or end on a polygon vertex, // where the polygon vertex is open or closed using semi-open boundaries, // and where the incident edge is inside or outside the polygon. auto a = ("# 1:1, 0:1 | 0:2, 1:2 | -1:3, 0:3 | 0:4, -1:4 " "| 6:1, 5:1 | 5:2, 6:2 | 4:3, 5:3 | 5:4, 4:4 #"); auto b = "# # " + kVertexTestPolygonStr(); const string kDifferenceResult = "# -1:3, 0:3 | 0:4, -1:4 | 6:1, 5:1 | 5:2, 6:2 #"; ExpectResult(OpType::UNION, options, a, b, kDifferenceResult + kVertexTestPolygonStr()); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:1, 0:1 | 0:2, 1:2 | 0:3, 0:3 | 0:4, 0:4" "| 5:1, 5:1 | 5:2, 5:2 | 4:3, 5:3 | 5:4, 4:4 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, kDifferenceResult); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, kDifferenceResult + kVertexTestPolygonStr()); } TEST(S2BooleanOperation, PolylineEdgePolylineEdgeCrossing) { // Two polyline edges that cross at a point interior to both edges. 
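  // The true geodesic crossing is only approximately at 1:1; RoundToE(1)
  // snaps it to one decimal place in lat/lng so that the expected output can
  // be written exactly as 1:1.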
S2BooleanOperation::Options options = RoundToE(1); auto a = "# 0:0, 2:2 #"; auto b = "# 2:0, 0:2 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 1:1, 2:2 | 2:0, 1:1, 0:2 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:1, 1:1 | 1:1, 1:1 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:0, 1:1, 2:2 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:0, 1:1, 2:2 | 2:0, 1:1, 0:2 #"); } TEST(S2BooleanOperation, PolylineEdgePolylineEdgeOverlap) { // The PolylineModel does not affect this calculation. In particular the // intersection of a degenerate polyline edge with itself is non-empty, even // though the edge contains no points in the OPEN and SEMI_OPEN models. S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::OPEN); // Test edges in the same and reverse directions, and degenerate edges. auto a = "# 0:0, 1:0, 2:0, 2:5 | 3:0, 3:0 | 6:0, 5:0, 4:0 #"; auto b = "# 0:0, 1:0, 2:0 | 3:0, 3:0 | 4:0, 5:0 #"; // As usual, the expected output includes the relevant portions of *both* // input polylines. Duplicates can be removed using GraphOptions. ExpectResult(OpType::UNION, options, a, b, "# 0:0, 1:0, 2:0, 2:5 | 0:0, 1:0, 2:0 | 3:0, 3:0 | 3:0, 3:0 " "| 6:0, 5:0, 4:0 | 4:0, 5:0 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 1:0, 2:0 | 0:0, 1:0, 2:0 | 3:0, 3:0 | 3:0, 3:0 " "| 5:0, 4:0 | 4:0, 5:0 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 2:0, 2:5 | 6:0, 5:0 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 2:0, 2:5 | 6:0, 5:0 #"); } TEST(S2BooleanOperation, PolylineLoopMultipleOpenPolylineEdge) { // Here we test a polyline loop ABCA with the pairs {AA, AB} and {AA, AC}. // This tests not only what happens when degenerate polylines intersect loop // endpoints, but also what happens when polylines intersect a degenerate // and non-degenerate edge that overlap each other. S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::OPEN); auto a = "# 0:0, 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2, 2:2 #"; auto b = "# 0:0, 0:0 | 0:0, 0:1 | 2:2, 2:2 | 2:2, 3:2 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 1:0, 0:0 | 0:0, 0:0 | 0:0, 0:1 " "| 2:2, 2:3, 3:2, 2:2 | 2:2, 2:2 | 2:2, 3:2 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 0:1 | 0:0, 0:1 | 2:2, 3:2 | 3:2, 2:2 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 0:0, 0:0 | 2:2, 2:3, 3:2 | 2:2, 2:2 #"); } TEST(S2BooleanOperation, PolylineLoopMultipleSemiOpenPolylineEdge) { // Like the test above but with SEMI_OPEN boundaries. In this case ABCA // intersected with {AA, AB} is {AA, AB, AB} but ABCA intersected with {AA, // AC} is {AA, AA, AC, CA} since the chain ABCA contains its start vertex // but not its end vertex. 
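  // In the inputs below the loop ABCA is "0:0, 0:1, 1:0, 0:0" (A=0:0, B=0:1,
  // C=1:0) for the first case and "2:2, 2:3, 3:2, 2:2" (A=2:2, B=2:3, C=3:2)
  // for the second, with the pairs {AA, AB} and {AA, AC} taken from 'b'
  // accordingly.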
S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::SEMI_OPEN); auto a = "# 0:0, 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2, 2:2 #"; auto b = "# 0:0, 0:0 | 0:0, 0:1 | 2:2, 2:2 | 2:2, 3:2 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 1:0, 0:0 | 0:0, 0:0 | 0:0, 0:1 " "| 2:2, 2:3, 3:2, 2:2 | 2:2, 2:2 | 2:2, 3:2 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 0:0 | 0:0, 0:1 | 0:0, 0:1 " "| 2:2, 2:2 | 2:2, 2:2 | 2:2, 3:2 | 3:2, 2:2 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2 #"); } TEST(S2BooleanOperation, PolylineLoopMultipleClosedPolylineEdge) { // Like the test above but with CLOSED boundaries. In this case ABCA // intersected with {AA, AB} is {AA, AA, AB, AB} since the chain ABCA // contains both its start vertex and end vertex. S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::CLOSED); auto a = "# 0:0, 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2, 2:2 #"; auto b = "# 0:0, 0:0 | 0:0, 0:1 | 2:2, 2:2 | 2:2, 3:2 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 1:0, 0:0 | 0:0, 0:0 | 0:0, 0:1 " "| 2:2, 2:3, 3:2, 2:2 | 2:2, 2:2 | 2:2, 3:2 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 0:0 | 0:0, 0:0 | 0:0, 0:1 | 0:0, 0:1 " "| 2:2, 2:2 | 2:2, 2:2 | 2:2, 3:2 | 3:2, 2:2 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2 #"); } TEST(S2BooleanOperation, PolylineLoopMultiplePolylineEdgeLoopBoundariesFalse) { // Like the tests above but with polyline_loops_have_boundaries() == false. // In this case the result does not depend on the polyline model. The // polyline AA intersects ABCA exactly once, and the intersection of ABCA // with {AA, AB} is {AA, AB, AB}. for (auto polyline_model : { PolylineModel::OPEN, PolylineModel::SEMI_OPEN, PolylineModel::CLOSED}) { S2BooleanOperation::Options options; options.set_polyline_model(polyline_model); options.set_polyline_loops_have_boundaries(false); auto a = "# 0:0, 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2, 2:2 #"; auto b = "# 0:0, 0:0 | 0:0, 0:1 | 2:2, 2:2 | 2:2, 3:2 #"; ExpectResult(OpType::UNION, options, a, b, "# 0:0, 0:1, 1:0, 0:0 | 0:0, 0:0 | 0:0, 0:1 " "| 2:2, 2:3, 3:2, 2:2 | 2:2, 2:2 | 2:2, 3:2 #"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 0:0, 0:0 | 0:0, 0:1 | 0:0, 0:1 " "| 2:2, 2:2 | 2:2, 3:2 | 3:2, 2:2 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 0:1, 1:0, 0:0 | 2:2, 2:3, 3:2 #"); } } TEST(S2BooleanOperation, PolylineEdgeOpenPolygonEdgeOverlap) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::OPEN); // A polygon and two polyline edges that coincide with the polygon boundary, // one in the same direction and one in the reverse direction. 
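  // Under the OPEN model the polygon excludes its boundary, so the polylines
  // (which lie entirely on that boundary) do not intersect the polygon at all
  // and survive the difference unchanged.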
auto a = "# 1:1, 1:3, 3:3 | 3:3, 1:3 # "; auto b = "# # 1:1, 1:3, 3:3, 3:1"; ExpectResult(OpType::UNION, options, a, b, "# 1:1, 1:3, 3:3 | 3:3, 1:3 # 1:1, 1:3, 3:3, 3:1"); ExpectResult(OpType::INTERSECTION, options, a, b, "# #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 1:1, 1:3, 3:3 | 3:3, 1:3 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 1:1, 1:3, 3:3 | 3:3, 1:3 # 1:1, 1:3, 3:3, 3:1"); } TEST(S2BooleanOperation, PolylineEdgeSemiOpenPolygonEdgeOverlap) { auto polygon = s2textformat::MakePolygonOrDie("1:1, 1:3, 3:3, 3:1"); ASSERT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("1:1"))); ASSERT_TRUE(polygon->Contains(s2textformat::MakePointOrDie("1:3"))); ASSERT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("3:3"))); ASSERT_FALSE(polygon->Contains(s2textformat::MakePointOrDie("3:1"))); S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::SEMI_OPEN); auto a = "# 1:1, 1:3, 3:3 | 3:3, 1:3 # "; auto b = "# # 1:1, 1:3, 3:3, 3:1"; ExpectResult(OpType::UNION, options, a, b, "# 1:1, 1:1 | 3:3, 3:3 | 3:3, 1:3 # 1:1, 1:3, 3:3, 3:1"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:3, 1:3 | 1:1, 1:3, 3:3 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 1:1, 1:1 | 3:3, 3:3 | 3:3, 1:3 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 1:1, 1:1 | 3:3, 3:3 | 3:3, 1:3 # 1:1, 1:3, 3:3, 3:1"); } TEST(S2BooleanOperation, PolylineEdgeClosedPolygonEdgeOverlap) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::CLOSED); auto a = "# 1:1, 1:3, 3:3 | 3:3, 1:3 # "; auto b = "# # 1:1, 1:3, 3:3, 3:1"; ExpectResult(OpType::UNION, options, a, b, "# # 1:1, 1:3, 3:3, 3:1"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:1, 1:3, 3:3 | 3:3, 1:3 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 1:1, 1:3, 3:3, 3:1"); } TEST(S2BooleanOperation, PolygonVertexMatching) { // This test shows that CrossingProcessor::ProcessEdgeCrossings() must set // a0_matches_polygon and a1_matches_polygon correctly even when (a0, a1) // itself is a polygon edge (or its sibling). (It requires degenerate // polygon geometry to demonstrate this.) S2BooleanOperation::Options options; options.set_polyline_model(PolylineModel::CLOSED); options.set_polygon_model(PolygonModel::CLOSED); auto a = "# 0:0, 1:1 # "; auto b = "# # 0:0, 1:1"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 1:1"); } TEST(S2BooleanOperation, PolylineEdgePolygonInterior) { S2BooleanOperation::Options options; // PolygonModel is irrelevant. // One normal and one degenerate polyline edge in the polygon interior, and // similarly for the polygon exterior. auto a = "# 1:1, 2:2 | 3:3, 3:3 | 6:6, 7:7 | 8:8, 8:8 # "; auto b = "# # 0:0, 0:5, 5:5, 5:0"; ExpectResult(OpType::UNION, options, a, b, "# 6:6, 7:7 | 8:8, 8:8 # 0:0, 0:5, 5:5, 5:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "# 1:1, 2:2 | 3:3, 3:3 #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# 6:6, 7:7 | 8:8, 8:8 #"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# 6:6, 7:7 | 8:8, 8:8 # 0:0, 0:5, 5:5, 5:0"); } TEST(S2BooleanOperation, PolylineEdgeIsolatedStartVertexPlusInteriorCrossing) { // Tests a polyline XYZ that when intersected with a polygon results in an // isolated vertex X plus a clipped portion UYV. 
This case is unusual // because the isolated vertex is handled by creating a separate degenerate // S2Builder input edge XX which is added before the actual edge XY, and the // crossing edge information needs to be associated with XY rather than XX // in order for GraphEdgeClipper to be able to do its work properly. The // test is constructed such that if the crossings are incorrectly associated // with the degenerate edge XX then not only will the output be incorrect, // it will also trigger an internal S2_DCHECK. S2BooleanOperation::Options options = RoundToE(1); auto a = "# 0:0, 0:10, 0:4 # "; auto b = "# # 0:0, -5:5, 5:5"; ExpectResult(OpType::DIFFERENCE, options, a, b, "# 0:0, 0:0 | 0:5, 0:10, 0:5 #"); } TEST(S2BooleanOperation, PolygonEdgeIsolatedStartVertexPlusInteriorCrossing) { // Similar to the case above, but tests a polygon XYZ rather than a // polyline. This requires using the CLOSED polygon model and computing the // intersection with a clockwise loop rather than subtracting a CCW loop. // The test is constructed such that if the crossings for the edge 0:0, 0:8 // are incorrectly associated with the degenerate edge 0:0, then not only // will the output be incorrect, it will also trigger an internal S2_DCHECK. S2BooleanOperation::Options options = RoundToE(1); options.set_polygon_model(PolygonModel::CLOSED); auto a = "# # 0:0, 5:5, -5:5"; auto b = "# # 1:4, 0:0, 0:8"; ExpectResult(OpType::INTERSECTION, options, a, b, "# # 0:0; 0:5, 0:8, 0.8:5"); } TEST(S2BooleanOperation, PolygonVertexOpenPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::OPEN); auto a = "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5"; auto b = "# # 0:0, 5:3, 5:2"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5, 0:0, 5:3, 5:2"); ExpectResult(OpType::INTERSECTION, options, a, b, "# #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5, 0:0, 5:3, 5:2"); } TEST(S2BooleanOperation, PolygonVertexSemiOpenPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::SEMI_OPEN); auto a = "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5"; auto b = "# # 0:0, 5:3, 5:2"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5, 0:0, 5:3, 5:2"); ExpectResult(OpType::INTERSECTION, options, a, b, "# #"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5, 0:0, 5:3, 5:2"); } TEST(S2BooleanOperation, PolygonVertexClosedPolygonVertex) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::CLOSED); auto a = "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5"; auto b = "# # 0:0, 5:3, 5:2"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5, 0:0, 5:3, 5:2"); ExpectResult(OpType::INTERSECTION, options, a, b, "# # 0:0"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5"); ExpectResult(OpType::DIFFERENCE, options, b, a, "# # 0:0, 5:3, 5:2"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:5, 1:5, 0:0, 2:5, 3:5, 0:0, 5:3, 5:2"); } TEST(S2BooleanOperation, PolygonEdgePolygonEdgeCrossing) { // Two polygons whose edges cross at points interior to both edges. 
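  // The two squares overlap in the lat/lng range [1,2]x[1,2]; the edge
  // crossings near 1:2 and 2:1 are snapped to two decimal places by
  // RoundToE(2) so that they land exactly on those values in the expected
  // results.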
S2BooleanOperation::Options options = RoundToE(2); auto a = "# # 0:0, 0:2, 2:2, 2:0"; auto b = "# # 1:1, 1:3, 3:3, 3:1"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:2, 1:2, 1:3, 3:3, 3:1, 2:1, 2:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "# # 1:1, 1:2, 2:2, 2:1"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:2, 1:2, 1:1, 2:1, 2:0"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:2, 1:2, 1:1, 2:1, 2:0; " "1:2, 1:3, 3:3, 3:1, 2:1, 2:2"); } TEST(S2BooleanOperation, PolygonEdgeOpenPolygonEdgeOverlap) { S2BooleanOperation::Options options; // One shape is a rectangle, the other consists of one triangle inside the // rectangle and one triangle outside the rectangle, where each triangle // shares one edge with the rectangle. This implies that the edges are in // the same direction in one case and opposite directions in the other case. options.set_polygon_model(PolygonModel::OPEN); auto a = "# # 0:0, 0:4, 2:4, 2:0"; auto b = "# # 0:0, 1:1, 2:0; 0:4, 1:5, 2:4"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:4, 2:4, 2:0; 0:4, 1:5, 2:4"); ExpectResult(OpType::INTERSECTION, options, a, b, "# # 0:0, 1:1, 2:0"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:4, 2:4, 2:0, 1:1"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:4, 2:4, 2:0, 1:1; 0:4, 1:5, 2:4"); } TEST(S2BooleanOperation, PolygonEdgeSemiOpenPolygonEdgeOverlap) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::SEMI_OPEN); auto a = "# # 0:0, 0:4, 2:4, 2:0"; auto b = "# # 0:0, 1:1, 2:0; 0:4, 1:5, 2:4"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:4, 1:5, 2:4, 2:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "# # 0:0, 1:1, 2:0"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:4, 2:4, 2:0, 1:1"); // Note that SYMMETRIC_DIFFERENCE does not guarantee that results are // normalized, i.e. the output could contain siblings pairs (which can be // discarded using S2Builder::GraphOptions). ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:4, 2:4, 2:0, 1:1; 0:4, 1:5, 2:4"); } TEST(S2BooleanOperation, PolygonEdgeClosedPolygonEdgeOverlap) { S2BooleanOperation::Options options; options.set_polygon_model(PolygonModel::CLOSED); auto a = "# # 0:0, 0:4, 2:4, 2:0"; auto b = "# # 0:0, 1:1, 2:0; 0:4, 1:5, 2:4"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:4, 1:5, 2:4, 2:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "# # 0:0, 1:1, 2:0; 0:4, 2:4"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:4, 2:4, 2:0, 1:1"); // Note that SYMMETRIC_DIFFERENCE does not guarantee that results are // normalized, i.e. the output could contain siblings pairs. ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:4, 2:4, 2:0, 1:1; 0:4, 1:5, 2:4"); } TEST(S2BooleanOperation, PolygonPolygonInterior) { S2BooleanOperation::Options options; // PolygonModel is irrelevant. // One loop in the interior of another polygon and one loop in the exterior. 
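  // The loop "1:1, 1:2, 2:2, 2:1" lies entirely inside 'a' and the loop
  // "5:5, 5:6, 6:6, 6:5" lies entirely outside it, so there are no edge
  // crossings and the results are determined purely by interior containment
  // (which is why the polygon model does not matter here).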
auto a = "# # 0:0, 0:4, 4:4, 4:0"; auto b = "# # 1:1, 1:2, 2:2, 2:1; 5:5, 5:6, 6:6, 6:5"; ExpectResult(OpType::UNION, options, a, b, "# # 0:0, 0:4, 4:4, 4:0; 5:5, 5:6, 6:6, 6:5"); ExpectResult(OpType::INTERSECTION, options, a, b, "# # 1:1, 1:2, 2:2, 2:1"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:0, 0:4, 4:4, 4:0; 2:1, 2:2, 1:2, 1:1"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:0, 0:4, 4:4, 4:0; 2:1, 2:2, 1:2, 1:1; " "5:5, 5:6, 6:6, 6:5"); } TEST(S2BooleanOperation, PolygonEdgesDegenerateAfterSnapping) { S2BooleanOperation::Options options = RoundToE(0); // Two narrow rectangles forming a plus sign. auto a = "# # 0:-1, 0:1, 0.1:1, 0.1:-1"; auto b = "# # -1:0.1, 1:0.1, 1:0, -1:0"; // When snapping causes an output edge to become degenerate, it is still // emitted (since otherwise loops that contract to a single point would be // lost). If the output layer doesn't want such edges, they can be removed // via DegenerateEdges::DISCARD. ExpectResult(OpType::UNION, options, a, b, "# # 0:-1, 0:0, 0:1, 0:0 | " "-1:0, 0:0, 1:0, 0:0"); ExpectResult(OpType::INTERSECTION, options, a, b, "# # 0:0"); ExpectResult(OpType::DIFFERENCE, options, a, b, "# # 0:-1, 0:0, 0:1, 0:0"); ExpectResult(OpType::SYMMETRIC_DIFFERENCE, options, a, b, "# # 0:-1, 0:0, 0:1, 0:0 | " "-1:0, 0:0, 1:0, 0:0"); } // The following class comprehensively tests the handling of degenerate // geometry. It is used to implement over 4,000 individual test cases encoded // as a series of textual tables. class DegeneracyCoverageTest : public ::testing::Test { public: // Verifies that the S2BooleanOperation results for the given OpType and // PolygonModel match the given set of rules (encoded as described below). void Run(OpType op_type, PolygonModel polygon_model, const vector& rules); private: // The inputs to the test cases are intended to span all possible types of // degenerate and non-degenerate edge configurations in the vicinity of an // individual input edge. Essentially we are trying to cover all possible // behaviors of S2BooleanOperation::Impl::CrossingProcessor::ProcessEdge(), // which is responsible for determining which parts of each input edge // should be emitted to the output. // // For this purpose it is sufficient to consider two points A and B and the // possible types of degeneracies that involve just these two points. The // actual locations of the points are immaterial, although for descriptive // purposes we suppose that B is located above A so that the edge AB is // considered "up" while the edge BA is considered "down". Only one point // needs to be considered for point degeneracies, so we use A for this // purpose. This means that all inputs consist of some subset of AA, AB, // and BA represented as edges of dimensions 0, 1, or 2. For example, a // polyline edge intersected with a point located at one of its vertices // would be represented as the edge AB of dimension 1 in region 0 and the // edge AA of dimension 0 in region 1. // // Each possibility is represented as a single letter as follows (with the // corresponding edges in braces): // // Special: . 
= empty {} // * = full sphere {} // // Dimension 0: p = point {AA} // // Dimension 1: P = point polyline {AA} // u = up edge {AB} // d = down edge {BA} // // Dimension 2: s = point shell {AA} // S = sibling pair shell {AB, BA} // U = up edge {AB} // D = down edge {BA} // H = sibling pair hole {AB, BA} // h = point hole {AA} // // Using this encoding, the test case described above would be represented // as 'u' for region 0 and 'p' for region 1. Note that while the test cases // focus only on what happens to the edges AB, BA, and AA, additional edges // are needed in order to construct valid inputs. For example, the test // case U (the "up" polygon edge AB) is represented as a triangle ABC, while // the test case D (the "down" polygon edge BA) is represented as the // triangle AB(-C). // // The expected result of a given operation is then represented as a // sequence of the characters above. In some cases additional symbols are // needed to define the expected results. For example, in the closed // polygon model the union of U and D is the quadrilateral A(-C)BC which // does not appear in the list of symbols above. So the results use the // following additional symbols: // // Q : the union of the "U" and "D" shapes (a quadrilateral) // B : a point polyline consisting only of the vertex B {BB} // // Finally, in some cases the expected result depends on the polyline model, // or on whether one of the two regions contains the vertex A. For example, // the intersection of 'p' and 'U' in the semi-open model depends on whether // the representation of U (the triangle ABC mentioned above) contains its // vertex A. Conditional results of this sort are encoded using the // following operators: // // ~X : the complement of X (where X must be either U or D) // XY : the result is X if region 1 contains point A, otherwise Y. // X|Y|Z : the result is X, Y, or Z depending on whether the polyline // model is OPEN, SEMI_OPEN, or CLOSED. // // The operators above may be combined, e.g. X<>Y means the result is X if // both regions contain point A and Y otherwise. // These are the characters representing possible input regions, in the // order they are used in the rules listed further below. const string kInputChars = ".pPudsSUDHh*"; // Returns an S2ShapeIndex corresponding to the given string of characters. unique_ptr MakeIndex(string_view chars) const; // Verifies that S2BooleanOperation returns the given result for the inputs // represented by the characters 'ch0' and 'ch1'. void TestResult(OpType op_type, const S2BooleanOperation::Options& options, char ch0, char ch1, string_view result) const; }; void DegeneracyCoverageTest::Run(OpType op_type, PolygonModel polygon_model, const vector& rules) { S2_CHECK_EQ(rules.size(), kInputChars.size()); // For the symmetric operators (i.e., all except difference) we only need to // define half of the expected results. const bool symmetric = (op_type != OpType::DIFFERENCE); // Possible values for Options::polyline_model(). const vector kPolylineModels = { PolylineModel::OPEN, PolylineModel::SEMI_OPEN, PolylineModel::CLOSED }; // Possible values for Options::polyline_loops_have_boundaries(). const vector kPolylineLoopOptions = { true, false }; // The set of characters representing polyline inputs. 
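  // (namely the degenerate polyline 'P' and the single polyline edges 'u'
  // and 'd' from the table above)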
const string kLineChars = "Pud"; // The following nested loops iterate over all combinations of: // - Input character 0 // - Input character 1 // - polyline model (if either input is a polyline) // - whether a closed polyline loop is considered to have a boundary // (if either input is a degenerate polyline) S2BooleanOperation::Options options; options.set_polygon_model(polygon_model); for (int i = 0; i < kInputChars.size(); ++i) { char ch0 = kInputChars[i]; vector row = StrSplit(rules[i], ' ', SkipEmpty()); // Verify and remove the row label. S2_CHECK_EQ(row[0], string{ch0}); S2_CHECK_EQ(row[1], "|"); row.erase(row.begin(), row.begin() + 2); const int limit = symmetric ? (i + 1) : kInputChars.size(); S2_CHECK_EQ(row.size(), limit); for (int j = 0; j < limit; ++j) { char ch1 = kInputChars[j]; // Only iterate over polyline models if at least one input is a polyline. int num_line_models = (StrContains(kLineChars, ch0) || StrContains(kLineChars, ch1)) ? 3 : 1; for (int k = 0; k < num_line_models; ++k) { options.set_polyline_model(kPolylineModels[k]); // Only iterate over polyline loop boundary options if at least one // input is a degenerate polyline. int num_polyline_loop_options = (ch0 == 'P' || ch1 == 'P') ? 2 : 1; for (int m = 0; m < num_polyline_loop_options; ++m) { options.set_polyline_loops_have_boundaries(kPolylineLoopOptions[m]); string_view code = row[j]; // Process any '<' or '>' operators in the result. vector choices = StrSplit(code, ByAnyChar("<>"), SkipEmpty()); string_view result = choices[0]; if (choices.size() > 1) { S2_CHECK_EQ(choices.size(), 2); // Test whether each input contains the point A. Note that we // can't use S2ContainsPointQuery because the containment test // must be done using the given S2BooleanOperation options. bool in0 = S2BooleanOperation::Contains(*MakeIndex(string{ch0}), *MakeIndex("p"), options); bool in1 = S2BooleanOperation::Contains(*MakeIndex(string{ch1}), *MakeIndex("p"), options); // If the point containment conditions specified by the operators // '<' and '>' are satisfied then the result is choices[0], // otherwise it is choices[1]. if ((StrContains(code, '<') && !in0) || (StrContains(code, '>') && !in1)) { result = choices[1]; } } // Next process any '|' operators in the result. choices = StrSplit(result, '|'); if (choices.size() > 1) { S2_CHECK_EQ(num_line_models, 3) << "No polylines present"; S2_CHECK_EQ(choices.size(), 3); result = choices[k]; } TestResult(op_type, options, ch0, ch1, result); if (symmetric && j != i) { TestResult(op_type, options, ch1, ch0, result); } } } } } } void DegeneracyCoverageTest::TestResult( OpType op_type, const S2BooleanOperation::Options& options, char ch0, char ch1, string_view result) const { auto index0 = MakeIndex(string{ch0}); auto index1 = MakeIndex(string{ch1}); auto expected = MakeIndex(result); SCOPED_TRACE(absl::StrCat( "\n Operation: ", S2BooleanOperation::OpTypeToString(op_type), "\n PolygonModel: ", S2BooleanOperation::PolygonModelToString(options.polygon_model()), "\n PolylineModel: ", S2BooleanOperation::PolylineModelToString(options.polyline_model()), "\n polyline_loops_have_boundaries: ", options.polyline_loops_have_boundaries() ? 
"true" : "false", "\n Inputs: ", string{ch0}, ", ", string{ch1}, "\n Expected: ", result)); ExpectResult(op_type, options, *index0, *index1, *expected); } unique_ptr DegeneracyCoverageTest::MakeIndex( string_view chars) const { // The locations of A, B, C are arbitrary, however some tests are sensitive // as to whether certain polygons contain the points {A, B} or not. If // these points are moved then some test results may need to be changed. S2Point a(1, 0, 0), b(0, 0, 1), c(0, 1, 0); auto index = make_unique(); for (int i = 0; i < chars.size(); ++i) { using Loops = vector>; char ch = chars[i]; if (ch == '.') { // Empty } else if (ch == 'p') { // Point index->Add(make_unique(vector{a})); } else if (ch == 'P') { // Polyline consisting only of the point A index->Add(make_unique(vector{a, a})); } else if (ch == 'B') { // Polyline consisting only of the point B index->Add(make_unique(vector{b, b})); } else if (ch == 'u') { // Upwards polyline edge index->Add(make_unique(vector{a, b})); } else if (ch == 'd') { // Downwards polyline edge index->Add(make_unique(vector{b, a})); } else if (ch == 's') { // Point shell index->Add(make_unique(Loops{{a}})); } else if (ch == 'S') { // Sibling pair shell index->Add(make_unique(Loops{{a, b}})); } else if (ch == 'U') { // Upwards polygon edge int i = index->Add(make_unique(Loops{{a, b, -c}})); // Some test results require that the U polygon contains A but not B. S2_CHECK(ContainsBruteForce(*index->shape(i), a)); S2_CHECK(!ContainsBruteForce(*index->shape(i), b)); } else if (ch == 'D') { // Downwards polygon edge int i = index->Add(make_unique(Loops{{b, a, c}})); // Some test cases require that the D polygon excludes both A and B. S2_CHECK(!ContainsBruteForce(*index->shape(i), a)); S2_CHECK(!ContainsBruteForce(*index->shape(i), b)); } else if (ch == '~') { // Complement of following region (U or D) ch = chars[++i]; if (ch == 'U') { index->Add(make_unique(Loops{{-c, b, a}})); } else if (ch == 'D') { index->Add(make_unique(Loops{{c, a, b}})); } else { S2_LOG(FATAL) << "Unsupported character for ~ operator: " << string{ch}; } } else if (ch == 'Q') { // Union of 'U' and 'D' shapes index->Add(make_unique(Loops{{a, c, b, -c}})); } else if (ch == 'H') { // Sibling pair hole index->Add(make_unique(Loops{{a, b}, {}})); } else if (ch == 'h') { // Point hole index->Add(make_unique(Loops{{a}, {}})); } else if (ch == '*') { // Full sphere index->Add(make_unique(Loops{{}})); } else { S2_LOG(FATAL) << "Unknown degeneracy coverage symbol: " << string{ch}; } } return index; } TEST_F(DegeneracyCoverageTest, OpenIntersection) { const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | . pp", "P | . p<. PP", "u | . p<. PP<. uu", "d | . p<. PP<. ud dd", "s | . . . . . s", "S | . . . . . . S", "U | . . . . . . . U", "D | . . . . . . . . D", "H | . . . . . . . U D H", "h | . . . u d . S U D H h", "* | . p P u d s S U D H h *", }; Run(OpType::INTERSECTION, PolygonModel::OPEN, rules); } TEST_F(DegeneracyCoverageTest, SemiOpenIntersection) { const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | . pp", "P | . p<. PP", "u | . p<. PP<. uu", "d | . p<. PP<. ud dd", "s | . . . . . s", "S | . p<. P<. . . s<. S", "U | . p<. P<. u P<>. s<. . U", "D | . p<. P<. P<>. d s<. . . D", "H | . p<. P<. u d s<. . U D H", "h | . p P u d . S U D H h", "* | . 
p P u d s S U D H h *", }; Run(OpType::INTERSECTION, PolygonModel::SEMI_OPEN, rules); } TEST_F(DegeneracyCoverageTest, ClosedIntersection) { const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | . pp", "P | . p<. PP", "u | . p<. PP<. uu", "d | . p<. PP<. ud dd", "s | . p P P>. P>. s", "S | . p P u d s S", "U | . p P u d s S U", "D | . p P u d s S S D", "H | . p P u d s S U D H", "h | . p P u d s S U D H h", "* | . p P u d s S U D H h *", }; Run(OpType::INTERSECTION, PolygonModel::CLOSED, rules); } TEST_F(DegeneracyCoverageTest, OpenUnion) { const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | p pp", "P | P Ph Ph>h h h h h h h", "* | * * * * * * * * * * * *", }; Run(OpType::UNION, PolygonModel::OPEN, rules); } TEST_F(DegeneracyCoverageTest, SemiOpenUnion) { // CAVEAT: The results for (U,u) and (D,d) require the U polygon to contain // vertex A but not vertex B, and the D polygon to contain neither vertex. // This differs from most of the other tests, which encode the results // conditionally using the '<' and '>' operators. That was not practical in // this case because (1) no conditional operators are defined for the 'B' // vertex and (2) encoding the full set of possibilites for all 12 cases // (i.e., the 3 polyline models and whether U contains A and/or B) would be // unwieldy. const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | p pp", "P | P Ph *>h *>h *>h h", "* | * * * * * * * * * * * *", }; Run(OpType::UNION, PolygonModel::SEMI_OPEN, rules); } TEST_F(DegeneracyCoverageTest, ClosedUnion) { const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | p pp", "P | P P rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | . . . . . . . . . . . .", "p | p . .>p .>p .>p p p p p p p .", "P | P P . .>P .>P P P P P P P .", "u | u u u . .|P|. u u u u u P<. .", "d | d d d .|B|. . d d d d d P<. .", "s | s s s s s . s s s s s .", "S | S S S S S S . S S S . .", "U | U U U U U U U . U . . .", "D | D D D D D D D D . . . .", "H | H H H H H H H ~U ~D . . .", "h | h h h h h h H ~U ~D S . .", "* | * * * * * h H ~U ~D S s .", }; Run(OpType::DIFFERENCE, PolygonModel::OPEN, rules); } TEST_F(DegeneracyCoverageTest, SemiOpenDifference) { // See SemiOpenUnion notes regarding (u,U) and (d,D). const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | . . . . . . . . . . . .", "p | p . .>p .>p .>p p p .>p .>p . . .", "P | P P . .>P .>P P P .>P .>P . . .", "u | u u u . .|P|. u u .|.|B u . . .", "d | d d d .|B|. . d d d .|B|PB . . .", "s | s s s s s . .>s .>s .>s .>s s .", "S | S S S S S S . . . S s<. .", "U | U U U U U U U . U . s<. .", "D | D D D D D D D D . . s<. .", "H | H H H H H H H ~U ~D . s<. .", "h | h h h h h h H ~U ~D S . .", "* | * * * * * h H ~U ~D S s .", }; Run(OpType::DIFFERENCE, PolygonModel::SEMI_OPEN, rules); } TEST_F(DegeneracyCoverageTest, ClosedDifference) { const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | . . . . . . . . . . . .", "p | p . .>p .>p .>p . . . . . . .", "P | P P . .>P .>P . . . . . . .", "u | u u u . .|P|. u . . . . . 
.", "d | d d d .|B|. . d . . . . . .", "s | s s s s s . . . . . s .", "S | S S S S S S . . . S . .", "U | U U U U U U U . U . . .", "D | D D D D D D D D . . . .", "H | H H H H H H H ~U ~D . . .", "h | h h h h h h H ~U ~D S . .", "* | * * * * * h H ~U ~D S s .", }; Run(OpType::DIFFERENCE, PolygonModel::CLOSED, rules); } TEST_F(DegeneracyCoverageTest, OpenSymmetricDifference) { const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | p .", "P | P Ph hP>h h H ~U ~D S .", "* | * * * * * h H ~U ~D S s .", }; Run(OpType::SYMMETRIC_DIFFERENCE, PolygonModel::OPEN, rules); } TEST_F(DegeneracyCoverageTest, SemiOpenSymmetricDifference) { // See SemiOpenUnion notes regarding (U,u) and (D,d). const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | p .", "P | P PH, (h,s)->h and (U,D)->UD. In all three cases the // shared boundary is present on both sides and therefore these edges should // not be contained by the result, however this is not possible under the // CLOSED model. The indicated results are the best approximation. const vector rules = { // . p P u d s S U D H h * // |----------------------------------------------------------------------- ". | .", "p | p .", "P | P P> loops{{S2Point(0, 0, -1), S2Point(-1, 0, 0), S2Point(0, 0, 1), S2Point(-1, 0, 0)}}; meridian.Add(make_unique(loops)); MutableS2ShapeIndex output; vector> layers(3); layers[0] = make_unique(&output); // TODO(ericv): Implement s2builderutil::IndexedS2LaxPolylineVectorLayer. layers[1] = make_unique(&output); layers[2] = make_unique(&output); S2BooleanOperation op(OpType::DIFFERENCE, std::move(layers)); S2Error error; ASSERT_TRUE(op.Build(*input, meridian, &error)) << error; EXPECT_EQ(expected_str, s2textformat::ToString(output)); } // This test demonstrated that S2 geometry can easily be transformed such that // no edge crosses the 180 degree meridian, as required by formats such as // GeoJSON, by simply subtracting a degenerate loop that follows the 180 degree // meridian. This not only splits polylines along the meridian, it also inserts // the necessary extra vertices at the north/south poles. (The only extra step // is that the vertices along the 180 degree meridian or at the poles may need // to be "doubled" into two vertices, one at longitude 180 and one at longitude // -180, in order to match the longitudes of the adjacent vertices.) TEST(S2BooleanOperation, MeridianSplitting) { // A line along the equator crossing the 180 degree meridian. TestMeridianSplitting("# 0:-160, 0:170 #", "# 0:-160, 0:180, 0:170 #"); // The northern hemisphere. TestMeridianSplitting("# # 0:0, 0:120, 0:-120", "# # 90:0, 0:180, 0:-120, 0:0, 0:120, 0:180"); // A small square that crosses the 180th meridian. Notice that one input // loop is split into two output loops. TestMeridianSplitting( "# # 9:179, 9:-179, 10:-179, 10:179", "# # 9.00134850712993:180, 9:-179, 10:-179, 10.0014925269841:180; " "10.0014925269841:180, 10:179, 9:179, 9.00134850712993:180"); // An annulus that crosses the 180th meridian. This turns into two shells. 
TestMeridianSplitting( "# # 8:178, 8:-178, 11:-178, 11:178; 9:179, 10:179, 10:-179, 9:-179", "# # 10.0014925269841:180, 10:-179, 9:-179, 9.00134850712993:180, " "8.00481316618607:180, 8:-178, 11:-178, 11.00654129428:180; " "9.00134850712993:180, 9:179, 10:179, 10.0014925269841:180, " "11.00654129428:180, 11:178, 8:178, 8.00481316618607:180"); // An annulus that crosses the 180th meridian. This turns into two shells. TestMeridianSplitting( "# # 8:178, 8:-178, 11:-178, 11:178; 9:179, 10:179, 10:-179, 9:-179", "# # 10.0014925269841:180, 10:-179, 9:-179, 9.00134850712993:180, " "8.00481316618607:180, 8:-178, 11:-178, 11.00654129428:180; " "9.00134850712993:180, 9:179, 10:179, 10.0014925269841:180, " "11.00654129428:180, 11:178, 8:178, 8.00481316618607:180"); // The whole world except for a small square that crosses the 180th meridian. // This is a single loop that visits both poles. The result is correct // except that (1) +180 or -180 needs to be chosen consistently with the // adjacent points, and (2) each pole needs to be duplicated (once with // longitude -180 and once with longitude 180). TestMeridianSplitting( "# # 9:-179, 9:179, 10:179, 10:-179", "# # 0:180, 9.00134850712993:180, 9:179, 10:179, 10.0014925269841:180, " "90:0, 10.0014925269841:180, 10:-179, 9:-179, 9.00134850712993:180, " "0:180, -90:0"); } void ComputeTestUnion(const vector>& a_loops, const vector>& b_loops, S1Angle snap_radius, S2LaxPolygonShape* result) { MutableS2ShapeIndex a, b; a.Add(make_unique(a_loops)); b.Add(make_unique(b_loops)); S2BooleanOperation op(OpType::UNION, make_unique(result), S2BooleanOperation::Options( s2builderutil::IdentitySnapFunction(snap_radius))); S2Error error; ASSERT_TRUE(op.Build(a, b, &error)) << error; EXPECT_FALSE(result->is_empty()) << "\nS2Polygon: " << s2textformat::ToString(a) << "\nS2Polygon: " << s2textformat::ToString(b); } TEST(S2BooleanOperation, GetCrossedVertexIndexBug1) { // This test exercises a rare special case in GetCrossedVertexIndex where // two crossing edge chains snap to a different permutation of the same // vertices. In this example one input edge crosses another edge from right // to left, the first edge snaps to BCD and the second snaps to ABDC, and // triangle BCD is CCW. Since BCD is to the right of BD, this means that // the first edge has not yet crossed the second at vertex B, leaving C or D // as the possible crossing vertices. vector> a_loops = { { {-0.38306437985388492, -0.74921955334206214, 0.54030708099846292}, {-0.3830643798552798, -0.74921955334134249, 0.5403070809984718}, {-0.38306437985529124, -0.74921955334136414, 0.54030708099843361}, {-0.38306437985389635, -0.74921955334208379, 0.54030708099842473}, }, }; vector> b_loops = { { {-0.38306437985390962, -0.74921955334210588, 0.54030708099838465}, {-0.38306437985527797, -0.74921955334134205, 0.54030708099847369}, {-0.38306437985527941, -0.74921955334134405, 0.54030708099847014}, {-0.38306437985391095, -0.74921955334210777, 0.54030708099838098}, }, }; S2LaxPolygonShape result; ComputeTestUnion(a_loops, b_loops, S2::kIntersectionMergeRadius, &result); } TEST(S2BooleanOperation, GetCrossedVertexIndexBug2) { // This test exercises another rare case where the crossing vertices chosen // by GetCrossedVertexIndex() are not ordered correctly along the edge being // crossed. This is handled by adding extra edges to the output in order to // link up the crossings in the correct order. 
vector> a_loops = { { {-0.3837392878495085, -0.7477800800281974, 0.5418201831546835}, {-0.38373928785696076, -0.7477800800212292, 0.54182018315902258}, {-0.38373928785701278, -0.74778008002124685, 0.5418201831589613}, {-0.38373928785703426, -0.7477800800212544, 0.54182018315893576}, {-0.38373947205489456, -0.74778014227795497, 0.5418199667802881}, {-0.38373947204434411, -0.74778014228781997, 0.54181996677414512}, {-0.38373947205872994, -0.74778014228185352, 0.54181996677219124}, {-0.38373947218468357, -0.74778014288930306, 0.54181996584462788}, {-0.3837396702525171, -0.74778021044361542, 0.54181973233114322}, {-0.38373967023137123, -0.74778021046333043, 0.54181973231891067}, {-0.38373947216030285, -0.74778014290791484, 0.54181996583620895}, {-0.38373947217087578, -0.74778014289805739, 0.54181996584232528}, {-0.38373947215649007, -0.74778014290402395, 0.54181996584427927}, {-0.3837394720305386, -0.74778014229658485, 0.5418199667718262}, {-0.38373928783585998, -0.74778008004095942, 0.54182018314673686}, {-0.38373928784641037, -0.7477800800310942, 0.54182018315287972}, {-0.38373928783578648, -0.74778008004093421, 0.54182018314682368}, {-0.383739287835765, -0.74778008004092666, 0.54182018314684921}, }, }; vector> b_loops = { { {-0.38373923813692823, -0.7477800632164362, 0.54182024156551456}, {-0.3837392878569364, -0.74778008002122087, 0.54182018315905123}, {-0.38373928784640354, -0.74778008003106944, 0.54182018315291858}, {-0.38373928784638789, -0.74778008003108642, 0.54182018315290648}, {-0.38373928784638023, -0.74778008003109453, 0.54182018315290048}, {-0.38373928783692102, -0.74778008004124585, 0.54182018314559}, {-0.38373928783691913, -0.74778008004124541, 0.54182018314559188}, {-0.38373928784636568, -0.74778008003110774, 0.54182018315289271}, {-0.38373928784637329, -0.74778008003109953, 0.54182018315289848}, {-0.38373928783583561, -0.74778008004095109, 0.5418201831467655}, {-0.38373923811582744, -0.74778006323616641, 0.54182024155322883}, {-0.38373857650312843, -0.74777983961840766, 0.54182101875399913}, {-0.38373857652422921, -0.74777983959867744, 0.54182101876628486}, }, }; S2LaxPolygonShape result; ComputeTestUnion(a_loops, b_loops, S2::kIntersectionMergeRadius, &result); } TEST(S2BooleanOperation, GetCrossedVertexIndexBug3) { // This test exercise the special case in GetCrossedVertexIndex() that // requires checking the orientation of a loop. This is done by adding up the // turning angles at each vertex, which in turn involves computing the edge // normals and measuring the angles between them. However in this test, some // of the edge normals returned by S2::RobustCrossProd() used to be so small // that there were floating-point underflows when computing the angles between // them. This was fixed by implementing the long-standing TODO of making // S2::RobustCrossProd() actually robust. 
vector> a_loops = { { {1, 0, 2.4678234835261742e-72}, {0.99984769515639127, 0.017452406437283512, 1.8530922845942552e-27}, {0.99740259703611311, 0.069881849826437858, 0.017452406437283512}, }, }; vector> b_loops = { { {0.99999999999999989, 2.4674476220564615e-72, 2.4678234835261742e-72}, {0.99999999999999989, 2.8837981406657438e-169, 2.4678234835261742e-72}, {1, 2.8837981406657432e-169, 2.4678234835261742e-72}, }, }; S2LaxPolygonShape result; ComputeTestUnion(a_loops, b_loops, S1Angle::Zero(), &result); } TEST(S2BooleanOperation, GetCrossedVertexIndexBug4) { // This example tests the "special case" in GetCrossedVertexIndex() in // situations where two edges snap to the same sequence of vertices in // different orders. The first two edges (a0, a1) and (b0, b1) of the // following polygons cross such that after snapping, the corresponding edge // chains are: // // a0 a1 -> a0 b0 b1 x a1 // b0 b1 -> b0 x b1 // // where "x" is the computed intersection point of (a0, a1) and (b0, b1). // Previously there was a bug such that the two edge chains did not choose // the same vertex to represent the point where the two chains cross: the // (a0, a1) chain chose "x" as the crossing point while the (b0, b1) chain // chose "b0". This has been fixed such that both chains now choose "x". // (Both "x" and "b1" happen to be valid choices in this example, but it is // essential that both subchains make the same choice.) // S2LatLng coordinates are not accurate enough to reproduce this example. vector> a_loops{{ // 51.5131559470858:-0.130381523356724 {0.62233331065911901, -0.0014161759526823048, 0.78275107466533156}, // 51.5131892038956:-0.130404244210776 {0.6223328557578689, -0.0014164217071954736, 0.78275143589379825}, s2textformat::MakePointOrDie("51.51317:-0.1306") }}; vector> b_loops{{ // 51.5131559705551:-0.13038153939079 {0.62233331033809591, -0.001416176126110953, 0.78275107492024998}, // 51.5131559705551:-0.130381539390786 {0.62233331033809591, -0.0014161761261109063, 0.78275107492025009}, s2textformat::MakePointOrDie("51.52:-0.12"), s2textformat::MakePointOrDie("51.52:-0.14") }}; S2LaxPolygonShape result; ComputeTestUnion(a_loops, b_loops, S1Angle::Zero(), &result); } TEST(S2BooleanOperation, GetCrossedVertexIndexBug5) { // Yet another bizarre situation where two crossing edges snap (correctly) to // a sequence of vertices in different orders. Using the internal vertex // numbers assigned by S2Builder, input edges 3 and 12 snap to the following // vertex sequences: // // Input edge 3: 14, 8, 4, 9, 2, 5 // Input edge 12: 2, 7, 8, 9 // // Furthermore input edge 3 crosses input edge 12 from left to right. // Schematically, here is what edge 12 crossing edge 3 looks like: // // 14-->--8-->--4-->--9-->--2-->--5 // |\ / / // \ \--->---/ / // \ / // \--<--7--<--/ // // And here is what edge 3 crossing edge 12 looks like: // // 14-->--\ /---4->-\ // \ / \ // 2-->--7-->--8----->-----9 // / \ / // 5--<--/ \---------<---------/ // // In both cases, the only possible choice of crossing vertex consistent with // the fact that edge 3 crosses edge 12 from left to right is vertex 9. // Determining this requires knowing that loop (9, 2, 7, 8) is clockwise // (the "special case" in GetCrossedVertexIndex). The code previously didn't // have quite the correct test to decide when this was necessary. 
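// (Illustrative aside, not part of the original test: one way to perform the
// orientation check described above is to sum the signed turning angles
// around the loop. S2::TurnAngle() is assumed here to be the s2measures
// helper that returns the exterior angle at vertex "b", positive for
// counterclockwise turns; for a small loop the sum is close to +2*Pi when
// the loop is CCW and close to -2*Pi when it is clockwise.)
//
//   double turning = 0;
//   const int n = loop.size();
//   for (int i = 0; i < n; ++i) {
//     turning += S2::TurnAngle(loop[(i + n - 1) % n], loop[i],
//                              loop[(i + 1) % n]);
//   }
//   const bool is_clockwise = turning < 0;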
vector> a_loops = { { {0.99984769515639127, 0, 0.017452406437283512}, {0.99923861495548261, 0.017441774902830158, 0.034899496702500969}, {0.99847743863945992, 0.052327985223313139, 0.017452406437283512}, {0.99802119662406841, 0.034851668155187324, 0.052335956242943835}, }, }; vector> b_loops = { { {0.99802119662406841, 0.034851668155187324, 0.052335956242943835}, {0.99619692339885657, 0.052208468483931986, 0.069756473744125302}, {0.99802098681615425, 0.034839714972148959, 0.052347914334467859}, {0.99741208276778681, 0.017411821260589495, 0.069756473744125302}, {0.99741219210106513, 0.017411340538768819, 0.069755030419252628}, {0.99741211642315963, 0.017409893252357169, 0.069756473744125302}, {0.99984769515639116, 4.9500424645560228e-16, 0.017452406437284993}, {0.99984769515639127, 3.7368529835165677e-16, 0.017452406437284632}, {0.99984769515639116, 3.3065924905014365e-16, 0.017452406437284504}, {0.99984769515639127, 9.9060035932242025e-16, 0.017452406437284504}, {0.99969541350954794, 0.017449748351250485, 0.017452406437283512}, }, { {0.99984769515639116, 3.3065924905014365e-16, 0.017452406437284504}, {0.99984769515639116, 3.3006856770496304e-16, 0.017452406437284504}, {0.99984769515639127, 0, 0.017452406437284504}, {0.99984769515639127, 0, 0.017452406437283512}, }, }; S2LaxPolygonShape result; ComputeTestUnion(a_loops, b_loops, S1Angle::Zero(), &result); } TEST(S2BooleanOperation, GetCrossedVertexIndexBug6) { // This is another test of the code in GetCrossedVertexIndex() that checks // whether the B subchain contains an interior vertex of the A edge. vector> a_loops = { { {0.99870488823558456, 0.026138065586168355, 0.043650289137205818}, {0.99876259434149239, 0.030513215246694664, 0.0392711578586665}, {0.99984769515639127, 0.017452406437283512, 0}, {0.998782023517925, 0.034862286684437908, 0.034915476003791211}, {0.99878202512991221, 0.034878236872062651, 0.034899496702500969}, {0.9975640502598242, 0.069756473744125302, 0}, {0.99877979583714305, 0.034883478425067296, 0.034958008531414335}, {0.99619692339885657, 0.052208468483931986, 0.069756473744125302}, {0.99847581234813876, 0.017465633646566288, 0.052354596713645812}, {0.9975640502598242, 0, 0.069756473744125302}, {0.99847674250410212, 0.017444393356200013, 0.052343937746706169}, {0.99847743863945992, 0.017428488520812163, 0.052335956242943835}, {0.99984769515639127, 0, 0.017452406437283512}, }, { {0.99619692339885657, 0.052208468483931986, 0.069756473744125302}, {0.99802119661969568, 0.034851668280404598, 0.052335956242943835}, {0.9987605225894034, 0.030527121154938986, 0.039313018084772409}, {0.99870321796526884, 0.026161932439896601, 0.043674199670139441}, }, { {0.99619692339885657, 0.052208468483931986, 0.069756473744125302}, {0.99619692339885657, 0.06966087492121549, 0.052335956242943835}, {0.99513403437078507, 0.069586550480032719, 0.069756473744125302}, }, }; vector> b_loops = { { {0.99802200429988497, 0.034828499898458924, 0.052335977377554299}, {0.99862953475457383, 0, 0.052335956242943835}, {0.99923793061512223, 0.017455729388178846, 0.034912111530741322}, {0.99923859085845868, 0.017443155365764275, 0.034899496702500969}, {0.99923793076147094, 0.017455737780810811, 0.034912103145779166}, {0.9992865072388355, 0.020934110218524152, 0.0314362764933699}, {1, 0, 0}, {0.99929987808789411, 0.022418034384064717, 0.029953053064335624}, {0.99931406232431441, 0.02616995393092059, 0.026201876881811362}, {0.99984769515639127, 0.017452406437283512, 0}, {0.99930573320200933, 0.029072747464899757, 0.023298646837028814}, {0.99862953475457383, 
0.052335956242943835, 1.700986599320836e-73}, {0.99838518277004218, 0.038347188759395717, 0.041910857059723181}, {0.99619692339885668, 0.052208468483931979, 0.069756473744125289}, }, { {0.99802119662406841, 0.052304074592470849, 0.034899496702500969}, {0.99847743834686298, 0.052327990806397578, 0.017452406437283512}, {0.99619645281505653, 0.052208443821680058, 0.069763212314351342}, {0.99619692339885657, 0.052208468483932, 0.069756473744125316}, {0.99619692339885657, 0.052208468483931986, 0.069756473744125302}, {0.99619692339885679, 0.052208468483931993, 0.069756473744125316}, {0.99619692339885679, 0.052208468483931986, 0.069756473744125302}, {0.99619692339885668, 0.052208468483931979, 0.069756473744125289}, }, }; S2LaxPolygonShape result; ComputeTestUnion(a_loops, b_loops, S1Angle::Zero(), &result); } // Performs the given operation and compares the result to "expected_str". All // arguments are in s2textformat::MakeLaxPolygonOrDie() format. void ExpectPolygon(S2BooleanOperation::OpType op_type, const string& a_str, const string& b_str, const string& expected_str) { auto a = s2textformat::MakeIndexOrDie(string("# # ") + a_str); auto b = s2textformat::MakeIndexOrDie(string("# # ") + b_str); s2builderutil::LaxPolygonLayer::Options polygon_options; polygon_options.set_degenerate_boundaries(DegenerateBoundaries::DISCARD); S2LaxPolygonShape output; S2BooleanOperation op( op_type, make_unique(&output, polygon_options), S2BooleanOperation::Options{s2builderutil::IdentitySnapFunction{ S1Angle::Degrees(1.1)}}); S2Error error; ASSERT_TRUE(op.Build(*a, *b, &error)) << error; EXPECT_EQ(expected_str, s2textformat::ToString(output)); } TEST(S2BooleanOperation, FullAndEmptyResults) { // The following constants are all in s2textformat::MakeLaxPolygonOrDie() // format. string kEmpty = ""; string kFull = "full"; // Two complementary shell/hole pairs, together with alternative shells that // are slightly smaller or larger than the original. string kShell1 = "10:0, 10:10, 20:10"; string kHole1 = "10:0, 20:10, 10:10"; string kShell1Minus = "11:2, 11:9, 18:9"; string kShell1Plus = "9:-2, 9:11, 22:11"; string kShell2 = "10:20, 10:30, 20:30"; string kHole2 = "10:20, 20:30, 10:30"; // The northern and southern hemispheres. string kNorthHemi = "0:0, 0:120, 0:-120"; string kSouthHemi = "0:0, 0:-120, 0:120"; // These edges deviate from kSouthHemi by slightly more than 1 degree. string kSouthHemiPlus = "0.5:0, 0.5:-120, 0.5:120"; // A shell and hole that cover complementary hemispheres, such that each // hemisphere intersects all six S2 cube faces. There are also alternative // shells that are slightly smaller or larger than the original. string k6FaceShell1 = "0:-45, 45:0, 45:90, 0:135, -45:180, -45:-90"; string k6FaceHole1 = "0:-45, -45:-90, -45:180, 0:135, 45:90, 45:0"; string k6FaceShell1Minus = "-1:-45, 44:0, 44:90, -1:135, -46:180, -46:-90"; string k6FaceShell1Plus = "1:-45, 46:0, 46:90, 1:135, -44:180, -44:-90"; // Two complementary shell/hole pairs that are small enough so that they will // disappear when the snap radius chosen above is used. string kAlmostEmpty1 = "2:0, 2:10, 3:0"; string kAlmostFull1 = "2:0, 3:0, 2:10"; string kAlmostEmpty2 = "4:0, 4:10, 5:0"; string kAlmostFull2 = "4:0, 5:0, 4:10"; // A polygon that intersects all 6 faces such but snaps to an empty polygon. string k6FaceAlmostEmpty1 = k6FaceShell1Minus + "; " + k6FaceHole1; // Test empty UNION results. // - Exact result, no input edges. 
ExpectPolygon(OpType::UNION, kEmpty, kEmpty, kEmpty); // - Empty due to snapping, union does not intersect all 6 cube faces. ExpectPolygon(OpType::UNION, kAlmostEmpty1, kAlmostEmpty2, kEmpty); // - Empty due to snapping, union intersects all 6 cube faces. ExpectPolygon(OpType::UNION, k6FaceAlmostEmpty1, k6FaceAlmostEmpty1, kEmpty); // Test full UNION results. // - Exact result, no input edges. ExpectPolygon(OpType::UNION, kEmpty, kFull, kFull); ExpectPolygon(OpType::UNION, kEmpty, kFull, kFull); ExpectPolygon(OpType::UNION, kFull, kFull, kFull); // - Exact result, some input edges. ExpectPolygon(OpType::UNION, kFull, kShell1, kFull); ExpectPolygon(OpType::UNION, kHole1, kHole2, kFull); ExpectPolygon(OpType::UNION, kHole1, kShell1, kFull); // - Full due to snapping, almost complementary polygons. ExpectPolygon(OpType::UNION, kHole1, kShell1Minus, kFull); ExpectPolygon(OpType::UNION, k6FaceHole1, k6FaceShell1Minus, kFull); // Test empty INTERSECTION results. // - Exact result, no input edges. ExpectPolygon(OpType::INTERSECTION, kEmpty, kEmpty, kEmpty); ExpectPolygon(OpType::INTERSECTION, kEmpty, kFull, kEmpty); ExpectPolygon(OpType::INTERSECTION, kFull, kEmpty, kEmpty); // - Exact result, inputs do not both intersect all 6 cube faces. ExpectPolygon(OpType::INTERSECTION, kEmpty, kHole1, kEmpty); ExpectPolygon(OpType::INTERSECTION, kShell1, kShell2, kEmpty); ExpectPolygon(OpType::INTERSECTION, kShell1, kHole1, kEmpty); // - Exact result, inputs both intersect all 6 cube faces. ExpectPolygon(OpType::INTERSECTION, k6FaceShell1, k6FaceHole1, kEmpty); // - Empty due to snapping, inputs do not both intersect all 6 cube faces. ExpectPolygon(OpType::INTERSECTION, kShell1Plus, kHole1, kEmpty); // - Empty due to snapping, inputs both intersect all 6 cube faces. ExpectPolygon(OpType::INTERSECTION, k6FaceShell1Plus, k6FaceHole1, kEmpty); // Test full INTERSECTION results. // - Exact result, no input edges. ExpectPolygon(OpType::INTERSECTION, kFull, kFull, kFull); // - Full due to snapping, almost full input polygons. ExpectPolygon(OpType::INTERSECTION, kAlmostFull1, kAlmostFull2, kFull); // Test empty DIFFERENCE results. // - Exact result, no input edges. ExpectPolygon(OpType::DIFFERENCE, kEmpty, kEmpty, kEmpty); ExpectPolygon(OpType::DIFFERENCE, kEmpty, kFull, kEmpty); ExpectPolygon(OpType::DIFFERENCE, kFull, kFull, kEmpty); // - Exact result, first input does not intersect all 6 cube faces. ExpectPolygon(OpType::DIFFERENCE, kEmpty, kShell1, kEmpty); ExpectPolygon(OpType::DIFFERENCE, kShell1, kFull, kEmpty); ExpectPolygon(OpType::DIFFERENCE, kShell1, kShell1, kEmpty); ExpectPolygon(OpType::DIFFERENCE, kShell1, kHole2, kEmpty); // - Exact result, first input intersects all 6 cube faces. ExpectPolygon(OpType::DIFFERENCE, k6FaceShell1, k6FaceShell1Plus, kEmpty); // - Empty due to snapping, first input does not intersect all 6 cube faces. ExpectPolygon(OpType::DIFFERENCE, kShell1Plus, kShell1, kEmpty); // - Empty due to snapping, first input intersect all 6 cube faces. ExpectPolygon(OpType::DIFFERENCE, k6FaceShell1Plus, k6FaceShell1, kEmpty); // Test full DIFFERENCE results. // - Exact result, no input edges. ExpectPolygon(OpType::DIFFERENCE, kFull, kEmpty, kFull); // - Full due to snapping, almost full/empty input polygons. ExpectPolygon(OpType::DIFFERENCE, kAlmostFull1, kAlmostEmpty2, kFull); // Test empty SYMMETRIC_DIFFERENCE results. // - Exact result, no input edges. 
ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kEmpty, kEmpty, kEmpty); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kFull, kFull, kEmpty); // - Exact result, union does not intersect all 6 cube faces. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kShell1, kShell1, kEmpty); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kNorthHemi, kNorthHemi, kEmpty); // - Exact result, union intersects all 6 cube faces. This case is only // handled correctly due to the kBiasTowardsEmpty heuristic. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, k6FaceShell1, k6FaceShell1, kEmpty); // - Empty due to snapping, union does not intersect all 6 cube faces. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kShell1Plus, kShell1, kEmpty); // - Empty due to snapping, union intersects all 6 cube faces. This case is // only handled correctly due to the kBiasTowardsEmpty heuristic. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, k6FaceShell1Plus, k6FaceShell1, kEmpty); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, k6FaceShell1Minus, k6FaceShell1, kEmpty); // Test full SYMMETRIC_DIFFERENCE results. // - Exact result, no input edges. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kFull, kEmpty, kFull); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kEmpty, kFull, kFull); // - Exact result, complementary input polygons. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kShell1, kHole1, kFull); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kAlmostEmpty1, kAlmostFull1, kFull); // - Full due to snapping, almost complementary input polygons. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kShell1Plus, kHole1, kFull); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kAlmostFull1, kAlmostEmpty2, kFull); // - Exact result, complementary hemispheres, at least one input does not // intersect all 6 cube faces. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kNorthHemi, kSouthHemi, kFull); // - Exact result, almost complementary hemispheres, at least one input does // not intersect all 6 cube faces. ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, kNorthHemi, kSouthHemiPlus, kFull); // TODO(ericv): The following case is not currently implemented. // - Full result, complementary (to within the snap radius) input polygons // each with an area of approximately 2*Pi, and both polygons intersect all // 6 cube faces. #if 0 ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, k6FaceShell1, k6FaceHole1, kFull); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, k6FaceShell1Plus, k6FaceHole1, kFull); ExpectPolygon(OpType::SYMMETRIC_DIFFERENCE, k6FaceShell1Minus, k6FaceHole1, kFull); #endif } // Tests whether the two S2ShapeIndexes are equal according to // S2BooleanOperation::Equals(). bool TestEqual(string_view a_str, string_view b_str) { auto a = s2textformat::MakeIndexOrDie(a_str); auto b = s2textformat::MakeIndexOrDie(b_str); return S2BooleanOperation::Equals(*a, *b); } // Tests S2BooleanOperation::Equals, which computes the symmetric difference // between two geometries and tests whether the result is empty. // // This also indirectly tests IsEmpty(), which is used to implement Contains() // and Intersects(). 
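// (Illustrative aside, not from the original test file: the same predicates
// can be applied directly to any two S2ShapeIndex objects. The strings below
// use the s2textformat conventions already used throughout this file; the
// two loops are overlapping 10x10 degree squares.)
//
//   auto a = s2textformat::MakeIndexOrDie("# # 0:0, 0:10, 10:10, 10:0");
//   auto b = s2textformat::MakeIndexOrDie("# # 5:5, 5:15, 15:15, 15:5");
//   S2BooleanOperation::Equals(*a, *b);      // false: boundaries differ
//   S2BooleanOperation::Contains(*a, *b);    // false: b extends beyond a
//   S2BooleanOperation::Intersects(*a, *b);  // true: the squares overlap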
TEST(S2BooleanOperation, Equals) { EXPECT_TRUE(TestEqual("# #", "# #")); EXPECT_TRUE(TestEqual("# # full", "# # full")); EXPECT_FALSE(TestEqual("# #", "# # full")); EXPECT_FALSE(TestEqual("0:0 # #", "# #")); EXPECT_FALSE(TestEqual("0:0 # #", "# # full")); EXPECT_FALSE(TestEqual("# 0:0, 1:1 #", "# #")); EXPECT_FALSE(TestEqual("# 0:0, 1:1 #", "# # full")); EXPECT_FALSE(TestEqual("# # 0:0, 0:1, 1:0 ", "# #")); EXPECT_FALSE(TestEqual("# # 0:0, 0:1, 1:0 ", "# # full")); } // Tests Contains() on empty and full geometries. TEST(S2BooleanOperation, ContainsEmptyAndFull) { auto empty = s2textformat::MakeIndexOrDie("# #"); auto full = s2textformat::MakeIndexOrDie("# # full"); EXPECT_TRUE(S2BooleanOperation::Contains(*empty, *empty)); EXPECT_FALSE(S2BooleanOperation::Contains(*empty, *full)); EXPECT_TRUE(S2BooleanOperation::Contains(*full, *empty)); EXPECT_TRUE(S2BooleanOperation::Contains(*full, *full)); } // Tests Intersects() on empty and full geometries. TEST(S2BooleanOperation, IntersectsEmptyAndFull) { auto empty = s2textformat::MakeIndexOrDie("# #"); auto full = s2textformat::MakeIndexOrDie("# # full"); EXPECT_FALSE(S2BooleanOperation::Intersects(*empty, *empty)); EXPECT_FALSE(S2BooleanOperation::Intersects(*empty, *full)); EXPECT_FALSE(S2BooleanOperation::Intersects(*full, *empty)); EXPECT_TRUE(S2BooleanOperation::Intersects(*full, *full)); } } // namespace s2geometry-0.10.0/src/s2/s2buffer_operation.cc000066400000000000000000000756361422156367100211400ustar00rootroot00000000000000// Copyright 2020 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) // // The algorithm below essentially computes the offset curve of the original // boundary, and uses this curve to divide the sphere into regions of constant // winding number. Since winding numbers on the sphere are relative rather // than absolute (see s2winding_operation.h), we also need to keep track of // the desired winding number at a fixed reference point. The initial winding // number for this point is the number of input shapes that contain it. We // then update it during the buffering process by imagining a "sweep edge" // that extends from the current point A on the input boundary to the // corresponding point B on the offset curve. As we process an input loop and // generate the corresponding offset curve, the sweep edge moves continuously // and covers the entire buffer region (i.e., the region added to or // subtracted from the input geometry). We increase the winding number of the // reference point by one whenever it crosses the sweep edge from left to // right, and we decrease the winding number by one whenever it crosses the // sweep edge from right to left. // // Concave vertices require special handling, because the corresponding offset // curve can leave behind regions whose winding number is zero or negative. 
// We handle this by splicing the concave vertex into the offset curve itself; // this effectively terminates the current buffer region and starts a new one, // such that the region of overlap is counted twice (i.e., its winding number // increases by two). The end result is the same as though we had computed // the union of a sequence of buffered convex boundary segments. This trick // is described in the following paper: "Polygon Offsetting by Computing // Winding Numbers" (Chen and McMains, Proceedings of IDETC/CIE 2005). // // TODO(ericv): The algorithm below is much faster than, say, computing the // union of many buffered edges. However further improvements are possible. // In particular, there is an unimplemented optimization that would make it // much faster to buffer concave boundaries when the buffer radius is large. #include "s2/s2buffer_operation.h" #include #include #include #include #include #include "absl/memory/memory.h" #include "s2/s2builder.h" #include "s2/s2builder_layer.h" #include "s2/s2builderutil_snap_functions.h" #include "s2/s2contains_point_query.h" #include "s2/s2edge_crosser.h" #include "s2/s2edge_crossings.h" #include "s2/s2edge_distances.h" #include "s2/s2error.h" #include "s2/s2lax_loop_shape.h" #include "s2/s2predicates_internal.h" #include "s2/s2shape_measures.h" #include "s2/s2shapeutil_contains_brute_force.h" #include "s2/util/math/mathutil.h" using absl::make_unique; using s2pred::DBL_ERR; using std::ceil; using std::max; using std::min; using std::vector; using std::unique_ptr; // The errors due to buffering can be categorized as follows: // // 1. Requested error. This represents the error due to approximating the // buffered boundary as a sequence of line segments rather than a sequence // of circular arcs. It is largely controlled by options.error_fraction(), // and can be bounded as // // max(kMinRequestedError, error_fraction * buffer_radius) // // where kMinRequestedError reflects the fact that S2Points do not have // infinite precision. (For example, it makes no sense to attempt to // buffer geometry by 1e-100 radians because the spacing between // representable S2Points is only about 2e-16 radians in general.) // // 2. Relative interpolation errors. These are numerical errors whose // magnitude is proportional to the buffer radius. For such errors the // worst-case coefficient of proportionality turns out to be so tiny // compared to the smallest allowable error fraction (kMinErrorFraction) // that we simply ignore such errors. // // 3. Absolute interpolation errors. These are numerical errors that are not // proportional to the buffer radius. The main sources of such errors are // (1) calling S2::RobustCrossProd() to compute edge normals, and (2) calls // to S2::GetPointOnRay() to interpolate points along the buffered // boundary. It is possible to show that this error is at most // kMaxAbsoluteInterpolationError as defined below. // // Putting these together, the final error bound looks like this: // // max_error = kMaxAbsoluteInterpolationError + // max(kMinRequestedError, // max(kMinErrorFraction, options.error_fraction()) * // options.buffer_radius()) // The maximum angular spacing between representable S2Points on the unit // sphere is roughly 2 * DBL_ERR. We require the requested absolute error to // be at least this large because attempting to achieve a smaller error does // not increase the precision of the result and can increase the running time // and space requirements considerably. 
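// As a rough worked example of the bound above (illustrative numbers only,
// not taken from the original source): buffering by 1000 m on the Earth's
// surface corresponds to a radius of about 1.57e-4 radians. With the default
// error_fraction of 0.01 the requested error is
// max(kMinRequestedError, 0.01 * 1.57e-4) ~= 1.6e-6 radians, i.e. roughly
// 10 meters, which dwarfs both the ~2e-16 radian floor defined just below
// and the ~10 nanometer absolute interpolation error. Any error introduced
// by the snap function is in addition to this bound.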
static constexpr S1Angle kMinRequestedError = S1Angle::Radians(2 * DBL_ERR); // The maximum absolute error due to interpolating points on the buffered // boundary. The following constant bounds the maximum additional error // perpendicular to the buffered boundary due to all steps of the calculation // (S2::RobustCrossProd, the two calls to GetPointOnRay, etc). // // This distance represents about 10 nanometers on the Earth's surface. Note // that this is a conservative upper bound and that it is difficult to // construct inputs where the error is anywhere close to this large. static constexpr S1Angle kMaxAbsoluteInterpolationError = S2::kGetPointOnLineError + S2::kGetPointOnRayPerpendicularError; // TODO(user, b/210097200): Remove when we require c++17 for opensource. constexpr double S2BufferOperation::Options::kMinErrorFraction; constexpr double S2BufferOperation::Options::kMaxCircleSegments; S2BufferOperation::Options::Options() : snap_function_( make_unique(S1Angle::Zero())) { } S2BufferOperation::Options::Options(S1Angle buffer_radius) : Options() { buffer_radius_ = buffer_radius; } S2BufferOperation::Options::Options(const Options& options) : buffer_radius_(options.buffer_radius_), error_fraction_(options.error_fraction_), end_cap_style_(options.end_cap_style_), polyline_side_(options.polyline_side_), snap_function_(options.snap_function_->Clone()), memory_tracker_(options.memory_tracker_) { } S2BufferOperation::Options& S2BufferOperation::Options::operator=( const Options& options) { buffer_radius_ = options.buffer_radius_; error_fraction_ = options.error_fraction_; end_cap_style_ = options.end_cap_style_; polyline_side_ = options.polyline_side_; snap_function_ = options.snap_function_->Clone(); memory_tracker_ = options.memory_tracker_; return *this; } S1Angle S2BufferOperation::Options::buffer_radius() const { return buffer_radius_; } void S2BufferOperation::Options::set_buffer_radius(S1Angle buffer_radius) { buffer_radius_ = buffer_radius; } double S2BufferOperation::Options::error_fraction() const { return error_fraction_; } void S2BufferOperation::Options::set_error_fraction(double error_fraction) { S2_DCHECK_GE(error_fraction, kMinErrorFraction); S2_DCHECK_LE(error_fraction, 1.0); error_fraction_ = max(kMinErrorFraction, min(1.0, error_fraction)); } const S1Angle S2BufferOperation::Options::max_error() const { // See comments for kMinRequestedError above. S2Builder::Options builder_options(*snap_function_); builder_options.set_split_crossing_edges(true); return max(kMinRequestedError, error_fraction_ * abs(buffer_radius_)) + kMaxAbsoluteInterpolationError + builder_options.max_edge_deviation(); } double S2BufferOperation::Options::circle_segments() const { #if 0 // This formula assumes that vertices can be placed anywhere. TODO(ericv). return M_PI / acos((1 - error_fraction_) / (1 + error_fraction_)); #else // This formula assumes that all vertices are placed on the midline. return M_PI / acos(1 - error_fraction_); #endif } void S2BufferOperation::Options::set_circle_segments(double circle_segments) { S2_DCHECK_GE(circle_segments, 2.0); S2_DCHECK_LE(circle_segments, kMaxCircleSegments); circle_segments = max(2.0, min(kMaxCircleSegments, circle_segments)); // We convert circle_segments to error_fraction using planar geometry, // because the number of segments required to approximate a circle on the // sphere to within a given tolerance is not constant. 
Unlike in the plane, // the total curvature of a circle on the sphere decreases as the area // enclosed by the circle increases; great circles have no curvature at all. // We round up when converting to ensure that we won't generate any tiny // extra edges. // #if 0 // Note that we take advantage of both positive and negative errors when // approximating circles (i.e., vertices are not necessarily on the midline) // and thus the relationships between circle_segments and error_fraction are // e = (1 - cos(Pi/n)) / (1 + cos(Pi/n)) // n = Pi / acos((1 - e) / (1 + e)) double r = cos(M_PI / circle_segments); set_error_fraction((1 - r) / (1 + r) + 1e-15); #else // When all vertices are on the midline, the relationships are // e = 1 - cos(Pi/n) // n = Pi / acos(1 - e) set_error_fraction(1 - cos(M_PI / circle_segments) + 1e-15); #endif } S2BufferOperation::EndCapStyle S2BufferOperation::Options::end_cap_style() const { return end_cap_style_; } void S2BufferOperation::Options::set_end_cap_style(EndCapStyle end_cap_style) { end_cap_style_ = end_cap_style; } S2BufferOperation::PolylineSide S2BufferOperation::Options::polyline_side() const { return polyline_side_; } void S2BufferOperation::Options::set_polyline_side( PolylineSide polyline_side) { polyline_side_ = polyline_side; } const S2Builder::SnapFunction& S2BufferOperation::Options::snap_function() const { return *snap_function_; } void S2BufferOperation::Options::set_snap_function( const S2Builder::SnapFunction& snap_function) { snap_function_ = snap_function.Clone(); } S2MemoryTracker* S2BufferOperation::Options::memory_tracker() const { return memory_tracker_; } void S2BufferOperation::Options::set_memory_tracker(S2MemoryTracker* tracker) { memory_tracker_ = tracker; } S2BufferOperation::S2BufferOperation() { } S2BufferOperation::S2BufferOperation(unique_ptr result_layer, const Options& options) { Init(std::move(result_layer), options); } void S2BufferOperation::Init(std::unique_ptr result_layer, const Options& options) { options_ = options; ref_point_ = S2::Origin(); ref_winding_ = 0; have_input_start_ = false; have_offset_start_ = false; buffer_sign_ = sgn(options_.buffer_radius().radians()); S1Angle abs_radius = abs(options_.buffer_radius()); S1Angle requested_error = max(kMinRequestedError, options_.error_fraction() * abs_radius); S1Angle max_error = kMaxAbsoluteInterpolationError + requested_error; if (abs_radius <= max_error) { // If the requested radius is smaller than the maximum error, buffering // could yield points on the wrong side of the original input boundary // (e.g., shrinking geometry slightly rather than expanding it). Rather // than taking that risk, we set the buffer radius to zero when this // happens (which causes the original geometry to be returned). abs_radius_ = S1ChordAngle::Zero(); buffer_sign_ = 0; } else if (abs_radius + max_error >= S1Angle::Radians(M_PI)) { // If the permissible range of buffer angles includes Pi then we might // as well take advantage of that. abs_radius_ = S1ChordAngle::Straight(); } else { abs_radius_ = S1ChordAngle(abs_radius); S1Angle vertex_step = GetMaxEdgeSpan(abs_radius, requested_error); vertex_step_ = S1ChordAngle(vertex_step); // We take extra care to ensure that points are buffered as regular // polygons. The step angle is adjusted up slightly to ensure that we // don't wind up with a tiny extra edge. 
point_step_ = S1ChordAngle::Radians( 2 * M_PI / ceil(2 * M_PI / vertex_step.radians()) + 1e-15); // Edges are buffered only if the buffer radius (including permissible // error) is less than 90 degrees. S1Angle edge_radius = S1Angle::Radians(M_PI_2) - abs_radius; if (edge_radius > max_error) { edge_step_ = S1ChordAngle(GetMaxEdgeSpan(edge_radius, requested_error)); } } // The buffered output should include degeneracies (i.e., isolated points // and/or sibling edge pairs) only if (1) the user specified a non-negative // buffer radius, and (2) the adjusted buffer radius is zero. The only // purpose of keeping degeneracies is to allow points/polylines in the input // geometry to be converted back to points/polylines in the output if the // client so desires. S2WindingOperation::Options winding_options{options.snap_function()}; winding_options.set_include_degeneracies( buffer_sign_ == 0 && options_.buffer_radius() >= S1Angle::Zero()); winding_options.set_memory_tracker(options.memory_tracker()); op_.Init(std::move(result_layer), winding_options); tracker_.Init(options.memory_tracker()); } const S2BufferOperation::Options& S2BufferOperation::options() const { return options_; } S1Angle S2BufferOperation::GetMaxEdgeSpan(S1Angle radius, S1Angle requested_error) const { // If the allowable radius range spans Pi/2 then we can use edges as long as // we like, however we always use at least 3 edges to approximate a circle. S1Angle step = S1Angle::Radians(2 * M_PI / 3 + 1e-15); S1Angle min_radius = radius - requested_error; S2_DCHECK_GE(min_radius, S1Angle::Zero()); if (radius.radians() < M_PI_2) { step = min(step, S1Angle::Radians(2 * acos(tan(min_radius) / tan(radius)))); } else if (min_radius.radians() > M_PI_2) { step = min(step, S1Angle::Radians(2 * acos(tan(radius) / tan(min_radius)))); } return step; } // The sweep edge AB (see introduction) consists of one point on the input // boundary (A) and one point on the offset curve (B). This function advances // the sweep edge by moving its first vertex A to "new_a" and updating the // winding number of the reference point if necessary. void S2BufferOperation::SetInputVertex(const S2Point& new_a) { if (have_input_start_) { S2_DCHECK(have_offset_start_); UpdateRefWinding(sweep_a_, sweep_b_, new_a); } else { input_start_ = new_a; have_input_start_ = true; } sweep_a_ = new_a; } // Adds the point "new_b" to the offset path. Also advances the sweep edge AB // by moving its second vertex B to "new_b" and updating the winding number of // the reference point if necessary (see introduction). void S2BufferOperation::AddOffsetVertex(const S2Point& new_b) { if (!tracker_.AddSpace(&path_, 1)) return; path_.push_back(new_b); if (have_offset_start_) { S2_DCHECK(have_input_start_); UpdateRefWinding(sweep_a_, sweep_b_, new_b); } else { offset_start_ = new_b; have_offset_start_ = true; } sweep_b_ = new_b; } // Finishes buffering the current loop by advancing the sweep edge back to its // starting location, updating the winding number of the reference point if // necessary. void S2BufferOperation::CloseBufferRegion() { if (have_offset_start_ && have_input_start_) { UpdateRefWinding(sweep_a_, sweep_b_, input_start_); UpdateRefWinding(input_start_, sweep_b_, offset_start_); } } // Outputs the current buffered path (which is assumed to be a loop), and // resets the state to prepare for buffering a new loop. void S2BufferOperation::OutputPath() { op_.AddLoop(path_); path_.clear(); // Does not change capacity. 
have_input_start_ = false; have_offset_start_ = false; } // Given a triangle ABC that has just been covered by the sweep edge AB, // updates the winding number of the reference point if necessary. void S2BufferOperation::UpdateRefWinding( const S2Point& a, const S2Point& b, const S2Point& c) { // TODO(ericv): This code could be made much faster by maintaining a // bounding plane that separates the current sweep edge from the reference // point. Whenever the sweep_a_ or sweep_b_ is updated we would just need // to check that the new vertex is still on the opposite side of the // bounding plane (i.e., one dot product). If not, we test the current // triangle using the code below and then compute a new bounding plane. // // Another optimization would be to choose the reference point to be 90 // degrees away from the first input vertex, since then triangle tests would // not be needed unless the input geometry spans more than 90 degrees. This // would involve adding a new flag have_ref_point_ rather than always // choosing the reference point to be S2::Origin(). // // According to profiling these optimizations are not currently worthwhile, // but this is worth revisiting if and when other improvements are made. int sign = s2pred::Sign(a, b, c); if (sign == 0) return; bool inside = S2::AngleContainsVertex(a, b, c) == (sign > 0); S2EdgeCrosser crosser(&b, &ref_point_); inside ^= crosser.EdgeOrVertexCrossing(&a, &b); inside ^= crosser.EdgeOrVertexCrossing(&b, &c); inside ^= crosser.EdgeOrVertexCrossing(&c, &a); if (inside) ref_winding_ += sign; } // Ensures that the output will be the full polygon. void S2BufferOperation::AddFullPolygon() { ref_winding_ += 1; } void S2BufferOperation::AddPoint(const S2Point& point) { // If buffer_radius < 0, points are discarded. if (buffer_sign_ < 0) return; // Buffering by 180 degrees or more always yields the full polygon. // (We don't need to worry about buffering by 180 degrees yielding // a degenerate hole because error_fraction_ is always positive. if (abs_radius_ >= S1ChordAngle::Straight()) { return AddFullPolygon(); } // If buffer_radius == 0, points are converted into degenerate loops. if (buffer_sign_ == 0) { if (!tracker_.AddSpace(&path_, 1)) return; path_.push_back(point); } else { // Since S1ChordAngle can only represent angles between 0 and 180 degrees, // we generate the circle in four 90 degree increments. SetInputVertex(point); S2Point start = S2::Ortho(point); S1ChordAngle angle = S1ChordAngle::Zero(); for (int quadrant = 0; quadrant < 4; ++quadrant) { // Generate 90 degrees of the circular arc. Normalize "rotate_dir" at // each iteration to avoid magnifying normalization errors in "point". S2Point rotate_dir = point.CrossProd(start).Normalize(); for (; angle < S1ChordAngle::Right(); angle += point_step_) { S2Point dir = S2::GetPointOnRay(start, rotate_dir, angle); AddOffsetVertex(S2::GetPointOnRay(point, dir, abs_radius_)); } angle -= S1ChordAngle::Right(); start = rotate_dir; } CloseBufferRegion(); } OutputPath(); } // Returns the edge normal for the given edge AB. The sign is chosen such // that the normal is on the right of AB if buffer_sign_ > 0, and on the left // of AB if buffer_sign_ < 0. inline S2Point S2BufferOperation::GetEdgeAxis(const S2Point& a, const S2Point& b) const { S2_DCHECK_NE(buffer_sign_, 0); return buffer_sign_ * S2::RobustCrossProd(b, a).Normalize(); } // Adds a semi-open offset arc around vertex V. The arc proceeds CCW from // "start" to "end" (both of which must be perpendicular to V). 
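// For a rough sense of scale (illustrative numbers, not from the original
// source): with radius = 1 degree and requested_error = 0.01 degree, the
// small-radius branch of GetMaxEdgeSpan() above gives
//
//   step = 2 * acos(tan(0.99 deg) / tan(1 deg)) ~= 0.283 radians (~16 deg),
//
// so the arc below emits an offset vertex roughly every 16 degrees of
// rotation, or about 22 vertices for a full circle at that radius.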
void S2BufferOperation::AddVertexArc(const S2Point& v, const S2Point& start, const S2Point& end) { // Make sure that we output at least one point even when span == 0. S2Point rotate_dir = buffer_sign_ * v.CrossProd(start).Normalize(); S1ChordAngle angle, span(start, end); do { S2Point dir = S2::GetPointOnRay(start, rotate_dir, angle); AddOffsetVertex(S2::GetPointOnRay(v, dir, abs_radius_)); } while ((angle += vertex_step_) < span); } // Closes the semi-open arc generated by AddVertexArc(). void S2BufferOperation::CloseVertexArc(const S2Point& v, const S2Point& end) { AddOffsetVertex(S2::GetPointOnRay(v, end, abs_radius_)); } // Adds a semi-open offset arc for the given edge AB. void S2BufferOperation::AddEdgeArc(const S2Point& a, const S2Point& b) { S2Point ab_axis = GetEdgeAxis(a, b); if (edge_step_ == S1ChordAngle::Zero()) { // If the buffer radius is more than 90 degrees, edges do not contribute to // the buffered boundary. Instead we force the offset path to pass // through a vertex located at the edge normal. This is similar to the // case of concave vertices (below) where it is necessary to route the // offset path through the concave vertex to ensure that the winding // numbers in all output regions have the correct sign. AddOffsetVertex(ab_axis); } else { // Make sure that we output at least one point even when span == 0. S2Point rotate_dir = buffer_sign_ * a.CrossProd(ab_axis).Normalize(); S1ChordAngle angle, span(a, b); do { S2Point p = S2::GetPointOnRay(a, rotate_dir, angle); AddOffsetVertex(S2::GetPointOnRay(p, ab_axis, abs_radius_)); } while ((angle += edge_step_) < span); } SetInputVertex(b); } // Closes the semi-open arc generated by AddEdgeArc(). void S2BufferOperation::CloseEdgeArc(const S2Point& a, const S2Point& b) { if (edge_step_ != S1ChordAngle::Zero()) { AddOffsetVertex(S2::GetPointOnRay(b, GetEdgeAxis(a, b), abs_radius_)); } } // Buffers the edge AB and the vertex B. (The vertex C is used to determine // the range of angles that should be buffered at B.) // // TODO(ericv): Let A* denote the possible offset points of A with respect to // the edge AB for buffer radii in the range specified by "radius" and // "error_fraction". Rather than requiring that the path so far terminates at // a point in A*, as you might expect, instead we only require that the path // terminates at a point X such that for any point Y in A*, the edge XY does // not leave the valid buffer zone of the previous edge and vertex. void S2BufferOperation::BufferEdgeAndVertex(const S2Point& a, const S2Point& b, const S2Point& c) { S2_DCHECK_NE(a, b); S2_DCHECK_NE(b, c); S2_DCHECK_NE(buffer_sign_, 0); if (!tracker_.ok()) return; // For left (convex) turns we need to add an offset arc. For right // (concave) turns we connect the end of the current offset path to the // vertex itself and then to the start of the offset path for the next edge. // Note that A == C is considered to represent a convex (left) turn. AddEdgeArc(a, b); if (buffer_sign_ * s2pred::Sign(a, b, c) >= 0) { // The boundary makes a convex turn. If there is no following edge arc // then we need to generate a closed vertex arc. S2Point start = GetEdgeAxis(a, b); S2Point end = GetEdgeAxis(b, c); AddVertexArc(b, start, end); if (edge_step_ == S1ChordAngle::Zero()) CloseVertexArc(b, end); } else { // The boundary makes a concave turn. It is tempting to simply connect // the end of the current offset path to the start of the offset path for // the next edge, however this can create output regions where the winding // number is incorrect. 
A solution that always works is to terminate the // current offset path and start a new one by connecting the two offset // paths through the input vertex whenever it is concave. We first need // to close the previous semi-open edge arc if necessary. CloseEdgeArc(a, b); AddOffsetVertex(b); // Connect through the input vertex. } } // Given a polyline that starts with the edge AB, adds an end cap (as // specified by end_cap_style() and polyline_side()) for the vertex A. void S2BufferOperation::AddStartCap(const S2Point& a, const S2Point& b) { S2Point axis = GetEdgeAxis(a, b); if (options_.end_cap_style() == EndCapStyle::FLAT) { // One-sided flat end caps require no additional vertices since the // "offset curve" for the opposite side is simply the reversed polyline. if (options_.polyline_side() == PolylineSide::BOTH) { AddOffsetVertex(S2::GetPointOnRay(a, -axis, abs_radius_)); } } else { S2_DCHECK(options_.end_cap_style() == EndCapStyle::ROUND); if (options_.polyline_side() == PolylineSide::BOTH) { // The end cap consists of a semicircle. AddVertexArc(a, -axis, axis); } else { // The end cap consists of a quarter circle. Note that for // PolylineSide::LEFT, the polyline direction has been reversed. AddVertexArc(a, axis.CrossProd(a).Normalize(), axis); } } } // Given a polyline that ends with the edge AB, adds an end cap (as specified // by end_cap_style() and polyline_side()) for the vertex B. void S2BufferOperation::AddEndCap(const S2Point& a, const S2Point& b) { S2Point axis = GetEdgeAxis(a, b); if (options_.end_cap_style() == EndCapStyle::FLAT) { CloseEdgeArc(a, b); // Close the previous semi-open edge arc if necessary. } else { S2_DCHECK(options_.end_cap_style() == EndCapStyle::ROUND); if (options_.polyline_side() == PolylineSide::BOTH) { // The end cap consists of a semicircle. AddVertexArc(b, axis, -axis); } else { // The end cap consists of a quarter circle. We close the arc since it // will be followed by the reversed polyline vertices. Note that for // PolylineSide::LEFT, the polyline direction has been reversed. S2Point end = b.CrossProd(axis).Normalize(); AddVertexArc(b, axis, end); CloseVertexArc(b, end); } } } // Helper function that buffers the given loop. void S2BufferOperation::BufferLoop(S2PointLoopSpan loop) { // Empty loops always yield an empty path. if (loop.empty() || !tracker_.ok()) return; // Loops with one degenerate edge are treated as points. if (loop.size() == 1) return AddPoint(loop[0]); // Buffering by 180 degrees or more always yields the full polygon. // Buffering by -180 degrees or more always yields the empty polygon. if (abs_radius_ >= S1ChordAngle::Straight()) { if (buffer_sign_ > 0) AddFullPolygon(); return; } // If buffer_radius == 0, the loop is passed through unchanged. if (buffer_sign_ == 0) { if (!tracker_.AddSpace(&path_, loop.size())) return; path_.assign(loop.begin(), loop.end()); } else { SetInputVertex(loop[0]); for (int i = 0; i < loop.size(); ++i) { BufferEdgeAndVertex(loop[i], loop[i + 1], loop[i + 2]); } CloseBufferRegion(); } OutputPath(); } void S2BufferOperation::AddPolyline(S2PointSpan polyline) { // Left-sided buffering is supported by reversing the polyline and then // buffering on the right. vector reversed; if (options_.polyline_side() == PolylineSide::LEFT) { reversed.reserve(polyline.size()); std::reverse_copy(polyline.begin(), polyline.end(), std::back_inserter(reversed)); polyline = reversed; } // If buffer_radius < 0, polylines are discarded. 
if (buffer_sign_ < 0 || !tracker_.ok()) return; // Polylines with 0 or 1 vertices are defined to have no edges. int n = polyline.size(); if (n <= 1) return; // Polylines with one degenerate edge are treated as points. if (n == 2 && polyline[0] == polyline[1]) { return AddPoint(polyline[0]); } // Buffering by 180 degrees or more always yields the full polygon. if (abs_radius_ >= S1ChordAngle::Straight()) { return AddFullPolygon(); } // If buffer_radius == 0, polylines are converted into degenerate loops. if (buffer_sign_ == 0) { if (!tracker_.AddSpace(&path_, 2 * (n - 1))) return; path_.assign(polyline.begin(), polyline.end() - 1); path_.insert(path_.end(), polyline.rbegin(), polyline.rend() - 1); } else { // Otherwise we buffer each side of the polyline separately. SetInputVertex(polyline[0]); AddStartCap(polyline[0], polyline[1]); for (int i = 0; i < n - 2; ++i) { BufferEdgeAndVertex(polyline[i], polyline[i + 1], polyline[i + 2]); } AddEdgeArc(polyline[n - 2], polyline[n - 1]); AddEndCap(polyline[n - 2], polyline[n - 1]); if (options_.polyline_side() == PolylineSide::BOTH) { for (int i = n - 3; i >= 0; --i) { BufferEdgeAndVertex(polyline[i + 2], polyline[i + 1], polyline[i]); } AddEdgeArc(polyline[1], polyline[0]); CloseBufferRegion(); } else { // The other side of the polyline is not buffered. Note that for // PolylineSide::LEFT, the polyline direction has been reversed. if (!tracker_.AddSpace(&path_, n)) return; path_.insert(path_.end(), polyline.rbegin(), polyline.rend()); // Don't call CloseBufferRegion() since the path has already been closed. } } OutputPath(); } void S2BufferOperation::AddLoop(S2PointLoopSpan loop) { if (loop.empty()) return; BufferLoop(loop); // The vertex copying below could be avoided by adding a version of // S2LaxLoopShape that doesn't own its vertices. if (!tracker_.ok()) return; ref_winding_ += s2shapeutil::ContainsBruteForce(S2LaxLoopShape(loop), ref_point_); num_polygon_layers_ += 1; } void S2BufferOperation::BufferShape(const S2Shape& shape) { int dimension = shape.dimension(); int num_chains = shape.num_chains(); for (int c = 0; c < num_chains; ++c) { S2Shape::Chain chain = shape.chain(c); if (chain.length == 0) continue; if (dimension == 0) { AddPoint(shape.edge(c).v0); } else { S2::GetChainVertices(shape, c, &tmp_vertices_); if (dimension == 1) { AddPolyline(S2PointSpan(tmp_vertices_)); } else { BufferLoop(S2PointLoopSpan(tmp_vertices_)); } } } } void S2BufferOperation::AddShape(const S2Shape& shape) { BufferShape(shape); ref_winding_ += s2shapeutil::ContainsBruteForce(shape, ref_point_); num_polygon_layers_ += (shape.dimension() == 2); } void S2BufferOperation::AddShapeIndex(const S2ShapeIndex& index) { int max_dimension = -1; for (const S2Shape* shape : index) { if (shape == nullptr) continue; max_dimension = max(max_dimension, shape->dimension()); BufferShape(*shape); } ref_winding_ += MakeS2ContainsPointQuery(&index).Contains(ref_point_); num_polygon_layers_ += (max_dimension == 2); } bool S2BufferOperation::Build(S2Error* error) { if (buffer_sign_ < 0 && num_polygon_layers_ > 1) { error->Init(S2Error::FAILED_PRECONDITION, "Negative buffer radius requires at most one polygon layer"); return false; } return op_.Build(ref_point_, ref_winding_, S2WindingOperation::WindingRule::POSITIVE, error); } s2geometry-0.10.0/src/s2/s2buffer_operation.h000066400000000000000000000361061422156367100207670ustar00rootroot00000000000000// Copyright 2020 Google Inc. All Rights Reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #ifndef S2_S2BUFFER_OPERATION_H_ #define S2_S2BUFFER_OPERATION_H_ #include #include "s2/s1angle.h" #include "s2/s1chord_angle.h" #include "s2/s2builder.h" #include "s2/s2point_span.h" #include "s2/s2winding_operation.h" // This class provides a way to expand an arbitrary collection of geometry by // a fixed radius (an operation variously known as "buffering", "offsetting", // or "Minkowski sum with a disc"). The output consists of a polygon // (possibly with multiple shells) that contains all points within the given // radius of the original geometry. // // The radius can also be negative, in which case the geometry is contracted. // This causes the boundaries of polygons to shrink or disappear, and removes // all points and polylines. // // The input consists of a sequence of layers. Each layer may consist of any // combination of points, polylines, and polygons, with the restriction that // polygon interiors within each layer may not intersect any other geometry // (including other polygon interiors). The output is the union of the // buffered input layers. Note that only a single layer is allowed if the // buffer radius is negative. // // This class may be used to compute polygon unions by setting the buffer // radius to zero. The union is computed using a single snapping operation. // // Note that if you only want to compute an S2CellId covering of the buffered // geometry, it is much faster to use S2ShapeIndexBufferedRegion instead. // // Keywords: buffer, buffering, expand, expanding, offset, offsetting, // widen, contract, shrink, Minkowski sum class S2BufferOperation { public: // For polylines, specifies whether the end caps should be round or flat. // See Options::set_end_cap_style() below. enum class EndCapStyle : uint8 { ROUND, FLAT }; // Specifies whether polylines should be buffered only on the left, only on // the right, or on both sides. enum class PolylineSide : uint8 { LEFT, RIGHT, BOTH }; class Options { public: Options(); // Convenience constructor that calls set_buffer_radius(). explicit Options(S1Angle buffer_radius); // If positive, specifies that all points within the given radius of the // input geometry should be added to the output. If negative, specifies // that all points within the given radius of complement of the input // geometry should be subtracted from the output. If the buffer radius // is zero then the input geometry is passed through to the output layer // after first converting points and polylines into degenerate loops. // // DEFAULT: S1Angle::Zero() S1Angle buffer_radius() const; void set_buffer_radius(S1Angle buffer_radius); // Specifies the allowable error when buffering, expressed as a fraction // of buffer_radius(). The actual buffer distance will be in the range // [(1-f) * r - C, (1 + f) * r + C] where "f" is the error fraction, "r" // is the buffer radius, and "C" is S2BufferOperation::kAbsError. 
// // Be aware that the number of output edges increases proportionally to // (1 / sqrt(error_fraction)), so setting a small value may increase the // size of the output considerably. // // REQUIRES: error_fraction() >= kMinErrorFraction // REQUIRES: error_fraction() <= 1.0 // // DEFAULT: 0.01 (i.e., maximum error of 1%) static constexpr double kMinErrorFraction = 1e-6; double error_fraction() const; void set_error_fraction(double error_fraction); // Returns the maximum error in the buffered result for the current // buffer_radius(), error_fraction(), and snap_function(). Note that the // error due to buffering consists of both relative errors (those // proportional to the buffer radius) and absolute errors. The maximum // relative error is controlled by error_fraction(), while the maximum // absolute error is about 10 nanometers on the Earth's surface and is // defined internally. The error due to snapping is defined by the // specified snap_function(). const S1Angle max_error() const; // Alternatively, error_fraction() may be specified as the number of // polyline segments used to approximate a planar circle. These two // values are related according to the formula // // error_fraction = (1 - cos(theta)) / (1 + cos(theta)) // ~= 0.25 * (theta ** 2) // // where (theta == Pi / circle_segments), i.e. error decreases // quadratically with the number of circle segments. // // REQUIRES: circle_segments() >= 2.0 // REQUIRES: circle_segments() <= kMaxCircleSegments // (about 1570; corresponds to kMinErrorFraction) // // DEFAULT: about 15.76 (corresponding to error_fraction() default value) static constexpr double kMaxCircleSegments = 1570.7968503979573; double circle_segments() const; void set_circle_segments(double circle_segments); // For polylines, specifies whether the end caps should be round or flat. // // Note that with flat end caps, there is no buffering beyond the polyline // endpoints (unlike "square" end caps, which are not implemented). // // DEFAULT: EndCapStyle::ROUND EndCapStyle end_cap_style() const; void set_end_cap_style(EndCapStyle end_cap_style); // Specifies whether polylines should be buffered only on the left, only // on the right, or on both sides. For one-sided buffering please note // the following: // // - EndCapStyle::ROUND yields two quarter-circles, one at each end. // // - To buffer by a different radius on each side of the polyline, you // can use two S2BufferOperations and compute their union. (Note that // round end caps will yield two quarter-circles at each end of the // polyline with different radii.) // // - Polylines consisting of a single degenerate edge are always buffered // identically to points, i.e. this option has no effect. // // - When the polyline turns right by more than 90 degrees, buffering may // or may not extend to the non-buffered side of the polyline. For // example if ABC makes a 170 degree right turn at B, it is unspecified // whether the buffering of AB extends across edge BC and vice versa. // Similarly if ABCD represents two right turns of 90 degrees where AB // and CD are separated by less than the buffer radius, it is // unspecified whether buffering of AB extends across CD and vice versa. // // DEFAULT: PolylineSide::BOTH PolylineSide polyline_side() const; void set_polyline_side(PolylineSide polyline_side); // Specifies the function used for snap rounding the output during the // call to Build(). Note that any errors due to snapping are in addition // to those specified by error_fraction(). 
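// For example (one possible configuration; the radius value here is purely illustrative), // snapping the output to E7 lat/lng coordinates could be requested as follows: // // S2BufferOperation::Options options; // options.set_buffer_radius(S1Angle::Degrees(0.1)); // options.set_snap_function(s2builderutil::IntLatLngSnapFunction(7));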
// // DEFAULT: s2builderutil::IdentitySnapFunction(S1Angle::Zero()) const S2Builder::SnapFunction& snap_function() const; void set_snap_function(const S2Builder::SnapFunction& snap_function); // Specifies that internal memory usage should be tracked using the given // S2MemoryTracker. If a memory limit is specified and more memory // than this is required then an error will be returned. Example usage: // // S2MemoryTracker tracker; // tracker.set_limit(500 << 20); // 500 MB // S2BufferOperation::Options options; // options.set_buffer_radius(S1Angle::Degrees(1e-5)); // options.set_memory_tracker(&tracker); // S2BufferOperation op{options}; // ... // S2Error error; // if (!op.Build(&error)) { // if (error.code() == S2Error::RESOURCE_EXHAUSTED) { // S2_LOG(ERROR) << error; // Memory limit exceeded // } // } // // CAVEATS: // // - Memory allocated by the output S2Builder layer is not tracked. // // - While memory tracking is reasonably complete and accurate, it does // not account for every last byte. It is intended only for the // purpose of preventing clients from running out of memory. // // DEFAULT: nullptr (memory tracking disabled) S2MemoryTracker* memory_tracker() const; void set_memory_tracker(S2MemoryTracker* tracker); // Options may be assigned and copied. Options(const Options& options); Options& operator=(const Options& options); private: S1Angle buffer_radius_ = S1Angle::Zero(); double error_fraction_ = 0.01; EndCapStyle end_cap_style_ = EndCapStyle::ROUND; PolylineSide polyline_side_ = PolylineSide::BOTH; std::unique_ptr<S2Builder::SnapFunction> snap_function_; S2MemoryTracker* memory_tracker_ = nullptr; }; // Default constructor; requires Init() to be called. S2BufferOperation(); // Convenience constructor that calls Init(). explicit S2BufferOperation(std::unique_ptr<S2Builder::Layer> result_layer, const Options& options = Options{}); // Starts a buffer operation that sends the output polygon to the given // S2Builder layer. This method may be called more than once. // // Note that buffering always yields a polygon, even if the input includes // polylines and points. If the buffer radius is zero, points and polylines // will be converted into degenerate polygon loops; if the buffer radius is // negative, points and polylines will be removed. void Init(std::unique_ptr<S2Builder::Layer> result_layer, const Options& options = Options()); const Options& options() const; // Each call below represents a different input layer. Note that if the // buffer radius is negative, then at most one input layer is allowed // (ignoring any layers that contain only points and polylines). // Adds an input layer containing a single point. void AddPoint(const S2Point& point); // Adds an input layer containing a polyline. Note the following: // // - Polylines with 0 or 1 vertices are considered to be empty. // - A polyline with 2 identical vertices is equivalent to a point. // - Polylines have end caps (see Options::end_cap_style). // - One-sided polyline buffering is supported (see Options::polyline_side). void AddPolyline(S2PointSpan polyline); // Adds an input layer containing a loop. Note the following: // // - A loop with no vertices is empty. // - A loop with 1 vertex is equivalent to a point. // - The interior of the loop is on its left. // - Buffering a self-intersecting loop produces undefined results. void AddLoop(S2PointLoopSpan loop); // Adds an input layer containing the given shape. Shapes are handled as // points, polylines, or polygons according to the rules above.
In addition // note the following: // // - Polygon holes may be degenerate (e.g., consisting of a // single vertex or entirely of sibling pairs such as ABCBDB). // - Full polygons are supported. Note that since full polygons do // not have a boundary, they are not affected by buffering. void AddShape(const S2Shape& shape); // Adds an input layer containing all of the shapes in the given index. // // REQUIRES: The interiors of polygons must be disjoint from all other // indexed geometry, including other polygon interiors. // (S2BooleanOperation also requires this.) void AddShapeIndex(const S2ShapeIndex& index); // Computes the union of the buffered input shapes and sends the output // polygon to the S2Builder layer specified in the constructor. Returns // true on success and otherwise sets "error" appropriately. // // Note that if the buffer radius is negative, only a single input layer is // allowed (ignoring any layers that contain only points and polylines). bool Build(S2Error* error); private: S1Angle GetMaxEdgeSpan(S1Angle radius, S1Angle requested_error) const; void SetInputVertex(const S2Point& new_a); void AddOffsetVertex(const S2Point& new_b); void CloseBufferRegion(); void OutputPath(); void UpdateRefWinding(const S2Point& a, const S2Point& b, const S2Point& c); void AddFullPolygon(); S2Point GetEdgeAxis(const S2Point& a, const S2Point& b) const; void AddVertexArc(const S2Point& v, const S2Point& start, const S2Point& end); void CloseVertexArc(const S2Point& v, const S2Point& end); void AddEdgeArc(const S2Point& a, const S2Point& b); void CloseEdgeArc(const S2Point& a, const S2Point& b); void BufferEdgeAndVertex(const S2Point& a, const S2Point& b, const S2Point& c); void AddStartCap(const S2Point& a, const S2Point& b); void AddEndCap(const S2Point& a, const S2Point& b); void BufferLoop(S2PointLoopSpan loop); void BufferShape(const S2Shape& shape); Options options_; // The number of layers containing two-dimensional geometry that have been // added so far. This is used to enforce the requirement that negative // buffer radii allow only a single such layer. int num_polygon_layers_ = 0; // Parameters for buffering vertices and edges. int buffer_sign_; // The sign of buffer_radius (-1, 0, or +1). S1ChordAngle abs_radius_; S1ChordAngle vertex_step_, edge_step_; // We go to extra effort to ensure that points are transformed into regular // polygons. (We don't do this for arcs in general because we would rather // use the allowable error to reduce the complexity of the output rather // than increase its symmetry.) S1ChordAngle point_step_; // Contains the buffered loops that have been accumulated so far. S2WindingOperation op_; // The current offset path. When each path is completed into a loop it is // added to op_ (the S2WindingOperation). std::vector<S2Point> path_; // As buffered loops are added we keep track of the winding number of a // fixed reference point. This is used to derive the winding numbers of // every region in the spherical partition induced by the buffered loops. S2Point ref_point_; // The winding number associated with ref_point_. int ref_winding_; // The endpoints of the current sweep edge. sweep_a_ is a vertex of the // original geometry and sweep_b_ is a vertex of the current offset path. S2Point sweep_a_, sweep_b_; // The starting vertices of the current input loop and offset curve. These // are used to close the buffer region when a loop is completed.
S2Point input_start_, offset_start_; bool have_input_start_, have_offset_start_; // Used internally as a temporary to avoid excessive memory allocation. std::vector tmp_vertices_; S2MemoryTracker::Client tracker_; }; #endif // S2_S2BUFFER_OPERATION_H_ s2geometry-0.10.0/src/s2/s2buffer_operation_test.cc000066400000000000000000000664031422156367100221670ustar00rootroot00000000000000// Copyright 2020 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/s2buffer_operation.h" #include #include #include #include #include #include #include "s2/base/casts.h" #include "s2/base/logging.h" #include #include "absl/base/call_once.h" #include "absl/flags/flag.h" #include "absl/memory/memory.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "s2/s1angle.h" #include "s2/s2builderutil_lax_polygon_layer.h" #include "s2/s2builderutil_snap_functions.h" #include "s2/s2closest_edge_query.h" #include "s2/s2contains_point_query.h" #include "s2/s2error.h" #include "s2/s2lax_loop_shape.h" #include "s2/s2lax_polygon_shape.h" #include "s2/s2lax_polyline_shape.h" #include "s2/s2loop.h" #include "s2/s2metrics.h" #include "s2/s2point_vector_shape.h" #include "s2/s2shape_measures.h" #include "s2/s2testing.h" #include "s2/s2text_format.h" using absl::make_unique; using std::max; using std::string; using std::unique_ptr; using std::vector; using EndCapStyle = S2BufferOperation::EndCapStyle; using PolylineSide = S2BufferOperation::PolylineSide; namespace { // A callback that allows adding input to an S2BufferOperation. This is // convenient for testing the various input methods (AddPoint, AddShape, etc). using InputCallback = std::function; // Convenience function that calls the given lambda expression to add input to // an S2BufferOperation and returns the buffered result. unique_ptr DoBuffer( InputCallback input_callback, const S2BufferOperation::Options& options) { auto output = make_unique(); S2BufferOperation op( make_unique(output.get()), options); input_callback(&op); S2Error error; EXPECT_TRUE(op.Build(&error)) << error; if (S2_VLOG_IS_ON(1) && output->num_vertices() < 1000) { std::cerr << "\nS2Polygon: " << s2textformat::ToString(*output) << "\n"; } return output; } // Simpler version that accepts a buffer radius and error fraction. unique_ptr DoBuffer( InputCallback input_callback, S1Angle buffer_radius, double error_fraction) { S2BufferOperation::Options options; options.set_buffer_radius(buffer_radius); options.set_error_fraction(error_fraction); return DoBuffer(std::move(input_callback), options); } // Given a callback that adds empty geometry to the S2BufferOperation, // verifies that the result is empty after buffering. void TestBufferEmpty(InputCallback input) { // Test code paths that involve buffering by negative, zero, and positive // values, and also values where the result is usually empty or full. 
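// (In particular, a +200 degree radius would yield the full polygon for any non-empty // input, so this also verifies that empty input stays empty on that code path.)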
EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(-200), 0.1)->is_empty()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(-1), 0.1)->is_empty()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(0), 0.1)->is_empty()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(1), 0.1)->is_empty()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(200), 0.1)->is_empty()); } TEST(S2BufferOperation, NoInput) { TestBufferEmpty([](S2BufferOperation* op) {}); } TEST(S2BufferOperation, EmptyPolyline) { // Note that polylines with 1 vertex are defined to have no edges. TestBufferEmpty([](S2BufferOperation* op) { op->AddPolyline(vector{S2Point(1, 0, 0)}); }); } TEST(S2BufferOperation, EmptyLoop) { TestBufferEmpty([](S2BufferOperation* op) { op->AddLoop(vector{}); }); } TEST(S2BufferOperation, EmptyPointShape) { TestBufferEmpty([](S2BufferOperation* op) { op->AddShape(S2PointVectorShape{}); }); } TEST(S2BufferOperation, EmptyPolylineShape) { TestBufferEmpty([](S2BufferOperation* op) { op->AddShape(*s2textformat::MakeLaxPolylineOrDie("")); }); } TEST(S2BufferOperation, EmptyPolygonShape) { TestBufferEmpty([](S2BufferOperation* op) { op->AddShape(*s2textformat::MakeLaxPolygonOrDie("")); }); } TEST(S2BufferOperation, EmptyShapeIndex) { TestBufferEmpty([](S2BufferOperation* op) { op->AddShapeIndex(*s2textformat::MakeIndexOrDie("# #")); }); } TEST(S2BufferOperation, PoorlyNormalizedPoint) { // Verify that debugging assertions are not triggered when an input point is // not unit length (but within the limits guaranteed by S2Point::Normalize). // // The purpose of this test is not to check that the result is correct // (which is done elsewhere), simply that no assertions occur. DoBuffer([](S2BufferOperation* op) { S2Point p(1 - 2 * DBL_EPSILON, 0, 0); // Maximum error allowed. S2_CHECK(S2::IsUnitLength(p)); op->AddPoint(p); }, S1Angle::Degrees(1), 0.01); } // Given a callback that adds the full polygon to the S2BufferOperation, // verifies that the result is full after buffering. void TestBufferFull(InputCallback input) { // Test code paths that involve buffering by negative, zero, and positive // values, and also values where the result is usually empty or full. EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(-200), 0.1)->is_full()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(-1), 0.1)->is_full()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(0), 0.1)->is_full()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(1), 0.1)->is_full()); EXPECT_TRUE(DoBuffer(input, S1Angle::Degrees(200), 0.1)->is_full()); } TEST(S2BufferOperation, FullPolygonShape) { TestBufferFull([](S2BufferOperation* op) { op->AddShape(*s2textformat::MakeLaxPolygonOrDie("full")); }); } TEST(S2BufferOperation, FullShapeIndex) { TestBufferFull([](S2BufferOperation* op) { op->AddShapeIndex(*s2textformat::MakeIndexOrDie("# # full")); }); } TEST(S2BufferOperation, PointsAndPolylinesAreRemoved) { // Test that points and polylines are removed with a negative buffer radius. auto output = DoBuffer([](S2BufferOperation* op) { op->AddShapeIndex(*s2textformat::MakeIndexOrDie("0:0 # 2:2, 2:3#")); }, S1Angle::Degrees(-1), 0.1); EXPECT_TRUE(output->is_empty()); } TEST(S2BufferOperation, BufferedPointsAreSymmetric) { // Test that points are buffered into regular polygons. (This is not // guaranteed by the API but makes the output nicer to look at. :) auto output = DoBuffer([](S2BufferOperation* op) { op->AddPoint(S2Point(1, 0, 0)); }, S1Angle::Degrees(5), 0.001234567); // We use the length of the last edge as our reference length. 
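// (The "last edge" is the closing edge of the loop, i.e. the edge from vertex n - 1 back // to vertex 0, which is why edge_len below is measured between those two vertices.)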
int n = output->num_vertices(); S1Angle edge_len(output->loop_vertex(0, 0), output->loop_vertex(0, n - 1)); for (int i = 1; i < n; ++i) { EXPECT_LE(abs(edge_len - S1Angle(output->loop_vertex(0, i - 1), output->loop_vertex(0, i))), S1Angle::Radians(1e-14)); } } TEST(S2BufferOperation, SetCircleSegments) { // Test that when a point is buffered with a small radius the number of // edges matches options.circle_segments(). (This is not true for large // radii because large circles on the sphere curve less than 360 degrees.) // Using a tiny radius helps to catch rounding problems. S2BufferOperation::Options options(S1Angle::Radians(1e-12)); for (int circle_segments = 3; circle_segments <= 20; ++circle_segments) { options.set_circle_segments(circle_segments); auto output = DoBuffer([](S2BufferOperation* op) { op->AddPoint(S2Point(1, 0, 0)); }, options); EXPECT_EQ(output->num_vertices(), circle_segments); } } TEST(S2BufferOperation, SetSnapFunction) { // Verify that the snap function is passed through to S2Builder. // We use a buffer radius of zero to make the test simpler. S2BufferOperation::Options options; options.set_snap_function(s2builderutil::IntLatLngSnapFunction(0)); auto output = DoBuffer([](S2BufferOperation* op) { op->AddPoint(s2textformat::MakePointOrDie("0.1:-0.4")); }, options); EXPECT_EQ(output->num_vertices(), 1); EXPECT_EQ(output->loop_vertex(0, 0), s2textformat::MakePointOrDie("0:0")); } TEST(S2BufferOperation, NegativeBufferRadiusMultipleLayers) { // Verify that with a negative buffer radius, at most one polygon layer is // allowed. S2LaxPolygonShape output; S2BufferOperation op( make_unique(&output), S2BufferOperation::Options(S1Angle::Radians(-1))); op.AddLoop(S2PointLoopSpan(s2textformat::ParsePointsOrDie("0:0, 0:1, 1:0"))); op.AddShapeIndex(*s2textformat::MakeIndexOrDie("# # 2:2, 2:3, 3:2")); S2Error error; EXPECT_FALSE(op.Build(&error)); EXPECT_EQ(error.code(), S2Error::FAILED_PRECONDITION); } // If buffer_radius > max_error, tests that "output" contains "input". // If buffer_radius < -max_error tests that "input" contains "output". // Otherwise does nothing. void TestContainment(const MutableS2ShapeIndex& input, const MutableS2ShapeIndex& output, S1Angle buffer_radius, S1Angle max_error) { S2BooleanOperation::Options options; options.set_polygon_model(S2BooleanOperation::PolygonModel::CLOSED); options.set_polyline_model(S2BooleanOperation::PolylineModel::CLOSED); if (buffer_radius > max_error) { // For positive buffer radii, the output should contain the input. EXPECT_TRUE(S2BooleanOperation::Contains(output, input, options)); } else if (buffer_radius < -max_error) { // For negative buffer radii, the input should contain the output. EXPECT_TRUE(S2BooleanOperation::Contains(input, output, options)); } } // Tests that the minimum distance from the boundary of "output" to the // boundary of "input" is at least "min_dist" using exact predicates. void TestMinimumDistance(const MutableS2ShapeIndex& input, const MutableS2ShapeIndex& output, S1ChordAngle min_dist) { if (min_dist == S1ChordAngle::Zero()) return; // We do one query to find the edges of "input" that might be too close to // "output", then for each such edge we do another query to find the edges // of "output" that might be too close to it. Then we check the distance // between each edge pair (A, B) using exact predicates. // We make the distance limit big enough to find all edges whose true // distance might be less than "min_dist". 
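// (PlusError() below pads the search limit by the maximum error of the distance // computation, so the approximate queries cannot miss an edge pair whose true // distance is less than "min_dist".)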
S2ClosestEdgeQuery::Options query_options; query_options.set_include_interiors(false); query_options.set_max_distance( min_dist.PlusError(S2::GetUpdateMinDistanceMaxError(min_dist))); S2ClosestEdgeQuery in_query(&input, query_options); S2ClosestEdgeQuery::ShapeIndexTarget out_target(&output); out_target.set_include_interiors(false); S2ClosestEdgeQuery out_query(&output, query_options); for (const auto& in_result : in_query.FindClosestEdges(&out_target)) { auto a = input.shape(in_result.shape_id())->edge(in_result.edge_id()); S2ClosestEdgeQuery::EdgeTarget in_target(a.v0, a.v1); for (const auto& out_result : out_query.FindClosestEdges(&in_target)) { auto b = output.shape(out_result.shape_id())->edge(out_result.edge_id()); ASSERT_GE( s2pred::CompareEdgePairDistance(a.v0, a.v1, b.v0, b.v1, min_dist), 0); } } } // Tests that the Hausdorff distance from the boundary of "output" to the // boundary of "input" is at most (1 + error_fraction) * buffer_radius. The // implementation approximates this by measuring the distance at a set of // points along the boundary of "output". void TestHausdorffDistance(const MutableS2ShapeIndex& input, const MutableS2ShapeIndex& output, S1ChordAngle max_dist) { S2ClosestEdgeQuery::Options query_options; query_options.set_include_interiors(false); query_options.set_max_distance( max_dist.PlusError(S2::GetUpdateMinDistanceMaxError(max_dist))); S2ClosestEdgeQuery in_query(&input, query_options); for (const S2Shape* out_shape : output) { for (int i = 0; i < out_shape->num_edges(); ++i) { S2Shape::Edge e = out_shape->edge(i); // Measure the distance at 5 points along the edge. for (double t = 0; t <= 1.0; t += 0.25) { S2Point b = S2::Interpolate(e.v0, e.v1, t); S2ClosestEdgeQuery::PointTarget out_target(b); // We check the distance bound using exact predicates. for (const auto& in_result : in_query.FindClosestEdges(&out_target)) { auto a = input.shape(in_result.shape_id())->edge(in_result.edge_id()); ASSERT_LE(s2pred::CompareEdgeDistance(b, a.v0, a.v1, max_dist), 0); } } } } } // Buffers the given input with the given buffer_radius and error_fraction and // verifies that the output is correct. void TestBuffer(const MutableS2ShapeIndex& input, S1Angle buffer_radius, double error_fraction) { // Ideally we would verify the correctness of buffering as follows. Suppose // that B = Buffer(A, r) and let ~X denote the complement of region X. Then // if r > 0, we would verify: // // 1a. Minimum distance between ~B and A >= r_min // 1b. Directed Hausdorff distance from B to A <= r_max // // Buffering A by r < 0 is equivalent to buffering ~A by |r|, so instead we // would verify the following (where r_min and r_max are based on |r|): // // 2a. Minimum distance between B and ~A >= r_min // 2b. Directed Hausdorff distance from ~B to ~A <= r_max // // Conditions 1a and 2a can be implemented as follows: // // 1a*: B.Contains(A) && minimum distance between @B and @A >= r_min // 2a*: A.Contains(B) && minimum distance between @B and @A >= r_min // // Note that if r_min <= 0 then there is nothing to be tested, since the // containment condition may not hold. (Note that even when the specified // buffer radius is zero, edges can move slightly when crossing edges are // split during the snapping step.) The correct approach would be to test // instead that the directed Hausdorff distance from A to ~B is at most // -r_min, but Hausdorff distance is not yet implemented. // // Similarly, conditions 1b and 2b need to be approximated because Hausdorff // distance is not yet implemented. 
We do this by measuring the distance at // a set of points on the boundary of B: // // 1b*: Minimum distance from P to @A <= r_max for a set of points P on @B // 2b*: Minimum distance from P to @A <= r_max for a set of points P on @B // // This is not perfect (e.g., it won't detect cases where an entire boundary // loop of B is missing, such as returning a disc in the place of an // annulus) but it is sufficient to detect many types of errors. S2BufferOperation::Options options; options.set_buffer_radius(buffer_radius); options.set_error_fraction(error_fraction); MutableS2ShapeIndex output; output.Add(DoBuffer( [&input](S2BufferOperation* op) { op->AddShapeIndex(input); }, options)); SCOPED_TRACE(absl::StrFormat( "\nradius = %.17g, error_fraction = %.17g\ninput = %s\noutput = %s", buffer_radius.radians(), error_fraction, s2textformat::ToString(input), s2textformat::ToString(output))); // Check the 1a*/2a* condition above. S1Angle max_error = options.max_error(); TestContainment(input, output, buffer_radius, max_error); S1ChordAngle min_dist(max(S1Angle::Zero(), abs(buffer_radius) - max_error)); TestMinimumDistance(input, output, min_dist); // Check the 1b*/2b* condition (i.e., directed Hausdorff distance). S1ChordAngle max_dist(abs(buffer_radius) + max_error); TestHausdorffDistance(input, output, max_dist); } // Convenience function that takes an S2ShapeIndex in s2textformat format. void TestBuffer(absl::string_view index_str, S1Angle buffer_radius, double error_fraction) { TestBuffer(*s2textformat::MakeIndexOrDie(index_str), buffer_radius, error_fraction); } // Convenience function that tests buffering using +/- the given radius. void TestSignedBuffer(absl::string_view index_str, S1Angle buffer_radius, double error_fraction) { TestBuffer(index_str, buffer_radius, error_fraction); TestBuffer(index_str, -buffer_radius, error_fraction); } TEST(S2BufferOperation, PointShell) { TestSignedBuffer("# # 0:0", S1Angle::Radians(M_PI_2), 0.01); } TEST(S2BufferOperation, SiblingPairShell) { TestSignedBuffer("# # 0:0, 0:5", S1Angle::Radians(M_PI_2), 0.01); } TEST(S2BufferOperation, SiblingPairHole) { TestSignedBuffer("# # 0:0, 0:10, 7:7; 3:4, 3:6", S1Angle::Degrees(1), 0.01); } TEST(S2BufferOperation, Square) { TestSignedBuffer("# # -3:-3, -3:3, 3:3, 3:-3", S1Angle::Degrees(1), 0.01); TestSignedBuffer("# # -3:-3, -3:3, 3:3, 3:-3", S1Angle::Degrees(170), 1e-4); } TEST(S2BufferOperation, HollowSquare) { TestSignedBuffer("# # -3:-3, -3:3, 3:3, 3:-3; 2:2, -2:2, -2:-2, 2:-2", S1Angle::Degrees(1), 0.01); } TEST(S2BufferOperation, ZigZagLoop) { TestSignedBuffer("# # 0:0, 0:7, 5:3, 5:10, 6:10, 6:1, 1:5, 1:0", S1Angle::Degrees(0.2), 0.01); } TEST(S2BufferOperation, Fractals) { for (double dimension : {1.02, 1.8}) { S2Testing::Fractal fractal; fractal.SetLevelForApproxMaxEdges(3 * 64); fractal.set_fractal_dimension(dimension); auto loop = fractal.MakeLoop(S2::GetFrame(S2Point(1, 0, 0)), S1Angle::Degrees(10)); MutableS2ShapeIndex input; input.Add(make_unique<S2Loop::Shape>(loop.get())); TestBuffer(input, S1Angle::Degrees(0.4), 0.01); } } TEST(S2BufferOperation, S2Curve) { // Tests buffering the S2 curve by an amount that yields the full polygon. constexpr int kLevel = 2; // Number of input edges == 6 * (4 ** kLevel) vector<S2Point> points; for (S2CellId id = S2CellId::Begin(kLevel); id != S2CellId::End(kLevel); id = id.next()) { points.push_back(id.ToPoint()); } // Buffering by this amount or more is guaranteed to yield the full polygon. // (Note that the bound is not tight for S2CellIds at low levels.)
S1Angle full_radius = S1Angle::Radians(0.5 * S2::kMaxDiag.GetValue(kLevel)); EXPECT_TRUE(DoBuffer([&points](S2BufferOperation* op) { op->AddShape(S2LaxClosedPolylineShape(points)); }, full_radius, 0.1)->is_full()); } // Tests buffering the given S2ShapeIndex with a variety of radii and error // fractions. This method is intended to be used with relatively simple // shapes since calling it is quite expensive. void TestRadiiAndErrorFractions(absl::string_view index_str) { // Try the full range of radii with a representative error fraction. constexpr double kFrac = 0.01; vector kTestRadiiRadians = { 0, 1e-300, 1e-15, 2e-15, 3e-15, 1e-5, 0.01, 0.1, 1.0, (1 - kFrac) * M_PI_2, M_PI_2 - 1e-15, M_PI_2, M_PI_2 + 1e-15, (1 - kFrac) * M_PI, M_PI - 1e-6, M_PI, 1e300 }; for (double radius : kTestRadiiRadians) { TestSignedBuffer(index_str, S1Angle::Radians(radius), kFrac); } // Now try the full range of error fractions with a few selected radii. vector kTestErrorFractions = {S2BufferOperation::Options::kMinErrorFraction, 0.001, 0.01, 0.1, 1.0}; for (double error_fraction : kTestErrorFractions) { TestBuffer(index_str, S1Angle::Radians(-1e-6), error_fraction); TestBuffer(index_str, S1Angle::Radians(1e-14), error_fraction); TestBuffer(index_str, S1Angle::Radians(1e-2), error_fraction); TestBuffer(index_str, S1Angle::Radians(M_PI - 1e-3), error_fraction); } } TEST(S2BufferOperation, RadiiAndErrorFractionCoverage) { // Test buffering simple shapes with a wide range of different buffer radii // and error fractions. // A single point. TestRadiiAndErrorFractions("1:1 # #"); // A zig-zag polyline. TestRadiiAndErrorFractions("# 0:0, 0:30, 30:30, 30:60 #"); // A triangular polygon with a triangular hole. (The hole is clockwise.) TestRadiiAndErrorFractions("# # 0:0, 0:100, 70:50; 10:20, 50:50, 10:80"); // A triangle with one very short and two very long edges. TestRadiiAndErrorFractions("# # 0:0, 0:179.99999999999, 1e-300:0"); } class TestBufferPolyline { public: TestBufferPolyline(const string& input_str, const S2BufferOperation::Options& options); private: const double kArcLo = 0.001; const double kArcHi = 0.999; const int kArcSamples = 7; S2Point GetEdgeAxis(const S2Point& a, const S2Point& b) { return S2::RobustCrossProd(a, b).Normalize(); } bool PointBufferingUncertain(const S2Point& p, bool expect_contained) { // The only case where a point might be excluded from the buffered output is // if it is on the unbuffered side of the polyline. if (expect_contained && two_sided_) return false; int n = polyline_.size(); for (int i = 0; i < n - 1; ++i) { const S2Point& a = polyline_[i]; const S2Point& b = polyline_[i + 1]; if (!two_sided_) { // Ignore points on the buffered side if expect_contained is true, // and on the unbuffered side if expect_contained is false. if ((s2pred::Sign(a, b, p) < 0) == expect_contained) continue; } // TODO(ericv): Depending on how the erasing optimization is implemented, // it might be possible to add "&& expect_contained" to the test below. if (round_) { if (S2::IsDistanceLess(p, a, b, max_dist_)) return true; } else { if (S2::IsInteriorDistanceLess(p, a, b, max_dist_)) return true; if (i > 0 && S1ChordAngle(p, a) < max_dist_) return true; if (i == n - 2 && S1ChordAngle(p, b) < max_dist_) return true; } } return false; } void TestPoint(const S2Point& p, const S2Point& dir, bool expect_contained) { S2Point x = S2::GetPointOnRay( p, dir, expect_contained ? 
buffer_radius_ - max_error_ : max_error_); if (!PointBufferingUncertain(x, expect_contained)) { EXPECT_EQ(MakeS2ContainsPointQuery(&output_).Contains(x), expect_contained); } } void TestVertexArc(const S2Point& p, const S2Point& start, const S2Point& end, bool expect_contained) { for (double t = kArcLo; t < 1; t += (kArcHi - kArcLo) / kArcSamples) { S2Point dir = S2::Interpolate(start, end, t); TestPoint(p, dir, expect_contained); } } void TestEdgeArc(const S2Point& ba_axis, const S2Point& a, const S2Point& b, bool expect_contained) { for (double t = kArcLo; t < 1; t += (kArcHi - kArcLo) / kArcSamples) { S2Point p = S2::Interpolate(a, b, t); TestPoint(p, ba_axis, expect_contained); } } void TestEdgeAndVertex(const S2Point& a, const S2Point& b, const S2Point& c, bool expect_contained) { S2Point ba_axis = GetEdgeAxis(b, a); S2Point cb_axis = GetEdgeAxis(c, b); TestEdgeArc(ba_axis, a, b, expect_contained); TestVertexArc(b, ba_axis, cb_axis, expect_contained); } vector polyline_; MutableS2ShapeIndex output_; S1Angle buffer_radius_, max_error_; S1ChordAngle min_dist_, max_dist_; bool round_, two_sided_; }; // Tests buffering a polyline with the given options. This method is intended // only for testing Options::EndCapStyle and Options::PolylineSide; if these // options have their default values then TestBuffer() should be used // instead. Similarly TestBuffer should be used to test negative buffer radii // and polylines with 0 or 1 vertices. TestBufferPolyline::TestBufferPolyline( const string& input_str, const S2BufferOperation::Options& options) : polyline_(s2textformat::ParsePointsOrDie(input_str)), buffer_radius_(options.buffer_radius()), max_error_(options.max_error()), min_dist_(max(S1Angle::Zero(), buffer_radius_ - max_error_)), max_dist_(buffer_radius_ + max_error_), round_(options.end_cap_style() == EndCapStyle::ROUND), two_sided_(options.polyline_side() == PolylineSide::BOTH) { S2_DCHECK_GE(polyline_.size(), 2); S2_DCHECK(buffer_radius_ > S1Angle::Zero()); MutableS2ShapeIndex input; input.Add(s2textformat::MakeLaxPolylineOrDie(input_str)); output_.Add(DoBuffer( [&input](S2BufferOperation* op) { op->AddShapeIndex(input); }, options)); // Even with one-sided buffering and flat end caps the Hausdorff distance // criterion should still be true. (This checks that the buffered result // is never further than (buffer_radius + max_error) from the input.) TestHausdorffDistance(input, output_, max_dist_); // However the minimum distance criterion is different; it only applies to // the portions of the boundary that are buffered using the given radius. // We check this approximately by walking along the polyline and checking // that (1) on portions of the polyline that should be buffered, the output // contains the offset point at distance (buffer_radius - max_error) and (2) // on portions of the polyline that should not be buffered, the output does // not contain the offset point at distance max_error. The tricky part is // that both of these conditions have exceptions: (1) may not hold if the // test point is closest to the non-buffered side of the polyline (see the // last caveat in the documentation for Options::polyline_side), and (2) // may not hold if the test point is within (buffer_radius + max_error) of // the buffered side of any portion of the polyline. if (min_dist_ == S1ChordAngle::Zero()) return; // Left-sided buffering is tested by reversing the polyline and then testing // whether it has been buffered correctly on the right. 
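// (This is sufficient because buffering a polyline on its left side is equivalent to // buffering the reversed polyline on its right side.)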
if (options.polyline_side() == PolylineSide::LEFT) { std::reverse(polyline_.begin(), polyline_.end()); } int n = polyline_.size(); S2Point start0 = polyline_[0], start1 = polyline_[1]; S2Point start_begin = GetEdgeAxis(start0, start1); S2Point start_mid = start0.CrossProd(start_begin); TestVertexArc(start0, start_begin, start_mid, round_ && two_sided_); TestVertexArc(start0, start_mid, -start_begin, round_); for (int i = 0; i < n - 2; ++i) { TestEdgeAndVertex(polyline_[i], polyline_[i + 1], polyline_[i + 2], true); } S2Point end0 = polyline_[n - 1], end1 = polyline_[n - 2]; S2Point end_begin = GetEdgeAxis(end0, end1); S2Point end_mid = end0.CrossProd(end_begin); TestEdgeArc(end_begin, end1, end0, true); TestVertexArc(end0, end_begin, end_mid, round_); TestVertexArc(end0, end_mid, -end_begin, round_ && two_sided_); for (int i = n - 3; i >= 0; --i) { TestEdgeAndVertex(polyline_[i + 2], polyline_[i + 1], polyline_[i], two_sided_); } TestEdgeArc(start_begin, start1, start0, two_sided_); } TEST(S2BufferOperation, ZigZagPolyline) { S2BufferOperation::Options options(S1Angle::Degrees(1)); for (PolylineSide polyline_side : { PolylineSide::LEFT, PolylineSide::RIGHT, PolylineSide::BOTH}) { for (EndCapStyle end_cap_style : { EndCapStyle::ROUND, EndCapStyle::FLAT}) { SCOPED_TRACE(absl::StrFormat( "two_sided = %d, round = %d", polyline_side == PolylineSide::BOTH, end_cap_style == EndCapStyle::ROUND)); options.set_polyline_side(polyline_side); options.set_end_cap_style(end_cap_style); TestBufferPolyline("0:0, 0:7, 5:3, 5:10", options); // NOLINT TestBufferPolyline("10:0, 0:0, 5:1", options); // NOLINT } } } } // namespace s2geometry-0.10.0/src/s2/s2builder.cc000066400000000000000000002775651422156367100172420ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) // // The algorithm is based on the idea of choosing a set of sites and computing // their "limited radius Voronoi diagram", which is obtained by intersecting // each Voronoi region with a disc of fixed radius (the "snap radius") // centered around the corresponding site. // // For each input edge, we then determine the sequence of Voronoi regions // crossed by that edge, and snap the edge to the corresponding sequence of // sites. (In other words, each input edge is replaced by an edge chain.) // // The sites are chosen by starting with the set of input vertices, optionally // snapping them to discrete point set (such as S2CellId centers or lat/lng E7 // coordinates), and then choosing a subset such that no two sites are closer // than the given "snap_radius". Note that the sites do not need to be spaced // regularly -- their positions are completely arbitrary. // // Rather than computing the full limited radius Voronoi diagram, instead we // compute on demand the sequence of Voronoi regions intersected by each edge. 
// We do this by first finding all the sites that are within "snap_radius" of // the edge, sorting them by distance from the edge origin, and then using an // incremental algorithm. // // We implement the minimum edge-vertex separation property by snapping all // the input edges and checking whether any site (the "site to avoid") would // then be too close to the edge. If so we add another site (the "separation // site") along the input edge, positioned so that the new snapped edge is // guaranteed to be far enough away from the site to avoid. We then find all // the input edges that are within "snap_radius" of the new site, and resnap // those edges. (It is very rare that we need to add a separation site, even // when sites are densely spaced.) // // Idempotency is implemented by explicitly checking whether the input // geometry already meets the output criteria. This is not as sophisticated // as Stable Snap Rounding (Hershberger); I have worked out the details and it // is possible to adapt those ideas here, but it would make the implementation // significantly more complex. // // The only way that different output layers interact is in the choice of // Voronoi sites: // // - Vertices from all layers contribute to the initial selection of sites. // // - Edges in any layer that pass too close to a site can cause new sites to // be added (which affects snapping in all layers). // // - Simplification can be thought of as removing sites. A site can be // removed only if the snapped edges stay within the error bounds of the // corresponding input edges in all layers. // // Otherwise all layers are processed independently. For example, sibling // edge pairs can only cancel each other within a single layer (if desired). #include "s2/s2builder.h" #include #include #include #include #include #include #include #include #include "absl/cleanup/cleanup.h" #include "absl/memory/memory.h" #include "s2/base/casts.h" #include "s2/base/logging.h" #include "s2/base/log_severity.h" #include "s2/util/bits/bits.h" #include "s2/id_set_lexicon.h" #include "s2/mutable_s2shape_index.h" #include "s2/s1angle.h" #include "s2/s1chord_angle.h" #include "s2/s2builder_graph.h" #include "s2/s2builder_layer.h" #include "s2/s2builderutil_snap_functions.h" #include "s2/s2closest_edge_query.h" #include "s2/s2closest_point_query.h" #include "s2/s2edge_crossings.h" #include "s2/s2edge_distances.h" #include "s2/s2error.h" #include "s2/s2loop.h" #include "s2/s2point_index.h" #include "s2/s2pointutil.h" #include "s2/s2polygon.h" #include "s2/s2polyline.h" #include "s2/s2polyline_simplifier.h" #include "s2/s2predicates.h" #include "s2/s2shapeutil_visit_crossing_edge_pairs.h" #include "s2/s2text_format.h" #include "s2/util/gtl/dense_hash_set.h" using absl::make_unique; using gtl::compact_array; using gtl::dense_hash_set; using std::max; using std::pair; using std::unique_ptr; using std::vector; // Internal flag intended to be set from within a debugger. 
bool s2builder_verbose = false; S2Builder::Options::Options() : snap_function_( make_unique(S1Angle::Zero())) { } S2Builder::Options::Options(const SnapFunction& snap_function) : snap_function_(snap_function.Clone()) { } S2Builder::Options::Options(const Options& options) : snap_function_(options.snap_function_->Clone()), split_crossing_edges_(options.split_crossing_edges_), intersection_tolerance_(options.intersection_tolerance_), simplify_edge_chains_(options.simplify_edge_chains_), idempotent_(options.idempotent_), memory_tracker_(options.memory_tracker_) { } S2Builder::Options& S2Builder::Options::operator=(const Options& options) { snap_function_ = options.snap_function_->Clone(); split_crossing_edges_ = options.split_crossing_edges_; intersection_tolerance_ = options.intersection_tolerance_; simplify_edge_chains_ = options.simplify_edge_chains_; idempotent_ = options.idempotent_; memory_tracker_ = options.memory_tracker_; return *this; } S1Angle S2Builder::Options::edge_snap_radius() const { return snap_function().snap_radius() + intersection_tolerance(); } S1Angle S2Builder::Options::max_edge_deviation() const { // We want max_edge_deviation() to be large enough compared to snap_radius() // such that edge splitting is rare. // // Using spherical trigonometry, if the endpoints of an edge of length L // move by at most a distance R, the center of the edge moves by at most // asin(sin(R) / cos(L / 2)). Thus the (max_edge_deviation / snap_radius) // ratio increases with both the snap radius R and the edge length L. // // We arbitrarily limit the edge deviation to be at most 10% more than the // snap radius. With the maximum allowed snap radius of 70 degrees, this // means that edges up to 30.6 degrees long are never split. For smaller // snap radii, edges up to 49 degrees long are never split. (Edges of any // length are not split unless their endpoints move far enough so that the // actual edge deviation exceeds the limit; in practice, splitting is rare // even with long edges.) Note that it is always possible to split edges // when max_edge_deviation() is exceeded; see MaybeAddExtraSites(). S2_DCHECK_LE(snap_function().snap_radius(), SnapFunction::kMaxSnapRadius()); const double kMaxEdgeDeviationRatio = 1.1; return kMaxEdgeDeviationRatio * edge_snap_radius(); } bool operator==(const S2Builder::GraphOptions& x, const S2Builder::GraphOptions& y) { return (x.edge_type() == y.edge_type() && x.degenerate_edges() == y.degenerate_edges() && x.duplicate_edges() == y.duplicate_edges() && x.sibling_pairs() == y.sibling_pairs() && x.allow_vertex_filtering() == y.allow_vertex_filtering()); } // Helper functions for computing error bounds: static S1ChordAngle RoundUp(S1Angle a) { S1ChordAngle ca(a); return ca.PlusError(ca.GetS1AngleConstructorMaxError()); } static S1ChordAngle AddPointToPointError(S1ChordAngle ca) { return ca.PlusError(ca.GetS2PointConstructorMaxError()); } static S1ChordAngle AddPointToEdgeError(S1ChordAngle ca) { return ca.PlusError(S2::GetUpdateMinDistanceMaxError(ca)); } S2Builder::S2Builder() { } S2Builder::S2Builder(const Options& options) { Init(options); } void S2Builder::Init(const Options& options) { options_ = options; const SnapFunction& snap_function = options.snap_function(); S1Angle snap_radius = snap_function.snap_radius(); S2_DCHECK_LE(snap_radius, SnapFunction::kMaxSnapRadius()); // Convert the snap radius to an S1ChordAngle. This is the "true snap // radius" used when evaluating exact predicates (s2predicates.h). 
site_snap_radius_ca_ = S1ChordAngle(snap_radius); // When intersection_tolerance() is non-zero we need to use a larger snap // radius for edges than for vertices to ensure that both edges are snapped // to the edge intersection location. This is because the computed // intersection point is not exact; it may be up to intersection_tolerance() // away from its true position. The computed intersection point might then // be snapped to some other vertex up to snap_radius away. So to ensure // that both edges are snapped to a common vertex, we need to increase the // snap radius for edges to at least the sum of these two values (calculated // conservatively). S1Angle edge_snap_radius = options.edge_snap_radius(); edge_snap_radius_ca_ = RoundUp(edge_snap_radius); snapping_requested_ = (edge_snap_radius > S1Angle::Zero()); // Compute the maximum distance that a vertex can be separated from an // edge while still affecting how that edge is snapped. max_edge_deviation_ = options.max_edge_deviation(); edge_site_query_radius_ca_ = S1ChordAngle( max_edge_deviation_ + snap_function.min_edge_vertex_separation()); // Compute the maximum edge length such that even if both endpoints move by // the maximum distance allowed (i.e., edge_snap_radius), the center of the // edge will still move by less than max_edge_deviation(). This saves us a // lot of work since then we don't need to check the actual deviation. if (!snapping_requested_) { min_edge_length_to_split_ca_ = S1ChordAngle::Infinity(); } else { // This value varies between 30 and 50 degrees depending on the snap radius. min_edge_length_to_split_ca_ = S1ChordAngle::Radians( 2 * acos(sin(edge_snap_radius) / sin(max_edge_deviation_))); } // In rare cases we may need to explicitly check that the input topology is // preserved, i.e. that edges do not cross vertices when snapped. This is // only necessary (1) for vertices added using ForceVertex(), and (2) when the // snap radius is smaller than intersection_tolerance() (which is typically // either zero or S2::kIntersectionError, about 9e-16 radians). This // condition arises because when a geodesic edge is snapped, the edge center // can move further than its endpoints. This can cause an edge to pass on the // wrong side of an input vertex. (Note that this could not happen in a // planar version of this algorithm.) Usually we don't need to consider this // possibility explicitly, because if the snapped edge passes on the wrong // side of a vertex then it is also closer than min_edge_vertex_separation() // to that vertex, which will cause a separation site to be added. // // If the condition below is true then we need to check all sites (i.e., // snapped input vertices) for topology changes. However this is almost never // the case because // // max_edge_deviation() == 1.1 * edge_snap_radius() // and min_edge_vertex_separation() >= 0.219 * snap_radius() // // for all currently implemented snap functions. The condition below is // only true when intersection_tolerance() is non-zero (which causes // edge_snap_radius() to exceed snap_radius() by S2::kIntersectionError) and // snap_radius() is very small (at most S2::kIntersectionError / 1.19). check_all_site_crossings_ = (options.max_edge_deviation() > options.edge_snap_radius() + snap_function.min_edge_vertex_separation()); if (options.intersection_tolerance() <= S1Angle::Zero()) { S2_DCHECK(!check_all_site_crossings_); } // To implement idempotency, we check whether the input geometry could // possibly be the output of a previous S2Builder invocation. 
This involves // testing whether any site/site or edge/site pairs are too close together. // This is done using exact predicates, which require converting the minimum // separation values to an S1ChordAngle. min_site_separation_ = snap_function.min_vertex_separation(); min_site_separation_ca_ = S1ChordAngle(min_site_separation_); min_edge_site_separation_ca_ = S1ChordAngle(snap_function.min_edge_vertex_separation()); // This is an upper bound on the distance computed by S2ClosestPointQuery // where the true distance might be less than min_edge_site_separation_ca_. min_edge_site_separation_ca_limit_ = AddPointToEdgeError(min_edge_site_separation_ca_); // Compute the maximum possible distance between two sites whose Voronoi // regions touch. (The maximum radius of each Voronoi region is // edge_snap_radius_.) Then increase this bound to account for errors. max_adjacent_site_separation_ca_ = AddPointToPointError(RoundUp(2 * edge_snap_radius)); // Finally, we also precompute sin^2(edge_snap_radius), which is simply the // squared distance between a vertex and an edge measured perpendicular to // the plane containing the edge, and increase this value by the maximum // error in the calculation to compare this distance against the bound. double d = sin(edge_snap_radius); edge_snap_radius_sin2_ = d * d; edge_snap_radius_sin2_ += ((9.5 * d + 2.5 + 2 * sqrt(3)) * d + 9 * DBL_EPSILON) * DBL_EPSILON; // Initialize the current label set. label_set_id_ = label_set_lexicon_.EmptySetId(); label_set_modified_ = false; // If snapping was requested, we try to determine whether the input geometry // already meets the output requirements. This is necessary for // idempotency, and can also save work. If we discover any reason that the // input geometry needs to be modified, snapping_needed_ is set to true. snapping_needed_ = false; tracker_.Init(options.memory_tracker()); } void S2Builder::clear_labels() { label_set_.clear(); label_set_modified_ = true; } void S2Builder::push_label(Label label) { S2_DCHECK_GE(label, 0); label_set_.push_back(label); label_set_modified_ = true; } void S2Builder::pop_label() { label_set_.pop_back(); label_set_modified_ = true; } void S2Builder::set_label(Label label) { S2_DCHECK_GE(label, 0); label_set_.resize(1); label_set_[0] = label; label_set_modified_ = true; } bool S2Builder::IsFullPolygonUnspecified(const S2Builder::Graph& g, S2Error* error) { error->Init(S2Error::BUILDER_IS_FULL_PREDICATE_NOT_SPECIFIED, "A degenerate polygon was found, but no predicate was specified " "to determine whether the polygon is empty or full. Call " "S2Builder::AddIsFullPolygonPredicate() to fix this problem."); return false; // Assumes the polygon is empty. } S2Builder::IsFullPolygonPredicate S2Builder::IsFullPolygon(bool is_full) { return [is_full](const S2Builder::Graph& g, S2Error* error) { return is_full; }; } void S2Builder::StartLayer(unique_ptr layer) { layer_options_.push_back(layer->graph_options()); layer_begins_.push_back(input_edges_.size()); layer_is_full_polygon_predicates_.push_back(IsFullPolygon(false)); layers_.push_back(std::move(layer)); } // Input vertices are stored in a vector, with some removal of duplicates. // Edges are represented as (VertexId, VertexId) pairs. All edges are stored // in a single vector; each layer corresponds to a contiguous range. S2Builder::InputVertexId S2Builder::AddVertex(const S2Point& v) { // Remove duplicate vertices that follow the pattern AB, BC, CD. 
If we want // to do anything more sophisticated, either use a ValueLexicon, or sort the // vertices once they have all been added, remove duplicates, and update the // edges. if (input_vertices_.empty() || v != input_vertices_.back()) { if (!tracker_.AddSpace(&input_vertices_, 1)) return -1; input_vertices_.push_back(v); } return input_vertices_.size() - 1; } void S2Builder::AddIntersection(const S2Point& vertex) { // It is an error to call this method without first setting // intersection_tolerance() to a non-zero value. S2_DCHECK_GT(options_.intersection_tolerance(), S1Angle::Zero()); // Calling this method also overrides the idempotent() option. snapping_needed_ = true; AddVertex(vertex); } void S2Builder::AddEdge(const S2Point& v0, const S2Point& v1) { S2_DCHECK(!layers_.empty()) << "Call StartLayer before adding any edges"; if (v0 == v1 && (layer_options_.back().degenerate_edges() == GraphOptions::DegenerateEdges::DISCARD)) { return; } InputVertexId j0 = AddVertex(v0); InputVertexId j1 = AddVertex(v1); if (!tracker_.AddSpace(&input_edges_, 1)) return; input_edges_.push_back(InputEdge(j0, j1)); // If there are any labels, then attach them to this input edge. if (label_set_modified_) { if (label_set_ids_.empty()) { // Populate the missing entries with empty label sets. label_set_ids_.assign(input_edges_.size() - 1, label_set_id_); } label_set_id_ = label_set_lexicon_.Add(label_set_); label_set_ids_.push_back(label_set_id_); label_set_modified_ = false; } else if (!label_set_ids_.empty()) { label_set_ids_.push_back(label_set_id_); } } void S2Builder::AddPolyline(S2PointSpan polyline) { for (int i = 1; i < polyline.size(); ++i) { AddEdge(polyline[i - 1], polyline[i]); } } void S2Builder::AddPolyline(const S2Polyline& polyline) { const int n = polyline.num_vertices(); for (int i = 1; i < n; ++i) { AddEdge(polyline.vertex(i - 1), polyline.vertex(i)); } } void S2Builder::AddLoop(S2PointLoopSpan loop) { for (int i = 0; i < loop.size(); ++i) { AddEdge(loop[i], loop[i + 1]); } } void S2Builder::AddLoop(const S2Loop& loop) { // Ignore loops that do not have a boundary. if (loop.is_empty_or_full()) return; // For loops that represent holes, we add the edge from vertex n-1 to vertex // n-2 first. This is because these edges will be assembled into a // clockwise loop, which will eventually be normalized in S2Polygon by // calling S2Loop::Invert(). S2Loop::Invert() reverses the order of the // vertices, so to end up with the original vertex order (0, 1, ..., n-1) we // need to build a clockwise loop with vertex order (n-1, n-2, ..., 0). // This is done by adding the edge (n-1, n-2) first, and then ensuring that // Build() assembles loops starting from edges in the order they were added. const int n = loop.num_vertices(); for (int i = 0; i < n; ++i) { AddEdge(loop.oriented_vertex(i), loop.oriented_vertex(i + 1)); } } void S2Builder::AddPolygon(const S2Polygon& polygon) { for (int i = 0; i < polygon.num_loops(); ++i) { AddLoop(*polygon.loop(i)); } } void S2Builder::AddShape(const S2Shape& shape) { for (int e = 0, n = shape.num_edges(); e < n; ++e) { S2Shape::Edge edge = shape.edge(e); AddEdge(edge.v0, edge.v1); } } void S2Builder::AddIsFullPolygonPredicate(IsFullPolygonPredicate predicate) { layer_is_full_polygon_predicates_.back() = std::move(predicate); } void S2Builder::ForceVertex(const S2Point& vertex) { if (!tracker_.AddSpace(&sites_, 1)) return; sites_.push_back(vertex); } // An S2Shape used to represent the entire collection of S2Builder input edges. 
// Vertices are specified as indices into a vertex vector to save space. namespace { class VertexIdEdgeVectorShape final : public S2Shape { public: // Requires that "edges" is constant for the lifetime of this object. VertexIdEdgeVectorShape(const vector>& edges, const vector& vertices) : edges_(edges), vertices_(vertices) { } const S2Point& vertex0(int e) const { return vertex(edges_[e].first); } const S2Point& vertex1(int e) const { return vertex(edges_[e].second); } // S2Shape interface: int num_edges() const override { return edges_.size(); } Edge edge(int e) const override { return Edge(vertices_[edges_[e].first], vertices_[edges_[e].second]); } int dimension() const override { return 1; } ReferencePoint GetReferencePoint() const override { return ReferencePoint::Contained(false); } int num_chains() const override { return edges_.size(); } Chain chain(int i) const override { return Chain(i, 1); } Edge chain_edge(int i, int j) const override { return edge(i); } ChainPosition chain_position(int e) const override { return ChainPosition(e, 0); } private: const S2Point& vertex(int i) const { return vertices_[i]; } const vector>& edges_; const vector& vertices_; }; } // namespace bool S2Builder::Build(S2Error* error) { // S2_CHECK rather than S2_DCHECK because this is friendlier than crashing on the // "error->ok()" call below. It would be easy to allow (error == nullptr) // by declaring a local "tmp_error", but it seems better to make clients // think about error handling. S2_CHECK(error != nullptr); error_ = error; error_->Clear(); // Mark the end of the last layer. layer_begins_.push_back(input_edges_.size()); // See the algorithm overview at the top of this file. if (snapping_requested_ && !options_.idempotent()) { snapping_needed_ = true; } ChooseSites(); BuildLayers(); Reset(); if (!tracker_.ok()) *error_ = tracker_.error(); return error_->ok(); } void S2Builder::Reset() { // Note that these calls do not change vector capacities. input_vertices_.clear(); input_edges_.clear(); layers_.clear(); layer_options_.clear(); layer_begins_.clear(); layer_is_full_polygon_predicates_.clear(); label_set_ids_.clear(); label_set_lexicon_.Clear(); label_set_.clear(); label_set_modified_ = false; sites_.clear(); edge_sites_.clear(); snapping_needed_ = false; } void S2Builder::ChooseSites() { if (!tracker_.ok() || input_vertices_.empty()) return; // Note that although we always create an S2ShapeIndex, often it is not // actually built (because this happens lazily). Therefore we only test // its memory usage at the places where it is used. MutableS2ShapeIndex input_edge_index; input_edge_index.set_memory_tracker(tracker_.tracker()); input_edge_index.Add(make_unique(input_edges_, input_vertices_)); if (options_.split_crossing_edges()) { AddEdgeCrossings(input_edge_index); } if (snapping_requested_) { S2PointIndex site_index; auto _ = absl::MakeCleanup([&]() { tracker_.DoneSiteIndex(site_index); }); AddForcedSites(&site_index); ChooseInitialSites(&site_index); if (!tracker_.FixSiteIndexTally(site_index)) return; CollectSiteEdges(site_index); } if (snapping_needed_) { AddExtraSites(input_edge_index); } else { ChooseAllVerticesAsSites(); } } void S2Builder::ChooseAllVerticesAsSites() { // Sort the input vertices, discard duplicates, and use the result as the // list of sites. (We sort in the same order used by ChooseInitialSites() // to avoid inconsistencies in tests.) 
We also copy the result back to // input_vertices_ and update the input edges to use the new vertex // numbering (so that InputVertexId == SiteId). This simplifies the // implementation of SnapEdge() for this case. sites_.clear(); if (!tracker_.AddSpaceExact(&sites_, input_vertices_.size())) return; const int64 kTempPerVertex = sizeof(InputVertexKey) + sizeof(InputVertexId); if (!tracker_.TallyTemp(input_vertices_.size() * kTempPerVertex)) return; vector sorted = SortInputVertices(); vector vmap(input_vertices_.size()); for (int in = 0; in < sorted.size(); ) { const S2Point& site = input_vertices_[sorted[in].second]; vmap[sorted[in].second] = sites_.size(); while (++in < sorted.size() && input_vertices_[sorted[in].second] == site) { vmap[sorted[in].second] = sites_.size(); } sites_.push_back(site); } input_vertices_ = sites_; // Does not change allocated size. for (InputEdge& e : input_edges_) { e.first = vmap[e.first]; e.second = vmap[e.second]; } } vector S2Builder::SortInputVertices() { // Sort all the input vertices in the order that we wish to consider them as // candidate Voronoi sites. Any sort order will produce correct output, so // we have complete flexibility in choosing the sort key. We could even // leave them unsorted, although this would have the disadvantage that // changing the order of the input edges could cause S2Builder to snap to a // different set of Voronoi sites. // // We have chosen to sort them primarily by S2CellId since this improves the // performance of many S2Builder phases (due to better spatial locality). // It also allows the possibility of replacing the current S2PointIndex // approach with a more efficient recursive divide-and-conquer algorithm. // // However, sorting by leaf S2CellId alone has two small disadvantages in // the case where the candidate sites are densely spaced relative to the // snap radius (e.g., when using the IdentitySnapFunction, or when snapping // to E6/E7 near the poles, or snapping to S2CellId/E6/E7 using a snap // radius larger than the minimum value required): // // - First, it tends to bias the Voronoi site locations towards points that // are earlier on the S2CellId Hilbert curve. For example, suppose that // there are two parallel rows of input vertices on opposite sides of the // edge between two large S2Cells, and the rows are separated by less // than the snap radius. Then only vertices from the cell with the // smaller S2CellId are selected, because they are considered first and // prevent us from selecting the sites from the other cell (because they // are closer than "snap_radius" to an existing site). // // - Second, it tends to choose more Voronoi sites than necessary, because // at each step we choose the first site along the Hilbert curve that is // at least "snap_radius" away from all previously selected sites. This // tends to yield sites whose "coverage discs" overlap quite a bit, // whereas it would be better to cover all the input vertices with a // smaller set of coverage discs that don't overlap as much. (This is // the "geometric set cover problem", which is NP-hard.) // // It is not worth going to much trouble to fix these problems, because they // really aren't that important (and don't affect the guarantees made by the // algorithm), but here are a couple of heuristics that might help: // // 1. Sort the input vertices by S2CellId at a coarse level (down to cells // that are O(snap_radius) in size), and then sort by a fingerprint of the // S2Point coordinates (i.e., quasi-randomly). 
This would retain most of // the advantages of S2CellId sorting, but makes it more likely that we will // select sites that are further apart. // // 2. Rather than choosing the first uncovered input vertex and snapping it // to obtain the next Voronoi site, instead look ahead through following // candidates in S2CellId order and choose the furthest candidate whose // snapped location covers all previous uncovered input vertices. // // TODO(ericv): Experiment with these approaches. vector keys; keys.reserve(input_vertices_.size()); for (InputVertexId i = 0; i < input_vertices_.size(); ++i) { keys.push_back(InputVertexKey(S2CellId(input_vertices_[i]), i)); } std::sort(keys.begin(), keys.end(), [this](const InputVertexKey& a, const InputVertexKey& b) { if (a.first < b.first) return true; if (b.first < a.first) return false; return input_vertices_[a.second] < input_vertices_[b.second]; }); return keys; } // Check all edge pairs for crossings, and add the corresponding intersection // points to input_vertices_. (The intersection points will be snapped and // merged with the other vertices during site selection.) void S2Builder::AddEdgeCrossings(const MutableS2ShapeIndex& input_edge_index) { input_edge_index.ForceBuild(); if (!tracker_.ok()) return; // We need to build a list of intersections and add them afterwards so that // we don't reallocate vertices_ during the VisitCrossings() call. vector new_vertices; auto _ = absl::MakeCleanup([&]() { tracker_.Untally(new_vertices); }); s2shapeutil::VisitCrossingEdgePairs( input_edge_index, s2shapeutil::CrossingType::INTERIOR, [this, &new_vertices](const s2shapeutil::ShapeEdge& a, const s2shapeutil::ShapeEdge& b, bool) { if (!tracker_.AddSpace(&new_vertices, 1)) return false; new_vertices.push_back( S2::GetIntersection(a.v0(), a.v1(), b.v0(), b.v1())); return true; }); if (new_vertices.empty()) return; snapping_needed_ = true; if (!tracker_.AddSpaceExact(&input_vertices_, new_vertices.size())) return; input_vertices_.insert(input_vertices_.end(), new_vertices.begin(), new_vertices.end()); } void S2Builder::AddForcedSites(S2PointIndex* site_index) { // Sort the forced sites and remove duplicates. std::sort(sites_.begin(), sites_.end()); sites_.erase(std::unique(sites_.begin(), sites_.end()), sites_.end()); // Add the forced sites to the index. for (SiteId id = 0; id < sites_.size(); ++id) { if (!tracker_.TallyIndexedSite()) return; site_index->Add(sites_[id], id); } num_forced_sites_ = sites_.size(); } void S2Builder::ChooseInitialSites(S2PointIndex* site_index) { // Prepare to find all points whose distance is <= min_site_separation_ca_. S2ClosestPointQueryOptions options; options.set_conservative_max_distance(min_site_separation_ca_); S2ClosestPointQuery site_query(site_index, options); vector::Result> results; // Apply the snap_function() to each input vertex, then check whether any // existing site is closer than min_vertex_separation(). If not, then add a // new site. // // NOTE(ericv): There are actually two reasonable algorithms, which we call // "snap first" (the one above) and "snap last". The latter checks for each // input vertex whether any existing site is closer than snap_radius(), and // only then applies the snap_function() and adds a new site. "Snap last" // can yield slightly fewer sites in some cases, but it is also more // expensive and can produce surprising results. 
For example, if you snap // the polyline "0:0, 0:0.7" using IntLatLngSnapFunction(0), the result is // "0:0, 0:0" rather than the expected "0:0, 0:1", because the snap radius // is approximately sqrt(2) degrees and therefore it is legal to snap both // input points to "0:0". "Snap first" produces "0:0, 0:1" as expected. // // Track the memory used by SortInputVertices() before calling it. if (!tracker_.Tally(input_vertices_.size() * sizeof(InputVertexKey))) return; vector sorted_keys = SortInputVertices(); auto _ = absl::MakeCleanup([&]() { tracker_.Untally(sorted_keys); }); for (const InputVertexKey& key : sorted_keys) { const S2Point& vertex = input_vertices_[key.second]; S2Point site = SnapSite(vertex); // If any vertex moves when snapped, the output cannot be idempotent. snapping_needed_ = snapping_needed_ || site != vertex; bool add_site = true; if (site_snap_radius_ca_ == S1ChordAngle::Zero()) { add_site = sites_.empty() || site != sites_.back(); } else { // FindClosestPoints() measures distances conservatively, so we need to // recheck the distances using exact predicates. // // NOTE(ericv): When the snap radius is large compared to the average // vertex spacing, we could possibly avoid the call the FindClosestPoints // by checking whether sites_.back() is close enough. S2ClosestPointQueryPointTarget target(site); site_query.FindClosestPoints(&target, &results); for (const auto& result : results) { if (s2pred::CompareDistance(site, result.point(), min_site_separation_ca_) <= 0) { add_site = false; // This pair of sites is too close. If the sites are distinct, then // the output cannot be idempotent. snapping_needed_ = snapping_needed_ || site != result.point(); } } } if (add_site) { if (!tracker_.TallyIndexedSite()) return; site_index->Add(site, sites_.size()); if (!tracker_.AddSpace(&sites_, 1)) return; sites_.push_back(site); site_query.ReInit(); } } } S2Point S2Builder::SnapSite(const S2Point& point) const { if (!snapping_requested_) return point; S2Point site = options_.snap_function().SnapPoint(point); S1ChordAngle dist_moved(site, point); if (dist_moved > site_snap_radius_ca_) { error_->Init(S2Error::BUILDER_SNAP_RADIUS_TOO_SMALL, "Snap function moved vertex (%.15g, %.15g, %.15g) " "by %.15g, which is more than the specified snap " "radius of %.15g", point.x(), point.y(), point.z(), dist_moved.ToAngle().radians(), site_snap_radius_ca_.ToAngle().radians()); } return site; } // For each edge, find all sites within edge_site_query_radius_ca_ and // store them in edge_sites_. Also, to implement idempotency this method also // checks whether the input vertices and edges may already satisfy the output // criteria. If any problems are found then snapping_needed_ is set to true. void S2Builder::CollectSiteEdges(const S2PointIndex& site_index) { // Find all points whose distance is <= edge_site_query_radius_ca_. // // Memory used by S2ClosestPointQuery is not tracked, but it is temporary, // typically insignificant, and does not affect the high water mark. S2ClosestPointQueryOptions options; options.set_conservative_max_distance(edge_site_query_radius_ca_); S2ClosestPointQuery site_query(&site_index, options); vector::Result> results; if (!tracker_.AddSpaceExact(&edge_sites_, input_edges_.size())) return; edge_sites_.resize(input_edges_.size()); // Construct all elements. 
for (InputEdgeId e = 0; e < input_edges_.size(); ++e) { const InputEdge& edge = input_edges_[e]; const S2Point& v0 = input_vertices_[edge.first]; const S2Point& v1 = input_vertices_[edge.second]; if (s2builder_verbose) { std::cout << "S2Polyline: " << s2textformat::ToString(v0) << ", " << s2textformat::ToString(v1) << "\n"; } S2ClosestPointQueryEdgeTarget target(v0, v1); site_query.FindClosestPoints(&target, &results); auto* sites = &edge_sites_[e]; sites->reserve(results.size()); for (const auto& result : results) { sites->push_back(result.data()); if (!snapping_needed_ && result.distance() < min_edge_site_separation_ca_limit_ && result.point() != v0 && result.point() != v1 && s2pred::CompareEdgeDistance(result.point(), v0, v1, min_edge_site_separation_ca_) < 0) { snapping_needed_ = true; } } SortSitesByDistance(v0, sites); if (!tracker_.TallyEdgeSites(*sites)) return; } } // Sorts the sites in increasing order of distance to X. void S2Builder::SortSitesByDistance(const S2Point& x, compact_array<SiteId>* sites) const { std::sort(sites->begin(), sites->end(), [&x, this](SiteId i, SiteId j) { return s2pred::CompareDistances(x, sites_[i], sites_[j]) < 0; }); } // Like the above, but inserts "new_site_id" into an already-sorted list. void S2Builder::InsertSiteByDistance(SiteId new_site_id, const S2Point& x, compact_array<SiteId>* sites) { if (!tracker_.ReserveEdgeSite(sites)) return; sites->insert(std::lower_bound( sites->begin(), sites->end(), new_site_id, [&x, this](SiteId i, SiteId j) { return s2pred::CompareDistances(x, sites_[i], sites_[j]) < 0; }), new_site_id); } // There are two situations where we need to add extra Voronoi sites in order to // ensure that the snapped edges meet the output requirements: // // (1) If a snapped edge deviates from its input edge by more than // max_edge_deviation(), we add a new site on the input edge near the // middle of the snapped edge. This causes the snapped edge to split // into two pieces, so that it follows the input edge more closely. // // (2) If a snapped edge is closer than min_edge_vertex_separation() to any // nearby site (the "site to avoid") or passes on the wrong side of it // relative to the input edge, then we add a new site (the "separation // site") along the input edge near the site to avoid. This causes the // snapped edge to follow the input edge more closely, so that it is // guaranteed to pass on the correct side of the site to avoid with a // separation of at least the required distance. // // We check these conditions by snapping all the input edges to a chain of // Voronoi sites and then testing each edge in the chain. If a site needs to // be added, we mark all nearby edges for re-snapping. void S2Builder::AddExtraSites(const MutableS2ShapeIndex& input_edge_index) { // Note that we could save some work in AddSnappedEdges() by saving the // snapped edge chains in a vector, but currently this is not worthwhile // since SnapEdge() accounts for less than 5% of the runtime. // Note that we intentionally use dense_hash_set rather than flat_hash_set // in order to ensure that iteration is deterministic when debugging. dense_hash_set<InputEdgeId> edges_to_resnap(16 /*expected_max_elements*/); edges_to_resnap.set_empty_key(-1); edges_to_resnap.set_deleted_key(-2); vector<SiteId> chain; // Temporary storage. int num_edges_after_snapping = 0; // CheckEdge() defines the body of the loops below.
const auto CheckEdge = [&](InputEdgeId e) -> bool { if (!tracker_.ok()) return false; SnapEdge(e, &chain); edges_to_resnap.erase(e); num_edges_after_snapping += chain.size(); MaybeAddExtraSites(e, chain, input_edge_index, &edges_to_resnap); return true; }; // The first pass is different because we snap every edge. In the following // passes we only snap edges that are near the extra sites that were added. S2_VLOG(1) << "Before pass 0: sites=" << sites_.size(); for (InputEdgeId e = 0; e < input_edges_.size(); ++e) { if (!CheckEdge(e)) return; } S2_VLOG(1) << "Pass 0: edges snapped=" << input_edges_.size() << ", output edges=" << num_edges_after_snapping << ", sites=" << sites_.size(); for (int num_passes = 1; !edges_to_resnap.empty(); ++num_passes) { auto edges_to_snap = edges_to_resnap; edges_to_resnap.clear(); num_edges_after_snapping = 0; for (InputEdgeId e : edges_to_snap) { if (!CheckEdge(e)) return; } S2_VLOG(1) << "Pass " << num_passes << ": edges snapped=" << edges_to_snap.size() << ", output edges=" << num_edges_after_snapping << ", sites=" << sites_.size(); } } void S2Builder::MaybeAddExtraSites( InputEdgeId edge_id, const vector& chain, const MutableS2ShapeIndex& input_edge_index, dense_hash_set* edges_to_resnap) { // If the memory tracker has a periodic callback function, tally an amount // of memory proportional to the work being done so that the caller has an // opportunity to cancel the operation if necessary. if (!tracker_.TallyTemp(chain.size() * sizeof(chain[0]))) return; // If the input includes NaN vertices, snapping can produce an empty chain. if (chain.empty()) return; // The snapped edge chain is always a subsequence of the nearby sites // (edge_sites_), so we walk through the two arrays in parallel looking for // sites that weren't snapped. These are the "sites to avoid". We also keep // track of the current snapped edge, since it is the only edge that can be // too close or pass on the wrong side of a site to avoid. Vertices beyond // the chain endpoints in either direction can be ignored because only the // interiors of chain edges can be too close to a site to avoid. const InputEdge& edge = input_edges_[edge_id]; const S2Point& a0 = input_vertices_[edge.first]; const S2Point& a1 = input_vertices_[edge.second]; const auto& nearby_sites = edge_sites_[edge_id]; for (int i = 0, j = 0; j < nearby_sites.size(); ++j) { SiteId id = nearby_sites[j]; if (id == chain[i]) { // This site is a vertex of the snapped edge chain. if (++i == chain.size()) { break; // Sites beyond the end of the snapped chain can be ignored. } // Check whether this snapped edge deviates too far from its original // position. If so, we split the edge by adding an extra site. const S2Point& v0 = sites_[chain[i - 1]]; const S2Point& v1 = sites_[chain[i]]; if (S1ChordAngle(v0, v1) < min_edge_length_to_split_ca_) continue; if (!S2::IsEdgeBNearEdgeA(a0, a1, v0, v1, max_edge_deviation_)) { // Add a new site on the input edge, positioned so that it splits the // snapped edge into two approximately equal pieces. Then we find all // the edges near the new site (including this one) and add them to // the snap queue. // // Note that with large snap radii, it is possible that the snapped // edge wraps around the sphere the "wrong way". To handle this we // find the preferred split location by projecting both endpoints onto // the input edge and taking their midpoint. 
S2Point mid = (S2::Project(v0, a0, a1) + S2::Project(v1, a0, a1)).Normalize(); S2Point new_site = GetSeparationSite(mid, v0, v1, edge_id); AddExtraSite(new_site, input_edge_index, edges_to_resnap); // In the case where the edge wrapped around the sphere the "wrong // way", it is not safe to continue checking this edge. It will be // marked for resnapping and we will come back to it in the next pass. return; } } else { // This site is near the input edge but is not part of the snapped chain. if (i == 0) { continue; // Sites before the start of the chain can be ignored. } // We need to ensure that non-forced sites are separated by at least // min_edge_vertex_separation() from the snapped chain. This happens // automatically as part of the algorithm except where there are portions // of the input edge that are not within edge_snap_radius() of any site. // These portions of the original edge are called "coverage gaps". // Therefore if we find a site to avoid that is too close to the // snapped edge chain, we can fix the problem by adding a new site (the // "separation site") in the corresponding coverage gap located as closely // as possible to the site to avoid. This technique is guaranteed to // produce the required minimum separation, and the entire process of // adding separation sites is guaranteed to terminate. const S2Point& site_to_avoid = sites_[id]; const S2Point& v0 = sites_[chain[i - 1]]; const S2Point& v1 = sites_[chain[i]]; bool add_separation_site = false; if (!is_forced(id) && min_edge_site_separation_ca_ > S1ChordAngle::Zero() && s2pred::CompareEdgeDistance( site_to_avoid, v0, v1, min_edge_site_separation_ca_) < 0) { add_separation_site = true; } // Similarly, we also add a separation site whenever a snapped edge passes // on the wrong side of a site to avoid. Normally we don't need to worry // about this, since if an edge passes on the wrong side of a nearby site // then it is also too close to it. However if the snap radius is very // small and intersection_tolerance() is non-zero then we need to check // this condition explicitly (see the "check_all_site_crossings_" flag for // details). We also need to check this condition explicitly for forced // vertices. Again, we can solve this problem by adding a "separation // site" in the corresponding coverage gap located as closely as possible // to the site to avoid. // // It is possible to show that when all points are projected onto the // great circle through (a0, a1), no improper crossing occurs unless // the site to avoid is located between a0 and a1, and also between v0 // and v1. TODO(ericv): Verify whether all these checks are necessary. if (!add_separation_site && (is_forced(id) || check_all_site_crossings_) && (s2pred::Sign(a0, a1, site_to_avoid) != s2pred::Sign(v0, v1, site_to_avoid)) && s2pred::CompareEdgeDirections(a0, a1, a0, site_to_avoid) > 0 && s2pred::CompareEdgeDirections(a0, a1, site_to_avoid, a1) > 0 && s2pred::CompareEdgeDirections(a0, a1, v0, site_to_avoid) > 0 && s2pred::CompareEdgeDirections(a0, a1, site_to_avoid, v1) > 0) { add_separation_site = true; } if (add_separation_site) { // We add a new site (the separation site) in the coverage gap along the // input edge, located as closely as possible to the site to avoid. // Then we find all the edges near the new site (including this one) and // add them to the snap queue. S2Point new_site = GetSeparationSite(site_to_avoid, v0, v1, edge_id); S2_DCHECK_NE(site_to_avoid, new_site); AddExtraSite(new_site, input_edge_index, edges_to_resnap); // Skip the remaining sites near this chain edge, and then continue // scanning this chain. Note that this is safe even though the call // to AddExtraSite() above added a new site to "nearby_sites". for (; nearby_sites[j + 1] != chain[i]; ++j) {} } } } }
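// The check_all_site_crossings_ and forced-vertex cases handled above matter
// mainly for clients that enable crossing-edge handling or pin vertices
// explicitly.  A minimal sketch of the options involved, given as a comment
// only (the snap function and tolerance values are illustrative, not taken
// from this file):
//
//   S2Builder::Options options(
//       s2builderutil::IdentitySnapFunction(S1Angle::Zero()));
//   options.set_split_crossing_edges(true);  // add sites at edge crossings
//   options.set_intersection_tolerance(S1Angle::E7(1));  // see AddIntersection()
//   S2Builder builder(options);
//   builder.ForceVertex(some_vertex_that_must_appear_in_the_output);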
// Adds a new site, then updates "edge_sites_" for all edges near the new site // and adds them to "edges_to_resnap" for resnapping. void S2Builder::AddExtraSite(const S2Point& new_site, const MutableS2ShapeIndex& input_edge_index, dense_hash_set<InputEdgeId>* edges_to_resnap) { if (!sites_.empty()) S2_DCHECK_NE(new_site, sites_.back()); if (!tracker_.AddSpace(&sites_, 1)) return; SiteId new_site_id = sites_.size(); sites_.push_back(new_site); // Find all edges whose distance is <= edge_site_query_radius_ca_. S2ClosestEdgeQuery::Options options; options.set_conservative_max_distance(edge_site_query_radius_ca_); options.set_include_interiors(false); if (!input_edge_index.is_fresh()) input_edge_index.ForceBuild(); if (!tracker_.ok()) return; // Memory used by S2ClosestEdgeQuery is not tracked, but it is temporary, // typically insignificant, and does not affect the high water mark. S2ClosestEdgeQuery query(&input_edge_index, options); S2ClosestEdgeQuery::PointTarget target(new_site); for (const auto& result : query.FindClosestEdges(&target)) { InputEdgeId e = result.edge_id(); const S2Point& v0 = input_vertices_[input_edges_[e].first]; InsertSiteByDistance(new_site_id, v0, &edge_sites_[e]); edges_to_resnap->insert(e); } } S2Point S2Builder::GetSeparationSite(const S2Point& site_to_avoid, const S2Point& v0, const S2Point& v1, InputEdgeId input_edge_id) const { // Define the "coverage disc" of a site S to be the disc centered at S with // radius "snap_radius". Similarly, define the "coverage interval" of S for // an edge XY to be the intersection of XY with the coverage disc of S. The // SnapFunction implementations guarantee that the only way that a snapped // edge can be closer than min_edge_vertex_separation() to a non-snapped // site (i.e., site_to_avoid) is if there is a gap in the coverage of XY // near this site. We can fix this problem simply by adding a new site to // fill this gap, located as closely as possible to the site to avoid. // // To calculate the coverage gap, we look at the two snapped sites on // either side of site_to_avoid, and find the endpoints of their coverage // intervals. Then we place a new site in the gap, located as closely as // possible to the site to avoid. Note that the new site may move when it // is snapped by the snap_function, but it is guaranteed not to move by // more than snap_radius and therefore its coverage interval will still // intersect the gap.
const InputEdge& edge = input_edges_[input_edge_id]; const S2Point& x = input_vertices_[edge.first]; const S2Point& y = input_vertices_[edge.second]; Vector3_d xy_dir = y - x; S2Point n = S2::RobustCrossProd(x, y); S2Point new_site = S2::Project(site_to_avoid, x, y, n); S2Point gap_min = GetCoverageEndpoint(v0, x, y, n); S2Point gap_max = GetCoverageEndpoint(v1, y, x, -n); if ((new_site - gap_min).DotProd(xy_dir) < 0) { new_site = gap_min; } else if ((gap_max - new_site).DotProd(xy_dir) < 0) { new_site = gap_max; } new_site = SnapSite(new_site); S2_DCHECK_NE(v0, new_site); S2_DCHECK_NE(v1, new_site); return new_site; } // Given a site P and an edge XY with normal N, intersect XY with the disc of // radius snap_radius() around P, and return the intersection point that is // further along the edge XY toward Y. S2Point S2Builder::GetCoverageEndpoint(const S2Point& p, const S2Point& x, const S2Point& y, const S2Point& n) const { // Consider the plane perpendicular to P that cuts off a spherical cap of // radius snap_radius(). This plane intersects the plane through the edge // XY (perpendicular to N) along a line, and that line intersects the unit // sphere at two points Q and R, and we want to return the point R that is // further along the edge XY toward Y. // // Let M be the midpoint of QR. This is the point along QR that is closest // to P. We can now express R as the sum of two perpendicular vectors OM // and MR in the plane XY. Vector MR is in the direction N x P, while // vector OM is in the direction (N x P) x N, where N = X x Y. // // The length of OM can be found using the Pythagorean theorem on triangle // OPM, and the length of MR can be found using the Pythagorean theorem on // triangle OMR. // // In the calculations below, we save some work by scaling all the vectors // by n.CrossProd(p).Norm2(), and normalizing at the end. double n2 = n.Norm2(); double nDp = n.DotProd(p); S2Point nXp = n.CrossProd(p); S2Point nXpXn = n2 * p - nDp * n; Vector3_d om = sqrt(1 - edge_snap_radius_sin2_) * nXpXn; double mr2 = edge_snap_radius_sin2_ * n2 - nDp * nDp; // MR is constructed so that it points toward Y (rather than X). Vector3_d mr = sqrt(max(0.0, mr2)) * nXp; return (om + mr).Normalize(); } void S2Builder::SnapEdge(InputEdgeId e, vector* chain) const { chain->clear(); const InputEdge& edge = input_edges_[e]; if (!snapping_needed_) { // Note that the input vertices have been renumbered such that // InputVertexId and SiteId are the same (see ChooseAllVerticesAsSites). chain->push_back(edge.first); chain->push_back(edge.second); return; } const S2Point& x = input_vertices_[edge.first]; const S2Point& y = input_vertices_[edge.second]; // Optimization: if there is only one nearby site, return. // Optimization: if there are exactly two nearby sites, and one is close // enough to each vertex, then return. // Now iterate through the sites. We keep track of the sequence of sites // that are visited. const auto& candidates = edge_sites_[e]; for (SiteId site_id : candidates) { const S2Point& c = sites_[site_id]; // Skip any sites that are too far away. (There will be some of these, // because we also keep track of "sites to avoid".) Note that some sites // may be close enough to the line containing the edge, but not to the // edge itself, so we can just use the dot product with the edge normal. if (s2pred::CompareEdgeDistance(c, x, y, edge_snap_radius_ca_) > 0) { continue; } // Check whether the new site C excludes the previous site B. If so, // repeat with the previous site, and so on. 
bool add_site_c = true; for (; !chain->empty(); chain->pop_back()) { S2Point b = sites_[chain->back()]; // First, check whether B and C are so far apart that their clipped // Voronoi regions can't intersect. S1ChordAngle bc(b, c); if (bc >= max_adjacent_site_separation_ca_) break; // Otherwise, we want to check whether site C prevents the Voronoi // region of B from intersecting XY, or vice versa. This can be // determined by computing the "coverage interval" (the segment of XY // intersected by the coverage disc of radius snap_radius) for each // site. If the coverage interval of one site contains the coverage // interval of the other, then the contained site can be excluded. s2pred::Excluded result = s2pred::GetVoronoiSiteExclusion( b, c, x, y, edge_snap_radius_ca_); if (result == s2pred::Excluded::FIRST) continue; // Site B excluded by C if (result == s2pred::Excluded::SECOND) { add_site_c = false; // Site C is excluded by B. break; } S2_DCHECK_EQ(s2pred::Excluded::NEITHER, result); // Otherwise check whether the previous site A is close enough to B and // C that it might further clip the Voronoi region of B. if (chain->size() < 2) break; S2Point a = sites_[chain->end()[-2]]; S1ChordAngle ac(a, c); if (ac >= max_adjacent_site_separation_ca_) break; // If triangles ABC and XYB have the same orientation, the circumcenter // Z of ABC is guaranteed to be on the same side of XY as B. int xyb = s2pred::Sign(x, y, b); if (s2pred::Sign(a, b, c) == xyb) { break; // The circumcenter is on the same side as B but further away. } // Other possible optimizations: // - if AB > max_adjacent_site_separation_ca_ then keep B. // - if d(B, XY) < 0.5 * min(AB, BC) then keep B. // If the circumcenter of ABC is on the same side of XY as B, then B is // excluded by A and C combined. Otherwise B is needed and we can exit. if (s2pred::EdgeCircumcenterSign(x, y, a, b, c) != xyb) break; } if (add_site_c) { chain->push_back(site_id); } } S2_DCHECK(!chain->empty()); if (google::DEBUG_MODE) { for (SiteId site_id : candidates) { if (s2pred::CompareDistances(y, sites_[chain->back()], sites_[site_id]) > 0) { S2_LOG(ERROR) << "Snapping invariant broken!"; } } } if (s2builder_verbose) { std::cout << "(" << edge.first << "," << edge.second << "): "; for (SiteId id : *chain) std::cout << id << " "; std::cout << std::endl; } } void S2Builder::BuildLayers() { if (!tracker_.ok()) return; // Each output edge has an "input edge id set id" (an int32) representing // the set of input edge ids that were snapped to this edge. The actual // InputEdgeIds can be retrieved using "input_edge_id_set_lexicon". vector> layer_edges; vector> layer_input_edge_ids; IdSetLexicon input_edge_id_set_lexicon; vector> layer_vertices; BuildLayerEdges(&layer_edges, &layer_input_edge_ids, &input_edge_id_set_lexicon); auto _ = absl::MakeCleanup([&]() { for (int i = 0; i < layers_.size(); ++i) { tracker_.Untally(layer_edges[i]); tracker_.Untally(layer_input_edge_ids[i]); if (!layer_vertices.empty()) tracker_.Untally(layer_vertices[i]); } }); // If there are a large number of layers, then we build a minimal subset of // vertices for each layer. This ensures that layer types that iterate over // vertices will run in time proportional to the size of that layer rather // than the size of all layers combined. static const int kMinLayersForVertexFiltering = 10; if (layers_.size() >= kMinLayersForVertexFiltering) { // Disable vertex filtering if it is disallowed by any layer. 
(This could // be optimized, but in current applications either all layers allow // filtering or none of them do.) bool allow_vertex_filtering = true; for (const auto& options : layer_options_) { allow_vertex_filtering &= options.allow_vertex_filtering(); } if (allow_vertex_filtering) { // Track the temporary memory used by FilterVertices(). Note that // although vertex filtering can increase the number of vertices stored // (i.e., if the same vertex is referred to by multiple layers), it // never increases storage quadratically because there can be at most // two filtered vertices per edge. if (!tracker_.TallyFilterVertices(sites_.size(), layer_edges)) return; auto _ = absl::MakeCleanup([this]() { tracker_.DoneFilterVertices(); }); layer_vertices.resize(layers_.size()); vector filter_tmp; // Temporary used by FilterVertices. for (int i = 0; i < layers_.size(); ++i) { layer_vertices[i] = Graph::FilterVertices(sites_, &layer_edges[i], &filter_tmp); if (!tracker_.Tally(layer_vertices[i])) return; } tracker_.Clear(&sites_); // Releases memory. } } if (!tracker_.ok()) return; for (int i = 0; i < layers_.size(); ++i) { const vector& vertices = (layer_vertices.empty() ? sites_ : layer_vertices[i]); Graph graph(layer_options_[i], &vertices, &layer_edges[i], &layer_input_edge_ids[i], &input_edge_id_set_lexicon, &label_set_ids_, &label_set_lexicon_, layer_is_full_polygon_predicates_[i]); layers_[i]->Build(graph, error_); // Don't free the layer data until all layers have been built, in order to // support building multiple layers at once (e.g. ClosedSetNormalizer). } } static void DumpEdges(const vector& edges, const vector& vertices) { for (const auto& e : edges) { vector v; v.push_back(vertices[e.first]); v.push_back(vertices[e.second]); std::cout << "S2Polyline: " << s2textformat::ToString(v) << "(" << e.first << "," << e.second << ")" << std::endl; } } // Snaps and possibly simplifies the edges for each layer, populating the // given output arguments. The resulting edges can be used to construct an // S2Builder::Graph directly (no further processing is necessary). // // This method is not "const" because Graph::ProcessEdges can modify // layer_options_ in some cases (changing undirected edges to directed ones). void S2Builder::BuildLayerEdges( vector>* layer_edges, vector>* layer_input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon) { // Edge chains are simplified only when a non-zero snap radius is specified. // If so, we build a map from each site to the set of input vertices that // snapped to that site. (Note that site_vertices is relatively small and // that its memory tracking is deferred until TallySimplifyEdgeChains.) vector> site_vertices; bool simplify = snapping_needed_ && options_.simplify_edge_chains(); if (simplify) site_vertices.resize(sites_.size()); layer_edges->resize(layers_.size()); layer_input_edge_ids->resize(layers_.size()); for (int i = 0; i < layers_.size(); ++i) { AddSnappedEdges(layer_begins_[i], layer_begins_[i+1], layer_options_[i], &(*layer_edges)[i], &(*layer_input_edge_ids)[i], input_edge_id_set_lexicon, &site_vertices); } // We simplify edge chains before processing the per-layer GraphOptions // because simplification can create duplicate edges and/or sibling edge // pairs which may need to be removed. if (simplify) { SimplifyEdgeChains(site_vertices, layer_edges, layer_input_edge_ids, input_edge_id_set_lexicon); vector>().swap(site_vertices); } // At this point we have no further need for nearby site data, so we clear // it to save space. 
We keep input_vertices_ and input_edges_ so that // S2Builder::Layer implementations can access them if desired. (This is // useful for determining how snapping has changed the input geometry.) tracker_.ClearEdgeSites(&edge_sites_); for (int i = 0; i < layers_.size(); ++i) { // The errors generated by ProcessEdges are really warnings, so we simply // record them and continue. Graph::ProcessEdges(&layer_options_[i], &(*layer_edges)[i], &(*layer_input_edge_ids)[i], input_edge_id_set_lexicon, error_, &tracker_); if (!tracker_.ok()) return; } } // Snaps all the input edges for a given layer, populating the given output // arguments. If (*site_vertices) is non-empty then it is updated so that // (*site_vertices)[site] contains a list of all input vertices that were // snapped to that site. void S2Builder::AddSnappedEdges( InputEdgeId begin, InputEdgeId end, const GraphOptions& options, vector* edges, vector* input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon, vector>* site_vertices) { bool discard_degenerate_edges = (options.degenerate_edges() == GraphOptions::DegenerateEdges::DISCARD); vector chain; for (InputEdgeId e = begin; e < end; ++e) { InputEdgeIdSetId id = input_edge_id_set_lexicon->AddSingleton(e); SnapEdge(e, &chain); int num_snapped_edges = max(1, chain.size() - 1); if (options.edge_type() == EdgeType::UNDIRECTED) num_snapped_edges *= 2; if (!tracker_.AddSpace(edges, num_snapped_edges)) return; if (!tracker_.AddSpace(input_edge_ids, num_snapped_edges)) return; MaybeAddInputVertex(input_edges_[e].first, chain[0], site_vertices); if (chain.size() == 1) { if (discard_degenerate_edges) continue; AddSnappedEdge(chain[0], chain[0], id, options.edge_type(), edges, input_edge_ids); } else { MaybeAddInputVertex(input_edges_[e].second, chain.back(), site_vertices); for (int i = 1; i < chain.size(); ++i) { AddSnappedEdge(chain[i-1], chain[i], id, options.edge_type(), edges, input_edge_ids); } } } if (s2builder_verbose) DumpEdges(*edges, sites_); } // If "site_vertices" is non-empty, ensures that (*site_vertices)[id] contains // "v". Duplicate entries are allowed. The purpose of this function is to // build a map so that SimplifyEdgeChains() can quickly find all the input // vertices that snapped to a particular site. inline void S2Builder::MaybeAddInputVertex( InputVertexId v, SiteId id, vector>* site_vertices) const { if (site_vertices->empty()) return; // Optimization: check if we just added this vertex. This is worthwhile // because the input edges usually form a continuous chain, i.e. the // destination of one edge is the same as the source of the next edge. auto& vertices = (*site_vertices)[id]; if (vertices.empty() || vertices.back() != v) { // Memory tracking is deferred until SimplifyEdgeChains. vertices.push_back(v); } } // Adds the given edge to "edges" and "input_edge_ids". If undirected edges // are being used, also adds an edge in the opposite direction. inline void S2Builder::AddSnappedEdge( SiteId src, SiteId dst, InputEdgeIdSetId id, EdgeType edge_type, vector* edges, vector* input_edge_ids) const { edges->push_back(Edge(src, dst)); input_edge_ids->push_back(id); if (edge_type == EdgeType::UNDIRECTED) { edges->push_back(Edge(dst, src)); // Automatically created edges do not have input edge ids or labels. This // can be used to distinguish the original direction of the undirected edge. input_edge_ids->push_back(IdSetLexicon::EmptySetId()); } } // A class that encapsulates the state needed for simplifying edge chains. 
class S2Builder::EdgeChainSimplifier { public: // The graph "g" contains all edges from all layers. "edge_layers" // indicates the original layer for each edge. "site_vertices" is a map // from SiteId to the set of InputVertexIds that were snapped to that site. // "layer_edges" and "layer_input_edge_ids" are output arguments where the // simplified edge chains will be placed. The input and output edges are // not sorted. EdgeChainSimplifier( const S2Builder& builder, const Graph& g, const vector& edge_layers, const vector>& site_vertices, vector>* layer_edges, vector>* layer_input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon); void Run(); private: using VertexId = Graph::VertexId; class InteriorVertexMatcher; void OutputEdge(EdgeId e); int graph_edge_layer(EdgeId e) const; int input_edge_layer(InputEdgeId id) const; bool IsInterior(VertexId v); void SimplifyChain(VertexId v0, VertexId v1); Graph::VertexId FollowChain(VertexId v0, VertexId v1) const; void OutputAllEdges(VertexId v0, VertexId v1); bool TargetInputVertices(VertexId v, S2PolylineSimplifier* simplifier) const; bool AvoidSites(VertexId v0, VertexId v1, VertexId v2, dense_hash_set* used_vertices, S2PolylineSimplifier* simplifier) const; void MergeChain(const vector& vertices); void AssignDegenerateEdges( const vector& degenerate_ids, vector>* merged_ids) const; // LINT.IfChange const S2Builder& builder_; const Graph& g_; Graph::VertexInMap in_; Graph::VertexOutMap out_; const vector& edge_layers_; const vector>& site_vertices_; vector>* layer_edges_; vector>* layer_input_edge_ids_; IdSetLexicon* input_edge_id_set_lexicon_; // Convenience member copied from builder_. const std::vector& layer_begins_; // is_interior_[v] indicates that VertexId "v" is eligible to be an interior // vertex of a simplified edge chain. You can think of it as vertex whose // indegree and outdegree are both 1 (although the actual definition is a // bit more complicated because of duplicate edges and layers). vector is_interior_; // used_[e] indicates that EdgeId "e" has already been processed. vector used_; // Temporary objects declared here to avoid repeated allocation. vector tmp_vertices_; vector tmp_edges_; dense_hash_set tmp_vertex_set_; // The output edges after simplification. vector new_edges_; vector new_input_edge_ids_; vector new_edge_layers_; }; // Simplifies edge chains, updating its input/output arguments as necessary. void S2Builder::SimplifyEdgeChains( const vector>& site_vertices, vector>* layer_edges, vector>* layer_input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon) { if (layers_.empty()) return; if (!tracker_.TallySimplifyEdgeChains(site_vertices, *layer_edges)) return; // Merge the edges from all layers (in order to build a single graph). vector merged_edges; vector merged_input_edge_ids; vector merged_edge_layers; MergeLayerEdges(*layer_edges, *layer_input_edge_ids, &merged_edges, &merged_input_edge_ids, &merged_edge_layers); // The following fields will be reconstructed by EdgeChainSimplifier. for (auto& edges : *layer_edges) edges.clear(); for (auto& input_edge_ids : *layer_input_edge_ids) input_edge_ids.clear(); // The graph options are irrelevant for edge chain simplification, but we // try to set them appropriately anyway. 
S2Builder::GraphOptions graph_options(EdgeType::DIRECTED, GraphOptions::DegenerateEdges::KEEP, GraphOptions::DuplicateEdges::KEEP, GraphOptions::SiblingPairs::KEEP); Graph graph(graph_options, &sites_, &merged_edges, &merged_input_edge_ids, input_edge_id_set_lexicon, nullptr, nullptr, IsFullPolygonPredicate()); EdgeChainSimplifier simplifier( *this, graph, merged_edge_layers, site_vertices, layer_edges, layer_input_edge_ids, input_edge_id_set_lexicon); simplifier.Run(); } // LINT.ThenChange(:TallySimplifyEdgeChains) // Merges the edges from all layers and sorts them in lexicographic order so // that we can construct a single graph. The sort is stable, which means that // any duplicate edges within each layer will still be sorted by InputEdgeId. void S2Builder::MergeLayerEdges( const vector>& layer_edges, const vector>& layer_input_edge_ids, vector* edges, vector* input_edge_ids, vector* edge_layers) const { vector order; for (int i = 0; i < layer_edges.size(); ++i) { for (int e = 0; e < layer_edges[i].size(); ++e) { order.push_back(LayerEdgeId(i, e)); } } std::sort(order.begin(), order.end(), [&layer_edges](const LayerEdgeId& ai, const LayerEdgeId& bi) { return StableLessThan(layer_edges[ai.first][ai.second], layer_edges[bi.first][bi.second], ai, bi); }); edges->reserve(order.size()); input_edge_ids->reserve(order.size()); edge_layers->reserve(order.size()); for (const LayerEdgeId& id : order) { edges->push_back(layer_edges[id.first][id.second]); input_edge_ids->push_back(layer_input_edge_ids[id.first][id.second]); edge_layers->push_back(id.first); } } // A comparison function that allows stable sorting with std::sort (which is // fast but not stable). It breaks ties between equal edges by comparing // their LayerEdgeIds. inline bool S2Builder::StableLessThan( const Edge& a, const Edge& b, const LayerEdgeId& ai, const LayerEdgeId& bi) { // The compiler doesn't optimize this as well as it should: // return std::make_pair(a, ai) < std::make_pair(b, bi); if (a.first < b.first) return true; if (b.first < a.first) return false; if (a.second < b.second) return true; if (b.second < a.second) return false; return ai < bi; // Stable sort. } S2Builder::EdgeChainSimplifier::EdgeChainSimplifier( const S2Builder& builder, const Graph& g, const vector& edge_layers, const vector>& site_vertices, vector>* layer_edges, vector>* layer_input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon) : builder_(builder), g_(g), in_(g), out_(g), edge_layers_(edge_layers), site_vertices_(site_vertices), layer_edges_(layer_edges), layer_input_edge_ids_(layer_input_edge_ids), input_edge_id_set_lexicon_(input_edge_id_set_lexicon), layer_begins_(builder_.layer_begins_), is_interior_(g.num_vertices()), used_(g.num_edges()), tmp_vertex_set_(16) /*expected_max_elements*/ { tmp_vertex_set_.set_empty_key(-1); new_edges_.reserve(g.num_edges()); new_input_edge_ids_.reserve(g.num_edges()); new_edge_layers_.reserve(g.num_edges()); } void S2Builder::EdgeChainSimplifier::Run() { // Determine which vertices can be interior vertices of an edge chain. for (VertexId v = 0; v < g_.num_vertices(); ++v) { is_interior_[v] = IsInterior(v); } // Attempt to simplify all edge chains that start from a non-interior // vertex. (This takes care of all chains except loops.) for (EdgeId e = 0; e < g_.num_edges(); ++e) { if (used_[e]) continue; Edge edge = g_.edge(e); if (is_interior_[edge.first]) continue; if (!is_interior_[edge.second]) { OutputEdge(e); // An edge between two non-interior vertices. 
} else { SimplifyChain(edge.first, edge.second); } } // If there are any edges left, they form one or more disjoint loops where // all vertices are interior vertices. // // TODO(ericv): It would be better to start from the edge with the smallest // min_input_edge_id(), since that would make the output more predictable // for testing purposes. It also means that we won't create an edge that // spans the start and end of a polyline if the polyline is snapped into a // loop. (Unfortunately there are pathological examples that prevent us // from guaranteeing this in general, e.g. there could be two polylines in // different layers that snap to the same loop but start at different // positions. In general we only consider input edge ids to be a hint // towards the preferred output ordering.) for (EdgeId e = 0; e < g_.num_edges(); ++e) { if (used_[e]) continue; Edge edge = g_.edge(e); if (edge.first == edge.second) { // Note that it is safe to output degenerate edges as we go along, // because this vertex has at least one non-degenerate outgoing edge and // therefore we will (or just did) start an edge chain here. OutputEdge(e); } else { SimplifyChain(edge.first, edge.second); } } // TODO(ericv): The graph is not needed past here, so we could save some // memory by clearing the underlying Edge and InputEdgeIdSetId vectors. // Finally, copy the output edges into the appropriate layers. They don't // need to be sorted because the input edges were also unsorted. for (int e = 0; e < new_edges_.size(); ++e) { int layer = new_edge_layers_[e]; (*layer_edges_)[layer].push_back(new_edges_[e]); (*layer_input_edge_ids_)[layer].push_back(new_input_edge_ids_[e]); } } // Copies the given edge to the output and marks it as used. inline void S2Builder::EdgeChainSimplifier::OutputEdge(EdgeId e) { new_edges_.push_back(g_.edge(e)); new_input_edge_ids_.push_back(g_.input_edge_id_set_id(e)); new_edge_layers_.push_back(edge_layers_[e]); used_[e] = true; } // Returns the layer that a given graph edge belongs to. inline int S2Builder::EdgeChainSimplifier::graph_edge_layer(EdgeId e) const { return edge_layers_[e]; } // Returns the layer than a given input edge belongs to. int S2Builder::EdgeChainSimplifier::input_edge_layer(InputEdgeId id) const { // NOTE(ericv): If this method shows up in profiling, the result could be // stored with each edge (i.e., edge_layers_ and new_edge_layers_). S2_DCHECK_GE(id, 0); return (std::upper_bound(layer_begins_.begin(), layer_begins_.end(), id) - (layer_begins_.begin() + 1)); } // A helper class for determining whether a vertex can be an interior vertex // of a simplified edge chain. Such a vertex must be adjacent to exactly two // vertices (across all layers combined), and in each layer the number of // incoming edges from one vertex must equal the number of outgoing edges to // the other vertex (in both directions). Furthermore the vertex cannot have // any degenerate edges in a given layer unless it has at least one // non-degenerate edge in that layer as well. (Note that usually there will // not be any degenerate edges at all, since most layer types discard them.) // // The last condition is necessary to prevent the following: suppose that one // layer has a chain ABC and another layer has a degenerate edge BB (with no // other edges incident to B). 
Then we can't simplify ABC to AC because there // would be no suitable replacement for the edge BB (since the input edge that // mapped to BB can't be replaced by any of the edges AA, AC, or CC without // moving further than snap_radius). class S2Builder::EdgeChainSimplifier::InteriorVertexMatcher { public: // Checks whether "v0" can be an interior vertex of an edge chain. explicit InteriorVertexMatcher(VertexId v0) : v0_(v0), v1_(-1), v2_(-1), n0_(0), n1_(0), n2_(0), excess_out_(0), too_many_endpoints_(false) { } // Starts analyzing the edges of a new layer. void StartLayer() { excess_out_ = n0_ = n1_ = n2_ = 0; } // This method should be called for each edge incident to "v0" in a given // layer. (For degenerate edges, it should be called twice.) void Tally(VertexId v, bool outgoing) { excess_out_ += outgoing ? 1 : -1; // outdegree - indegree if (v == v0_) { ++n0_; // Counts both endpoints of each degenerate edge. } else { // We keep track of the total number of edges (incoming or outgoing) // connecting v0 to up to two adjacent vertices. if (v1_ < 0) v1_ = v; if (v1_ == v) { ++n1_; } else { if (v2_ < 0) v2_ = v; if (v2_ == v) { ++n2_; } else { too_many_endpoints_ = true; } } } } // This method should be called after processing the edges for each layer. // It returns true if "v0" is an interior vertex based on the edges so far. bool Matches() const { // We check that there are the same number of incoming and outgoing edges // in each direction by verifying that (1) indegree(v0) == outdegree(v0) // and (2) the total number of edges (incoming and outgoing) to "v1" and // "v2" are equal. We also check the condition on degenerate edges that // is documented above. return (!too_many_endpoints_ && excess_out_ == 0 && n1_ == n2_ && (n0_ == 0 || n1_ > 0)); } private: VertexId v0_, v1_, v2_; int n0_, n1_, n2_; int excess_out_; // outdegree(v0) - indegree(v0) bool too_many_endpoints_; // Have we seen more than two adjacent vertices? }; // Returns true if VertexId "v" can be an interior vertex of a simplified edge // chain. (See the InteriorVertexMatcher class for what this implies.) bool S2Builder::EdgeChainSimplifier::IsInterior(VertexId v) { // Check a few simple prerequisites. if (out_.degree(v) == 0) return false; if (out_.degree(v) != in_.degree(v)) return false; if (builder_.is_forced(v)) return false; // Keep forced vertices. // Sort the edges so that they are grouped by layer. vector& edges = tmp_edges_; // Avoid allocating each time. edges.clear(); for (EdgeId e : out_.edge_ids(v)) edges.push_back(e); for (EdgeId e : in_.edge_ids(v)) edges.push_back(e); std::sort(edges.begin(), edges.end(), [this](EdgeId x, EdgeId y) { return graph_edge_layer(x) < graph_edge_layer(y); }); // Now feed the edges in each layer to the InteriorVertexMatcher. InteriorVertexMatcher matcher(v); for (auto e = edges.begin(); e != edges.end(); ) { int layer = graph_edge_layer(*e); matcher.StartLayer(); for (; e != edges.end() && graph_edge_layer(*e) == layer; ++e) { Edge edge = g_.edge(*e); if (edge.first == v) matcher.Tally(edge.second, true /*outgoing*/); if (edge.second == v) matcher.Tally(edge.first, false /*outgoing*/); } if (!matcher.Matches()) return false; } return true; } // Follows the edge chain starting with (v0, v1) until either we find a // non-interior vertex or we return to the original vertex v0. At each vertex // we simplify a subchain of edges that is as long as possible. 
void S2Builder::EdgeChainSimplifier::SimplifyChain(VertexId v0, VertexId v1) { // Avoid allocating "chain" each time by reusing it. vector& chain = tmp_vertices_; // Contains the set of vertices that have either been avoided or added to // the chain so far. This is necessary so that AvoidSites() doesn't try to // avoid vertices that have already been added to the chain. dense_hash_set& used_vertices = tmp_vertex_set_; S2PolylineSimplifier simplifier; VertexId vstart = v0; bool done = false; do { // Simplify a subchain of edges starting with (v0, v1). chain.push_back(v0); used_vertices.insert(v0); simplifier.Init(g_.vertex(v0)); // Note that if the first edge (v0, v1) is longer than the maximum length // allowed for simplification, then AvoidSites() will return false and we // exit the loop below after the first iteration. const bool simplify = AvoidSites(v0, v0, v1, &used_vertices, &simplifier); do { chain.push_back(v1); used_vertices.insert(v1); done = !is_interior_[v1] || v1 == vstart; if (done) break; // Attempt to extend the chain to the next vertex. VertexId vprev = v0; v0 = v1; v1 = FollowChain(vprev, v0); } while (simplify && TargetInputVertices(v0, &simplifier) && AvoidSites(chain[0], v0, v1, &used_vertices, &simplifier) && simplifier.Extend(g_.vertex(v1))); if (chain.size() == 2) { OutputAllEdges(chain[0], chain[1]); // Could not simplify. } else { MergeChain(chain); } // Note that any degenerate edges that were not merged into a chain are // output by EdgeChainSimplifier::Run(). chain.clear(); used_vertices.clear(); } while (!done); } // Given an edge (v0, v1) where v1 is an interior vertex, returns the (unique) // next vertex in the edge chain. S2Builder::Graph::VertexId S2Builder::EdgeChainSimplifier::FollowChain( VertexId v0, VertexId v1) const { S2_DCHECK(is_interior_[v1]); for (EdgeId e : out_.edge_ids(v1)) { VertexId v = g_.edge(e).second; if (v != v0 && v != v1) return v; } S2_LOG(FATAL) << "Could not find next edge in edge chain"; } // Copies all input edges between v0 and v1 (in both directions) to the output. void S2Builder::EdgeChainSimplifier::OutputAllEdges(VertexId v0, VertexId v1) { for (EdgeId e : out_.edge_ids(v0, v1)) OutputEdge(e); for (EdgeId e : out_.edge_ids(v1, v0)) OutputEdge(e); } // Ensures that the simplified edge passes within "edge_snap_radius" of all // the *input* vertices that snapped to the given vertex "v". bool S2Builder::EdgeChainSimplifier::TargetInputVertices( VertexId v, S2PolylineSimplifier* simplifier) const { for (InputVertexId i : site_vertices_[v]) { if (!simplifier->TargetDisc(builder_.input_vertices_[i], builder_.edge_snap_radius_ca_)) { return false; } } return true; } // Given the starting vertex v0 and last edge (v1, v2) of an edge chain, // restricts the allowable range of angles in order to ensure that all sites // near the edge (v1, v2) are avoided by at least min_edge_vertex_separation. bool S2Builder::EdgeChainSimplifier::AvoidSites( VertexId v0, VertexId v1, VertexId v2, dense_hash_set* used_vertices, S2PolylineSimplifier* simplifier) const { const S2Point& p0 = g_.vertex(v0); const S2Point& p1 = g_.vertex(v1); const S2Point& p2 = g_.vertex(v2); S1ChordAngle r1(p0, p1); S1ChordAngle r2(p0, p2); // The distance from the start of the edge chain must increase monotonically // for each vertex, since we don't want to simplify chains that backtrack on // themselves (we want a parametric approximation, not a geometric one). 
if (r2 < r1) return false; // We also limit the maximum edge length in order to guarantee that the // simplified edge stays within max_edge_deviation() of all the input edges // that snap to it. if (r2 >= builder_.min_edge_length_to_split_ca_) return false; // Otherwise it is sufficient to consider the nearby sites (edge_sites_) for // a single input edge that snapped to (v1, v2) or (v2, v1). This is // because each edge has a list of all sites within (max_edge_deviation + // min_edge_vertex_separation), and since the output edge is within // max_edge_deviation of all input edges, this list includes all sites // within min_edge_vertex_separation of the output edge. // // Usually there is only one edge to choose from, but it's not much more // effort to choose the edge with the shortest list of edge_sites_. InputEdgeId best = -1; const auto& edge_sites = builder_.edge_sites_; for (EdgeId e : out_.edge_ids(v1, v2)) { for (InputEdgeId id : g_.input_edge_ids(e)) { if (best < 0 || edge_sites[id].size() < edge_sites[best].size()) best = id; } } for (EdgeId e : out_.edge_ids(v2, v1)) { for (InputEdgeId id : g_.input_edge_ids(e)) { if (best < 0 || edge_sites[id].size() < edge_sites[best].size()) best = id; } } S2_DCHECK_GE(best, 0); // Because there is at least one outgoing edge. for (VertexId v : edge_sites[best]) { // Sites whose distance from "p0" is at least "r2" are not relevant yet. const S2Point& p = g_.vertex(v); S1ChordAngle r(p0, p); if (r >= r2) continue; // The following test prevents us from avoiding previous vertices of the // edge chain that also happen to be nearby the current edge. (It also // happens to ensure that each vertex is avoided at most once, but this is // just an optimization.) if (!used_vertices->insert(v).second) continue; // We need to figure out whether this site is to the left or right of the // edge chain. For the first edge this is easy. Otherwise, since we are // only considering sites in the radius range (r1, r2), we can do this by // checking whether the site is to the left of the wedge (p0, p1, p2). bool disc_on_left = (v1 == v0) ? (s2pred::Sign(p1, p2, p) > 0) : s2pred::OrderedCCW(p0, p2, p, p1); if (!simplifier->AvoidDisc(p, builder_.min_edge_site_separation_ca_, disc_on_left)) { return false; } } return true; }
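// The simplification pass implemented by this class is driven by the
// client-level simplify_edge_chains() option.  A minimal sketch of enabling
// it, given as a comment only (the snap function choice is illustrative, and
// the layer setup is assumed to mirror the earlier sketch near ForceVertex()):
//
//   S2Builder::Options options(s2builderutil::IntLatLngSnapFunction(7));
//   options.set_simplify_edge_chains(true);
//   S2Builder builder(options);
//   // ... then StartLayer()/Add*()/Build() as usual.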
      // Each input edge id will be assigned to one of the output edges
      // later.  (Normally there are no degenerate edges at all since most
      // layer types don't want them.)
      S2_DCHECK(is_interior_[v0]);
      for (EdgeId e : out_.edge_ids(v0, v0)) {
        for (InputEdgeId id : g_.input_edge_ids(e)) {
          degenerate_ids.push_back(id);
        }
        used_[e] = true;
      }
    }
    // Because the edges were created in layer order, and all sorts used are
    // stable, the edges are still in layer order.  Therefore we can simply
    // merge together all the edges in the same relative position.
    int j = 0;
    for (EdgeId e : out_edges) {
      for (InputEdgeId id : g_.input_edge_ids(e)) {
        merged_input_ids[j].push_back(id);
      }
      used_[e] = true;
      ++j;
    }
    for (EdgeId e : in_edges) {
      for (InputEdgeId id : g_.input_edge_ids(e)) {
        merged_input_ids[j].push_back(id);
      }
      used_[e] = true;
      ++j;
    }
    S2_DCHECK_EQ(merged_input_ids.size(), j);
  }
  if (!degenerate_ids.empty()) {
    std::sort(degenerate_ids.begin(), degenerate_ids.end());
    AssignDegenerateEdges(degenerate_ids, &merged_input_ids);
  }
  // Output the merged edges.
  VertexId v0 = vertices[0], v1 = vertices[1], vb = vertices.back();
  for (EdgeId e : out_.edge_ids(v0, v1)) {
    new_edges_.push_back(Edge(v0, vb));
    new_edge_layers_.push_back(graph_edge_layer(e));
  }
  for (EdgeId e : out_.edge_ids(v1, v0)) {
    new_edges_.push_back(Edge(vb, v0));
    new_edge_layers_.push_back(graph_edge_layer(e));
  }
  for (const auto& ids : merged_input_ids) {
    new_input_edge_ids_.push_back(input_edge_id_set_lexicon_->Add(ids));
  }
}

// Given a list of the input edge ids associated with degenerate edges in the
// interior of an edge chain, assigns each input edge id to one of the output
// edges.
void S2Builder::EdgeChainSimplifier::AssignDegenerateEdges(
    const vector<InputEdgeId>& degenerate_ids,
    vector<vector<InputEdgeId>>* merged_ids) const {
  // Each degenerate edge is assigned to an output edge in the appropriate
  // layer.  If there is more than one candidate, we use heuristics so that if
  // the input consists of a chain of edges provided in consecutive order
  // (some of which became degenerate), then all those input edges are
  // assigned to the same output edge.  For example, suppose that one output
  // edge is labeled with input edges 3,4,7,8, while another output edge is
  // labeled with input edges 50,51,54,57.  Then if we encounter degenerate
  // edges labeled with input edges 5 and 6, we would prefer to assign them to
  // the first edge (yielding the continuous range 3,4,5,6,7,8).
  //
  // The heuristic below is only smart enough to handle the case where the
  // candidate output edges have non-overlapping ranges of input edges.
  // (Otherwise there is probably not a good heuristic for assigning the
  // degenerate edges in any case.)

  // Duplicate edge ids don't affect the heuristic below, so we don't bother
  // removing them.  (They will be removed by IdSetLexicon::Add.)
  for (auto& ids : *merged_ids) std::sort(ids.begin(), ids.end());

  // Sort the output edges by their minimum input edge id.  This is sufficient
  // for the purpose of determining which layer they belong to.  With
  // EdgeType::UNDIRECTED, some edges might not have any input edge ids (i.e.,
  // if they consist entirely of siblings of input edges).  We simply remove
  // such edges from the lists of candidates.
  vector<int> order;
  order.reserve(merged_ids->size());
  for (int i = 0; i < merged_ids->size(); ++i) {
    if (!(*merged_ids)[i].empty()) order.push_back(i);
  }
  std::sort(order.begin(), order.end(), [&merged_ids](int i, int j) {
    return (*merged_ids)[i][0] < (*merged_ids)[j][0];
  });

  // Now determine where each degenerate edge should be assigned.
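  //
  // Added worked example (not in the original source): continuing the example
  // in the comment above, suppose "order" contains {i, j} where
  // (*merged_ids)[i] = {3,4,7,8} and (*merged_ids)[j] = {50,51,54,57}.  For
  // degenerate_id == 5, upper_bound() below returns the position of j (the
  // first edge whose input ids start after 5); we then step back to i,
  // provided i belongs to the same layer, and append 5 to {3,4,7,8}.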
for (InputEdgeId degenerate_id : degenerate_ids) { int layer = input_edge_layer(degenerate_id); // Find the first output edge whose range of input edge ids starts after // "degenerate_id". If the previous edge belongs to the correct layer, // then we assign the degenerate edge to it. auto it = std::upper_bound(order.begin(), order.end(), degenerate_id, [&merged_ids](InputEdgeId x, unsigned y) { return x < (*merged_ids)[y][0]; }); if (it != order.begin()) { if ((*merged_ids)[it[-1]][0] >= layer_begins_[layer]) --it; } S2_DCHECK_EQ(layer, input_edge_layer((*merged_ids)[it[0]][0])); (*merged_ids)[it[0]].push_back(degenerate_id); } } /////////////////////// S2Builder::MemoryTracker ///////////////////////// // Called to track memory used to store the set of sites near a given edge. bool S2Builder::MemoryTracker::TallyEdgeSites( const compact_array& sites) { int64 size = GetCompactArrayAllocBytes(sites); edge_sites_bytes_ += size; return Tally(size); } // Ensures that "sites" contains space for at least one more edge site. bool S2Builder::MemoryTracker::ReserveEdgeSite(compact_array* sites) { int64 new_size = sites->size() + 1; if (new_size <= sites->capacity()) return true; int64 old_bytes = GetCompactArrayAllocBytes(*sites); sites->reserve(new_size); int64 added_bytes = GetCompactArrayAllocBytes(*sites) - old_bytes; edge_sites_bytes_ += added_bytes; return Tally(added_bytes); } // Releases and tracks the memory used to store nearby edge sites. bool S2Builder::MemoryTracker::ClearEdgeSites( vector>* edge_sites) { Tally(-edge_sites_bytes_); edge_sites_bytes_ = 0; return Clear(edge_sites); } // Called when a site is added to the S2PointIndex. bool S2Builder::MemoryTracker::TallyIndexedSite() { // S2PointIndex stores its data in a btree. In general btree nodes are only // guaranteed to be half full, but in our case all nodes are full except for // the rightmost node at each btree level because the values are added in // sorted order. int64 delta_bytes = GetBtreeMinBytesPerEntry< absl::btree_multimap::PointData>>(); site_index_bytes_ += delta_bytes; return Tally(delta_bytes); } // Corrects the approximate S2PointIndex memory tracking done above. bool S2Builder::MemoryTracker::FixSiteIndexTally( const S2PointIndex& index) { int64 delta_bytes = index.SpaceUsed() - site_index_bytes_; site_index_bytes_ += delta_bytes; return Tally(delta_bytes); } // Tracks memory due to destroying the site index. bool S2Builder::MemoryTracker::DoneSiteIndex( const S2PointIndex& index) { Tally(-site_index_bytes_); site_index_bytes_ = 0; return ok(); } // Called to indicate that edge simplification was requested. // LINT.IfChange(TallySimplifyEdgeChains) bool S2Builder::MemoryTracker::TallySimplifyEdgeChains( const vector>& site_vertices, const vector>& layer_edges) { if (!is_active()) return true; // The simplify_edge_chains() option uses temporary memory per site // (output vertex) and per output edge, as outlined below. 
// // Per site: // vector> site_vertices; // BuildLayerEdges // - compact_array non-inlined space is tallied separately // vector is_interior_; // EdgeChainSimplifier // Graph::VertexInMap in_; // EdgeChainSimplifier // Graph::VertexOutMap out_; // EdgeChainSimplifier const int64 kTempPerSite = sizeof(compact_array) + sizeof(bool) + 2 * sizeof(EdgeId); // Per output edge: // vector used_; // EdgeChainSimplifier // Graph::VertexInMap in_; // EdgeChainSimplifier // vector merged_edges; // SimplifyEdgeChains // vector merged_input_edge_ids; // SimplifyEdgeChains // vector merged_edge_layers; // SimplifyEdgeChains // vector new_edges_; // EdgeChainSimplifier // vector new_input_edge_ids_; // EdgeChainSimplifier // vector new_edge_layers_; // EdgeChainSimplifier // // Note that the temporary vector in MergeLayerEdges() does not // affect peak usage. const int64 kTempPerEdge = sizeof(bool) + sizeof(EdgeId) + 2 * sizeof(Edge) + 2 * sizeof(InputEdgeIdSetId) + 2 * sizeof(int); int64 simplify_bytes = site_vertices.size() * kTempPerSite; for (const auto& array : site_vertices) { simplify_bytes += GetCompactArrayAllocBytes(array); } for (const auto& edges : layer_edges) { simplify_bytes += edges.size() * kTempPerEdge; } return TallyTemp(simplify_bytes); } // LINT.ThenChange() // Tracks the temporary memory used by Graph::FilterVertices. // LINT.IfChange(TallyFilterVertices) bool S2Builder::MemoryTracker::TallyFilterVertices( int num_sites, const vector>& layer_edges) { if (!is_active()) return true; // Vertex filtering (see BuildLayers) uses temporary space of one VertexId // per Voronoi site plus 2 VertexIds per layer edge, plus space for all the // vertices after filtering. // // vector *tmp; // Graph::FilterVertices // vector used; // Graph::FilterVertices const int64 kTempPerSite = sizeof(Graph::VertexId); const int64 kTempPerEdge = 2 * sizeof(Graph::VertexId); size_t max_layer_edges = 0; for (const auto& edges : layer_edges) { max_layer_edges = max(max_layer_edges, edges.size()); } filter_vertices_bytes_ = (num_sites * kTempPerSite + max_layer_edges * kTempPerEdge); return Tally(filter_vertices_bytes_); } // LINT.ThenChange() bool S2Builder::MemoryTracker::DoneFilterVertices() { Tally(-filter_vertices_bytes_); filter_vertices_bytes_ = 0; return ok(); } s2geometry-0.10.0/src/s2/s2builder.h000066400000000000000000001656701422156367100170750ustar00rootroot00000000000000// Copyright 2016 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) // // This class is a replacement for S2PolygonBuilder. Once all clients have // been updated to use this class, S2PolygonBuilder will be removed. 
#ifndef S2_S2BUILDER_H_ #define S2_S2BUILDER_H_ #include #include #include #include #include #include "absl/base/macros.h" #include "s2/base/integral_types.h" #include "s2/_fp_contract_off.h" #include "s2/id_set_lexicon.h" #include "s2/mutable_s2shape_index.h" #include "s2/s1angle.h" #include "s2/s1chord_angle.h" #include "s2/s2cell_id.h" #include "s2/s2edge_distances.h" #include "s2/s2error.h" #include "s2/s2memory_tracker.h" #include "s2/s2point_index.h" #include "s2/s2point_span.h" #include "s2/s2shape_index.h" #include "s2/util/gtl/compact_array.h" class S2Loop; class S2Polygon; class S2Polyline; // S2Builder is a tool for assembling polygonal geometry from edges. Here are // some of the things it is designed for: // // 1. Building polygons, polylines, and polygon meshes from unsorted // collections of edges. // // 2. Snapping geometry to discrete representations (such as S2CellId centers // or E7 lat/lng coordinates) while preserving the input topology and with // guaranteed error bounds. // // 3. Simplifying geometry (e.g. for indexing, display, or storage). // // 4. Importing geometry from other formats, including repairing geometry // that has errors. // // 5. As a tool for implementing more complex operations such as polygon // intersections and unions. // // The implementation is based on the framework of "snap rounding". Unlike // most snap rounding implementations, S2Builder defines edges as geodesics on // the sphere (straight lines) and uses the topology of the sphere (i.e., // there are no "seams" at the poles or 180th meridian). The algorithm is // designed to be 100% robust for arbitrary input geometry. It offers the // following properties: // // - Guaranteed bounds on how far input vertices and edges can move during // the snapping process (i.e., at most the given "snap_radius"). // // - Guaranteed minimum separation between edges and vertices other than // their endpoints (similar to the goals of Iterated Snap Rounding). In // other words, edges that do not intersect in the output are guaranteed // to have a minimum separation between them. // // - Idempotency (similar to the goals of Stable Snap Rounding), i.e. if the // input already meets the output criteria then it will not be modified. // // - Preservation of the input topology (up to the creation of // degeneracies). This means that there exists a continuous deformation // from the input to the output such that no vertex crosses an edge. In // other words, self-intersections won't be created, loops won't change // orientation, etc. // // - The ability to snap to arbitrary discrete point sets (such as S2CellId // centers, E7 lat/lng points on the sphere, or simply a subset of the // input vertices), rather than being limited to an integer grid. // // Here are some of its other features: // // - It can handle both directed and undirected edges. Undirected edges can // be useful for importing data from other formats, e.g. where loops have // unspecified orientations. // // - It can eliminate self-intersections by finding all edge pairs that cross // and adding a new vertex at each intersection point. // // - It can simplify polygons to within a specified tolerance. For example, // if two vertices are close enough they will be merged, and if an edge // passes nearby a vertex then it will be rerouted through that vertex. 
// Optionally, it can also detect nearly straight chains of short edges and // replace them with a single long edge, while maintaining the same // accuracy, separation, and topology guarantees ("simplify_edge_chains"). // // - It supports many different output types through the concept of "layers" // (polylines, polygons, polygon meshes, etc). You can build multiple // layers at once in order to ensure that snapping does not create // intersections between different objects (for example, you can simplify a // set of contour lines without the risk of having them cross each other). // // - It supports edge labels, which allow you to attach arbitrary information // to edges and have it preserved during the snapping process. (This can // also be achieved using layers, at a coarser level of granularity.) // // Caveats: // // - Because S2Builder only works with edges, it cannot distinguish between // the empty and full polygons. If your application can generate both the // empty and full polygons, you must implement logic outside of this class. // // Example showing how to snap a polygon to E7 coordinates: // // using s2builderutil::IntLatLngSnapFunction; // S2Builder builder(S2Builder::Options(IntLatLngSnapFunction(7))); // S2Polygon output; // builder.StartLayer(absl::make_unique(&output)); // builder.AddPolygon(input); // S2Error error; // if (!builder.Build(&error)) { // S2_LOG(ERROR) << error; // ... // } class S2Builder { public: // Indicates whether the input edges are undirected. Typically this is // specified for each output layer (e.g., s2builderutil::S2PolygonLayer). // // Directed edges are preferred, since otherwise the output is ambiguous. // For example, output polygons may be the *inverse* of the intended result // (e.g., a polygon intended to represent the world's oceans may instead // represent the world's land masses). Directed edges are also somewhat // more efficient. // // However even with undirected edges, most S2Builder layer types try to // preserve the input edge direction whenever possible. Generally, edges // are reversed only when it would yield a simpler output. For example, // S2PolygonLayer assumes that polygons created from undirected edges should // cover at most half of the sphere. Similarly, S2PolylineVectorLayer // assembles edges into as few polylines as possible, even if this means // reversing some of the "undirected" input edges. // // For shapes with interiors, directed edges should be oriented so that the // interior is to the left of all edges. This means that for a polygon with // holes, the outer loops ("shells") should be directed counter-clockwise // while the inner loops ("holes") should be directed clockwise. Note that // S2Builder::AddPolygon() follows this convention automatically. enum class EdgeType : uint8 { DIRECTED, UNDIRECTED }; // A SnapFunction restricts the locations of the output vertices. For // example, there are predefined snap functions that require vertices to be // located at S2CellId centers or at E5/E6/E7 coordinates. The SnapFunction // can also specify a minimum spacing between vertices (the "snap radius"). // // A SnapFunction defines the following methods: // // 1. The SnapPoint() method, which snaps a point P to a nearby point (the // "candidate snap site"). Any point may be returned, including P // itself (this is the "identity snap function"). // // 2. "snap_radius", the maximum distance that vertices can move when // snapped. 
The snap_radius must be at least as large as the maximum // distance between P and SnapPoint(P) for any point P. // // Note that the maximum distance that edge interiors can move when // snapped is slightly larger than "snap_radius", and is returned by the // function S2Builder::Options::max_edge_deviation() (see there for // details). // // 3. "min_vertex_separation", the guaranteed minimum distance between // vertices in the output. This is generally a fraction of // "snap_radius" where the fraction depends on the snap function. // // 4. "min_edge_vertex_separation", the guaranteed minimum distance between // edges and non-incident vertices in the output. This is generally a // fraction of "snap_radius" where the fraction depends on the snap // function. // // It is important to note that SnapPoint() does not define the actual // mapping from input vertices to output vertices, since the points it // returns (the candidate snap sites) are further filtered to ensure that // they are separated by at least the snap radius. For example, if you // specify E7 coordinates (2cm resolution) and a snap radius of 10m, then a // subset of points returned by SnapPoint will be chosen (the "snap sites"), // and each input vertex will be mapped to the closest site. Therefore you // cannot assume that P is necessarily snapped to SnapPoint(P). // // S2Builder makes the following guarantees: // // 1. Every vertex is at a location returned by SnapPoint(). // // 2. Vertices are within "snap_radius" of the corresponding input vertex. // // 3. Edges are within "max_edge_deviation" of the corresponding input edge // (a distance slightly larger than "snap_radius"). // // 4. Vertices are separated by at least "min_vertex_separation" // (a fraction of "snap_radius" that depends on the snap function). // // 5. Edges and non-incident vertices are separated by at least // "min_edge_vertex_separation" (a fraction of "snap_radius"). // // 6. Vertex and edge locations do not change unless one of the conditions // above is not already met (idempotency / stability). // // 7. The topology of the input geometry is preserved (up to the creation // of degeneracies). This means that there exists a continuous // deformation from the input to the output such that no vertex // crosses an edge. class SnapFunction { public: virtual ~SnapFunction() {} // The maximum distance that vertices can move when snapped. The snap // radius can be any value between zero and SnapFunction::kMaxSnapRadius(). // // If the snap radius is zero, then vertices are snapped together only if // they are identical. Edges will not be snapped to any vertices other // than their endpoints, even if there are vertices whose distance to the // edge is zero, unless split_crossing_edges() is true (see below). // // REQUIRES: snap_radius() <= kMaxSnapRadius virtual S1Angle snap_radius() const = 0; // The maximum supported snap radius (equivalent to about 7800km). static S1Angle kMaxSnapRadius(); // The guaranteed minimum distance between vertices in the output. // This is generally some fraction of "snap_radius". virtual S1Angle min_vertex_separation() const = 0; // The guaranteed minimum spacing between edges and non-incident vertices // in the output. This is generally some fraction of "snap_radius". virtual S1Angle min_edge_vertex_separation() const = 0; // Returns a candidate snap site for the given point. The final vertex // locations are a subset of the snap sites returned by this function // (spaced at least "min_vertex_separation" apart). 
// // The only requirement is that SnapPoint(x) must return a point whose // distance from "x" is no greater than "snap_radius". virtual S2Point SnapPoint(const S2Point& point) const = 0; // Returns a deep copy of this SnapFunction. virtual std::unique_ptr Clone() const = 0; }; class Options { public: Options(); // Convenience constructor that calls set_snap_function(). explicit Options(const SnapFunction& snap_function); // Sets the desired snap function. The snap function is copied // internally, so you can safely pass a temporary object. // // Note that if your input data includes vertices that were created using // S2::GetIntersection(), then you should use a "snap_radius" of // at least S2::kIntersectionSnapRadius, e.g. by calling // // options.set_snap_function(s2builderutil::IdentitySnapFunction( // S2::kIntersectionSnapRadius)); // // DEFAULT: s2builderutil::IdentitySnapFunction(S1Angle::Zero()) // [This does no snapping and preserves all input vertices exactly.] const SnapFunction& snap_function() const; void set_snap_function(const SnapFunction& snap_function); // The maximum distance from snapped edge vertices to the original edge. // This is the same as snap_function().snap_radius() except when // split_crossing_edges() is true (see below), in which case the edge snap // radius is increased by S2::kIntersectionError. S1Angle edge_snap_radius() const; // The maximum distance that any point along an edge can move when snapped. // It is slightly larger than edge_snap_radius() because when a geodesic // edge is snapped, the edge center moves further than its endpoints. // S2Builder ensures that this distance is at most 10% larger than // edge_snap_radius(). S1Angle max_edge_deviation() const; // If true, then detect all pairs of crossing edges and eliminate them by // adding a new vertex at their intersection point. See also the // AddIntersection() method which allows intersection points to be added // selectively. // // When this option if true, intersection_tolerance() is automatically set // to a minimum of S2::kIntersectionError (see intersection_tolerance() // for why this is necessary). Note that this means that edges can move // by up to S2::kIntersectionError even when the specified snap radius is // zero. The exact distance that edges can move is always given by // max_edge_deviation() defined above. // // Undirected edges should always be used when the output is a polygon, // since splitting a directed loop at a self-intersection converts it into // two loops that don't define a consistent interior according to the // "interior is on the left" rule. (On the other hand, it is fine to use // directed edges when defining a polygon *mesh* because in that case the // input consists of sibling edge pairs.) // // Self-intersections can also arise when importing data from a 2D // projection. You can minimize this problem by subdividing the input // edges so that the S2 edges (which are geodesics) stay close to the // original projected edges (which are curves on the sphere). This can // be done using S2EdgeTessellator, for example. // // DEFAULT: false bool split_crossing_edges() const; void set_split_crossing_edges(bool split_crossing_edges); // Specifes the maximum allowable distance between a vertex added by // AddIntersection() and the edge(s) that it is intended to snap to. This // method must be called before AddIntersection() can be used. It has the // effect of increasing the snap radius for edges (but not vertices) by // the given distance. 
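    // Illustrative usage (added sketch, not part of the original comment;
    // assumes edges a0a1 and b0b1 are known to cross):
    //
    //   S2Builder::Options options;
    //   options.set_intersection_tolerance(S2::kIntersectionError);
    //   S2Builder builder{options};
    //   builder.StartLayer(...);
    //   builder.AddEdge(a0, a1);
    //   builder.AddEdge(b0, b1);
    //   builder.AddIntersection(S2::GetIntersection(a0, a1, b0, b1));
    //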
// // The intersection tolerance should be set to the maximum error in the // intersection calculation used. For example, if S2::GetIntersection() // is used then the error should be set to S2::kIntersectionError. If // S2::GetPointOnLine() is used then the error should be set to // S2::kGetPointOnLineError. If S2::Project() is used then the error // should be set to S2::kProjectPerpendicularError. If more than one // method is used then the intersection tolerance should be set to the // maximum such error. // // The reason this option is necessary is that computed intersection // points are not exact. For example, S2::GetIntersection(a, b, c, d) // returns a point up to S2::kIntersectionError away from the true // mathematical intersection of the edges AB and CD. Furthermore such // intersection points are subject to further snapping in order to ensure // that no pair of vertices is closer than the specified snap radius. For // example, suppose the computed intersection point X of edges AB and CD // is 1 nanonmeter away from both edges, and the snap radius is 1 meter. // In that case X might snap to another vertex Y exactly 1 meter away, // which would leave us with a vertex Y that could be up to 1.000000001 // meters from the edges AB and/or CD. This means that AB and/or CD might // not snap to Y leaving us with two edges that still cross each other. // // However if the intersection tolerance is set to 1 nanometer then the // snap radius for edges is increased to 1.000000001 meters ensuring that // both edges snap to a common vertex even in this worst case. (Tthis // technique does not work if the vertex snap radius is increased as well; // it requires edges and vertices to be handled differently.) // // Note that this option allows edges to move by up to the given // intersection tolerance even when the snap radius is zero. The exact // distance that edges can move is always given by max_edge_deviation() // defined above. // // When split_crossing_edges() is true, the intersection tolerance is // automatically set to a minimum of S2::kIntersectionError. A larger // value can be specified by calling this method explicitly. // // DEFAULT: S1Angle::Zero() S1Angle intersection_tolerance() const; void set_intersection_tolerance(S1Angle intersection_tolerance); // If true, then simplify the output geometry by replacing nearly straight // chains of short edges with a single long edge. // // The combined effect of snapping and simplifying will not change the // input by more than the guaranteed tolerances (see the list documented // with the SnapFunction class). For example, simplified edges are // guaranteed to pass within snap_radius() of the *original* positions of // all vertices that were removed from that edge. This is a much tighter // guarantee than can be achieved by snapping and simplifying separately. // // However, note that this option does not guarantee idempotency. In // other words, simplifying geometry that has already been simplified once // may simplify it further. (This is unavoidable, since tolerances are // measured with respect to the original geometry, which is no longer // available when the geometry is simplified a second time.) // // When the output consists of multiple layers, simplification is // guaranteed to be consistent: for example, edge chains are simplified in // the same way across layers, and simplification preserves topological // relationships between layers (e.g., no crossing edges will be created). 
// Note that edge chains in different layers do not need to be identical // (or even have the same number of vertices, etc) in order to be // simplified together. All that is required is that they are close // enough together so that the same simplified edge can meet all of their // individual snapping guarantees. // // Note that edge chains are approximated as parametric curves rather than // point sets. This means that if an edge chain backtracks on itself (for // example, ABCDEFEDCDEFGH) then such backtracking will be preserved to // within snap_radius() (for example, if the preceding point were all in a // straight line then the edge chain would be simplified to ACFCFH, noting // that C and F have degree > 2 and therefore can't be simplified away). // // Simplified edges are assigned all labels associated with the edges of // the simplified chain. // // For this option to have any effect, a SnapFunction with a non-zero // snap_radius() must be specified. Also note that vertices specified // using ForceVertex are never simplified away. // // DEFAULT: false bool simplify_edge_chains() const; void set_simplify_edge_chains(bool simplify_edge_chains); // If true, then snapping occurs only when the input geometry does not // already meet the S2Builder output guarantees (see the SnapFunction // class description for details). This means that if all input vertices // are at snapped locations, all vertex pairs are separated by at least // min_vertex_separation(), and all edge-vertex pairs are separated by at // least min_edge_vertex_separation(), then no snapping is done. // // If false, then all vertex pairs and edge-vertex pairs closer than // "snap_radius" will be considered for snapping. This can be useful, for // example, if you know that your geometry contains errors and you want to // make sure that features closer together than "snap_radius" are merged. // // This option is automatically turned off by simplify_edge_chains(), // since simplifying edge chains is never guaranteed to be idempotent. // // DEFAULT: true bool idempotent() const; void set_idempotent(bool idempotent); // Specifies that internal memory usage should be tracked using the given // S2MemoryTracker. If a memory limit is specified and more more memory // than this is required then an error will be returned. Example usage: // // S2MemoryTracker tracker; // tracker.set_limit(500 << 20); // 500 MB // S2Builder::Options options; // options.set_memory_tracker(&tracker); // S2Builder builder{options}; // ... // S2Error error; // if (!builder.Build(&error)) { // if (error.code() == S2Error::RESOURCE_EXHAUSTED) { // S2_LOG(ERROR) << error; // Memory limit exceeded // } // } // // CAVEATS: // // - Memory allocated by the output S2Builder layers is not tracked. // // - While memory tracking is reasonably complete and accurate, it does // not account for every last byte. It is intended only for the // purpose of preventing clients from running out of memory. // // DEFAULT: nullptr (memory tracking disabled) S2MemoryTracker* memory_tracker() const; void set_memory_tracker(S2MemoryTracker* tracker); // Options may be assigned and copied. Options(const Options& options); Options& operator=(const Options& options); private: std::unique_ptr snap_function_; bool split_crossing_edges_ = false; S1Angle intersection_tolerance_ = S1Angle::Zero(); bool simplify_edge_chains_ = false; bool idempotent_ = true; S2MemoryTracker* memory_tracker_ = nullptr; }; // The following classes are only needed by Layer implementations. 
class GraphOptions; class Graph; // For output layers that represent polygons, there is an ambiguity inherent // in spherical geometry that does not exist in planar geometry. Namely, if // a polygon has no edges, does it represent the empty polygon (containing // no points) or the full polygon (containing all points)? This ambiguity // also occurs for polygons that consist only of degeneracies, e.g. a // degenerate loop with only two edges could be either a degenerate shell in // the empty polygon or a degenerate hole in the full polygon. // // To resolve this ambiguity, an IsFullPolygonPredicate may be specified for // each output layer (see AddIsFullPolygonPredicate below). If the output // after snapping consists only of degenerate edges and/or sibling pairs // (including the case where there are no edges at all), then the layer // implementation calls the given predicate to determine whether the polygon // is empty or full except for those degeneracies. The predicate is given // an S2Builder::Graph containing the output edges, but note that in general // the predicate must also have knowledge of the input geometry in order to // determine the correct result. // // This predicate is only needed by layers that are assembled into polygons. // It is not used by other layer types. using IsFullPolygonPredicate = std::function; // Default constructor; requires Init() to be called. S2Builder(); // Convenience constructor that calls Init(). Note that to use the default // options, C++ syntax requires an extra layer of parentheses: // // S2Builder builder{S2Builder::Options()}; explicit S2Builder(const Options& options); // Initializes an S2Builder with the given options. void Init(const Options& options); const Options& options() const { return options_; } // Starts a new output layer. This method must be called before adding any // edges to the S2Builder. You may call this method multiple times to build // multiple geometric objects that are snapped to the same set of sites. // // For example, if you have a set of contour lines, then you could put each // contour line in a separate layer. This keeps the contour lines separate // from each other, while also ensuring that no crossing edges are created // when they are snapped and/or simplified. (This is not true if the // contour lines are snapped or simplified independently.) // // Similarly, if you have a set of polygons that share common boundaries // (e.g., countries), you can snap and/or simplify them at the same time by // putting them in different layers, while ensuring that their boundaries // remain consistent (i.e., no crossing edges or T-vertices are introduced). // // Ownership of the layer is transferred to the S2Builder. Example usage: // // S2Polyline line1, line2; // builder.StartLayer(make_unique(&line1))); // ... Add edges using builder.AddEdge(), etc ... // builder.StartLayer(make_unique(&line2))); // ... Add edges using builder.AddEdge(), etc ... // S2Error error; // S2_CHECK(builder.Build(&error)) << error; // Builds "line1" & "line2" class Layer; void StartLayer(std::unique_ptr layer); // Adds a degenerate edge (representing a point) to the current layer. void AddPoint(const S2Point& v); // Adds the given edge to the current layer. void AddEdge(const S2Point& v0, const S2Point& v1); // Adds the edges in the given polyline. Note that polylines with 0 or 1 // vertices are defined to have no edges. void AddPolyline(S2PointSpan polyline); void AddPolyline(const S2Polyline& polyline); // Adds the edges in the given loop. 
Note that a loop consisting of one // vertex adds a single degenerate edge. // // If the sign() of an S2Loop is negative (i.e. the loop represents a hole // within a polygon), the edge directions are automatically reversed to // ensure that the polygon interior is always to the left of every edge. void AddLoop(S2PointLoopSpan loop); void AddLoop(const S2Loop& loop); // Adds the loops in the given polygon. Loops representing holes have their // edge directions automatically reversed as described for AddLoop(). Note // that this method does not distinguish between the empty and full polygons, // i.e. adding a full polygon has the same effect as adding an empty one. void AddPolygon(const S2Polygon& polygon); // Adds the edges of the given shape to the current layer. void AddShape(const S2Shape& shape); // If "vertex" is the intersection point of two edges AB and CD (as computed // by S2::GetIntersection()), this method ensures that AB and CD snap to a // common vertex. (Note that the common vertex may be different than // "vertex" in order to ensure that no pair of vertices is closer than the // given snap radius.) Unlike Options::split_crossing_edges(), this method // may be used to split crossing edge pairs selectively. // // This method can also be used to tessellate edges using S2::GetPointOnLine() // or S2::Project() provided that a suitable intersection tolerance is // specified (see intersection_tolerance() for details). // // This method implicitly overrides the idempotent() option, since adding an // intersection point implies a desire to have nearby edges snapped to it // even if these edges already satsify the S2Builder output guarantees. // (Otherwise for example edges would never be snapped to nearby // intersection points when the snap radius is zero.) // // Note that unlike ForceVertex(), this method maintains all S2Builder // guarantees regarding minimum vertex-vertex separation, minimum // edge-vertex separation, and edge chain simplification. // // REQUIRES: options().intersection_tolerance() > S1Angle::Zero() // REQUIRES: "vertex" was computed by S2::GetIntersection() (in order to // guarantee that both edges snap to a common vertex) void AddIntersection(const S2Point& vertex); // For layers that are assembled into polygons, this method specifies a // predicate that is called when the output consists entirely of degenerate // edges and/or sibling pairs. The predicate is given an S2Builder::Graph // containing the output edges (if any) and is responsible for deciding // whether this graph represents the empty polygon (possibly with degenerate // shells) or the full polygon (possibly with degenerate holes). Note that // this cannot be determined from the output edges alone; it also requires // knowledge of the input geometry. (Also see IsFullPolygonPredicate above.) // // This method should be called at most once per layer; additional calls // simply overwrite the previous value for the current layer. // // The default predicate simply returns false (i.e., degenerate polygons are // assumed to be empty). Arguably it would better to return an error in // this case, but the fact is that relatively few clients need to be able to // construct full polygons, and it is unreasonable to expect all such // clients to supply an appropriate predicate. // // The reason for having a predicate rather than a boolean value is that the // predicate is responsible for determining whether the output polygon is // empty or full. 
In general the input geometry is not degenerate, but // rather collapses into a degenerate configuration due to snapping and/or // simplification. // // TODO(ericv): Provide standard predicates to handle common cases, // e.g. valid input geometry that becomes degenerate due to snapping. void AddIsFullPolygonPredicate(IsFullPolygonPredicate predicate); // A predicate that returns an error indicating that no polygon predicate // has been specified. static bool IsFullPolygonUnspecified(const S2Builder::Graph& g, S2Error* error); // Returns a predicate that returns a constant value (true or false); static IsFullPolygonPredicate IsFullPolygon(bool is_full); // Forces a vertex to be located at the given position. This can be used to // prevent certain input vertices from moving. However if you are trying to // preserve input edges, be aware that this option does not prevent edges from // being split by new vertices. // // Forced vertices are subject to the following limitations: // // - Forced vertices are never snapped. This is true even when the given // position is not allowed by the given snap function (e.g. you can force // a vertex at a non-S2CellId center when using S2CellIdSnapFunction). // If you want to ensure that forced vertices obey the snap function // restrictions, you must call snap_function().SnapPoint() explicitly. // // - There is no guaranteed minimum separation between pairs of forced // vertices, i.e. snap_function().min_vertex_separation() does not apply. // (This must be true because forced vertices can be placed arbitrarily.) // // - There is no guaranteed minimum separation between forced vertices and // non-incident edges, i.e. snap_function().min_edge_vertex_separation() // does not apply. // // - Forced vertices are never simplified away (i.e. when simplification is // requested using options().simplify_edge_chains()). // // All other guarantees continue to hold, e.g. the input topology will always // be preserved. void ForceVertex(const S2Point& vertex); // Every edge can have a set of non-negative integer labels attached to it. // When used with an appropriate layer type, you can then retrieve the // labels associated with each output edge. This can be useful when merging // or combining data from several sources. (Note that in many cases it is // easier to use separate output layers rather than labels.) // // Labels are 32-bit non-negative integers. To support other label types, // you can use ValueLexicon to store the set of unique labels seen so far: // // ValueLexicon my_label_lexicon; // builder.set_label(my_label_lexicon.Add(label)); // // The current set of labels is represented as a stack. This makes it easy // to add and remove labels hierarchically (e.g., polygon 5, loop 2). Use // set_label() and clear_labels() if you need at most one label per edge. // using Label = int32; // Clear the stack of labels. void clear_labels(); // Add a label to the stack. // REQUIRES: label >= 0. void push_label(Label label); // Remove a label from the stack. void pop_label(); // Convenience function that clears the stack and adds a single label. // REQUIRES: label >= 0. void set_label(Label label); // Performs the requested edge splitting, snapping, simplification, etc, and // then assembles the resulting edges into the requested output layers. // // Returns true if all edges were assembled; otherwise sets "error" // appropriately. Depending on the error, some or all output layers may // have been created. 
Automatically resets the S2Builder state so that it // can be reused. // // REQUIRES: error != nullptr. bool Build(S2Error* error); // Clears all input data and resets the builder state. Any options // specified are preserved. void Reset(); /////////////////////////////////////////////////////////////////////////// // The following methods may be called at any time, including from // S2Builder::Layer implementations. // Returns the number of input edges. int num_input_edges() const; // Returns the endpoints of the given input edge. // // REQUIRES: 0 <= input_edge_id < num_input_edges() S2Shape::Edge input_edge(int input_edge_id) const; private: ////////////////////// Input Types ///////////////////////// // All types associated with the S2Builder inputs are prefixed with "Input". // Identifies an input vertex. using InputVertexId = int32; // Defines an input edge. using InputEdge = std::pair; // Identifies an input edge. using InputEdgeId = int32; // Identifies the set of input edge ids that were snapped to a given edge. using InputEdgeIdSetId = int32; // Sort key for prioritizing input vertices. (Note that keys are *not* // compared using std::less; see SortInputVertices for details.) using InputVertexKey = std::pair; ////////////////////// Output Types ///////////////////////// // These types define the output vertices and edges. // Identifies a snapped vertex ("snap site"). If there is only one layer, // than SiteId is the same as Graph::VertexId, but if there are many layers // then each Graph may contain only a subset of the sites. Also see // GraphOptions::allow_vertex_filtering(). using SiteId = int32; // Defines an output edge. using Edge = std::pair; // Identifies an output edge. using EdgeId = int32; // Identifies an output edge in a particular layer. using LayerEdgeId = std::pair; ////////////////////// Internal Types ///////////////////////// class EdgeChainSimplifier; // MemoryTracker is a helper class to measure S2Builder memory usage. It is // based on a detailed analysis of the data structures used. This approach // is fragile because the memory tracking code needs to be updated whenever // S2Builder is modified, however S2Builder has been quite stable and this // approach allows the memory usage to be measured quite accurately. // // CAVEATS: // // - Does not track memory used by edge labels. (It is tricky to do this // accurately because they are stored in an IdSetLexicon, and labels // are typically a tiny fraction of the total space used.) // // - Does not track memory used to represent layers internally. (The // number of layers is typically small compared to the numbers of // vertices and edges, and the amount of memory used by the Layer and // IsFullPolygonPredicate objects is difficult to measure.) // // - Does not track memory used by the output layer Build() methods. (This // includes both temporary space, e.g. due to calling S2Builder::Graph // methods, and also any geometric objects created by these layers.) 
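  // Added note (not part of the original comment): each Tally*() method below
  // forwards a byte delta to the S2MemoryTracker::Client base class; once the
  // configured limit is exceeded the client reports failure and S2Builder
  // aborts the build with an S2Error::RESOURCE_EXHAUSTED error (see
  // Options::set_memory_tracker() above).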
class MemoryTracker : public S2MemoryTracker::Client { public: bool TallyEdgeSites(const gtl::compact_array& sites); bool ReserveEdgeSite(gtl::compact_array* sites); bool ClearEdgeSites(std::vector>* edge_sites); bool TallyIndexedSite(); bool FixSiteIndexTally(const S2PointIndex& index); bool DoneSiteIndex(const S2PointIndex& index); bool TallySimplifyEdgeChains( const std::vector>& site_vertices, const std::vector>& layer_edges); bool TallyFilterVertices(int num_sites, const std::vector>& layer_edges); bool DoneFilterVertices(); private: // The amount of non-inline memory used to store edge sites. int64 edge_sites_bytes_ = 0; // The amount of memory used by the S2PointIndex for sites. int64 site_index_bytes_ = 0; // The amount of temporary memory used by Graph::FilterVertices(). int64 filter_vertices_bytes_ = 0; }; InputVertexId AddVertex(const S2Point& v); void ChooseSites(); void ChooseAllVerticesAsSites(); std::vector SortInputVertices(); void AddEdgeCrossings(const MutableS2ShapeIndex& input_edge_index); void AddForcedSites(S2PointIndex* site_index); bool is_forced(SiteId v) const; void ChooseInitialSites(S2PointIndex* site_index); S2Point SnapSite(const S2Point& point) const; void CollectSiteEdges(const S2PointIndex& site_index); void SortSitesByDistance(const S2Point& x, gtl::compact_array* sites) const; void InsertSiteByDistance(SiteId new_site_id, const S2Point& x, gtl::compact_array* sites); void AddExtraSites(const MutableS2ShapeIndex& input_edge_index); void MaybeAddExtraSites(InputEdgeId edge_id, const std::vector& chain, const MutableS2ShapeIndex& input_edge_index, gtl::dense_hash_set* edges_to_resnap); void AddExtraSite(const S2Point& new_site, const MutableS2ShapeIndex& input_edge_index, gtl::dense_hash_set* edges_to_resnap); S2Point GetSeparationSite(const S2Point& site_to_avoid, const S2Point& v0, const S2Point& v1, InputEdgeId input_edge_id) const; S2Point GetCoverageEndpoint(const S2Point& p, const S2Point& x, const S2Point& y, const S2Point& n) const; void SnapEdge(InputEdgeId e, std::vector* chain) const; void BuildLayers(); void BuildLayerEdges( std::vector>* layer_edges, std::vector>* layer_input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon); void AddSnappedEdges( InputEdgeId begin, InputEdgeId end, const GraphOptions& options, std::vector* edges, std::vector* input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon, std::vector>* site_vertices); void MaybeAddInputVertex( InputVertexId v, SiteId id, std::vector>* site_vertices) const; void AddSnappedEdge(SiteId src, SiteId dst, InputEdgeIdSetId id, EdgeType edge_type, std::vector* edges, std::vector* input_edge_ids) const; void SimplifyEdgeChains( const std::vector>& site_vertices, std::vector>* layer_edges, std::vector>* layer_input_edge_ids, IdSetLexicon* input_edge_id_set_lexicon); void MergeLayerEdges( const std::vector>& layer_edges, const std::vector>& layer_input_edge_ids, std::vector* edges, std::vector* input_edge_ids, std::vector* edge_layers) const; static bool StableLessThan(const Edge& a, const Edge& b, const LayerEdgeId& ai, const LayerEdgeId& bi); //////////// Parameters ///////////// // S2Builder options. Options options_; // The maximum distance (inclusive) that a vertex can move when snapped, // equal to S1ChordAngle(options_.snap_function().snap_radius()). S1ChordAngle site_snap_radius_ca_; // The maximum distance (inclusive) that an edge can move when snapping to a // snap site. It can be slightly larger than the site snap radius when // edges are being split at crossings. 
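  // (Added note, not part of the original comment: per the documentation of
  // Options::edge_snap_radius(), this is effectively snap_radius() plus
  // intersection_tolerance(), cached here as an S1ChordAngle.)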
S1ChordAngle edge_snap_radius_ca_; // True if we need to check that snapping has not changed the input topology // around any vertex (i.e. Voronoi site). Normally this is only necessary for // forced vertices, but if the snap radius is very small (e.g., zero) and // split_crossing_edges() is true then we need to do this for all vertices. // In all other situations, any snapped edge that crosses a vertex will also // be closer than min_edge_vertex_separation() to that vertex, which will // cause us to add a separation site anyway. bool check_all_site_crossings_; S1Angle max_edge_deviation_; S1ChordAngle edge_site_query_radius_ca_; S1ChordAngle min_edge_length_to_split_ca_; S1Angle min_site_separation_; S1ChordAngle min_site_separation_ca_; S1ChordAngle min_edge_site_separation_ca_; S1ChordAngle min_edge_site_separation_ca_limit_; S1ChordAngle max_adjacent_site_separation_ca_; // The squared sine of the edge snap radius. This is equivalent to the snap // radius (squared) for distances measured through the interior of the // sphere to the plane containing an edge. This value is used only when // interpolating new points along edges (see GetSeparationSite). double edge_snap_radius_sin2_; // A copy of the argument to Build(). S2Error* error_; // True if snapping was requested. This is true if either snap_radius() is // positive, or split_crossing_edges() is true (which implicitly requests // snapping to ensure that both crossing edges are snapped to the // intersection point). bool snapping_requested_; // Initially false, and set to true when it is discovered that at least one // input vertex or edge does not meet the output guarantees (e.g., that // vertices are separated by at least snap_function.min_vertex_separation). bool snapping_needed_; //////////// Input Data ///////////// // A flag indicating whether label_set_ has been modified since the last // time label_set_id_ was computed. bool label_set_modified_; std::vector input_vertices_; std::vector input_edges_; std::vector> layers_; std::vector layer_options_; std::vector layer_begins_; std::vector layer_is_full_polygon_predicates_; // Each input edge has "label set id" (an int32) representing the set of // labels attached to that edge. This vector is populated only if at least // one label is used. using LabelSetId = int32; std::vector label_set_ids_; IdSetLexicon label_set_lexicon_; // The current set of labels (represented as a stack). std::vector::template rebind_alloc; public: typedef T value_type; typedef A allocator_type; typedef value_type* pointer; typedef const value_type* const_pointer; typedef value_type& reference; typedef const value_type& const_reference; typedef uint32 size_type; typedef ptrdiff_t difference_type; typedef value_type* iterator; typedef const value_type* const_iterator; typedef std::reverse_iterator reverse_iterator; typedef std::reverse_iterator const_reverse_iterator; // Init() replace the default constructors; so it can be used in "union". // This means Init() must be called for every new compact_array_base void Init() noexcept { memset(this, 0, sizeof(*this)); } // Construct an array of size n and initialize the values to v. // Any old contents, if heap-allocated, will be leaked. void Construct(size_type n, const value_type& v = value_type()) { Init(); value_init(n, v); } // See 23.1.1/9 in the C++ standard for an explanation. 
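  // (Added clarification, not in the original comment: that clause requires a
  // container constructed from two integral arguments to treat them as
  // (count, value) rather than as an iterator range, which is why the
  // std::is_integral dispatch below is needed.)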
template void Copy(Iterator first, Iterator last) { Init(); typedef typename std::is_integral::type Int; initialize(first, last, Int()); } void CopyFrom(const compact_array_base& v) { Init(); initialize(v.begin(), v.end(), std::false_type()); } compact_array_base& AssignFrom(const compact_array_base& v) { // Safe for self-assignment, which is rare. // Optimized to use existing allocated space. // Also to use assignment instead of copying where possible. if (size() < v.size()) { // grow reserve(v.size()); std::copy(v.begin(), v.begin() + size(), begin()); insert(end(), v.begin() + size(), v.end()); } else { // maybe shrink erase(begin() + v.size(), end()); std::copy(v.begin(), v.end(), begin()); } return *this; } // Deallocate the whole array. void Destruct() { if (!MayBeInlined() || Array() != InlinedSpace()) { value_allocator_type allocator; allocator.deallocate(Array(), capacity()); } Init(); } // Safe against self-swapping. // copying/destruction of compact_array_base is fairly trivial as the type // was designed to be useable in a C++98 union. void swap(compact_array_base& v) noexcept { compact_array_base tmp = *this; *this = v; v = tmp; } // The number of active items in the array. size_type size() const { return size_; } bool empty() const { return size() == 0; } // Maximum size that this data structure can hold. static size_type max_size() { return kMaxSize; } static bool MayBeInlined() { return kInlined > 0; } public: // Container interface (tables 65,66). iterator begin() { return Array(); } iterator end() { return Array() + size(); } const_iterator begin() const { return ConstArray(); } const_iterator end() const { return ConstArray() + size(); } reverse_iterator rbegin() { return reverse_iterator(end()); } reverse_iterator rend() { return reverse_iterator(Array()); } const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); } const_reverse_iterator rend() const { return const_reverse_iterator(ConstArray()); } private: // This Insert() is private because it might return the end(). iterator Insert(const_iterator p, const value_type& v) { if (size() >= kMaxSize) { throw std::length_error("compact_array size exceeded"); } iterator r = make_hole(p, 1); *r = v; return r; } public: // Sequence operations, table 67. iterator insert(const_iterator p, const value_type& v) { return Insert(p, v); } void insert(const_iterator p, size_type n, const value_type& v) { if (n + size() > kMaxSize) { throw std::length_error("compact_array size exceeded"); } value_insert(p, n, v); } // See 23.1.1/9 in the C++ standard for an explanation. template void insert(const_iterator p, Iterator first, Iterator last) { typedef typename std::is_integral::type Int; insert(p, first, last, Int()); } template reference emplace_back(Args&&... args) { return *Insert(end(), value_type(std::forward(args)...)); } template iterator emplace(const_iterator p, Args&&... args) { return Insert(p, value_type(std::forward(args)...)); } iterator erase(const_iterator p) { size_type index = p - begin(); erase_aux(p, 1); return begin() + index; } iterator erase(const_iterator first, const_iterator last) { size_type index = first - begin(); erase_aux(first, last - first); return begin() + index; } // clear just resets the size to 0, without deallocating the storage. // To deallocate the array, use Destruct(). 
void clear() { set_size(0); } reference front() { return begin()[0]; } const_reference front() const { return begin()[0]; } reference back() { return end()[-1]; } const_reference back() const { return end()[-1]; } void push_back(const value_type& v) { iterator p = make_hole(end(), 1); *p = v; } void pop_back() { erase_aux(end()-1, 1); } reference operator[](size_type n) { S2_DCHECK_LT(n, size_); return Array()[n]; } const_reference operator[](size_type n) const { S2_DCHECK_LT(n, size_); return ConstArray()[n]; } reference at(size_type n) { if (n >= size_) { throw std::out_of_range("compact_array index out of range"); } return Array()[n]; } const_reference at(size_type n) const { if (n >= size_) { throw std::out_of_range("compact_array index out of range"); } return ConstArray()[n]; } // Preallocate the array of size n. Only changes the capacity, not size. void reserve(int n) { reallocate(n); } size_type capacity() const { return is_exponent_ ? (1 << capacity_) : capacity_; } void resize(size_type n) { if (n > capacity()) reserve(n); // resize(n) is the only place in the class that exposes uninitialized // memory as live elements, so call a constructor for each element if // needed. // Destroying elements on shrinking resize isn't a concern, since the // value_type must be trivially destructible. if (n > size() && !absl::is_trivially_default_constructible::value) { // Increasing size would expose unconstructed elements. value_type *new_end = Array() + n; for (value_type *p = Array() + size(); p != new_end; ++p) new (p) value_type(); } set_size(n); } template friend H AbslHashValue(H h, const compact_array_base& v) { return H::combine( H::combine_contiguous(std::move(h), v.ConstArray(), v.size()), v.size()); } private: // Low-level helper functions. void set_size(size_type n) { S2_DCHECK_LE(n, capacity()); size_ = n; } void set_capacity(size_type n) { S2_DCHECK_LE(size(), n); is_exponent_ = (n >= kExponentStart); capacity_ = is_exponent_ ? Bits::Log2Ceiling(n) : n; // A tiny optimization here would be to set capacity_ to kInlined if // it's currently less. We don't bother, because doing so would require // changing the existing comments and unittests that say that, for small n, // capacity() will be exactly n if one calls reserve(n). S2_DCHECK(n == capacity() || n > kInlined); } // Make capacity n or more. Reallocate and copy data as necessary. 
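  // Added note (not in the original comment): because T must be trivially
  // copyable and destructible (see the static_assert at the end of this
  // class), growth can simply memcpy() the existing elements; e.g. when the
  // array first grows past kInlined elements, the inline storage is copied
  // into newly allocated heap storage.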
void reallocate(size_type n) { size_type old_capacity = capacity(); if (n <= old_capacity) return; set_capacity(n); if (MayBeInlined()) { if (!IsInlined() && n <= kInlined) { SetInlined(); return; } else if (IsInlined()) { if (n > kInlined) { value_allocator_type allocator; value_type* new_array = allocator.allocate(capacity()); memcpy(new_array, InlinedSpace(), size() * sizeof(T)); SetArray(new_array); } return; } } value_allocator_type allocator; T* new_ptr = allocator.allocate(capacity()); memcpy(new_ptr, Array(), old_capacity * sizeof(T)); allocator.deallocate(Array(), old_capacity); SetArray(new_ptr); } value_type* lastp() { return Array() + size(); } void move(const value_type* first, const value_type* last, value_type* out) { memmove(out, first, (last - first) * sizeof(value_type)); } iterator make_hole(const_iterator p, size_type n) { iterator q = const_cast(p); if (n != 0) { size_type new_size = size() + n; size_type index = q - Array(); reallocate(new_size); q = Array() + index; move(q, Array() + new_size - n, q + n); set_size(new_size); } return q; } void erase_aux(const_iterator p, size_type n) { iterator q = const_cast(p); size_type new_size = size() - n; move(q + n, lastp(), q); reallocate(new_size); set_size(new_size); } private: // Helper functions for range/value. void value_init(size_type n, const value_type& v) { reserve(n); set_size(n); std::fill(Array(), lastp(), v); } template void range_init(InputIter first, InputIter last, std::input_iterator_tag) { for ( ; first != last; ++first) push_back(*first); } template void range_init(ForwIter first, ForwIter last, std::forward_iterator_tag) { size_type n = std::distance(first, last); reserve(n); set_size(n); std::copy(first, last, Array()); } template void initialize(Integer n, Integer v, std::true_type) { value_init(n, v); } template void initialize(Iterator first, Iterator last, std::false_type) { typedef typename std::iterator_traits::iterator_category Cat; range_init(first, last, Cat()); } void value_insert(const_iterator p, size_type n, const value_type& v) { if (n + size() > kMaxSize) { throw std::length_error("compact_array size exceeded"); } iterator hole = make_hole(p, n); std::fill(hole, hole + n, v); } template void range_insert(const_iterator p, InputIter first, InputIter last, std::input_iterator_tag) { size_type pos = p - begin(); size_type old_size = size(); for (; first != last; ++first) push_back(*first); std::rotate(begin() + pos, begin() + old_size, end()); } template void range_insert(const_iterator p, ForwIter first, ForwIter last, std::forward_iterator_tag) { size_type n = std::distance(first, last); if (n + size() > kMaxSize) { throw std::length_error("compact_array size exceeded"); } std::copy(first, last, make_hole(p, n)); } template void insert(const_iterator p, Integer n, Integer v, std::true_type) { value_insert(p, n, v); } template void insert(const_iterator p, Iterator first, Iterator last, std::false_type) { typedef typename std::iterator_traits::iterator_category Cat; range_insert(p, first, last, Cat()); } static_assert(absl::is_trivially_copy_constructible::value && absl::is_trivially_copy_assignable::value && absl::is_trivially_destructible::value, "Requires trivial copy, assignment, and destructor."); }; // Allocates storage for constants in compact_array_base template const int compact_array_base::kSizeNumBits; template const int compact_array_base::kCapacityNumBits; template const int compact_array_base::kMaxSize; template const int compact_array_base::kExponentStart; // compact_array: 
Wrapper for compact_array_base that provides the // constructors and destructor. template > class compact_array : public compact_array_base { private: typedef compact_array_base Base; public: typedef typename Base::value_type value_type; typedef typename Base::allocator_type allocator_type; typedef typename Base::pointer pointer; typedef typename Base::const_pointer const_pointer; typedef typename Base::reference reference; typedef typename Base::const_reference const_reference; typedef typename Base::size_type size_type; typedef typename Base::iterator iterator; typedef typename Base::const_iterator const_iterator; typedef typename Base::reverse_iterator reverse_iterator; typedef typename Base::const_reverse_iterator const_reverse_iterator; compact_array() noexcept(noexcept(std::declval().Init())) { Base::Init(); } explicit compact_array(size_type n) { Base::Construct(n, value_type()); } compact_array(size_type n, const value_type& v) { Base::Construct(n, v); } // See 23.1.1/9 in the C++ standard for an explanation. template compact_array(Iterator first, Iterator last) { Base::Copy(first, last); } compact_array(const compact_array& v) { Base::CopyFrom(v); } compact_array(compact_array&& v) noexcept( noexcept(compact_array()) && noexcept(std::declval().swap(v))) : compact_array() { Base::swap(v); } compact_array& operator=(const compact_array& v) { Base::AssignFrom(v); return *this; } compact_array& operator=(compact_array&& v) { // swap is only right here because the objects are trivially destructible // and thus there are no side effects on their destructor. // Otherwise we must destroy the objects on `this`. Base::swap(v); return *this; } ~compact_array() { Base::Destruct(); } }; // Comparison operators template bool operator==(const compact_array& x, const compact_array& y) { return x.size() == y.size() && std::equal(x.begin(), x.end(), y.begin()); } template bool operator!=(const compact_array& x, const compact_array& y) { return !(x == y); } template bool operator<(const compact_array& x, const compact_array& y) { return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end()); } template bool operator>(const compact_array& x, const compact_array& y) { return y < x; } template bool operator<=(const compact_array& x, const compact_array& y) { return !(y < x); } template bool operator>=(const compact_array& x, const compact_array& y) { return !(x < y); } // Swap template inline void swap(compact_array& x, compact_array& y) { x.swap(y); } namespace compact_array_internal { struct LogArray : public gtl::LogLegacyUpTo100 { template void Log(std::ostream& out, const ElementT& element) const { // NOLINT out << element; } void Log(std::ostream& out, int8 c) const { // NOLINT out << static_cast(c); } void Log(std::ostream& out, uint8 c) const { // NOLINT out << static_cast(c); } void LogOpening(std::ostream& out) const { out << "["; } // NOLINT void LogClosing(std::ostream& out) const { out << "]"; } // NOLINT }; } // namespace compact_array_internal // Output operator for compact_array. Requires that T has an // operator<< for std::ostream. Note that // compact_array_internal::LogArray ensures that "signed char" and // "unsigned char" types print as integers. 
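// Added example (illustrative, not part of the original source).  It assumes
// the operator<< defined just below, which uses the LogArray policy above and
// therefore prints space-separated elements enclosed in [], plus <iostream>
// for std::cout:
//
//   gtl::compact_array<int> a;
//   a.push_back(1);
//   a.push_back(2);
//   a.push_back(3);
//   std::cout << a;   // prints "[1 2 3]"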
template std::ostream& operator<<(std::ostream& out, const compact_array& array) { gtl::LogRangeToStream(out, array.begin(), array.end(), compact_array_internal::LogArray()); return out; } } // namespace gtl #endif // S2_UTIL_GTL_COMPACT_ARRAY_H_ s2geometry-0.10.0/src/s2/util/gtl/container_logging.h000066400000000000000000000234061422156367100224230ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // All Rights Reserved. // // Utilities for container logging. // TODO(user): Broaden the scope and rename to "stream_util.h" // // // The typical use looks like this: // // S2_LOG(INFO) << gtl::LogContainer(container); // // By default, LogContainer() uses the LogShortUpTo100 policy: comma-space // separation, no newlines, and with limit of 100 items. // // Policies can be specified: // // S2_LOG(INFO) << gtl::LogContainer(container, gtl::LogMultiline()); // // The above example will print the container using newlines between // elements, enclosed in [] braces. // // See below for further details on policies. #ifndef S2_UTIL_GTL_CONTAINER_LOGGING_H_ #define S2_UTIL_GTL_CONTAINER_LOGGING_H_ #include #include #include #include #include #include "s2/base/integral_types.h" #include "s2/base/port.h" namespace gtl { // Several policy classes below determine how LogRangeToStream will // format a range of items. A Policy class should have these methods: // // Called to print an individual container element. 
// void Log(ostream &out, const ElementT &element) const; // // Called before printing the set of elements: // void LogOpening(ostream &out) const; // // Called after printing the set of elements: // void LogClosing(ostream &out) const; // // Called before printing the first element: // void LogFirstSeparator(ostream &out) const; // // Called before printing the remaining elements: // void LogSeparator(ostream &out) const; // // Returns the maximum number of elements to print: // int64 MaxElements() const; // // Called to print an indication that MaximumElements() was reached: // void LogEllipsis(ostream &out) const; namespace internal { struct LogBase { template void Log(std::ostream &out, const ElementT &element) const { // NOLINT out << element; } void LogEllipsis(std::ostream &out) const { // NOLINT out << "..."; } }; struct LogShortBase : public LogBase { void LogOpening(std::ostream &out) const { out << "["; } // NOLINT void LogClosing(std::ostream &out) const { out << "]"; } // NOLINT void LogFirstSeparator(std::ostream &out) const { out << ""; } // NOLINT void LogSeparator(std::ostream &out) const { out << ", "; } // NOLINT }; struct LogMultilineBase : public LogBase { void LogOpening(std::ostream &out) const { out << "["; } // NOLINT void LogClosing(std::ostream &out) const { out << "\n]"; } // NOLINT void LogFirstSeparator(std::ostream &out) const { out << "\n"; } // NOLINT void LogSeparator(std::ostream &out) const { out << "\n"; } // NOLINT }; struct LogLegacyBase : public LogBase { void LogOpening(std::ostream &out) const { out << ""; } // NOLINT void LogClosing(std::ostream &out) const { out << ""; } // NOLINT void LogFirstSeparator(std::ostream &out) const { out << ""; } // NOLINT void LogSeparator(std::ostream &out) const { out << " "; } // NOLINT }; } // namespace internal // LogShort uses [] braces and separates items with comma-spaces. For // example "[1, 2, 3]". struct LogShort : public internal::LogShortBase { int64 MaxElements() const { return std::numeric_limits::max(); } }; // LogShortUpToN(max_elements) formats the same as LogShort but prints no more // than the max_elements elements. class LogShortUpToN : public internal::LogShortBase { public: explicit LogShortUpToN(int64 max_elements) : max_elements_(max_elements) {} int64 MaxElements() const { return max_elements_; } private: int64 max_elements_; }; // LogShortUpTo100 formats the same as LogShort but prints no more // than 100 elements. struct LogShortUpTo100 : public LogShortUpToN { LogShortUpTo100() : LogShortUpToN(100) {} }; // LogMultiline uses [] braces and separates items with // newlines. For example "[ // 1 // 2 // 3 // ]". struct LogMultiline : public internal::LogMultilineBase { int64 MaxElements() const { return std::numeric_limits::max(); } }; // LogMultilineUpToN(max_elements) formats the same as LogMultiline but // prints no more than max_elements elements. class LogMultilineUpToN : public internal::LogMultilineBase { public: explicit LogMultilineUpToN(int64 max_elements) : max_elements_(max_elements) {} int64 MaxElements() const { return max_elements_; } private: int64 max_elements_; }; // LogMultilineUpTo100 formats the same as LogMultiline but // prints no more than 100 elements. struct LogMultilineUpTo100 : public LogMultilineUpToN { LogMultilineUpTo100() : LogMultilineUpToN(100) {} }; // The legacy behavior of LogSequence() does not use braces and // separates items with spaces. For example "1 2 3". 
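// Added comparison (illustrative, not part of the original source) of how the
// policy families render the same container {1, 2, 3}:
//
//   LogShort / LogShortUpTo100:      "[1, 2, 3]"
//   LogMultiline:                    "[\n1\n2\n3\n]"  (one element per line)
//   LogLegacy* (defined just below): "1 2 3"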
struct LogLegacyUpTo100 : public internal::LogLegacyBase { int64 MaxElements() const { return 100; } }; struct LogLegacy : public internal::LogLegacyBase { int64 MaxElements() const { return std::numeric_limits::max(); } }; // The default policy for new code. typedef LogShortUpTo100 LogDefault; // LogRangeToStream should be used to define operator<< for // STL and STL-like containers. For example, see stl_logging.h. template inline void LogRangeToStream(std::ostream &out, // NOLINT IteratorT begin, IteratorT end, const PolicyT &policy) { policy.LogOpening(out); for (int64 i = 0; begin != end && i < policy.MaxElements(); ++i, ++begin) { if (i == 0) { policy.LogFirstSeparator(out); } else { policy.LogSeparator(out); } policy.Log(out, *begin); } if (begin != end) { policy.LogSeparator(out); policy.LogEllipsis(out); } policy.LogClosing(out); } namespace detail { // RangeLogger is a helper class for gtl::LogRange and // gtl::LogContainer; do not use it directly. This object // captures iterators into the argument of the LogRange and // LogContainer functions, so its lifetime should be confined to a // single logging statement. Objects of this type should not be // assigned to local variables. template class RangeLogger { public: RangeLogger(const IteratorT &begin, const IteratorT &end, const PolicyT &policy) : begin_(begin), end_(end), policy_(policy) { } friend std::ostream &operator<<(std::ostream &out, const RangeLogger &range) { gtl::LogRangeToStream(out, range.begin_, range.end_, range.policy_); return out; } // operator<< above is generally recommended. However, some situations may // require a string, so a convenience str() method is provided as well. std::string str() const { std::stringstream ss; ss << *this; return ss.str(); } private: IteratorT begin_; IteratorT end_; PolicyT policy_; }; template class EnumLogger { public: explicit EnumLogger(E e) : e_(e) {} friend std::ostream &operator<<(std::ostream &out, const EnumLogger &v) { using I = typename std::underlying_type::type; return out << static_cast(v.e_); } private: E e_; }; } // namespace detail // Log a range using "policy". For example: // // S2_LOG(INFO) << gtl::LogRange(start_pos, end_pos, gtl::LogMultiline()); // // The above example will print the range using newlines between // elements, enclosed in [] braces. template detail::RangeLogger LogRange( const IteratorT &begin, const IteratorT &end, const PolicyT &policy) { return gtl::detail::RangeLogger(begin, end, policy); } // Log a range. For example: // // S2_LOG(INFO) << gtl::LogRange(start_pos, end_pos); // // By default, Range() uses the LogShortUpTo100 policy: comma-space // separation, no newlines, and with limit of 100 items. template detail::RangeLogger LogRange( const IteratorT &begin, const IteratorT &end) { return gtl::LogRange(begin, end, LogDefault()); } // Log a container using "policy". For example: // // S2_LOG(INFO) << gtl::LogContainer(container, gtl::LogMultiline()); // // The above example will print the container using newlines between // elements, enclosed in [] braces. template auto LogContainer(const ContainerT &container, const PolicyT &policy) -> decltype(gtl::LogRange(container.begin(), container.end(), policy)) { return gtl::LogRange(container.begin(), container.end(), policy); } // Log a container. For example: // // S2_LOG(INFO) << gtl::LogContainer(container); // // By default, Container() uses the LogShortUpTo100 policy: comma-space // separation, no newlines, and with limit of 100 items. 
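// Added example (illustrative, not part of the original source): because a
// policy is an ordinary struct passed by value as a template parameter, a
// caller can define its own.  LogPipeSeparated below is hypothetical; it
// keeps the [] braces and 100-element limit of LogShortUpTo100 but separates
// items with " | ":
//
//   struct LogPipeSeparated : public gtl::LogShortUpTo100 {
//     void LogSeparator(std::ostream &out) const { out << " | "; }  // NOLINT
//   };
//   ...
//   S2_LOG(INFO) << gtl::LogContainer(container, LogPipeSeparated());
//   // logs something like "[1 | 2 | 3]"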
template auto LogContainer(const ContainerT &container) -> decltype(gtl::LogContainer(container, LogDefault())) { return gtl::LogContainer(container, LogDefault()); } // Log a (possibly scoped) enum. For example: // // enum class Color { kRed, kGreen, kBlue }; // S2_LOG(INFO) << gtl::LogEnum(kRed); template detail::EnumLogger LogEnum(E e) { static_assert(std::is_enum::value, "must be an enum"); return detail::EnumLogger(e); } } // namespace gtl #endif // S2_UTIL_GTL_CONTAINER_LOGGING_H_ s2geometry-0.10.0/src/s2/util/gtl/dense_hash_set.h000066400000000000000000000343201422156367100217040ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // // This is just a very thin wrapper over densehashtable.h, just // like sgi stl's stl_hash_set is a very thin wrapper over // stl_hashtable. // // This is more different from dense_hash_map than you might think, // because all iterators for sets are const (you obviously can't // change the key, and for sets there is no value). // // NOTE: this is exactly like sparse_hash_set.h, with the word // "sparse" replaced by "dense", except for the addition of // set_empty_key(). // // YOU MUST CALL SET_EMPTY_KEY() IMMEDIATELY AFTER CONSTRUCTION. // // Otherwise your program will die in mysterious ways. (Note if you // use the constructor that takes an InputIterator range, you pass in // the empty key in the constructor, rather than after. 
As a result, // this constructor differs from the standard STL version.) // // In other respects, we adhere mostly to the STL semantics for // hash-map. One important exception is that insert() may invalidate // iterators entirely -- STL semantics are that insert() may reorder // iterators, but they all still refer to something valid in the // hashtable. Not so for us. Likewise, insert() may invalidate // pointers into the hashtable. (Whether insert invalidates iterators // and pointers depends on whether it results in a hashtable resize, // but that's an implementation detail that may change in the future.) // On the plus side, delete() doesn't invalidate iterators or pointers // at all, or even change the ordering of elements. // // Also please note: // // 1) set_deleted_key(): // If you want to use erase() you must call set_deleted_key(), // in addition to set_empty_key(), after construction. // The deleted and empty keys must differ. // // 2) Keys equal to the empty key or deleted key (if any) cannot be // used as keys for find(), count(), insert(), etc. // // 3) min_load_factor(): // Setting the minimum load factor controls how aggressively the // table is shrunk when keys are erased. Setting it to 0.0 // guarantees that the hash table will never shrink. // // 4) resize(0): // When an item is deleted, its memory isn't freed right // away. This allows you to iterate over a hashtable, // and call erase(), without invalidating the iterator. // To force the memory to be freed, call resize(0). // For tr1 compatibility, this can also be called as rehash(0). // Roughly speaking: // (1) dense_hash_set: fastest, uses the most memory unless entries are small // (2) sparse_hash_set: slowest, uses the least memory // (3) hash_set / unordered_set (STL): in the middle // // Typically I use sparse_hash_set when I care about space and/or when // I need to save the hashtable on disk. I use hash_set otherwise. I // don't personally use dense_hash_set ever; some people use it for // small sets with lots of lookups. // // - dense_hash_set has, typically, about 78% memory overhead (if your // data takes up X bytes, the hash_set uses .78X more bytes in overhead). // - sparse_hash_set has about 4 bits overhead per entry. // - sparse_hash_set can be 3-7 times slower than the others for lookup and, // especially, inserts. See time_hash_map.cc for details. // // See /usr/(local/)?doc/sparsehash-*/dense_hash_set.html // for information about how to use this class. #ifndef S2_UTIL_GTL_DENSE_HASH_SET_H_ #define S2_UTIL_GTL_DENSE_HASH_SET_H_ #include #include #include #include #include #include #include "s2/base/port.h" #include "absl/base/macros.h" #include "s2/util/gtl/densehashtable.h" // IWYU pragma: export // Some files test for this symbol. 
#define S2__DENSE_HASH_SET_H_ namespace gtl { template , class EqualKey = std::equal_to, class Alloc = std::allocator > class dense_hash_set { private: // Apparently identity is not stl-standard, so we define our own struct Identity { typedef const Value& result_type; const Value& operator()(const Value& v) const { return v; } }; struct SetKey { void operator()(Value* value, const Value& new_key) const { *value = new_key; } }; // The actual data typedef dense_hashtable ht; ht rep; public: typedef typename ht::key_type key_type; typedef typename ht::value_type value_type; typedef typename ht::hasher hasher; typedef typename ht::key_equal key_equal; typedef Alloc allocator_type; typedef typename ht::size_type size_type; typedef typename ht::difference_type difference_type; typedef typename ht::const_pointer pointer; typedef typename ht::const_pointer const_pointer; typedef typename ht::const_reference reference; typedef typename ht::const_reference const_reference; typedef typename ht::const_iterator iterator; typedef typename ht::const_iterator const_iterator; typedef typename ht::const_local_iterator local_iterator; typedef typename ht::const_local_iterator const_local_iterator; // Iterator functions -- recall all iterators are const iterator begin() const { return rep.begin(); } iterator end() const { return rep.end(); } // These come from tr1's unordered_set. For us, a bucket has 0 or 1 elements. ABSL_DEPRECATED( "This method is slated for removal. Please migrate to " "absl::flat_hash_set.") local_iterator begin(size_type i) const { return rep.begin(i); } ABSL_DEPRECATED( "This method is slated for removal. Please migrate to " "absl::flat_hash_set.") local_iterator end(size_type i) const { return rep.end(i); } // Accessor functions allocator_type get_allocator() const { return rep.get_allocator(); } hasher hash_funct() const { return rep.hash_funct(); } hasher hash_function() const { return hash_funct(); } // tr1 name key_equal key_eq() const { return rep.key_eq(); } // Constructors dense_hash_set() {} explicit dense_hash_set(size_type expected_max_items_in_table, const hasher& hf = hasher(), const key_equal& eql = key_equal(), const allocator_type& alloc = allocator_type()) : rep(expected_max_items_in_table, hf, eql, Identity(), SetKey(), alloc) { } template dense_hash_set(InputIterator f, InputIterator l, const key_type& empty_key_val, size_type expected_max_items_in_table = 0, const hasher& hf = hasher(), const key_equal& eql = key_equal(), const allocator_type& alloc = allocator_type()) : rep(expected_max_items_in_table, hf, eql, Identity(), SetKey(), alloc) { set_empty_key(empty_key_val); rep.insert(f, l); } // We use the default copy constructor // We use the default operator=() // We use the default destructor void clear() { rep.clear(); } // This clears the hash set without resizing it down to the minimum // bucket count, but rather keeps the number of buckets constant void clear_no_resize() { rep.clear_no_resize(); } void swap(dense_hash_set& hs) { rep.swap(hs.rep); } // Functions concerning size size_type size() const { return rep.size(); } size_type max_size() const { return rep.max_size(); } bool empty() const { return rep.empty(); } size_type bucket_count() const { return rep.bucket_count(); } ABSL_DEPRECATED( "This method is slated for removal. Please migrate to " "absl::flat_hash_set.") size_type max_bucket_count() const { return rep.max_bucket_count(); } // These are tr1 methods. bucket() is the bucket the key is or would be in. 
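// Added example (illustrative, not part of the original source): the
// iterator-range constructor above takes the empty key directly, so no
// separate set_empty_key() call is needed in that case:
//
//   std::vector<int> v = {1, 2, 3};
//   gtl::dense_hash_set<int> s(v.begin(), v.end(), /*empty_key_val=*/-1);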
ABSL_DEPRECATED( "This method is slated for removal. Please migrate to " "absl::flat_hash_set.") size_type bucket_size(size_type i) const { return rep.bucket_size(i); } ABSL_DEPRECATED( "This method is slated for removal. Please migrate to " "absl::flat_hash_set.") size_type bucket(const key_type& key) const { return rep.bucket(key); } float load_factor() const { return size() * 1.0f / bucket_count(); } float max_load_factor() const { float shrink, grow; rep.get_resizing_parameters(&shrink, &grow); return grow; } void max_load_factor(float new_grow) { float shrink, grow; rep.get_resizing_parameters(&shrink, &grow); rep.set_resizing_parameters(shrink, new_grow); } // These aren't tr1 methods but perhaps ought to be. ABSL_DEPRECATED( "This method is slated for removal. Please migrate to " "absl::flat_hash_set.") float min_load_factor() const { float shrink, grow; rep.get_resizing_parameters(&shrink, &grow); return shrink; } void min_load_factor(float new_shrink) { float shrink, grow; rep.get_resizing_parameters(&shrink, &grow); rep.set_resizing_parameters(new_shrink, grow); } // Deprecated; use min_load_factor() or max_load_factor() instead. void set_resizing_parameters(float shrink, float grow) { rep.set_resizing_parameters(shrink, grow); } void resize(size_type hint) { rep.resize(hint); } void rehash(size_type hint) { resize(hint); } // the tr1 name // Lookup routines iterator find(const key_type& key) const { return rep.find(key); } size_type count(const key_type& key) const { return rep.count(key); } std::pair equal_range(const key_type& key) const { return rep.equal_range(key); } // Insertion routines std::pair insert(const value_type& obj) { std::pair p = rep.insert(obj); return std::pair(p.first, p.second); // const to non-const } std::pair insert(value_type&& obj) { // NOLINT std::pair p = rep.insert(std::move(obj)); return std::pair(p.first, p.second); // const to non-const } template void insert(InputIterator f, InputIterator l) { rep.insert(f, l); } void insert(const_iterator f, const_iterator l) { rep.insert(f, l); } // Required for std::insert_iterator; the passed-in iterator is ignored. iterator insert(iterator, const value_type& obj) { return insert(obj).first; } iterator insert(iterator, value_type&& obj) { // NOLINT return insert(std::move(obj)).first; } // Unlike std::set, we cannot construct an element in place, as we do not have // a layer of indirection like std::set nodes. Therefore, emplace* methods do // not provide a performance advantage over insert + move. template std::pair emplace(Args&&... args) { return rep.insert(value_type(std::forward(args)...)); } // The passed-in const_iterator is ignored. template iterator emplace_hint(const_iterator, Args&&... args) { return rep.insert(value_type(std::forward(args)...)).first; } // Deletion and empty routines // THESE ARE NON-STANDARD! I make you specify an "impossible" key // value to identify deleted and empty buckets. You can change the // deleted key as time goes on, or get rid of it entirely to be insert-only. 
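// Added example (illustrative, not part of the original source):
//
//   gtl::dense_hash_set<int> s;
//   s.set_empty_key(-1);     // mandatory, and must be called first
//   s.set_deleted_key(-2);   // needed only if erase() will be used;
//                            // must differ from the empty key
//   s.insert(4);
//   s.erase(4);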
void set_empty_key(const key_type& key) { rep.set_empty_key(key); } void set_deleted_key(const key_type& key) { rep.set_deleted_key(key); } // These are standard size_type erase(const key_type& key) { return rep.erase(key); } void erase(iterator it) { rep.erase(it); } void erase(iterator f, iterator l) { rep.erase(f, l); } // Comparison bool operator==(const dense_hash_set& hs) const { return rep == hs.rep; } bool operator!=(const dense_hash_set& hs) const { return rep != hs.rep; } }; template inline void swap(dense_hash_set& hs1, dense_hash_set& hs2) { hs1.swap(hs2); } } #endif // S2_UTIL_GTL_DENSE_HASH_SET_H_ s2geometry-0.10.0/src/s2/util/gtl/densehashtable.h000066400000000000000000001630011422156367100217010ustar00rootroot00000000000000// Copyright 2005 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- // // A dense hashtable is a particular implementation of // a hashtable: one that is meant to minimize memory allocation. // It does this by using an array to store all the data. We // steal a value from the key space to indicate "empty" array // elements (ie indices where no item lives) and another to indicate // "deleted" elements. // // (Note it is possible to change the value of the delete key // on the fly; you can even remove it, though after that point // the hashtable is insert_only until you set it again. The empty // value however can't be changed.) 
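// (Added illustration, not part of the original source: the table is a single
// flat array of Values, and the caller-supplied empty/deleted keys act as
// sentinels.  For a set of ints with empty key -1 and deleted key -2, an
// 8-bucket table holding {4, 7} after one erase() might look like
//
//   [-1, 4, -1, -2, 7, -1, -1, -1]
//
// where -1 marks unused buckets and -2 marks a tombstone left behind by
// erase().)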
// // To minimize allocation and pointer overhead, we use internal // probing, in which the hashtable is a single table, and collisions // are resolved by trying to insert again in another bucket. The // most cache-efficient internal probing schemes are linear probing // (which suffers, alas, from clumping) and quadratic probing, which // is what we implement by default. // // Type requirements: value_type is required to be Move Constructible // and Default Constructible. It is not required to be (and commonly // isn't) Assignable. // // You probably shouldn't use this code directly. Use dense_hash_map<> // or dense_hash_set<> instead. // You can change the following below: // HT_OCCUPANCY_PCT -- how full before we double size // HT_EMPTY_PCT -- how empty before we halve size // HT_MIN_BUCKETS -- default smallest bucket size // // You can also change enlarge_factor (which defaults to // HT_OCCUPANCY_PCT), and shrink_factor (which defaults to // HT_EMPTY_PCT) with set_resizing_parameters(). // // How to decide what values to use? // shrink_factor's default of .4 * OCCUPANCY_PCT, is probably good. // HT_MIN_BUCKETS is probably unnecessary since you can specify // (indirectly) the starting number of buckets at construct-time. // For enlarge_factor, you can use this chart to try to trade-off // expected lookup time to the space taken up. By default, this // code uses quadratic probing, though you can change it to linear // via JUMP_ below if you really want to. // // From // L = N / M, // where N is the number of data items in the table and M is the table size. // NUMBER OF PROBES / LOOKUP Successful Unsuccessful // Quadratic collision resolution 1 - ln(1-L) - L/2 1/(1-L) - L - ln(1-L) // Linear collision resolution [1+1/(1-L)]/2 [1+1/(1-L)^2]/2 // // -- enlarge_factor -- 0.10 0.50 0.60 0.75 0.80 0.90 0.99 // QUADRATIC COLLISION RES. // probes/successful lookup 1.05 1.44 1.62 2.01 2.21 2.85 5.11 // probes/unsuccessful lookup 1.11 2.19 2.82 4.64 5.81 11.4 103.6 // LINEAR COLLISION RES. // probes/successful lookup 1.06 1.5 1.75 2.5 3.0 5.5 50.5 // probes/unsuccessful lookup 1.12 2.5 3.6 8.5 13.0 50.0 5000.0 #ifndef S2_UTIL_GTL_DENSEHASHTABLE_H_ #define S2_UTIL_GTL_DENSEHASHTABLE_H_ #include #include #include // for FILE, fwrite, fread #include // For swap(), eg #include #include #include // For iterator tags #include // for numeric_limits #include // For uninitialized_fill #include #include #include #include #include #include "s2/base/port.h" #include "s2/util/gtl/hashtable_common.h" #include // For length_error namespace gtl { // Some files test for this symbol. #define S2__DENSEHASHTABLE_H_ // The probing method // Linear probing // #define JUMP_(key, num_probes) ( 1 ) // Quadratic probing #define JUMP_(key, num_probes) (num_probes) // The weird mod in the offset is entirely to quiet compiler warnings // as is the cast to int after doing the "x mod 256" #define PUT_(take_from, offset) do { \ if (putc(static_cast(offset >= sizeof(take_from)*8) \ ? 0 : ((take_from) >> (offset)) % 256, fp) \ == EOF) \ return false; \ } while (0) #define GET_(add_to, offset) do { \ if ((x=getc(fp)) == EOF) \ return false; \ else if (offset >= sizeof(add_to) * 8) \ assert(x == 0); /* otherwise it's too big for us to represent */ \ else \ add_to |= (static_cast(x) << ((offset) % (sizeof(add_to)*8))); \ } while (0) // Hashtable class, used to implement the hashed associative containers // hash_set and hash_map. // Value: what is stored in the table (each bucket is a Value). 
// Key: something in a 1-to-1 correspondence to a Value, that can be used // to search for a Value in the table (find() takes a Key). // HashFcn: Takes a Key and returns an integer, the more unique the better. // ExtractKey: given a Value, returns the unique Key associated with it. // Must have a result_type enum indicating the return type of // operator(). // SetKey: given a Value* and a Key, modifies the value such that // ExtractKey(value) == key. We guarantee this is only called // with key == deleted_key or key == empty_key. // EqualKey: Given two Keys, says whether they are the same (that is, // if they are both associated with the same Value). // Alloc: STL allocator to use to allocate memory. template class dense_hashtable; template struct dense_hashtable_const_iterator; // We're just an array, but we need to skip over empty and deleted elements template struct dense_hashtable_iterator { private: using value_alloc_type = typename std::allocator_traits::template rebind_alloc; using value_alloc_traits = std::allocator_traits; public: typedef dense_hashtable_iterator iterator; typedef dense_hashtable_const_iterator const_iterator; typedef std::forward_iterator_tag iterator_category; // very little defined! typedef typename value_alloc_traits::value_type value_type; typedef typename value_alloc_traits::difference_type difference_type; typedef typename value_alloc_traits::size_type size_type; typedef value_type& reference; typedef typename value_alloc_traits::pointer pointer; // "Real" constructor and default constructor dense_hashtable_iterator( const dense_hashtable *h, pointer it, pointer it_end, bool advance) : ht(h), pos(it), end(it_end) { if (advance) advance_past_empty_and_deleted(); } dense_hashtable_iterator() { } // The default destructor is fine; we don't define one // The default operator= is fine; we don't define one // Happy dereferencer reference operator*() const { return *pos; } pointer operator->() const { return &(operator*()); } // Arithmetic. The only hard part is making sure that // we're not on an empty or marked-deleted array element void advance_past_empty_and_deleted() { while (pos != end && (ht->test_empty(*this) || ht->test_deleted(*this)) ) ++pos; } iterator& operator++() { assert(pos != end); ++pos; advance_past_empty_and_deleted(); return *this; } iterator operator++(int /*unused*/) { auto tmp(*this); ++*this; return tmp; } // Comparison. bool operator==(const iterator& it) const { return pos == it.pos; } bool operator!=(const iterator& it) const { return pos != it.pos; } // The actual data const dense_hashtable *ht; pointer pos, end; }; // Now do it all again, but with const-ness! template struct dense_hashtable_const_iterator { private: using value_alloc_type = typename std::allocator_traits::template rebind_alloc; using value_alloc_traits = std::allocator_traits; public: typedef dense_hashtable_iterator iterator; typedef dense_hashtable_const_iterator const_iterator; typedef std::forward_iterator_tag iterator_category; // very little defined! 
typedef typename value_alloc_traits::value_type value_type; typedef typename value_alloc_traits::difference_type difference_type; typedef typename value_alloc_traits::size_type size_type; typedef const value_type& reference; typedef typename value_alloc_traits::const_pointer pointer; // "Real" constructor and default constructor dense_hashtable_const_iterator( const dense_hashtable *h, pointer it, pointer it_end, bool advance) : ht(h), pos(it), end(it_end) { if (advance) advance_past_empty_and_deleted(); } dense_hashtable_const_iterator() : ht(nullptr), pos(pointer()), end(pointer()) { } // This lets us convert regular iterators to const iterators dense_hashtable_const_iterator(const iterator &it) : ht(it.ht), pos(it.pos), end(it.end) { } // The default destructor is fine; we don't define one // The default operator= is fine; we don't define one // Happy dereferencer reference operator*() const { return *pos; } pointer operator->() const { return &(operator*()); } // Arithmetic. The only hard part is making sure that // we're not on an empty or marked-deleted array element void advance_past_empty_and_deleted() { while (pos != end && (ht->test_empty(*this) || ht->test_deleted(*this))) ++pos; } const_iterator& operator++() { assert(pos != end); ++pos; advance_past_empty_and_deleted(); return *this; } const_iterator operator++(int /*unused*/) { auto tmp(*this); ++*this; return tmp; } // Comparison. bool operator==(const const_iterator& it) const { return pos == it.pos; } bool operator!=(const const_iterator& it) const { return pos != it.pos; } // The actual data const dense_hashtable *ht; pointer pos, end; }; template class dense_hashtable { private: using value_alloc_type = typename std::allocator_traits::template rebind_alloc; using value_alloc_traits = std::allocator_traits; public: typedef Key key_type; typedef Value value_type; typedef HashFcn hasher; typedef EqualKey key_equal; typedef Alloc allocator_type; typedef typename value_alloc_traits::size_type size_type; typedef typename value_alloc_traits::difference_type difference_type; typedef value_type& reference; typedef const value_type& const_reference; typedef typename value_alloc_traits::pointer pointer; typedef typename value_alloc_traits::const_pointer const_pointer; typedef dense_hashtable_iterator iterator; typedef dense_hashtable_const_iterator const_iterator; // These come from tr1. For us they're the same as regular iterators. typedef iterator local_iterator; typedef const_iterator const_local_iterator; // How full we let the table get before we resize, by default. // Knuth says .8 is good -- higher causes us to probe too much, // though it saves memory. static const int HT_OCCUPANCY_PCT; // defined at the bottom of this file // How empty we let the table get before we resize lower, by default. // (0.0 means never resize lower.) // It should be less than OCCUPANCY_PCT / 2 or we thrash resizing static const int HT_EMPTY_PCT; // defined at the bottom of this file // Minimum size we're willing to let hashtables be. // Must be a power of two, and at least 4. // Note, however, that for a given hashtable, the initial size is a // function of the first constructor arg, and may be >HT_MIN_BUCKETS. static const size_type HT_MIN_BUCKETS = 4; // By default, if you don't specify a hashtable size at // construction-time, we use this size. Must be a power of two, and // at least HT_MIN_BUCKETS. 
static const size_type HT_DEFAULT_STARTING_BUCKETS = 32; // ITERATOR FUNCTIONS iterator begin() { return iterator(this, table, table + num_buckets, true); } iterator end() { return iterator(this, table + num_buckets, table + num_buckets, true); } const_iterator begin() const { return const_iterator(this, table, table+num_buckets, true); } const_iterator end() const { return const_iterator(this, table + num_buckets, table+num_buckets, true); } // These come from tr1 unordered_map. They iterate over 'bucket' n. // We'll just consider bucket n to be the n-th element of the table. local_iterator begin(size_type i) { return local_iterator(this, table + i, table + i+1, false); } local_iterator end(size_type i) { local_iterator it = begin(i); if (!test_empty(i) && !test_deleted(i)) ++it; return it; } const_local_iterator begin(size_type i) const { return const_local_iterator(this, table + i, table + i+1, false); } const_local_iterator end(size_type i) const { const_local_iterator it = begin(i); if (!test_empty(i) && !test_deleted(i)) ++it; return it; } // ACCESSOR FUNCTIONS for the things we templatize on, basically hasher hash_funct() const { return settings; } key_equal key_eq() const { return key_info; } value_alloc_type get_allocator() const { return key_info; } // Accessor function for statistics gathering. int num_table_copies() const { return settings.num_ht_copies(); } private: // Annoyingly, we can't copy values around, because they might have // const components (they're probably std::pair). We use // explicit destructor invocation and placement new to get around // this. Arg. static void set_value(pointer dst, const_reference src) { dst->~value_type(); // delete the old value, if any new(dst) value_type(src); } static void set_value(pointer dst, value_type&& src) { // NOLINT dst->~value_type(); new(dst) value_type(std::move(src)); } void destroy_buckets(size_type first, size_type last) { for ( ; first != last; ++first) table[first].~value_type(); } // DELETE HELPER FUNCTIONS // This lets the user describe a key that will indicate deleted // table entries. This key should be an "impossible" entry -- // if you try to insert it for real, you won't be able to retrieve it! // (NB: while you pass in an entire value, only the key part is looked // at. This is just because I don't know how to assign just a key.) private: // Gets rid of any deleted entries we have. void squash_deleted() { if (num_deleted > 0) { rebucket(settings.min_buckets(size(), num_buckets)); } assert(num_deleted == 0); } // Test if the given key is the deleted indicator. Requires // num_deleted > 0, for correctness of read(), and because that // guarantees that key_info.delkey is valid. 
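// Added note on set_value() above (not part of the original source): the
// destroy-then-placement-new dance is needed because map-style values such
// as std::pair<const Key, T> have no usable operator=.  A standalone sketch
// of the same idea:
//
//   using Slot = std::pair<const int, int>;
//   Slot slot{1, 2};
//   // slot = Slot{3, 4};      // would not compile: const first member
//   slot.~Slot();              // what set_value() does instead:
//   new (&slot) Slot(3, 4);    // destroy, then construct in place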
bool test_deleted_key(const key_type& key) const { assert(num_deleted > 0); return equals(key_info.delkey, key); } public: void set_deleted_key(const key_type &key) { // the empty indicator (if specified) and the deleted indicator // must be different assert((!settings.use_empty() || !equals(key, key_info.empty)) && "Passed the empty-key to set_deleted_key"); // It's only safe to change what "deleted" means if we purge deleted guys squash_deleted(); settings.set_use_deleted(true); key_info.delkey = key; } key_type deleted_key() const { assert(settings.use_deleted() && "Must set deleted key before calling deleted_key"); return key_info.delkey; } // These are public so the iterators can use them // True if the item at position bucknum is "deleted" marker bool test_deleted(size_type bucknum) const { // Invariant: !use_deleted() implies num_deleted is 0. assert(settings.use_deleted() || num_deleted == 0); return num_deleted > 0 && test_deleted_key(get_key(table[bucknum])); } bool test_deleted(const iterator &it) const { // Invariant: !use_deleted() implies num_deleted is 0. assert(settings.use_deleted() || num_deleted == 0); return num_deleted > 0 && test_deleted_key(get_key(*it)); } bool test_deleted(const const_iterator &it) const { // Invariant: !use_deleted() implies num_deleted is 0. assert(settings.use_deleted() || num_deleted == 0); return num_deleted > 0 && test_deleted_key(get_key(*it)); } private: void check_use_deleted(const char* caller) { (void)caller; // could log it if the assert failed assert(settings.use_deleted()); } // Write the deleted key to the position specified. // Requires: !test_deleted(it) void set_deleted(iterator &it) { check_use_deleted("set_deleted()"); assert(!test_deleted(it)); // &* converts from iterator to value-type. set_key(&(*it), key_info.delkey); } // We also allow to set/clear the deleted bit on a const iterator. // We allow a const_iterator for the same reason you can delete a // const pointer: it's convenient, and semantically you can't use // 'it' after it's been deleted anyway, so its const-ness doesn't // really matter. // Requires: !test_deleted(it) void set_deleted(const_iterator &it) { check_use_deleted("set_deleted()"); assert(!test_deleted(it)); set_key(const_cast(&(*it)), key_info.delkey); } // EMPTY HELPER FUNCTIONS // This lets the user describe a key that will indicate empty (unused) // table entries. This key should be an "impossible" entry -- // if you try to insert it for real, you won't be able to retrieve it! // (NB: while you pass in an entire value, only the key part is looked // at. This is just because I don't know how to assign just a key.) public: // These are public so the iterators can use them // True if the item at position bucknum is "empty" marker bool test_empty(size_type bucknum) const { assert(settings.use_empty()); // we always need to know what's empty! return equals(key_info.empty, get_key(table[bucknum])); } bool test_empty(const iterator &it) const { assert(settings.use_empty()); // we always need to know what's empty! return equals(key_info.empty, get_key(*it)); } bool test_empty(const const_iterator &it) const { assert(settings.use_empty()); // we always need to know what's empty! 
return equals(key_info.empty, get_key(*it)); } private: bool test_empty(size_type bucknum, const_pointer ptable) const { assert(settings.use_empty()); return equals(key_info.empty, get_key(ptable[bucknum])); } void fill_range_with_empty(pointer table_start, pointer table_end) { for (; table_start != table_end; ++table_start) { new (table_start) value_type(); set_key(table_start, key_info.empty); } } public: // TODO(user): change all callers of this to pass in a key instead, // and take a const key_type instead of const value_type. void set_empty_key(const_reference val) { // Once you set the empty key, you can't change it assert(!settings.use_empty() && "Calling set_empty_key multiple times"); // The deleted indicator (if specified) and the empty indicator // must be different. const key_type& key = get_key(val); assert((!settings.use_deleted() || !equals(key, key_info.delkey)) && "Setting the empty key the same as the deleted key"); settings.set_use_empty(true); key_info.empty.~key_type(); new (&key_info.empty) key_type(key); assert(!table); // must set before first use // num_buckets was set in constructor even though table was nullptr table = get_internal_allocator().allocate(num_buckets); fill_range_with_empty(table, table + num_buckets); } // TODO(user): this should return the key by const reference. value_type empty_key() const { assert(settings.use_empty()); value_type ret = value_type(); set_key(&ret, key_info.empty); return ret; } // FUNCTIONS CONCERNING SIZE public: size_type size() const { return num_elements - num_deleted; } size_type max_size() const { return value_alloc_traits::max_size(get_allocator()); } bool empty() const { return size() == 0; } size_type bucket_count() const { return num_buckets; } size_type max_bucket_count() const { return max_size(); } size_type nonempty_bucket_count() const { return num_elements; } // These are tr1 methods. Their idea of 'bucket' doesn't map well to // what we do. We just say every bucket has 0 or 1 items in it. size_type bucket_size(size_type i) const { return begin(i) == end(i) ? 0 : 1; } private: // Because of the above, size_type(-1) is never legal; use it for errors static const size_type ILLEGAL_BUCKET = size_type(-1); // Used after a string of deletes. Returns true if we actually shrunk. // TODO(user): take a delta so we can take into account inserts // done after shrinking. Maybe make part of the Settings class? bool maybe_shrink() { assert(num_elements >= num_deleted); assert((bucket_count() & (bucket_count()-1)) == 0); // is a power of two assert(bucket_count() >= HT_MIN_BUCKETS); bool retval = false; // If you construct a hashtable with < HT_DEFAULT_STARTING_BUCKETS, // we'll never shrink until you get relatively big, and we'll never // shrink below HT_DEFAULT_STARTING_BUCKETS. Otherwise, something // like "dense_hash_set x; x.insert(4); x.erase(4);" will // shrink us down to HT_MIN_BUCKETS buckets, which is too small. 
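// (Added worked example, not part of the original source, using an
// illustrative shrink_factor of 0.2: with 256 buckets and only 10 live
// elements, sz starts at 256 / 2 = 128 and keeps halving while
// 10 < sz * 0.2, giving 128 -> 64 -> 32; the loop stops at
// HT_DEFAULT_STARTING_BUCKETS and the table is rebucketed to 32 buckets.)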
const size_type num_remain = num_elements - num_deleted; const size_type shrink_threshold = settings.shrink_threshold(); if (shrink_threshold > 0 && num_remain < shrink_threshold && bucket_count() > HT_DEFAULT_STARTING_BUCKETS) { const float shrink_factor = settings.shrink_factor(); size_type sz = bucket_count() / 2; // find how much we should shrink while (sz > HT_DEFAULT_STARTING_BUCKETS && num_remain < sz * shrink_factor) { sz /= 2; // stay a power of 2 } rebucket(sz); retval = true; } settings.set_consider_shrink(false); // because we just considered it return retval; } // We'll let you resize a hashtable -- though this makes us copy all! // When you resize, you say, "make it big enough for this many more elements" // Returns true if we actually resized, false if size was already ok. bool resize_delta(size_type delta) { bool did_resize = false; if (settings.consider_shrink()) { // see if lots of deletes happened if (maybe_shrink()) did_resize = true; } if (num_elements >= std::numeric_limits::max() - delta) { throw std::length_error("resize overflow"); } assert(settings.enlarge_threshold() < bucket_count()); // Check if our work is done. if (bucket_count() >= HT_MIN_BUCKETS && num_elements + delta <= settings.enlarge_threshold()) { return did_resize; } // Sometimes, we need to resize just to get rid of all the // "deleted" buckets that are clogging up the hashtable. So when // deciding whether to resize, count the deleted buckets (which // are currently taking up room). But later, when we decide what // size to resize to, *don't* count deleted buckets, since they // get discarded during the resize. const size_type needed_size = settings.min_buckets(num_elements + delta, 0); if (needed_size <= bucket_count()) // we have enough buckets return did_resize; // We will rebucket. size_type resize_to = settings.min_buckets(num_elements - num_deleted + delta, bucket_count()); if (resize_to < needed_size) { // This situation means that we have enough deleted elements, // that once we purge them, we won't actually have needed to // grow. But we may want to grow anyway: if we just purge one // element, say, we'll have to grow anyway next time we // insert. Might as well grow now, since we're already going // through the trouble of rebucketing in order to purge the // deleted elements. (Safety note: Can resize_to * 2 overflow? No. // The output of min_buckets() is always a power of two, so resize_to // and needed_size are powers of two. That plus resize_to < needed_size // proves that overflow isn't a concern.) const size_type target = static_cast(settings.shrink_size(resize_to*2)); if (num_elements - num_deleted + delta >= target) { // Good, we won't be below the shrink threshhold even if we double. resize_to *= 2; } } rebucket(resize_to); return true; } // We require table be non-null and empty before calling this. void resize_table(size_type old_size, size_type new_size) { get_internal_allocator().deallocate(table, old_size); table = get_internal_allocator().allocate(new_size); } // Copy (or, if Iter is a move_iterator, move) the elements from // [src_first, src_last) into dest_table, which we assume has size // dest_bucket_count and has been initialized to the empty key. 
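// (Added illustration, not part of the original source: with quadratic
// probing as defined by JUMP_ above, the n-th probe is offset by the n-th
// triangular number.  A standalone sketch of the bucket sequence for an
// 8-bucket table and a key hashing to bucket 5:
//
//   size_t bucknum = 5, num_probes = 0;
//   for (int i = 0; i < 8; ++i) {
//     printf("%zu ", bucknum);                // prints: 5 6 0 3 7 4 2 1
//     ++num_probes;
//     bucknum = (bucknum + num_probes) & 7;   // JUMP_(key, n) == n
//   }
//
// Every bucket is visited exactly once because the table size is a power of
// two, which is why the probe loops below can assert rather than loop
// forever.)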
template void copy_elements(Iter src_first, Iter src_last, pointer dest_table, size_type dest_bucket_count) { assert((dest_bucket_count & (dest_bucket_count - 1)) == 0); // a power of 2 // We use a normal iterator to get non-deleted bcks from ht // We could use insert() here, but since we know there are // no duplicates and no deleted items, we can be more efficient for (; src_first != src_last; ++src_first) { size_type num_probes = 0; // how many times we've probed size_type bucknum; const size_type bucket_count_minus_one = dest_bucket_count - 1; for (bucknum = hash(get_key(*src_first)) & bucket_count_minus_one; !test_empty(bucknum, dest_table); // not empty bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one) { ++num_probes; assert(num_probes < dest_bucket_count && "Hashtable is full: an error in key_equal<> or hash<>"); } // Copies or moves the value into dest_table. set_value(&dest_table[bucknum], *src_first); } } // Used to actually do the rehashing when we grow/shrink a hashtable void copy_from(const dense_hashtable &ht, size_type min_buckets_wanted) { size_type size = ht.size(); // clear_to_size() sets ht.size() to 0. clear_to_size(settings.min_buckets(ht.size(), min_buckets_wanted)); copy_elements(ht.begin(), ht.end(), table, bucket_count()); num_elements = size; settings.inc_num_ht_copies(); } // Rebuckets and resizes the hashtable. Gets rid of any deleted entries. void rebucket(size_type new_num_buckets) { if (table == nullptr) { // When we eventually allocate the table, it will have this many buckets. num_buckets = new_num_buckets; return; } assert(settings.use_empty()); assert((new_num_buckets & (new_num_buckets - 1)) == 0); // a power of two // If settings.shrink_factor() is zero then we must not shrink. assert(settings.shrink_factor() > 0 || new_num_buckets >= num_buckets); pointer new_table = get_internal_allocator().allocate(new_num_buckets); fill_range_with_empty(new_table, new_table + new_num_buckets); copy_elements(std::make_move_iterator(begin()), std::make_move_iterator(end()), new_table, new_num_buckets); destroy_buckets(0, num_buckets); // Destroy table's elements. get_internal_allocator().deallocate(table, num_buckets); table = new_table; num_buckets = new_num_buckets; assert(num_elements >= num_deleted); num_elements -= num_deleted; num_deleted = 0; settings.reset_thresholds(bucket_count()); settings.inc_num_ht_copies(); } // Required by the spec for hashed associative container public: // Though the docs say this should be num_buckets, I think it's much // more useful as num_elements. As a special feature, calling with // req_elements==0 will cause us to shrink if we can, saving space. void resize(size_type req_elements) { // resize to this or larger if ( settings.consider_shrink() || req_elements == 0 ) maybe_shrink(); if ( req_elements > num_elements ) resize_delta(req_elements - num_elements); } // Get and change the value of shrink_factor and enlarge_factor. The // description at the beginning of this file explains how to choose // the values. Setting the shrink parameter to 0.0 ensures that the // table never shrinks. void get_resizing_parameters(float* shrink, float* grow) const { *shrink = settings.shrink_factor(); *grow = settings.enlarge_factor(); } void set_resizing_parameters(float shrink, float grow) { settings.set_resizing_parameters(shrink, grow); settings.reset_thresholds(bucket_count()); } // CONSTRUCTORS -- as required by the specs, we take a size, // but also let you specify a hashfunction, key comparator, // and key extractor. 
We also define a copy constructor and =. // DESTRUCTOR -- needs to free the table explicit dense_hashtable(size_type expected_max_items_in_table = 0, const HashFcn& hf = HashFcn(), const EqualKey& eql = EqualKey(), const ExtractKey& ext = ExtractKey(), const SetKey& set = SetKey(), const Alloc& alloc = Alloc()) : settings(hf), key_info(ext, set, eql, value_alloc_type(alloc)), num_deleted(0), num_elements(0), num_buckets(expected_max_items_in_table == 0 ? HT_DEFAULT_STARTING_BUCKETS : settings.min_buckets(expected_max_items_in_table, 0)), table(nullptr) { // table is nullptr until the empty key is set. However, we set num_buckets // here so we know how much space to allocate once the empty key is set. settings.reset_thresholds(bucket_count()); } // As a convenience for resize(), we allow an optional second argument // which lets you make this new hashtable a different size than ht dense_hashtable(const dense_hashtable& ht, size_type min_buckets_wanted = HT_DEFAULT_STARTING_BUCKETS) : settings(ht.settings), key_info(ht.key_info.as_extract_key(), ht.key_info.as_set_key(), ht.key_info.as_equal_key(), value_alloc_type( std::allocator_traits:: select_on_container_copy_construction( ht.key_info.as_value_alloc()))), num_deleted(0), num_elements(0), num_buckets(0), table(nullptr) { key_info.delkey = ht.key_info.delkey; key_info.empty = ht.key_info.empty; if (!ht.settings.use_empty()) { // If use_empty isn't set, copy_from will crash, so we do our own copying. assert(ht.empty()); num_buckets = settings.min_buckets(ht.size(), min_buckets_wanted); settings.reset_thresholds(bucket_count()); return; } settings.reset_thresholds(bucket_count()); copy_from(ht, min_buckets_wanted); // copy_from() ignores deleted entries } dense_hashtable& operator=(const dense_hashtable& ht) { if (&ht == this) return *this; // don't copy onto ourselves settings = ht.settings; key_info.as_extract_key() = ht.key_info.as_extract_key(); key_info.as_set_key() = ht.key_info.as_set_key(); key_info.as_equal_key() = ht.key_info.as_equal_key(); if (std::allocator_traits< value_alloc_type>::propagate_on_container_copy_assignment::value) { // If we're about to overwrite our allocator, we need to free all // memory using our old allocator. if (key_info.as_value_alloc() != ht.key_info.as_value_alloc()) { destroy_table(); } static_cast(key_info) = static_cast(ht.key_info); } key_info.empty = ht.key_info.empty; key_info.delkey = ht.key_info.delkey; if (ht.settings.use_empty()) { // copy_from() calls clear and sets num_deleted to 0 too copy_from(ht, HT_MIN_BUCKETS); } else { assert(ht.empty()); destroy_table(); } // we purposefully don't copy the allocator, which may not be copyable return *this; } dense_hashtable(dense_hashtable&& ht) : settings(std::move(ht.settings)), key_info(std::move(ht.key_info)), num_deleted(ht.num_deleted), num_elements(ht.num_elements), num_buckets(ht.num_buckets), table(ht.table) { ht.num_deleted = 0; ht.num_elements = 0; ht.table = nullptr; ht.num_buckets = HT_DEFAULT_STARTING_BUCKETS; ht.settings.set_use_empty(false); ht.settings.set_use_deleted(false); } dense_hashtable& operator=(dense_hashtable&& ht) { if (&ht == this) return *this; // don't move onto ourselves const bool can_move_table = std::allocator_traits< Alloc>::propagate_on_container_move_assignment::value || key_info.as_value_alloc() == ht.key_info.as_value_alloc(); // First, deallocate with this's allocator. destroy_table(); if (std::allocator_traits< value_alloc_type>::propagate_on_container_move_assignment::value) { // This moves the allocator. 
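// (Added clarification, not part of the original source: moving key_info
// wholesale transfers the allocator along with the functors and the
// empty/deleted sentinel keys.  When the allocator does not propagate on
// move assignment, the else-branch below moves only the non-allocator
// pieces; and if the allocators also compare unequal, the table contents
// are later moved element-by-element instead of simply stealing ht.table.)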
key_info = std::move(ht.key_info); } else { // Move all other base classes of key_info from ht, but don't move the // allocator. key_info.as_extract_key() = std::move(ht.key_info.as_extract_key()); key_info.as_set_key() = std::move(ht.key_info.as_set_key()); key_info.as_equal_key() = std::move(ht.key_info.as_equal_key()); key_info.delkey = std::move(ht.key_info.delkey); key_info.empty = std::move(ht.key_info.empty); } settings = std::move(ht.settings); num_deleted = ht.num_deleted; ht.num_deleted = 0; num_elements = ht.num_elements; ht.num_elements = 0; num_buckets = ht.num_buckets; ht.num_buckets = HT_DEFAULT_STARTING_BUCKETS; ht.settings.set_use_empty(false); ht.settings.set_use_deleted(false); if (can_move_table) { // We can transfer ownership of the table from ht to this because either // we're propagating the allocator or ht's allocator is equal to this's. table = ht.table; ht.table = nullptr; } else if (ht.table) { // We can't transfer ownership of any memory from ht to this, so the // best we can do is move element-by-element. table = get_internal_allocator().allocate(num_buckets); for (size_type i = 0; i < num_buckets; ++i) { new(table + i) Value(std::move(ht.table[i])); } ht.destroy_table(); } return *this; } ~dense_hashtable() { destroy_table(); } // Many STL algorithms use swap instead of copy constructors void swap(dense_hashtable& ht) { if (this == &ht) return; // swap with self. using std::swap; swap(settings, ht.settings); // Swap everything in key_info but the allocator. swap(key_info.as_extract_key(), ht.key_info.as_extract_key()); swap(key_info.as_set_key(), ht.key_info.as_set_key()); swap(key_info.as_equal_key(), ht.key_info.as_equal_key()); if (std::allocator_traits< value_alloc_type>::propagate_on_container_swap::value) { swap(static_cast(key_info), static_cast(ht.key_info)); } else { // Swapping when allocators are unequal and // propagate_on_container_swap is false is undefined behavior. S2_CHECK(key_info.as_value_alloc() == ht.key_info.as_value_alloc()); } swap(key_info.empty, ht.key_info.empty); swap(key_info.delkey, ht.key_info.delkey); swap(num_deleted, ht.num_deleted); swap(num_elements, ht.num_elements); swap(num_buckets, ht.num_buckets); swap(table, ht.table); } private: void destroy_table() { if (table) { destroy_buckets(0, num_buckets); get_internal_allocator().deallocate(table, num_buckets); table = nullptr; } } void clear_to_size(size_type new_num_buckets) { if (!table) { table = get_internal_allocator().allocate(new_num_buckets); } else { destroy_buckets(0, num_buckets); if (new_num_buckets != num_buckets) { // resize, if necessary resize_table(num_buckets, new_num_buckets); } } assert(table); fill_range_with_empty(table, table + new_num_buckets); num_elements = 0; num_deleted = 0; num_buckets = new_num_buckets; // our new size settings.reset_thresholds(bucket_count()); } public: // It's always nice to be able to clear a table without deallocating it void clear() { // If the table is already empty, and the number of buckets is // already as we desire, there's nothing to do. const size_type new_num_buckets = settings.min_buckets(0, 0); if (num_elements == 0 && new_num_buckets == num_buckets) { return; } clear_to_size(new_num_buckets); } // Clear the table without resizing it. 
// Mimicks the stl_hashtable's behaviour when clear()-ing in that it // does not modify the bucket count void clear_no_resize() { if (num_elements > 0) { assert(table); destroy_buckets(0, num_buckets); fill_range_with_empty(table, table + num_buckets); } // don't consider to shrink before another erase() settings.reset_thresholds(bucket_count()); num_elements = 0; num_deleted = 0; } // LOOKUP ROUTINES private: template void assert_key_is_not_empty_or_deleted(const K& key) const { assert(settings.use_empty() && "set_empty_key() was not called"); assert(!equals(key, key_info.empty) && "Using the empty key as a regular key"); assert((!settings.use_deleted() || !equals(key, key_info.delkey)) && "Using the deleted key as a regular key"); } template std::pair find_position(const K& key) const { return find_position_using_hash(hash(key), key); } // Returns a pair of positions: 1st where the object is, 2nd where // it would go if you wanted to insert it. 1st is ILLEGAL_BUCKET // if object is not found; 2nd is ILLEGAL_BUCKET if it is. // Note: because of deletions where-to-insert is not trivial: it's the // first deleted bucket we see, as long as we don't find the key later template std::pair find_position_using_hash( const size_type key_hash, const K& key) const { assert_key_is_not_empty_or_deleted(key); size_type num_probes = 0; // how many times we've probed const size_type bucket_count_minus_one = bucket_count() - 1; size_type bucknum = key_hash & bucket_count_minus_one; size_type insert_pos = ILLEGAL_BUCKET; // where we would insert while (true) { // probe until something happens if (test_empty(bucknum)) { // bucket is empty if (insert_pos == ILLEGAL_BUCKET) // found no prior place to insert return std::pair(ILLEGAL_BUCKET, bucknum); else return std::pair(ILLEGAL_BUCKET, insert_pos); } else if (test_deleted(bucknum)) { // keep searching, but mark to insert if ( insert_pos == ILLEGAL_BUCKET ) insert_pos = bucknum; } else if (equals(key, get_key(table[bucknum]))) { return std::pair(bucknum, ILLEGAL_BUCKET); } ++num_probes; // we're doing another probe bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one; assert(num_probes < bucket_count() && "Hashtable is full: an error in key_equal<> or hash<>"); } } template std::pair find_if_present(const K& key) const { return find_if_present_using_hash(hash(key), key); } // Return where the key is (if at all), and if it is present. If // the key isn't present then the first part of the return value is // undefined. The same information can be extracted from the result // of find_position(), but that tends to be slower in practice. template std::pair find_if_present_using_hash( const size_type key_hash, const K& key) const { assert_key_is_not_empty_or_deleted(key); size_type num_probes = 0; // how many times we've probed const size_type bucket_count_minus_one = bucket_count() - 1; size_type bucknum = key_hash & bucket_count_minus_one; while (true) { // probe until something happens if (equals(key, get_key(table[bucknum]))) { return std::pair(bucknum, true); } else if (test_empty(bucknum)) { return std::pair(0, false); } ++num_probes; // we're doing another probe bucknum = (bucknum + JUMP_(key, num_probes)) & bucket_count_minus_one; assert(num_probes < bucket_count() && "Hashtable is full: an error in key_equal<> or hash<>"); } } private: template iterator find_impl(const K& key) { std::pair pos = find_if_present(key); return pos.second ? 
iterator(this, table + pos.first, table + num_buckets, false) : end(); } template const_iterator find_impl(const K& key) const { std::pair pos = find_if_present(key); return pos.second ? const_iterator(this, table + pos.first, table + num_buckets, false) : end(); } template size_type bucket_impl(const K& key) const { std::pair pos = find_position(key); return pos.first == ILLEGAL_BUCKET ? pos.second : pos.first; } template size_type count_impl(const K& key) const { return find_if_present(key).second ? 1 : 0; } template std::pair equal_range_impl(const K& key) { iterator pos = find(key); if (pos == end()) { return std::pair(pos, pos); } else { const iterator startpos = pos++; return std::pair(startpos, pos); } } template std::pair equal_range_impl(const K& key) const { const_iterator pos = find(key); if (pos == end()) { return std::pair(pos, pos); } else { const const_iterator startpos = pos++; return std::pair(startpos, pos); } } public: iterator find(const key_type& key) { return find_impl(key); } const_iterator find(const key_type& key) const { return find_impl(key); } // This is a tr1 method: the bucket a given key is in, or what bucket // it would be put in, if it were to be inserted. Shrug. size_type bucket(const key_type& key) const { return bucket_impl(key); } // Counts how many elements have key key. For maps, it's either 0 or 1. size_type count(const key_type &key) const { return count_impl(key); } // Likewise, equal_range doesn't really make sense for us. Oh well. std::pair equal_range(const key_type& key) { return equal_range_impl(key); } std::pair equal_range(const key_type& key) const { return equal_range_impl(key); } // INSERTION ROUTINES private: // Private method used by insert_noresize and find_or_insert. // 'obj' is either value_type&& or const value_type&. template iterator insert_at(U&& obj, size_type pos) { if (size() >= max_size()) { throw std::length_error("insert overflow"); } if ( test_deleted(pos) ) { // just replace if it's been del. assert(num_deleted > 0); --num_deleted; // used to be, now it isn't } else { ++num_elements; // replacing an empty bucket } set_value(&table[pos], std::forward(obj)); return iterator(this, table + pos, table + num_buckets, false); } // If you know *this is big enough to hold obj, use this routine // 'obj' is value_type&& or const value_type&. template std::pair insert_noresize(U&& obj) { // NOLINT return insert_noresize_using_hash(hash(get_key(obj)), std::forward(obj)); } // If you know *this is big enough to hold obj, use this routine // 'obj' is value_type&& or const value_type&. 
template std::pair insert_noresize_using_hash(const size_type key_hash, U&& obj) { const std::pair pos = find_position_using_hash(key_hash, get_key(obj)); if (pos.first != ILLEGAL_BUCKET) { // object was already there return std::pair(iterator(this, table + pos.first, table + num_buckets, false), false); // false: we didn't insert } else { // pos.second says where to put it iterator i = insert_at(std::forward(obj), pos.second); return std::pair(i, true); } } // Specializations of insert(it, it) depending on the power of the iterator: // (1) Iterator supports operator-, resize before inserting template void insert(ForwardIterator f, ForwardIterator l, std::forward_iterator_tag) { size_t dist = std::distance(f, l); if (dist >= std::numeric_limits::max()) { throw std::length_error("insert-range overflow"); } resize_delta(static_cast(dist)); for ( ; dist > 0; --dist, ++f) { insert_noresize(*f); } } // (2) Arbitrary iterator, can't tell how much to resize template void insert(InputIterator f, InputIterator l, std::input_iterator_tag) { for ( ; f != l; ++f) insert(*f); } public: // This is the normal insert routine, used by the outside world std::pair insert(const value_type& obj) { resize_delta(1); // adding an object, grow if need be return insert_noresize(obj); } std::pair insert(value_type&& obj) { // NOLINT resize_delta(1); // adding an object, grow if need be return insert_noresize(std::move(obj)); } // When inserting a lot at a time, we specialize on the type of iterator template void insert(InputIterator f, InputIterator l) { // specializes on iterator type insert(f, l, typename std::iterator_traits::iterator_category()); } template value_type& find_or_insert(const key_type& key) { return find_or_insert_using_hash(hash(key), key); } // DefaultValue is a functor that takes a key and returns a value_type // representing the default value to be inserted if none is found. template value_type& find_or_insert_using_hash(const size_type key_hash, const key_type& key) { const std::pair pos = find_position_using_hash(key_hash, key); DefaultValue default_value; if (pos.first != ILLEGAL_BUCKET) { // object was already there return table[pos.first]; } else if (resize_delta(1)) { // needed to rehash to make room // Since we resized, we can't use pos, so recalculate where to insert. return *insert_noresize(default_value(key)).first; } else { // no need to rehash, insert right here return *insert_at(default_value(key), pos.second); } } // DELETION ROUTINES private: template size_type erase_impl(const K& key) { iterator pos = find(key); if (pos != end()) { assert(!test_deleted(pos)); // or find() shouldn't have returned it set_deleted(pos); ++num_deleted; // will think about shrink after next insert settings.set_consider_shrink(true); return 1; // because we deleted one thing } else { return 0; // because we deleted nothing } } public: size_type erase(const key_type& key) { return erase_impl(key); } void erase(iterator pos) { if (pos == end()) return; set_deleted(pos); ++num_deleted; // will think about shrink after next insert settings.set_consider_shrink(true); } void erase(iterator f, iterator l) { for (; f != l; ++f) { set_deleted(f); ++num_deleted; } // will think about shrink after next insert settings.set_consider_shrink(true); } // We allow you to erase a const_iterator just like we allow you to // erase an iterator. This is in parallel to 'delete': you can delete // a const pointer just like a non-const pointer. 
The logic is that // you can't use the object after it's erased anyway, so it doesn't matter // if it's const or not. void erase(const_iterator pos) { if (pos == end()) return; set_deleted(pos); ++num_deleted; // will think about shrink after next insert settings.set_consider_shrink(true); } void erase(const_iterator f, const_iterator l) { for ( ; f != l; ++f) { set_deleted(f); ++num_deleted; } // will think about shrink after next insert settings.set_consider_shrink(true); } // COMPARISON bool operator==(const dense_hashtable& ht) const { if (size() != ht.size()) { return false; } else if (this == &ht) { return true; } else { // Iterate through the elements in "this" and see if the // corresponding element is in ht for ( const_iterator it = begin(); it != end(); ++it ) { const_iterator it2 = ht.find(get_key(*it)); if ((it2 == ht.end()) || (*it != *it2)) { return false; } } return true; } } bool operator!=(const dense_hashtable& ht) const { return !(*this == ht); } // I/O // We support reading and writing hashtables to disk. Alas, since // I don't know how to write a hasher or key_equal, you have to make // sure everything but the table is the same. We compact before writing. private: // Every time the disk format changes, this should probably change too typedef unsigned long MagicNumberType; static const MagicNumberType MAGIC_NUMBER = 0x13578642; // Package functors with another class to eliminate memory needed for // zero-size functors. Since ExtractKey and hasher's operator() might // have the same function signature, they must be packaged in // different classes. struct Settings : sh_hashtable_settings { explicit Settings(const hasher& hf) : sh_hashtable_settings( hf, HT_OCCUPANCY_PCT / 100.0f, HT_EMPTY_PCT / 100.0f) {} }; // Packages ExtractKey, SetKey, EqualKey functors, allocator and deleted and // empty key values. struct KeyInfo : public ExtractKey, public SetKey, public EqualKey, public value_alloc_type { KeyInfo(const ExtractKey& ek, const SetKey& sk, const EqualKey& eq, const value_alloc_type& a) : ExtractKey(ek), SetKey(sk), EqualKey(eq), value_alloc_type(a), delkey(), empty() {} // Accessors for convenient access to base classes. ExtractKey& as_extract_key() { return *this; } const ExtractKey& as_extract_key() const { return *this; } SetKey& as_set_key() { return *this; } const SetKey& as_set_key() const { return *this; } EqualKey& as_equal_key() { return *this; } const EqualKey& as_equal_key() const { return *this; } value_alloc_type& as_value_alloc() { return *this; } const value_alloc_type& as_value_alloc() const { return *this; } // We want to return the exact same type as ExtractKey: Key or const Key& typename ExtractKey::result_type get_key(const_reference v) const { return ExtractKey::operator()(v); } void set_key(pointer v, const key_type& k) const { SetKey::operator()(v, k); } // We only ever call EqualKey::operator()(key_type, K) -- we never use the // other order of args. This allows consumers to get away with implementing // only half of operator==. template bool equals(const key_type& a, const K& b) const { return EqualKey::operator()(a, b); } pointer allocate(size_type size) { pointer memory = value_alloc_type::allocate(size); assert(memory != nullptr); return memory; } // Which key marks deleted entries. // TODO(user): make a pointer, and get rid of use_deleted (benchmark!) typename std::remove_const::type delkey; // Key value used to mark unused entries. 
typename std::remove_const::type empty; }; // Returns the value_alloc_type used to allocate and deallocate // the table. This can be different from the one returned by get_allocator(). value_alloc_type& get_internal_allocator() { return key_info; } // Utility functions to access the templated operators size_type hash(const key_type& v) const { return settings.hash(v); } bool equals(const key_type& a, const key_type& b) const { return key_info.equals(a, b); } typename ExtractKey::result_type get_key(const_reference v) const { return key_info.get_key(v); } void set_key(pointer v, const key_type& k) const { key_info.set_key(v, k); } private: // Actual data Settings settings; KeyInfo key_info; size_type num_deleted; // how many occupied buckets are marked deleted size_type num_elements; size_type num_buckets; pointer table; }; // We need a global swap as well template inline void swap(dense_hashtable &x, dense_hashtable &y) { x.swap(y); } #undef JUMP_ #undef PUT_ #undef GET_ template const typename dense_hashtable::size_type dense_hashtable::ILLEGAL_BUCKET; // How full we let the table get before we resize. Knuth says .8 is // good -- higher causes us to probe too much, though saves memory. // However, we go with .5, getting better performance at the cost of // more space (a trade-off densehashtable explicitly chooses to make). // Feel free to play around with different values, though, via // max_load_factor() and/or set_resizing_parameters(). template const int dense_hashtable::HT_OCCUPANCY_PCT = 50; // How empty we let the table get before we resize lower. // It should be less than OCCUPANCY_PCT / 2 or we thrash resizing. template const int dense_hashtable::HT_EMPTY_PCT = static_cast( 0.4 * dense_hashtable::HT_OCCUPANCY_PCT); } #endif // S2_UTIL_GTL_DENSEHASHTABLE_H_ s2geometry-0.10.0/src/s2/util/gtl/hashtable_common.h000066400000000000000000000213311422156367100222310ustar00rootroot00000000000000// Copyright 2010 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // --- #ifndef S2_UTIL_GTL_HASHTABLE_COMMON_H_ #define S2_UTIL_GTL_HASHTABLE_COMMON_H_ #include #include #include #include // For length_error // Settings contains parameters for growing and shrinking the table. // It also packages zero-size functor (ie. hasher). One invariant // enforced in enlarge_size() is that we never allow all slots // occupied. (This is unlikely to matter to users, because using // a load near 1 is slow and not recommended. It allows other code // to assume there is at least one empty bucket.) // // It does some munging of the hash value in cases where we think // (fear) the original hash function might not be very good. In // particular, the default hash of pointers is the identity hash, // so probably all the low bits are 0. We identify when we think // we're hashing a pointer, and chop off the low bits. Note this // isn't perfect: even when the key is a pointer, we can't tell // for sure that the hash is the identity hash. If it's not, this // is needless work (and possibly, though not likely, harmful). template class sh_hashtable_settings : public HashFunc { public: typedef Key key_type; typedef HashFunc hasher; typedef SizeType size_type; public: sh_hashtable_settings(const hasher& hf, const float ht_occupancy_flt, const float ht_empty_flt) : hasher(hf), enlarge_threshold_(0), shrink_threshold_(0), consider_shrink_(false), use_empty_(false), use_deleted_(false), num_ht_copies_(0) { set_enlarge_factor(ht_occupancy_flt); set_shrink_factor(ht_empty_flt); } template size_type hash(const K& v) const { // We munge the hash value when we don't trust hasher::operator(). It is // very important that we use hash_munger instead of hash_munger. // Within a given hashtable, all hash values must be munged in the same way. 
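    // Illustrative aside (not part of the original source): with a pointer
    // key type, the default hash is often the identity, so an 8-byte-aligned
    // pointer hashes to a value whose low three bits are always zero. Since
    // the table picks a bucket via hash & (bucket_count - 1), such a hash
    // would touch only every 8th bucket; the pointer specialization of
    // hash_munger below divides by sizeof(void*) to discard those known-zero
    // bits first.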
return hash_munger::MungedHash(hasher::operator()(v)); } float enlarge_factor() const { return enlarge_factor_; } void set_enlarge_factor(float f) { enlarge_factor_ = f; } float shrink_factor() const { return shrink_factor_; } void set_shrink_factor(float f) { shrink_factor_ = f; } size_type enlarge_threshold() const { return enlarge_threshold_; } void set_enlarge_threshold(size_type t) { enlarge_threshold_ = t; } size_type shrink_threshold() const { return shrink_threshold_; } void set_shrink_threshold(size_type t) { shrink_threshold_ = t; } size_type enlarge_size(size_type x) const { return std::min(x - 1, x * enlarge_factor_); } size_type shrink_size(size_type x) const { return static_cast(x * shrink_factor_); } bool consider_shrink() const { return consider_shrink_; } void set_consider_shrink(bool t) { consider_shrink_ = t; } bool use_empty() const { return use_empty_; } void set_use_empty(bool t) { use_empty_ = t; } bool use_deleted() const { return use_deleted_; } void set_use_deleted(bool t) { use_deleted_ = t; } size_type num_ht_copies() const { return static_cast(num_ht_copies_); } void inc_num_ht_copies() { ++num_ht_copies_; } // Reset the enlarge and shrink thresholds void reset_thresholds(size_type num_buckets) { set_enlarge_threshold(enlarge_size(num_buckets)); set_shrink_threshold(shrink_size(num_buckets)); // whatever caused us to reset already considered set_consider_shrink(false); } // Caller is resposible for calling reset_threshold right after // set_resizing_parameters. void set_resizing_parameters(float shrink, float grow) { assert(shrink >= 0.0); assert(grow <= 1.0); if (shrink > grow/2.0f) shrink = grow / 2.0f; // otherwise we thrash hashtable size set_shrink_factor(shrink); set_enlarge_factor(grow); } // This is the smallest size a hashtable can be without being too crowded. // If you like, you can give a min #buckets as well as a min #elts. // This is guaranteed to return a power of two. size_type min_buckets(size_type num_elts, size_type min_buckets_wanted) { float enlarge = enlarge_factor(); size_type sz = HT_MIN_BUCKETS; // min buckets allowed while ( sz < min_buckets_wanted || num_elts >= static_cast(sz * enlarge) ) { // This just prevents overflowing size_type, since sz can exceed // max_size() here. if (static_cast(sz * 2) < sz) { throw std::length_error("resize overflow"); // protect against overflow } sz *= 2; } return sz; } private: template class hash_munger { public: static size_t MungedHash(size_t hash) { return hash; } }; // This matches when the hashtable key is a pointer. template class hash_munger { public: static size_t MungedHash(size_t hash) { // TODO(user): consider rotating instead: // static const int shift = (sizeof(void *) == 4) ? 2 : 3; // return (hash << (sizeof(hash) * 8) - shift)) | (hash >> shift); // This matters if we ever change sparse/dense_hash_* to compare // hashes before comparing actual values. It's speedy on x86. 
return hash / sizeof(void*); // get rid of known-0 bits } }; size_type enlarge_threshold_; // table.size() * enlarge_factor size_type shrink_threshold_; // table.size() * shrink_factor float enlarge_factor_; // how full before resize float shrink_factor_; // how empty before resize // consider_shrink=true if we should try to shrink before next insert bool consider_shrink_; bool use_empty_; // used only by densehashtable, not sparsehashtable bool use_deleted_; // false until delkey has been set // num_ht_copies is a counter incremented every Copy/Move unsigned int num_ht_copies_; }; // This traits class checks whether T::is_transparent exists and names a type. // // struct Foo { using is_transparent = void; }; // struct Bar {}; // static_assert(sh_is_transparent::value, "Foo is transparent."); // staitc_assert(!sh_is_transparent::value, "Bar is not transparent."); template struct sh_is_transparent { private: struct No { char x; }; struct Yes { No x[2]; }; template static Yes Test(typename U::is_transparent*); template static No Test(...); public: enum { value = sizeof(Test(nullptr)) == sizeof(Yes) }; }; #endif // S2_UTIL_GTL_HASHTABLE_COMMON_H_ s2geometry-0.10.0/src/s2/util/gtl/legacy_random_shuffle.h000066400000000000000000000054431422156367100232540ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // gtl::legacy_random_shuffle is similar in API and behavior to // std::random_shuffle, which was removed in C++17. // // When built for Linux production targets using crosstool 18, // these APIs produce the same results as std::random_shuffle. // // Otherwise, the specification for these functions reverts to that // of std::random_shuffle as specified in C++11. In particular, // these functions do not promise to produce the same shuffle // sequences forever. // // These are deprecated, and intended to be used only for legacy // code that must move off std::random_shuffle simply because the // function is not part of C++17. #ifndef S2_UTIL_GTL_LEGACY_RANDOM_SHUFFLE_H_ #define S2_UTIL_GTL_LEGACY_RANDOM_SHUFFLE_H_ #include #include #include #include "absl/base/macros.h" namespace gtl { // Reorders the elements in the range `[begin, last)` randomly. The // random number generator `rnd` must be a function object returning a // randomly chosen value of type convertible to and from // `std::iterator_traits::difference_type` in the interval // `[0,n)` if invoked as `r(n)`. // // This function is deprecated. See the file comment above for // additional details. template ABSL_DEPRECATED("Use std::shuffle instead; see go/nors-legacy-api") void legacy_random_shuffle(const RandomIt begin, const RandomIt end, RandomFunc&& rnd) { auto size = std::distance(begin, end); for (decltype(size) i = 1; i < size; ++i) { // Loop invariant: elements below i are uniformly shuffled. std::iter_swap(begin + i, begin + rnd(i + 1)); } } // Reorders the elements in the range `[begin, last)` randomly. The // random number generator is `std::rand()`. 
// // This function is deprecated. See the file comment above for // additional details. template ABSL_DEPRECATED("Use std::shuffle instead; see go/nors-legacy-api") void legacy_random_shuffle(RandomIt begin, RandomIt end) { legacy_random_shuffle( begin, end, [](typename std::iterator_traits::difference_type i) { return std::rand() % i; }); } } // namespace gtl #endif // S2_UTIL_GTL_LEGACY_RANDOM_SHUFFLE_H_ s2geometry-0.10.0/src/s2/util/hash/000077500000000000000000000000001422156367100167125ustar00rootroot00000000000000s2geometry-0.10.0/src/s2/util/hash/mix.h000066400000000000000000000046601422156367100176660ustar00rootroot00000000000000// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: jyrki@google.com (Jyrki Alakuijala) // // This file contains routines for mixing hashes. #ifndef S2_UTIL_HASH_MIX_H_ #define S2_UTIL_HASH_MIX_H_ #include #include // Fast mixing of hash values -- not strong enough for fingerprinting. // May change from time to time. // // Values given are expected to be hashes from good hash functions. // What constitutes a good hash may depend on your application. As a rule of // thumb, if std::hash is strong enough for your hashing need if // your data were just ints, it will most likely be the correct choice // for a mixed hash of data members. HashMix does one round of multiply and // rotate mixing, so you get some additional collision avoidance guarantees // compared to just using std::hash directly. // // Possible use: // // struct Xyzzy { // int x; // int y; // string id; // }; // // #ifndef SWIG // template<> struct XyzzyHash { // size_t operator()(const Xyzzy& c) const { // HashMix mix(hash()(c.x)); // mix.Mix(hash()(c.y)); // mix.Mix(GoodFastHash()(c.id)); // return mix.get(); // } // } // #endif // // HashMix is a lower level interface than std::hash>. // Use std::hash> instead of HashMix where appropriate. class HashMix { public: HashMix() : hash_(1) {} explicit HashMix(size_t val) : hash_(val + 83) {} void Mix(size_t val) { static const size_t kMul = static_cast(0xdc3eb94af8ab4c93ULL); // Multiplicative hashing will mix bits better in the msb end ... hash_ *= kMul; // ... and rotating will move the better mixed msb-bits to lsb-bits. hash_ = ((hash_ << 19) | (hash_ >> (std::numeric_limits::digits - 19))) + val; } size_t get() const { return hash_; } private: size_t hash_; }; #endif // S2_UTIL_HASH_MIX_H_ s2geometry-0.10.0/src/s2/util/math/000077500000000000000000000000001422156367100167205ustar00rootroot00000000000000s2geometry-0.10.0/src/s2/util/math/exactfloat/000077500000000000000000000000001422156367100210525ustar00rootroot00000000000000s2geometry-0.10.0/src/s2/util/math/exactfloat/exactfloat.cc000066400000000000000000000661101422156367100235170ustar00rootroot00000000000000// Copyright 2009 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) #include "s2/util/math/exactfloat/exactfloat.h" #include #include #include #include #include #include #include #include #include // for OPENSSL_free #include "absl/base/macros.h" #include "absl/container/fixed_array.h" #include "s2/base/integral_types.h" #include "s2/base/logging.h" using std::max; using std::min; using std::string; // Define storage for constants. const int ExactFloat::kMinExp; const int ExactFloat::kMaxExp; const int ExactFloat::kMaxPrec; const int32 ExactFloat::kExpNaN; const int32 ExactFloat::kExpInfinity; const int32 ExactFloat::kExpZero; const int ExactFloat::kDoubleMantissaBits; // To simplify the overflow/underflow logic, we limit the exponent and // precision range so that (2 * bn_exp_) does not overflow an "int". We take // advantage of this, for example, by only checking for overflow/underflow // *after* multiplying two numbers. static_assert( ExactFloat::kMaxExp <= INT_MAX / 2 && ExactFloat::kMinExp - ExactFloat::kMaxPrec >= INT_MIN / 2, "exactfloat exponent might overflow"); // We define a few simple extensions to the OpenSSL's BIGNUM interface. // In some cases these depend on BIGNUM internal fields, so they might // require tweaking if the BIGNUM implementation changes significantly. // These are just thin wrappers for BoringSSL. #ifdef OPENSSL_IS_BORINGSSL inline static void BN_ext_set_uint64(BIGNUM* bn, uint64 v) { S2_CHECK(BN_set_u64(bn, v)); } // Return the absolute value of a BIGNUM as a 64-bit unsigned integer. // Requires that BIGNUM fits into 64 bits. inline static uint64 BN_ext_get_uint64(const BIGNUM* bn) { uint64_t u64; if (!BN_get_u64(bn, &u64)) { S2_DCHECK(false) << "BN has " << BN_num_bits(bn) << " bits"; return 0; } return u64; } static int BN_ext_count_low_zero_bits(const BIGNUM* bn) { return BN_count_low_zero_bits(bn); } #else // !defined(OPENSSL_IS_BORINGSSL) // Set a BIGNUM to the given unsigned 64-bit value. inline static void BN_ext_set_uint64(BIGNUM* bn, uint64 v) { #if BN_BITS2 == 64 S2_CHECK(BN_set_word(bn, v)); #else static_assert(BN_BITS2 == 32, "at least 32 bit openssl build needed"); S2_CHECK(BN_set_word(bn, static_cast(v >> 32))); S2_CHECK(BN_lshift(bn, bn, 32)); S2_CHECK(BN_add_word(bn, static_cast(v))); #endif } #if OPENSSL_VERSION_NUMBER < 0x10100000L // Return the absolute value of a BIGNUM as a 64-bit unsigned integer. // Requires that BIGNUM fits into 64 bits. inline static uint64 BN_ext_get_uint64(const BIGNUM* bn) { S2_DCHECK_LE(BN_num_bytes(bn), sizeof(uint64)); #if BN_BITS2 == 64 return BN_get_word(bn); #else static_assert(BN_BITS2 == 32, "at least 32 bit openssl build needed"); if (bn->top == 0) return 0; if (bn->top == 1) return BN_get_word(bn); S2_DCHECK_EQ(bn->top, 2); return (static_cast(bn->d[1]) << 32) + bn->d[0]; #endif } // Count the number of low-order zero bits in the given BIGNUM (ignoring its // sign). Returns 0 if the argument is zero. 
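// For example (illustration only): a BIGNUM holding 40 (binary 101000) has
// three low-order zero bits, and a BIGNUM holding -40 reports the same three
// bits because the sign is ignored.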
static int BN_ext_count_low_zero_bits(const BIGNUM* bn) { int count = 0; for (int i = 0; i < bn->top; ++i) { BN_ULONG w = bn->d[i]; if (w == 0) { count += 8 * sizeof(BN_ULONG); } else { for (; (w & 1) == 0; w >>= 1) { ++count; } break; } } return count; } #else // OPENSSL_VERSION_NUMBER >= 0x10100000L // Return the absolute value of a BIGNUM as a 64-bit unsigned integer. // Requires that BIGNUM fits into 64 bits. inline static uint64 BN_ext_get_uint64(const BIGNUM* bn) { uint64 r; #ifdef IS_LITTLE_ENDIAN S2_CHECK_EQ(BN_bn2lebinpad(bn, reinterpret_cast(&r), sizeof(r)), sizeof(r)); #elif IS_BIG_ENDIAN S2_CHECK_EQ(BN_bn2binpad(bn, reinterpret_cast(&r), sizeof(r)), sizeof(r)); #else #error one of IS_LITTLE_ENDIAN or IS_BIG_ENDIAN should be defined! #endif return r; } static int BN_ext_count_low_zero_bits(const BIGNUM* bn) { // In OpenSSL >= 1.1, BIGNUM is an opaque type, so d and top // cannot be accessed. The bytes must be copied out at a ~25% // performance penalty. absl::FixedArray bytes(BN_num_bytes(bn)); // "le" indicates little endian. S2_CHECK_EQ(BN_bn2lebinpad(bn, bytes.data(), bytes.size()), bytes.size()); int count = 0; for (unsigned char c : bytes) { if (c == 0) { count += 8; } else { for (; (c & 1) == 0; c >>= 1) { ++count; } break; } } return count; } #endif // OPENSSL_VERSION_NUMBER >= 0x10100000L #endif // !defined(OPENSSL_IS_BORINGSSL) ExactFloat::ExactFloat(double v) { sign_ = std::signbit(v) ? -1 : 1; if (std::isnan(v)) { set_nan(); } else if (std::isinf(v)) { set_inf(sign_); } else { // The following code is much simpler than messing about with bit masks, // has the advantage of handling denormalized numbers and zero correctly, // and is actually quite efficient (at least compared to the rest of this // code). "f" is a fraction in the range [0.5, 1), so if we shift it left // by the number of mantissa bits in a double (53, including the leading // "1") then the result is always an integer. int exp; double f = frexp(fabs(v), &exp); uint64 m = static_cast(ldexp(f, kDoubleMantissaBits)); BN_ext_set_uint64(bn_.get(), m); bn_exp_ = exp - kDoubleMantissaBits; Canonicalize(); } } ExactFloat::ExactFloat(int v) { sign_ = (v >= 0) ? 1 : -1; // Note that this works even for INT_MIN because the parameter type for // BN_set_word() is unsigned. S2_CHECK(BN_set_word(bn_.get(), abs(v))); bn_exp_ = 0; Canonicalize(); } ExactFloat::ExactFloat(const ExactFloat& b) : sign_(b.sign_), bn_exp_(b.bn_exp_) { BN_copy(bn_.get(), b.bn_.get()); } ExactFloat ExactFloat::SignedZero(int sign) { ExactFloat r; r.set_zero(sign); return r; } ExactFloat ExactFloat::Infinity(int sign) { ExactFloat r; r.set_inf(sign); return r; } ExactFloat ExactFloat::NaN() { ExactFloat r; r.set_nan(); return r; } int ExactFloat::prec() const { return BN_num_bits(bn_.get()); } int ExactFloat::exp() const { S2_DCHECK(is_normal()); return bn_exp_ + BN_num_bits(bn_.get()); } void ExactFloat::set_zero(int sign) { sign_ = sign; bn_exp_ = kExpZero; if (!BN_is_zero(bn_.get())) BN_zero(bn_.get()); } void ExactFloat::set_inf(int sign) { sign_ = sign; bn_exp_ = kExpInfinity; if (!BN_is_zero(bn_.get())) BN_zero(bn_.get()); } void ExactFloat::set_nan() { sign_ = 1; bn_exp_ = kExpNaN; if (!BN_is_zero(bn_.get())) BN_zero(bn_.get()); } double ExactFloat::ToDouble() const { // If the mantissa has too many bits, we need to round it. 
if (prec() <= kDoubleMantissaBits) { return ToDoubleHelper(); } else { ExactFloat r = RoundToMaxPrec(kDoubleMantissaBits, kRoundTiesToEven); return r.ToDoubleHelper(); } } double ExactFloat::ToDoubleHelper() const { S2_DCHECK_LE(BN_num_bits(bn_.get()), kDoubleMantissaBits); if (!is_normal()) { if (is_zero()) return copysign(0, sign_); if (is_inf()) { return std::copysign(std::numeric_limits::infinity(), sign_); } return std::copysign(std::numeric_limits::quiet_NaN(), sign_); } uint64 d_mantissa = BN_ext_get_uint64(bn_.get()); // We rely on ldexp() to handle overflow and underflow. (It will return a // signed zero or infinity if the result is too small or too large.) return sign_ * ldexp(static_cast(d_mantissa), bn_exp_); } ExactFloat ExactFloat::RoundToMaxPrec(int max_prec, RoundingMode mode) const { // The "kRoundTiesToEven" mode requires at least 2 bits of precision // (otherwise both adjacent representable values may be odd). S2_DCHECK_GE(max_prec, 2); S2_DCHECK_LE(max_prec, kMaxPrec); // The following test also catches zero, infinity, and NaN. int shift = prec() - max_prec; if (shift <= 0) return *this; // Round by removing the appropriate number of bits from the mantissa. Note // that if the value is rounded up to a power of 2, the high-order bit // position may increase, but in that case Canonicalize() will remove at // least one zero bit and so the output will still have prec() <= max_prec. return RoundToPowerOf2(bn_exp_ + shift, mode); } ExactFloat ExactFloat::RoundToPowerOf2(int bit_exp, RoundingMode mode) const { S2_DCHECK_GE(bit_exp, kMinExp - kMaxPrec); S2_DCHECK_LE(bit_exp, kMaxExp); // If the exponent is already large enough, or the value is zero, infinity, // or NaN, then there is nothing to do. int shift = bit_exp - bn_exp_; if (shift <= 0) return *this; S2_DCHECK(is_normal()); // Convert rounding up/down to toward/away from zero, so that we don't need // to consider the sign of the number from this point onward. if (mode == kRoundTowardPositive) { mode = (sign_ > 0) ? kRoundAwayFromZero : kRoundTowardZero; } else if (mode == kRoundTowardNegative) { mode = (sign_ > 0) ? kRoundTowardZero : kRoundAwayFromZero; } // Rounding consists of right-shifting the mantissa by "shift", and then // possibly incrementing the result (depending on the rounding mode, the // bits that were discarded, and sometimes the lowest kept bit). The // following code figures out whether we need to increment. ExactFloat r; bool increment = false; if (mode == kRoundTowardZero) { // Never increment. } else if (mode == kRoundTiesAwayFromZero) { // Increment if the highest discarded bit is 1. if (BN_is_bit_set(bn_.get(), shift - 1)) increment = true; } else if (mode == kRoundAwayFromZero) { // Increment unless all discarded bits are zero. if (BN_ext_count_low_zero_bits(bn_.get()) < shift) increment = true; } else { S2_DCHECK_EQ(mode, kRoundTiesToEven); // Let "w/xyz" denote a mantissa where "w" is the lowest kept bit and // "xyz" are the discarded bits. 
Then using regexp notation: // ./0.* -> Don't increment (fraction < 1/2) // 0/10* -> Don't increment (fraction = 1/2, kept part even) // 1/10* -> Increment (fraction = 1/2, kept part odd) // ./1.*1.* -> Increment (fraction > 1/2) if (BN_is_bit_set(bn_.get(), shift - 1) && ((BN_is_bit_set(bn_.get(), shift) || BN_ext_count_low_zero_bits(bn_.get()) < shift - 1))) { increment = true; } } r.bn_exp_ = bn_exp_ + shift; S2_CHECK(BN_rshift(r.bn_.get(), bn_.get(), shift)); if (increment) { S2_CHECK(BN_add_word(r.bn_.get(), 1)); } r.sign_ = sign_; r.Canonicalize(); return r; } int ExactFloat::NumSignificantDigitsForPrec(int prec) { // The simplest bound is // // d <= 1 + ceil(prec * log10(2)) // // The following bound is tighter by 0.5 digits on average, but requires // the exponent to be known as well: // // d <= ceil(exp * log10(2)) - floor((exp - prec) * log10(2)) // // Since either of these bounds can be too large by 0, 1, or 2 digits, we // stick with the simpler first bound. return static_cast(1 + ceil(prec * (M_LN2 / M_LN10))); } // Numbers are always formatted with at least this many significant digits. // This prevents small integers from being formatted in exponential notation // (e.g. 1024 formatted as 1e+03), and also avoids the confusion of having // supposedly "high precision" numbers formatted with just 1 or 2 digits // (e.g. 1/512 == 0.001953125 formatted as 0.002). static const int kMinSignificantDigits = 10; std::string ExactFloat::ToString() const { int max_digits = max(kMinSignificantDigits, NumSignificantDigitsForPrec(prec())); return ToStringWithMaxDigits(max_digits); } std::string ExactFloat::ToStringWithMaxDigits(int max_digits) const { S2_DCHECK_GT(max_digits, 0); if (!is_normal()) { if (is_nan()) return "nan"; if (is_zero()) return (sign_ < 0) ? "-0" : "0"; return (sign_ < 0) ? "-inf" : "inf"; } std::string digits; int exp10 = GetDecimalDigits(max_digits, &digits); std::string str; if (sign_ < 0) str.push_back('-'); // We use the standard '%g' formatting rules. If the exponent is less than // -4 or greater than or equal to the requested precision (i.e., max_digits) // then we use exponential notation. // // But since "exp10" is the base-10 exponent corresponding to a mantissa in // the range [0.1, 1), whereas the '%g' rules assume a mantissa in the range // [1.0, 10), we need to adjust these parameters by 1. if (exp10 <= -4 || exp10 > max_digits) { // Use exponential format. str.push_back(digits[0]); if (digits.size() > 1) { str.push_back('.'); str.append(digits.begin() + 1, digits.end()); } char exp_buf[20]; sprintf(exp_buf, "e%+02d", exp10 - 1); str += exp_buf; } else { // Use fixed format. We split this into two cases depending on whether // the integer portion is non-zero or not. if (exp10 > 0) { if (static_cast(exp10) >= digits.size()) { str += digits; for (int i = exp10 - digits.size(); i > 0; --i) { str.push_back('0'); } } else { str.append(digits.begin(), digits.begin() + exp10); str.push_back('.'); str.append(digits.begin() + exp10, digits.end()); } } else { str += "0."; for (int i = exp10; i < 0; ++i) { str.push_back('0'); } str += digits; } } return str; } // Increment an unsigned integer represented as a string of ASCII digits. 
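// For example (illustration only): "129" becomes "130", and "999" becomes
// "1000" (a leading "1" is inserted when every digit wraps around).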
static void IncrementDecimalDigits(std::string* digits) { std::string::iterator pos = digits->end(); while (--pos >= digits->begin()) { if (*pos < '9') { ++*pos; return; } *pos = '0'; } digits->insert(0, "1"); } int ExactFloat::GetDecimalDigits(int max_digits, std::string* digits) const { S2_DCHECK(is_normal()); // Convert the value to the form (bn * (10 ** bn_exp10)) where "bn" is a // positive integer (BIGNUM). BIGNUM* bn = BN_new(); int bn_exp10; if (bn_exp_ >= 0) { // The easy case: bn = bn_ * (2 ** bn_exp_)), bn_exp10 = 0. S2_CHECK(BN_lshift(bn, bn_.get(), bn_exp_)); bn_exp10 = 0; } else { // Set bn = bn_ * (5 ** -bn_exp_) and bn_exp10 = bn_exp_. This is // equivalent to the original value of (bn_ * (2 ** bn_exp_)). BIGNUM* power = BN_new(); S2_CHECK(BN_set_word(power, -bn_exp_)); S2_CHECK(BN_set_word(bn, 5)); BN_CTX* ctx = BN_CTX_new(); S2_CHECK(BN_exp(bn, bn, power, ctx)); S2_CHECK(BN_mul(bn, bn, bn_.get(), ctx)); BN_CTX_free(ctx); BN_free(power); bn_exp10 = bn_exp_; } // Now convert "bn" to a decimal string. char* all_digits = BN_bn2dec(bn); S2_DCHECK(all_digits != nullptr); BN_free(bn); // Check whether we have too many digits and round if necessary. int num_digits = strlen(all_digits); if (num_digits <= max_digits) { *digits = all_digits; } else { digits->assign(all_digits, max_digits); // Standard "printf" formatting rounds ties to an even number. This means // that we round up (away from zero) if highest discarded digit is '5' or // more, unless all other discarded digits are zero in which case we round // up only if the lowest kept digit is odd. if (all_digits[max_digits] >= '5' && ((all_digits[max_digits-1] & 1) == 1 || strpbrk(all_digits + max_digits + 1, "123456789") != nullptr)) { // This can increase the number of digits by 1, but in that case at // least one trailing zero will be stripped off below. IncrementDecimalDigits(digits); } // Adjust the base-10 exponent to reflect the digits we have removed. bn_exp10 += num_digits - max_digits; } OPENSSL_free(all_digits); // Now strip any trailing zeros. S2_DCHECK_NE((*digits)[0], '0'); std::string::iterator pos = digits->end(); while (pos[-1] == '0') --pos; if (pos < digits->end()) { bn_exp10 += digits->end() - pos; digits->erase(pos, digits->end()); } S2_DCHECK_LE(digits->size(), max_digits); // Finally, we adjust the base-10 exponent so that the mantissa is a // fraction in the range [0.1, 1) rather than an integer. return bn_exp10 + digits->size(); } std::string ExactFloat::ToUniqueString() const { char prec_buf[20]; sprintf(prec_buf, "<%d>", prec()); return ToString() + prec_buf; } ExactFloat& ExactFloat::operator=(const ExactFloat& b) { if (this != &b) { sign_ = b.sign_; bn_exp_ = b.bn_exp_; BN_copy(bn_.get(), b.bn_.get()); } return *this; } ExactFloat ExactFloat::operator-() const { return CopyWithSign(-sign_); } ExactFloat operator+(const ExactFloat& a, const ExactFloat& b) { return ExactFloat::SignedSum(a.sign_, &a, b.sign_, &b); } ExactFloat operator-(const ExactFloat& a, const ExactFloat& b) { return ExactFloat::SignedSum(a.sign_, &a, -b.sign_, &b); } ExactFloat ExactFloat::SignedSum(int a_sign, const ExactFloat* a, int b_sign, const ExactFloat* b) { if (!a->is_normal() || !b->is_normal()) { // Handle zero, infinity, and NaN according to IEEE 754-2008. if (a->is_nan()) return *a; if (b->is_nan()) return *b; if (a->is_inf()) { // Adding two infinities with opposite sign yields NaN. 
if (b->is_inf() && a_sign != b_sign) return NaN(); return Infinity(a_sign); } if (b->is_inf()) return Infinity(b_sign); if (a->is_zero()) { if (!b->is_zero()) return b->CopyWithSign(b_sign); // Adding two zeros with the same sign preserves the sign. if (a_sign == b_sign) return SignedZero(a_sign); // Adding two zeros of opposite sign produces +0. return SignedZero(+1); } S2_DCHECK(b->is_zero()); return a->CopyWithSign(a_sign); } // Swap the numbers if necessary so that "a" has the larger bn_exp_. if (a->bn_exp_ < b->bn_exp_) { using std::swap; swap(a_sign, b_sign); swap(a, b); } // Shift "a" if necessary so that both values have the same bn_exp_. ExactFloat r; if (a->bn_exp_ > b->bn_exp_) { S2_CHECK(BN_lshift(r.bn_.get(), a->bn_.get(), a->bn_exp_ - b->bn_exp_)); a = &r; // The only field of "a" used below is bn_. } r.bn_exp_ = b->bn_exp_; if (a_sign == b_sign) { S2_CHECK(BN_add(r.bn_.get(), a->bn_.get(), b->bn_.get())); r.sign_ = a_sign; } else { // Note that the BIGNUM documentation is out of date -- all methods now // allow the result to be the same as any input argument, so it is okay if // (a == &r) due to the shift above. S2_CHECK(BN_sub(r.bn_.get(), a->bn_.get(), b->bn_.get())); if (BN_is_zero(r.bn_.get())) { r.sign_ = +1; } else if (BN_is_negative(r.bn_.get())) { // The magnitude of "b" was larger. r.sign_ = b_sign; BN_set_negative(r.bn_.get(), false); } else { // They were equal, or the magnitude of "a" was larger. r.sign_ = a_sign; } } r.Canonicalize(); return r; } void ExactFloat::Canonicalize() { if (!is_normal()) return; // Underflow/overflow occurs if exp() is not in [kMinExp, kMaxExp]. // We also convert a zero mantissa to signed zero. int my_exp = exp(); if (my_exp < kMinExp || BN_is_zero(bn_.get())) { set_zero(sign_); } else if (my_exp > kMaxExp) { set_inf(sign_); } else if (!BN_is_odd(bn_.get())) { // Remove any low-order zero bits from the mantissa. S2_DCHECK(!BN_is_zero(bn_.get())); int shift = BN_ext_count_low_zero_bits(bn_.get()); if (shift > 0) { S2_CHECK(BN_rshift(bn_.get(), bn_.get(), shift)); bn_exp_ += shift; } } // If the mantissa has too many bits, we replace it by NaN to indicate // that an inexact calculation has occurred. if (prec() > kMaxPrec) { set_nan(); } } ExactFloat operator*(const ExactFloat& a, const ExactFloat& b) { int result_sign = a.sign_ * b.sign_; if (!a.is_normal() || !b.is_normal()) { // Handle zero, infinity, and NaN according to IEEE 754-2008. if (a.is_nan()) return a; if (b.is_nan()) return b; if (a.is_inf()) { // Infinity times zero yields NaN. if (b.is_zero()) return ExactFloat::NaN(); return ExactFloat::Infinity(result_sign); } if (b.is_inf()) { if (a.is_zero()) return ExactFloat::NaN(); return ExactFloat::Infinity(result_sign); } S2_DCHECK(a.is_zero() || b.is_zero()); return ExactFloat::SignedZero(result_sign); } ExactFloat r; r.sign_ = result_sign; r.bn_exp_ = a.bn_exp_ + b.bn_exp_; BN_CTX* ctx = BN_CTX_new(); S2_CHECK(BN_mul(r.bn_.get(), a.bn_.get(), b.bn_.get(), ctx)); BN_CTX_free(ctx); r.Canonicalize(); return r; } bool operator==(const ExactFloat& a, const ExactFloat& b) { // NaN is not equal to anything, not even itself. if (a.is_nan() || b.is_nan()) return false; // Since Canonicalize() strips low-order zero bits, all other cases // (including non-normal values) require bn_exp_ to be equal. if (a.bn_exp_ != b.bn_exp_) return false; // Positive and negative zero are equal. if (a.is_zero() && b.is_zero()) return true; // Otherwise, the signs and mantissas must match. 
Note that non-normal // values such as infinity have a mantissa of zero. return a.sign_ == b.sign_ && BN_ucmp(a.bn_.get(), b.bn_.get()) == 0; } int ExactFloat::ScaleAndCompare(const ExactFloat& b) const { S2_DCHECK(is_normal() && b.is_normal() && bn_exp_ >= b.bn_exp_); ExactFloat tmp = *this; S2_CHECK(BN_lshift(tmp.bn_.get(), tmp.bn_.get(), bn_exp_ - b.bn_exp_)); return BN_ucmp(tmp.bn_.get(), b.bn_.get()); } bool ExactFloat::UnsignedLess(const ExactFloat& b) const { // Handle the zero/infinity cases (NaN has already been done). if (is_inf() || b.is_zero()) return false; if (is_zero() || b.is_inf()) return true; // If the high-order bit positions differ, we are done. int cmp = exp() - b.exp(); if (cmp != 0) return cmp < 0; // Otherwise shift one of the two values so that they both have the same // bn_exp_ and then compare the mantissas. return (bn_exp_ >= b.bn_exp_ ? ScaleAndCompare(b) < 0 : b.ScaleAndCompare(*this) > 0); } bool operator<(const ExactFloat& a, const ExactFloat& b) { // NaN is unordered compared to everything, including itself. if (a.is_nan() || b.is_nan()) return false; // Positive and negative zero are equal. if (a.is_zero() && b.is_zero()) return false; // Otherwise, anything negative is less than anything positive. if (a.sign_ != b.sign_) return a.sign_ < b.sign_; // Now we just compare absolute values. return (a.sign_ > 0) ? a.UnsignedLess(b) : b.UnsignedLess(a); } ExactFloat fabs(const ExactFloat& a) { return abs(a); } ExactFloat abs(const ExactFloat& a) { return a.CopyWithSign(+1); } ExactFloat fmax(const ExactFloat& a, const ExactFloat& b) { // If one argument is NaN, return the other argument. if (a.is_nan()) return b; if (b.is_nan()) return a; // Not required by IEEE 754, but we prefer +0 over -0. if (a.sign_ != b.sign_) { return (a.sign_ < b.sign_) ? b : a; } return (a < b) ? b : a; } ExactFloat fmin(const ExactFloat& a, const ExactFloat& b) { // If one argument is NaN, return the other argument. if (a.is_nan()) return b; if (b.is_nan()) return a; // Not required by IEEE 754, but we prefer -0 over +0. if (a.sign_ != b.sign_) { return (a.sign_ < b.sign_) ? a : b; } return (a < b) ? a : b; } ExactFloat fdim(const ExactFloat& a, const ExactFloat& b) { // This formulation has the correct behavior for NaNs. return (a <= b) ? 0 : (a - b); } ExactFloat ceil(const ExactFloat& a) { return a.RoundToPowerOf2(0, ExactFloat::kRoundTowardPositive); } ExactFloat floor(const ExactFloat& a) { return a.RoundToPowerOf2(0, ExactFloat::kRoundTowardNegative); } ExactFloat trunc(const ExactFloat& a) { return a.RoundToPowerOf2(0, ExactFloat::kRoundTowardZero); } ExactFloat round(const ExactFloat& a) { return a.RoundToPowerOf2(0, ExactFloat::kRoundTiesAwayFromZero); } ExactFloat rint(const ExactFloat& a) { return a.RoundToPowerOf2(0, ExactFloat::kRoundTiesToEven); } template T ExactFloat::ToInteger(RoundingMode mode) const { using std::numeric_limits; static_assert(sizeof(T) <= sizeof(uint64), "max 64 bits supported"); static_assert(numeric_limits::is_signed, "only signed types supported"); const int64 kMinValue = numeric_limits::min(); const int64 kMaxValue = numeric_limits::max(); ExactFloat r = RoundToPowerOf2(0, mode); if (r.is_nan()) return kMaxValue; if (r.is_zero()) return 0; if (!r.is_inf()) { // If the unsigned value has more than 63 bits it is always clamped. if (r.exp() < 64) { int64 value = BN_ext_get_uint64(r.bn_.get()) << r.bn_exp_; if (r.sign_ < 0) value = -value; return max(kMinValue, min(kMaxValue, value)); } } return (r.sign_ < 0) ? 
kMinValue : kMaxValue; } long lrint(const ExactFloat& a) { return a.ToInteger(ExactFloat::kRoundTiesToEven); } long long llrint(const ExactFloat& a) { return a.ToInteger(ExactFloat::kRoundTiesToEven); } long lround(const ExactFloat& a) { return a.ToInteger(ExactFloat::kRoundTiesAwayFromZero); } long long llround(const ExactFloat& a) { return a.ToInteger(ExactFloat::kRoundTiesAwayFromZero); } ExactFloat copysign(const ExactFloat& a, const ExactFloat& b) { return a.CopyWithSign(b.sign_); } ExactFloat frexp(const ExactFloat& a, int* exp) { if (!a.is_normal()) { // If a == 0, exp should be zero. If a.is_inf() or a.is_nan(), exp is not // defined but the glibc implementation returns zero. *exp = 0; return a; } *exp = a.exp(); return ldexp(a, -a.exp()); } ExactFloat ldexp(const ExactFloat& a, int exp) { if (!a.is_normal()) return a; // To prevent integer overflow, we first clamp "exp" so that // (kMinExp - 1) <= (a_exp + exp) <= (kMaxExp + 1). int a_exp = a.exp(); exp = min(ExactFloat::kMaxExp + 1 - a_exp, max(ExactFloat::kMinExp - 1 + a_exp, exp)); // Now modify the exponent and check for overflow/underflow. ExactFloat r = a; r.bn_exp_ += exp; r.Canonicalize(); return r; } ExactFloat scalbln(const ExactFloat& a, long exp) { // Clamp the exponent to the range of "int" in order to avoid truncation. exp = max(static_cast(INT_MIN), min(static_cast(INT_MAX), exp)); return ldexp(a, exp); } int ilogb(const ExactFloat& a) { if (a.is_zero()) return FP_ILOGB0; if (a.is_inf()) return INT_MAX; if (a.is_nan()) return FP_ILOGBNAN; // a.exp() assumes the significand is in the range [0.5, 1). return a.exp() - 1; } ExactFloat logb(const ExactFloat& a) { if (a.is_zero()) return ExactFloat::Infinity(-1); if (a.is_inf()) return ExactFloat::Infinity(+1); // Even if a < 0. if (a.is_nan()) return a; // exp() assumes the significand is in the range [0.5,1). return ExactFloat(a.exp() - 1); } ExactFloat ExactFloat::Unimplemented() { S2_LOG(FATAL) << "Unimplemented ExactFloat method called"; return NaN(); } s2geometry-0.10.0/src/s2/util/math/exactfloat/exactfloat.h000066400000000000000000000653311422156367100233650ustar00rootroot00000000000000// Copyright 2009 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Author: ericv@google.com (Eric Veach) // // ExactFloat is a multiple-precision floating point type that uses the OpenSSL // Bignum library for numerical calculations. It has the same interface as the // built-in "float" and "double" types, but only supports the subset of // operators and intrinsics where it is possible to compute the result exactly. // So for example, ExactFloat supports addition and multiplication but not // division (since in general, the quotient of two floating-point numbers cannot // be represented exactly). Exact arithmetic is useful for geometric // algorithms, especially for disambiguating cases where ordinary // double-precision arithmetic yields an uncertain result. 
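//
// For instance, the sign of a 2x2 determinant can be evaluated exactly using
// only operations that ExactFloat supports (the coordinates "ax", "ay", "bx",
// "by" below are assumed plain doubles, not part of this library):
//
//   ExactFloat det = ExactFloat(ax) * ExactFloat(by) -
//                    ExactFloat(ay) * ExactFloat(bx);  // computed exactly
//   if (det < 0) { ... } else if (det == 0) { ... } else { ... }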
// // ExactFloat is a subset of the now-retired MPFloat class, which used the GNU // MPFR library for numerical calculations. The main reason for the switch to // ExactFloat is that OpenSSL has a BSD-style license whereas MPFR has a much // more restrictive LGPL license. // // ExactFloat has the following features: // // - It uses the same syntax as the built-in "float" and "double" // types, for example: x += 4 + fabs(2*y*y - z*z). There are a few // differences (see below), but the syntax is compatible enough so that // ExactFloat can be used as a template argument to templatized classes // such as Vector2, VectorN, Matrix3x3, etc. // // - Results are not rounded; instead, precision is increased so that the // result can be represented exactly. An inexact result is returned only // in the case of underflow or overflow (yielding signed zero or infinity // respectively), or if the maximum allowed precision is exceeded (yielding // NaN). ExactFloat uses IEEE 754-2008 rules for handling infinities, NaN, // rounding to integers, etc. // // - ExactFloat only supports calculations where the result can be // represented exactly. Therefore it supports intrinsics such as fabs() // but not transcendentals such as sin(), sqrt(), etc. // // Syntax Compatibility with "float" and "double" // ---------------------------------------------- // // ExactFloat supports a subset of the operators and intrinsics for the // built-in "double" type. (Thus it supports fabs() but not fabsf(), for // example.) The syntax is different only in the following cases: // // - Casts and implicit conversions to built-in types (including "bool") are // not supported. So for example, the following will not compile: // // ExactFloat x = 7.5; // double y = x; // ERROR: use x.ToDouble() instead // long z = x; // ERROR: use x.ToDouble() or lround(trunc(x)) // q = static_cast(x); // ERROR: use x.ToDouble() or lround(trunc(x)) // if (x) { ... } // ERROR: use (x != 0) instead // // - The glibc floating-point classification macros (fpclassify, isfinite, // isnormal, isnan, isinf) are not supported. Instead there are // zero-argument methods: // // ExactFloat x; // if (isnan(x)) { ... } // ERROR: use (x.is_nan()) instead // if (isinf(x)) { ... } // ERROR: use (x.is_inf()) instead // // Using ExactFloat with Vector3, etc. // ----------------------------------- // // ExactFloat can be used with templatized classes such as Vector2 and Vector3 // (see "util/math/vector.h"), with the following limitations: // // - Cast() can be used to convert other vector types to an ExactFloat vector // type, but not the other way around. This is because there are no // implicit conversions from ExactFloat to built-in types. You can work // around this by calling an explicit conversion method such as // ToDouble(). For example: // // typedef Vector3 Vector3_xf; // Vector3_xf x; // Vector3_d y; // x = Vector3_xf::Cast(y); // This works. // y = Vector3_d::Cast(x); // This doesn't. // y = Vector3_d(x[0].ToDouble(), x[1].ToDouble(), x[2].ToDouble()); // OK // // - IsNaN() is not supported because it calls isnan(), which is defined as a // macro in and therefore can't easily be overrided. // // Precision Semantics // ------------------- // // Unlike MPFloat, ExactFloat does not allow a maximum precision to be // specified (it is always unbounded). Therefore it does not have any of the // corresponding constructors. // // The current precision of an ExactFloat (i.e., the number of bits in its // mantissa) is returned by prec(). 
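//
// For instance (illustrative values, assuming the normalization described in
// this file, where the mantissa is scaled so that its low bit is 1):
//
//   ExactFloat x = 3.0;        // mantissa is binary "11", so x.prec() == 2
//   ExactFloat y = x * x * x;  // 27, mantissa "11011", so y.prec() == 5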
The precision is increased as necessary // so that the result of every operation can be represented exactly. #ifndef S2_UTIL_MATH_EXACTFLOAT_EXACTFLOAT_H_ #define S2_UTIL_MATH_EXACTFLOAT_EXACTFLOAT_H_ #include #include #include #include #include #include #include #include #include "s2/base/integral_types.h" #include "s2/base/logging.h" #include "s2/base/port.h" class ExactFloat { public: // The following limits are imposed by OpenSSL. // The maximum exponent supported. If a value has an exponent larger than // this, it is replaced by infinity (with the appropriate sign). static constexpr int kMaxExp = 200 * 1000 * 1000; // About 10**(60 million) // The minimum exponent supported. If a value has an exponent less than // this, it is replaced by zero (with the appropriate sign). static constexpr int kMinExp = -kMaxExp; // About 10**(-60 million) // The maximum number of mantissa bits supported. If a value has more // mantissa bits than this, it is replaced with NaN. (It is expected that // users of this class will never want this much precision.) static constexpr int kMaxPrec = 64 << 20; // About 20 million digits // Rounding modes. kRoundTiesToEven and kRoundTiesAwayFromZero both round // to the nearest representable value unless two values are equally close. // In that case kRoundTiesToEven rounds to the nearest even value, while // kRoundTiesAwayFromZero always rounds away from zero. enum RoundingMode { kRoundTiesToEven, kRoundTiesAwayFromZero, kRoundTowardZero, kRoundAwayFromZero, kRoundTowardPositive, kRoundTowardNegative }; ///////////////////////////////////////////////////////////////////////////// // Constructors // The default constructor initializes the value to zero. (The initial // value must be zero rather than NaN for compatibility with the built-in // float types.) inline ExactFloat(); // Construct an ExactFloat from a "double". The constructor is implicit so // that this class can be used as a replacement for "float" or "double" in // templatized libraries. (With an explicit constructor, code such as // "ExactFloat f = 2.5;" would not compile.) All double-precision values are // supported, including denormalized numbers, infinities, and NaNs. ExactFloat(double v); // Construct an ExactFloat from an "int". Note that in general, ints are // automatically converted to doubles and so would be handled by the // constructor above. However, the particular argument (0) would be // ambiguous; the compiler wouldn't know whether to treat it as a "double" or // "const char*" (since 0 is a valid null pointer constant). Adding an "int" // constructor solves this problem. // // We do not provide constructors for "unsigned", "long", "unsigned long", // "long long", or "unsigned long long", since these types are not typically // used in floating-point calculations and it is safer to require them to be // explicitly cast. ExactFloat(int v); // Construct an ExactFloat from a string (such as "1.2e50"). Requires that // the value is exactly representable as a floating-point number (so for // example, "0.125" is allowed but "0.1" is not). explicit ExactFloat(const char* s) { Unimplemented(); } // Copy constructor. ExactFloat(const ExactFloat& b); // The destructor is not virtual for efficiency reasons. Therefore no // subclass should declare additional fields that require destruction. 
inline ~ExactFloat() = default; ///////////////////////////////////////////////////////////////////// // Constants // // As an alternative to the constants below, you can also just use the // constants defined in , for example: // // ExactFloat x = NAN, y = -INFINITY; // Return an ExactFloat equal to positive zero (if sign >= 0) or // negative zero (if sign < 0). static ExactFloat SignedZero(int sign); // Return an ExactFloat equal to positive infinity (if sign >= 0) or // negative infinity (if sign < 0). static ExactFloat Infinity(int sign); // Return an ExactFloat that is NaN (Not-a-Number). static ExactFloat NaN(); ///////////////////////////////////////////////////////////////////////////// // Accessor Methods // Return the maximum precision of the ExactFloat. This method exists only // for compatibility with MPFloat. int max_prec() const { return kMaxPrec; } // Return the actual precision of this ExactFloat (the current number of // bits in its mantissa). Returns 0 for non-normal numbers such as NaN. int prec() const; // Return the exponent of this ExactFloat given that the mantissa is in the // range [0.5, 1). It is an error to call this method if the value is zero, // infinity, or NaN. int exp() const; // Set the value of the ExactFloat to +0 (if sign >= 0) or -0 (if sign < 0). void set_zero(int sign); // Set the value of the ExactFloat to positive infinity (if sign >= 0) or // negative infinity (if sign < 0). void set_inf(int sign); // Set the value of the ExactFloat to NaN (Not-a-Number). void set_nan(); // Unfortunately, isinf(x), isnan(x), isnormal(x), and isfinite(x) are // defined as macros in . Therefore we can't easily extend them // here. Instead we provide methods with underscores in their names that do // the same thing: x.is_inf(), etc. // // These macros are not implemented: signbit(x), fpclassify(x). // Return true if this value is zero (including negative zero). inline bool is_zero() const; // Return true if this value is infinity (positive or negative). inline bool is_inf() const; // Return true if this value is NaN (Not-a-Number). inline bool is_nan() const; // Return true if this value is a normal floating-point number. Non-normal // values (zero, infinity, and NaN) often need to be handled separately // because they are represented using special exponent values and their // mantissa is not defined. inline bool is_normal() const; // Return true if this value is a normal floating-point number or zero, // i.e. it is not infinity or NaN. inline bool is_finite() const; // Return true if the sign bit is set (this includes negative zero). inline bool sign_bit() const; // Return +1 if this ExactFloat is positive, -1 if it is negative, and 0 // if it is zero or NaN. Note that unlike sign_bit(), sgn() returns 0 for // both positive and negative zero. inline int sgn() const; ///////////////////////////////////////////////////////////////////////////// // Conversion Methods // // Note that some conversions are defined as functions further below, // e.g. to convert to an integer you can use lround(), llrint(), etc. // Round to double precision. Note that since doubles have a much smaller // exponent range than ExactFloats, very small values may be rounded to // (positive or negative) zero, and very large values may be rounded to // infinity. // // It is very important to make this a named method rather than an implicit // conversion, because otherwise there would be a silent loss of precision // whenever some desired operator or function happens not to be implemented. 
// For example, if fabs() were not implemented and "x" and "y" were // ExactFloats, then x = fabs(y) would silently convert "y" to a "double", // take its absolute value, and convert it back to an ExactFloat. double ToDouble() const; // Return a human-readable string such that if two values with the same // precision are different, then their string representations are different. // The format is similar to printf("%g"), except that the number of // significant digits depends on the precision (with a minimum of 10). // Trailing zeros are stripped (just like "%g"). // // Note that if two values have different precisions, they may have the same // ToString() value even though their values are slightly different. If you // need to distinguish such values, use ToUniqueString() intead. std::string ToString() const; // Return a string formatted according to printf("%Ng") where N is the given // maximum number of significant digits. std::string ToStringWithMaxDigits(int max_digits) const; // Return a human-readable string such that if two ExactFloats have different // values, then their string representations are always different. This // method is useful for debugging. The string has the form "value", // where "prec" is the actual precision of the ExactFloat (e.g., "0.215<50>"). std::string ToUniqueString() const; // Return an upper bound on the number of significant digits required to // distinguish any two floating-point numbers with the given precision when // they are formatted as decimal strings in exponential format. static int NumSignificantDigitsForPrec(int prec); // Output the ExactFloat in human-readable format, e.g. for logging. friend std::ostream& operator<<(std::ostream& o, ExactFloat const& f) { return o << f.ToString(); } ///////////////////////////////////////////////////////////////////////////// // Other Methods // Round the ExactFloat so that its mantissa has at most "max_prec" bits // using the given rounding mode. Requires "max_prec" to be at least 2 // (since kRoundTiesToEven doesn't make sense with fewer bits than this). ExactFloat RoundToMaxPrec(int max_prec, RoundingMode mode) const; ///////////////////////////////////////////////////////////////////////////// // Operators // Assignment operator. ExactFloat& operator=(const ExactFloat& b); // Unary plus. ExactFloat operator+() const { return *this; } // Unary minus. ExactFloat operator-() const; // Addition. friend ExactFloat operator+(const ExactFloat& a, const ExactFloat& b); // Subtraction. friend ExactFloat operator-(const ExactFloat& a, const ExactFloat& b); // Multiplication. friend ExactFloat operator*(const ExactFloat& a, const ExactFloat& b); // Division is not implemented because the result cannot be represented // exactly in general. Doing this properly would require extending all the // operations to support rounding to a specified precision. // Arithmetic assignment operators (+=, -=, *=). ExactFloat& operator+=(const ExactFloat& b) { return (*this = *this + b); } ExactFloat& operator-=(const ExactFloat& b) { return (*this = *this - b); } ExactFloat& operator*=(const ExactFloat& b) { return (*this = *this * b); } // Comparison operators (==, !=, <, <=, >, >=). friend bool operator==(const ExactFloat& a, const ExactFloat& b); friend bool operator<(const ExactFloat& a, const ExactFloat& b); // These don't need to be friends but are declared here for completeness. 
inline friend bool operator!=(const ExactFloat& a, const ExactFloat& b); inline friend bool operator<=(const ExactFloat& a, const ExactFloat& b); inline friend bool operator>(const ExactFloat& a, const ExactFloat& b); inline friend bool operator>=(const ExactFloat& a, const ExactFloat& b); ///////////////////////////////////////////////////////////////////// // Math Intrinsics // // The math intrinsics currently supported by ExactFloat are listed below. // Except as noted, they behave identically to the usual glibc intrinsics // except that they have greater precision. See the man pages for more // information. //////// Miscellaneous simple arithmetic functions. // Absolute value. friend ExactFloat fabs(const ExactFloat& a); friend ExactFloat abs(const ExactFloat& a); // Maximum of two values. friend ExactFloat fmax(const ExactFloat& a, const ExactFloat& b); // Minimum of two values. friend ExactFloat fmin(const ExactFloat& a, const ExactFloat& b); // Positive difference: max(a - b, 0). friend ExactFloat fdim(const ExactFloat& a, const ExactFloat& b); //////// Integer rounding functions that return ExactFloat values. // Round up to the nearest integer. friend ExactFloat ceil(const ExactFloat& a); // Round down to the nearest integer. friend ExactFloat floor(const ExactFloat& a); // Round to the nearest integer not larger in absolute value. // For example: f(-1.9) = -1, f(2.9) = 2. friend ExactFloat trunc(const ExactFloat& a); // Round to the nearest integer, rounding halfway cases away from zero. // For example: f(-0.5) = -1, f(0.5) = 1, f(1.5) = 2, f(2.5) = 3. friend ExactFloat round(const ExactFloat& a); // Round to the nearest integer, rounding halfway cases to an even integer. // For example: f(-0.5) = 0, f(0.5) = 0, f(1.5) = 2, f(2.5) = 2. friend ExactFloat rint(const ExactFloat& a); // A synonym for rint(). friend ExactFloat nearbyint(const ExactFloat& a) { return rint(a); } //////// Integer rounding functions that return C++ integer types. // Like rint(), but rounds to the nearest "long" value. Returns the // minimum/maximum possible integer if the value is out of range. friend long lrint(const ExactFloat& a); // Like rint(), but rounds to the nearest "long long" value. Returns the // minimum/maximum possible integer if the value is out of range. friend long long llrint(const ExactFloat& a); // Like round(), but rounds to the nearest "long" value. Returns the // minimum/maximum possible integer if the value is out of range. friend long lround(const ExactFloat& a); // Like round(), but rounds to the nearest "long long" value. Returns the // minimum/maximum possible integer if the value is out of range. friend long long llround(const ExactFloat& a); //////// Remainder functions. // The remainder of dividing "a" by "b", where the quotient is rounded toward // zero to the nearest integer. Similar to (a - trunc(a / b) * b). friend ExactFloat fmod(const ExactFloat& a, const ExactFloat& b) { // Note that it is possible to implement this operation exactly, it just // hasn't been done. return Unimplemented(); } // The remainder of dividing "a" by "b", where the quotient is rounded to the // nearest integer, rounding halfway cases to an even integer. Similar to // (a - rint(a / b) * b). friend ExactFloat remainder(const ExactFloat& a, const ExactFloat& b) { // Note that it is possible to implement this operation exactly, it just // hasn't been done. return Unimplemented(); } // A synonym for remainder(). 
friend ExactFloat drem(const ExactFloat& a, const ExactFloat& b) { return remainder(a, b); } // Break the argument "a" into integer and fractional parts, each of which // has the same sign as "a". The fractional part is returned, and the // integer part is stored in the output parameter "i_ptr". Both output // values are set to have the same maximum precision as "a". friend ExactFloat modf(const ExactFloat& a, ExactFloat* i_ptr) { // Note that it is possible to implement this operation exactly, it just // hasn't been done. return Unimplemented(); } //////// Floating-point manipulation functions. // Return an ExactFloat with the magnitude of "a" and the sign bit of "b". // (Note that an IEEE zero can be either positive or negative.) friend ExactFloat copysign(const ExactFloat& a, const ExactFloat& b); // Convert "a" to a normalized fraction in the range [0.5, 1) times a power // of two. Return the fraction and set "exp" to the exponent. If "a" is // zero, infinity, or NaN then return "a" and set "exp" to zero. friend ExactFloat frexp(const ExactFloat& a, int* exp); // Return "a" multiplied by 2 raised to the power "exp". friend ExactFloat ldexp(const ExactFloat& a, int exp); // A synonym for ldexp(). friend ExactFloat scalbn(const ExactFloat& a, int exp) { return ldexp(a, exp); } // A version of ldexp() where "exp" is a long integer. friend ExactFloat scalbln(const ExactFloat& a, long exp); // Convert "a" to a normalized fraction in the range [1,2) times a power of // two, and return the exponent value as an integer. This is equivalent to // lrint(floor(log2(fabs(a)))) but it is computed more efficiently. Returns // the constants documented in the man page for zero, infinity, or NaN. friend int ilogb(const ExactFloat& a); // Convert "a" to a normalized fraction in the range [1,2) times a power of // two, and return the exponent value as an ExactFloat. This is equivalent to // floor(log2(fabs(a))) but it is computed more efficiently. friend ExactFloat logb(const ExactFloat& a); protected: // OpenSSL >= 1.1 does not have BN_init, and does not support stack- // allocated BIGNUMS. We use BN_init when possible, but BN_new otherwise. // If the performance penalty is too high, an object pool can be added // in the future. #if defined(OPENSSL_IS_BORINGSSL) || OPENSSL_VERSION_NUMBER < 0x10100000L // BoringSSL and OpenSSL < 1.1 support stack allocated BIGNUMs and BN_init. class BigNum { public: BigNum() { BN_init(&bn_); } // Prevent accidental, expensive, copying. BigNum(const BigNum&) = delete; BigNum& operator=(const BigNum&) = delete; ~BigNum() { BN_free(&bn_); } BIGNUM* get() { return &bn_; } const BIGNUM* get() const { return &bn_; } private: BIGNUM bn_; }; #else class BigNum { public: BigNum() : bn_(BN_new()) {} BigNum(const BigNum&) = delete; BigNum& operator=(const BigNum&) = delete; ~BigNum() { BN_free(bn_); } BIGNUM* get() { return bn_; } const BIGNUM* get() const { return bn_; } private: BIGNUM* bn_; }; #endif // Non-normal numbers are represented using special exponent values and a // mantissa of zero. Do not change these values; methods such as // is_normal() make assumptions about their ordering. Non-normal numbers // can have either a positive or negative sign (including zero and NaN). 
static constexpr int32 kExpNaN = INT_MAX; static constexpr int32 kExpInfinity = INT_MAX - 1; static constexpr int32 kExpZero = INT_MAX - 2; // Normal numbers are represented as (sign_ * bn_ * (2 ** bn_exp_)), where: // - sign_ is either +1 or -1 // - bn_ is a BIGNUM with a positive value // - bn_exp_ is the base-2 exponent applied to bn_. int32 sign_; int32 bn_exp_; BigNum bn_; // A standard IEEE "double" has a 53-bit mantissa consisting of a 52-bit // fraction plus an implicit leading "1" bit. static constexpr int kDoubleMantissaBits = 53; // Convert an ExactFloat with no more than 53 bits in its mantissa to a // "double". This method handles non-normal values (NaN, etc). double ToDoubleHelper() const; // Round an ExactFloat so that it is a multiple of (2 ** bit_exp), using the // given rounding mode. ExactFloat RoundToPowerOf2(int bit_exp, RoundingMode mode) const; // Convert the ExactFloat to a decimal value of the form 0.ddd * (10 ** x), // with at most "max_digits" significant digits (trailing zeros are removed). // Set (*digits) to the ASCII digits and return the decimal exponent "x". int GetDecimalDigits(int max_digits, std::string* digits) const; // Return a_sign * fabs(a) + b_sign * fabs(b). Used to implement addition // and subtraction. static ExactFloat SignedSum(int a_sign, const ExactFloat* a, int b_sign, const ExactFloat* b); // Convert an ExactFloat to its canonical form. Underflow results in signed // zero, overflow results in signed infinity, and precision overflow results // in NaN. A zero mantissa is converted to the canonical zero value with // the given sign; otherwise the mantissa is normalized so that its low bit // is 1. Non-normal numbers are left unchanged. void Canonicalize(); // Scale the mantissa of this ExactFloat so that it has the same bn_exp_ as // "b", then return -1, 0, or 1 according to whether the scaled mantissa is // less, equal, or greater than the mantissa of "b". Requires that both // values are normal. int ScaleAndCompare(const ExactFloat& b) const; // Return true if the magnitude of this ExactFloat is less than the // magnitude of "b". Requires that neither value is NaN. bool UnsignedLess(const ExactFloat& b) const; // Return an ExactFloat with the magnitude of this ExactFloat and the given // sign. (Similar to copysign, except that the sign is given explicitly // rather than being copied from another ExactFloat.) inline ExactFloat CopyWithSign(int sign) const; // Convert an ExactFloat to an integer of type "T" using the given rounding // mode. The type "T" must be signed. Returns the largest possible integer // for NaN, and clamps out of range values to the largest or smallest // possible values. template T ToInteger(RoundingMode mode) const; // Log a fatal error message (used for unimplemented methods). static ExactFloat Unimplemented(); }; ///////////////////////////////////////////////////////////////////////// // Implementation details follow: inline ExactFloat::ExactFloat() : sign_(1), bn_exp_(kExpZero) { } inline bool ExactFloat::is_zero() const { return bn_exp_ == kExpZero; } inline bool ExactFloat::is_inf() const { return bn_exp_ == kExpInfinity; } inline bool ExactFloat::is_nan() const { return bn_exp_ == kExpNaN; } inline bool ExactFloat::is_normal() const { return bn_exp_ < kExpZero; } inline bool ExactFloat::is_finite() const { return bn_exp_ <= kExpZero; } inline bool ExactFloat::sign_bit() const { return sign_ < 0; } inline int ExactFloat::sgn() const { return (is_nan() || is_zero()) ? 
0 : sign_; } inline bool operator!=(const ExactFloat& a, const ExactFloat& b) { return !(a == b); } inline bool operator<=(const ExactFloat& a, const ExactFloat& b) { // NaN is unordered compared to everything, including itself. if (a.is_nan() || b.is_nan()) return false; return !(b < a); } inline bool operator>(const ExactFloat& a, const ExactFloat& b) { return b < a; } inline bool operator>=(const ExactFloat& a, const ExactFloat& b) { return b <= a; } inline ExactFloat ExactFloat::CopyWithSign(int sign) const { ExactFloat r = *this; r.sign_ = sign; return r; } #endif // S2_UTIL_MATH_EXACTFLOAT_EXACTFLOAT_H_ s2geometry-0.10.0/src/s2/util/math/mathutil.cc000066400000000000000000000043261422156367100210630ustar00rootroot00000000000000// Copyright 2008 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #include "s2/util/math/mathutil.h" #include #include bool MathUtil::RealRootsForCubic(long double const a, long double const b, long double const c, long double *const r1, long double *const r2, long double *const r3) { // According to Numerical Recipes (pp. 184-5), what // follows is an arrangement of computations to // compute the roots of a cubic that minimizes // roundoff error (as pointed out by A.J. Glassman). long double const a_squared = a * a, a_third = a / 3.0, b_tripled = 3.0 * b; long double const Q = (a_squared - b_tripled) / 9.0; long double const R = (2.0 * a_squared * a - 3.0 * a * b_tripled + 27.0 * c) / 54.0; long double const R_squared = R * R; long double const Q_cubed = Q * Q * Q; if (R_squared < Q_cubed) { long double const root_Q = sqrt(Q); long double const two_pi_third = 2.0 * M_PI / 3.0; long double const theta_third = acos(R / sqrt(Q_cubed)) / 3.0; long double const minus_two_root_Q = -2.0 * root_Q; *r1 = minus_two_root_Q * cos(theta_third) - a_third; *r2 = minus_two_root_Q * cos(theta_third + two_pi_third) - a_third; *r3 = minus_two_root_Q * cos(theta_third - two_pi_third) - a_third; return true; } long double const A = -sgn(R) * pow(std::abs(R) + sqrt(R_squared - Q_cubed), 1.0 / 3.0L); if (A != 0.0) { // in which case, B from NR is zero *r1 = A + Q / A - a_third; return false; } *r1 = *r2 = *r3 = -a_third; return true; } s2geometry-0.10.0/src/s2/util/math/mathutil.h000066400000000000000000000213741422156367100207270ustar00rootroot00000000000000// Copyright 2001 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
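//
// Usage sketch for MathUtil::RealRootsForCubic(), declared below (the
// coefficients here are example values only):
//
//   // Solve x^3 - 6x^2 + 11x - 6 = 0, whose roots are 1, 2 and 3.
//   long double r1, r2, r3;
//   bool all_real = MathUtil::RealRootsForCubic(-6.0L, 11.0L, -6.0L,
//                                               &r1, &r2, &r3);
//   // all_real == true, and {r1, r2, r3} is a permutation of {1, 2, 3}
//   // (up to long double rounding).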
// // // This class is intended to contain a collection of useful (static) // mathematical functions, properly coded (by consulting numerical // recipes or another authoritative source first). #ifndef S2_UTIL_MATH_MATHUTIL_H_ #define S2_UTIL_MATH_MATHUTIL_H_ #include #include "s2/base/integral_types.h" #include "s2/base/logging.h" #include "s2/util/bits/bits.h" // Returns the sign of x: // -1 if x < 0, // +1 if x > 0, // 0 if x = 0, // unspecified if x is NaN. template inline T sgn(const T x) { return (x == 0 ? 0 : (x < 0 ? -1 : 1)); } class MathUtil { public: // Solves for the real roots of x^3+ax^2+bx+c=0, returns true iff // all three are real, in which case the roots are stored (in any // order) in r1, r2, r3; otherwise, exactly one real root exists and // it is stored in r1. static bool RealRootsForCubic(long double a, long double b, long double c, long double *r1, long double *r2, long double *r3); // -------------------------------------------------------------------- // Round // This function rounds a floating-point number to an integer. It // works for positive or negative numbers. // // Values that are halfway between two integers may be rounded up or // down, for example Round(0.5) == 0 and Round(1.5) == 2. // This allows the function to be implemented efficiently on Intel // processors (see the template specializations at the bottom of this // file). You should not use this function if you care about which // way such half-integers are rounded. // // Example usage: // double y, z; // int x = Round(y + 3.7); // int64 b = Round(0.3 * z); // // Note that the floating-point template parameter is typically inferred // from the argument type, i.e. there is no need to specify it explicitly. // -------------------------------------------------------------------- template static IntOut Round(FloatIn x) { static_assert(!std::is_integral::value, "FloatIn is integer"); static_assert(std::is_integral::value, "IntOut is not integer"); // We don't use sgn(x) below because there is no need to distinguish the // (x == 0) case. Also note that there are specialized faster versions // of this function for Intel processors at the bottom of this file. return static_cast(x < 0 ? (x - 0.5) : (x + 0.5)); } // -------------------------------------------------------------------- // FastIntRound, FastInt64Round // Fast routines for converting floating-point numbers to integers. // // These routines are approximately 6 times faster than the default // implementation of Round on Intel processors (12 times faster on // the Pentium 3). They are also more than 5 times faster than simply // casting a "double" to an "int" using static_cast. This is // because casts are defined to truncate towards zero, which on Intel // processors requires changing the rounding mode and flushing the // floating-point pipeline (unless programs are compiled specifically // for the Pentium 4, which has a new instruction to avoid this). // // Numbers that are halfway between two integers may be rounded up or // down. This is because the conversion is done using the default // rounding mode, which rounds towards the closest even number in case // of ties. So for example, FastIntRound(0.5) == 0, but // FastIntRound(1.5) == 2. These functions should only be used with // applications that don't care about which way such half-integers are // rounded. // // There are template specializations of Round() which call these // functions (for "int" and "int64" only), but it's safer to call them // directly. 
// // This functions are equivalent to lrint() and llrint() as defined in // the ISO C99 standard. Unfortunately this standard does not seem to // widely adopted yet and these functions are not available by default. // -------------------------------------------------------------------- static int32 FastIntRound(double x) { // This function is not templatized because gcc doesn't seem to be able // to deal with inline assembly code in templatized functions, and there // is no advantage to passing an argument type of "float" on Intel // architectures anyway. #if defined __GNUC__ && (defined __i386__ || defined __SSE2__) #if defined __SSE2__ // SSE2. int32 result; __asm__ __volatile__ ("cvtsd2si %1, %0" : "=r" (result) // Output operand is a register : "x" (x)); // Input operand is an xmm register return result; #elif defined __i386__ // FPU stack. Adapted from /usr/include/bits/mathinline.h. int32 result; __asm__ __volatile__ ("fistpl %0" : "=m" (result) // Output operand is a memory location : "t" (x) // Input operand is top of FP stack : "st"); // Clobbers (pops) top of FP stack return result; #endif // if defined __x86_64__ || ... #else return Round(x); #endif // if defined __GNUC__ && ... } static int64 FastInt64Round(double x) { #if defined __GNUC__ && (defined __i386__ || defined __x86_64__) #if defined __x86_64__ // SSE2. int64 result; __asm__ __volatile__ ("cvtsd2si %1, %0" : "=r" (result) // Output operand is a register : "x" (x)); // Input operand is an xmm register return result; #elif defined __i386__ // There is no CVTSD2SI in i386 to produce a 64 bit int, even with SSE2. // FPU stack. Adapted from /usr/include/bits/mathinline.h. int64 result; __asm__ __volatile__ ("fistpll %0" : "=m" (result) // Output operand is a memory location : "t" (x) // Input operand is top of FP stack : "st"); // Clobbers (pops) top of FP stack return result; #endif // if defined __i386__ #else return Round(x); #endif // if defined __GNUC__ && ... } // Computes v^i, where i is a non-negative integer. // When T is a floating point type, this has the same semantics as pow(), but // is much faster. // T can also be any integral type, in which case computations will be // performed in the value domain of this integral type, and overflow semantics // will be those of T. // You can also use any type for which operator*= is defined. template static T IPow(T base, int exp) { S2_DCHECK_GE(exp, 0); uint32 uexp = static_cast(exp); if (uexp < 16) { T result = (uexp & 1) ? base : static_cast(1); if (uexp >= 2) { base *= base; if (uexp & 2) { result *= base; } if (uexp >= 4) { base *= base; if (uexp & 4) { result *= base; } if (uexp >= 8) { base *= base; result *= base; } } } return result; } T result = base; int count = 31 ^ Bits::Log2FloorNonZero(uexp); uexp <<= count; count ^= 31; while (count--) { uexp <<= 1; result *= result; if (uexp >= 0x80000000) { result *= base; } } return result; } }; // ========================================================================= // #if (defined __i386__ || defined __x86_64__) && defined __GNUC__ // We define template specializations of Round() to get the more efficient // Intel versions when possible. Note that gcc does not currently support // partial specialization of templatized functions. 
template<> inline int32 MathUtil::Round(double x) { return FastIntRound(x); } template<> inline int32 MathUtil::Round(float x) { return FastIntRound(x); } template<> inline int64 MathUtil::Round(double x) { return FastInt64Round(x); } template<> inline int64 MathUtil::Round(float x) { return FastInt64Round(x); } #endif #endif // S2_UTIL_MATH_MATHUTIL_H_ s2geometry-0.10.0/src/s2/util/math/matrix3x3.h000066400000000000000000000435451422156367100207460ustar00rootroot00000000000000// Copyright 2003 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // // A simple class to handle 3x3 matrices // The aim of this class is to be able to manipulate 3x3 matrices // and 3D vectors as naturally as possible and make calculations // readable. // For that reason, the operators +, -, * are overloaded. // (Reading a = a + b*2 - c is much easier to read than // a = Sub(Add(a, Mul(b,2)),c) ) // // Please be careful about overflows when using those matrices wth integer types // The calculations are carried with VType. eg : if you are using uint8 as the // base type, all values will be modulo 256. // This feature is necessary to use the class in a more general framework with // VType != plain old data type. #ifndef S2_UTIL_MATH_MATRIX3X3_H_ #define S2_UTIL_MATH_MATRIX3X3_H_ #include #include #include #include "s2/base/logging.h" #include "s2/util/math/mathutil.h" #include "s2/util/math/vector.h" template class Matrix3x3 { private: VType m_[3][3]; public: typedef Matrix3x3 Self; typedef VType BaseType; typedef Vector3 MVector; // Initialize the matrix to 0 Matrix3x3() { m_[0][2] = m_[0][1] = m_[0][0] = VType(); m_[1][2] = m_[1][1] = m_[1][0] = VType(); m_[2][2] = m_[2][1] = m_[2][0] = VType(); } // Constructor explicitly setting the values of all the coefficient of // the matrix Matrix3x3(const VType &m00, const VType &m01, const VType &m02, const VType &m10, const VType &m11, const VType &m12, const VType &m20, const VType &m21, const VType &m22) { m_[0][0] = m00; m_[0][1] = m01; m_[0][2] = m02; m_[1][0] = m10; m_[1][1] = m11; m_[1][2] = m12; m_[2][0] = m20; m_[2][1] = m21; m_[2][2] = m22; } // Casting constructor template static Matrix3x3 Cast(const Matrix3x3 &mb) { return Matrix3x3(static_cast(mb(0, 0)), // static_cast(mb(0, 1)), // static_cast(mb(0, 2)), // static_cast(mb(1, 0)), // static_cast(mb(1, 1)), // static_cast(mb(1, 2)), // static_cast(mb(2, 0)), // static_cast(mb(2, 1)), // static_cast(mb(2, 2))); } // Change the value of all the coefficients of the matrix inline Matrix3x3 &Set(const VType &m00, const VType &m01, const VType &m02, const VType &m10, const VType &m11, const VType &m12, const VType &m20, const VType &m21, const VType &m22) { m_[0][0] = m00; m_[0][1] = m01; m_[0][2] = m02; m_[1][0] = m10; m_[1][1] = m11; m_[1][2] = m12; m_[2][0] = m20; m_[2][1] = m21; m_[2][2] = m22; return (*this); } // Matrix addition inline Matrix3x3 &operator+=(const Matrix3x3 &mb) { m_[0][0] += mb.m_[0][0]; m_[0][1] += mb.m_[0][1]; m_[0][2] += mb.m_[0][2]; m_[1][0] += mb.m_[1][0]; 
m_[1][1] += mb.m_[1][1]; m_[1][2] += mb.m_[1][2]; m_[2][0] += mb.m_[2][0]; m_[2][1] += mb.m_[2][1]; m_[2][2] += mb.m_[2][2]; return (*this); } // Matrix subtration inline Matrix3x3 &operator-=(const Matrix3x3 &mb) { m_[0][0] -= mb.m_[0][0]; m_[0][1] -= mb.m_[0][1]; m_[0][2] -= mb.m_[0][2]; m_[1][0] -= mb.m_[1][0]; m_[1][1] -= mb.m_[1][1]; m_[1][2] -= mb.m_[1][2]; m_[2][0] -= mb.m_[2][0]; m_[2][1] -= mb.m_[2][1]; m_[2][2] -= mb.m_[2][2]; return (*this); } // Matrix multiplication by a scalar inline Matrix3x3 &operator*=(const VType &k) { m_[0][0] *= k; m_[0][1] *= k; m_[0][2] *= k; m_[1][0] *= k; m_[1][1] *= k; m_[1][2] *= k; m_[2][0] *= k; m_[2][1] *= k; m_[2][2] *= k; return (*this); } // Matrix addition inline Matrix3x3 operator+(const Matrix3x3 &mb) const { return Matrix3x3(*this) += mb; } // Matrix subtraction inline Matrix3x3 operator-(const Matrix3x3 &mb) const { return Matrix3x3(*this) -= mb; } // Change the sign of all the coefficients in the matrix friend inline Matrix3x3 operator-(const Matrix3x3 &vb) { return Matrix3x3(-vb.m_[0][0], -vb.m_[0][1], -vb.m_[0][2], // -vb.m_[1][0], -vb.m_[1][1], -vb.m_[1][2], // -vb.m_[2][0], -vb.m_[2][1], -vb.m_[2][2]); } // Matrix multiplication by a scalar inline Matrix3x3 operator*(const VType &k) const { return Matrix3x3(*this) *= k; } friend inline Matrix3x3 operator*(const VType &k, const Matrix3x3 &mb) { return Matrix3x3(mb) * k; } // Matrix multiplication inline Matrix3x3 operator*(const Matrix3x3 &mb) const { // clang-format off return Matrix3x3( m_[0][0] * mb.m_[0][0] + m_[0][1] * mb.m_[1][0] + m_[0][2] * mb.m_[2][0], m_[0][0] * mb.m_[0][1] + m_[0][1] * mb.m_[1][1] + m_[0][2] * mb.m_[2][1], m_[0][0] * mb.m_[0][2] + m_[0][1] * mb.m_[1][2] + m_[0][2] * mb.m_[2][2], m_[1][0] * mb.m_[0][0] + m_[1][1] * mb.m_[1][0] + m_[1][2] * mb.m_[2][0], m_[1][0] * mb.m_[0][1] + m_[1][1] * mb.m_[1][1] + m_[1][2] * mb.m_[2][1], m_[1][0] * mb.m_[0][2] + m_[1][1] * mb.m_[1][2] + m_[1][2] * mb.m_[2][2], m_[2][0] * mb.m_[0][0] + m_[2][1] * mb.m_[1][0] + m_[2][2] * mb.m_[2][0], m_[2][0] * mb.m_[0][1] + m_[2][1] * mb.m_[1][1] + m_[2][2] * mb.m_[2][1], m_[2][0] * mb.m_[0][2] + m_[2][1] * mb.m_[1][2] + m_[2][2] * mb.m_[2][2]); // clang-format on } // Multiplication of a matrix by a vector inline MVector operator*(const MVector &v) const { return MVector(m_[0][0] * v[0] + m_[0][1] * v[1] + m_[0][2] * v[2], m_[1][0] * v[0] + m_[1][1] * v[1] + m_[1][2] * v[2], m_[2][0] * v[0] + m_[2][1] * v[1] + m_[2][2] * v[2]); } // Return the determinant of the matrix inline VType Det() const { return m_[0][0] * m_[1][1] * m_[2][2] + m_[0][1] * m_[1][2] * m_[2][0] + m_[0][2] * m_[1][0] * m_[2][1] - m_[2][0] * m_[1][1] * m_[0][2] - m_[2][1] * m_[1][2] * m_[0][0] - m_[2][2] * m_[1][0] * m_[0][1]; } // Return the trace of the matrix inline VType Trace() const { return m_[0][0] + m_[1][1] + m_[2][2]; } // Return a pointer to the data array for interface with other libraries // like opencv VType *Data() { return reinterpret_cast(m_); } const VType *Data() const { return reinterpret_cast(m_); } // Return matrix element (i,j) with 0<=i<=2 0<=j<=2 inline VType &operator()(const int i, const int j) { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 3); S2_DCHECK_GE(j, 0); S2_DCHECK_LT(j, 3); return m_[i][j]; } inline VType operator()(const int i, const int j) const { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 3); S2_DCHECK_GE(j, 0); S2_DCHECK_LT(j, 3); return m_[i][j]; } // Return matrix element (i/3,i%3) with 0<=i<=8 (access concatenated rows). 
inline VType &operator[](const int i) { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 9); return reinterpret_cast(m_)[i]; } inline VType operator[](const int i) const { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 9); return reinterpret_cast(m_)[i]; } // Return the transposed matrix inline Matrix3x3 Transpose() const { return Matrix3x3(m_[0][0], m_[1][0], m_[2][0], // m_[0][1], m_[1][1], m_[2][1], // m_[0][2], m_[1][2], m_[2][2]); } // Return the transposed of the matrix of the cofactors // (Useful for inversion for example) inline Matrix3x3 ComatrixTransposed() const { return Matrix3x3(m_[1][1] * m_[2][2] - m_[2][1] * m_[1][2], m_[2][1] * m_[0][2] - m_[0][1] * m_[2][2], m_[0][1] * m_[1][2] - m_[1][1] * m_[0][2], m_[1][2] * m_[2][0] - m_[2][2] * m_[1][0], m_[2][2] * m_[0][0] - m_[0][2] * m_[2][0], m_[0][2] * m_[1][0] - m_[1][2] * m_[0][0], m_[1][0] * m_[2][1] - m_[2][0] * m_[1][1], m_[2][0] * m_[0][1] - m_[0][0] * m_[2][1], m_[0][0] * m_[1][1] - m_[1][0] * m_[0][1]); } // Matrix inversion inline Matrix3x3 Inverse() const { VType det = Det(); S2_CHECK_NE(det, VType(0)) << " Can't inverse. Determinant = 0."; return (VType(1) / det) * ComatrixTransposed(); } // Return the vector 3D at row i inline MVector Row(const int i) const { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 3); return MVector(m_[i][0], m_[i][1], m_[i][2]); } // Return the vector 3D at col i inline MVector Col(const int i) const { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 3); return MVector(m_[0][i], m_[1][i], m_[2][i]); } // Create a matrix from 3 row vectors static inline Matrix3x3 FromRows(const MVector &v1, const MVector &v2, const MVector &v3) { Matrix3x3 temp; temp.Set(v1[0], v1[1], v1[2], // v2[0], v2[1], v2[2], // v3[0], v3[1], v3[2]); return temp; } // Create a matrix from 3 column vectors static inline Matrix3x3 FromCols(const MVector &v1, const MVector &v2, const MVector &v3) { Matrix3x3 temp; temp.Set(v1[0], v2[0], v3[0], // v1[1], v2[1], v3[1], // v1[2], v2[2], v3[2]); return temp; } // Set the vector in row i to be v1 void SetRow(int i, const MVector &v1) { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 3); m_[i][0] = v1[0]; m_[i][1] = v1[1]; m_[i][2] = v1[2]; } // Set the vector in column i to be v1 void SetCol(int i, const MVector &v1) { S2_DCHECK_GE(i, 0); S2_DCHECK_LT(i, 3); m_[0][i] = v1[0]; m_[1][i] = v1[1]; m_[2][i] = v1[2]; } // Return a matrix M close to the original but verifying MtM = I // (useful to compensate for errors in a rotation matrix) Matrix3x3 Orthogonalize() const { MVector r1, r2, r3; r1 = Row(0).Normalize(); r2 = (Row(2).CrossProd(r1)).Normalize(); r3 = (r1.CrossProd(r2)).Normalize(); return FromRows(r1, r2, r3); } // Return the identity matrix static inline Matrix3x3 Identity() { Matrix3x3 temp; temp.Set(VType(1), VType(0), VType(0), // VType(0), VType(1), VType(0), // VType(0), VType(0), VType(1)); return temp; } // Return a matrix full of zeros static inline Matrix3x3 Zero() { return Matrix3x3(); } // Return a diagonal matrix with the coefficients in v static inline Matrix3x3 Diagonal(const MVector &v) { return Matrix3x3(v[0], VType(), VType(), // VType(), v[1], VType(), // VType(), VType(), v[2]); } // Return the matrix vvT static Matrix3x3 Sym3(const MVector &v) { return Matrix3x3(v[0] * v[0], v[0] * v[1], v[0] * v[2], // v[1] * v[0], v[1] * v[1], v[1] * v[2], // v[2] * v[0], v[2] * v[1], v[2] * v[2]); } // Return a matrix M such that: // for each u, M * u = v.CrossProd(u) static Matrix3x3 AntiSym3(const MVector &v) { return Matrix3x3(VType(), -v[2], v[1], // v[2], VType(), -v[0], // -v[1], v[0], VType()); } // Returns matrix 
that rotates |rot| radians around axis rot. static Matrix3x3 Rodrigues(const MVector &rot) { Matrix3x3 R; VType theta = rot.Norm(); MVector w = rot.Normalize(); Matrix3x3 Wv = Matrix3x3::AntiSym3(w); Matrix3x3 I = Matrix3x3::Identity(); Matrix3x3 A = Matrix3x3::Sym3(w); R = (1 - std::cos(theta)) * A + std::sin(theta) * Wv + std::cos(theta) * I; return R; } // Returns v.Transpose() * (*this) * u VType MulBothSides(const MVector &v, const MVector &u) const { return ((*this) * u).DotProd(v); } // Use the 3x3 matrix as a projective transform for 2d points Vector2 Project(const Vector2 &v) const { MVector temp = (*this) * MVector(v[0], v[1], 1); return Vector2(temp[0] / temp[2], temp[1] / temp[2]); } // Return the Frobenius norm of the matrix: sqrt(sum(aij^2)) VType FrobeniusNorm() const { VType sum = VType(); for (int i = 0; i < 3; i++) { for (int j = 0; j < 3; j++) { sum += m_[i][j] * m_[i][j]; } } return std::sqrt(sum); } // Finds the eigen values of the matrix. Return the number of real eigenvalues // found. // If the matrix is known to be symmetric due to your problem formulation, // then please use SymmetricEigenSolver, since this method does not guarantee // finding all 3 real eigenvalues in pathological cases. See CL 49170250. int EigenValues(MVector *eig_val) const { long double r1, r2, r3; // NOLINT // characteristic polynomial // x^3 + a*x^2 + b*x + c VType a = -Trace(); VType b = m_[0][0] * m_[1][1] + m_[1][1] * m_[2][2] + m_[2][2] * m_[0][0] - m_[1][0] * m_[0][1] - m_[2][1] * m_[1][2] - m_[0][2] * m_[2][0]; VType c = -Det(); bool res = MathUtil::RealRootsForCubic(a, b, c, &r1, &r2, &r3); (*eig_val)[0] = r1; if (res) { (*eig_val)[1] = r2; (*eig_val)[2] = r3; return 3; } return 1; } // Finds the eigen values and optional associated eigen vectors of a // symmetric 3x3 matrix (not necessarily positive definite). // eigen values are sorted in decreasing order; // eig_val corresponds to the columns of the eig_vec matrix. // Note: The routine will only use the lower diagonal part // of the matrix, i.e. // | a00, | // | a10, a11, | // | a20, a21, a22 | void SymmetricEigenSolver(MVector *eig_val, Matrix3x3 *eig_vec /*nullable*/) const { // Compute characteristic polynomial coefficients. double c2 = -Trace(); double c1 = -(m_[1][0] * m_[1][0] - m_[0][0] * m_[1][1] - m_[0][0] * m_[2][2] - m_[1][1] * m_[2][2] + m_[2][0] * m_[2][0] + m_[2][1] * m_[2][1]); double c0 = -(m_[0][0] * m_[1][1] * m_[2][2] // - m_[2][0] * m_[2][0] * m_[1][1] // - m_[1][0] * m_[1][0] * m_[2][2] // - m_[0][0] * m_[2][1] * m_[2][1] // + 2 * m_[1][0] * m_[2][0] * m_[2][1]); // Root finding x^3 + c2*x^2 + c1*x + c0 = 0. // NOTE: Cannot reuse general cubic solver MathUtil::RealRootsForCubic() // because it doesn't guarantee finding 3 real roots, e.g. it won't always // return roots {2, 2, 0} for the cubic x^3 - 4*x^2 + 4*x + epsilon = 0. double q = (c2 * c2 - 3 * c1) / 9.0; double r = (2 * c2 * c2 * c2 - 9 * c2 * c1 + 27 * c0) / 54.0; // Assume R^2 <= Q^3 so there are three real roots. // Avoid sqrt of negative q, which can only happen due to numerical error. if (q < 0) q = 0; double sqrt_q = -2.0 * sqrt(q); double q3_r2 = q * q * q - r * r; // Avoid sqrt of negative q3_r2, which can only happen due to numerical // error. double theta = atan2(q3_r2 <= 0 ? 
0 : sqrt(q3_r2), r); double c2_3 = c2 / 3; (*eig_val)[0] = sqrt_q * cos(theta / 3.0) - c2_3; (*eig_val)[1] = sqrt_q * cos((theta + 2.0 * M_PI) / 3.0) - c2_3; (*eig_val)[2] = sqrt_q * cos((theta - 2.0 * M_PI) / 3.0) - c2_3; // Sort eigen value in decreasing order Vector3 d_order = eig_val->ComponentOrder(); (*eig_val) = MVector((*eig_val)[d_order[2]], (*eig_val)[d_order[1]], (*eig_val)[d_order[0]]); // Compute eigenvectors if (!eig_vec) return; for (int i = 0; i < 3; ++i) { MVector r1, r2, r3, e1, e2, e3; r1[0] = m_[0][0] - (*eig_val)[i]; r2[0] = m_[1][0]; r3[0] = m_[2][0]; r1[1] = m_[1][0]; r2[1] = m_[1][1] - (*eig_val)[i]; r3[1] = m_[2][1]; r1[2] = m_[2][0]; r2[2] = m_[2][1]; r3[2] = m_[2][2] - (*eig_val)[i]; e1 = r1.CrossProd(r2); e2 = r2.CrossProd(r3); e3 = r3.CrossProd(r1); // Make e2 and e3 point in the same direction as e1 if (e2.DotProd(e1) < 0) e2 = -e2; if (e3.DotProd(e1) < 0) e3 = -e3; MVector e = (e1 + e2 + e3).Normalize(); eig_vec->SetCol(i, e); } } // Return true is one of the elements of the matrix is NaN bool IsNaN() const { for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { if (isnan(m_[i][j])) { return true; } } } return false; } friend bool operator==(const Matrix3x3 &a, const Matrix3x3 &b) { return a.m_[0][0] == b.m_[0][0] && a.m_[0][1] == b.m_[0][1] && a.m_[0][2] == b.m_[0][2] && a.m_[1][0] == b.m_[1][0] && a.m_[1][1] == b.m_[1][1] && a.m_[1][2] == b.m_[1][2] && a.m_[2][0] == b.m_[2][0] && a.m_[2][1] == b.m_[2][1] && a.m_[2][2] == b.m_[2][2]; } friend bool operator!=(const Matrix3x3 &a, const Matrix3x3 &b) { return !(a == b); } friend std::ostream &operator<<(std::ostream &out, const Matrix3x3 &mb) { int i, j; for (i = 0; i < 3; i++) { if (i == 0) { out << "["; } else { out << " "; } for (j = 0; j < 3; j++) { out << mb(i, j) << " "; } if (i == 2) { out << "]"; } else { out << std::endl; } } return out; } template friend H AbslHashValue(H h, const Matrix3x3 &m) { return H::combine_contiguous(std::move(h), m.Data(), 3 * 3); } }; typedef Matrix3x3 Matrix3x3_i; typedef Matrix3x3 Matrix3x3_f; typedef Matrix3x3 Matrix3x3_d; #endif // S2_UTIL_MATH_MATRIX3X3_H_ s2geometry-0.10.0/src/s2/util/math/vector.h000066400000000000000000000441141422156367100203770ustar00rootroot00000000000000// Copyright Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS-IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Simple classes to handle vectors in 2D, 3D, and 4D. // // Maintainers: Please be mindful of extreme degradations in unoptimized // performance here. #ifndef S2_UTIL_MATH_VECTOR_H_ #define S2_UTIL_MATH_VECTOR_H_ #include #include #include #include #include #include #include // NOLINT(readability/streams) #include #include #include #include "s2/base/integral_types.h" #include "s2/base/logging.h" #include "absl/base/macros.h" #include "absl/utility/utility.h" template class Vector2; template class Vector3; template class Vector4; namespace util { namespace math { namespace internal_vector { // CRTP base class for all Vector templates. template