package.xml0000644000076500000240000101761313572250761012514 0ustar alcaeusstaff mongodb pecl.php.net MongoDB driver for PHP The purpose of this driver is to provide exceptionally thin glue between MongoDB and PHP, implementing only fundamental and performance-critical components necessary to build a fully-functional MongoDB driver. Andreas Braun alcaeus alcaeus@php.net yes Jeremy Mikola jmikola jmikola@php.net yes Derick Rethans derick derick@php.net no Hannes Magnusson bjori bjori@php.net no Katherine Walker kvwalker kvwalker@php.net no 2019-12-05 1.6.1 1.6.1 stable stable Apache License ** Bug * [PHPC-1503] - Fix MONGOC_CC assignment in config.w32 for PHP 7.4 ** Task * [PHPC-1504] - Add PHP 7.4 to AppVeyor build matrix * [PHPC-1505] - Update to libmongoc 1.15.2 5.6.0 7.99.99 1.4.8 mongodb mongodb-1.6.1/scripts/autotools/libbson/CheckAtomics.m40000644000076500000240000000211213572250757022424 0ustar alcaeusstaffAC_LANG_PUSH([C]) AC_MSG_CHECKING([for __sync_add_and_fetch_4]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]], [[int32_t v = 1; return __sync_add_and_fetch_4 (&v, (int32_t)10);]])], [AC_MSG_RESULT(yes) have_sync_add_and_fetch_4=yes], [AC_MSG_RESULT(no) have_sync_add_and_fetch_4=no]) AS_IF([test "$have_sync_add_and_fetch_4" = "yes"], [AC_SUBST(BSON_HAVE_ATOMIC_32_ADD_AND_FETCH, 1)], [AC_SUBST(BSON_HAVE_ATOMIC_32_ADD_AND_FETCH, 0)]) AC_MSG_CHECKING([for __sync_add_and_fetch_8]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]], [[int64_t v; return __sync_add_and_fetch_8 (&v, (int64_t)10);]])], [AC_MSG_RESULT(yes) have_sync_add_and_fetch_8=yes], [AC_MSG_RESULT(no) have_sync_add_and_fetch_8=no]) AS_IF([test "$have_sync_add_and_fetch_8" = "yes"], [AC_SUBST(BSON_HAVE_ATOMIC_64_ADD_AND_FETCH, 1)], [AC_SUBST(BSON_HAVE_ATOMIC_64_ADD_AND_FETCH, 0)]) AC_LANG_POP([C]) mongodb-1.6.1/scripts/autotools/libbson/CheckHeaders.m40000644000076500000240000000042113572250757022401 0ustar alcaeusstaffAC_HEADER_STDBOOL AC_SUBST(BSON_HAVE_STDBOOL_H, 0) if test "$ac_cv_header_stdbool_h" = "yes"; then AC_SUBST(BSON_HAVE_STDBOOL_H, 1) fi AC_CHECK_HEADER([strings.h], [AC_SUBST(BSON_HAVE_STRINGS_H, 1)], [AC_SUBST(BSON_HAVE_STRINGS_H, 0)]) mongodb-1.6.1/scripts/autotools/libbson/Endian.m40000644000076500000240000000020313572250757021264 0ustar alcaeusstaffAC_C_BIGENDIAN AC_SUBST(BSON_BYTE_ORDER, 1234) if test "x$ac_cv_c_bigendian" = "xyes"; then AC_SUBST(BSON_BYTE_ORDER, 4321) fi mongodb-1.6.1/scripts/autotools/libbson/FindDependencies.m40000644000076500000240000000741213572250757023266 0ustar alcaeusstaff# Check for strnlen() dnl AC_CHECK_FUNC isn't properly respecting _XOPEN_SOURCE for strnlen for unknown reason AC_SUBST(BSON_HAVE_STRNLEN, 0) AC_CACHE_CHECK([for strnlen], bson_cv_have_strnlen, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include int strnlen () { return 0; } ]])], [bson_cv_have_strnlen=no], [bson_cv_have_strnlen=yes])]) if test "$bson_cv_have_strnlen" = yes; then AC_SUBST(BSON_HAVE_STRNLEN, 1) fi # Check for reallocf() (BSD/Darwin) AC_SUBST(BSON_HAVE_REALLOCF, 0) AC_CACHE_CHECK([for reallocf], bson_cv_have_reallocf, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include int reallocf () { return 0; } ]])], [bson_cv_have_reallocf=no], [bson_cv_have_reallocf=yes])]) if test "$bson_cv_have_reallocf" = yes; then AC_SUBST(BSON_HAVE_REALLOCF, 1) fi # Check for syscall() AC_SUBST(BSON_HAVE_SYSCALL_TID, 0) AC_CACHE_CHECK([for syscall], bson_cv_have_syscall_tid, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include #include int syscall () { return 0; } ]])], [bson_cv_have_syscall_tid=no], [bson_cv_have_syscall_tid=yes])]) if 
test "$bson_cv_have_syscall_tid" = yes -a "$os_darwin" != "yes"; then AC_CACHE_CHECK([for SYS_gettid], bson_cv_have_sys_gettid_tid, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include #include int gettid () { return SYS_gettid; } ]])], [bson_cv_have_sys_gettid_tid=yes], [bson_cv_have_sys_gettid_tid=no])]) if test "$bson_cv_have_sys_gettid_tid" = yes; then AC_SUBST(BSON_HAVE_SYSCALL_TID, 1) fi fi # Check for snprintf() AC_SUBST(BSON_HAVE_SNPRINTF, 0) AC_CHECK_FUNC(snprintf, [AC_SUBST(BSON_HAVE_SNPRINTF, 1)]) # Check for struct timespec AC_SUBST(BSON_HAVE_TIMESPEC, 0) AC_CHECK_TYPE([struct timespec], [AC_SUBST(BSON_HAVE_TIMESPEC, 1)], [], [#include ]) # Check for clock_gettime and if it needs -lrt AC_SUBST(BSON_HAVE_CLOCK_GETTIME, 0) AC_SEARCH_LIBS([clock_gettime], [rt], [AC_SUBST(BSON_HAVE_CLOCK_GETTIME, 1)]) # Check if math functions need -lm AC_SEARCH_LIBS([floor], [m]) # Check for gmtime_r() AC_SUBST(BSON_HAVE_GMTIME_R, 0) AC_CHECK_FUNC(gmtime_r, [AC_SUBST(BSON_HAVE_GMTIME_R, 1)]) # Check for rand_r() AC_SUBST(BSON_HAVE_RAND_R, 0) AC_CHECK_FUNC(rand_r, [AC_SUBST(BSON_HAVE_RAND_R, 1)]) # Check for pthreads. We might need to make this better to handle mingw, # but I actually think it is okay to just check for it even though we will # use win32 primatives. AX_PTHREAD([ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PTHREAD_CFLAGS" PHP_EVAL_LIBLINE([$PTHREAD_LIBS],[MONGODB_SHARED_LIBADD]) # PTHREAD_CFLAGS may come back as "-pthread", which should also be used when # linking. We can trust PHP_EVAL_LIBLINE to ignore other values. PHP_EVAL_LIBLINE([$PTHREAD_CFLAGS],[MONGODB_SHARED_LIBADD]) ],[ AC_MSG_ERROR([libbson requires pthreads on non-Windows platforms.]) ]) # The following is borrowed from the guile configure script. # # On past versions of Solaris, believe 8 through 10 at least, you # had to write "pthread_once_t foo = { PTHREAD_ONCE_INIT };". # This is contrary to POSIX: # http://www.opengroup.org/onlinepubs/000095399/functions/pthread_once.html # Check here if this style is required. # # glibc (2.3.6 at least) works both with or without braces, so the # test checks whether it works without. # AC_SUBST(BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES, 0) AC_CACHE_CHECK([whether PTHREAD_ONCE_INIT needs braces], bson_cv_need_braces_on_pthread_once_init, [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include pthread_once_t foo = PTHREAD_ONCE_INIT;]])], [bson_cv_need_braces_on_pthread_once_init=no], [bson_cv_need_braces_on_pthread_once_init=yes])]) if test "$bson_cv_need_braces_on_pthread_once_init" = yes; then AC_SUBST(BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES, 1) fi mongodb-1.6.1/scripts/autotools/libbson/Versions.m40000644000076500000240000000120213572250757021676 0ustar alcaeusstaffBSON_CURRENT_FILE=[]PHP_EXT_SRCDIR(mongodb)[/src/LIBMONGOC_VERSION_CURRENT] BSON_VERSION=$(cat $BSON_CURRENT_FILE) dnl Ensure newline for "cut" implementations that need it, e.g. HP-UX. BSON_MAJOR_VERSION=$( (cat $BSON_CURRENT_FILE; echo) | cut -d- -f1 | cut -d. -f1 ) BSON_MINOR_VERSION=$( (cat $BSON_CURRENT_FILE; echo) | cut -d- -f1 | cut -d. -f2 ) BSON_MICRO_VERSION=$( (cat $BSON_CURRENT_FILE; echo) | cut -d- -f1 | cut -d. 
-f3 ) BSON_PRERELEASE_VERSION=$(cut -s -d- -f2 $BSON_CURRENT_FILE) AC_SUBST(BSON_VERSION) AC_SUBST(BSON_MAJOR_VERSION) AC_SUBST(BSON_MINOR_VERSION) AC_SUBST(BSON_MICRO_VERSION) AC_SUBST(BSON_PRERELEASE_VERSION) mongodb-1.6.1/scripts/autotools/libmongoc/CheckCompression.m40000644000076500000240000000412613572250757023656 0ustar alcaeusstafffound_snappy="no" found_zlib="no" bundled_zlib="no" PKG_CHECK_MODULES([PHP_MONGODB_SNAPPY],[snappy],[ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PHP_MONGODB_SNAPPY_CFLAGS" PHP_EVAL_LIBLINE([$PHP_MONGODB_SNAPPY_LIBS],[MONGODB_SHARED_LIBADD]) found_snappy="yes" ],[ PHP_CHECK_LIBRARY([snappy], [snappy_uncompress], [have_snappy_lib="yes"], [have_snappy_lib="no"]) AC_CHECK_HEADER([snappy-c.h], [have_snappy_headers=yes], [have_snappy_headers=no]) if test "$have_snappy_lib" = "yes" -a "$have_snappy_headers" = "yes"; then PHP_ADD_LIBRARY([snappy],,[MONGODB_SHARED_LIBADD]) found_snappy="yes" fi ]) PKG_CHECK_MODULES([PHP_MONGODB_ZLIB],[zlib],[ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PHP_MONGODB_ZLIB_CFLAGS" PHP_EVAL_LIBLINE([$PHP_MONGODB_ZLIB_LIBS],[MONGODB_SHARED_LIBADD]) found_zlib="yes" ],[ PHP_CHECK_LIBRARY([zlib], [compress2], [have_zlib_lib="yes"], [have_zlib_lib="no"]) AC_CHECK_HEADER([zlib.h], [have_zlib_headers=yes], [have_zlib_headers=no]) if test "$have_zlib_lib" = "yes" -a "$have_zlib_headers" = "yes"; then PHP_ADD_LIBRARY([z],,[MONGODB_SHARED_LIBADD]) found_zlib="yes" fi ]) dnl If zlib was not found, use libmongoc's bundled version AS_IF([test "$found_zlib" != "yes"],[ bundled_zlib="yes" ]) if test "$found_snappy" = "yes" -o "$found_zlib" = "yes" -o "$bundled_zlib" = "yes"; then AC_SUBST(MONGOC_ENABLE_COMPRESSION, 1) if test "$found_snappy" = "yes"; then AC_SUBST(MONGOC_ENABLE_COMPRESSION_SNAPPY, 1) else AC_SUBST(MONGOC_ENABLE_COMPRESSION_SNAPPY, 0) fi if test "$found_zlib" = "yes" -o "$bundled_zlib" = "yes"; then AC_SUBST(MONGOC_ENABLE_COMPRESSION_ZLIB, 1) else AC_SUBST(MONGOC_ENABLE_COMPRESSION_ZLIB, 0) fi else AC_SUBST(MONGOC_ENABLE_COMPRESSION, 0) AC_SUBST(MONGOC_ENABLE_COMPRESSION_ZLIB, 0) AC_SUBST(MONGOC_ENABLE_COMPRESSION_SNAPPY, 0) fi AC_SUBST(MONGOC_ENABLE_COMPRESSION_ZSTD, 0) mongodb-1.6.1/scripts/autotools/libmongoc/CheckICU.m40000644000076500000240000000225613572250757021777 0ustar alcaeusstaffPHP_ARG_WITH([mongodb-icu], [whether to enable ICU for SASLPrep with SCRAM-SHA-256 authentication], [AS_HELP_STRING([--with-mongodb-icu=@<:@auto/yes/no@:>@], [MongoDB: Enable ICU for SASLPrep with SCRAM-SHA-256 authentication [default=auto]])], [auto], [no]) AS_IF([test "$PHP_MONGODB_ICU" = "auto" -o "$PHP_MONGODB_ICU" = "yes"],[ found_icu=no PKG_CHECK_MODULES([PHP_MONGODB_ICU],[icu-uc],[ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PHP_MONGODB_ICU_CFLAGS" PHP_EVAL_LIBLINE([$PHP_MONGODB_ICU_LIBS],[MONGODB_SHARED_LIBADD]) PHP_MONGODB_ICU="yes" found_icu=yes ],[ found_icu=no ]) if test "$PHP_MONGODB_ICU" = "yes" -a "$found_icu" = "no"; then AC_MSG_ERROR([ICU libraries and development headers could not be found]) fi ]) AS_IF([test "$PHP_MONGODB_ICU" = "auto"],[ PHP_MONGODB_ICU="no" ]) if test ! 
\( "$PHP_MONGODB_ICU" = "yes" -o "$PHP_MONGODB_ICU" = "no" \); then AC_MSG_WARN([unsupported --with-mongodb-icu value: $PHP_MONGODB_ICU]) fi if test "$PHP_MONGODB_ICU" = "yes"; then AC_SUBST(MONGOC_ENABLE_ICU, 1) else AC_SUBST(MONGOC_ENABLE_ICU, 0) fi mongodb-1.6.1/scripts/autotools/libmongoc/CheckResolv.m40000644000076500000240000000475413572250757022636 0ustar alcaeusstaffdnl Disable Windows DNSAPI AC_SUBST(MONGOC_HAVE_DNSAPI, 0) found_resolv="no" old_LIBS="$LIBS" LIBS="$LIBS -lresolv" dnl Thread-safe DNS query function for _mongoc_client_get_srv. dnl Could be a macro, not a function, so check with AC_LINK_IFELSE. AC_MSG_CHECKING([for res_nsearch]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include #include #include #include ]], [[ int len; unsigned char reply[1024]; res_state statep; len = res_nsearch( statep, "example.com", ns_c_in, ns_t_srv, reply, sizeof(reply)); ]])], [ AC_MSG_RESULT([yes]) AC_SUBST(MONGOC_HAVE_RES_SEARCH, 0) AC_SUBST(MONGOC_HAVE_RES_NSEARCH, 1) found_resolv="yes" dnl We have res_nsearch. Call res_ndestroy (BSD/Mac) or res_nclose (Linux)? AC_MSG_CHECKING([for res_ndestroy]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include #include #include #include ]], [[ res_state statep; res_ndestroy(statep); ]])], [ AC_MSG_RESULT([yes]) AC_SUBST(MONGOC_HAVE_RES_NDESTROY, 1) AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 0) ], [ AC_MSG_RESULT([no]) AC_SUBST(MONGOC_HAVE_RES_NDESTROY, 0) AC_MSG_CHECKING([for res_nclose]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include #include #include #include ]], [[ res_state statep; res_nclose(statep); ]])], [ AC_MSG_RESULT([yes]) AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 1) ], [ AC_MSG_RESULT([no]) AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 0) ]) ]) ],[ AC_MSG_RESULT([no]) AC_SUBST(MONGOC_HAVE_RES_NSEARCH, 0) AC_SUBST(MONGOC_HAVE_RES_NDESTROY, 0) AC_SUBST(MONGOC_HAVE_RES_NCLOSE, 0) dnl Thread-unsafe function. AC_MSG_CHECKING([for res_search]) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ #include #include #include #include ]], [[ int len; unsigned char reply[1024]; len = res_search("example.com", ns_c_in, ns_t_srv, reply, sizeof(reply)); ]])], [ AC_MSG_RESULT([yes]) AC_SUBST(MONGOC_HAVE_RES_SEARCH, 1) found_resolv="yes" ], [ AC_MSG_RESULT([no]) AC_SUBST(MONGOC_HAVE_RES_SEARCH, 0) ]) ]) LIBS="$old_LIBS" AS_IF([test "$found_resolv" = "yes"],[ PHP_ADD_LIBRARY([resolv],,[MONGODB_SHARED_LIBADD]) ]) mongodb-1.6.1/scripts/autotools/libmongoc/CheckSSL.m40000644000076500000240000002130513572250757022014 0ustar alcaeusstaffPHP_ARG_WITH([mongodb-ssl], [whether to enable crypto and TLS], [AS_HELP_STRING([--with-mongodb-ssl=@<:@auto/no/openssl/libressl/darwin@:>@], [MongoDB: Enable TLS connections and SCRAM-SHA-1 authentication [default=auto]])], [auto], [no]) PHP_ARG_WITH([openssl-dir], [deprecated option for OpenSSL library path], [AS_HELP_STRING([--with-openssl-dir=@<:@auto/DIR@:>@], [MongoDB: OpenSSL library path (deprecated for pkg-config) [default=auto]])], [auto], [no]) dnl PHP_ARG_WITH without a value assigns "yes". Treat it like "auto" but required. 
AS_IF([test "$PHP_MONGODB_SSL" = "yes"],[ crypto_required="yes" PHP_MONGODB_SSL="auto" ]) AS_IF([test "$PHP_MONGODB_SSL" = "darwin" -o \( "$PHP_MONGODB_SSL" = "auto" -a "$os_darwin" = "yes" \)],[ AC_MSG_NOTICE([checking whether Darwin SSL is available]) if test "$os_darwin" = "no"; then AC_MSG_ERROR([Darwin SSL is only supported on macOS]) fi dnl PHP_FRAMEWORKS is only used for SAPI builds, so use MONGODB_SHARED_LIBADD for shared builds if test "$ext_shared" = "yes"; then MONGODB_SHARED_LIBADD="-framework Security -framework CoreFoundation $MONGODB_SHARED_LIBADD" else PHP_ADD_FRAMEWORK([Security]) PHP_ADD_FRAMEWORK([CoreFoundation]) fi PHP_MONGODB_SSL="darwin" ]) AS_IF([test "$PHP_MONGODB_SSL" = "openssl" -o "$PHP_MONGODB_SSL" = "auto"],[ AC_MSG_NOTICE([checking whether OpenSSL is available]) found_openssl="no" PKG_CHECK_MODULES([PHP_MONGODB_SSL],[openssl],[ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PHP_MONGODB_SSL_CFLAGS" PHP_EVAL_LIBLINE([$PHP_MONGODB_SSL_LIBS],[MONGODB_SHARED_LIBADD]) PHP_MONGODB_SSL="openssl" found_openssl="yes" old_CFLAGS="$CFLAGS" CFLAGS="$PHP_MONGODB_SSL_CFLAGS $CFLAGS" AC_CHECK_DECLS([ASN1_STRING_get0_data], [have_ASN1_STRING_get0_data="yes"], [have_ASN1_STRING_get0_data="no"], [[#include ]]) CFLAGS="$old_CFLAGS" ],[ unset OPENSSL_INCDIR unset OPENSSL_LIBDIR dnl Use a list of directories from PHP_SETUP_OPENSSL by default. dnl Support documented "auto" and older, undocumented "yes" options if test "$PHP_OPENSSL_DIR" = "auto" -o "$PHP_OPENSSL_DIR" = "yes"; then PHP_OPENSSL_DIR="/usr/local/ssl /usr/local /usr /usr/local/openssl" fi for i in $PHP_OPENSSL_DIR; do if test -r $i/include/openssl/evp.h; then OPENSSL_INCDIR="$i/include" fi if test -r $i/$PHP_LIBDIR/libssl.a -o -r $i/$PHP_LIBDIR/libssl.$SHLIB_SUFFIX_NAME; then OPENSSL_LIBDIR="$i/$PHP_LIBDIR" fi test -n "$OPENSSL_INCDIR" && test -n "$OPENSSL_LIBDIR" && break done if test -n "$OPENSSL_LIBDIR"; then OPENSSL_LIBDIR_LDFLAG="-L$OPENSSL_LIBDIR" fi PHP_CHECK_LIBRARY([crypto], [EVP_DigestInit_ex], [have_crypto_lib="yes"], [have_crypto_lib="no"], [$OPENSSL_LIBDIR_LDFLAG]) AC_MSG_NOTICE([checking whether OpenSSL >= 1.1.0 is available]) PHP_CHECK_LIBRARY([ssl], [OPENSSL_init_ssl], [have_ssl_lib="yes"], [have_ssl_lib="no"], [$OPENSSL_LIBDIR_LDFLAG -lcrypto]) if test "$have_ssl_lib" = "no"; then AC_MSG_NOTICE([checking whether OpenSSL < 1.1.0 is available]) PHP_CHECK_LIBRARY([ssl], [SSL_library_init], [have_ssl_lib="yes"], [have_ssl_lib="no"], [$OPENSSL_LIBDIR_LDFLAG -lcrypto]) fi if test "$have_ssl_lib" = "yes" -a "$have_crypto_lib" = "yes"; then PHP_ADD_LIBRARY([ssl],,[MONGODB_SHARED_LIBADD]) PHP_ADD_LIBRARY([crypto],,[MONGODB_SHARED_LIBADD]) if test -n "$OPENSSL_LIBDIR"; then PHP_ADD_LIBPATH([$OPENSSL_LIBDIR],[MONGODB_SHARED_LIBADD]) fi if test -n "$OPENSSL_INCDIR"; then PHP_ADD_INCLUDE($OPENSSL_INCDIR) fi old_CFLAGS="$CFLAGS" CFLAGS="-I$OPENSSL_INCDIR $CFLAGS" AC_CHECK_DECLS([ASN1_STRING_get0_data], [have_ASN1_STRING_get0_data="yes"], [have_ASN1_STRING_get0_data="no"], [[#include ]]) CFLAGS="$old_CFLAGS" PHP_MONGODB_SSL="openssl" found_openssl="yes" fi ]) if test "$PHP_MONGODB_SSL" = "openssl" -a "$found_openssl" != "yes"; then AC_MSG_ERROR([OpenSSL libraries and development headers could not be found]) fi ]) AS_IF([test "$PHP_MONGODB_SSL" = "libressl" -o "$PHP_MONGODB_SSL" = "auto"],[ AC_MSG_NOTICE([checking whether LibreSSL is available]) found_libressl="no" PKG_CHECK_MODULES([PHP_MONGODB_SSL],[libtls libcrypto],[ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PHP_MONGODB_SSL_CFLAGS" 
PHP_EVAL_LIBLINE([$PHP_MONGODB_SSL_LIBS],[MONGODB_SHARED_LIBADD]) PHP_MONGODB_SSL="libressl" found_libressl="yes" ],[ PHP_CHECK_LIBRARY([crypto], [EVP_DigestInit_ex], [have_crypto_lib="yes"], [have_crypto_lib="no"]) PHP_CHECK_LIBRARY([tls], [tls_init], [have_ssl_lib="yes"], [have_ssl_lib="no"], [-lcrypto]) if test "$have_ssl_lib" = "yes" -a "$have_crypto_lib" = "yes"; then PHP_ADD_LIBRARY([tls],,[MONGODB_SHARED_LIBADD]) PHP_ADD_LIBRARY([crypto],,[MONGODB_SHARED_LIBADD]) PHP_MONGODB_SSL="libressl" found_libressl="yes" fi ]) if test "$PHP_MONGODB_SSL" = "libressl" -a "$found_libressl" != "yes"; then AC_MSG_ERROR([LibreSSL libraries and development headers could not be found]) fi ]) AS_IF([test "$PHP_MONGODB_SSL" = "auto"],[ if test "x$crypto_required" = "xyes"; then AC_MSG_ERROR([crypto and TLS libraries not found]) fi PHP_MONGODB_SSL="no" ]) AC_MSG_CHECKING([which TLS library to use]) AC_MSG_RESULT([$PHP_MONGODB_SSL]) dnl Disable Windows SSL and crypto AC_SUBST(MONGOC_ENABLE_SSL_SECURE_CHANNEL, 0) AC_SUBST(MONGOC_ENABLE_CRYPTO_CNG, 0) if test "$PHP_MONGODB_SSL" = "openssl" -o "$PHP_MONGODB_SSL" = "libressl" -o "$PHP_MONGODB_SSL" = "darwin"; then AC_SUBST(MONGOC_ENABLE_SSL, 1) AC_SUBST(MONGOC_ENABLE_CRYPTO, 1) if test "$PHP_MONGODB_SSL" = "darwin"; then AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 0) AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 0) AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 1) AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 0) AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 1) elif test "$PHP_MONGODB_SSL" = "openssl"; then AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 1) AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 0) AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 0) AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 1) AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 0) elif test "$PHP_MONGODB_SSL" = "libressl"; then AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 0) AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 1) AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 0) AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 1) AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 0) fi else AC_SUBST(MONGOC_ENABLE_SSL, 0) AC_SUBST(MONGOC_ENABLE_SSL_LIBRESSL, 0) AC_SUBST(MONGOC_ENABLE_SSL_OPENSSL, 0) AC_SUBST(MONGOC_ENABLE_SSL_SECURE_TRANSPORT, 0) AC_SUBST(MONGOC_ENABLE_CRYPTO, 0) AC_SUBST(MONGOC_ENABLE_CRYPTO_LIBCRYPTO, 0) AC_SUBST(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO, 0) fi if test "x$have_ASN1_STRING_get0_data" = "xyes"; then AC_SUBST(MONGOC_HAVE_ASN1_STRING_GET0_DATA, 1) else AC_SUBST(MONGOC_HAVE_ASN1_STRING_GET0_DATA, 0) fi PHP_ARG_ENABLE([mongodb-crypto-system-profile], [whether to use system crypto profile], [AS_HELP_STRING([--enable-mongodb-crypto-system-profile], [MongoDB: Use system crypto profile (OpenSSL only) [default=no]])], [no], [no]) PHP_ARG_WITH([system-ciphers], [deprecated option for whether to use system crypto profile], AS_HELP_STRING([--enable-system-ciphers], [MongoDB: whether to use system crypto profile (deprecated for --enable-mongodb-crypto-system-profile) [default=no]]), [no], [no]) dnl Also consider the deprecated --enable-system-ciphers option if test "$PHP_MONGODB_CRYPTO_SYSTEM_PROFILE" = "yes" -o "$PHP_SYSTEM_CIPHERS" = "yes"; then if test "$PHP_MONGODB_SSL" = "openssl"; then AC_SUBST(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE, 1) else AC_MSG_ERROR([System crypto profile is only available with OpenSSL]) fi else AC_SUBST(MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE, 0) fi mongodb-1.6.1/scripts/autotools/libmongoc/CheckSasl.m40000644000076500000240000000510713572250757022257 0ustar alcaeusstaffPHP_ARG_WITH([mongodb-sasl], [whether to enable SASL for Kerberos 
authentication], [AS_HELP_STRING([--with-mongodb-sasl=@<:@auto/no/cyrus@:>@], [MongoDB: Enable SASL for Kerberos authentication [default=auto]])], [auto], [no]) dnl PHP_ARG_WITH without a value assigns "yes". Treat it like "auto" but required. AS_IF([test "$PHP_MONGODB_SASL" = "cyrus" -o "$PHP_MONGODB_SASL" = "auto" -o "$PHP_MONGODB_SASL" = "yes"],[ found_cyrus="no" PKG_CHECK_MODULES([PHP_MONGODB_SASL],[libsasl2],[ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PHP_MONGODB_SASL_CFLAGS" PHP_EVAL_LIBLINE([$PHP_MONGODB_SASL_LIBS],[MONGODB_SHARED_LIBADD]) PHP_MONGODB_SASL="cyrus" found_cyrus="yes" ],[ PHP_CHECK_LIBRARY([sasl2], [sasl_client_init], [have_sasl2_lib="yes"], [have_sasl2_lib="no"]) AC_CHECK_HEADER([sasl/sasl.h], [have_sasl_headers=yes], [have_sasl_headers=no]) if test "$have_sasl2_lib" = "yes" -a "$have_sasl_headers" = "yes"; then PHP_ADD_LIBRARY([sasl2],,[MONGODB_SHARED_LIBADD]) PHP_MONGODB_SASL="cyrus" found_cyrus="yes" fi ]) if test "$found_cyrus" = "yes"; then PHP_CHECK_LIBRARY([sasl2], [sasl_client_done], [have_sasl_client_done=yes], [have_sasl_client_done=no], $MONGODB_SHARED_LIBADD) fi if test \( "$PHP_MONGODB_SASL" = "cyrus" -o "$PHP_MONGODB_SASL" = "yes" \) -a "$found_cyrus" != "yes"; then AC_MSG_ERROR([Cyrus SASL libraries and development headers could not be found]) fi ]) AS_IF([test "$PHP_MONGODB_SASL" = "auto"],[ PHP_MONGODB_SASL="no" ]) dnl Warn for unsupported values (e.g. Cyrus SASL search path) if test ! \( "$PHP_MONGODB_SASL" = "cyrus" -o "$PHP_MONGODB_SASL" = "no" \); then AC_MSG_WARN([unsupported --with-mongodb-sasl value: $PHP_MONGODB_SASL]) fi AC_MSG_CHECKING([which SASL library to use]) AC_MSG_RESULT([$PHP_MONGODB_SASL]) dnl Disable Windows SSPI and GSSAPI AC_SUBST(MONGOC_ENABLE_SASL_SSPI, 0) AC_SUBST(MONGOC_ENABLE_SASL_GSSAPI, 0) if test "$PHP_MONGODB_SASL" = "cyrus"; then AC_SUBST(MONGOC_ENABLE_SASL, 1) AC_SUBST(MONGOC_ENABLE_SASL_CYRUS, 1) if test "x$have_sasl_client_done" = "xyes"; then AC_SUBST(MONGOC_HAVE_SASL_CLIENT_DONE, 1) else AC_SUBST(MONGOC_HAVE_SASL_CLIENT_DONE, 0) fi else AC_SUBST(MONGOC_ENABLE_SASL, 0) AC_SUBST(MONGOC_ENABLE_SASL_CYRUS, 0) AC_SUBST(MONGOC_HAVE_SASL_CLIENT_DONE, 0) fi mongodb-1.6.1/scripts/autotools/libmongoc/FindDependencies.m40000644000076500000240000000311713572250757023605 0ustar alcaeusstaff# Solaris needs to link against socket libs if test "$os_solaris" = "yes"; then PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS -D__EXTENSIONS__" PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS -D_XOPEN_SOURCE=1" PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS -D_XOPEN_SOURCE_EXTENDED=1" PHP_ADD_LIBRARY([socket],,[MONGODB_SHARED_LIBADD]) PHP_ADD_LIBRARY([nsl],,[MONGODB_SHARED_LIBADD]) fi # Check for shm functions AC_CHECK_FUNCS([shm_open],[],[ AC_CHECK_LIB([rt], [shm_open], [PHP_ADD_LIBRARY([rt],,[MONGODB_SHARED_LIBADD])], []) ]) # Check for sched_getcpu AC_CHECK_FUNC([sched_getcpu], [AC_SUBST(MONGOC_HAVE_SCHED_GETCPU, 1)], [AC_SUBST(MONGOC_HAVE_SCHED_GETCPU, 0)]) AC_CHECK_TYPE([socklen_t], [AC_SUBST(MONGOC_HAVE_SOCKLEN, 1)], [AC_SUBST(MONGOC_HAVE_SOCKLEN, 0)], [#include ]) # Check for pthreads. libmongoc's original FindDependencies.m4 script did not # require pthreads, but it does appear to be necessary on non-Windows platforms # based on mongoc-openssl.c and mongoc-thread-private.h. 
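# AX_PTHREAD (see scripts/autotools/m4/ax_pthread.m4 in this package) sets
# PTHREAD_CFLAGS and PTHREAD_LIBS; both are folded into the bundled CFLAGS and
# MONGODB_SHARED_LIBADD below, mirroring the equivalent libbson check.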
AX_PTHREAD([ PHP_MONGODB_BUNDLED_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS $PTHREAD_CFLAGS" PHP_EVAL_LIBLINE([$PTHREAD_LIBS],[MONGODB_SHARED_LIBADD]) # PTHREAD_CFLAGS may come back as "-pthread", which should also be used when # linking. We can trust PHP_EVAL_LIBLINE to ignore other values. PHP_EVAL_LIBLINE([$PTHREAD_CFLAGS],[MONGODB_SHARED_LIBADD]) ],[ AC_MSG_ERROR([libmongoc requires pthreads on non-Windows platforms.]) ]) mongodb-1.6.1/scripts/autotools/libmongoc/PlatformFlags.m40000644000076500000240000000162313572250757023157 0ustar alcaeusstaffdnl Ignore OpenSSL deprecation warnings on OSX AS_IF([test "$os_darwin" = "yes"], [AX_CHECK_COMPILE_FLAG([-Wno-deprecated-declarations], [STD_CFLAGS="$STD_CFLAGS -Wno-deprecated-declarations"])]) dnl We know there are some cast-align issues on OSX AS_IF([test "$os_darwin" = "yes"], [AX_CHECK_COMPILE_FLAG([-Wno-cast-align], [STD_CFLAGS="$STD_CFLAGS -Wno-cast-align"])]) AS_IF([test "$os_darwin" = "yes"], [AX_CHECK_COMPILE_FLAG([-Wno-unneeded-internal-declaration], [STD_CFLAGS="$STD_CFLAGS -Wno-unneeded-internal-declaration"])]) AS_IF([test "$os_darwin" = "yes"], [AX_CHECK_COMPILE_FLAG([-Wno-error=unused-command-line-argument], [STD_CFLAGS="$STD_CFLAGS -Wno-error=unused-command-line-argument"])]) dnl We know there are some cast-align issues on Solaris AS_IF([test "$os_solaris" = "yes"], [AX_CHECK_COMPILE_FLAG([-Wno-cast-align], [STD_CFLAGS="$STD_CFLAGS -Wno-cast-align"])]) mongodb-1.6.1/scripts/autotools/libmongoc/Versions.m40000644000076500000240000000124213572250757022223 0ustar alcaeusstaffMONGOC_CURRENT_FILE=[]PHP_EXT_SRCDIR(mongodb)[/src/LIBMONGOC_VERSION_CURRENT] MONGOC_VERSION=$(cat $MONGOC_CURRENT_FILE) dnl Ensure newline for "cut" implementations that need it, e.g. HP-UX. MONGOC_MAJOR_VERSION=$( (cat $MONGOC_CURRENT_FILE; echo) | cut -d- -f1 | cut -d. -f1 ) MONGOC_MINOR_VERSION=$( (cat $MONGOC_CURRENT_FILE; echo) | cut -d- -f1 | cut -d. -f2 ) MONGOC_MICRO_VERSION=$( (cat $MONGOC_CURRENT_FILE; echo) | cut -d- -f1 | cut -d. -f3 ) MONGOC_PRERELEASE_VERSION=$(cut -s -d- -f2 $MONGOC_CURRENT_FILE) AC_SUBST(MONGOC_VERSION) AC_SUBST(MONGOC_MAJOR_VERSION) AC_SUBST(MONGOC_MINOR_VERSION) AC_SUBST(MONGOC_MICRO_VERSION) AC_SUBST(MONGOC_PRERELEASE_VERSION) mongodb-1.6.1/scripts/autotools/libmongoc/WeakSymbols.m40000644000076500000240000000040113572250757022647 0ustar alcaeusstaffAC_MSG_CHECKING(if weak symbols are supported) AC_LINK_IFELSE([AC_LANG_PROGRAM([[ __attribute__((weak)) void __dummy(void *x) { } void f(void *x) { __dummy(x); } ]], [[ ]] )], [AC_MSG_RESULT(yes) AC_SUBST(MONGOC_HAVE_WEAK_SYMBOLS, 1)], [AC_MSG_RESULT(no)]) mongodb-1.6.1/scripts/autotools/m4/as_var_copy.m40000644000076500000240000000040213572250757021264 0ustar alcaeusstaffdnl AS_VAR_COPY is available in AC 2.64 and on, but we only require 2.59. dnl If we're on an older version, we define it ourselves: m4_ifndef([AS_VAR_COPY], [m4_define([AS_VAR_COPY], [AS_LITERAL_IF([$1[]$2], [$1=$$2], [eval $1=\$$2])])]) mongodb-1.6.1/scripts/autotools/m4/ax_check_compile_flag.m40000644000076500000240000000625113572250757023235 0ustar alcaeusstaff# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_check_compile_flag.html # =========================================================================== # # SYNOPSIS # # AX_CHECK_COMPILE_FLAG(FLAG, [ACTION-SUCCESS], [ACTION-FAILURE], [EXTRA-FLAGS]) # # DESCRIPTION # # Check whether the given FLAG works with the current language's compiler # or gives an error. 
(Warnings, however, are ignored) # # ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on # success/failure. # # If EXTRA-FLAGS is defined, it is added to the current language's default # flags (e.g. CFLAGS) when the check is done. The check is thus made with # the flags: "CFLAGS EXTRA-FLAGS FLAG". This can for example be used to # force the compiler to issue an error when a bad flag is given. # # NOTE: Implementation based on AX_CFLAGS_GCC_OPTION. Please keep this # macro in sync with AX_CHECK_{PREPROC,LINK}_FLAG. # # LICENSE # # Copyright (c) 2008 Guido U. Draheim # Copyright (c) 2011 Maarten Bosmans # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 2 AC_DEFUN([AX_CHECK_COMPILE_FLAG], [AC_PREREQ(2.59)dnl for _AC_LANG_PREFIX AS_VAR_PUSHDEF([CACHEVAR],[ax_cv_check_[]_AC_LANG_ABBREV[]flags_$4_$1])dnl AC_CACHE_CHECK([whether _AC_LANG compiler accepts $1], CACHEVAR, [ ax_check_save_flags=$[]_AC_LANG_PREFIX[]FLAGS _AC_LANG_PREFIX[]FLAGS="$[]_AC_LANG_PREFIX[]FLAGS $4 $1" AC_COMPILE_IFELSE([AC_LANG_PROGRAM()], [AS_VAR_SET(CACHEVAR,[yes])], [AS_VAR_SET(CACHEVAR,[no])]) _AC_LANG_PREFIX[]FLAGS=$ax_check_save_flags]) AS_IF([test x"AS_VAR_GET(CACHEVAR)" = xyes], [m4_default([$2], :)], [m4_default([$3], :)]) AS_VAR_POPDEF([CACHEVAR])dnl ])dnl AX_CHECK_COMPILE_FLAGS mongodb-1.6.1/scripts/autotools/m4/ax_prototype.m40000644000076500000240000001726713572250757021535 0ustar alcaeusstaff# =========================================================================== # https://www.gnu.org/software/autoconf-archive/ax_prototype.html # =========================================================================== # # SYNOPSIS # # AX_PROTOTYPE(function, includes, code, TAG1, values1 [, TAG2, values2 [...]]) # # DESCRIPTION # # Try all the combinations of , ... to successfully compile # . , , ... are substituted in and with # values found in , , ... respectively. , # , ... contain a list of possible values for each corresponding # tag and all combinations are tested. When AC_TRY_COMPILE(include, code) # is successfull for a given substitution, the macro stops and defines the # following macros: FUNCTION_TAG1, FUNCTION_TAG2, ... 
using AC_DEFINE() # with values set to the current values of , , ... If no # combination is successfull the configure script is aborted with a # message. # # Intended purpose is to find which combination of argument types is # acceptable for a given function . It is recommended to list # the most specific types first. For instance ARG1, [size_t, int] instead # of ARG1, [int, size_t]. # # Generic usage pattern: # # 1) add a call in configure.in # # AX_PROTOTYPE(...) # # 2) call autoheader to see which symbols are not covered # # 3) add the lines in acconfig.h # # /* Type of Nth argument of function */ # #undef FUNCTION_ARGN # # 4) Within the code use FUNCTION_ARGN instead of an hardwired type # # Complete example: # # 1) configure.in # # AX_PROTOTYPE(getpeername, # [ # #include # #include # ], # [ # int a = 0; # ARG2 * b = 0; # ARG3 * c = 0; # getpeername(a, b, c); # ], # ARG2, [struct sockaddr, void], # ARG3, [socklen_t, size_t, int, unsigned int, long unsigned int]) # # 2) call autoheader # # autoheader: Symbol `GETPEERNAME_ARG2' is not covered by ./acconfig.h # autoheader: Symbol `GETPEERNAME_ARG3' is not covered by ./acconfig.h # # 3) acconfig.h # # /* Type of second argument of getpeername */ # #undef GETPEERNAME_ARG2 # # /* Type of third argument of getpeername */ # #undef GETPEERNAME_ARG3 # # 4) in the code # # ... # GETPEERNAME_ARG2 name; # GETPEERNAME_ARG3 namelen; # ... # ret = getpeername(socket, &name, &namelen); # ... # # Implementation notes: generating all possible permutations of the # arguments is not easily done with the usual mixture of shell and m4, # that is why this macro is almost 100% m4 code. It generates long but # simple to read code. # # LICENSE # # Copyright (c) 2009 Loic Dachary # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. 
#serial 6 AU_ALIAS([AC_PROTOTYPE], [AX_PROTOTYPE]) AC_DEFUN([AX_PROTOTYPE],[ dnl dnl Upper case function name dnl pushdef([function],translit([$1], [a-z], [A-Z])) dnl dnl Collect tags that will be substituted dnl pushdef([tags],[AX_PROTOTYPE_TAGS(builtin([shift],builtin([shift],builtin([shift],$@))))]) dnl dnl Wrap in a 1 time loop, when a combination is found break to stop the combinatory exploration dnl for i in 1 do AX_PROTOTYPE_LOOP(AX_PROTOTYPE_REVERSE($1, AX_PROTOTYPE_SUBST($2,tags),AX_PROTOTYPE_SUBST($3,tags),builtin([shift],builtin([shift],builtin([shift],$@))))) AC_MSG_ERROR($1 unable to find a working combination) done popdef([tags]) popdef([function]) ]) dnl dnl AX_PROTOTYPE_REVERSE(list) dnl dnl Reverse the order of the dnl AC_DEFUN([AX_PROTOTYPE_REVERSE],[ifelse($#,0,,$#,1,[[$1]],[AX_PROTOTYPE_REVERSE(builtin([shift],$@)),[$1]])]) dnl dnl AX_PROTOTYPE_SUBST(string, tag) dnl dnl Substitute all occurence of in with _VAL. dnl Assumes that tag_VAL is a macro containing the value associated to tag. dnl AC_DEFUN([AX_PROTOTYPE_SUBST],[ifelse($2,,[$1],[AX_PROTOTYPE_SUBST(patsubst([$1],[$2],[$2[]_VAL]),builtin([shift],builtin([shift],$@)))])]) dnl dnl AX_PROTOTYPE_TAGS([tag, values, [tag, values ...]]) dnl dnl Generate a list of by skipping . dnl AC_DEFUN([AX_PROTOTYPE_TAGS],[ifelse($1,,[],[$1, AX_PROTOTYPE_TAGS(builtin([shift],builtin([shift],$@)))])]) dnl dnl AX_PROTOTYPE_DEFINES(tags) dnl dnl Generate a AC_DEFINE(function_tag, tag_VAL) for each tag in list dnl Assumes that function is a macro containing the name of the function in upper case dnl and that tag_VAL is a macro containing the value associated to tag. dnl AC_DEFUN([AX_PROTOTYPE_DEFINES],[ifelse($1,,[], [AC_DEFINE(function[]_$1, $1_VAL, [ ]) AC_SUBST(function[]_$1, "$1_VAL") AX_PROTOTYPE_DEFINES(builtin([shift],$@))])]) dnl dnl AX_PROTOTYPE_STATUS(tags) dnl dnl Generates a message suitable for argument to AC_MSG_* macros. For each tag dnl in the list the message tag => tag_VAL is generated. dnl Assumes that tag_VAL is a macro containing the value associated to tag. dnl AC_DEFUN([AX_PROTOTYPE_STATUS],[ifelse($1,,[],[$1 => $1_VAL AX_PROTOTYPE_STATUS(builtin([shift],$@))])]) dnl dnl AX_PROTOTYPE_EACH(tag, values) dnl dnl Call AX_PROTOTYPE_LOOP for each values and define the macro tag_VAL to dnl the current value. dnl AC_DEFUN([AX_PROTOTYPE_EACH],[ ifelse($2,, [ ], [ pushdef([$1_VAL], $2) AX_PROTOTYPE_LOOP(rest) popdef([$1_VAL]) AX_PROTOTYPE_EACH($1, builtin([shift], builtin([shift], $@))) ]) ]) dnl dnl AX_PROTOTYPE_LOOP([tag, values, [tag, values ...]], code, include, function) dnl dnl If there is a tag/values pair, call AX_PROTOTYPE_EACH with it. dnl If there is no tag/values pair left, tries to compile the code and include dnl using AC_TRY_COMPILE. If it compiles, AC_DEFINE all the tags to their dnl current value and exit with success. 
dnl AC_DEFUN([AX_PROTOTYPE_LOOP],[ ifelse(builtin([eval], $# > 3), 1, [ pushdef([rest],[builtin([shift],builtin([shift],$@))]) AX_PROTOTYPE_EACH($2,$1) popdef([rest]) ], [ AC_MSG_CHECKING($3 AX_PROTOTYPE_STATUS(tags)) dnl dnl Activate fatal warnings if possible, gives better guess dnl ac_save_CPPFLAGS="$CPPFLAGS" if test "$GCC" = "yes" ; then CPPFLAGS="$CPPFLAGS -Werror" ; fi AC_TRY_COMPILE($2, $1, [ CPPFLAGS="$ac_save_CPPFLAGS" AC_MSG_RESULT(ok) AX_PROTOTYPE_DEFINES(tags) break; ], [ CPPFLAGS="$ac_save_CPPFLAGS" AC_MSG_RESULT(not ok) ]) ] ) ]) mongodb-1.6.1/scripts/autotools/m4/ax_pthread.m40000644000076500000240000003303013572250757021101 0ustar alcaeusstaff# =========================================================================== # http://www.gnu.org/software/autoconf-archive/ax_pthread.html # =========================================================================== # # SYNOPSIS # # AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) # # DESCRIPTION # # This macro figures out how to build C programs using POSIX threads. It # sets the PTHREAD_LIBS output variable to the threads library and linker # flags, and the PTHREAD_CFLAGS output variable to any special C compiler # flags that are needed. (The user can also force certain compiler # flags/libs to be tested by setting these environment variables.) # # Also sets PTHREAD_CC to any special C compiler that is needed for # multi-threaded programs (defaults to the value of CC otherwise). (This # is necessary on AIX to use the special cc_r compiler alias.) # # NOTE: You are assumed to not only compile your program with these flags, # but also link it with them as well. e.g. you should link with # $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS # # If you are only building threads programs, you may wish to use these # variables in your default LIBS, CFLAGS, and CC: # # LIBS="$PTHREAD_LIBS $LIBS" # CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # CC="$PTHREAD_CC" # # In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant # has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name # (e.g. PTHREAD_CREATE_UNDETACHED on AIX). # # Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the # PTHREAD_PRIO_INHERIT symbol is defined when compiling with # PTHREAD_CFLAGS. # # ACTION-IF-FOUND is a list of shell commands to run if a threads library # is found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it # is not found. If ACTION-IF-FOUND is not specified, the default action # will define HAVE_PTHREAD. # # Please let the authors know if this macro fails on any platform, or if # you have any other suggestions or comments. This macro was based on work # by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help # from M. Frigo), as well as ac_pthread and hb_pthread macros posted by # Alejandro Forero Cuervo to the autoconf macro repository. We are also # grateful for the helpful feedback of numerous users. # # Updated for Autoconf 2.68 by Daniel Richard G. # # LICENSE # # Copyright (c) 2008 Steven G. Johnson # Copyright (c) 2011 Daniel Richard G. # # This program is free software: you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General # Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program. If not, see . # # As a special exception, the respective Autoconf Macro's copyright owner # gives unlimited permission to copy, distribute and modify the configure # scripts that are the output of Autoconf when processing the Macro. You # need not follow the terms of the GNU General Public License when using # or distributing such scripts, even though portions of the text of the # Macro appear in them. The GNU General Public License (GPL) does govern # all other use of the material that constitutes the Autoconf Macro. # # This special exception to the GPL applies to versions of the Autoconf # Macro released by the Autoconf Archive. When you make and distribute a # modified version of the Autoconf Macro, you may extend this special # exception to the GPL to apply to your modified version as well. #serial 21 AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD]) AC_DEFUN([AX_PTHREAD], [ AC_REQUIRE([AC_CANONICAL_HOST]) AC_LANG_PUSH([C]) ax_pthread_ok=no # We used to check for pthread.h first, but this fails if pthread.h # requires special compiler flags (e.g. on True64 or Sequent). # It gets checked for in the link test anyway. # First of all, check if the user has set any of the PTHREAD_LIBS, # etcetera environment variables, and if threads linking works using # them: if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) AC_TRY_LINK_FUNC([pthread_join], [ax_pthread_ok=yes]) AC_MSG_RESULT([$ax_pthread_ok]) if test x"$ax_pthread_ok" = xno; then PTHREAD_LIBS="" PTHREAD_CFLAGS="" fi LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" fi # We must check for the threads library under a number of different # names; the ordering is very important because some systems # (e.g. DEC) have both -lpthread and -lpthreads, where one of the # libraries is broken (non-POSIX). # Create a list of thread flags to try. Items starting with a "-" are # C compiler flags, and other items are library names, except for "none" # which indicates that we try without any flags at all, and "pthread-config" # which is a program returning the flags for the Pth emulation library. ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" # The ordering *is* (sometimes) important. Some notes on the # individual items follow: # pthreads: AIX (must check this before -lpthread) # none: in case threads are in libc; should be tried before -Kthread and # other compiler flags to prevent continual compiler warnings # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) # lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) # -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) # -pthreads: Solaris/gcc # -mthreads: Mingw32/gcc, Lynx/gcc # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it # doesn't hurt to check since this sometimes defines pthreads too; # also defines -D_REENTRANT) # ... 
-mt is also the pthreads flag for HP/aCC # pthread: Linux, etcetera # --thread-safe: KAI C++ # pthread-config: use pthread-config program (for GNU Pth library) case ${host_os} in solaris*) # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based # tests will erroneously succeed. (We need to link with -pthreads/-mt/ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather # a function called by this macro, so we could check for that, but # who knows whether they'll stub that too in a future libc.) So, # we'll just look for -pthreads and -lpthread first: ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" ;; darwin*) if test "$c_compiler" != "clang"; then ax_pthread_flags="-pthread $ax_pthread_flags" fi ;; esac # Clang doesn't consider unrecognized options an error unless we specify # -Werror. We throw in some extra Clang-specific options to ensure that # this doesn't happen for GCC, which also accepts -Werror. AC_MSG_CHECKING([if compiler needs -Werror to reject unknown flags]) save_CFLAGS="$CFLAGS" ax_pthread_extra_flags="-Werror" CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])], [AC_MSG_RESULT([yes])], [ax_pthread_extra_flags= AC_MSG_RESULT([no])]) CFLAGS="$save_CFLAGS" if test x"$ax_pthread_ok" = xno; then for flag in $ax_pthread_flags; do case $flag in none) AC_MSG_CHECKING([whether pthreads work without any flags]) ;; -*) AC_MSG_CHECKING([whether pthreads work with $flag]) PTHREAD_CFLAGS="$flag" ;; pthread-config) AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no]) if test x"$ax_pthread_config" = xno; then continue; fi PTHREAD_CFLAGS="`pthread-config --cflags`" PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" ;; *) AC_MSG_CHECKING([for the pthreads library -l$flag]) PTHREAD_LIBS="-l$flag" ;; esac save_LIBS="$LIBS" save_CFLAGS="$CFLAGS" LIBS="$PTHREAD_LIBS $LIBS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" # Check for various functions. We must include pthread.h, # since some functions may be macros. (On the Sequent, we # need a special flag -Kthread to make this header compile.) # We check for pthread_join because it is in -lpthread on IRIX # while pthread_create is in libc. We check for pthread_attr_init # due to DEC craziness with -lpthreads. We check for # pthread_cleanup_push because it is one of the few pthread # functions on Solaris that doesn't have a non-functional libc stub. # We try pthread_create on general principles. AC_LINK_IFELSE([AC_LANG_PROGRAM([#include static void routine(void *a) { a = 0; } static void *start_routine(void *a) { return a; }], [pthread_t th; pthread_attr_t attr; pthread_create(&th, 0, start_routine, 0); pthread_join(th, 0); pthread_attr_init(&attr); pthread_cleanup_push(routine, 0); pthread_cleanup_pop(0) /* ; */])], [ax_pthread_ok=yes], []) LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" AC_MSG_RESULT([$ax_pthread_ok]) if test "x$ax_pthread_ok" = xyes; then break; fi PTHREAD_LIBS="" PTHREAD_CFLAGS="" done fi # Various other checks: if test "x$ax_pthread_ok" = xyes; then save_LIBS="$LIBS" LIBS="$PTHREAD_LIBS $LIBS" save_CFLAGS="$CFLAGS" CFLAGS="$CFLAGS $PTHREAD_CFLAGS" # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. 
AC_MSG_CHECKING([for joinable pthread attribute]) attr_name=unknown for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do AC_LINK_IFELSE([AC_LANG_PROGRAM([#include ], [int attr = $attr; return attr /* ; */])], [attr_name=$attr; break], []) done AC_MSG_RESULT([$attr_name]) if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], [$attr_name], [Define to necessary symbol if this constant uses a non-standard name on your system.]) fi AC_MSG_CHECKING([if more special flags are required for pthreads]) flag=no case ${host_os} in aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; osf* | hpux*) flag="-D_REENTRANT";; solaris*) if test "$GCC" = "yes"; then flag="-D_REENTRANT" else # TODO: What about Clang on Solaris? flag="-mt -D_REENTRANT" fi ;; esac AC_MSG_RESULT([$flag]) if test "x$flag" != xno; then PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" fi AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT], [ax_cv_PTHREAD_PRIO_INHERIT], [ AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]], [[int i = PTHREAD_PRIO_INHERIT;]])], [ax_cv_PTHREAD_PRIO_INHERIT=yes], [ax_cv_PTHREAD_PRIO_INHERIT=no]) ]) AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"], [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])]) LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" # More AIX lossage: compile with *_r variant if test "x$GCC" != xyes; then case $host_os in aix*) AS_CASE(["x/$CC"], [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6], [#handle absolute path differently from PATH based program lookup AS_CASE(["x$CC"], [x/*], [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])], [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])]) ;; esac fi fi test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" AC_SUBST([PTHREAD_LIBS]) AC_SUBST([PTHREAD_CFLAGS]) AC_SUBST([PTHREAD_CC]) # Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: if test x"$ax_pthread_ok" = xyes; then ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1]) : else ax_pthread_ok=no $2 fi AC_LANG_POP ])dnl AX_PTHREAD mongodb-1.6.1/scripts/autotools/m4/php_mongodb.m40000644000076500000240000000261713572250757021265 0ustar alcaeusstaffdnl dnl PHP_MONGODB_ADD_SOURCES(source-path, sources [, special-flags]) dnl dnl Adds sources which are located relative to source-path. source-path should dnl be relative to the extension directory (i.e. PHP_EXT_DIR). special-flags dnl will be passed to the compiler. dnl dnl This macro will call PHP_ADD_SOURCES or PHP_ADD_SOURCES_X depending on dnl whether the extension is being built statically or as a shared module. dnl AC_DEFUN([PHP_MONGODB_ADD_SOURCES],[ _src_path=PHP_EXT_DIR(mongodb) dnl Join extension directory and source path case $_src_path in ""[)] _src_path="$1" ;; */[)] _src_path="$_src_path$1" ;; *[)] _src_path="$_src_path/$1" ;; esac dnl Trim trailing slash from source path case $_src_path in */[)] _src_path=${_src_path%?} esac if test "$ext_shared" = "no"; then PHP_ADD_SOURCES($_src_path, [$2], [$3]) else PHP_ADD_SOURCES_X($_src_path, [$2], [$3], shared_objects_mongodb, yes) fi ]) dnl dnl PHP_MONGODB_ADD_INCLUDE(path) dnl dnl Adds an include path relative to the extension source directory (i.e. dnl PHP_EXT_SRCDIR). dnl AC_DEFUN([PHP_MONGODB_ADD_INCLUDE],[ PHP_ADD_INCLUDE(PHP_EXT_SRCDIR(mongodb)[/][$1]) ]) dnl dnl PHP_MONGODB_ADD_BUILD_DIR(path) dnl dnl Adds a build directory relative to the extension build directory (i.e. dnl PHP_EXT_BUILDDIR). 
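dnl Example (hypothetical path): PHP_MONGODB_ADD_BUILD_DIR([src/libbson/src/bson])
dnl registers that subdirectory so out-of-tree builds create it for object files.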
dnl AC_DEFUN([PHP_MONGODB_ADD_BUILD_DIR],[ PHP_ADD_BUILD_DIR(PHP_EXT_BUILDDIR(mongodb)[/][$1]) ]) mongodb-1.6.1/scripts/autotools/m4/pkg.m40000644000076500000240000001623113572250757017547 0ustar alcaeusstaff# pkg.m4 - Macros to locate and utilise pkg-config. -*- Autoconf -*- # serial 1 (pkg-config-0.24) # # Copyright © 2004 Scott James Remnant . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # PKG_PROG_PKG_CONFIG([MIN-VERSION]) # ---------------------------------- AC_DEFUN([PKG_PROG_PKG_CONFIG], [m4_pattern_forbid([^_?PKG_[A-Z_]+$]) m4_pattern_allow([^PKG_CONFIG(_(PATH|LIBDIR|SYSROOT_DIR|ALLOW_SYSTEM_(CFLAGS|LIBS)))?$]) m4_pattern_allow([^PKG_CONFIG_(DISABLE_UNINSTALLED|TOP_BUILD_DIR|DEBUG_SPEW)$]) AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility]) AC_ARG_VAR([PKG_CONFIG_PATH], [directories to add to pkg-config's search path]) AC_ARG_VAR([PKG_CONFIG_LIBDIR], [path overriding pkg-config's built-in search path]) if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then AC_PATH_TOOL([PKG_CONFIG], [pkg-config]) fi if test -n "$PKG_CONFIG"; then _pkg_min_version=m4_default([$1], [0.9.0]) AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version]) if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) PKG_CONFIG="" fi fi[]dnl ])# PKG_PROG_PKG_CONFIG # PKG_CHECK_EXISTS(MODULES, [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) # # Check to see whether a particular set of modules exists. Similar # to PKG_CHECK_MODULES(), but does not set variables or print errors. # # Please remember that m4 expands AC_REQUIRE([PKG_PROG_PKG_CONFIG]) # only at the first occurence in configure.ac, so if the first place # it's called might be skipped (such as if it is within an "if", you # have to call PKG_CHECK_EXISTS manually # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_EXISTS], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl if test -n "$PKG_CONFIG" && \ AC_RUN_LOG([$PKG_CONFIG --exists --print-errors "$1"]); then m4_default([$2], [:]) m4_ifvaln([$3], [else $3])dnl fi]) # _PKG_CONFIG([VARIABLE], [COMMAND], [MODULES]) # --------------------------------------------- m4_define([_PKG_CONFIG], [if test -n "$$1"; then pkg_cv_[]$1="$$1" elif test -n "$PKG_CONFIG"; then PKG_CHECK_EXISTS([$3], [pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null` test "x$?" 
!= "x0" && pkg_failed=yes ], [pkg_failed=yes]) else pkg_failed=untried fi[]dnl ])# _PKG_CONFIG # _PKG_SHORT_ERRORS_SUPPORTED # ----------------------------- AC_DEFUN([_PKG_SHORT_ERRORS_SUPPORTED], [AC_REQUIRE([PKG_PROG_PKG_CONFIG]) if $PKG_CONFIG --atleast-pkgconfig-version 0.20; then _pkg_short_errors_supported=yes else _pkg_short_errors_supported=no fi[]dnl ])# _PKG_SHORT_ERRORS_SUPPORTED # PKG_CHECK_MODULES(VARIABLE-PREFIX, MODULES, [ACTION-IF-FOUND], # [ACTION-IF-NOT-FOUND]) # # # Note that if there is a possibility the first call to # PKG_CHECK_MODULES might not happen, you should be sure to include an # explicit call to PKG_PROG_PKG_CONFIG in your configure.ac # # # -------------------------------------------------------------- AC_DEFUN([PKG_CHECK_MODULES], [AC_REQUIRE([PKG_PROG_PKG_CONFIG])dnl AC_ARG_VAR([$1][_CFLAGS], [C compiler flags for $1, overriding pkg-config])dnl AC_ARG_VAR([$1][_LIBS], [linker flags for $1, overriding pkg-config])dnl pkg_failed=no AC_MSG_CHECKING([for $1]) _PKG_CONFIG([$1][_CFLAGS], [cflags], [$2]) _PKG_CONFIG([$1][_LIBS], [libs], [$2]) m4_define([_PKG_TEXT], [Alternatively, you may set the environment variables $1[]_CFLAGS and $1[]_LIBS to avoid the need to call pkg-config. See the pkg-config man page for more details.]) if test $pkg_failed = yes; then AC_MSG_RESULT([no]) _PKG_SHORT_ERRORS_SUPPORTED if test $_pkg_short_errors_supported = yes; then $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors --cflags --libs "$2" 2>&1` else $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors --cflags --libs "$2" 2>&1` fi # Put the nasty error message in config.log where it belongs echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD m4_default([$4], [AC_MSG_ERROR( [Package requirements ($2) were not met: $$1_PKG_ERRORS Consider adjusting the PKG_CONFIG_PATH environment variable if you installed software in a non-standard prefix. _PKG_TEXT])[]dnl ]) elif test $pkg_failed = untried; then AC_MSG_RESULT([no]) m4_default([$4], [AC_MSG_FAILURE( [The pkg-config script could not be found or is too old. Make sure it is in your PATH or set the PKG_CONFIG environment variable to the full path to pkg-config. _PKG_TEXT To get pkg-config, see .])[]dnl ]) else $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS $1[]_LIBS=$pkg_cv_[]$1[]_LIBS AC_MSG_RESULT([yes]) $3 fi[]dnl ])# PKG_CHECK_MODULES # PKG_INSTALLDIR(DIRECTORY) # ------------------------- # Substitutes the variable pkgconfigdir as the location where a module # should install pkg-config .pc files. By default the directory is # $libdir/pkgconfig, but the default can be changed by passing # DIRECTORY. The user can override through the --with-pkgconfigdir # parameter. AC_DEFUN([PKG_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${libdir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([pkgconfigdir], [AS_HELP_STRING([--with-pkgconfigdir], pkg_description)],, [with_pkgconfigdir=]pkg_default) AC_SUBST([pkgconfigdir], [$with_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ]) dnl PKG_INSTALLDIR # PKG_NOARCH_INSTALLDIR(DIRECTORY) # ------------------------- # Substitutes the variable noarch_pkgconfigdir as the location where a # module should install arch-independent pkg-config .pc files. By # default the directory is $datadir/pkgconfig, but the default can be # changed by passing DIRECTORY. The user can override through the # --with-noarch-pkgconfigdir parameter. 
AC_DEFUN([PKG_NOARCH_INSTALLDIR], [m4_pushdef([pkg_default], [m4_default([$1], ['${datadir}/pkgconfig'])]) m4_pushdef([pkg_description], [pkg-config arch-independent installation directory @<:@]pkg_default[@:>@]) AC_ARG_WITH([noarch-pkgconfigdir], [AS_HELP_STRING([--with-noarch-pkgconfigdir], pkg_description)],, [with_noarch_pkgconfigdir=]pkg_default) AC_SUBST([noarch_pkgconfigdir], [$with_noarch_pkgconfigdir]) m4_popdef([pkg_default]) m4_popdef([pkg_description]) ]) dnl PKG_NOARCH_INSTALLDIR mongodb-1.6.1/scripts/autotools/CheckCompiler.m40000644000076500000240000000523713572250757021162 0ustar alcaeusstaff# If CFLAGS and CXXFLAGS are unset, default to empty. # This is to tell automake not to include '-g' if C{XX,}FLAGS is not set. # For more info - http://www.gnu.org/software/automake/manual/autoconf.html#C_002b_002b-Compiler if test -z "$CXXFLAGS"; then CXXFLAGS="" fi if test -z "$CFLAGS"; then CFLAGS="" fi AC_PROG_CC AC_PROG_CXX # Check that an appropriate C compiler is available. c_compiler="unknown" AC_LANG_PUSH([C]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([ #if !(defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)) #error Not a supported GCC compiler #endif #if defined(__GNUC__) #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #if GCC_VERSION < 40100 #error Not a supported GCC compiler #endif #endif ])], [c_compiler="gcc"], []) # If our BEGIN_IGNORE_DEPRECATIONS macro won't work, pass # -Wno-deprecated-declarations AC_COMPILE_IFELSE([AC_LANG_PROGRAM([ #if !defined(__clang__) && defined(__GNUC__) #define GCC_VERSION (__GNUC__ * 10000 \ + __GNUC_MINOR__ * 100 \ + __GNUC_PATCHLEVEL__) #if GCC_VERSION < 40600 #error Does not support deprecation warning pragmas #endif #endif ])], [], [STD_CFLAGS="$STD_CFLAGS -Wno-deprecated-declarations"]) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([ #if defined(__clang__) #define CLANG_VERSION (__clang_major__ * 10000 \ + __clang_minor__ * 100 \ + __clang_patchlevel__) #if CLANG_VERSION < 30300 #error Not a supported Clang compiler #endif #endif ])], [c_compiler="clang"], []) AC_COMPILE_IFELSE([AC_LANG_PROGRAM([ #if !(defined(__SUNPRO_C)) #error Not a supported Sun compiler #endif ])], [c_compiler="sun"], []) # The type of parameters for accept, getpeername, getsockname, getsockopt # all vary the same way by platform. AX_PROTOTYPE(accept, [ #include #include ], [ int a = 0; ARG2 *b = 0; ARG3 *c = 0; accept (a, b, c);], ARG2, [struct sockaddr, void], ARG3, [socklen_t, size_t, int]) MONGOC_SOCKET_ARG2="$ACCEPT_ARG2" AC_SUBST(MONGOC_SOCKET_ARG2) MONGOC_SOCKET_ARG3="$ACCEPT_ARG3" AC_SUBST(MONGOC_SOCKET_ARG3) AC_LANG_POP([C]) if test "$c_compiler" = "unknown"; then AC_MSG_ERROR([Compiler GCC >= 4.1 or Clang >= 3.3 is required for C compilation]) fi # GLibc 2.19 complains about both _BSD_SOURCE and _GNU_SOURCE. The _GNU_SOURCE # contains everything anyway. So just use that. 
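# The probe below only tests whether the __GLIBC__ macro is defined: glibc
# builds get -D_GNU_SOURCE, every other libc falls back to -D_BSD_SOURCE.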
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ #include #ifndef __GLIBC__ #error not glibc #endif ]], [])], LIBC_FEATURES="-D_GNU_SOURCE", LIBC_FEATURES="-D_BSD_SOURCE") AC_SUBST(LIBC_FEATURES) AC_C_CONST AC_C_INLINE AC_C_TYPEOF mongodb-1.6.1/scripts/autotools/CheckHost.m40000644000076500000240000000164413572250757020323 0ustar alcaeusstaffAC_CANONICAL_HOST os_win32=no os_netbsd=no os_freebsd=no os_openbsd=no os_hpux=no os_linux=no os_solaris=no os_darwin=no os_gnu=no case "$host" in *-mingw*|*-*-cygwin*) os_win32=yes TARGET_OS=windows ;; *-*-*netbsd*) os_netbsd=yes TARGET_OS=unix ;; *-*-*freebsd*) os_freebsd=yes TARGET_OS=unix ;; *-*-*openbsd*) os_openbsd=yes TARGET_OS=unix ;; *-*-hpux*) os_hpux=yes TARGET_OS=unix ;; *-*-linux*) os_linux=yes os_gnu=yes TARGET_OS=unix ;; *-*-solaris*) os_solaris=yes TARGET_OS=unix ;; *-*-darwin*) os_darwin=yes TARGET_OS=unix ;; gnu*|k*bsd*-gnu*) os_gnu=yes TARGET_OS=unix ;; *) AC_MSG_WARN([*** Please add $host to configure.ac checks!]) ;; esac mongodb-1.6.1/scripts/centos/ldap/Domain.ldif0000644000076500000240000000012713572250757020432 0ustar alcaeusstaffdn: dc=10gen,dc=me objectClass: dcObject objectClass: organization dc: 10gen o : 10gen mongodb-1.6.1/scripts/centos/ldap/Users.ldif0000644000076500000240000000010613572250757020321 0ustar alcaeusstaffdn: ou=Users,dc=10gen,dc=me ou: Users objectClass: organizationalUnit mongodb-1.6.1/scripts/centos/ldap/basics.ldif0000644000076500000240000000024413572250757020467 0ustar alcaeusstaffdn: dc=10gen,dc=me objectclass: dcObject objectclass: organization o: MongoDB dc: 10gen dn: cn=Manager,dc=10gen,dc=me objectclass: organizationalRole cn: Manager mongodb-1.6.1/scripts/centos/ldap/install.sh0000644000076500000240000000114113572250757020362 0ustar alcaeusstaffyum -y update yum -y install openldap-servers openldap-clients openldap-devel python-devel gcc cyrus-sasl-plain xfsprogs net-snmp ps-misc wget python-ldap service slapd stop service slapd start #just in case sleep 10 ldapadd -Y EXTERNAL -H ldapi:/// -f /phongo/scripts/centos/ldap/pw.ldif # Add our specifics ldapadd -x -D "cn=Manager,dc=10gen,dc=me" -w password -f /phongo/scripts/centos/ldap/Domain.ldif ldapadd -x -D "cn=Manager,dc=10gen,dc=me" -w password -f /phongo/scripts/centos/ldap/Users.ldif # Add the users python /phongo/scripts/centos/ldap/ldapconfig.py -f /phongo/scripts/centos/ldap/users mongodb-1.6.1/scripts/centos/ldap/ldapconfig.py0000644000076500000240000000375013572250757021050 0ustar alcaeusstaff#!/usr/bin/python import optparse import ldap import ldap.modlist as modlist def main(): parser = optparse.OptionParser(usage="""\ %prog [options] Add users to LDAP """) # add in command line options. 
Add mongo host/port combo later parser.add_option("-f", "--filename", dest="fname", help="name of file with user names", default=None) (options, args) = parser.parse_args() if options.fname is None: print "\nERROR: Must specify name of file to import\n" sys.exit(-1) # Open a connection l = ldap.initialize("ldap://localhost") # Bind/authenticate with a user with apropriate rights to add objects l.simple_bind_s("cn=Manager,dc=10gen,dc=me","password") for uname in open(options.fname, 'r'): try: # The dn of our new entry/object print "adding ", uname dn= 'uid=' + uname.lower() + ',ou=Users,dc=10gen,dc=me' ldif = configUser(uname.rstrip('\r\n')) # Do the actual synchronous add-operation to the ldapserver l.add_s(dn,ldif) except ldap.LDAPError, e: print e.message['info'] # Its nice to the server to disconnect and free resources when done l.unbind_s() # Do the tld configuration for the ldap tree def configDC(): # A dict to help build the "body" of the object attrs = {} attrs['objectclass'] = ['organization','dcObject'] attrs['dn'] = 'dc=10gen,dc=me' attrs['dc'] = '10gen' attrs['o'] = '10gen' # Convert our dict to nice syntax for the add-function using modlist ldif = modlist.addModlist(attrs) def configOU(): # A dict to help build the "body" of the object attrs = {} attrs['dn'] = 'dc=10gen,dc=me' attrs['objectclass'] = ['organiationalUnit'] attrs['ou'] = 'Users' ldif = modlist.addModlist(attrs) def configUser( uname ): attrs = {} # attrs['dn'] = ['cn=' + uname + 'ou=Users,dc=10gen,dc=me'] attrs['cn'] = [uname] # attrs['uid'] = [uname] attrs['sn'] = 'TestUser' attrs['objectclass'] = ['inetOrgPerson'] attrs['userPassword'] = 'password' return modlist.addModlist(attrs) if __name__ == "__main__": main() mongodb-1.6.1/scripts/centos/ldap/mongod.ldif0000644000076500000240000001027213572250757020510 0ustar alcaeusstaff# # See slapd-config(5) for details on configuration options. # This file should NOT be world readable. # dn: cn=config objectClass: olcGlobal cn: config olcArgsFile: /var/run/openldap/slapd.args olcPidFile: /var/run/openldap/slapd.pid # # TLS settings # olcTLSCACertificatePath: /etc/openldap/certs olcTLSCertificateFile: "OpenLDAP Server" olcTLSCertificateKeyFile: /etc/openldap/certs/password # # Do not enable referrals until AFTER you have a working directory # service AND an understanding of referrals. 
# #olcReferral: ldap://root.openldap.org # # Sample security restrictions # Require integrity protection (prevent hijacking) # Require 112-bit (3DES or better) encryption for updates # Require 64-bit encryption for simple bind # #olcSecurity: ssf=1 update_ssf=112 simple_bind=64 # # Load dynamic backend modules: # - modulepath is architecture dependent value (32/64-bit system) # - back_sql.la backend requires openldap-servers-sql package # - dyngroup.la and dynlist.la cannot be used at the same time # #dn: cn=module,cn=config #objectClass: olcModuleList #cn: module #olcModulepath: /usr/lib/openldap #olcModulepath: /usr/lib64/openldap #olcModuleload: accesslog.la #olcModuleload: auditlog.la #olcModuleload: back_dnssrv.la #olcModuleload: back_ldap.la #olcModuleload: back_mdb.la #olcModuleload: back_meta.la #olcModuleload: back_null.la #olcModuleload: back_passwd.la #olcModuleload: back_relay.la #olcModuleload: back_shell.la #olcModuleload: back_sock.la #olcModuleload: collect.la #olcModuleload: constraint.la #olcModuleload: dds.la #olcModuleload: deref.la #olcModuleload: dyngroup.la #olcModuleload: dynlist.la #olcModuleload: memberof.la #olcModuleload: pcache.la #olcModuleload: ppolicy.la #olcModuleload: refint.la #olcModuleload: retcode.la #olcModuleload: rwm.la #olcModuleload: seqmod.la #olcModuleload: smbk5pwd.la #olcModuleload: sssvlv.la #olcModuleload: syncprov.la #olcModuleload: translucent.la #olcModuleload: unique.la #olcModuleload: valsort.la # # Schema settings # dn: cn=schema,cn=config objectClass: olcSchemaConfig cn: schema include: file:///etc/openldap/schema/core.ldif include: file:///etc/openldap/schema/corba.schema include: file:///etc/openldap/schema/cosine.ldif include: file:///etc/openldap/schema/duaconf.schema include: file:///etc/openldap/schema/dyngroup.schema include: file:///etc/openldap/schema/inetorgperson.ldif include: file:///etc/openldap/schema/java.schema include: file:///etc/openldap/schema/misc.schema include: file:///etc/openldap/schema/nis.ldif include: file:///etc/openldap/schema/openldap.ldif include: file:///etc/openldap/schema/ppolicy.schema include: file:///etc/openldap/schema/collective.schema # # Frontend settings # dn: olcDatabase=frontend,cn=config objectClass: olcDatabaseConfig olcDatabase: frontend # # Sample global access control policy: # Root DSE: allow anyone to read it # Subschema (sub)entry DSE: allow anyone to read it # Other DSEs: # Allow self write access # Allow authenticated users read access # Allow anonymous users to authenticate # #olcAccess: to dn.base="" by * read #olcAccess: to dn.base="cn=Subschema" by * read #olcAccess: to * # by self write # by users read # by anonymous auth # # if no access controls are present, the default policy # allows anyone and everyone to read anything but restricts # updates to rootdn. (e.g., "access to * by * read") # # rootdn can always read and write EVERYTHING! 
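# Illustrative check only (not part of the generated config): once slapd has
# loaded this file, the rootdn defined in the hdb database below can be
# exercised with, e.g.
#   ldapsearch -x -D "cn=Manager,dc=10gen,dc=me" -w password -b "dc=10gen,dc=me"
# where "password" is the value matching the SSHA rootPW hash used by the
# ldapadd calls elsewhere in these scripts.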
# # # Configuration database # dn: olcDatabase=config,cn=config objectClass: olcDatabaseConfig olcDatabase: config olcAccess: to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,c n=auth" manage by * none # # Server status monitoring # dn: olcDatabase=monitor,cn=config objectClass: olcDatabaseConfig olcDatabase: monitor olcAccess: to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,c n=auth" read by dn.base="cn=Manager,dc=10gen,dc=me" read by * none # # Backend database definitions # dn: olcDatabase=hdb,cn=config objectClass: olcDatabaseConfig objectClass: olcHdbConfig olcDatabase: hdb olcSuffix: dc=10gen,dc=me olcRootDN: cn=Manager,dc=10gen,dc=me olcRootPW: {SSHA}t3hTZGC4FTOS6AnTa76aX7HRtt1IDqFM olcDbDirectory: /var/lib/ldap olcDbIndex: objectClass eq,pres olcDbIndex: ou,cn,mail,surname,givenname eq,pres,sub mongodb-1.6.1/scripts/centos/ldap/pw.ldif0000644000076500000240000000117313572250757017653 0ustar alcaeusstaffdn: olcDatabase={0}config,cn=config changetype: modify replace: olcRootPW olcRootPW: {SSHA}t3hTZGC4FTOS6AnTa76aX7HRtt1IDqFM - replace: olcRootDN olcRootDN: cn=Manager,dc=10gen,dc=me dn: olcDatabase={2}bdb,cn=config changetype: modify replace: olcRootPW olcRootPW: {SSHA}t3hTZGC4FTOS6AnTa76aX7HRtt1IDqFM - replace: olcSuffix olcSuffix: dc=10gen,dc=me - replace: olcRootDN olcRootDN: cn=Manager,dc=10gen,dc=me dn: olcDatabase={1}monitor,cn=config changetype: modify replace: olcAccess olcAccess: {0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" read by dn.base="cn=Manager,dc=10gen,dc=me" read by * none mongodb-1.6.1/scripts/centos/ldap/saslauthd.conf0000644000076500000240000000014313572250757021220 0ustar alcaeusstaffldap_servers: ldap://localhost:389 ldap_search_base: ou=Users,dc=10gen,dc=me ldap_filter: (uid=%u) mongodb-1.6.1/scripts/centos/ldap/users0000644000076500000240000000002613572250757017445 0ustar alcaeusstaffbugs features dbadmin mongodb-1.6.1/scripts/centos/essentials.sh0000644000076500000240000000036213572250757020152 0ustar alcaeusstaff# Tools you can't live without sudo yum install -y git vim # I can't stand emacs echo 'set -o vi' | sudo tee /etc/profile.d/vishell.sh # Who knows how to configure RHEL at all anyway? sudo service iptables stop sudo chkconfig iptables off mongodb-1.6.1/scripts/freebsd/essentials.sh0000644000076500000240000000057513572250757020277 0ustar alcaeusstaff# Update ports sudo portsnap fetch extract update # Minimum required PHP install + pecl sudo pkg install -y pcre php56 php56-openssl php56-json php56-zlib pear autoconf pkgconf cyrus-sasl # We need vim. git requires curl. mongoc requires libtool and automake sudo pkg install -y vim git curl libtool automake # I can't stand emacs echo 'set -o vi' | sudo tee -a /etc/profile mongodb-1.6.1/scripts/freebsd/phongo.sh0000644000076500000240000000047313572250757017414 0ustar alcaeusstaffls -1 /phongo/mongodb*.tgz | sort -n -r | xargs sudo pecl install -f 2>&1 > /phongo/.build if test $? 
-eq 0; then php -m | grep -q mongodb || echo "extension=mongodb.so" | sudo tee -a /usr/local/etc/php/extensions.ini pecl run-tests -q -p mongodb 2>&1 > /phongo/.tests else tail -n50 /phongo/.build exit 3 fi mongodb-1.6.1/scripts/presets/replicaset-30.json0000644000076500000240000000403613572250757021106 0ustar alcaeusstaff{ "id": "REPLICASET_30", "name": "mongod", "members": [ { "procParams": { "dbpath": "/tmp/REPLICASET/3100/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/3100/mongod.log", "journal": true, "nssize": 1, "port": 3100, "bind_ip": "::,0.0.0.0", "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "priority": 99, "tags": { "ordinal": "one", "dc": "pa" } }, "server_id": "RS-30-one" }, { "procParams": { "dbpath": "/tmp/REPLICASET/3101/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/3101/mongod.log", "journal": true, "nssize": 1, "port": 3101, "bind_ip": "::,0.0.0.0", "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "priority": 1.1, "tags": { "ordinal": "two", "dc": "nyc" } }, "server_id": "RS-30-two" }, { "procParams": { "dbpath": "/tmp/REPLICASET/3102/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/3002/mongod.log", "journal": true, "nssize": 1, "port": 3102, "bind_ip": "::,0.0.0.0", "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "arbiterOnly": true }, "server_id": "RS-30-arbiter" } ], "version": "30-release" } mongodb-1.6.1/scripts/presets/replicaset-dns.json0000644000076500000240000000341113572250757021444 0ustar alcaeusstaff{ "id": "REPLICASET_DNS", "name": "mongod", "members": [ { "procParams": { "dbpath": "/tmp/REPLICASET/27017/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/27017/mongod.log", "journal": true, "nssize": 1, "port": 27017, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "priority": 1 }, "server_id": "DNS-one" }, { "procParams": { "dbpath": "/tmp/REPLICASET/27018/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/27018/mongod.log", "journal": true, "nssize": 1, "port": 27018, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "priority": 1 }, "server_id": "DNS-two" }, { "procParams": { "dbpath": "/tmp/REPLICASET/27019/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/27019/mongod.log", "journal": true, "nssize": 1, "port": 27019, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "priority": 1 }, "server_id": "DNS-three" } ] } mongodb-1.6.1/scripts/presets/replicaset.json0000644000076500000240000000375113572250757020671 0ustar alcaeusstaff{ "id": "REPLICASET", "name": "mongod", "members": [ { "procParams": { "dbpath": "/tmp/REPLICASET/3000/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/3000/mongod.log", "journal": true, "nssize": 1, "port": 3000, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "priority": 99, "tags": { "ordinal": "one", "dc": "pa" } }, "server_id": "RS-one" }, { "procParams": { "dbpath": "/tmp/REPLICASET/3001/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/3001/mongod.log", "journal": true, "nssize": 1, "port": 3001, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "priority": 1.1, "tags": { "ordinal": "two", "dc": "nyc" } }, "server_id": "RS-two" }, { "procParams": { "dbpath": 
"/tmp/REPLICASET/3002/", "ipv6": true, "logappend": true, "logpath": "/tmp/REPLICASET/3002/mongod.log", "journal": true, "nssize": 1, "port": 3002, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "rsParams": { "arbiterOnly": true }, "server_id": "RS-arbiter" } ] } mongodb-1.6.1/scripts/presets/standalone-30.json0000644000076500000240000000065613572250757021107 0ustar alcaeusstaff{ "name": "mongod", "id" : "STANDALONE_30", "procParams": { "dbpath": "/tmp/standalone-30/", "ipv6": true, "logappend": true, "logpath": "/tmp/standalone-30/mongod.log", "journal": true, "nssize": 1, "port": 2700, "bind_ip": "::,0.0.0.0", "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "version": "30-release" } mongodb-1.6.1/scripts/presets/standalone-auth.json0000644000076500000240000000072513572250757021623 0ustar alcaeusstaff{ "name": "mongod", "id" : "STANDALONE_AUTH", "auth_key": "secret", "login": "root", "password": "toor", "procParams": { "dbpath": "/tmp/standalone-auth/", "ipv6": true, "logappend": true, "logpath": "/tmp/standalone-auth/m.log", "journal": true, "nssize": 1, "port": 2200, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} } } mongodb-1.6.1/scripts/presets/standalone-plain.json0000644000076500000240000000106413572250757021762 0ustar alcaeusstaff{ "name": "mongod", "id" : "STANDALONE_PLAIN", "auth_key": "secret", "login": "root", "password": "toor", "procParams": { "dbpath": "/tmp/standalone-plain/", "ipv6": true, "logappend": true, "logpath": "/tmp/standalone-plain/m.log", "journal": true, "nssize": 1, "port": 2400, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1, "saslauthdPath": "/var/run/saslauthd/mux", "authenticationMechanisms": "SCRAM-SHA-1,PLAIN"} } } mongodb-1.6.1/scripts/presets/standalone-ssl.json0000644000076500000240000000115313572250757021457 0ustar alcaeusstaff{ "name": "mongod", "id" : "STANDALONE_SSL", "procParams": { "dbpath": "/tmp/standalone-ssl/", "ipv6": true, "logappend": true, "logpath": "/tmp/standalone-ssl/m.log", "journal": true, "nssize": 1, "port": 2100, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} }, "sslParams": { "sslMode": "requireSSL", "sslCAFile": "/phongo/scripts/ssl/ca.pem", "sslPEMKeyFile": "/phongo/scripts/ssl/server.pem", "sslAllowConnectionsWithoutCertificates": true } } mongodb-1.6.1/scripts/presets/standalone-x509.json0000644000076500000240000000140213572250757021360 0ustar alcaeusstaff{ "name": "mongod", "id" : "STANDALONE_X509", "authSource": "$external", "login": "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=client", "procParams": { "dbpath": "/tmp/standalone-x509/", "ipv6": true, "logappend": true, "logpath": "/tmp/standalone-x509/m.log", "journal": true, "nssize": 1, "port": 2300, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1, "authenticationMechanisms": "MONGODB-X509"} }, "sslParams": { "sslMode": "requireSSL", "sslCAFile": "/phongo/scripts/ssl/ca.pem", "sslPEMKeyFile": "/phongo/scripts/ssl/server.pem", "sslWeakCertificateValidation": true } } mongodb-1.6.1/scripts/presets/standalone.json0000644000076500000240000000060413572250757020660 0ustar alcaeusstaff{ "name": "mongod", "id" : "STANDALONE", "procParams": { "dbpath": "/tmp/standalone/", "ipv6": true, "logappend": true, "logpath": "/tmp/standalone/mongod.log", "journal": true, "nssize": 1, "port": 2000, "bind_ip_all": true, "smallfiles": true, "setParameter": {"enableTestCommands": 1} 
} } mongodb-1.6.1/scripts/ssl/ca.pem0000644000076500000240000000237013572250757016041 0ustar alcaeusstaff-----BEGIN CERTIFICATE----- MIIDfzCCAmegAwIBAgIDB1MGMA0GCSqGSIb3DQEBCwUAMHkxGzAZBgNVBAMTEkRy aXZlcnMgVGVzdGluZyBDQTEQMA4GA1UECxMHRHJpdmVyczEQMA4GA1UEChMHTW9u Z29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsx CzAJBgNVBAYTAlVTMB4XDTE5MDUyMjIwMjMxMVoXDTM5MDUyMjIwMjMxMVoweTEb MBkGA1UEAxMSRHJpdmVycyBUZXN0aW5nIENBMRAwDgYDVQQLEwdEcml2ZXJzMRAw DgYDVQQKEwdNb25nb0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQI EwhOZXcgWW9yazELMAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQCl7VN+WsQfHlwapcOpTLZVoeMAl1LTbWTFuXSAavIyy0W1Ytky1UP/ bxCSW0mSWwCgqoJ5aXbAvrNRp6ArWu3LsTQIEcD3pEdrFIVQhYzWUs9fXqPyI9k+ QNNQ+MRFKeGteTPYwF2eVEtPzUHU5ws3+OKp1m6MCLkwAG3RBFUAfddUnLvGoZiT pd8/eNabhgHvdrCw+tYFCWvSjz7SluEVievpQehrSEPKe8DxJq/IM3tSl3tdylzT zeiKNO7c7LuQrgjAfrZl7n2SriHIlNmqiDR/kdd8+TxBuxjFlcf2WyHCO3lIcIgH KXTlhUCg50KfHaxHu05Qw0x8869yIzqbAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8w DQYJKoZIhvcNAQELBQADggEBAEHuhTL8KQZcKCTSJbYA9MgZj7U32arMGBbc1hiq VBREwvdVz4+9tIyWMzN9R/YCKmUTnCq8z3wTlC8kBtxYn/l4Tj8nJYcgLJjQ0Fwe gT564CmvkUat8uXPz6olOCdwkMpJ9Sj62i0mpgXJdBfxKQ6TZ9yGz6m3jannjZpN LchB7xSAEWtqUgvNusq0dApJsf4n7jZ+oBZVaQw2+tzaMfaLqHgMwcu1FzA8UKCD sxCgIsZUs8DdxaD418Ot6nPfheOTqe24n+TTa+Z6O0W0QtnofJBx7tmAo1aEc57i 77s89pfwIJetpIlhzNSMKurCAocFCJMJLAASJFuu6dyDvPo= -----END CERTIFICATE-----mongodb-1.6.1/scripts/ssl/client.pem0000644000076500000240000000561413572250757016740 0ustar alcaeusstaff-----BEGIN RSA PRIVATE KEY----- MIIEpAIBAAKCAQEAsNS8UEuin7/K29jXfIOLpIoh1jEyWVqxiie2Onx7uJJKcoKo khA3XeUnVN0k6X5MwYWcN52xcns7LYtyt06nRpTG2/emoV44w9uKTuHsvUbiOwSV m/ToKQQ4FUFZoqorXH+ZmJuIpJNfoW+3CkE1vEDCIecIq6BNg5ySsPtvSuSJHGjp mc7/5ZUDvFE2aJ8QbJU3Ws0HXiEb6ymi048LlzEL2VKX3w6mqqh+7dcZGAy7qYk2 5FZ9ktKvCeQau7mTyU1hsPrKFiKtMN8Q2ZAItX13asw5/IeSTq2LgLFHlbj5Kpq4 GmLdNCshzH5X7Ew3IYM8EHmsX8dmD6mhv7vpVwIDAQABAoIBABOdpb4qhcG+3twA c/cGCKmaASLnljQ/UU6IFTjrsjXJVKTbRaPeVKX/05sgZQXZ0t3s2mV5AsQ2U1w8 Cd+3w+qaemzQThW8hAOGCROzEDX29QWi/o2sX0ydgTMqaq0Wv3SlWv6I0mGfT45y /BURIsrdTCvCmz2erLqa1dL4MWJXRFjT9UTs5twlecIOM2IHKoGGagFhymRK4kDe wTRC9fpfoAgyfus3pCO/wi/F8yKGPDEwY+zgkhrJQ+kSeki7oKdGD1H540vB8gRt EIqssE0Y6rEYf97WssQlxJgvoJBDSftOijS6mwvoasDUwfFqyyPiirawXWWhHXkc DjIi/XECgYEA5xfjilw9YyM2UGQNESbNNunPcj7gDZbN347xJwmYmi9AUdPLt9xN 3XaMqqR22k1DUOxC/5hH0uiXir7mDfqmC+XS/ic/VOsa3CDWejkEnyGLiwSHY502 wD/xWgHwUiGVAG9HY64vnDGm6L3KGXA2oqxanL4V0+0+Ht49pZ16i8sCgYEAw+Ox CHGtpkzjCP/z8xr+1VTSdpc/4CP2HONnYopcn48KfQnf7Nale69/1kZpypJlvQSG eeA3jMGigNJEkb8/kaVoRLCisXcwLc0XIfCTeiK6FS0Ka30D/84Qm8UsHxRdpGkM kYITAa2r64tgRL8as4/ukeXBKE+oOhX43LeEfyUCgYBkf7IX2Ndlhsm3GlvIarxy NipeP9PGdR/hKlPbq0OvQf9R1q7QrcE7H7Q6/b0mYNV2mtjkOQB7S2WkFDMOP0P5 BqDEoKLdNkV/F9TOYH+PCNKbyYNrodJOt0Ap6Y/u1+Xpw3sjcXwJDFrO+sKqX2+T PStG4S+y84jBedsLbDoAEwKBgQCTz7/KC11o2yOFqv09N+WKvBKDgeWlD/2qFr3w UU9K5viXGVhqshz0k5z25vL09Drowf1nAZVpFMO2SPOMtq8VC6b+Dfr1xmYIaXVH Gu1tf77CM9Zk/VSDNc66e7GrUgbHBK2DLo+A+Ld9aRIfTcSsMbNnS+LQtCrQibvb cG7+MQKBgQCY11oMT2dUekoZEyW4no7W5D74lR8ztMjp/fWWTDo/AZGPBY6cZoZF IICrzYtDT/5BzB0Jh1f4O9ZQkm5+OvlFbmoZoSbMzHL3oJCBOY5K0/kdGXL46WWh IRJSYakNU6VIS7SjDpKgm9D8befQqZeoSggSjIIULIiAtYgS80vmGA== -----END RSA PRIVATE KEY----- -----BEGIN CERTIFICATE----- MIIDgzCCAmugAwIBAgIDAxOUMA0GCSqGSIb3DQEBCwUAMHkxGzAZBgNVBAMTEkRy aXZlcnMgVGVzdGluZyBDQTEQMA4GA1UECxMHRHJpdmVyczEQMA4GA1UEChMHTW9u Z29EQjEWMBQGA1UEBxMNTmV3IFlvcmsgQ2l0eTERMA8GA1UECBMITmV3IFlvcmsx CzAJBgNVBAYTAlVTMB4XDTE5MDUyMjIzNTU1NFoXDTM5MDUyMjIzNTU1NFowaTEP MA0GA1UEAxMGY2xpZW50MRAwDgYDVQQLEwdEcml2ZXJzMQwwCgYDVQQKEwNNREIx 
FjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYD VQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALDUvFBLop+/ ytvY13yDi6SKIdYxMllasYontjp8e7iSSnKCqJIQN13lJ1TdJOl+TMGFnDedsXJ7 Oy2LcrdOp0aUxtv3pqFeOMPbik7h7L1G4jsElZv06CkEOBVBWaKqK1x/mZibiKST X6FvtwpBNbxAwiHnCKugTYOckrD7b0rkiRxo6ZnO/+WVA7xRNmifEGyVN1rNB14h G+spotOPC5cxC9lSl98Opqqofu3XGRgMu6mJNuRWfZLSrwnkGru5k8lNYbD6yhYi rTDfENmQCLV9d2rMOfyHkk6ti4CxR5W4+SqauBpi3TQrIcx+V+xMNyGDPBB5rF/H Zg+pob+76VcCAwEAAaMkMCIwCwYDVR0PBAQDAgeAMBMGA1UdJQQMMAoGCCsGAQUF BwMCMA0GCSqGSIb3DQEBCwUAA4IBAQAqRcLAGvYMaGYOV4HJTzNotT2qE0I9THNQ wOV1fBg69x6SrUQTQLjJEptpOA288Wue6Jt3H+p5qAGV5GbXjzN/yjCoItggSKxG Xg7279nz6/C5faoIKRjpS9R+MsJGlttP9nUzdSxrHvvqm62OuSVFjjETxD39DupE YPFQoHOxdFTtBQlc/zIKxVdd20rs1xJeeU2/L7jtRBSPuR/Sk8zot7G2/dQHX49y kHrq8qz12kj1T6XDXf8KZawFywXaz0/Ur+fUYKmkVk1T0JZaNtF4sKqDeNE4zcns p3xLVDSl1Q5Gwj7bgph9o4Hxs9izPwiqjmNaSjPimGYZ399zcurY -----END CERTIFICATE----- mongodb-1.6.1/scripts/ssl/crl.pem0000644000076500000240000000131713572250757016236 0ustar alcaeusstaff-----BEGIN X509 CRL----- MIIB6jCB0wIBATANBgkqhkiG9w0BAQsFADB5MRswGQYDVQQDExJEcml2ZXJzIFRl c3RpbmcgQ0ExEDAOBgNVBAsTB0RyaXZlcnMxEDAOBgNVBAoTB01vbmdvREIxFjAU BgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3JrMQswCQYDVQQG EwJVUxcNMTkwNTIyMjI0NTUzWhcNMTkwNjIxMjI0NTUzWjAVMBMCAncVFw0xOTA1 MjIyMjQ1MzJaoA8wDTALBgNVHRQEBAICEAAwDQYJKoZIhvcNAQELBQADggEBACwQ W9OF6ExJSzzYbpCRroznkfdLG7ghNSxIpBQUGtcnYbkP4em6TdtAj5K3yBjcKn4a hnUoa5EJGr2Xgg0QascV/1GuWEJC9rsYYB9boVi95l1CrkS0pseaunM086iItZ4a hRVza8qEMBc3rdsracA7hElYMKdFTRLpIGciJehXzv40yT5XFBHGy/HIT0CD50O7 BDOHzA+rCFCvxX8UY9myDfb1r1zUW7Gzjn241VT7bcIJmhFE9oV0popzDyqr6GvP qB2t5VmFpbnSwkuc4ie8Jizip1P8Hg73lut3oVAHACFGPpfaNIAp4GcSH61zJmff 9UBe3CJ1INwqyiuqGeA= -----END X509 CRL----- mongodb-1.6.1/scripts/ssl/server.pem0000644000076500000240000000564113572250757016770 0ustar alcaeusstaff-----BEGIN RSA PRIVATE KEY----- MIIEogIBAAKCAQEAhNrB0E6GY/kFSd8/vNpu/t952tbnOsD5drV0XPvmuy7SgKDY a/S+xb/jPnlZKKehdBnH7qP/gYbv34ZykzcDFZscjPLiGc2cRGP+NQCSFK0d2/7d y15zSD3zhj14G8+MkpAejTU+0/qFNZMc5neDvGanTe0+8aWa0DXssM0MuTxIv7j6 CtsMWeqLLofN7a1Kw2UvmieCHfHMuA/08pJwRnV/+5T9WONBPJja2ZQRrG1BjpI4 81zSPUZesIqi8yDlExdvgNaRZIEHi/njREqwVgJOZomUY57zmKypiMzbz48dDTsV gUStxrEqbaP+BEjQYPX5+QQk4GdMjkLf52LR6QIDAQABAoIBAHSs+hHLJNOf2zkp S3y8CUblVMsQeTpsR6otaehPgi9Zy50TpX4KD5D0GMrBH8BIl86y5Zd7h+VlcDzK gs0vPxI2izhuBovKuzaE6rf5rFFkSBjxGDCG3o/PeJOoYFdsS3RcBbjVzju0hFCs xnDQ/Wz0anJRrTnjyraY5SnQqx/xuhLXkj/lwWoWjP2bUqDprnuLOj16soNu60Um JziWbmWx9ty0wohkI/8DPBl9FjSniEEUi9pnZXPElFN6kwPkgdfT5rY/TkMH4lsu ozOUc5xgwlkT6kVjXHcs3fleuT/mOfVXLPgNms85JKLucfd6KiV7jYZkT/bXIjQ+ 7CZEn0ECgYEA5QiKZgsfJjWvZpt21V/i7dPje2xdwHtZ8F9NjX7ZUFA7mUPxUlwe GiXxmy6RGzNdnLOto4SF0/7ebuF3koO77oLup5a2etL+y/AnNAufbu4S5D72sbiz wdLzr3d5JQ12xeaEH6kQNk2SD5/ShctdS6GmTgQPiJIgH0MIdi9F3v0CgYEAlH84 hMWcC+5b4hHUEexeNkT8kCXwHVcUjGRaYFdSHgovvWllApZDHSWZ+vRcMBdlhNPu 09Btxo99cjOZwGYJyt20QQLGc/ZyiOF4ximQzabTeFgLkTH3Ox6Mh2Rx9yIruYoX nE3UfMDkYELanEJUv0zenKpZHw7tTt5yXXSlEF0CgYBSsEOvVcKYO/eoluZPYQAA F2jgzZ4HeUFebDoGpM52lZD+463Dq2hezmYtPaG77U6V3bUJ/TWH9VN/Or290vvN v83ECcC2FWlSXdD5lFyqYx/E8gqE3YdgqfW62uqM+xBvoKsA9zvYLydVpsEN9v8m 6CSvs/2btA4O21e5u5WBTQKBgGtAb6vFpe0gHRDs24SOeYUs0lWycPhf+qFjobrP lqnHpa9iPeheat7UV6BfeW3qmBIVl/s4IPE2ld4z0qqZiB0Tf6ssu/TpXNPsNXS6 dLFz+myC+ufFdNEoQUtQitd5wKbjTCZCOGRaVRgJcSdG6Tq55Fa22mOKPm+mTmed ZdKpAoGAFsTYBAHPxs8nzkCJCl7KLa4/zgbgywO6EcQgA7tfelB8bc8vcAMG5o+8 YqAfwxrzhVSVbJx0fibTARXROmbh2pn010l2wj3+qUajM8NiskCPFbSjGy7HSUze P8Kt1uMDJdj55gATzn44au31QBioZY2zXleorxF21cr+BZCJgfA= -----END RSA PRIVATE KEY----- -----BEGIN CERTIFICATE----- 
MIIDlTCCAn2gAwIBAgICdxUwDQYJKoZIhvcNAQELBQAweTEbMBkGA1UEAxMSRHJp dmVycyBUZXN0aW5nIENBMRAwDgYDVQQLEwdEcml2ZXJzMRAwDgYDVQQKEwdNb25n b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL MAkGA1UEBhMCVVMwHhcNMTkwNTIyMjIzMjU2WhcNMzkwNTIyMjIzMjU2WjBwMRIw EAYDVQQDEwlsb2NhbGhvc3QxEDAOBgNVBAsTB0RyaXZlcnMxEDAOBgNVBAoTB01v bmdvREIxFjAUBgNVBAcTDU5ldyBZb3JrIENpdHkxETAPBgNVBAgTCE5ldyBZb3Jr MQswCQYDVQQGEwJVUzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAITa wdBOhmP5BUnfP7zabv7fedrW5zrA+Xa1dFz75rsu0oCg2Gv0vsW/4z55WSinoXQZ x+6j/4GG79+GcpM3AxWbHIzy4hnNnERj/jUAkhStHdv+3ctec0g984Y9eBvPjJKQ Ho01PtP6hTWTHOZ3g7xmp03tPvGlmtA17LDNDLk8SL+4+grbDFnqiy6Hze2tSsNl L5ongh3xzLgP9PKScEZ1f/uU/VjjQTyY2tmUEaxtQY6SOPNc0j1GXrCKovMg5RMX b4DWkWSBB4v540RKsFYCTmaJlGOe85isqYjM28+PHQ07FYFErcaxKm2j/gRI0GD1 +fkEJOBnTI5C3+di0ekCAwEAAaMwMC4wLAYDVR0RBCUwI4IJbG9jYWxob3N0hwR/ AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqGSIb3DQEBCwUAA4IBAQBol8+YH7MA HwnIh7KcJ8h87GkCWsjOJCDJWiYBJArQ0MmgDO0qdx+QEtvLMn3XNtP05ZfK0WyX or4cWllAkMFYaFbyB2hYazlD1UAAG+22Rku0UP6pJMLbWe6pnqzx+RL68FYdbZhN fCW2xiiKsdPoo2VEY7eeZKrNr/0RFE5EKXgzmobpTBQT1Dl3Ve4aWLoTy9INlQ/g z40qS7oq1PjjPLgxINhf4ncJqfmRXugYTOnyFiVXLZTys5Pb9SMKdToGl3NTYWLL 2AZdjr6bKtT+WtXyHqO0cQ8CkAW0M6VOlMluACllcJxfrtdlQS2S4lUIj76QKBdZ khBHXq/b8MFX -----END CERTIFICATE----- mongodb-1.6.1/scripts/ubuntu/ldap/install.sh0000644000076500000240000000100613572250757020411 0ustar alcaeusstaffsudo apt-get -y install ldap-utils libsasl2-modules-ldap sasl2-bin # setup saslauthd sudo sed -i 's/MECHANISMS="pam"/MECHANISMS="ldap"/' /etc/default/saslauthd sudo sed -i 's/START=no/START="yes"/' /etc/default/saslauthd sudo cp /phongo/scripts/ubuntu/ldap/saslauthd.conf /etc/ sudo service saslauthd restart testsaslauthd -u bugs -p password -s mongod -f /var/run/saslauthd/mux #ldapsearch -x -LLL -b dc=10gen,dc=me -h 192.168.112.20 #ldapsearch -x -b '' -s base '(objectclass=*)' namingContexts -h 192.168.112.20 mongodb-1.6.1/scripts/ubuntu/ldap/saslauthd.conf0000644000076500000240000000015013572250757021245 0ustar alcaeusstaffldap_servers: ldap://192.168.112.20:389 ldap_search_base: ou=Users,dc=10gen,dc=me ldap_filter: (uid=%u) mongodb-1.6.1/scripts/ubuntu/essentials.sh0000644000076500000240000000036513572250757020204 0ustar alcaeusstaffif [ ! 
-e ".provisioned" ]; then # Tools you can't live without apt-get update apt-get install -y build-essential git vim libtool autoconf # I can't stand emacs echo 'set -o vi' | sudo tee /etc/profile.d/vishell.sh touch .provisioned fi mongodb-1.6.1/scripts/ubuntu/mongo-orchestration.sh0000644000076500000240000000272013572250757022030 0ustar alcaeusstaff# 3.0 apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10 # 3.6 apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5 # 4.0 apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 68818C72E52529D4 echo 'deb http://repo.mongodb.com/apt/ubuntu trusty/mongodb-enterprise/3.0 multiverse' | tee /etc/apt/sources.list.d/mongodb-enterprise-3.0.list echo 'deb http://repo.mongodb.com/apt/ubuntu trusty/mongodb-enterprise/3.6 multiverse' | tee /etc/apt/sources.list.d/mongodb-enterprise-3.6.list echo 'deb http://repo.mongodb.com/apt/ubuntu trusty/mongodb-enterprise/testing multiverse' | tee /etc/apt/sources.list.d/mongodb-enterprise-4.0.list apt-get update apt-get install -y libsnmp30 libgsasl7 libcurl4-openssl-dev apt-get download mongodb-enterprise-server=3.0.15 apt-get download mongodb-enterprise-server=3.6.1 apt-get download mongodb-enterprise-mongos=3.6.1 apt-get download mongodb-enterprise-server=4.0.0~rc5 dpkg -x mongodb-enterprise-server_3.0.15_amd64.deb 3.0 dpkg -x mongodb-enterprise-server_3.6.1_amd64.deb 3.6 dpkg -x mongodb-enterprise-mongos_3.6.1_amd64.deb 3.6 dpkg -x mongodb-enterprise-server_4.0.0~rc5_amd64.deb 4.0 # Python stuff for mongo-orchestration apt-get install -y python python-dev python get-pip.py pip install --upgrade mongo-orchestration # Launch mongo-orchestration mongo-orchestration -f mongo-orchestration-config.json -b 192.168.112.10 --enable-majority-read-concern start mongodb-1.6.1/scripts/ubuntu/phongo.sh0000644000076500000240000000056613572250757017327 0ustar alcaeusstaffapt-get install -y php-pear php5-dbg gdb apt-get install -y libssl-dev libsasl2-dev libpcre3-dev pkg-config ls -1 /phongo/mongodb*.tgz | sort -n -r | xargs sudo pecl install -f 2>&1 > /phongo/.build php -m | grep -q mongodb || echo "extension=mongodb.so" >> `php --ini | grep "Loaded Configuration" | sed -e "s|.*:\s*||"` pecl run-tests -q -p mongodb 2>&1 > /phongo/.tests mongodb-1.6.1/scripts/vmware/kernel.sh0000644000076500000240000000046613572250757017273 0ustar alcaeusstaff# Ensure that VMWare Tools recompiles kernel modules # when we update the linux images sed -i.bak 's/answer AUTO_KMODS_ENABLED_ANSWER no/answer AUTO_KMODS_ENABLED_ANSWER yes/g' /etc/vmware-tools/locations sed -i.bak 's/answer AUTO_KMODS_ENABLED no/answer AUTO_KMODS_ENABLED yes/g' /etc/vmware-tools/locations mongodb-1.6.1/scripts/clang-format.sh0000755000076500000240000000205313572250757017061 0ustar alcaeusstaff#!/bin/sh if test x"$1" = x; then FILES1=`git ls-files | grep -v "src/contrib" | grep '\.[ch]$'` FILES2=`git ls-files --others --exclude-standard | grep -v "src/contrib" | grep '\.[ch]$'` FILES="$FILES1 $FILES2" fi if test x"$1" = xchanged; then FILES1=`git diff --name-only | grep -v "src/contrib" | grep '\.[ch]$'` FILES2=`git diff --cached --name-only | grep -v "src/contrib" | grep '\.[ch]$'` FILES3=`git ls-files --others --exclude-standard | grep '\.[ch]$'` FILES="$FILES1 $FILES2 $FILES3" fi # Find clang-format, we prefer -6.0, but also allow binaries without -suffix as # long as they're >= 6.0.0 CLANG_FORMAT=`which clang-format-6.0` if [ -z "$CLANG_FORMAT" ]; then CLANG_FORMAT=`which clang-format` fi if [ -z 
"$CLANG_FORMAT" ]; then echo "Couldn't find clang-format" exit fi VERSION=`$CLANG_FORMAT -version | cut -d " " -f 3` VERSION_MAJOR=`echo $VERSION | cut -d "." -f 1` if [ $VERSION_MAJOR -lt 6 ]; then echo "Found clang-format $VERSION but we need >= 6.0.0" exit fi # Run formatter for i in $FILES; do $CLANG_FORMAT -i $i done mongodb-1.6.1/scripts/convert-bson-corpus-tests.php0000644000076500000240000002775613572250757021772 0ustar alcaeusstaff 'Variation in double\'s string representation (SPEC-850)', 'Double type: -1.23456789012345677E+18' => 'Variation in double\'s string representation (SPEC-850)', 'Int64 type: -1' => 'PHP encodes integers as 32-bit if range allows', 'Int64 type: 0' => 'PHP encodes integers as 32-bit if range allows', 'Int64 type: 1' => 'PHP encodes integers as 32-bit if range allows', 'Javascript Code with Scope: bad scope doc (field has bad string length)' => 'Depends on PHPC-889', 'Javascript Code with Scope: Unicode and embedded null in code string, empty scope' => 'Embedded null in code string is not supported in libbson (CDRIVER-1879)', 'Multiple types within the same document: All BSON types' => 'PHP encodes integers as 32-bit if range allows', 'Top-level document validity: Bad $date (number, not string or hash)' => 'Legacy extended JSON $date syntax uses numbers (CDRIVER-2223)', ]; $for64bitOnly = [ /* Note: Although 64-bit integers be represented by the Int64 class, these * tests fail on 32-bit platforms due to json_canonicalize() roundtripping * values through PHP, which converts large integers to floats. */ 'Int64 type: MinValue' => "Can't represent 64-bit ints on a 32-bit platform", 'Int64 type: MaxValue' => "Can't represent 64-bit ints on a 32-bit platform", ]; $outputPath = realpath(__DIR__ . '/../tests') . '/bson-corpus/'; if ( ! is_dir($outputPath) && ! mkdir($outputPath, 0755, true)) { printf("Error creating output path: %s\n", $outputPath); } foreach (array_slice($argv, 1) as $inputFile) { if ( ! is_readable($inputFile) || ! is_file($inputFile)) { printf("Error reading %s\n", $inputFile); continue; } $test = json_decode(file_get_contents($inputFile), true); if (json_last_error() !== JSON_ERROR_NONE) { printf("Error decoding %s: %s\n", $inputFile, json_last_error_msg()); continue; } if ( ! isset($test['description'])) { printf("Skipping test file without \"description\" field: %s\n", $inputFile); continue; } if ( ! empty($test['valid'])) { foreach ($test['valid'] as $i => $case) { $outputFile = sprintf('%s-valid-%03d.phpt', pathinfo($inputFile, PATHINFO_FILENAME), $i + 1); try { $output = renderPhpt(getParamsForValid($test, $case), $expectedFailures, $for64bitOnly); } catch (Exception $e) { printf("Error processing valid[%d] in %s: %s\n", $i, $inputFile, $e->getMessage()); continue; } if (false === file_put_contents($outputPath . '/' . $outputFile, $output)) { printf("Error writing valid[%d] in %s\n", $i, $inputFile); continue; } } } if ( ! empty($test['decodeErrors'])) { foreach ($test['decodeErrors'] as $i => $case) { $outputFile = sprintf('%s-decodeError-%03d.phpt', pathinfo($inputFile, PATHINFO_FILENAME), $i + 1); try { $output = renderPhpt(getParamsForDecodeError($test, $case), $expectedFailures, $for64bitOnly); } catch (Exception $e) { printf("Error processing decodeErrors[%d] in %s: %s\n", $i, $inputFile, $e->getMessage()); continue; } if (false === file_put_contents($outputPath . '/' . $outputFile, $output)) { printf("Error writing decodeErrors[%d] in %s\n", $i, $inputFile); continue; } } } if ( ! 
empty($test['parseErrors'])) { foreach ($test['parseErrors'] as $i => $case) { $outputFile = sprintf('%s-parseError-%03d.phpt', pathinfo($inputFile, PATHINFO_FILENAME), $i + 1); try { $output = renderPhpt(getParamsForParseError($test, $case), $expectedFailures, $for64bitOnly); } catch (Exception $e) { printf("Error processing parseErrors[%d] in %s: %s\n", $i, $inputFile, $e->getMessage()); continue; } if (false === file_put_contents($outputPath . '/' . $outputFile, $output)) { printf("Error writing parseErrors[%d] in %s\n", $i, $inputFile); continue; } } } } function getParamsForValid(array $test, array $case) { foreach (['description', 'canonical_bson', 'canonical_extjson'] as $field) { if (!isset($case[$field])) { throw new InvalidArgumentException(sprintf('Missing "%s" field', $field)); } } $code = ''; $expect = ''; $lossy = isset($case['lossy']) ? (boolean) $case['lossy'] : false; $canonicalBson = $case['canonical_bson']; $expectedCanonicalBson = strtolower($canonicalBson); $code .= sprintf('$canonicalBson = hex2bin(%s);', var_export($canonicalBson, true)) . "\n"; if (isset($case['degenerate_bson'])) { $degenerateBson = $case['degenerate_bson']; $expectedDegenerateBson = strtolower($degenerateBson); $code .= sprintf('$degenerateBson = hex2bin(%s);', var_export($degenerateBson, true)) . "\n"; } if (isset($case['converted_bson'])) { $convertedBson = $case['converted_bson']; $expectedConvertedBson = strtolower($convertedBson); $code .= sprintf('$convertedBson = hex2bin(%s);', var_export($convertedBson, true)) . "\n"; } $canonicalExtJson = $case['canonical_extjson']; $expectedCanonicalExtJson = json_canonicalize($canonicalExtJson); $code .= sprintf('$canonicalExtJson = %s;', var_export($canonicalExtJson, true)) . "\n"; if (isset($case['relaxed_extjson'])) { $relaxedExtJson = $case['relaxed_extjson']; $expectedRelaxedExtJson = json_canonicalize($relaxedExtJson); $code .= sprintf('$relaxedExtJson = %s;', var_export($relaxedExtJson, true)) . "\n"; } if (isset($case['degenerate_extjson'])) { $degenerateExtJson = $case['degenerate_extjson']; $expectedDegenerateExtJson = json_canonicalize($degenerateExtJson); $code .= sprintf('$degenerateExtJson = %s;', var_export($degenerateExtJson, true)) . "\n"; } if (isset($case['converted_extjson'])) { $convertedExtJson = $case['converted_extjson']; $expectedConvertedExtJson = json_canonicalize($convertedExtJson); $code .= sprintf('$convertedExtJson = %s;', var_export($convertedExtJson, true)) . "\n"; } $code .= "\n// Canonical BSON -> Native -> Canonical BSON \n"; $code .= 'echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";' . "\n"; $expect .= $expectedCanonicalBson . "\n"; $code .= "\n// Canonical BSON -> Canonical extJSON \n"; $code .= 'echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";' . "\n";; $expect .= $expectedCanonicalExtJson . "\n"; if (isset($relaxedExtJson)) { $code .= "\n// Canonical BSON -> Relaxed extJSON \n"; $code .= 'echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n";' . "\n";; $expect .= $expectedRelaxedExtJson . "\n"; } if (!$lossy) { $code .= "\n// Canonical extJSON -> Canonical BSON \n"; $code .= 'echo bin2hex(fromJSON($canonicalExtJson)), "\n";' . "\n"; $expect .= $expectedCanonicalBson . "\n"; } if (isset($degenerateBson)) { $code .= "\n// Degenerate BSON -> Native -> Canonical BSON \n"; $code .= 'echo bin2hex(fromPHP(toPHP($degenerateBson))), "\n";' . "\n"; $expect .= $expectedCanonicalBson . 
"\n"; $code .= "\n// Degenerate BSON -> Canonical extJSON \n"; $code .= 'echo json_canonicalize(toCanonicalExtendedJSON($degenerateBson)), "\n";' . "\n";; $expect .= $expectedCanonicalExtJson . "\n"; if (isset($relaxedExtJson)) { $code .= "\n// Degenerate BSON -> Relaxed extJSON \n"; $code .= 'echo json_canonicalize(toRelaxedExtendedJSON($degenerateBson)), "\n";' . "\n";; $expect .= $expectedRelaxedExtJson . "\n"; } } if (isset($degenerateExtJson) && !$lossy) { $code .= "\n// Degenerate extJSON -> Canonical BSON \n"; $code .= 'echo bin2hex(fromJSON($degenerateExtJson)), "\n";' . "\n"; $expect .= $expectedCanonicalBson . "\n"; } if (isset($relaxedExtJson)) { $code .= "\n// Relaxed extJSON -> BSON -> Relaxed extJSON \n"; $code .= 'echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n";' . "\n"; $expect .= $expectedRelaxedExtJson . "\n"; } return [ '%NAME%' => sprintf('%s: %s', trim($test['description']), trim($case['description'])), '%CODE%' => trim($code), '%EXPECT%' => trim($expect), ]; } function getParamsForDecodeError(array $test, array $case) { foreach (['description', 'bson'] as $field) { if (!isset($case[$field])) { throw new InvalidArgumentException(sprintf('Missing "%s" field', $field)); } } $code = sprintf('$bson = hex2bin(%s);', var_export($case['bson'], true)) . "\n\n"; $code .= "throws(function() use (\$bson) {\n"; $code .= " var_dump(toPHP(\$bson));\n"; $code .= "}, 'MongoDB\Driver\Exception\UnexpectedValueException');"; /* We do not test for the exception message, since that may differ based on * the nature of the decoding error. */ $expect = "OK: Got MongoDB\Driver\Exception\UnexpectedValueException"; return [ '%NAME%' => sprintf('%s: %s', trim($test['description']), trim($case['description'])), '%CODE%' => trim($code), '%EXPECT%' => trim($expect), ]; } function getParamsForParseError(array $test, array $case) { foreach (['description', 'string'] as $field) { if (!isset($case[$field])) { throw new InvalidArgumentException(sprintf('Missing "%s" field', $field)); } } $code = ''; $expect = ''; switch ($test['bson_type']) { case '0x00': // Top-level document $code = "throws(function() {\n"; $code .= sprintf(" fromJSON(%s);\n", var_export($case['string'], true)); $code .= "}, 'MongoDB\Driver\Exception\UnexpectedValueException');"; /* We do not test for the exception message, since that may differ * based on the nature of the parse error. */ $expect = "OK: Got MongoDB\Driver\Exception\UnexpectedValueException"; break; case '0x13': // Decimal128 $code = "throws(function() {\n"; $code .= sprintf(" new MongoDB\BSON\Decimal128(%s);\n", var_export($case['string'], true)); $code .= "}, 'MongoDB\Driver\Exception\InvalidArgumentException');"; /* We do not test for the exception message, since that may differ * based on the nature of the parse error. */ $expect = "OK: Got MongoDB\Driver\Exception\InvalidArgumentException"; break; default: throw new UnexpectedValueException(sprintf("Parse errors not supported for BSON type: %s", $test['bson_type'])); } return [ '%NAME%' => sprintf('%s: %s', trim($test['description']), trim($case['description'])), '%CODE%' => trim($code), '%EXPECT%' => trim($expect), ]; } function renderPhpt(array $params, array $expectedFailures, array $for64bitOnly) { $params['%XFAIL%'] = isset($expectedFailures[$params['%NAME%']]) ? "--XFAIL--\n" . $expectedFailures[$params['%NAME%']] . "\n" : ''; $params['%SKIPIF%'] = isset($for64bitOnly[$params['%NAME%']]) ? "--SKIPIF--\n" . "" . 
"\n" : ''; $template = <<< 'TEMPLATE' --TEST-- %NAME% %XFAIL%%SKIPIF%--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- %EXPECT% ===DONE=== TEMPLATE; return str_replace(array_keys($params), array_values($params), $template); } mongodb-1.6.1/scripts/list-servers.php0000644000076500000240000000034013572250757017320 0ustar alcaeusstaff $uri) { printf("%-20s \t %s\n", $serverid, $uri); } mongodb-1.6.1/scripts/run-tests-on.sh0000644000076500000240000000046113572250757017063 0ustar alcaeusstaff#!/bin/sh VMNAME=$1 vagrant status $VMNAME | grep -q "$VMNAME.*running" if test $? -eq 0; then vagrant provision $VMNAME > .$VMNAME else vagrant up $VMNAME > .$VMNAME fi cat .tests | grep -q -E "FAIL|WARN" if test $? -eq 0; then echo "$VMNAME FAILED" cat .tests exit 2 else echo "$VMNAME OK" fi mongodb-1.6.1/scripts/start-servers.php0000644000076500000240000002047413572250757017514 0ustar alcaeusstaff [ "scripts/presets/standalone.json", "scripts/presets/standalone-30.json", "scripts/presets/standalone-ssl.json", "scripts/presets/standalone-auth.json", "scripts/presets/standalone-x509.json", "scripts/presets/standalone-plain.json", ], "replicasets" => [ "scripts/presets/replicaset.json", "scripts/presets/replicaset-30.json", "scripts/presets/replicaset-dns.json", ], ]; function make_ctx($preset, $method = "POST") { $opts = [ "http" => [ "timeout" => 60, "method" => $method, "header" => "Accept: application/json\r\n" . "Content-type: application/x-www-form-urlencoded", "content" => json_encode(array("preset" => $preset)), "ignore_errors" => true, ], ]; $ctx = stream_context_create($opts); return $ctx; } function failed() { printf("\nLast operation took: %.2f secs\n", lap()); exit(1); } function mo_http_request($uri, $context) { global $http_response_header; $result = file_get_contents($uri, false, $context); if ($result === false) { printf("HTTP request to %s failed:\n", $uri); var_dump($http_response_header); failed(); } return $result; } function json_decode_or_fail(...$args) { $decoded = json_decode(...$args); if ($decoded === NULL && json_last_error() !== JSON_ERROR_NONE) { printf("\njson_decode() failed: %s\n", json_last_error_msg()); var_dump(func_get_arg(0)); failed(); } return $decoded; } printf("Cleaning out previous processes, if any "); lap(); /* Remove all pre-existing ReplicaSets */ $replicasets = mo_http_request(getMOUri() . "/replica_sets", make_ctx(getMOPresetBase(), "GET")); $replicasets = json_decode_or_fail($replicasets, true); foreach($replicasets["replica_sets"] as $replicaset) { $uri = getMOUri() . "/replica_sets/" . $replicaset["id"]; mo_http_request($uri, make_ctx(getMOPresetBase(), "DELETE")); echo "."; } echo " "; /* Remove all pre-existing servers */ $servers = mo_http_request(getMOUri() . "/servers", make_ctx(getMOPresetBase(), "GET")); $servers = json_decode_or_fail($servers, true); foreach($servers["servers"] as $server) { $uri = getMOUri() . "/servers/" . $server["id"]; mo_http_request($uri, make_ctx(getMOPresetBase(), "DELETE")); echo "."; } printf("\t(took: %.2f secs)\n", lap()); foreach($PRESETS["standalone"] as $preset) { lap(); $json = json_decode_or_fail(file_get_contents($preset), true); printf("Starting %-20s ... ", $json["id"]); $result = mo_http_request(getMOUri() . "/servers", make_ctx(getMOPresetBase() . 
$preset)); $decode = json_decode_or_fail($result, true); if (!isset($decode["id"])) { printf("\"id\" field not found in server response:\n%s\n", $decode); failed(); } $SERVERS[$decode["id"]] = isset($decode["mongodb_auth_uri"]) ? $decode["mongodb_auth_uri"] : $decode["mongodb_uri"]; printf("'%s'\t(took: %.2f secs)\n", $SERVERS[$decode["id"]], lap()); } echo "---\n"; foreach($PRESETS["replicasets"] as $preset) { lap(); $json = json_decode_or_fail(file_get_contents($preset), true); printf("Starting %-20s ... ", $json["id"]); $result = mo_http_request(getMOUri() . "/replica_sets", make_ctx(getMOPresetBase() . $preset)); $decode = json_decode_or_fail($result, true); if (!isset($decode["id"])) { printf("\"id\" field not found in replica set response:\n%s\n", $decode); failed(); } $SERVERS[$decode["id"]] = isset($decode["mongodb_auth_uri"]) ? $decode["mongodb_auth_uri"] : $decode["mongodb_uri"]; printf("'%s'\t(took: %.2f secs)\n", $SERVERS[$decode["id"]], lap()); } file_put_contents($FILENAME, json_encode($SERVERS, JSON_PRETTY_PRINT)); /* wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-AUTH wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-26 wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/RS-two wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/RS-arbiter wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-PLAIN wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-X509 wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/RS-one wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers/STANDALONE-SSL wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets wget --body-data='' --method='DELETE' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets/REPLICASET wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/ wget --body-data='' --method='GET' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='' --method='GET' 
--header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-26.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-ssl.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-auth.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-x509.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/standalone-plain.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/servers wget --body-data='{"preset":"\/phongo\/\/scripts\/presets\/replicaset.json"}' --method='POST' --header='Accept: application/json' --header='Content-type: application/x-www-form-urlencoded' http://192.168.112.10:8889/replica_sets */ mongodb-1.6.1/src/BSON/Binary.c0000644000076500000240000003624413572250757015412 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #define PHONGO_BINARY_UUID_SIZE 16 zend_class_entry* php_phongo_binary_ce; /* Initialize the object and return whether it was successful. An exception will * be thrown on error. 
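 * This is the common initializer: Binary::__construct() calls it directly, and
 * php_phongo_binary_init_from_hash() routes __set_state()/unserialize() through
 * it, so every construction path shares the same type-range and UUID-length checks.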
*/ static bool php_phongo_binary_init(php_phongo_binary_t* intern, const char* data, phongo_zpp_char_len data_len, phongo_long type TSRMLS_DC) /* {{{ */ { if (type < 0 || type > UINT8_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected type to be an unsigned 8-bit integer, %" PHONGO_LONG_FORMAT " given", type); return false; } if ((type == BSON_SUBTYPE_UUID_DEPRECATED || type == BSON_SUBTYPE_UUID) && data_len != PHONGO_BINARY_UUID_SIZE) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected UUID length to be %d bytes, %d given", PHONGO_BINARY_UUID_SIZE, data_len); return false; } intern->data = estrndup(data, data_len); intern->data_len = data_len; intern->type = (uint8_t) type; return true; } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_binary_init_from_hash(php_phongo_binary_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval *data, *type; if ((data = zend_hash_str_find(props, "data", sizeof("data") - 1)) && Z_TYPE_P(data) == IS_STRING && (type = zend_hash_str_find(props, "type", sizeof("type") - 1)) && Z_TYPE_P(type) == IS_LONG) { return php_phongo_binary_init(intern, Z_STRVAL_P(data), Z_STRLEN_P(data), Z_LVAL_P(type) TSRMLS_CC); } #else zval **data, **type; if (zend_hash_find(props, "data", sizeof("data"), (void**) &data) == SUCCESS && Z_TYPE_PP(data) == IS_STRING && zend_hash_find(props, "type", sizeof("type"), (void**) &type) == SUCCESS && Z_TYPE_PP(type) == IS_LONG) { return php_phongo_binary_init(intern, Z_STRVAL_PP(data), Z_STRLEN_PP(data), Z_LVAL_PP(type) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"data\" string and \"type\" integer fields", ZSTR_VAL(php_phongo_binary_ce->name)); return false; } /* }}} */ /* {{{ proto void MongoDB\BSON\Binary::__construct(string $data, int $type) Construct a new BSON binary type */ static PHP_METHOD(Binary, __construct) { php_phongo_binary_t* intern; zend_error_handling error_handling; char* data; phongo_zpp_char_len data_len; phongo_long type; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_BINARY_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sl", &data, &data_len, &type) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); php_phongo_binary_init(intern, data, data_len, type TSRMLS_CC); } /* }}} */ /* {{{ proto void MongoDB\BSON\Binary::__set_state(array $properties) */ static PHP_METHOD(Binary, __set_state) { php_phongo_binary_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_binary_ce); intern = Z_BINARY_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_binary_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\Binary::__toString() Return the Binary's data string. 
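 * (Illustrative PHP-level usage, not part of this file:
 *   echo new MongoDB\BSON\Binary('foo', MongoDB\BSON\Binary::TYPE_GENERIC);
 * prints "foo", since the raw data string is returned unmodified.)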
*/ static PHP_METHOD(Binary, __toString) { php_phongo_binary_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_BINARY_OBJ_P(getThis()); PHONGO_RETURN_STRINGL(intern->data, intern->data_len); } /* }}} */ /* {{{ proto string MongoDB\BSON\Binary::getData() */ static PHP_METHOD(Binary, getData) { php_phongo_binary_t* intern; intern = Z_BINARY_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETURN_STRINGL(intern->data, intern->data_len); } /* }}} */ /* {{{ proto integer MongoDB\BSON\Binary::getType() */ static PHP_METHOD(Binary, getType) { php_phongo_binary_t* intern; intern = Z_BINARY_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->type); } /* }}} */ /* {{{ proto array MongoDB\BSON\Binary::jsonSerialize() */ static PHP_METHOD(Binary, jsonSerialize) { php_phongo_binary_t* intern; char type[3]; int type_len; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_BINARY_OBJ_P(getThis()); array_init_size(return_value, 2); #if PHP_VERSION_ID >= 70000 { zend_string* data = php_base64_encode((unsigned char*) intern->data, intern->data_len); ADD_ASSOC_STRINGL(return_value, "$binary", ZSTR_VAL(data), ZSTR_LEN(data)); zend_string_free(data); } #else { int data_len = 0; unsigned char* data = php_base64_encode((unsigned char*) intern->data, intern->data_len, &data_len); ADD_ASSOC_STRINGL(return_value, "$binary", (char*) data, data_len); efree(data); } #endif type_len = snprintf(type, sizeof(type), "%02x", intern->type); ADD_ASSOC_STRINGL(return_value, "$type", type, type_len); } /* }}} */ /* {{{ proto string MongoDB\BSON\Binary::serialize() */ static PHP_METHOD(Binary, serialize) { php_phongo_binary_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; intern = Z_BINARY_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 2); ADD_ASSOC_STRINGL(&retval, "data", intern->data, intern->data_len); ADD_ASSOC_LONG_EX(&retval, "type", intern->type); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 2); ADD_ASSOC_STRINGL(retval, "data", intern->data, intern->data_len); ADD_ASSOC_LONG_EX(retval, "type", intern->type); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\Binary::unserialize(string $serialized) */ static PHP_METHOD(Binary, unserialize) { php_phongo_binary_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_BINARY_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, 
"%s unserialization failed", ZSTR_VAL(php_phongo_binary_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_binary_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_binary_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\Binary function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Binary___construct, 0, 0, 2) ZEND_ARG_INFO(0, data) ZEND_ARG_INFO(0, type) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Binary___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Binary_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Binary_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_binary_me[] = { /* clang-format off */ PHP_ME(Binary, __construct, ai_Binary___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Binary, __set_state, ai_Binary___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(Binary, __toString, ai_Binary_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Binary, jsonSerialize, ai_Binary_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Binary, serialize, ai_Binary_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Binary, unserialize, ai_Binary_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Binary, getData, ai_Binary_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Binary, getType, ai_Binary_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Binary object handlers */ static zend_object_handlers php_phongo_handler_binary; static void php_phongo_binary_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_binary_t* intern = Z_OBJ_BINARY(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->data) { efree(intern->data); } if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_binary_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_binary_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_binary_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_binary; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_binary_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_binary; return retval; } #endif } /* }}} */ static int php_phongo_binary_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_binary_t *intern1, *intern2; intern1 = Z_BINARY_OBJ_P(o1); intern2 = Z_BINARY_OBJ_P(o2); /* MongoDB compares binary types first by the data length, then by the type * byte, and finally by the binary data itself. */ if (intern1->data_len != intern2->data_len) { return intern1->data_len < intern2->data_len ? -1 : 1; } if (intern1->type != intern2->type) { return intern1->type < intern2->type ? 
-1 : 1; } return zend_binary_strcmp(intern1->data, intern1->data_len, intern2->data, intern2->data_len); } /* }}} */ static HashTable* php_phongo_binary_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_BINARY_OBJ_P(object)->properties; } /* }}} */ static HashTable* php_phongo_binary_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_binary_t* intern; HashTable* props; intern = Z_BINARY_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->data) { return props; } #if PHP_VERSION_ID >= 70000 { zval data, type; ZVAL_STRINGL(&data, intern->data, intern->data_len); zend_hash_str_update(props, "data", sizeof("data") - 1, &data); ZVAL_LONG(&type, intern->type); zend_hash_str_update(props, "type", sizeof("type") - 1, &type); } #else { zval *data, *type; MAKE_STD_ZVAL(data); ZVAL_STRINGL(data, intern->data, intern->data_len, 1); zend_hash_update(props, "data", sizeof("data"), &data, sizeof(data), NULL); MAKE_STD_ZVAL(type); ZVAL_LONG(type, intern->type); zend_hash_update(props, "type", sizeof("type"), &type, sizeof(type), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_binary_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_binary_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_binary_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_binary_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_binary_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Binary", php_phongo_binary_me); php_phongo_binary_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_binary_ce->create_object = php_phongo_binary_create_object; PHONGO_CE_FINAL(php_phongo_binary_ce); zend_class_implements(php_phongo_binary_ce TSRMLS_CC, 1, php_phongo_binary_interface_ce); zend_class_implements(php_phongo_binary_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_binary_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_binary_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_binary, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_binary.compare_objects = php_phongo_binary_compare_objects; php_phongo_handler_binary.get_debug_info = php_phongo_binary_get_debug_info; php_phongo_handler_binary.get_gc = php_phongo_binary_get_gc; php_phongo_handler_binary.get_properties = php_phongo_binary_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_binary.free_obj = php_phongo_binary_free_object; php_phongo_handler_binary.offset = XtOffsetOf(php_phongo_binary_t, std); #endif zend_declare_class_constant_long(php_phongo_binary_ce, ZEND_STRL("TYPE_GENERIC"), BSON_SUBTYPE_BINARY TSRMLS_CC); zend_declare_class_constant_long(php_phongo_binary_ce, ZEND_STRL("TYPE_FUNCTION"), BSON_SUBTYPE_FUNCTION TSRMLS_CC); zend_declare_class_constant_long(php_phongo_binary_ce, ZEND_STRL("TYPE_OLD_BINARY"), BSON_SUBTYPE_BINARY_DEPRECATED TSRMLS_CC); zend_declare_class_constant_long(php_phongo_binary_ce, ZEND_STRL("TYPE_OLD_UUID"), BSON_SUBTYPE_UUID_DEPRECATED TSRMLS_CC); zend_declare_class_constant_long(php_phongo_binary_ce, ZEND_STRL("TYPE_UUID"), BSON_SUBTYPE_UUID TSRMLS_CC); zend_declare_class_constant_long(php_phongo_binary_ce, ZEND_STRL("TYPE_MD5"), BSON_SUBTYPE_MD5 TSRMLS_CC); 
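	/* Subtype values below 0x80 are reserved by the BSON specification;
	 * BSON_SUBTYPE_USER (0x80) is the first value of the user-defined
	 * range (0x80-0xFF). */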
zend_declare_class_constant_long(php_phongo_binary_ce, ZEND_STRL("TYPE_USER_DEFINED"), BSON_SUBTYPE_USER TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/BinaryInterface.c0000644000076500000240000000320113572250757017216 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_binary_interface_ce; /* {{{ MongoDB\BSON\BinaryInterface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_BinaryInterface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_binary_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(BinaryInterface, getData, ai_BinaryInterface_void) ZEND_ABSTRACT_ME(BinaryInterface, getType, ai_BinaryInterface_void) ZEND_ABSTRACT_ME(BinaryInterface, __toString, ai_BinaryInterface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_binary_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "BinaryInterface", php_phongo_binary_interface_me); php_phongo_binary_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/DBPointer.c0000644000076500000240000003003013572250757015777 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_dbpointer_ce; /* Initialize the object and return whether it was successful. An exception will * be thrown on error. 
*/ static bool php_phongo_dbpointer_init(php_phongo_dbpointer_t* intern, const char* ref, phongo_zpp_char_len ref_len, const char* id, phongo_zpp_char_len id_len TSRMLS_DC) /* {{{ */ { if (strlen(ref) != (size_t) ref_len) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Ref cannot contain null bytes"); return false; } if (!bson_oid_is_valid(id, id_len)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing ObjectId string: %s", id); return false; } intern->ref = estrndup(ref, ref_len); intern->ref_len = ref_len; strncpy(intern->id, id, sizeof(intern->id)); return true; } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_dbpointer_init_from_hash(php_phongo_dbpointer_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval *ref, *id; if ((ref = zend_hash_str_find(props, "ref", sizeof("ref") - 1)) && Z_TYPE_P(ref) == IS_STRING && (id = zend_hash_str_find(props, "id", sizeof("id") - 1)) && Z_TYPE_P(id) == IS_STRING) { return php_phongo_dbpointer_init(intern, Z_STRVAL_P(ref), Z_STRLEN_P(ref), Z_STRVAL_P(id), Z_STRLEN_P(id) TSRMLS_CC); } #else zval **ref, **id; if (zend_hash_find(props, "ref", sizeof("ref"), (void**) &ref) == SUCCESS && Z_TYPE_PP(ref) == IS_STRING && zend_hash_find(props, "id", sizeof("id"), (void**) &id) == SUCCESS && Z_TYPE_PP(id) == IS_STRING) { return php_phongo_dbpointer_init(intern, Z_STRVAL_PP(ref), Z_STRLEN_PP(ref), Z_STRVAL_PP(id), Z_STRLEN_PP(id) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"ref\" and \"id\" string fields", ZSTR_VAL(php_phongo_dbpointer_ce->name)); return false; } /* }}} */ /* {{{ proto string MongoDB\BSON\DBPointer::__toString() Return the DBPointer's namespace string and ObjectId. 
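   The returned string is formatted as "[<ref>/<id>]".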
*/ static PHP_METHOD(DBPointer, __toString) { php_phongo_dbpointer_t* intern; char* retval; int retval_len; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_DBPOINTER_OBJ_P(getThis()); retval_len = spprintf(&retval, 0, "[%s/%s]", intern->ref, intern->id); PHONGO_RETVAL_STRINGL(retval, retval_len); efree(retval); } /* }}} */ /* {{{ proto array MongoDB\BSON\Symbol::jsonSerialize() */ static PHP_METHOD(DBPointer, jsonSerialize) { php_phongo_dbpointer_t* intern; ZVAL_RETVAL_TYPE zdb_pointer; ZVAL_RETVAL_TYPE zoid; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_DBPOINTER_OBJ_P(getThis()); #if PHP_VERSION_ID >= 70000 array_init_size(&zdb_pointer, 2); array_init_size(&zoid, 1); ADD_ASSOC_STRINGL(&zdb_pointer, "$ref", intern->ref, intern->ref_len); ADD_ASSOC_STRING(&zoid, "$oid", intern->id); ADD_ASSOC_ZVAL(&zdb_pointer, "$id", &zoid); array_init_size(return_value, 1); ADD_ASSOC_ZVAL(return_value, "$dbPointer", &zdb_pointer); #else ALLOC_INIT_ZVAL(zdb_pointer); ALLOC_INIT_ZVAL(zoid); array_init_size(zdb_pointer, 2); array_init_size(zoid, 1); ADD_ASSOC_STRINGL(zdb_pointer, "$ref", intern->ref, intern->ref_len); ADD_ASSOC_STRING(zoid, "$oid", intern->id); ADD_ASSOC_ZVAL(zdb_pointer, "$id", zoid); array_init_size(return_value, 1); ADD_ASSOC_ZVAL(return_value, "$dbPointer", zdb_pointer); #endif } /* }}} */ /* {{{ proto string MongoDB\BSON\DBPointer::serialize() */ static PHP_METHOD(DBPointer, serialize) { php_phongo_dbpointer_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; intern = Z_DBPOINTER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 2); ADD_ASSOC_STRINGL(&retval, "ref", intern->ref, intern->ref_len); ADD_ASSOC_STRING(&retval, "id", intern->id); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 2); ADD_ASSOC_STRINGL(retval, "ref", intern->ref, intern->ref_len); ADD_ASSOC_STRING(retval, "id", intern->id); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\DBPointer::unserialize(string $serialized) */ static PHP_METHOD(DBPointer, unserialize) { php_phongo_dbpointer_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_DBPOINTER_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_dbpointer_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_dbpointer_init_from_hash(intern, HASH_OF(&props) 
TSRMLS_CC); #else php_phongo_dbpointer_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\DBPointer function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_DBPointer_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_DBPointer_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_dbpointer_me[] = { /* clang-format off */ /* __set_state intentionally missing */ PHP_ME(DBPointer, __toString, ai_DBPointer_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(DBPointer, jsonSerialize, ai_DBPointer_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(DBPointer, serialize, ai_DBPointer_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(DBPointer, unserialize, ai_DBPointer_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_DBPointer_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\DBPointer object handlers */ static zend_object_handlers php_phongo_handler_dbpointer; static void php_phongo_dbpointer_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_dbpointer_t* intern = Z_OBJ_DBPOINTER(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->ref) { efree(intern->ref); } if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ phongo_create_object_retval php_phongo_dbpointer_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_dbpointer_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_dbpointer_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_dbpointer; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_dbpointer_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_dbpointer; return retval; } #endif } /* }}} */ static int php_phongo_dbpointer_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_dbpointer_t *intern1, *intern2; int retval; intern1 = Z_DBPOINTER_OBJ_P(o1); intern2 = Z_DBPOINTER_OBJ_P(o2); retval = strcmp(intern1->ref, intern2->ref); if (retval != 0) { return retval; } return strcmp(intern1->id, intern2->id); } /* }}} */ static HashTable* php_phongo_dbpointer_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_DBPOINTER_OBJ_P(object)->properties; } /* }}} */ HashTable* php_phongo_dbpointer_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_dbpointer_t* intern; HashTable* props; intern = Z_DBPOINTER_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->ref) { return props; } #if PHP_VERSION_ID >= 70000 { zval ref, id; ZVAL_STRING(&ref, intern->ref); ZVAL_STRING(&id, intern->id); zend_hash_str_update(props, "ref", sizeof("ref") - 1, &ref); zend_hash_str_update(props, "id", sizeof("id") - 1, &id); } #else { zval *ref, *id; MAKE_STD_ZVAL(ref); ZVAL_STRING(ref, intern->ref, 1); MAKE_STD_ZVAL(id); ZVAL_STRING(id, intern->id, 1); zend_hash_update(props, "ref", sizeof("ref"), &ref, sizeof(ref), NULL); zend_hash_update(props, "id", sizeof("id"), &id, sizeof(id), NULL); } #endif return 
props; } /* }}} */ static HashTable* php_phongo_dbpointer_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_dbpointer_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_dbpointer_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_dbpointer_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_dbpointer_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "DBPointer", php_phongo_dbpointer_me); php_phongo_dbpointer_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_dbpointer_ce->create_object = php_phongo_dbpointer_create_object; PHONGO_CE_FINAL(php_phongo_dbpointer_ce); zend_class_implements(php_phongo_dbpointer_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_dbpointer_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_dbpointer_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_dbpointer, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_dbpointer.compare_objects = php_phongo_dbpointer_compare_objects; php_phongo_handler_dbpointer.get_debug_info = php_phongo_dbpointer_get_debug_info; php_phongo_handler_dbpointer.get_gc = php_phongo_dbpointer_get_gc; php_phongo_handler_dbpointer.get_properties = php_phongo_dbpointer_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_dbpointer.free_obj = php_phongo_dbpointer_free_object; php_phongo_handler_dbpointer.offset = XtOffsetOf(php_phongo_dbpointer_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Decimal128.c0000644000076500000240000002746513572250757015764 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_decimal128_ce; /* Initialize the object and return whether it was successful. An exception will * be thrown on error. */ static bool php_phongo_decimal128_init(php_phongo_decimal128_t* intern, const char* value TSRMLS_DC) /* {{{ */ { if (!bson_decimal128_from_string(value, &intern->decimal)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing Decimal128 string: %s", value); return false; } intern->initialized = true; return true; } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. 
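 * The props hash must contain a "dec" field holding a decimal string that
 * bson_decimal128_from_string() can parse.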
*/ static bool php_phongo_decimal128_init_from_hash(php_phongo_decimal128_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval* dec; if ((dec = zend_hash_str_find(props, "dec", sizeof("dec") - 1)) && Z_TYPE_P(dec) == IS_STRING) { return php_phongo_decimal128_init(intern, Z_STRVAL_P(dec) TSRMLS_CC); } #else zval** dec; if (zend_hash_find(props, "dec", sizeof("dec"), (void**) &dec) == SUCCESS && Z_TYPE_PP(dec) == IS_STRING) { return php_phongo_decimal128_init(intern, Z_STRVAL_PP(dec) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"dec\" string field", ZSTR_VAL(php_phongo_decimal128_ce->name)); return false; } /* }}} */ /* {{{ proto void MongoDB\BSON\Decimal128::__construct(string $value) Construct a new BSON Decimal128 type */ static PHP_METHOD(Decimal128, __construct) { php_phongo_decimal128_t* intern; zend_error_handling error_handling; char* value; phongo_zpp_char_len value_len; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_DECIMAL128_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &value, &value_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); php_phongo_decimal128_init(intern, value TSRMLS_CC); } /* }}} */ /* {{{ proto void MongoDB\BSON\Decimal128::__set_state(array $properties) */ static PHP_METHOD(Decimal128, __set_state) { php_phongo_decimal128_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_decimal128_ce); intern = Z_DECIMAL128_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_decimal128_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\Decimal128::__toString() */ static PHP_METHOD(Decimal128, __toString) { php_phongo_decimal128_t* intern; char outbuf[BSON_DECIMAL128_STRING]; intern = Z_DECIMAL128_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } bson_decimal128_to_string(&intern->decimal, outbuf); PHONGO_RETURN_STRING(outbuf); } /* }}} */ /* {{{ proto array MongoDB\BSON\Decimal128::jsonSerialize() */ static PHP_METHOD(Decimal128, jsonSerialize) { php_phongo_decimal128_t* intern; char outbuf[BSON_DECIMAL128_STRING] = ""; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_DECIMAL128_OBJ_P(getThis()); array_init_size(return_value, 1); bson_decimal128_to_string(&intern->decimal, outbuf); ADD_ASSOC_STRING(return_value, "$numberDecimal", outbuf); } /* }}} */ /* {{{ proto string MongoDB\BSON\Decimal128::serialize() */ static PHP_METHOD(Decimal128, serialize) { php_phongo_decimal128_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; char outbuf[BSON_DECIMAL128_STRING]; intern = Z_DECIMAL128_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } bson_decimal128_to_string(&intern->decimal, outbuf); #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 1); ADD_ASSOC_STRING(&retval, "dec", outbuf); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 1); ADD_ASSOC_STRING(retval, "dec", outbuf); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); 
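	/* The serialized payload is the var_serialize()d array containing the
	 * "dec" string; release the temporary array now that its contents have
	 * been copied into the return value. */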
zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\Decimal128::unserialize(string $serialized) */ static PHP_METHOD(Decimal128, unserialize) { php_phongo_decimal128_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_DECIMAL128_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_decimal128_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_decimal128_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_decimal128_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\Decimal128 function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Decimal128___construct, 0, 0, 1) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Decimal128___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Decimal128_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Decimal128_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_decimal128_me[] = { /* clang-format off */ PHP_ME(Decimal128, __construct, ai_Decimal128___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Decimal128, __set_state, ai_Decimal128___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(Decimal128, __toString, ai_Decimal128_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Decimal128, jsonSerialize, ai_Decimal128_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Decimal128, serialize, ai_Decimal128_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Decimal128, unserialize, ai_Decimal128_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Decimal128 object handlers */ static zend_object_handlers php_phongo_handler_decimal128; static void php_phongo_decimal128_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_decimal128_t* intern = Z_OBJ_DECIMAL128(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_decimal128_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_decimal128_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_decimal128_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_decimal128; return &intern->std; #else { zend_object_value retval; 
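		/* Pre-PHP 7 object creation: store the object with a custom free
		 * handler and attach the Decimal128 handler table. */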
retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_decimal128_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_decimal128; return retval; } #endif } /* }}} */ static HashTable* php_phongo_decimal128_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_DECIMAL128_OBJ_P(object)->properties; } /* }}} */ static HashTable* php_phongo_decimal128_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_decimal128_t* intern; HashTable* props; char outbuf[BSON_DECIMAL128_STRING] = ""; intern = Z_DECIMAL128_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 1); if (!intern->initialized) { return props; } bson_decimal128_to_string(&intern->decimal, outbuf); #if PHP_VERSION_ID >= 70000 { zval dec; ZVAL_STRING(&dec, outbuf); zend_hash_str_update(props, "dec", sizeof("dec") - 1, &dec); } #else { zval* dec; MAKE_STD_ZVAL(dec); ZVAL_STRING(dec, outbuf, 1); zend_hash_update(props, "dec", sizeof("dec"), &dec, sizeof(dec), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_decimal128_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_decimal128_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_decimal128_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_decimal128_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_decimal128_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Decimal128", php_phongo_decimal128_me); php_phongo_decimal128_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_decimal128_ce->create_object = php_phongo_decimal128_create_object; PHONGO_CE_FINAL(php_phongo_decimal128_ce); zend_class_implements(php_phongo_decimal128_ce TSRMLS_CC, 1, php_phongo_decimal128_interface_ce); zend_class_implements(php_phongo_decimal128_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_decimal128_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_decimal128_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_decimal128, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_decimal128.get_debug_info = php_phongo_decimal128_get_debug_info; php_phongo_handler_decimal128.get_gc = php_phongo_decimal128_get_gc; php_phongo_handler_decimal128.get_properties = php_phongo_decimal128_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_decimal128.free_obj = php_phongo_decimal128_free_object; php_phongo_handler_decimal128.offset = XtOffsetOf(php_phongo_decimal128_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Decimal128Interface.c0000644000076500000240000000303713572250757017572 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_decimal128_interface_ce; /* {{{ MongoDB\BSON\Decimal128Interface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Decimal128Interface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_decimal128_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(Decimal128Interface, __toString, ai_Decimal128Interface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_decimal128_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Decimal128Interface", php_phongo_decimal128_interface_me); php_phongo_decimal128_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Int64.c0000644000076500000240000002640613572250757015071 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_int64_ce; /* Initialize the object and return whether it was successful. */ static bool php_phongo_int64_init(php_phongo_int64_t* intern, int64_t integer) /* {{{ */ { intern->integer = integer; intern->initialized = true; return true; } /* }}} */ /* Initialize the object from a numeric string and return whether it was * successful. An exception will be thrown on error. */ static bool php_phongo_int64_init_from_string(php_phongo_int64_t* intern, const char* s_integer, phongo_zpp_char_len s_integer_len TSRMLS_DC) /* {{{ */ { int64_t integer; char* endptr = NULL; /* bson_ascii_strtoll() sets errno if conversion fails. If conversion * succeeds, we still want to ensure that the entire string was parsed. */ integer = bson_ascii_strtoll(s_integer, &endptr, 10); if (errno || (endptr && endptr != ((const char*) s_integer + s_integer_len))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing \"%s\" as 64-bit integer for %s initialization", s_integer, ZSTR_VAL(php_phongo_int64_ce->name)); return false; } return php_phongo_int64_init(intern, integer); } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. 
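 * The props hash must contain an "integer" field holding the 64-bit value as
 * a decimal string, which is parsed with bson_ascii_strtoll() by the helper
 * above.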
*/ static bool php_phongo_int64_init_from_hash(php_phongo_int64_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval* value; if ((value = zend_hash_str_find(props, "integer", sizeof("integer") - 1)) && Z_TYPE_P(value) == IS_STRING) { return php_phongo_int64_init_from_string(intern, Z_STRVAL_P(value), Z_STRLEN_P(value) TSRMLS_CC); } #else zval** value; if (zend_hash_find(props, "integer", sizeof("integer"), (void**) &value) == SUCCESS && Z_TYPE_PP(value) == IS_STRING) { return php_phongo_int64_init_from_string(intern, Z_STRVAL_PP(value), Z_STRLEN_PP(value) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"integer\" string field", ZSTR_VAL(php_phongo_int64_ce->name)); return false; } /* }}} */ /* {{{ proto string MongoDB\BSON\Int64::__toString() Return the Int64's value as a string. */ static PHP_METHOD(Int64, __toString) { php_phongo_int64_t* intern; char s_integer[24]; int s_integer_len; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_INT64_OBJ_P(getThis()); s_integer_len = snprintf(s_integer, sizeof(s_integer), "%" PRId64, intern->integer); PHONGO_RETVAL_STRINGL(s_integer, s_integer_len); } /* }}} */ /* {{{ proto array MongoDB\BSON\Int64::jsonSerialize() */ static PHP_METHOD(Int64, jsonSerialize) { php_phongo_int64_t* intern; char s_integer[24]; int s_integer_len; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_INT64_OBJ_P(getThis()); s_integer_len = snprintf(s_integer, sizeof(s_integer), "%" PRId64, intern->integer); array_init_size(return_value, 1); ADD_ASSOC_STRINGL(return_value, "$numberLong", s_integer, s_integer_len); } /* }}} */ /* {{{ proto string MongoDB\BSON\Int64::serialize() */ static PHP_METHOD(Int64, serialize) { php_phongo_int64_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; char s_integer[24]; int s_integer_len; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_INT64_OBJ_P(getThis()); s_integer_len = snprintf(s_integer, sizeof(s_integer), "%" PRId64, intern->integer); #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 1); ADD_ASSOC_STRINGL(&retval, "integer", s_integer, s_integer_len); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 1); ADD_ASSOC_STRINGL(retval, "integer", s_integer, s_integer_len); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\Int64::unserialize(string $serialized) */ static PHP_METHOD(Int64, unserialize) { php_phongo_int64_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_INT64_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) 
{ zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_int64_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_int64_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_int64_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\Int64 function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Int64_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Int64_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_int64_me[] = { /* clang-format off */ /* __set_state intentionally missing */ PHP_ME(Int64, __toString, ai_Int64_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Int64, jsonSerialize, ai_Int64_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Int64, serialize, ai_Int64_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Int64, unserialize, ai_Int64_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Int64_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Int64 object handlers */ static zend_object_handlers php_phongo_handler_int64; static void php_phongo_int64_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_int64_t* intern = Z_OBJ_INT64(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ phongo_create_object_retval php_phongo_int64_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_int64_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_int64_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_int64; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_int64_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_int64; return retval; } #endif } /* }}} */ static int php_phongo_int64_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_int64_t *intern1, *intern2; intern1 = Z_INT64_OBJ_P(o1); intern2 = Z_INT64_OBJ_P(o2); if (intern1->integer != intern2->integer) { return intern1->integer < intern2->integer ? 
-1 : 1; } return 0; } /* }}} */ static HashTable* php_phongo_int64_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_INT64_OBJ_P(object)->properties; } /* }}} */ HashTable* php_phongo_int64_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_int64_t* intern; HashTable* props; char s_integer[24]; int s_integer_len; intern = Z_INT64_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->initialized) { return props; } s_integer_len = snprintf(s_integer, sizeof(s_integer), "%" PRId64, intern->integer); #if PHP_VERSION_ID >= 70000 { zval value; ZVAL_STRINGL(&value, s_integer, s_integer_len); zend_hash_str_update(props, "integer", sizeof("integer") - 1, &value); } #else { zval* value; MAKE_STD_ZVAL(value); ZVAL_STRINGL(value, s_integer, s_integer_len, 1); zend_hash_update(props, "integer", sizeof("integer"), &value, sizeof(value), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_int64_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_int64_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_int64_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_int64_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_int64_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Int64", php_phongo_int64_me); php_phongo_int64_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_int64_ce->create_object = php_phongo_int64_create_object; PHONGO_CE_FINAL(php_phongo_int64_ce); zend_class_implements(php_phongo_int64_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_int64_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_int64_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_int64, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_int64.compare_objects = php_phongo_int64_compare_objects; php_phongo_handler_int64.get_debug_info = php_phongo_int64_get_debug_info; php_phongo_handler_int64.get_gc = php_phongo_int64_get_gc; php_phongo_handler_int64.get_properties = php_phongo_int64_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_int64.free_obj = php_phongo_int64_free_object; php_phongo_handler_int64.offset = XtOffsetOf(php_phongo_int64_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Javascript.c0000644000076500000240000004115313572250757016267 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_javascript_ce; /* Initialize the object and return whether it was successful. An exception will * be thrown on error. */ static bool php_phongo_javascript_init(php_phongo_javascript_t* intern, const char* code, phongo_zpp_char_len code_len, zval* scope TSRMLS_DC) /* {{{ */ { if (scope && Z_TYPE_P(scope) != IS_OBJECT && Z_TYPE_P(scope) != IS_ARRAY && Z_TYPE_P(scope) != IS_NULL) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected scope to be array or object, %s given", zend_get_type_by_const(Z_TYPE_P(scope))); return false; } if (strlen(code) != (size_t) code_len) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Code cannot contain null bytes"); return false; } intern->code = estrndup(code, code_len); intern->code_len = code_len; if (scope && (Z_TYPE_P(scope) == IS_OBJECT || Z_TYPE_P(scope) == IS_ARRAY)) { intern->scope = bson_new(); php_phongo_zval_to_bson(scope, PHONGO_BSON_NONE, intern->scope, NULL TSRMLS_CC); } else { intern->scope = NULL; } return true; } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_javascript_init_from_hash(php_phongo_javascript_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval *code, *scope; if ((code = zend_hash_str_find(props, "code", sizeof("code") - 1)) && Z_TYPE_P(code) == IS_STRING) { scope = zend_hash_str_find(props, "scope", sizeof("scope") - 1); return php_phongo_javascript_init(intern, Z_STRVAL_P(code), Z_STRLEN_P(code), scope TSRMLS_CC); } #else zval **code, **scope; if (zend_hash_find(props, "code", sizeof("code"), (void**) &code) == SUCCESS && Z_TYPE_PP(code) == IS_STRING) { zval* tmp = zend_hash_find(props, "scope", sizeof("scope"), (void**) &scope) == SUCCESS ? *scope : NULL; return php_phongo_javascript_init(intern, Z_STRVAL_PP(code), Z_STRLEN_PP(code), tmp TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"code\" string field", ZSTR_VAL(php_phongo_javascript_ce->name)); return false; } /* }}} */ /* {{{ proto void MongoDB\BSON\Javascript::__construct(string $code[, array|object $scope]) Construct a new BSON Javascript type. The scope is a document mapping identifiers and values, representing the scope in which the code string will be evaluated. Note that this type cannot be represented as Extended JSON. 
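   The code string must not contain null bytes. A non-null array or object
   scope is converted to BSON via php_phongo_zval_to_bson() and stored with
   the code.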
*/ static PHP_METHOD(Javascript, __construct) { php_phongo_javascript_t* intern; zend_error_handling error_handling; char* code; phongo_zpp_char_len code_len; zval* scope = NULL; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_JAVASCRIPT_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|A!", &code, &code_len, &scope) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); php_phongo_javascript_init(intern, code, code_len, scope TSRMLS_CC); } /* }}} */ /* {{{ proto void MongoDB\BSON\Javascript::__set_state(array $properties) */ static PHP_METHOD(Javascript, __set_state) { php_phongo_javascript_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_javascript_ce); intern = Z_JAVASCRIPT_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_javascript_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\Javascript::__toString() Return the Javascript's code string. */ static PHP_METHOD(Javascript, __toString) { php_phongo_javascript_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_JAVASCRIPT_OBJ_P(getThis()); PHONGO_RETURN_STRINGL(intern->code, intern->code_len); } /* }}} */ /* {{{ proto string MongoDB\BSON\Javascript::getCode() */ static PHP_METHOD(Javascript, getCode) { php_phongo_javascript_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_JAVASCRIPT_OBJ_P(getThis()); PHONGO_RETURN_STRINGL(intern->code, intern->code_len); } /* }}} */ /* {{{ proto object|null MongoDB\BSON\Javascript::getScope() */ static PHP_METHOD(Javascript, getScope) { php_phongo_javascript_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_JAVASCRIPT_OBJ_P(getThis()); if (!intern->scope) { RETURN_NULL(); } if (intern->scope->len) { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; php_phongo_bson_to_zval_ex(bson_get_data(intern->scope), intern->scope->len, &state); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } else { RETURN_NULL(); } } /* }}} */ /* {{{ proto array MongoDB\BSON\Javascript::jsonSerialize() */ static PHP_METHOD(Javascript, jsonSerialize) { php_phongo_javascript_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_JAVASCRIPT_OBJ_P(getThis()); array_init_size(return_value, 2); ADD_ASSOC_STRINGL(return_value, "$code", intern->code, intern->code_len); if (intern->scope && intern->scope->len) { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; if (php_phongo_bson_to_zval_ex(bson_get_data(intern->scope), intern->scope->len, &state)) { #if PHP_VERSION_ID >= 70000 Z_ADDREF(state.zchild); ADD_ASSOC_ZVAL_EX(return_value, "$scope", &state.zchild); #else Z_ADDREF_P(state.zchild); ADD_ASSOC_ZVAL_EX(return_value, "$scope", state.zchild); #endif } zval_ptr_dtor(&state.zchild); } } /* }}} */ /* {{{ proto string MongoDB\BSON\Javascript::serialize() */ static PHP_METHOD(Javascript, serialize) { php_phongo_javascript_t* intern; ZVAL_RETVAL_TYPE retval; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; php_serialize_data_t var_hash; smart_str buf = { 0 }; intern = Z_JAVASCRIPT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } #if 
PHP_VERSION_ID >= 70000 if (intern->scope && intern->scope->len) { if (!php_phongo_bson_to_zval_ex(bson_get_data(intern->scope), intern->scope->len, &state)) { return; } Z_ADDREF(state.zchild); } else { ZVAL_NULL(&state.zchild); } #else if (intern->scope && intern->scope->len) { if (!php_phongo_bson_to_zval_ex(bson_get_data(intern->scope), intern->scope->len, &state)) { return; } Z_ADDREF_P(state.zchild); } else { MAKE_STD_ZVAL(state.zchild); ZVAL_NULL(state.zchild); Z_ADDREF_P(state.zchild); } #endif #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 2); ADD_ASSOC_STRINGL(&retval, "code", intern->code, intern->code_len); ADD_ASSOC_ZVAL(&retval, "scope", &state.zchild); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 2); ADD_ASSOC_STRINGL(retval, "code", intern->code, intern->code_len); ADD_ASSOC_ZVAL(retval, "scope", state.zchild); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); zval_ptr_dtor(&state.zchild); } /* }}} */ /* {{{ proto void MongoDB\BSON\Javascript::unserialize(string $serialized) */ static PHP_METHOD(Javascript, unserialize) { php_phongo_javascript_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_JAVASCRIPT_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_javascript_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_javascript_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_javascript_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\Javascript function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Javascript___construct, 0, 0, 1) ZEND_ARG_INFO(0, javascript) ZEND_ARG_INFO(0, scope) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Javascript___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Javascript_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Javascript_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_javascript_me[] = { /* clang-format off */ PHP_ME(Javascript, __construct, ai_Javascript___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Javascript, __set_state, ai_Javascript___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(Javascript, __toString, ai_Javascript_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Javascript, jsonSerialize, ai_Javascript_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Javascript, serialize, ai_Javascript_void, 
ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Javascript, unserialize, ai_Javascript_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Javascript, getCode, ai_Javascript_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Javascript, getScope, ai_Javascript_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Javascript object handlers */ static zend_object_handlers php_phongo_handler_javascript; static void php_phongo_javascript_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_javascript_t* intern = Z_OBJ_JAVASCRIPT(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->code) { efree(intern->code); } if (intern->scope) { bson_destroy(intern->scope); intern->scope = NULL; } if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ phongo_create_object_retval php_phongo_javascript_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_javascript_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_javascript_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_javascript; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_javascript_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_javascript; return retval; } #endif } /* }}} */ static int php_phongo_javascript_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_javascript_t *intern1, *intern2; intern1 = Z_JAVASCRIPT_OBJ_P(o1); intern2 = Z_JAVASCRIPT_OBJ_P(o2); /* Do not consider the scope document for comparisons */ return strcmp(intern1->code, intern2->code); } /* }}} */ static HashTable* php_phongo_javascript_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_JAVASCRIPT_OBJ_P(object)->properties; } /* }}} */ HashTable* php_phongo_javascript_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_javascript_t* intern; HashTable* props; intern = Z_JAVASCRIPT_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->code) { return props; } #if PHP_VERSION_ID >= 70000 { zval code; ZVAL_STRING(&code, intern->code); zend_hash_str_update(props, "code", sizeof("code") - 1, &code); if (intern->scope) { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; if (php_phongo_bson_to_zval_ex(bson_get_data(intern->scope), intern->scope->len, &state)) { Z_ADDREF(state.zchild); zend_hash_str_update(props, "scope", sizeof("scope") - 1, &state.zchild); } else { zval scope; ZVAL_NULL(&scope); zend_hash_str_update(props, "scope", sizeof("scope") - 1, &scope); } zval_ptr_dtor(&state.zchild); } else { zval scope; ZVAL_NULL(&scope); zend_hash_str_update(props, "scope", sizeof("scope") - 1, &scope); } } #else { zval* code; MAKE_STD_ZVAL(code); ZVAL_STRING(code, intern->code, 1); zend_hash_update(props, "code", sizeof("code"), &code, sizeof(code), NULL); if (intern->scope) { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; if (php_phongo_bson_to_zval_ex(bson_get_data(intern->scope), intern->scope->len, &state)) { Z_ADDREF_P(state.zchild); zend_hash_update(props, "scope", sizeof("scope"), &state.zchild, 
sizeof(state.zchild), NULL); } else { zval* scope; MAKE_STD_ZVAL(scope); ZVAL_NULL(scope); zend_hash_update(props, "scope", sizeof("scope"), &scope, sizeof(scope), NULL); } zval_ptr_dtor(&state.zchild); } else { zval* scope; MAKE_STD_ZVAL(scope); ZVAL_NULL(scope); zend_hash_update(props, "scope", sizeof("scope"), &scope, sizeof(scope), NULL); } } #endif return props; } /* }}} */ static HashTable* php_phongo_javascript_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_javascript_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_javascript_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_javascript_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_javascript_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Javascript", php_phongo_javascript_me); php_phongo_javascript_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_javascript_ce->create_object = php_phongo_javascript_create_object; PHONGO_CE_FINAL(php_phongo_javascript_ce); zend_class_implements(php_phongo_javascript_ce TSRMLS_CC, 1, php_phongo_javascript_interface_ce); zend_class_implements(php_phongo_javascript_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_javascript_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_javascript_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_javascript, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_javascript.compare_objects = php_phongo_javascript_compare_objects; php_phongo_handler_javascript.get_debug_info = php_phongo_javascript_get_debug_info; php_phongo_handler_javascript.get_gc = php_phongo_javascript_get_gc; php_phongo_handler_javascript.get_properties = php_phongo_javascript_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_javascript.free_obj = php_phongo_javascript_free_object; php_phongo_handler_javascript.offset = XtOffsetOf(php_phongo_javascript_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/JavascriptInterface.c0000644000076500000240000000327213572250757020110 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_javascript_interface_ce; /* {{{ MongoDB\BSON\JavascriptInterface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_JavascriptInterface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_javascript_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(JavascriptInterface, getCode, ai_JavascriptInterface_void) ZEND_ABSTRACT_ME(JavascriptInterface, getScope, ai_JavascriptInterface_void) ZEND_ABSTRACT_ME(JavascriptInterface, __toString, ai_JavascriptInterface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_javascript_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "JavascriptInterface", php_phongo_javascript_interface_me); php_phongo_javascript_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/MaxKey.c0000644000076500000240000001217713572250757015363 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_maxkey_ce; /* {{{ proto void MongoDB\BSON\MaxKey::__set_state(array $properties) */ static PHP_METHOD(MaxKey, __set_state) { zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_maxkey_ce); } /* }}} */ /* {{{ proto array MongoDB\BSON\MaxKey::jsonSerialize() */ static PHP_METHOD(MaxKey, jsonSerialize) { if (zend_parse_parameters_none() == FAILURE) { return; } array_init_size(return_value, 1); ADD_ASSOC_LONG_EX(return_value, "$maxKey", 1); } /* }}} */ /* {{{ proto string MongoDB\BSON\MaxKey::serialize() */ static PHP_METHOD(MaxKey, serialize) { PHONGO_RETURN_STRING(""); } /* }}} */ /* {{{ proto void MongoDB\BSON\MaxKey::unserialize(string $serialized) */ static PHP_METHOD(MaxKey, unserialize) { zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ MongoDB\BSON\MaxKey function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_MaxKey___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_MaxKey_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() 
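/*
 * Illustrative sketch (not part of the original source): MaxKey carries no
 * state of its own, which is why serialize() above returns an empty string
 * and unserialize() only validates its argument. In BSON, Max Key and Min Key
 * are zero-payload element types that sort after and before every other
 * value, respectively. The standalone libbson program below shows those
 * element types directly; the <bson/bson.h> include path and the separate
 * main() are assumptions, and the block is wrapped in #if 0 so it has no
 * effect on this translation unit.
 */
#if 0
#include <bson/bson.h>
#include <stdio.h>

int main(void)
{
	bson_t doc = BSON_INITIALIZER;
	char*  json;

	/* Both append calls take only a key; the element types have no payload. */
	bson_append_maxkey(&doc, "upper", -1);
	bson_append_minkey(&doc, "lower", -1);

	/* Prints roughly: { "upper" : { "$maxKey" : 1 }, "lower" : { "$minKey" : 1 } } */
	json = bson_as_canonical_extended_json(&doc, NULL);
	printf("%s\n", json);

	bson_free(json);
	bson_destroy(&doc);

	return 0;
}
#endif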
ZEND_BEGIN_ARG_INFO_EX(ai_MaxKey_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_maxkey_me[] = { /* clang-format off */ PHP_ME(MaxKey, __set_state, ai_MaxKey___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(MaxKey, jsonSerialize, ai_MaxKey_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(MaxKey, serialize, ai_MaxKey_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(MaxKey, unserialize, ai_MaxKey_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\MaxKey object handlers */ static zend_object_handlers php_phongo_handler_maxkey; static void php_phongo_maxkey_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_maxkey_t* intern = Z_OBJ_MAXKEY(object); zend_object_std_dtor(&intern->std TSRMLS_CC); #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_maxkey_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_maxkey_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_maxkey_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_maxkey; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_maxkey_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_maxkey; return retval; } #endif } /* }}} */ /* }}} */ void php_phongo_maxkey_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "MaxKey", php_phongo_maxkey_me); php_phongo_maxkey_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_maxkey_ce->create_object = php_phongo_maxkey_create_object; PHONGO_CE_FINAL(php_phongo_maxkey_ce); zend_class_implements(php_phongo_maxkey_ce TSRMLS_CC, 1, php_phongo_maxkey_interface_ce); zend_class_implements(php_phongo_maxkey_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_maxkey_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_maxkey_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_maxkey, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); #if PHP_VERSION_ID >= 70000 php_phongo_handler_maxkey.free_obj = php_phongo_maxkey_free_object; php_phongo_handler_maxkey.offset = XtOffsetOf(php_phongo_maxkey_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/MaxKeyInterface.c0000644000076500000240000000246213572250757017200 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_maxkey_interface_ce; /* {{{ MongoDB\BSON\MaxKeyInterface function entries */ static zend_function_entry php_phongo_maxkey_interface_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_maxkey_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "MaxKeyInterface", php_phongo_maxkey_interface_me); php_phongo_maxkey_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/MinKey.c0000644000076500000240000001220013572250757015344 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_minkey_ce; /* {{{ proto void MongoDB\BSON\MinKey::__set_state(array $properties) */ static PHP_METHOD(MinKey, __set_state) { zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_minkey_ce); } /* }}} */ /* {{{ proto array MongoDB\BSON\MinKey::jsonSerialize() */ static PHP_METHOD(MinKey, jsonSerialize) { if (zend_parse_parameters_none() == FAILURE) { return; } array_init_size(return_value, 1); ADD_ASSOC_LONG_EX(return_value, "$minKey", 1); } /* }}} */ /* {{{ proto string MongoDB\BSON\MinKey::serialize() */ static PHP_METHOD(MinKey, serialize) { PHONGO_RETURN_STRING(""); } /* }}} */ /* {{{ proto void MongoDB\BSON\MinKey::unserialize(string $serialized) */ static PHP_METHOD(MinKey, unserialize) { zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ MongoDB\BSON\MinKey function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_MinKey___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_MinKey_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_MinKey_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_minkey_me[] = { /* clang-format off */ PHP_ME(MinKey, __set_state, ai_MinKey___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(MinKey, jsonSerialize, ai_MinKey_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(MinKey, serialize, ai_MinKey_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(MinKey, unserialize, 
ai_MinKey_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\MinKey object handlers */ static zend_object_handlers php_phongo_handler_minkey; static void php_phongo_minkey_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_minkey_t* intern = Z_OBJ_MINKEY(object); zend_object_std_dtor(&intern->std TSRMLS_CC); #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_minkey_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_minkey_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_minkey_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_minkey; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_minkey_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_minkey; return retval; } #endif } /* }}} */ /* }}} */ void php_phongo_minkey_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "MinKey", php_phongo_minkey_me); php_phongo_minkey_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_minkey_ce->create_object = php_phongo_minkey_create_object; PHONGO_CE_FINAL(php_phongo_minkey_ce); zend_class_implements(php_phongo_minkey_ce TSRMLS_CC, 1, php_phongo_minkey_interface_ce); zend_class_implements(php_phongo_minkey_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_minkey_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_minkey_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_minkey, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); #if PHP_VERSION_ID >= 70000 php_phongo_handler_minkey.free_obj = php_phongo_minkey_free_object; php_phongo_handler_minkey.offset = XtOffsetOf(php_phongo_minkey_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/MinKeyInterface.c0000644000076500000240000000246213572250757017176 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_minkey_interface_ce; /* {{{ MongoDB\BSON\MinKeyInterface function entries */ static zend_function_entry php_phongo_minkey_interface_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_minkey_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "MinKeyInterface", php_phongo_minkey_interface_me); php_phongo_minkey_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/ObjectId.c0000644000076500000240000003113713572250757015645 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_objectid_ce; /* Initialize the object with a generated value and return whether it was * successful. */ static bool php_phongo_objectid_init(php_phongo_objectid_t* intern) { bson_oid_t oid; intern->initialized = true; bson_oid_init(&oid, NULL); bson_oid_to_string(&oid, intern->oid); return true; } /* Initialize the object from a hex string and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_objectid_init_from_hex_string(php_phongo_objectid_t* intern, const char* hex, phongo_zpp_char_len hex_len TSRMLS_DC) /* {{{ */ { if (bson_oid_is_valid(hex, hex_len)) { bson_oid_t oid; bson_oid_init_from_string(&oid, hex); bson_oid_to_string(&oid, intern->oid); intern->initialized = true; return true; } phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing ObjectId string: %s", hex); return false; } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_objectid_init_from_hash(php_phongo_objectid_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval* z_oid; z_oid = zend_hash_str_find(props, "oid", sizeof("oid") - 1); if (z_oid && Z_TYPE_P(z_oid) == IS_STRING) { return php_phongo_objectid_init_from_hex_string(intern, Z_STRVAL_P(z_oid), Z_STRLEN_P(z_oid) TSRMLS_CC); } #else zval** z_oid; if (zend_hash_find(props, "oid", sizeof("oid"), (void**) &z_oid) == SUCCESS && Z_TYPE_PP(z_oid) == IS_STRING) { return php_phongo_objectid_init_from_hex_string(intern, Z_STRVAL_PP(z_oid), Z_STRLEN_PP(z_oid) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"oid\" string field", ZSTR_VAL(php_phongo_objectid_ce->name)); return false; } /* }}} */ /* {{{ proto void MongoDB\BSON\ObjectId::__construct([string $id]) Constructs a new BSON ObjectId type, optionally from a hex string. 
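   When no argument is given, a new value is generated with bson_oid_init();
   when a string is given, it must be a 24-character hexadecimal ObjectId
   representation accepted by bson_oid_is_valid(), otherwise an
   invalid-argument exception is thrown.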
*/ static PHP_METHOD(ObjectId, __construct) { php_phongo_objectid_t* intern; zend_error_handling error_handling; char* id = NULL; phongo_zpp_char_len id_len; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_OBJECTID_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!", &id, &id_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); if (id) { php_phongo_objectid_init_from_hex_string(intern, id, id_len TSRMLS_CC); } else { php_phongo_objectid_init(intern); } } /* }}} */ /* {{{ proto integer MongoDB\BSON\ObjectId::getTimestamp() */ static PHP_METHOD(ObjectId, getTimestamp) { php_phongo_objectid_t* intern; bson_oid_t tmp_oid; intern = Z_OBJECTID_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } bson_oid_init_from_string(&tmp_oid, intern->oid); RETVAL_LONG(bson_oid_get_time_t(&tmp_oid)); } /* }}} */ /* {{{ proto MongoDB\BSON\ObjectId::__set_state(array $properties) */ static PHP_METHOD(ObjectId, __set_state) { php_phongo_objectid_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_objectid_ce); intern = Z_OBJECTID_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_objectid_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\ObjectId::__toString() */ static PHP_METHOD(ObjectId, __toString) { php_phongo_objectid_t* intern; intern = Z_OBJECTID_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETURN_STRINGL(intern->oid, 24); } /* }}} */ /* {{{ proto array MongoDB\BSON\ObjectId::jsonSerialize() */ static PHP_METHOD(ObjectId, jsonSerialize) { php_phongo_objectid_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_OBJECTID_OBJ_P(getThis()); array_init_size(return_value, 1); ADD_ASSOC_STRINGL(return_value, "$oid", intern->oid, 24); } /* }}} */ /* {{{ proto string MongoDB\BSON\ObjectId::serialize() */ static PHP_METHOD(ObjectId, serialize) { php_phongo_objectid_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; intern = Z_OBJECTID_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 2); ADD_ASSOC_STRINGL(&retval, "oid", intern->oid, 24); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 2); ADD_ASSOC_STRINGL(retval, "oid", intern->oid, 24); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\ObjectId::unserialize(string $serialized) */ static PHP_METHOD(ObjectId, unserialize) { php_phongo_objectid_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_OBJECTID_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); 
return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_objectid_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_objectid_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_objectid_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\ObjectId function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId___construct, 0, 0, 0) ZEND_ARG_INFO(0, id) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_ObjectId_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_objectid_me[] = { /* clang-format off */ PHP_ME(ObjectId, __construct, ai_ObjectId___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ObjectId, getTimestamp, ai_ObjectId_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ObjectId, __set_state, ai_ObjectId___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(ObjectId, __toString, ai_ObjectId_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ObjectId, jsonSerialize, ai_ObjectId_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ObjectId, serialize, ai_ObjectId_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ObjectId, unserialize, ai_ObjectId_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\ObjectId object handlers */ static zend_object_handlers php_phongo_handler_objectid; static void php_phongo_objectid_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_objectid_t* intern = Z_OBJ_OBJECTID(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_objectid_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_objectid_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_objectid_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_objectid; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_objectid_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_objectid; return retval; } #endif } /* }}} */ static int php_phongo_objectid_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_objectid_t* intern1; php_phongo_objectid_t* intern2; intern1 = Z_OBJECTID_OBJ_P(o1); intern2 = Z_OBJECTID_OBJ_P(o2); return strcmp(intern1->oid, intern2->oid); } /* }}} */ static HashTable* php_phongo_objectid_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_OBJECTID_OBJ_P(object)->properties; 
} /* }}} */ static HashTable* php_phongo_objectid_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_objectid_t* intern; HashTable* props; intern = Z_OBJECTID_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 1); if (!intern->initialized) { return props; } #if PHP_VERSION_ID >= 70000 { zval zv; ZVAL_STRING(&zv, intern->oid); zend_hash_str_update(props, "oid", sizeof("oid") - 1, &zv); } #else { zval* zv; MAKE_STD_ZVAL(zv); ZVAL_STRING(zv, intern->oid, 1); zend_hash_update(props, "oid", sizeof("oid"), &zv, sizeof(zv), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_objectid_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_objectid_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_objectid_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_objectid_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_objectid_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "ObjectId", php_phongo_objectid_me); php_phongo_objectid_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_objectid_ce->create_object = php_phongo_objectid_create_object; PHONGO_CE_FINAL(php_phongo_objectid_ce); zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, php_phongo_objectid_interface_ce); zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_objectid_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_objectid, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_objectid.compare_objects = php_phongo_objectid_compare_objects; php_phongo_handler_objectid.get_debug_info = php_phongo_objectid_get_debug_info; php_phongo_handler_objectid.get_gc = php_phongo_objectid_get_gc; php_phongo_handler_objectid.get_properties = php_phongo_objectid_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_objectid.free_obj = php_phongo_objectid_free_object; php_phongo_handler_objectid.offset = XtOffsetOf(php_phongo_objectid_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/ObjectIdInterface.c0000644000076500000240000000313113572250757017457 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_objectid_interface_ce; /* {{{ MongoDB\BSON\ObjectIdInterface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_ObjectIdInterface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_objectid_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(ObjectIdInterface, getTimestamp, ai_ObjectIdInterface_void) ZEND_ABSTRACT_ME(ObjectIdInterface, __toString, ai_ObjectIdInterface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_objectid_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "ObjectIdInterface", php_phongo_objectid_interface_me); php_phongo_objectid_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Persistable.c0000644000076500000240000000262013572250757016432 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_persistable_ce; /* {{{ MongoDB\BSON\Persistable function entries */ static zend_function_entry php_phongo_persistable_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_persistable_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Persistable", php_phongo_persistable_me); php_phongo_persistable_ce = zend_register_internal_interface(&ce TSRMLS_CC); zend_class_implements(php_phongo_persistable_ce TSRMLS_CC, 2, php_phongo_unserializable_ce, php_phongo_serializable_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Regex.c0000644000076500000240000003505713572250757015241 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_regex_ce; /* qsort() compare callback for alphabetizing regex flags upon initialization */ static int php_phongo_regex_compare_flags(const void* f1, const void* f2) /* {{{ */ { if (*(const char*) f1 == *(const char*) f2) { return 0; } return (*(const char*) f1 > *(const char*) f2) ? 1 : -1; } /* }}} */ /* Initialize the object and return whether it was successful. An exception will * be thrown on error. */ static bool php_phongo_regex_init(php_phongo_regex_t* intern, const char* pattern, phongo_zpp_char_len pattern_len, const char* flags, phongo_zpp_char_len flags_len TSRMLS_DC) /* {{{ */ { if (strlen(pattern) != (size_t) pattern_len) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Pattern cannot contain null bytes"); return false; } intern->pattern = estrndup(pattern, pattern_len); intern->pattern_len = pattern_len; if (flags) { if (strlen(flags) != (size_t) flags_len) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Flags cannot contain null bytes"); return false; } intern->flags = estrndup(flags, flags_len); intern->flags_len = flags_len; /* Ensure flags are alphabetized upon initialization */ qsort((void*) intern->flags, flags_len, 1, php_phongo_regex_compare_flags); } else { intern->flags = estrdup(""); intern->flags_len = 0; } return true; } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_regex_init_from_hash(php_phongo_regex_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval *pattern, *flags; if ((pattern = zend_hash_str_find(props, "pattern", sizeof("pattern") - 1)) && Z_TYPE_P(pattern) == IS_STRING && (flags = zend_hash_str_find(props, "flags", sizeof("flags") - 1)) && Z_TYPE_P(flags) == IS_STRING) { return php_phongo_regex_init(intern, Z_STRVAL_P(pattern), Z_STRLEN_P(pattern), Z_STRVAL_P(flags), Z_STRLEN_P(flags) TSRMLS_CC); } #else zval **pattern, **flags; if (zend_hash_find(props, "pattern", sizeof("pattern"), (void**) &pattern) == SUCCESS && Z_TYPE_PP(pattern) == IS_STRING && zend_hash_find(props, "flags", sizeof("flags"), (void**) &flags) == SUCCESS && Z_TYPE_PP(flags) == IS_STRING) { return php_phongo_regex_init(intern, Z_STRVAL_PP(pattern), Z_STRLEN_PP(pattern), Z_STRVAL_PP(flags), Z_STRLEN_PP(flags) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"pattern\" and \"flags\" string fields", ZSTR_VAL(php_phongo_regex_ce->name)); return false; } /* }}} */ /* {{{ proto void MongoDB\BSON\Regex::__construct(string $pattern [, string $flags]) Constructs a new BSON regular expression type. 
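   Flags are alphabetized on initialization (php_phongo_regex_init() sorts
   them with qsort()), and neither the pattern nor the flags may contain
   null bytes.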
*/ static PHP_METHOD(Regex, __construct) { php_phongo_regex_t* intern; zend_error_handling error_handling; char* pattern; phongo_zpp_char_len pattern_len; char* flags = NULL; phongo_zpp_char_len flags_len = 0; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_REGEX_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|s", &pattern, &pattern_len, &flags, &flags_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); php_phongo_regex_init(intern, pattern, pattern_len, flags, flags_len TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\Regex::getPattern() */ static PHP_METHOD(Regex, getPattern) { php_phongo_regex_t* intern; intern = Z_REGEX_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETURN_STRINGL(intern->pattern, intern->pattern_len); } /* }}} */ /* {{{ proto string MongoDB\BSON\Regex::getFlags() */ static PHP_METHOD(Regex, getFlags) { php_phongo_regex_t* intern; intern = Z_REGEX_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETURN_STRINGL(intern->flags, intern->flags_len); } /* }}} */ /* {{{ proto void MongoDB\BSON\Regex::__set_state(array $properties) */ static PHP_METHOD(Regex, __set_state) { php_phongo_regex_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_regex_ce); intern = Z_REGEX_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_regex_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\Regex::__toString() Returns a string in the form: /pattern/flags */ static PHP_METHOD(Regex, __toString) { php_phongo_regex_t* intern; char* regex; int regex_len; intern = Z_REGEX_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } regex_len = spprintf(®ex, 0, "/%s/%s", intern->pattern, intern->flags); PHONGO_RETVAL_STRINGL(regex, regex_len); efree(regex); } /* }}} */ /* {{{ proto array MongoDB\BSON\Regex::jsonSerialize() */ static PHP_METHOD(Regex, jsonSerialize) { php_phongo_regex_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_REGEX_OBJ_P(getThis()); array_init_size(return_value, 2); ADD_ASSOC_STRINGL(return_value, "$regex", intern->pattern, intern->pattern_len); ADD_ASSOC_STRINGL(return_value, "$options", intern->flags, intern->flags_len); } /* }}} */ /* {{{ proto string MongoDB\BSON\Regex::serialize() */ static PHP_METHOD(Regex, serialize) { php_phongo_regex_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; intern = Z_REGEX_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 2); ADD_ASSOC_STRINGL(&retval, "pattern", intern->pattern, intern->pattern_len); ADD_ASSOC_STRINGL(&retval, "flags", intern->flags, intern->flags_len); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 2); ADD_ASSOC_STRINGL(retval, "pattern", intern->pattern, intern->pattern_len); ADD_ASSOC_STRINGL(retval, "flags", intern->flags, intern->flags_len); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void 
MongoDB\BSON\Regex::unserialize(string $serialized) */ static PHP_METHOD(Regex, unserialize) { php_phongo_regex_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_REGEX_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_regex_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_regex_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_regex_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\Regex function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Regex___construct, 0, 0, 2) ZEND_ARG_INFO(0, pattern) ZEND_ARG_INFO(0, flags) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Regex___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Regex_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Regex_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_regex_me[] = { /* clang-format off */ PHP_ME(Regex, __construct, ai_Regex___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Regex, __set_state, ai_Regex___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(Regex, __toString, ai_Regex_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Regex, jsonSerialize, ai_Regex_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Regex, serialize, ai_Regex_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Regex, unserialize, ai_Regex_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Regex, getPattern, ai_Regex_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Regex, getFlags, ai_Regex_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Regex object handlers */ static zend_object_handlers php_phongo_handler_regex; static void php_phongo_regex_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_regex_t* intern = Z_OBJ_REGEX(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->pattern) { efree(intern->pattern); } if (intern->flags) { efree(intern->flags); } if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_regex_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_regex_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_regex_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_regex; return 
&intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_regex_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_regex; return retval; } #endif } /* }}} */ static int php_phongo_regex_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_regex_t *intern1, *intern2; int retval; intern1 = Z_REGEX_OBJ_P(o1); intern2 = Z_REGEX_OBJ_P(o2); /* MongoDB compares the pattern string before the flags. */ retval = strcmp(intern1->pattern, intern2->pattern); if (retval != 0) { return retval; } return strcmp(intern1->flags, intern2->flags); } /* }}} */ static HashTable* php_phongo_regex_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_REGEX_OBJ_P(object)->properties; } /* }}} */ static HashTable* php_phongo_regex_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_regex_t* intern; HashTable* props; intern = Z_REGEX_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->pattern) { return props; } #if PHP_VERSION_ID >= 70000 { zval pattern, flags; ZVAL_STRINGL(&pattern, intern->pattern, intern->pattern_len); zend_hash_str_update(props, "pattern", sizeof("pattern") - 1, &pattern); ZVAL_STRINGL(&flags, intern->flags, intern->flags_len); zend_hash_str_update(props, "flags", sizeof("flags") - 1, &flags); } #else { zval *pattern, *flags; MAKE_STD_ZVAL(pattern); ZVAL_STRINGL(pattern, intern->pattern, intern->pattern_len, 1); zend_hash_update(props, "pattern", sizeof("pattern"), &pattern, sizeof(pattern), NULL); MAKE_STD_ZVAL(flags); ZVAL_STRINGL(flags, intern->flags, intern->flags_len, 1); zend_hash_update(props, "flags", sizeof("flags"), &flags, sizeof(flags), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_regex_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_regex_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_regex_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_regex_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_regex_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Regex", php_phongo_regex_me); php_phongo_regex_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_regex_ce->create_object = php_phongo_regex_create_object; PHONGO_CE_FINAL(php_phongo_regex_ce); zend_class_implements(php_phongo_regex_ce TSRMLS_CC, 1, php_phongo_regex_interface_ce); zend_class_implements(php_phongo_regex_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_regex_ce TSRMLS_CC, 1, zend_ce_serializable); zend_class_implements(php_phongo_regex_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); memcpy(&php_phongo_handler_regex, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_regex.compare_objects = php_phongo_regex_compare_objects; php_phongo_handler_regex.get_debug_info = php_phongo_regex_get_debug_info; php_phongo_handler_regex.get_gc = php_phongo_regex_get_gc; php_phongo_handler_regex.get_properties = php_phongo_regex_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_regex.free_obj = php_phongo_regex_free_object; php_phongo_handler_regex.offset = XtOffsetOf(php_phongo_regex_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet 
sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/RegexInterface.c0000644000076500000240000000316713572250757017057 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_regex_interface_ce; /* {{{ MongoDB\BSON\RegexInterface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_RegexInterface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_regex_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(RegexInterface, getFlags, ai_RegexInterface_void) ZEND_ABSTRACT_ME(RegexInterface, getPattern, ai_RegexInterface_void) ZEND_ABSTRACT_ME(RegexInterface, __toString, ai_RegexInterface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_regex_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "RegexInterface", php_phongo_regex_interface_me); php_phongo_regex_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Serializable.c0000644000076500000240000000306113572250757016563 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_serializable_ce; /* {{{ MongoDB\BSON\Serializable function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Serializable_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_serializable_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(Serializable, bsonSerialize, ai_Serializable_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_serializable_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Serializable", php_phongo_serializable_me); php_phongo_serializable_ce = zend_register_internal_interface(&ce TSRMLS_CC); zend_class_implements(php_phongo_serializable_ce TSRMLS_CC, 1, php_phongo_type_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Symbol.c0000644000076500000240000002417013572250757015426 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_symbol_ce; /* Initialize the object and return whether it was successful. An exception will * be thrown on error. */ static bool php_phongo_symbol_init(php_phongo_symbol_t* intern, const char* symbol, phongo_zpp_char_len symbol_len TSRMLS_DC) /* {{{ */ { if (strlen(symbol) != (size_t) symbol_len) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Symbol cannot contain null bytes"); return false; } intern->symbol = estrndup(symbol, symbol_len); intern->symbol_len = symbol_len; return true; } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_symbol_init_from_hash(php_phongo_symbol_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval* symbol; if ((symbol = zend_hash_str_find(props, "symbol", sizeof("symbol") - 1)) && Z_TYPE_P(symbol) == IS_STRING) { return php_phongo_symbol_init(intern, Z_STRVAL_P(symbol), Z_STRLEN_P(symbol) TSRMLS_CC); } #else zval** symbol; if (zend_hash_find(props, "symbol", sizeof("symbol"), (void**) &symbol) == SUCCESS && Z_TYPE_PP(symbol) == IS_STRING) { return php_phongo_symbol_init(intern, Z_STRVAL_PP(symbol), Z_STRLEN_PP(symbol) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"symbol\" string field", ZSTR_VAL(php_phongo_symbol_ce->name)); return false; } /* }}} */ /* {{{ proto string MongoDB\BSON\Symbol::__toString() Return the Symbol's symbol string. 
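   Symbol is a deprecated BSON type; instances are only produced when
   decoding BSON, as the public constructor is disabled (note the private
   __construct entry in the method table below).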
*/ static PHP_METHOD(Symbol, __toString) { php_phongo_symbol_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_SYMBOL_OBJ_P(getThis()); PHONGO_RETURN_STRINGL(intern->symbol, intern->symbol_len); } /* }}} */ /* {{{ proto array MongoDB\BSON\Symbol::jsonSerialize() */ static PHP_METHOD(Symbol, jsonSerialize) { php_phongo_symbol_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_SYMBOL_OBJ_P(getThis()); array_init_size(return_value, 1); ADD_ASSOC_STRINGL(return_value, "$symbol", intern->symbol, intern->symbol_len); } /* }}} */ /* {{{ proto string MongoDB\BSON\Symbol::serialize() */ static PHP_METHOD(Symbol, serialize) { php_phongo_symbol_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; intern = Z_SYMBOL_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 1); ADD_ASSOC_STRINGL(&retval, "symbol", intern->symbol, intern->symbol_len); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 1); ADD_ASSOC_STRINGL(retval, "symbol", intern->symbol, intern->symbol_len); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\Symbol::unserialize(string $serialized) */ static PHP_METHOD(Symbol, unserialize) { php_phongo_symbol_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_SYMBOL_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_symbol_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_symbol_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_symbol_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\Symbol function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Symbol_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Symbol_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_symbol_me[] = { /* clang-format off */ /* __set_state intentionally missing */ PHP_ME(Symbol, __toString, ai_Symbol_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Symbol, jsonSerialize, ai_Symbol_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Symbol, serialize, ai_Symbol_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Symbol, unserialize, ai_Symbol_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Symbol_void, ZEND_ACC_PRIVATE 
| ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Symbol object handlers */ static zend_object_handlers php_phongo_handler_symbol; static void php_phongo_symbol_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_symbol_t* intern = Z_OBJ_SYMBOL(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->symbol) { efree(intern->symbol); } if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ phongo_create_object_retval php_phongo_symbol_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_symbol_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_symbol_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_symbol; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_symbol_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_symbol; return retval; } #endif } /* }}} */ static int php_phongo_symbol_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_symbol_t *intern1, *intern2; intern1 = Z_SYMBOL_OBJ_P(o1); intern2 = Z_SYMBOL_OBJ_P(o2); return strcmp(intern1->symbol, intern2->symbol); } /* }}} */ static HashTable* php_phongo_symbol_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_SYMBOL_OBJ_P(object)->properties; } /* }}} */ HashTable* php_phongo_symbol_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_symbol_t* intern; HashTable* props; intern = Z_SYMBOL_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->symbol) { return props; } #if PHP_VERSION_ID >= 70000 { zval symbol; ZVAL_STRING(&symbol, intern->symbol); zend_hash_str_update(props, "symbol", sizeof("symbol") - 1, &symbol); } #else { zval* symbol; MAKE_STD_ZVAL(symbol); ZVAL_STRING(symbol, intern->symbol, 1); zend_hash_update(props, "symbol", sizeof("symbol"), &symbol, sizeof(symbol), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_symbol_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_symbol_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_symbol_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_symbol_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_symbol_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Symbol", php_phongo_symbol_me); php_phongo_symbol_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_symbol_ce->create_object = php_phongo_symbol_create_object; PHONGO_CE_FINAL(php_phongo_symbol_ce); zend_class_implements(php_phongo_symbol_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_symbol_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_symbol_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_symbol, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_symbol.compare_objects = php_phongo_symbol_compare_objects; php_phongo_handler_symbol.get_debug_info = php_phongo_symbol_get_debug_info; 
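	/* The remaining handlers expose the cached property HashTable to the
	 * garbage collector and to property/debug introspection. */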
php_phongo_handler_symbol.get_gc = php_phongo_symbol_get_gc; php_phongo_handler_symbol.get_properties = php_phongo_symbol_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_symbol.free_obj = php_phongo_symbol_free_object; php_phongo_handler_symbol.offset = XtOffsetOf(php_phongo_symbol_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Timestamp.c0000644000076500000240000004474513572250757016136 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_timestamp_ce; /* Initialize the object and return whether it was successful. An exception will * be thrown on error. */ static bool php_phongo_timestamp_init(php_phongo_timestamp_t* intern, int64_t increment, int64_t timestamp TSRMLS_DC) /* {{{ */ { if (increment < 0 || increment > UINT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected increment to be an unsigned 32-bit integer, %" PRId64 " given", increment); return false; } if (timestamp < 0 || timestamp > UINT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected timestamp to be an unsigned 32-bit integer, %" PRId64 " given", timestamp); return false; } intern->increment = (uint32_t) increment; intern->timestamp = (uint32_t) timestamp; intern->initialized = true; return true; } /* }}} */ /* Initialize the object from numeric strings and return whether it was * successful. An exception will be thrown on error. */ static bool php_phongo_timestamp_init_from_string(php_phongo_timestamp_t* intern, const char* s_increment, phongo_zpp_char_len s_increment_len, const char* s_timestamp, phongo_zpp_char_len s_timestamp_len TSRMLS_DC) /* {{{ */ { int64_t increment, timestamp; char* endptr = NULL; /* bson_ascii_strtoll() sets errno if conversion fails. If conversion * succeeds, we still want to ensure that the entire string was parsed. 
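 * The endptr check below therefore requires endptr to land exactly on the
 * terminating byte of the input (s + len); strings with trailing garbage,
 * e.g. "123abc", are rejected rather than silently truncated.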
*/ increment = bson_ascii_strtoll(s_increment, &endptr, 10); if (errno || (endptr && endptr != ((const char*) s_increment + s_increment_len))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing \"%s\" as 64-bit integer increment for %s initialization", s_increment, ZSTR_VAL(php_phongo_timestamp_ce->name)); return false; } timestamp = bson_ascii_strtoll(s_timestamp, &endptr, 10); if (errno || (endptr && endptr != ((const char*) s_timestamp + s_timestamp_len))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing \"%s\" as 64-bit integer timestamp for %s initialization", s_timestamp, ZSTR_VAL(php_phongo_timestamp_ce->name)); return false; } return php_phongo_timestamp_init(intern, increment, timestamp TSRMLS_CC); } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_timestamp_init_from_hash(php_phongo_timestamp_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval *increment, *timestamp; if ((increment = zend_hash_str_find(props, "increment", sizeof("increment") - 1)) && Z_TYPE_P(increment) == IS_LONG && (timestamp = zend_hash_str_find(props, "timestamp", sizeof("timestamp") - 1)) && Z_TYPE_P(timestamp) == IS_LONG) { return php_phongo_timestamp_init(intern, Z_LVAL_P(increment), Z_LVAL_P(timestamp) TSRMLS_CC); } if ((increment = zend_hash_str_find(props, "increment", sizeof("increment") - 1)) && Z_TYPE_P(increment) == IS_STRING && (timestamp = zend_hash_str_find(props, "timestamp", sizeof("timestamp") - 1)) && Z_TYPE_P(timestamp) == IS_STRING) { return php_phongo_timestamp_init_from_string(intern, Z_STRVAL_P(increment), Z_STRLEN_P(increment), Z_STRVAL_P(timestamp), Z_STRLEN_P(timestamp) TSRMLS_CC); } #else zval **increment, **timestamp; if (zend_hash_find(props, "increment", sizeof("increment"), (void**) &increment) == SUCCESS && Z_TYPE_PP(increment) == IS_LONG && zend_hash_find(props, "timestamp", sizeof("timestamp"), (void**) ×tamp) == SUCCESS && Z_TYPE_PP(timestamp) == IS_LONG) { return php_phongo_timestamp_init(intern, Z_LVAL_PP(increment), Z_LVAL_PP(timestamp) TSRMLS_CC); } if (zend_hash_find(props, "increment", sizeof("increment"), (void**) &increment) == SUCCESS && Z_TYPE_PP(increment) == IS_STRING && zend_hash_find(props, "timestamp", sizeof("timestamp"), (void**) ×tamp) == SUCCESS && Z_TYPE_PP(timestamp) == IS_STRING) { return php_phongo_timestamp_init_from_string(intern, Z_STRVAL_PP(increment), Z_STRLEN_PP(increment), Z_STRVAL_PP(timestamp), Z_STRLEN_PP(timestamp) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"increment\" and \"timestamp\" integer or numeric string fields", ZSTR_VAL(php_phongo_timestamp_ce->name)); return false; } /* }}} */ /* {{{ proto void MongoDB\BSON\Timestamp::__construct(int|string $increment, int|string $timestamp) Construct a new BSON timestamp type, which consists of a 4-byte increment and 4-byte timestamp. 
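   Illustrative usage sketch (values are examples only): both arguments may be
   given as integers or as numeric strings, and values outside the unsigned
   32-bit range cause an InvalidArgumentException to be thrown.

     $ts = new MongoDB\BSON\Timestamp(5678, 1234);
     $ts = new MongoDB\BSON\Timestamp('5678', '1234'); // equivalent
     echo $ts;                      // "[5678:1234]" (see __toString below)
     var_dump($ts->getIncrement()); // int(5678)
     var_dump($ts->getTimestamp()); // int(1234)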
*/ static PHP_METHOD(Timestamp, __construct) { php_phongo_timestamp_t* intern; zend_error_handling error_handling; zval * increment = NULL, *timestamp = NULL; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_TIMESTAMP_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "zz", &increment, ×tamp) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); if (Z_TYPE_P(increment) == IS_LONG && Z_TYPE_P(timestamp) == IS_LONG) { php_phongo_timestamp_init(intern, Z_LVAL_P(increment), Z_LVAL_P(timestamp) TSRMLS_CC); return; } if (Z_TYPE_P(increment) == IS_LONG) { convert_to_string(increment); } if (Z_TYPE_P(increment) != IS_STRING) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected increment to be an unsigned 32-bit integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(increment)); return; } if (Z_TYPE_P(timestamp) == IS_LONG) { convert_to_string(timestamp); } if (Z_TYPE_P(timestamp) != IS_STRING) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected timestamp to be an unsigned 32-bit integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(timestamp)); return; } php_phongo_timestamp_init_from_string(intern, Z_STRVAL_P(increment), Z_STRLEN_P(increment), Z_STRVAL_P(timestamp), Z_STRLEN_P(timestamp) TSRMLS_CC); } /* }}} */ /* {{{ proto integer MongoDB\BSON\Timestamp::getIncrement() */ static PHP_METHOD(Timestamp, getIncrement) { php_phongo_timestamp_t* intern; intern = Z_TIMESTAMP_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETVAL_LONG(intern->increment); } /* }}} */ /* {{{ proto integer MongoDB\BSON\Timestamp::getTimestamp() */ static PHP_METHOD(Timestamp, getTimestamp) { php_phongo_timestamp_t* intern; intern = Z_TIMESTAMP_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETVAL_LONG(intern->timestamp); } /* }}} */ /* {{{ proto void MongoDB\BSON\Timestamp::__set_state(array $properties) */ static PHP_METHOD(Timestamp, __set_state) { php_phongo_timestamp_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_timestamp_ce); intern = Z_TIMESTAMP_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_timestamp_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\Timestamp::__toString() Returns a string in the form: [increment:timestamp] */ static PHP_METHOD(Timestamp, __toString) { php_phongo_timestamp_t* intern; char* retval; int retval_len; intern = Z_TIMESTAMP_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } retval_len = spprintf(&retval, 0, "[%" PRIu32 ":%" PRIu32 "]", intern->increment, intern->timestamp); PHONGO_RETVAL_STRINGL(retval, retval_len); efree(retval); } /* }}} */ /* {{{ proto array MongoDB\BSON\Timestamp::jsonSerialize() */ static PHP_METHOD(Timestamp, jsonSerialize) { php_phongo_timestamp_t* intern; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_TIMESTAMP_OBJ_P(getThis()); array_init_size(return_value, 1); #if PHP_VERSION_ID >= 70000 { zval ts; array_init_size(&ts, 2); ADD_ASSOC_LONG_EX(&ts, "t", intern->timestamp); ADD_ASSOC_LONG_EX(&ts, "i", intern->increment); ADD_ASSOC_ZVAL_EX(return_value, "$timestamp", &ts); } #else { zval* ts; MAKE_STD_ZVAL(ts); array_init_size(ts, 2); 
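		/* Both the PHP 7 branch above and this PHP 5 branch build the same
		 * extended JSON shape, {"$timestamp": {"t": <timestamp>, "i": <increment>}},
		 * so json_encode(new MongoDB\BSON\Timestamp(5678, 1234)) produces
		 * {"$timestamp":{"t":1234,"i":5678}} (illustrative values). */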
ADD_ASSOC_LONG_EX(ts, "t", intern->timestamp); ADD_ASSOC_LONG_EX(ts, "i", intern->increment); ADD_ASSOC_ZVAL_EX(return_value, "$timestamp", ts); } #endif } /* }}} */ /* {{{ proto string MongoDB\BSON\Timestamp::serialize() */ static PHP_METHOD(Timestamp, serialize) { php_phongo_timestamp_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; char s_increment[12]; char s_timestamp[12]; int s_increment_len; int s_timestamp_len; intern = Z_TIMESTAMP_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } s_increment_len = snprintf(s_increment, sizeof(s_increment), "%" PRIu32, intern->increment); s_timestamp_len = snprintf(s_timestamp, sizeof(s_timestamp), "%" PRIu32, intern->timestamp); #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 2); ADD_ASSOC_STRINGL(&retval, "increment", s_increment, s_increment_len); ADD_ASSOC_STRINGL(&retval, "timestamp", s_timestamp, s_timestamp_len); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 2); ADD_ASSOC_STRINGL(retval, "increment", s_increment, s_increment_len); ADD_ASSOC_STRINGL(retval, "timestamp", s_timestamp, s_timestamp_len); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\Timestamp::unserialize(string $serialized) */ static PHP_METHOD(Timestamp, unserialize) { php_phongo_timestamp_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_TIMESTAMP_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_timestamp_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_timestamp_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_timestamp_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\Timestamp function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp___construct, 0, 0, 2) ZEND_ARG_INFO(0, increment) ZEND_ARG_INFO(0, timestamp) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Timestamp_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_timestamp_me[] = { /* clang-format off */ PHP_ME(Timestamp, __construct, ai_Timestamp___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Timestamp, __set_state, ai_Timestamp___set_state, ZEND_ACC_PUBLIC | 
ZEND_ACC_STATIC) PHP_ME(Timestamp, __toString, ai_Timestamp_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Timestamp, jsonSerialize, ai_Timestamp_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Timestamp, serialize, ai_Timestamp_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Timestamp, unserialize, ai_Timestamp_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Timestamp, getIncrement, ai_Timestamp_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Timestamp, getTimestamp, ai_Timestamp_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Timestamp object handlers */ static zend_object_handlers php_phongo_handler_timestamp; static void php_phongo_timestamp_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_timestamp_t* intern = Z_OBJ_TIMESTAMP(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_timestamp_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_timestamp_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_timestamp_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_timestamp; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_timestamp_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_timestamp; return retval; } #endif } /* }}} */ static int php_phongo_timestamp_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_timestamp_t *intern1, *intern2; intern1 = Z_TIMESTAMP_OBJ_P(o1); intern2 = Z_TIMESTAMP_OBJ_P(o2); /* MongoDB compares the timestamp before the increment. */ if (intern1->timestamp != intern2->timestamp) { return intern1->timestamp < intern2->timestamp ? -1 : 1; } if (intern1->increment != intern2->increment) { return intern1->increment < intern2->increment ? 
-1 : 1; } return 0; } /* }}} */ static HashTable* php_phongo_timestamp_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_TIMESTAMP_OBJ_P(object)->properties; } /* }}} */ static HashTable* php_phongo_timestamp_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_timestamp_t* intern; HashTable* props; char s_increment[24]; char s_timestamp[24]; int s_increment_len; int s_timestamp_len; intern = Z_TIMESTAMP_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->initialized) { return props; } s_increment_len = snprintf(s_increment, sizeof(s_increment), "%" PRIu32, intern->increment); s_timestamp_len = snprintf(s_timestamp, sizeof(s_timestamp), "%" PRIu32, intern->timestamp); #if PHP_VERSION_ID >= 70000 { zval increment, timestamp; ZVAL_STRINGL(&increment, s_increment, s_increment_len); zend_hash_str_update(props, "increment", sizeof("increment") - 1, &increment); ZVAL_STRINGL(×tamp, s_timestamp, s_timestamp_len); zend_hash_str_update(props, "timestamp", sizeof("timestamp") - 1, ×tamp); } #else { zval *increment, *timestamp; MAKE_STD_ZVAL(increment); ZVAL_STRINGL(increment, s_increment, s_increment_len, 1); zend_hash_update(props, "increment", sizeof("increment"), &increment, sizeof(increment), NULL); MAKE_STD_ZVAL(timestamp); ZVAL_STRINGL(timestamp, s_timestamp, s_timestamp_len, 1); zend_hash_update(props, "timestamp", sizeof("timestamp"), ×tamp, sizeof(timestamp), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_timestamp_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_timestamp_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_timestamp_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_timestamp_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_timestamp_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Timestamp", php_phongo_timestamp_me); php_phongo_timestamp_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_timestamp_ce->create_object = php_phongo_timestamp_create_object; PHONGO_CE_FINAL(php_phongo_timestamp_ce); zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, php_phongo_timestamp_interface_ce); zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_timestamp_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_timestamp, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_timestamp.compare_objects = php_phongo_timestamp_compare_objects; php_phongo_handler_timestamp.get_debug_info = php_phongo_timestamp_get_debug_info; php_phongo_handler_timestamp.get_gc = php_phongo_timestamp_get_gc; php_phongo_handler_timestamp.get_properties = php_phongo_timestamp_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_timestamp.free_obj = php_phongo_timestamp_free_object; php_phongo_handler_timestamp.offset = XtOffsetOf(php_phongo_timestamp_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/TimestampInterface.c0000644000076500000240000000326513572250757017747 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_timestamp_interface_ce; /* {{{ MongoDB\BSON\TimestampInterface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_TimestampInterface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_timestamp_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(TimestampInterface, getIncrement, ai_TimestampInterface_void) ZEND_ABSTRACT_ME(TimestampInterface, getTimestamp, ai_TimestampInterface_void) ZEND_ABSTRACT_ME(TimestampInterface, __toString, ai_TimestampInterface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_timestamp_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "TimestampInterface", php_phongo_timestamp_interface_me); php_phongo_timestamp_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Type.c0000644000076500000240000000234513572250757015102 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_type_ce; /* {{{ MongoDB\BSON\Type function entries */ static zend_function_entry php_phongo_type_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_type_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Type", php_phongo_type_me); php_phongo_type_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/UTCDateTime.c0000644000076500000240000004433313572250757016234 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #ifdef PHP_WIN32 #include "win32/time.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_utcdatetime_ce; /* Initialize the object and return whether it was successful. */ static bool php_phongo_utcdatetime_init(php_phongo_utcdatetime_t* intern, int64_t milliseconds) /* {{{ */ { intern->milliseconds = milliseconds; intern->initialized = true; return true; } /* }}} */ /* Initialize the object from a numeric string and return whether it was * successful. An exception will be thrown on error. */ static bool php_phongo_utcdatetime_init_from_string(php_phongo_utcdatetime_t* intern, const char* s_milliseconds, phongo_zpp_char_len s_milliseconds_len TSRMLS_DC) /* {{{ */ { int64_t milliseconds; char* endptr = NULL; /* bson_ascii_strtoll() sets errno if conversion fails. If conversion * succeeds, we still want to ensure that the entire string was parsed. */ milliseconds = bson_ascii_strtoll(s_milliseconds, &endptr, 10); if (errno || (endptr && endptr != ((const char*) s_milliseconds + s_milliseconds_len))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error parsing \"%s\" as 64-bit integer for %s initialization", s_milliseconds, ZSTR_VAL(php_phongo_utcdatetime_ce->name)); return false; } return php_phongo_utcdatetime_init(intern, milliseconds); } /* }}} */ /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_utcdatetime_init_from_hash(php_phongo_utcdatetime_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval* milliseconds; if ((milliseconds = zend_hash_str_find(props, "milliseconds", sizeof("milliseconds") - 1)) && Z_TYPE_P(milliseconds) == IS_LONG) { return php_phongo_utcdatetime_init(intern, Z_LVAL_P(milliseconds)); } if ((milliseconds = zend_hash_str_find(props, "milliseconds", sizeof("milliseconds") - 1)) && Z_TYPE_P(milliseconds) == IS_STRING) { return php_phongo_utcdatetime_init_from_string(intern, Z_STRVAL_P(milliseconds), Z_STRLEN_P(milliseconds) TSRMLS_CC); } #else zval** milliseconds; if (zend_hash_find(props, "milliseconds", sizeof("milliseconds"), (void**) &milliseconds) == SUCCESS && Z_TYPE_PP(milliseconds) == IS_LONG) { return php_phongo_utcdatetime_init(intern, Z_LVAL_PP(milliseconds)); } if (zend_hash_find(props, "milliseconds", sizeof("milliseconds"), (void**) &milliseconds) == SUCCESS && Z_TYPE_PP(milliseconds) == IS_STRING) { return php_phongo_utcdatetime_init_from_string(intern, Z_STRVAL_PP(milliseconds), Z_STRLEN_PP(milliseconds) TSRMLS_CC); } #endif phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"milliseconds\" integer or numeric string field", ZSTR_VAL(php_phongo_utcdatetime_ce->name)); return false; } /* }}} */ /* Initialize the object from the current time and return whether it was * successful. 
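 * The current time is read with gettimeofday() and reduced to milliseconds
 * as (tv_sec * 1000) + (tv_usec / 1000), matching the int64 millisecond
 * representation used by the other initializers.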
*/ static bool php_phongo_utcdatetime_init_from_current_time(php_phongo_utcdatetime_t* intern) /* {{{ */ { int64_t sec, usec; struct timeval cur_time; gettimeofday(&cur_time, NULL); sec = cur_time.tv_sec; usec = cur_time.tv_usec; intern->milliseconds = (sec * 1000) + (usec / 1000); intern->initialized = true; return true; } /* }}} */ /* Initialize the object from a DateTime object and return whether it was * successful. */ static bool php_phongo_utcdatetime_init_from_date(php_phongo_utcdatetime_t* intern, php_date_obj* datetime_obj) /* {{{ */ { int64_t sec, usec; /* The following assignments use the same logic as date_format() in php_date.c */ sec = datetime_obj->time->sse; #if PHP_VERSION_ID >= 70200 usec = (int64_t) floor(datetime_obj->time->us); #else usec = (int64_t) floor(datetime_obj->time->f * 1000000 + 0.5); #endif intern->milliseconds = (sec * 1000) + (usec / 1000); intern->initialized = true; return true; } /* }}} */ /* {{{ proto void MongoDB\BSON\UTCDateTime::__construct([int|float|string|DateTimeInterface $milliseconds = null]) Construct a new BSON UTCDateTime type from either the current time, milliseconds since the epoch, or a DateTimeInterface object. Defaults to the current time. */ static PHP_METHOD(UTCDateTime, __construct) { php_phongo_utcdatetime_t* intern; zend_error_handling error_handling; zval* milliseconds = NULL; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_UTCDATETIME_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|z!", &milliseconds) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); if (milliseconds == NULL) { php_phongo_utcdatetime_init_from_current_time(intern); return; } if (Z_TYPE_P(milliseconds) == IS_OBJECT) { if (instanceof_function(Z_OBJCE_P(milliseconds), php_date_get_date_ce() TSRMLS_CC) || (php_phongo_date_immutable_ce && instanceof_function(Z_OBJCE_P(milliseconds), php_phongo_date_immutable_ce TSRMLS_CC))) { php_phongo_utcdatetime_init_from_date(intern, Z_PHPDATE_P(milliseconds)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected instance of DateTimeInterface, %s given", ZSTR_VAL(Z_OBJCE_P(milliseconds)->name)); } return; } if (Z_TYPE_P(milliseconds) == IS_LONG) { php_phongo_utcdatetime_init(intern, Z_LVAL_P(milliseconds)); return; } if (Z_TYPE_P(milliseconds) == IS_DOUBLE) { char tmp[24]; int tmp_len; tmp_len = snprintf(tmp, sizeof(tmp), "%.0f", Z_DVAL_P(milliseconds) > 0 ? 
floor(Z_DVAL_P(milliseconds)) : ceil(Z_DVAL_P(milliseconds))); php_phongo_utcdatetime_init_from_string(intern, tmp, tmp_len TSRMLS_CC); return; } if (Z_TYPE_P(milliseconds) != IS_STRING) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(milliseconds)); return; } php_phongo_utcdatetime_init_from_string(intern, Z_STRVAL_P(milliseconds), Z_STRLEN_P(milliseconds) TSRMLS_CC); } /* }}} */ /* {{{ proto void MongoDB\BSON\UTCDateTime::__set_state(array $properties) */ static PHP_METHOD(UTCDateTime, __set_state) { php_phongo_utcdatetime_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_utcdatetime_ce); intern = Z_UTCDATETIME_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_utcdatetime_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string MongoDB\BSON\UTCDateTime::__toString() Returns the UTCDateTime's milliseconds as a string */ static PHP_METHOD(UTCDateTime, __toString) { php_phongo_utcdatetime_t* intern; char s_milliseconds[24]; int s_milliseconds_len; intern = Z_UTCDATETIME_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } s_milliseconds_len = snprintf(s_milliseconds, sizeof(s_milliseconds), "%" PRId64, intern->milliseconds); PHONGO_RETVAL_STRINGL(s_milliseconds, s_milliseconds_len); } /* }}} */ /* {{{ proto DateTime MongoDB\BSON\UTCDateTime::toDateTime() Returns a DateTime object representing this UTCDateTime */ static PHP_METHOD(UTCDateTime, toDateTime) { php_phongo_utcdatetime_t* intern; php_date_obj* datetime_obj; char* sec; size_t sec_len; intern = Z_UTCDATETIME_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } object_init_ex(return_value, php_date_get_date_ce()); datetime_obj = Z_PHPDATE_P(return_value); sec_len = spprintf(&sec, 0, "@%" PRId64, intern->milliseconds / 1000); php_date_initialize(datetime_obj, sec, sec_len, NULL, NULL, 0 TSRMLS_CC); efree(sec); #if PHP_VERSION_ID >= 70200 datetime_obj->time->us = (intern->milliseconds % 1000) * 1000; #else datetime_obj->time->f = (double) (intern->milliseconds % 1000) / 1000; #endif } /* }}} */ /* {{{ proto array MongoDB\BSON\UTCDateTime::jsonSerialize() */ static PHP_METHOD(UTCDateTime, jsonSerialize) { php_phongo_utcdatetime_t* intern; char s_milliseconds[24]; int s_milliseconds_len; if (zend_parse_parameters_none() == FAILURE) { return; } intern = Z_UTCDATETIME_OBJ_P(getThis()); s_milliseconds_len = snprintf(s_milliseconds, sizeof(s_milliseconds), "%" PRId64, intern->milliseconds); array_init_size(return_value, 1); #if PHP_VERSION_ID >= 70000 { zval udt; array_init_size(&udt, 1); ADD_ASSOC_STRINGL(&udt, "$numberLong", s_milliseconds, s_milliseconds_len); ADD_ASSOC_ZVAL_EX(return_value, "$date", &udt); } #else { zval* udt; MAKE_STD_ZVAL(udt); array_init_size(udt, 1); ADD_ASSOC_STRINGL(udt, "$numberLong", s_milliseconds, s_milliseconds_len); ADD_ASSOC_ZVAL_EX(return_value, "$date", udt); } #endif } /* }}} */ /* {{{ proto string MongoDB\BSON\UTCDateTime::serialize() */ static PHP_METHOD(UTCDateTime, serialize) { php_phongo_utcdatetime_t* intern; ZVAL_RETVAL_TYPE retval; php_serialize_data_t var_hash; smart_str buf = { 0 }; char s_milliseconds[24]; int s_milliseconds_len; intern = Z_UTCDATETIME_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } s_milliseconds_len = snprintf(s_milliseconds, sizeof(s_milliseconds), "%" PRId64, 
intern->milliseconds); #if PHP_VERSION_ID >= 70000 array_init_size(&retval, 2); ADD_ASSOC_STRINGL(&retval, "milliseconds", s_milliseconds, s_milliseconds_len); #else ALLOC_INIT_ZVAL(retval); array_init_size(retval, 2); ADD_ASSOC_STRINGL(retval, "milliseconds", s_milliseconds, s_milliseconds_len); #endif PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&buf, &retval, &var_hash TSRMLS_CC); smart_str_0(&buf); PHP_VAR_SERIALIZE_DESTROY(var_hash); PHONGO_RETVAL_SMART_STR(buf); smart_str_free(&buf); zval_ptr_dtor(&retval); } /* }}} */ /* {{{ proto void MongoDB\BSON\UTCDateTime::unserialize(string $serialized) */ static PHP_METHOD(UTCDateTime, unserialize) { php_phongo_utcdatetime_t* intern; zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; #if PHP_VERSION_ID >= 70000 zval props; #else zval* props; #endif php_unserialize_data_t var_hash; intern = Z_UTCDATETIME_OBJ_P(getThis()); zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); #if PHP_VERSION_ID < 70000 ALLOC_INIT_ZVAL(props); #endif PHP_VAR_UNSERIALIZE_INIT(var_hash); if (!php_var_unserialize(&props, (const unsigned char**) &serialized, (unsigned char*) serialized + serialized_len, &var_hash TSRMLS_CC)) { zval_ptr_dtor(&props); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s unserialization failed", ZSTR_VAL(php_phongo_utcdatetime_ce->name)); PHP_VAR_UNSERIALIZE_DESTROY(var_hash); return; } PHP_VAR_UNSERIALIZE_DESTROY(var_hash); #if PHP_VERSION_ID >= 70000 php_phongo_utcdatetime_init_from_hash(intern, HASH_OF(&props) TSRMLS_CC); #else php_phongo_utcdatetime_init_from_hash(intern, HASH_OF(props) TSRMLS_CC); #endif zval_ptr_dtor(&props); } /* }}} */ /* {{{ MongoDB\BSON\UTCDateTime function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime___construct, 0, 0, 0) ZEND_ARG_INFO(0, milliseconds) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTime_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_utcdatetime_me[] = { /* clang-format off */ PHP_ME(UTCDateTime, __construct, ai_UTCDateTime___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(UTCDateTime, __set_state, ai_UTCDateTime___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(UTCDateTime, __toString, ai_UTCDateTime_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(UTCDateTime, jsonSerialize, ai_UTCDateTime_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(UTCDateTime, serialize, ai_UTCDateTime_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(UTCDateTime, unserialize, ai_UTCDateTime_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(UTCDateTime, toDateTime, ai_UTCDateTime_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\UTCDateTime object handlers */ static zend_object_handlers php_phongo_handler_utcdatetime; static void php_phongo_utcdatetime_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_utcdatetime_t* intern = Z_OBJ_UTCDATETIME(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if 
(intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_utcdatetime_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_utcdatetime_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_utcdatetime_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_utcdatetime; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_utcdatetime_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_utcdatetime; return retval; } #endif } /* }}} */ static int php_phongo_utcdatetime_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_utcdatetime_t *intern1, *intern2; intern1 = Z_UTCDATETIME_OBJ_P(o1); intern2 = Z_UTCDATETIME_OBJ_P(o2); if (intern1->milliseconds != intern2->milliseconds) { return intern1->milliseconds < intern2->milliseconds ? -1 : 1; } return 0; } /* }}} */ static HashTable* php_phongo_utcdatetime_get_gc(zval* object, phongo_get_gc_table table, int* n TSRMLS_DC) /* {{{ */ { *table = NULL; *n = 0; return Z_UTCDATETIME_OBJ_P(object)->properties; } /* }}} */ static HashTable* php_phongo_utcdatetime_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_utcdatetime_t* intern; HashTable* props; char s_milliseconds[24]; int s_milliseconds_len; intern = Z_UTCDATETIME_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 2); if (!intern->initialized) { return props; } s_milliseconds_len = snprintf(s_milliseconds, sizeof(s_milliseconds), "%" PRId64, intern->milliseconds); #if PHP_VERSION_ID >= 70000 { zval milliseconds; ZVAL_STRINGL(&milliseconds, s_milliseconds, s_milliseconds_len); zend_hash_str_update(props, "milliseconds", sizeof("milliseconds") - 1, &milliseconds); } #else { zval* milliseconds; MAKE_STD_ZVAL(milliseconds); ZVAL_STRINGL(milliseconds, s_milliseconds, s_milliseconds_len, 1); zend_hash_update(props, "milliseconds", sizeof("milliseconds"), &milliseconds, sizeof(milliseconds), NULL); } #endif return props; } /* }}} */ static HashTable* php_phongo_utcdatetime_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_utcdatetime_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_utcdatetime_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_utcdatetime_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_utcdatetime_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "UTCDateTime", php_phongo_utcdatetime_me); php_phongo_utcdatetime_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_utcdatetime_ce->create_object = php_phongo_utcdatetime_create_object; PHONGO_CE_FINAL(php_phongo_utcdatetime_ce); zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, php_phongo_utcdatetime_interface_ce); zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, php_phongo_type_ce); zend_class_implements(php_phongo_utcdatetime_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_utcdatetime, 
phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_utcdatetime.compare_objects = php_phongo_utcdatetime_compare_objects; php_phongo_handler_utcdatetime.get_debug_info = php_phongo_utcdatetime_get_debug_info; php_phongo_handler_utcdatetime.get_gc = php_phongo_utcdatetime_get_gc; php_phongo_handler_utcdatetime.get_properties = php_phongo_utcdatetime_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_utcdatetime.free_obj = php_phongo_utcdatetime_free_object; php_phongo_handler_utcdatetime.offset = XtOffsetOf(php_phongo_utcdatetime_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/UTCDateTimeInterface.c0000644000076500000240000000317313572250757020052 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_utcdatetime_interface_ce; /* {{{ MongoDB\BSON\UTCDateTimeInterface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_UTCDateTimeInterface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_utcdatetime_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(UTCDateTimeInterface, toDateTime, ai_UTCDateTimeInterface_void) ZEND_ABSTRACT_ME(UTCDateTimeInterface, __toString, ai_UTCDateTimeInterface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_utcdatetime_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "UTCDateTimeInterface", php_phongo_utcdatetime_interface_me); php_phongo_utcdatetime_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Undefined.c0000644000076500000240000001213213572250757016055 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #if PHP_VERSION_ID >= 70000 #include #else #include #endif #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_undefined_ce; /* {{{ proto string MongoDB\BSON\Undefined::__toString() Return the empty string. 
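   Behaviour summary (illustrative, based on the methods below): casting an
   Undefined instance to string yields "", json_encode() of an instance yields
   {"$undefined":true} via jsonSerialize(), and serialize() returns an empty
   string. The constructor is disabled (private), so instances are produced by
   the driver itself, e.g. when decoding the deprecated BSON undefined type.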
*/ static PHP_METHOD(Undefined, __toString) { PHONGO_RETURN_STRINGL("", 0); } /* }}} */ /* {{{ proto array MongoDB\BSON\Undefined::jsonSerialize() */ static PHP_METHOD(Undefined, jsonSerialize) { if (zend_parse_parameters_none() == FAILURE) { return; } array_init_size(return_value, 1); ADD_ASSOC_BOOL_EX(return_value, "$undefined", 1); } /* }}} */ /* {{{ proto string MongoDB\BSON\Undefined::serialize() */ static PHP_METHOD(Undefined, serialize) { PHONGO_RETURN_STRING(""); } /* }}} */ /* {{{ proto void MongoDB\BSON\Undefined::unserialize(string $serialized) */ static PHP_METHOD(Undefined, unserialize) { zend_error_handling error_handling; char* serialized; phongo_zpp_char_len serialized_len; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &serialized, &serialized_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); } /* }}} */ /* {{{ MongoDB\BSON\Undefined function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Undefined_unserialize, 0, 0, 1) ZEND_ARG_INFO(0, serialized) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Undefined_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_undefined_me[] = { /* clang-format off */ /* __set_state intentionally missing */ PHP_ME(Undefined, __toString, ai_Undefined_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Undefined, jsonSerialize, ai_Undefined_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Undefined, serialize, ai_Undefined_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Undefined, unserialize, ai_Undefined_unserialize, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Undefined_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\BSON\Undefined object handlers */ static zend_object_handlers php_phongo_handler_undefined; static void php_phongo_undefined_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_undefined_t* intern = Z_OBJ_UNDEFINED(object); zend_object_std_dtor(&intern->std TSRMLS_CC); #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_undefined_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_undefined_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_undefined_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_undefined; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_undefined_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_undefined; return retval; } #endif } /* }}} */ /* }}} */ void php_phongo_undefined_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Undefined", php_phongo_undefined_me); php_phongo_undefined_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_undefined_ce->create_object = php_phongo_undefined_create_object; PHONGO_CE_FINAL(php_phongo_undefined_ce); zend_class_implements(php_phongo_undefined_ce TSRMLS_CC, 1, php_phongo_json_serializable_ce); zend_class_implements(php_phongo_undefined_ce TSRMLS_CC, 1, php_phongo_type_ce); 
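	/* Undefined also implements Serializable below; unlike the other BSON
	 * types in this directory, no compare_objects or get_properties handlers
	 * are installed for it, since instances carry no state. */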
zend_class_implements(php_phongo_undefined_ce TSRMLS_CC, 1, zend_ce_serializable); memcpy(&php_phongo_handler_undefined, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); #if PHP_VERSION_ID >= 70000 php_phongo_handler_undefined.free_obj = php_phongo_undefined_free_object; php_phongo_handler_undefined.offset = XtOffsetOf(php_phongo_undefined_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/Unserializable.c0000644000076500000240000000305113572250757017125 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_unserializable_ce; /* {{{ MongoDB\BSON\Unserializable function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Unserializable_bsonUnserialize, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, data, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_unserializable_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(Unserializable, bsonUnserialize, ai_Unserializable_bsonUnserialize) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_unserializable_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\BSON", "Unserializable", php_phongo_unserializable_me); php_phongo_unserializable_ce = zend_register_internal_interface(&ce TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/functions.c0000644000076500000240000001246513572250757016175 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" typedef enum { PHONGO_JSON_MODE_LEGACY, PHONGO_JSON_MODE_CANONICAL, PHONGO_JSON_MODE_RELAXED, } php_phongo_json_mode_t; /* {{{ proto string MongoDB\BSON\fromPHP(array|object $value) Returns the BSON representation of a PHP value */ PHP_FUNCTION(MongoDB_BSON_fromPHP) { zval* data; bson_t* bson; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A", &data) == FAILURE) { return; } bson = bson_new(); php_phongo_zval_to_bson(data, PHONGO_BSON_NONE, bson, NULL TSRMLS_CC); PHONGO_RETVAL_STRINGL((const char*) bson_get_data(bson), bson->len); bson_destroy(bson); } /* }}} */ /* {{{ proto array|object MongoDB\BSON\toPHP(string $bson [, array $typemap = array()]) Returns the PHP representation of a BSON value, optionally converting it into a custom class */ PHP_FUNCTION(MongoDB_BSON_toPHP) { char* data; phongo_zpp_char_len data_len; zval* typemap = NULL; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s|a!", &data, &data_len, &typemap) == FAILURE) { return; } if (!php_phongo_bson_typemap_to_state(typemap, &state.map TSRMLS_CC)) { return; } if (!php_phongo_bson_to_zval_ex((const unsigned char*) data, data_len, &state)) { zval_ptr_dtor(&state.zchild); php_phongo_bson_typemap_dtor(&state.map); RETURN_NULL(); } php_phongo_bson_typemap_dtor(&state.map); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } /* }}} */ /* {{{ proto string MongoDB\BSON\fromJSON(string $json) Returns the BSON representation of a JSON value */ PHP_FUNCTION(MongoDB_BSON_fromJSON) { char* json; phongo_zpp_char_len json_len; bson_t bson = BSON_INITIALIZER; bson_error_t error = { 0 }; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &json, &json_len) == FAILURE) { return; } if (bson_init_from_json(&bson, (const char*) json, json_len, &error)) { PHONGO_RETVAL_STRINGL((const char*) bson_get_data(&bson), bson.len); bson_destroy(&bson); } else { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s", error.domain == BSON_ERROR_JSON ? 
error.message : "Error parsing JSON"); } } /* }}} */ static void phongo_bson_to_json(INTERNAL_FUNCTION_PARAMETERS, php_phongo_json_mode_t mode) { char* data; phongo_zpp_char_len data_len; const bson_t* bson; bool eof = false; bson_reader_t* reader; char* json = NULL; size_t json_len; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &data, &data_len) == FAILURE) { return; } reader = bson_reader_new_from_data((const unsigned char*) data, data_len); bson = bson_reader_read(reader, NULL); if (!bson) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Could not read document from BSON reader"); bson_reader_destroy(reader); return; } if (mode == PHONGO_JSON_MODE_LEGACY) { json = bson_as_json(bson, &json_len); } else if (mode == PHONGO_JSON_MODE_CANONICAL) { json = bson_as_canonical_extended_json(bson, &json_len); } else if (mode == PHONGO_JSON_MODE_RELAXED) { json = bson_as_relaxed_extended_json(bson, &json_len); } if (!json) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Could not convert BSON document to a JSON string"); bson_reader_destroy(reader); return; } PHONGO_RETVAL_STRINGL(json, json_len); bson_free(json); if (bson_reader_read(reader, &eof) || !eof) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Reading document did not exhaust input buffer"); } bson_reader_destroy(reader); } /* }}} */ /* {{{ proto string MongoDB\BSON\toJSON(string $bson) Returns the legacy extended JSON representation of a BSON value */ PHP_FUNCTION(MongoDB_BSON_toJSON) { phongo_bson_to_json(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHONGO_JSON_MODE_LEGACY); } /* }}} */ /* {{{ proto string MongoDB\BSON\toCanonicalExtendedJSON(string $bson) Returns the canonical extended JSON representation of a BSON value */ PHP_FUNCTION(MongoDB_BSON_toCanonicalExtendedJSON) { phongo_bson_to_json(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHONGO_JSON_MODE_CANONICAL); } /* }}} */ /* {{{ proto string MongoDB\BSON\toRelaxedExtendedJSON(string $bson) Returns the relaxed extended JSON representation of a BSON value */ PHP_FUNCTION(MongoDB_BSON_toRelaxedExtendedJSON) { phongo_bson_to_json(INTERNAL_FUNCTION_PARAM_PASSTHRU, PHONGO_JSON_MODE_RELAXED); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/BSON/functions.h0000644000076500000240000000212013572250757016165 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef PHONGO_BSON_FUNCTIONS_H #define PHONGO_BSON_FUNCTIONS_H #include PHP_FUNCTION(MongoDB_BSON_fromPHP); PHP_FUNCTION(MongoDB_BSON_toPHP); PHP_FUNCTION(MongoDB_BSON_fromJSON); PHP_FUNCTION(MongoDB_BSON_toJSON); PHP_FUNCTION(MongoDB_BSON_toCanonicalExtendedJSON); PHP_FUNCTION(MongoDB_BSON_toRelaxedExtendedJSON); #endif /* PHONGO_BSON_FUNCTIONS_H */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/AuthenticationException.c0000644000076500000240000000310613572250757023475 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_authenticationexception_ce; /* {{{ MongoDB\Driver\Exception\AuthenticationException function entries */ static zend_function_entry php_phongo_authenticationexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_authenticationexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "AuthenticationException", php_phongo_authenticationexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_authenticationexception_ce = zend_register_internal_class_ex(&ce, php_phongo_connectionexception_ce); #else php_phongo_authenticationexception_ce = zend_register_internal_class_ex(&ce, php_phongo_connectionexception_ce, NULL TSRMLS_CC); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/BulkWriteException.c0000644000076500000240000000302413572250757022425 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_bulkwriteexception_ce; /* {{{ MongoDB\Driver\Exception\BulkWriteException function entries */ static zend_function_entry php_phongo_bulkwriteexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_bulkwriteexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "BulkWriteException", php_phongo_bulkwriteexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_bulkwriteexception_ce = zend_register_internal_class_ex(&ce, php_phongo_writeexception_ce); #else php_phongo_bulkwriteexception_ce = zend_register_internal_class_ex(&ce, php_phongo_writeexception_ce, NULL TSRMLS_CC); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/CommandException.c0000644000076500000240000000500313572250757022072 0ustar alcaeusstaff/* * Copyright 2018 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_commandexception_ce; /* {{{ proto document MongoDB\Driver\Exception\CommandException::getResultDocument() Returns the result document from the failed command. 
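   Illustrative usage sketch ($manager and $command are assumed to be a
   MongoDB\Driver\Manager and MongoDB\Driver\Command; names are examples only):

     try {
         $manager->executeCommand('db', $command);
     } catch (MongoDB\Driver\Exception\CommandException $e) {
         $reply = $e->getResultDocument(); // server reply kept in the
                                           // protected $resultDocument property
     }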
*/ static PHP_METHOD(CommandException, getResultDocument) { zval* resultdocument; #if PHP_VERSION_ID >= 70000 zval rv; #endif if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 resultdocument = zend_read_property(php_phongo_commandexception_ce, getThis(), ZEND_STRL("resultDocument"), 0, &rv TSRMLS_CC); #else resultdocument = zend_read_property(php_phongo_commandexception_ce, getThis(), ZEND_STRL("resultDocument"), 0 TSRMLS_CC); #endif RETURN_ZVAL(resultdocument, 1, 0); } /* }}} */ /* {{{ MongoDB\Driver\Exception\CommandException function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_CommandException_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_commandexception_me[] = { /* clang-format off */ PHP_ME(CommandException, getResultDocument, ai_CommandException_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_commandexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "CommandException", php_phongo_commandexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_commandexception_ce = zend_register_internal_class_ex(&ce, php_phongo_serverexception_ce); #else php_phongo_commandexception_ce = zend_register_internal_class_ex(&ce, php_phongo_serverexception_ce, NULL TSRMLS_CC); #endif zend_declare_property_null(php_phongo_commandexception_ce, ZEND_STRL("resultDocument"), ZEND_ACC_PROTECTED TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/ConnectionException.c0000644000076500000240000000304013572250757022612 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_connectionexception_ce; /* {{{ MongoDB\Driver\Exception\ConnectionException function entries */ static zend_function_entry php_phongo_connectionexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_connectionexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "ConnectionException", php_phongo_connectionexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_connectionexception_ce = zend_register_internal_class_ex(&ce, php_phongo_runtimeexception_ce); #else php_phongo_connectionexception_ce = zend_register_internal_class_ex(&ce, php_phongo_runtimeexception_ce, NULL TSRMLS_CC); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/ConnectionTimeoutException.c0000644000076500000240000000323213572250757024164 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_connectiontimeoutexception_ce; /* {{{ MongoDB\Driver\Exception\ConnectionTimeoutException function entries */ static zend_function_entry php_phongo_connectiontimeoutexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_connectiontimeoutexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "ConnectionTimeoutException", php_phongo_connectiontimeoutexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_connectiontimeoutexception_ce = zend_register_internal_class_ex(&ce, php_phongo_connectionexception_ce); #else php_phongo_connectiontimeoutexception_ce = zend_register_internal_class_ex(&ce, php_phongo_connectionexception_ce, NULL TSRMLS_CC); #endif PHONGO_CE_FINAL(php_phongo_connectiontimeoutexception_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/Exception.c0000644000076500000240000000266313572250757020604 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_exception_ce; /* {{{ MongoDB\Driver\Exception\Exception function entries */ static zend_function_entry php_phongo_exception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_exception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "Exception", php_phongo_exception_me); php_phongo_exception_ce = zend_register_internal_interface(&ce TSRMLS_CC); #if PHP_VERSION_ID >= 70000 zend_class_implements(php_phongo_exception_ce TSRMLS_CC, 1, zend_ce_throwable); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/ExecutionTimeoutException.c0000644000076500000240000000321113572250757024025 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_executiontimeoutexception_ce; /* {{{ MongoDB\Driver\Exception\ExecutionTimeoutException function entries */ static zend_function_entry php_phongo_executiontimeoutexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_executiontimeoutexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "ExecutionTimeoutException", php_phongo_executiontimeoutexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_executiontimeoutexception_ce = zend_register_internal_class_ex(&ce, php_phongo_serverexception_ce); #else php_phongo_executiontimeoutexception_ce = zend_register_internal_class_ex(&ce, php_phongo_serverexception_ce, NULL TSRMLS_CC); #endif PHONGO_CE_FINAL(php_phongo_executiontimeoutexception_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/InvalidArgumentException.c0000644000076500000240000000332413572250757023611 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_invalidargumentexception_ce; /* {{{ MongoDB\Driver\Exception\InvalidArgumentException function entries */ static zend_function_entry php_phongo_invalidargumentexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_invalidargumentexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "InvalidArgumentException", php_phongo_invalidargumentexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_invalidargumentexception_ce = zend_register_internal_class_ex(&ce, spl_ce_InvalidArgumentException); #else php_phongo_invalidargumentexception_ce = zend_register_internal_class_ex(&ce, spl_ce_InvalidArgumentException, NULL TSRMLS_CC); #endif zend_class_implements(php_phongo_invalidargumentexception_ce TSRMLS_CC, 1, php_phongo_exception_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/LogicException.c0000644000076500000240000000314613572250757021557 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_logicexception_ce; /* {{{ MongoDB\Driver\Exception\LogicException function entries */ static zend_function_entry php_phongo_logicexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_logicexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "LogicException", php_phongo_logicexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_logicexception_ce = zend_register_internal_class_ex(&ce, spl_ce_LogicException); #else php_phongo_logicexception_ce = zend_register_internal_class_ex(&ce, spl_ce_LogicException, NULL TSRMLS_CC); #endif zend_class_implements(php_phongo_logicexception_ce TSRMLS_CC, 1, php_phongo_exception_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/RuntimeException.c0000644000076500000240000000723013572250757022143 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_array_api.h" zend_class_entry* php_phongo_runtimeexception_ce; static bool php_phongo_has_string_array_element(zval* labels, char* label TSRMLS_DC) { HashTable* ht_data; if (Z_TYPE_P(labels) != IS_ARRAY) { return false; } ht_data = HASH_OF(labels); #if PHP_VERSION_ID >= 70000 { zval* z_label; ZEND_HASH_FOREACH_VAL(ht_data, z_label) { if ((Z_TYPE_P(z_label) == IS_STRING) && (strcmp(Z_STRVAL_P(z_label), label) == 0)) { return true; } } ZEND_HASH_FOREACH_END(); } #else { HashPosition pos; zval** z_label; for ( zend_hash_internal_pointer_reset_ex(ht_data, &pos); zend_hash_get_current_data_ex(ht_data, (void**) &z_label, &pos) == SUCCESS; zend_hash_move_forward_ex(ht_data, &pos)) { if (Z_TYPE_PP(z_label) == IS_STRING) { if (strcmp(Z_STRVAL_PP(z_label), label) == 0) { return true; } } } } #endif return false; } /* {{{ proto bool MongoDB\Driver\Exception\RuntimeException::hasErrorLabel(string $label) Returns whether a specific error label has been set */ static PHP_METHOD(RuntimeException, hasErrorLabel) { char* label; phongo_zpp_char_len label_len; zval* error_labels; #if PHP_VERSION_ID >= 70000 zval rv; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "s", &label, &label_len) == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 error_labels = zend_read_property(php_phongo_runtimeexception_ce, getThis(), ZEND_STRL("errorLabels"), 0, &rv TSRMLS_CC); #else error_labels = zend_read_property(php_phongo_runtimeexception_ce, getThis(), ZEND_STRL("errorLabels"), 0 TSRMLS_CC); #endif RETURN_BOOL(php_phongo_has_string_array_element(error_labels, label TSRMLS_CC)); } /* }}} */ ZEND_BEGIN_ARG_INFO_EX(ai_RuntimeException_hasErrorLabel, 0, 0, 1) ZEND_ARG_INFO(0, label) ZEND_END_ARG_INFO() /* {{{ MongoDB\Driver\Exception\RuntimeException function entries */ static zend_function_entry php_phongo_runtimeexception_me[] = { /* clang-format off */ PHP_ME(RuntimeException, hasErrorLabel, ai_RuntimeException_hasErrorLabel, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_runtimeexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "RuntimeException", php_phongo_runtimeexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_runtimeexception_ce = zend_register_internal_class_ex(&ce, spl_ce_RuntimeException); #else php_phongo_runtimeexception_ce = zend_register_internal_class_ex(&ce, spl_ce_RuntimeException, NULL TSRMLS_CC); #endif zend_class_implements(php_phongo_runtimeexception_ce TSRMLS_CC, 1, php_phongo_exception_ce); zend_declare_property_null(php_phongo_runtimeexception_ce, ZEND_STRL("errorLabels"), ZEND_ACC_PROTECTED TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/SSLConnectionException.c0000644000076500000240000000316613572250757023205 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_sslconnectionexception_ce; /* {{{ MongoDB\Driver\Exception\SSLConnectionException function entries */ static zend_function_entry php_phongo_sslconnectionexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_sslconnectionexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "SSLConnectionException", php_phongo_sslconnectionexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_sslconnectionexception_ce = zend_register_internal_class_ex(&ce, php_phongo_connectionexception_ce); #else php_phongo_sslconnectionexception_ce = zend_register_internal_class_ex(&ce, php_phongo_connectionexception_ce, NULL TSRMLS_CC); #endif PHONGO_CE_FINAL(php_phongo_sslconnectionexception_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/ServerException.c0000644000076500000240000000277313572250757021775 0ustar alcaeusstaff/* * Copyright 2018 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_serverexception_ce; /* {{{ MongoDB\Driver\Exception\ServerException function entries */ static zend_function_entry php_phongo_serverexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_serverexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "ServerException", php_phongo_serverexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_serverexception_ce = zend_register_internal_class_ex(&ce, php_phongo_runtimeexception_ce); #else php_phongo_serverexception_ce = zend_register_internal_class_ex(&ce, php_phongo_runtimeexception_ce, NULL TSRMLS_CC); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/UnexpectedValueException.c0000644000076500000240000000332413572250757023621 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_unexpectedvalueexception_ce; /* {{{ MongoDB\Driver\Exception\UnexpectedValueException function entries */ static zend_function_entry php_phongo_unexpectedvalueexception_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_unexpectedvalueexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "UnexpectedValueException", php_phongo_unexpectedvalueexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_unexpectedvalueexception_ce = zend_register_internal_class_ex(&ce, spl_ce_UnexpectedValueException); #else php_phongo_unexpectedvalueexception_ce = zend_register_internal_class_ex(&ce, spl_ce_UnexpectedValueException, NULL TSRMLS_CC); #endif zend_class_implements(php_phongo_unexpectedvalueexception_ce TSRMLS_CC, 1, php_phongo_exception_ce); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Exception/WriteException.c0000644000076500000240000000507613572250757021620 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_writeexception_ce; /* {{{ proto MongoDB\Driver\WriteResult MongoDB\Driver\Exception\WriteException::getWriteResult() Returns the WriteResult from the failed write operation. 
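
   A minimal PHP usage sketch (not part of the original source; $manager and the
   "db.collection" namespace are assumptions):

     $bulk = new MongoDB\Driver\BulkWrite();
     $bulk->insert(['_id' => 1]);
     $bulk->insert(['_id' => 1]);   // duplicate _id, produces a write error
     try {
         $manager->executeBulkWrite('db.collection', $bulk);
     } catch (MongoDB\Driver\Exception\BulkWriteException $e) {
         $writeResult = $e->getWriteResult();
         var_dump($writeResult->getInsertedCount(), $writeResult->getWriteErrors());
     }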
*/ static PHP_METHOD(WriteException, getWriteResult) { zval* writeresult; #if PHP_VERSION_ID >= 70000 zval rv; #endif if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 writeresult = zend_read_property(php_phongo_writeexception_ce, getThis(), ZEND_STRL("writeResult"), 0, &rv TSRMLS_CC); #else writeresult = zend_read_property(php_phongo_writeexception_ce, getThis(), ZEND_STRL("writeResult"), 0 TSRMLS_CC); #endif RETURN_ZVAL(writeresult, 1, 0); } /* }}} */ /* {{{ MongoDB\Driver\Exception\WriteException function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_WriteException_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_writeexception_me[] = { /* clang-format off */ PHP_ME(WriteException, getWriteResult, ai_WriteException_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_writeexception_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Exception", "WriteException", php_phongo_writeexception_me); #if PHP_VERSION_ID >= 70000 php_phongo_writeexception_ce = zend_register_internal_class_ex(&ce, php_phongo_serverexception_ce); #else php_phongo_writeexception_ce = zend_register_internal_class_ex(&ce, php_phongo_serverexception_ce, NULL TSRMLS_CC); #endif php_phongo_writeexception_ce->ce_flags |= ZEND_ACC_EXPLICIT_ABSTRACT_CLASS; zend_declare_property_null(php_phongo_writeexception_ce, ZEND_STRL("writeResult"), ZEND_ACC_PROTECTED TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Monitoring/CommandFailedEvent.c0000644000076500000240000002253513572250757022522 0ustar alcaeusstaff/* * Copyright 2016-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_commandfailedevent_ce; /* {{{ proto string CommandFailedEvent::getCommandName() Returns the command name for this event */ PHP_METHOD(CommandFailedEvent, getCommandName) { php_phongo_commandfailedevent_t* intern; intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETVAL_STRING(intern->command_name); } /* }}} */ /* {{{ proto int CommandFailedEvent::getDurationMicros() Returns the event's duration in microseconds */ PHP_METHOD(CommandFailedEvent, getDurationMicros) { php_phongo_commandfailedevent_t* intern; intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->duration_micros); } /* }}} */ /* {{{ proto Exception CommandFailedEvent::getError() Returns the error document associated with the event */ PHP_METHOD(CommandFailedEvent, getError) { php_phongo_commandfailedevent_t* intern; intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&intern->z_error, 1, 0); #else RETURN_ZVAL(intern->z_error, 1, 0); #endif } /* }}} */ /* {{{ proto string CommandFailedEvent::getOperationId() Returns the event's operation ID */ PHP_METHOD(CommandFailedEvent, getOperationId) { php_phongo_commandfailedevent_t* intern; char int_as_string[20]; intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } sprintf(int_as_string, "%" PRIu64, intern->operation_id); PHONGO_RETVAL_STRING(int_as_string); } /* }}} */ /* {{{ proto stdClass CommandFailedEvent::getReply() Returns the reply document associated with the event */ PHP_METHOD(CommandFailedEvent, getReply) { php_phongo_commandfailedevent_t* intern; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } php_phongo_bson_to_zval_ex(bson_get_data(intern->reply), intern->reply->len, &state); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } /* }}} */ /* {{{ proto string CommandFailedEvent::getRequestId() Returns the event's request ID */ PHP_METHOD(CommandFailedEvent, getRequestId) { php_phongo_commandfailedevent_t* intern; char int_as_string[20]; intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } sprintf(int_as_string, "%" PRIu64, intern->request_id); PHONGO_RETVAL_STRING(int_as_string); } /* }}} */ /* {{{ proto MongoDB\Driver\Server CommandFailedEvent::getServer() Returns the Server from which the event originated */ PHP_METHOD(CommandFailedEvent, getServer) { php_phongo_commandfailedevent_t* intern; intern = Z_COMMANDFAILEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } phongo_server_init(return_value, intern->client, intern->server_id TSRMLS_CC); } /* }}} */ /** * Event thrown when a command has failed to execute. * * This class is only constructed internally. 
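
   Hedged sketch of how this event is typically observed from a
   CommandSubscriber::commandFailed() implementation (the logging target is an
   assumption, not part of this source):

     public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event)
     {
         error_log(sprintf(
             '%s failed after %d microseconds: %s',
             $event->getCommandName(),
             $event->getDurationMicros(),
             $event->getError()->getMessage()
         ));
     }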
*/ /* {{{ MongoDB\Driver\Monitoring\CommandFailedEvent function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_CommandFailedEvent_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_commandfailedevent_me[] = { /* clang-format off */ ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_CommandFailedEvent_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) PHP_ME(CommandFailedEvent, getCommandName, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandFailedEvent, getError, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandFailedEvent, getDurationMicros, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandFailedEvent, getOperationId, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandFailedEvent, getReply, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandFailedEvent, getRequestId, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandFailedEvent, getServer, ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_CommandFailedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Monitoring\CommandFailedEvent object handlers */ static zend_object_handlers php_phongo_handler_commandfailedevent; static void php_phongo_commandfailedevent_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_commandfailedevent_t* intern = Z_OBJ_COMMANDFAILEDEVENT(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (!Z_ISUNDEF(intern->z_error)) { zval_ptr_dtor(&intern->z_error); } if (intern->reply) { bson_destroy(intern->reply); } if (intern->command_name) { efree(intern->command_name); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_commandfailedevent_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_commandfailedevent_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_commandfailedevent_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_commandfailedevent; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_commandfailedevent_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_commandfailedevent; return retval; } #endif } /* }}} */ static HashTable* php_phongo_commandfailedevent_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_commandfailedevent_t* intern; zval retval = ZVAL_STATIC_INIT; char operation_id[20], request_id[20]; php_phongo_bson_state reply_state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_COMMANDFAILEDEVENT_OBJ_P(object); *is_temp = 1; array_init_size(&retval, 6); ADD_ASSOC_STRING(&retval, "commandName", intern->command_name); ADD_ASSOC_INT64(&retval, "durationMicros", (int64_t) intern->duration_micros); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "error", &intern->z_error); Z_ADDREF(intern->z_error); #else ADD_ASSOC_ZVAL_EX(&retval, "error", intern->z_error); Z_ADDREF_P(intern->z_error); #endif sprintf(operation_id, "%" PRIu64, intern->operation_id); ADD_ASSOC_STRING(&retval, "operationId", operation_id); php_phongo_bson_to_zval_ex(bson_get_data(intern->reply), intern->reply->len, 
&reply_state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL(&retval, "reply", &reply_state.zchild); #else ADD_ASSOC_ZVAL(&retval, "reply", reply_state.zchild); #endif sprintf(request_id, "%" PRIu64, intern->request_id); ADD_ASSOC_STRING(&retval, "requestId", request_id); { #if PHP_VERSION_ID >= 70000 zval server; phongo_server_init(&server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", &server); #else zval* server = NULL; MAKE_STD_ZVAL(server); phongo_server_init(server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", server); #endif } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_commandfailedevent_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; (void) type; (void) module_number; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Monitoring", "CommandFailedEvent", php_phongo_commandfailedevent_me); php_phongo_commandfailedevent_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_commandfailedevent_ce->create_object = php_phongo_commandfailedevent_create_object; PHONGO_CE_FINAL(php_phongo_commandfailedevent_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_commandfailedevent_ce); memcpy(&php_phongo_handler_commandfailedevent, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_commandfailedevent.get_debug_info = php_phongo_commandfailedevent_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_commandfailedevent.free_obj = php_phongo_commandfailedevent_free_object; php_phongo_handler_commandfailedevent.offset = XtOffsetOf(php_phongo_commandfailedevent_t, std); #endif return; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Monitoring/CommandStartedEvent.c0000644000076500000240000002130413572250757022735 0ustar alcaeusstaff/* * Copyright 2016-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_commandstartedevent_ce; /* {{{ proto stdClass CommandStartedEvent::getCommand() Returns the command document associated with the event */ PHP_METHOD(CommandStartedEvent, getCommand) { php_phongo_commandstartedevent_t* intern; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_COMMANDSTARTEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } php_phongo_bson_to_zval_ex(bson_get_data(intern->command), intern->command->len, &state); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } /* }}} */ /* {{{ proto string CommandStartedEvent::getCommandName() Returns the command name for this event */ PHP_METHOD(CommandStartedEvent, getCommandName) { php_phongo_commandstartedevent_t* intern; intern = Z_COMMANDSTARTEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETVAL_STRING(intern->command_name); } /* }}} */ /* {{{ proto string CommandStartedEvent::getDatabaseName() Returns the database name for this event */ PHP_METHOD(CommandStartedEvent, getDatabaseName) { php_phongo_commandstartedevent_t* intern; intern = Z_COMMANDSTARTEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETVAL_STRING(intern->database_name); } /* }}} */ /* {{{ proto string CommandStartedEvent::getOperationId() Returns the event's operation ID */ PHP_METHOD(CommandStartedEvent, getOperationId) { php_phongo_commandstartedevent_t* intern; char int_as_string[20]; intern = Z_COMMANDSTARTEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } sprintf(int_as_string, "%" PRIu64, intern->operation_id); PHONGO_RETVAL_STRING(int_as_string); } /* }}} */ /* {{{ proto string CommandStartedEvent::getRequestId() Returns the event's request ID */ PHP_METHOD(CommandStartedEvent, getRequestId) { php_phongo_commandstartedevent_t* intern; char int_as_string[20]; intern = Z_COMMANDSTARTEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } sprintf(int_as_string, "%" PRIu64, intern->request_id); PHONGO_RETVAL_STRING(int_as_string); } /* }}} */ /* {{{ proto MongoDB\Driver\Server CommandStartedEvent::getServer() Returns the Server from which the event originated */ PHP_METHOD(CommandStartedEvent, getServer) { php_phongo_commandstartedevent_t* intern; intern = Z_COMMANDSTARTEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } phongo_server_init(return_value, intern->client, intern->server_id TSRMLS_CC); } /* }}} */ /** * Event thrown when a command has started to execute. * * This class is only constructed internally. 
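
   Hedged sketch of a CommandSubscriber::commandStarted() implementation that
   consumes this event (purely illustrative):

     public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event)
     {
         printf("%s.%s\n", $event->getDatabaseName(), $event->getCommandName());
         var_dump($event->getCommand());
     }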
*/ /* {{{ MongoDB\Driver\Monitoring\CommandStartedEvent function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_CommandStartedEvent_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_commandstartedevent_me[] = { /* clang-format off */ ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_CommandStartedEvent_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) PHP_ME(CommandStartedEvent, getCommand, ai_CommandStartedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandStartedEvent, getCommandName, ai_CommandStartedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandStartedEvent, getDatabaseName, ai_CommandStartedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandStartedEvent, getOperationId, ai_CommandStartedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandStartedEvent, getRequestId, ai_CommandStartedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandStartedEvent, getServer, ai_CommandStartedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_CommandStartedEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Monitoring\CommandStartedEvent object handlers */ static zend_object_handlers php_phongo_handler_commandstartedevent; static void php_phongo_commandstartedevent_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_commandstartedevent_t* intern = Z_OBJ_COMMANDSTARTEDEVENT(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->command) { bson_destroy(intern->command); } if (intern->command_name) { efree(intern->command_name); } if (intern->database_name) { efree(intern->database_name); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_commandstartedevent_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_commandstartedevent_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_commandstartedevent_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_commandstartedevent; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_commandstartedevent_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_commandstartedevent; return retval; } #endif } /* }}} */ static HashTable* php_phongo_commandstartedevent_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_commandstartedevent_t* intern; zval retval = ZVAL_STATIC_INIT; char operation_id[20], request_id[20]; php_phongo_bson_state command_state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_COMMANDSTARTEDEVENT_OBJ_P(object); *is_temp = 1; array_init_size(&retval, 6); php_phongo_bson_to_zval_ex(bson_get_data(intern->command), intern->command->len, &command_state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL(&retval, "command", &command_state.zchild); #else ADD_ASSOC_ZVAL(&retval, "command", command_state.zchild); #endif ADD_ASSOC_STRING(&retval, "commandName", intern->command_name); ADD_ASSOC_STRING(&retval, "databaseName", intern->database_name); sprintf(operation_id, "%" PRIu64, intern->operation_id); ADD_ASSOC_STRING(&retval, "operationId", operation_id); sprintf(request_id, "%" PRIu64, intern->request_id); ADD_ASSOC_STRING(&retval, "requestId", request_id); { #if 
PHP_VERSION_ID >= 70000 zval server; phongo_server_init(&server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", &server); #else zval* server = NULL; MAKE_STD_ZVAL(server); phongo_server_init(server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", server); #endif } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_commandstartedevent_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; (void) type; (void) module_number; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Monitoring", "CommandStartedEvent", php_phongo_commandstartedevent_me); php_phongo_commandstartedevent_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_commandstartedevent_ce->create_object = php_phongo_commandstartedevent_create_object; PHONGO_CE_FINAL(php_phongo_commandstartedevent_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_commandstartedevent_ce); memcpy(&php_phongo_handler_commandstartedevent, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_commandstartedevent.get_debug_info = php_phongo_commandstartedevent_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_commandstartedevent.free_obj = php_phongo_commandstartedevent_free_object; php_phongo_handler_commandstartedevent.offset = XtOffsetOf(php_phongo_commandstartedevent_t, std); #endif return; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Monitoring/CommandSubscriber.c0000644000076500000240000000452113572250757022432 0ustar alcaeusstaff/* * Copyright 2016-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_commandsubscriber_ce; /* {{{ MongoDB\Driver\Monitoring\CommandSubscriber function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_CommandSubscriber_commandStarted, 0, 0, 1) ZEND_ARG_OBJ_INFO(0, event, MongoDB\\Driver\\Monitoring\\CommandStartedEvent, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_CommandSubscriber_commandSucceeded, 0, 0, 1) ZEND_ARG_OBJ_INFO(0, event, MongoDB\\Driver\\Monitoring\\CommandSucceededEvent, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_CommandSubscriber_commandFailed, 0, 0, 1) ZEND_ARG_OBJ_INFO(0, event, MongoDB\\Driver\\Monitoring\\CommandFailedEvent, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_commandsubscriber_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(CommandSubscriber, commandStarted, ai_CommandSubscriber_commandStarted) ZEND_ABSTRACT_ME(CommandSubscriber, commandSucceeded, ai_CommandSubscriber_commandSucceeded) ZEND_ABSTRACT_ME(CommandSubscriber, commandFailed, ai_CommandSubscriber_commandFailed) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_commandsubscriber_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; (void) type; (void) module_number; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Monitoring", "CommandSubscriber", php_phongo_commandsubscriber_me); php_phongo_commandsubscriber_ce = zend_register_internal_interface(&ce TSRMLS_CC); zend_class_implements(php_phongo_commandsubscriber_ce TSRMLS_CC, 1, php_phongo_subscriber_ce); return; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Monitoring/CommandSucceededEvent.c0000644000076500000240000002141413572250757023215 0ustar alcaeusstaff/* * Copyright 2016-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_commandsucceededevent_ce; /* {{{ proto string CommandSucceededEvent::getCommandName() Returns the command name for this event */ PHP_METHOD(CommandSucceededEvent, getCommandName) { php_phongo_commandsucceededevent_t* intern; intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETVAL_STRING(intern->command_name); } /* }}} */ /* {{{ proto int CommandSucceededEvent::getDurationMicros() Returns the event's duration in microseconds */ PHP_METHOD(CommandSucceededEvent, getDurationMicros) { php_phongo_commandsucceededevent_t* intern; intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->duration_micros); } /* }}} */ /* {{{ proto string CommandSucceededEvent::getOperationId() Returns the event's operation ID */ PHP_METHOD(CommandSucceededEvent, getOperationId) { php_phongo_commandsucceededevent_t* intern; char int_as_string[20]; intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } sprintf(int_as_string, "%" PRIu64, intern->operation_id); PHONGO_RETVAL_STRING(int_as_string); } /* }}} */ /* {{{ proto stdClass CommandSucceededEvent::getReply() Returns the reply document associated with the event */ PHP_METHOD(CommandSucceededEvent, getReply) { php_phongo_commandsucceededevent_t* intern; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } php_phongo_bson_to_zval_ex(bson_get_data(intern->reply), intern->reply->len, &state); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } /* }}} */ /* {{{ proto string CommandsucceededEvent::getRequestId() Returns the event's request ID */ PHP_METHOD(CommandSucceededEvent, getRequestId) { php_phongo_commandsucceededevent_t* intern; char int_as_string[20]; intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } sprintf(int_as_string, "%" PRIu64, intern->request_id); PHONGO_RETVAL_STRING(int_as_string); } /* }}} */ /* {{{ proto MongoDB\Driver\Server CommandSucceededEvent::getServer() Returns the Server from which the event originated */ PHP_METHOD(CommandSucceededEvent, getServer) { php_phongo_commandsucceededevent_t* intern; intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } phongo_server_init(return_value, intern->client, intern->server_id TSRMLS_CC); } /* }}} */ /** * Event thrown when a command has succeeded to execute. * * This class is only constructed internally. 
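
   A hedged end-to-end sketch tying the monitoring pieces together (class and
   variable names are illustrative only, not part of this source):

     class TimingSubscriber implements MongoDB\Driver\Monitoring\CommandSubscriber
     {
         public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) {}
         public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) {}

         public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event)
         {
             printf("%s took %d microseconds\n", $event->getCommandName(), $event->getDurationMicros());
         }
     }

     MongoDB\Driver\Monitoring\addSubscriber(new TimingSubscriber());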
*/ /* {{{ MongoDB\Driver\Monitoring\CommandSucceededEvent function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_CommandSucceededEvent_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_commandsucceededevent_me[] = { /* clang-format off */ ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_CommandSucceededEvent_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) PHP_ME(CommandSucceededEvent, getCommandName, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandSucceededEvent, getDurationMicros, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandSucceededEvent, getOperationId, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandSucceededEvent, getReply, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandSucceededEvent, getRequestId, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(CommandSucceededEvent, getServer, ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_CommandSucceededEvent_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Monitoring\CommandSucceededEvent object handlers */ static zend_object_handlers php_phongo_handler_commandsucceededevent; static void php_phongo_commandsucceededevent_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_commandsucceededevent_t* intern = Z_OBJ_COMMANDSUCCEEDEDEVENT(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->reply) { bson_destroy(intern->reply); } if (intern->command_name) { efree(intern->command_name); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_commandsucceededevent_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_commandsucceededevent_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_commandsucceededevent_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_commandsucceededevent; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_commandsucceededevent_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_commandsucceededevent; return retval; } #endif } /* }}} */ static HashTable* php_phongo_commandsucceededevent_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_commandsucceededevent_t* intern; zval retval = ZVAL_STATIC_INIT; char operation_id[20], request_id[20]; php_phongo_bson_state reply_state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(object); *is_temp = 1; array_init_size(&retval, 6); ADD_ASSOC_STRING(&retval, "commandName", intern->command_name); ADD_ASSOC_INT64(&retval, "durationMicros", (int64_t) intern->duration_micros); sprintf(operation_id, "%" PRIu64, intern->operation_id); ADD_ASSOC_STRING(&retval, "operationId", operation_id); php_phongo_bson_to_zval_ex(bson_get_data(intern->reply), intern->reply->len, &reply_state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL(&retval, "reply", &reply_state.zchild); #else ADD_ASSOC_ZVAL(&retval, "reply", reply_state.zchild); #endif sprintf(request_id, "%" PRIu64, intern->request_id); ADD_ASSOC_STRING(&retval, "requestId", request_id); { #if 
PHP_VERSION_ID >= 70000 zval server; phongo_server_init(&server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", &server); #else zval* server = NULL; MAKE_STD_ZVAL(server); phongo_server_init(server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", server); #endif } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_commandsucceededevent_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; (void) type; (void) module_number; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Monitoring", "CommandSucceededEvent", php_phongo_commandsucceededevent_me); php_phongo_commandsucceededevent_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_commandsucceededevent_ce->create_object = php_phongo_commandsucceededevent_create_object; PHONGO_CE_FINAL(php_phongo_commandsucceededevent_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_commandsucceededevent_ce); memcpy(&php_phongo_handler_commandsucceededevent, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_commandsucceededevent.get_debug_info = php_phongo_commandsucceededevent_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_commandsucceededevent.free_obj = php_phongo_commandsucceededevent_free_object; php_phongo_handler_commandsucceededevent.offset = XtOffsetOf(php_phongo_commandsucceededevent_t, std); #endif return; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Monitoring/Subscriber.c0000644000076500000240000000257313572250757021140 0ustar alcaeusstaff/* * Copyright 2016-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_subscriber_ce; /* {{{ MongoDB\Driver\Monitoring\Subscriber function entries */ static zend_function_entry php_phongo_subscriber_me[] = { PHP_FE_END }; /* }}} */ void php_phongo_subscriber_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; (void) type; (void) module_number; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver\\Monitoring", "Subscriber", php_phongo_subscriber_me); php_phongo_subscriber_ce = zend_register_internal_interface(&ce TSRMLS_CC); return; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Monitoring/functions.c0000644000076500000240000000635213572250757021044 0ustar alcaeusstaff/* * Copyright 2016-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" ZEND_EXTERN_MODULE_GLOBALS(mongodb) static char* php_phongo_make_subscriber_hash(zval* subscriber TSRMLS_DC) { char* hash; int hash_len; hash_len = spprintf(&hash, 0, "SUBS-%09d", Z_OBJ_HANDLE_P(subscriber)); return hash; } /* {{{ proto void MongoDB\Driver\Monitoring\addSubscriber(MongoDB\Driver\Monitoring\Subscriber $subscriber) Adds a monitoring subscriber to the set of subscribers */ PHP_FUNCTION(MongoDB_Driver_Monitoring_addSubscriber) { zval* zSubscriber = NULL; char* hash; #if PHP_VERSION_ID >= 70000 zval* subscriber; #else zval** subscriber; #endif if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &zSubscriber, php_phongo_subscriber_ce) == FAILURE) { return; } /* The HashTable should never be NULL, as it's initialized during RINIT and * destroyed during RSHUTDOWN. This is simply a defensive guard. */ if (!MONGODB_G(subscribers)) { return; } hash = php_phongo_make_subscriber_hash(zSubscriber TSRMLS_CC); /* If we have already stored the subscriber, bail out. Otherwise, add * subscriber to list */ #if PHP_VERSION_ID >= 70000 if ((subscriber = zend_hash_str_find(MONGODB_G(subscribers), hash, strlen(hash)))) { efree(hash); return; } zend_hash_str_update(MONGODB_G(subscribers), hash, strlen(hash), zSubscriber); #else if (zend_hash_find(MONGODB_G(subscribers), hash, strlen(hash) + 1, (void**) &subscriber) == SUCCESS) { efree(hash); return; } zend_hash_update(MONGODB_G(subscribers), hash, strlen(hash) + 1, (void*) &zSubscriber, sizeof(zval*), NULL); #endif Z_ADDREF_P(zSubscriber); efree(hash); } /* }}} */ /* {{{ proto void MongoDB\Driver\Monitoring\removeSubscriber(MongoDB\Driver\Monitoring\Subscriber $subscriber) Removes a monitoring subscriber from the set of subscribers */ PHP_FUNCTION(MongoDB_Driver_Monitoring_removeSubscriber) { zval* zSubscriber = NULL; char* hash; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &zSubscriber, php_phongo_subscriber_ce) == FAILURE) { return; } /* The HashTable should never be NULL, as it's initialized during RINIT and * destroyed during RSHUTDOWN. This is simply a defensive guard. */ if (!MONGODB_G(subscribers)) { return; } hash = php_phongo_make_subscriber_hash(zSubscriber TSRMLS_CC); #if PHP_VERSION_ID >= 70000 zend_hash_str_del(MONGODB_G(subscribers), hash, strlen(hash)); #else zend_hash_del(MONGODB_G(subscribers), hash, strlen(hash) + 1); #endif efree(hash); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Monitoring/functions.h0000644000076500000240000000173613572250757021052 0ustar alcaeusstaff/* * Copyright 2016-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef PHONGO_MONITORING_FUNCTIONS_H #define PHONGO_MONITORING_FUNCTIONS_H #include PHP_FUNCTION(MongoDB_Driver_Monitoring_addSubscriber); PHP_FUNCTION(MongoDB_Driver_Monitoring_removeSubscriber); #endif /* PHONGO_MONITORING_FUNCTIONS_H */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/BulkWrite.c0000644000076500000240000004746313572250757016627 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_array_api.h" #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" #define PHONGO_BULKWRITE_BYPASS_UNSET -1 zend_class_entry* php_phongo_bulkwrite_ce; /* Extracts the "_id" field of a BSON document into a return value. */ static void php_phongo_bulkwrite_extract_id(bson_t* doc, zval** return_value) /* {{{ */ { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; zval* id = NULL; state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; if (!php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &state)) { goto cleanup; } #if PHP_VERSION_ID >= 70000 id = php_array_fetchc(&state.zchild, "_id"); #else id = php_array_fetchc(state.zchild, "_id"); #endif if (id) { ZVAL_ZVAL(*return_value, id, 1, 0); } cleanup: zval_ptr_dtor(&state.zchild); } /* }}} */ /* Returns whether any top-level field names in the document contain a "$". 
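/* [Editorial sketch -- not part of the original sources.] BulkWrite::insert()
 * can return the generated "_id", and php_phongo_bulkwrite_extract_id() above
 * pulls that field out of the encoded BSON document. The same lookup written
 * against the public libbson API (compile with: pkg-config --cflags --libs
 * libbson-1.0): */
#include <bson/bson.h>
#include <stdio.h>

int
main (void)
{
   bson_t *doc = bson_new ();
   bson_oid_t oid;
   bson_iter_t iter;
   char str[25];

   bson_oid_init (&oid, NULL);
   BSON_APPEND_OID (doc, "_id", &oid);
   BSON_APPEND_UTF8 (doc, "name", "example");

   if (bson_iter_init_find (&iter, doc, "_id") && BSON_ITER_HOLDS_OID (&iter)) {
      bson_oid_to_string (bson_iter_oid (&iter), str);
      printf ("_id: %s\n", str);
   }

   bson_destroy (doc);
   return 0;
}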
*/ static inline bool php_phongo_bulkwrite_update_has_operators(bson_t* bupdate) /* {{{ */ { bson_iter_t iter; if (bson_iter_init(&iter, bupdate)) { while (bson_iter_next(&iter)) { if (strchr(bson_iter_key(&iter), '$')) { return true; } } } return false; } /* }}} */ /* Returns whether the update document is considered an aggregation pipeline */ static inline bool php_phongo_bulkwrite_update_is_pipeline(bson_t* bupdate) /* {{{ */ { bson_iter_t iter; bson_iter_t child; const char* key; int i = 0; char* i_str; if (!bson_iter_init(&iter, bupdate)) { return false; } while (bson_iter_next(&iter)) { key = bson_iter_key(&iter); i_str = bson_strdup_printf("%d", i++); if (strcmp(key, i_str)) { bson_free(i_str); return false; } bson_free(i_str); if (BSON_ITER_HOLDS_DOCUMENT(&iter)) { if (!bson_iter_recurse(&iter, &child)) { return false; } if (!bson_iter_next(&child)) { return false; } key = bson_iter_key(&child); if (key[0] != '$') { return false; } } else { return false; } } /* should return false when the document is empty */ return i != 0; } /* }}} */ /* Returns whether the BSON array's keys are a sequence of integer strings * starting with "0". BSON_APPEND_ARRAY considers it the caller's responsibility * to ensure that the array's keys are properly formatted. */ static inline bool php_phongo_bulkwrite_bson_array_has_valid_keys(bson_t* array) /* {{{ */ { bson_iter_t iter; if (bson_empty(array)) { return true; } if (bson_iter_init(&iter, array)) { char key[12]; int count = 0; while (bson_iter_next(&iter)) { bson_snprintf(key, sizeof(key), "%d", count); if (0 != strcmp(key, bson_iter_key(&iter))) { return false; } count++; } } return true; } /* }}} */ /* Appends an array field for the given opts document and key. Returns true on * success; otherwise, false is returned and an exception is thrown. */ static bool php_phongo_bulkwrite_opts_append_array(bson_t* opts, const char* key, zval* zarr TSRMLS_DC) /* {{{ */ { zval* value = php_array_fetch(zarr, key); bson_t b = BSON_INITIALIZER; if (Z_TYPE_P(value) != IS_OBJECT && Z_TYPE_P(value) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" option to be array or object, %s given", key, zend_get_type_by_const(Z_TYPE_P(value))); return false; } php_phongo_zval_to_bson(value, PHONGO_BSON_NONE, &b, NULL TSRMLS_CC); if (EG(exception)) { bson_destroy(&b); return false; } if (!php_phongo_bulkwrite_bson_array_has_valid_keys(&b)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"%s\" option has invalid keys for a BSON array", key); bson_destroy(&b); return false; } if (!BSON_APPEND_ARRAY(opts, key, &b)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", key); bson_destroy(&b); return false; } bson_destroy(&b); return true; } /* }}} */ /* Appends a document field for the given opts document and key. Returns true on * success; otherwise, false is returned and an exception is thrown. 
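/* [Editorial sketch -- not part of the original sources.] The two helpers
 * above decide whether an update document is an operator document (keys such
 * as "$set"), an aggregation pipeline, or a plain replacement document. A
 * standalone version of the top-level "$"-key check using libbson: */
#include <bson/bson.h>
#include <stdio.h>
#include <string.h>

static bool
has_update_operators (const bson_t *update)
{
   bson_iter_t iter;

   if (bson_iter_init (&iter, update)) {
      while (bson_iter_next (&iter)) {
         if (strchr (bson_iter_key (&iter), '$')) {
            return true;
         }
      }
   }
   return false;
}

int
main (void)
{
   bson_t *operators = BCON_NEW ("$set", "{", "x", BCON_INT32 (1), "}");
   bson_t *replacement = BCON_NEW ("x", BCON_INT32 (1));

   printf ("operators: %d, replacement: %d\n",
           has_update_operators (operators),
           has_update_operators (replacement));

   bson_destroy (operators);
   bson_destroy (replacement);
   return 0;
}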
*/ static bool php_phongo_bulkwrite_opts_append_document(bson_t* opts, const char* key, zval* zarr TSRMLS_DC) /* {{{ */ { zval* value = php_array_fetch(zarr, key); bson_t b = BSON_INITIALIZER; if (Z_TYPE_P(value) != IS_OBJECT && Z_TYPE_P(value) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" option to be array or object, %s given", key, zend_get_type_by_const(Z_TYPE_P(value))); return false; } php_phongo_zval_to_bson(value, PHONGO_BSON_NONE, &b, NULL TSRMLS_CC); if (EG(exception)) { bson_destroy(&b); return false; } if (!BSON_APPEND_DOCUMENT(opts, key, &b)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", key); bson_destroy(&b); return false; } bson_destroy(&b); return true; } /* }}} */ #define PHONGO_BULKWRITE_APPEND_BOOL(opt, value) \ if (!BSON_APPEND_BOOL(boptions, (opt), (value))) { \ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \ return false; \ } #define PHONGO_BULKWRITE_APPEND_INT32(opt, value) \ if (!BSON_APPEND_INT32(boptions, (opt), (value))) { \ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \ return false; \ } #define PHONGO_BULKWRITE_OPT_ARRAY(opt) \ if (zoptions && php_array_existsc(zoptions, (opt))) { \ if (!php_phongo_bulkwrite_opts_append_array(boptions, (opt), zoptions TSRMLS_CC)) { \ return false; \ } \ } #define PHONGO_BULKWRITE_OPT_DOCUMENT(opt) \ if (zoptions && php_array_existsc(zoptions, (opt))) { \ if (!php_phongo_bulkwrite_opts_append_document(boptions, (opt), zoptions TSRMLS_CC)) { \ return false; \ } \ } /* Applies options (including defaults) for an update operation. */ static bool php_phongo_bulkwrite_update_apply_options(bson_t* boptions, zval* zoptions TSRMLS_DC) /* {{{ */ { bool multi = false, upsert = false; if (zoptions) { if (php_array_existsc(zoptions, "multi")) { multi = php_array_fetchc_bool(zoptions, "multi"); } if (php_array_existsc(zoptions, "upsert")) { upsert = php_array_fetchc_bool(zoptions, "upsert"); } } PHONGO_BULKWRITE_APPEND_BOOL("multi", multi); PHONGO_BULKWRITE_APPEND_BOOL("upsert", upsert); PHONGO_BULKWRITE_OPT_ARRAY("arrayFilters"); PHONGO_BULKWRITE_OPT_DOCUMENT("collation"); return true; } /* }}} */ /* Applies options (including defaults) for an delete operation. */ static bool php_phongo_bulkwrite_delete_apply_options(bson_t* boptions, zval* zoptions TSRMLS_DC) /* {{{ */ { int32_t limit = 0; if (zoptions) { if (php_array_existsc(zoptions, "limit")) { limit = php_array_fetchc_bool(zoptions, "limit") ? 
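/* [Editorial sketch -- not part of the original sources.] The option helpers
 * in this file always write explicit "multi"/"upsert" (update) or "limit"
 * (delete) values into the per-operation options document, alongside
 * pass-through documents such as "collation". Building an equivalent options
 * document directly with libbson: */
#include <bson/bson.h>
#include <stdio.h>

int
main (void)
{
   bson_t opts = BSON_INITIALIZER;
   bson_t collation = BSON_INITIALIZER;
   char *json;

   BSON_APPEND_UTF8 (&collation, "locale", "en_US");

   BSON_APPEND_BOOL (&opts, "multi", false);  /* default when not provided */
   BSON_APPEND_BOOL (&opts, "upsert", false); /* default when not provided */
   BSON_APPEND_DOCUMENT (&opts, "collation", &collation);

   json = bson_as_canonical_extended_json (&opts, NULL);
   printf ("%s\n", json);
   bson_free (json);

   bson_destroy (&collation);
   bson_destroy (&opts);
   return 0;
}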
1 : 0; } } PHONGO_BULKWRITE_APPEND_INT32("limit", limit); PHONGO_BULKWRITE_OPT_DOCUMENT("collation"); return true; } /* }}} */ #undef PHONGO_BULKWRITE_APPEND_BOOL #undef PHONGO_BULKWRITE_APPEND_INT32 #undef PHONGO_BULKWRITE_OPT_DOCUMENT /* {{{ proto void MongoDB\Driver\BulkWrite::__construct([array $options = array()]) Constructs a new BulkWrite */ static PHP_METHOD(BulkWrite, __construct) { php_phongo_bulkwrite_t* intern; zend_error_handling error_handling; zval* options = NULL; zend_bool ordered = 1; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_BULKWRITE_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|a!", &options) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); if (options && php_array_existsc(options, "ordered")) { ordered = php_array_fetchc_bool(options, "ordered"); } intern->bulk = mongoc_bulk_operation_new(ordered); intern->ordered = ordered; intern->bypass = PHONGO_BULKWRITE_BYPASS_UNSET; intern->num_ops = 0; if (options && php_array_existsc(options, "bypassDocumentValidation")) { zend_bool bypass = php_array_fetchc_bool(options, "bypassDocumentValidation"); mongoc_bulk_operation_set_bypass_document_validation(intern->bulk, bypass); intern->bypass = bypass; } } /* }}} */ /* {{{ proto mixed MongoDB\Driver\BulkWrite::insert(array|object $document) Adds an insert operation to the BulkWrite */ static PHP_METHOD(BulkWrite, insert) { php_phongo_bulkwrite_t* intern; zval* zdocument; bson_t bdocument = BSON_INITIALIZER, boptions = BSON_INITIALIZER; bson_t* bson_out = NULL; int bson_flags = PHONGO_BSON_ADD_ID; bson_error_t error = { 0 }; DECLARE_RETURN_VALUE_USED intern = Z_BULKWRITE_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A", &zdocument) == FAILURE) { return; } if (return_value_used) { bson_flags |= PHONGO_BSON_RETURN_ID; } php_phongo_zval_to_bson(zdocument, bson_flags, &bdocument, &bson_out TSRMLS_CC); if (EG(exception)) { goto cleanup; } if (!mongoc_bulk_operation_insert_with_opts(intern->bulk, &bdocument, &boptions, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); goto cleanup; } intern->num_ops++; if (bson_out && return_value_used) { php_phongo_bulkwrite_extract_id(bson_out, &return_value); } cleanup: bson_destroy(&bdocument); bson_destroy(&boptions); bson_clear(&bson_out); } /* }}} */ /* {{{ proto void MongoDB\Driver\BulkWrite::update(array|object $query, array|object $newObj[, array $updateOptions = array()]) Adds an update operation to the BulkWrite */ static PHP_METHOD(BulkWrite, update) { php_phongo_bulkwrite_t* intern; zval * zquery, *zupdate, *zoptions = NULL; bson_t bquery = BSON_INITIALIZER, bupdate = BSON_INITIALIZER, boptions = BSON_INITIALIZER; bson_error_t error = { 0 }; intern = Z_BULKWRITE_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "AA|a!", &zquery, &zupdate, &zoptions) == FAILURE) { return; } php_phongo_zval_to_bson(zquery, PHONGO_BSON_NONE, &bquery, NULL TSRMLS_CC); if (EG(exception)) { goto cleanup; } php_phongo_zval_to_bson(zupdate, PHONGO_BSON_NONE, &bupdate, NULL TSRMLS_CC); if (EG(exception)) { goto cleanup; } if (!php_phongo_bulkwrite_update_apply_options(&boptions, zoptions TSRMLS_CC)) { goto cleanup; } if (php_phongo_bulkwrite_update_has_operators(&bupdate) || php_phongo_bulkwrite_update_is_pipeline(&bupdate)) { if (zoptions && php_array_existsc(zoptions, "multi") && 
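/* [Editorial sketch -- not part of the original sources.] The BulkWrite
 * constructor and insert() method above map onto libmongoc's bulk API. A
 * standalone equivalent, assuming a local mongod and a hypothetical "db.coll"
 * namespace (compile against libmongoc-1.0): */
#include <mongoc/mongoc.h>
#include <stdio.h>

int
main (void)
{
   mongoc_client_t *client;
   mongoc_collection_t *coll;
   mongoc_bulk_operation_t *bulk;
   bson_t *create_opts = BCON_NEW ("ordered", BCON_BOOL (false));
   bson_t *doc = BCON_NEW ("name", BCON_UTF8 ("example"));
   bson_t reply;
   bson_error_t error;

   mongoc_init ();
   client = mongoc_client_new ("mongodb://127.0.0.1/");
   coll = mongoc_client_get_collection (client, "db", "coll");

   bulk = mongoc_collection_create_bulk_operation_with_opts (coll, create_opts);
   mongoc_bulk_operation_set_bypass_document_validation (bulk, true);

   if (!mongoc_bulk_operation_insert_with_opts (bulk, doc, NULL, &error)) {
      fprintf (stderr, "insert failed: %s\n", error.message);
   }

   if (!mongoc_bulk_operation_execute (bulk, &reply, &error)) {
      fprintf (stderr, "execute failed: %s\n", error.message);
   }

   bson_destroy (&reply);
   bson_destroy (doc);
   bson_destroy (create_opts);
   mongoc_bulk_operation_destroy (bulk);
   mongoc_collection_destroy (coll);
   mongoc_client_destroy (client);
   mongoc_cleanup ();
   return 0;
}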
php_array_fetchc_bool(zoptions, "multi")) { if (!mongoc_bulk_operation_update_many_with_opts(intern->bulk, &bquery, &bupdate, &boptions, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); goto cleanup; } } else { if (!mongoc_bulk_operation_update_one_with_opts(intern->bulk, &bquery, &bupdate, &boptions, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); goto cleanup; } } } else { if (zoptions && php_array_existsc(zoptions, "multi") && php_array_fetchc_bool(zoptions, "multi")) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Replacement document conflicts with true \"multi\" option"); goto cleanup; } if (!mongoc_bulk_operation_replace_one_with_opts(intern->bulk, &bquery, &bupdate, &boptions, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); goto cleanup; } } intern->num_ops++; cleanup: bson_destroy(&bquery); bson_destroy(&bupdate); bson_destroy(&boptions); } /* }}} */ /* {{{ proto void MongoDB\Driver\BulkWrite::delete(array|object $query[, array $deleteOptions = array()]) Adds a delete operation to the BulkWrite */ static PHP_METHOD(BulkWrite, delete) { php_phongo_bulkwrite_t* intern; zval * zquery, *zoptions = NULL; bson_t bquery = BSON_INITIALIZER, boptions = BSON_INITIALIZER; bson_error_t error = { 0 }; intern = Z_BULKWRITE_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A|a!", &zquery, &zoptions) == FAILURE) { return; } php_phongo_zval_to_bson(zquery, PHONGO_BSON_NONE, &bquery, NULL TSRMLS_CC); if (EG(exception)) { goto cleanup; } if (!php_phongo_bulkwrite_delete_apply_options(&boptions, zoptions TSRMLS_CC)) { goto cleanup; } if (zoptions && php_array_existsc(zoptions, "limit") && php_array_fetchc_bool(zoptions, "limit")) { if (!mongoc_bulk_operation_remove_one_with_opts(intern->bulk, &bquery, &boptions, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); goto cleanup; } } else { if (!mongoc_bulk_operation_remove_many_with_opts(intern->bulk, &bquery, &boptions, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); goto cleanup; } } intern->num_ops++; cleanup: bson_destroy(&bquery); bson_destroy(&boptions); } /* }}} */ /* {{{ proto integer MongoDB\Driver\BulkWrite::count() Returns the number of operations that have been added to the BulkWrite */ static PHP_METHOD(BulkWrite, count) { php_phongo_bulkwrite_t* intern; intern = Z_BULKWRITE_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->num_ops); } /* }}} */ /* {{{ MongoDB\Driver\BulkWrite function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite___construct, 0, 0, 0) ZEND_ARG_ARRAY_INFO(0, options, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_insert, 0, 0, 1) ZEND_ARG_INFO(0, document) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_update, 0, 0, 2) ZEND_ARG_INFO(0, query) ZEND_ARG_INFO(0, newObj) ZEND_ARG_ARRAY_INFO(0, updateOptions, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_delete, 0, 0, 1) ZEND_ARG_INFO(0, query) ZEND_ARG_ARRAY_INFO(0, deleteOptions, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_BulkWrite_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_bulkwrite_me[] = { /* clang-format off */ PHP_ME(BulkWrite, __construct, ai_BulkWrite___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(BulkWrite, insert, ai_BulkWrite_insert, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(BulkWrite, update, ai_BulkWrite_update, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(BulkWrite, delete, ai_BulkWrite_delete, 
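/* [Editorial sketch -- not part of the original sources.] update() and
 * delete() above dispatch to different libmongoc bulk functions depending on
 * the update document and the "multi"/"limit" options. The same dispatch,
 * written directly against libmongoc for an already-created bulk operation: */
#include <mongoc/mongoc.h>

static bool
queue_ops (mongoc_bulk_operation_t *bulk, bson_error_t *error)
{
   bson_t *selector = BCON_NEW ("status", BCON_UTF8 ("old"));
   bson_t *operators = BCON_NEW ("$set", "{", "status", BCON_UTF8 ("new"), "}");
   bson_t *replacement = BCON_NEW ("status", BCON_UTF8 ("replaced"));
   bool ok =
      /* operator document + multi:true  -> update_many */
      mongoc_bulk_operation_update_many_with_opts (bulk, selector, operators, NULL, error) &&
      /* operator document + multi:false -> update_one */
      mongoc_bulk_operation_update_one_with_opts (bulk, selector, operators, NULL, error) &&
      /* plain document (no operators)   -> replace_one */
      mongoc_bulk_operation_replace_one_with_opts (bulk, selector, replacement, NULL, error) &&
      /* delete with limit:0             -> remove_many */
      mongoc_bulk_operation_remove_many_with_opts (bulk, selector, NULL, error);

   bson_destroy (selector);
   bson_destroy (operators);
   bson_destroy (replacement);
   return ok;
}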
ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(BulkWrite, count, ai_BulkWrite_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_BulkWrite_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\BulkWrite object handlers */ static zend_object_handlers php_phongo_handler_bulkwrite; static void php_phongo_bulkwrite_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_bulkwrite_t* intern = Z_OBJ_BULKWRITE(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->bulk) { mongoc_bulk_operation_destroy(intern->bulk); } if (intern->database) { efree(intern->database); } if (intern->collection) { efree(intern->collection); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_bulkwrite_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_bulkwrite_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_bulkwrite_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_bulkwrite; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_bulkwrite_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_bulkwrite; return retval; } #endif } /* }}} */ static HashTable* php_phongo_bulkwrite_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { zval retval = ZVAL_STATIC_INIT; php_phongo_bulkwrite_t* intern = NULL; *is_temp = 1; intern = Z_BULKWRITE_OBJ_P(object); array_init(&retval); if (intern->database) { ADD_ASSOC_STRING(&retval, "database", intern->database); } else { ADD_ASSOC_NULL_EX(&retval, "database"); } if (intern->collection) { ADD_ASSOC_STRING(&retval, "collection", intern->collection); } else { ADD_ASSOC_NULL_EX(&retval, "collection"); } ADD_ASSOC_BOOL_EX(&retval, "ordered", intern->ordered); if (intern->bypass != PHONGO_BULKWRITE_BYPASS_UNSET) { ADD_ASSOC_BOOL_EX(&retval, "bypassDocumentValidation", intern->bypass); } else { ADD_ASSOC_NULL_EX(&retval, "bypassDocumentValidation"); } ADD_ASSOC_BOOL_EX(&retval, "executed", intern->executed); ADD_ASSOC_LONG_EX(&retval, "server_id", mongoc_bulk_operation_get_hint(intern->bulk)); if (mongoc_bulk_operation_get_write_concern(intern->bulk)) { #if PHP_VERSION_ID >= 70000 zval write_concern; php_phongo_write_concern_to_zval(&write_concern, mongoc_bulk_operation_get_write_concern(intern->bulk)); ADD_ASSOC_ZVAL_EX(&retval, "write_concern", &write_concern); #else zval* write_concern = NULL; MAKE_STD_ZVAL(write_concern); php_phongo_write_concern_to_zval(write_concern, mongoc_bulk_operation_get_write_concern(intern->bulk)); ADD_ASSOC_ZVAL_EX(&retval, "write_concern", write_concern); #endif } else { ADD_ASSOC_NULL_EX(&retval, "write_concern"); } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_bulkwrite_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "BulkWrite", php_phongo_bulkwrite_me); php_phongo_bulkwrite_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_bulkwrite_ce->create_object = php_phongo_bulkwrite_create_object; PHONGO_CE_FINAL(php_phongo_bulkwrite_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_bulkwrite_ce); memcpy(&php_phongo_handler_bulkwrite, phongo_get_std_object_handlers(), 
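/* [Editorial sketch -- not part of the original sources.] The debug handler
 * above reports the write concern attached to the underlying
 * mongoc_bulk_operation_t. One way to attach and inspect it with the public
 * API (hypothetical "db.coll" namespace): */
#include <mongoc/mongoc.h>
#include <stdio.h>

int
main (void)
{
   mongoc_client_t *client;
   mongoc_collection_t *coll;
   mongoc_bulk_operation_t *bulk;
   mongoc_write_concern_t *wc = mongoc_write_concern_new ();
   bson_t opts = BSON_INITIALIZER;

   mongoc_init ();
   client = mongoc_client_new ("mongodb://127.0.0.1/");
   coll = mongoc_client_get_collection (client, "db", "coll");

   mongoc_write_concern_set_wmajority (wc, 1000 /* wtimeout ms */);
   mongoc_write_concern_append (wc, &opts); /* adds "writeConcern" to opts */

   bulk = mongoc_collection_create_bulk_operation_with_opts (coll, &opts);
   printf ("majority: %d\n",
           mongoc_write_concern_get_wmajority (
              mongoc_bulk_operation_get_write_concern (bulk)));

   mongoc_bulk_operation_destroy (bulk);
   bson_destroy (&opts);
   mongoc_write_concern_destroy (wc);
   mongoc_collection_destroy (coll);
   mongoc_client_destroy (client);
   mongoc_cleanup ();
   return 0;
}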
sizeof(zend_object_handlers)); php_phongo_handler_bulkwrite.get_debug_info = php_phongo_bulkwrite_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_bulkwrite.free_obj = php_phongo_bulkwrite_free_object; php_phongo_handler_bulkwrite.offset = XtOffsetOf(php_phongo_bulkwrite_t, std); #endif zend_class_implements(php_phongo_bulkwrite_ce TSRMLS_CC, 1, spl_ce_Countable); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Command.c0000644000076500000240000001567513572250757016275 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_array_api.h" #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_command_ce; /* Initialize the "maxAwaitTimeMS" option. Returns true on success; otherwise, * false is returned and an exception is thrown. * * The "maxAwaitTimeMS" option is assigned to the cursor after query execution * via mongoc_cursor_set_max_await_time_ms(). */ static bool php_phongo_command_init_max_await_time_ms(php_phongo_command_t* intern, zval* options TSRMLS_DC) /* {{{ */ { if (php_array_existsc(options, "maxAwaitTimeMS")) { int64_t max_await_time_ms = php_array_fetchc_long(options, "maxAwaitTimeMS"); if (max_await_time_ms < 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be >= 0, %" PRId64 " given", max_await_time_ms); return false; } if (max_await_time_ms > UINT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be <= %" PRIu32 ", %" PRId64 " given", UINT32_MAX, max_await_time_ms); return false; } intern->max_await_time_ms = (uint32_t) max_await_time_ms; } return true; } /* }}} */ /* Initializes the php_phongo_command_init from options argument. This * function will fall back to a modifier in the absence of a top-level option * (where applicable). */ static bool php_phongo_command_init(php_phongo_command_t* intern, zval* filter, zval* options TSRMLS_DC) /* {{{ */ { bson_iter_t iter; bson_iter_t sub_iter; intern->bson = bson_new(); php_phongo_zval_to_bson(filter, PHONGO_BSON_NONE, intern->bson, NULL TSRMLS_CC); /* Note: if any exceptions are thrown, we can simply return as PHP will * invoke php_phongo_query_free_object to destruct the object. 
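/* [Editorial sketch -- not part of the original sources.]
 * php_phongo_command_init_max_await_time_ms() above validates a signed 64-bit
 * option value before storing it in a uint32_t. A standalone version of that
 * bounds check: */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

static bool
store_max_await_time_ms (int64_t value, uint32_t *out)
{
   if (value < 0 || value > UINT32_MAX) {
      fprintf (stderr,
               "Expected \"maxAwaitTimeMS\" to be between 0 and %u, %lld given\n",
               UINT32_MAX, (long long) value);
      return false;
   }
   *out = (uint32_t) value;
   return true;
}

int
main (void)
{
   uint32_t ms;
   return store_max_await_time_ms (5000, &ms) ? 0 : 1;
}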
*/ if (EG(exception)) { return false; } if (bson_iter_init(&iter, intern->bson) && bson_iter_find_descendant(&iter, "cursor.batchSize", &sub_iter) && BSON_ITER_HOLDS_INT(&sub_iter)) { int64_t batch_size = bson_iter_as_int64(&sub_iter); if (batch_size >= 0 && batch_size <= UINT32_MAX) { intern->batch_size = (uint32_t) batch_size; } } if (!options) { return true; } if (!php_phongo_command_init_max_await_time_ms(intern, options TSRMLS_CC)) { return false; } return true; } /* }}} */ /* {{{ proto void MongoDB\Driver\Command::__construct(array|object $document[, array $options = array()]) Constructs a new Command */ static PHP_METHOD(Command, __construct) { php_phongo_command_t* intern; zend_error_handling error_handling; zval* document; zval* options = NULL; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_COMMAND_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A|a!", &document, &options) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); php_phongo_command_init(intern, document, options TSRMLS_CC); } /* }}} */ /* {{{ MongoDB\Driver\Command function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Command___construct, 0, 0, 1) ZEND_ARG_INFO(0, document) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Command_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_command_me[] = { /* clang-format off */ PHP_ME(Command, __construct, ai_Command___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Command_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Command object handlers */ static zend_object_handlers php_phongo_handler_command; static void php_phongo_command_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_command_t* intern = Z_OBJ_COMMAND(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->bson) { bson_clear(&intern->bson); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_command_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_command_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_command_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_command; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_command_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_command; return retval; } #endif } /* }}} */ static HashTable* php_phongo_command_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_command_t* intern; zval retval = ZVAL_STATIC_INIT; *is_temp = 1; intern = Z_COMMAND_OBJ_P(object); array_init_size(&retval, 1); if (intern->bson) { #if PHP_VERSION_ID >= 70000 zval zv; #else zval* zv; #endif php_phongo_bson_to_zval(bson_get_data(intern->bson), intern->bson->len, &zv); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "command", &zv); #else ADD_ASSOC_ZVAL_EX(&retval, "command", zv); #endif } else { ADD_ASSOC_NULL_EX(&retval, "command"); } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void 
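/* [Editorial sketch -- not part of the original sources.]
 * php_phongo_command_init() above peeks at "cursor.batchSize" inside the
 * command document using libbson's dotted descendant lookup. The same lookup
 * on a hypothetical aggregate command: */
#include <bson/bson.h>
#include <inttypes.h>
#include <stdio.h>

int
main (void)
{
   bson_t *cmd = BCON_NEW ("aggregate", BCON_UTF8 ("coll"),
                           "pipeline", "[", "]",
                           "cursor", "{", "batchSize", BCON_INT32 (100), "}");
   bson_iter_t iter, sub_iter;

   if (bson_iter_init (&iter, cmd) &&
       bson_iter_find_descendant (&iter, "cursor.batchSize", &sub_iter) &&
       BSON_ITER_HOLDS_INT (&sub_iter)) {
      printf ("batchSize: %" PRId64 "\n", bson_iter_as_int64 (&sub_iter));
   }

   bson_destroy (cmd);
   return 0;
}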
php_phongo_command_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Command", php_phongo_command_me); php_phongo_command_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_command_ce->create_object = php_phongo_command_create_object; PHONGO_CE_FINAL(php_phongo_command_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_command_ce); memcpy(&php_phongo_handler_command, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_command.get_debug_info = php_phongo_command_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_command.free_obj = php_phongo_command_free_object; php_phongo_handler_command.offset = XtOffsetOf(php_phongo_command_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Cursor.c0000644000076500000240000004064113572250757016163 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_cursor_ce; /* Check if the cursor is exhausted (i.e. ID is zero) and free any reference to * the session. Calling this function during iteration will allow an implicit * session to return to the pool immediately after a getMore indicates that the * server has no more results to return. 
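/* [Editorial sketch -- not part of the original sources.] The session-freeing
 * helper described in the surrounding comment releases the cursor's session
 * once the server-side cursor is exhausted, which libmongoc reports as a
 * cursor id of 0. Observing that with the public API (hypothetical "db.coll"
 * namespace): */
#include <mongoc/mongoc.h>
#include <inttypes.h>
#include <stdio.h>

int
main (void)
{
   mongoc_client_t *client;
   mongoc_collection_t *coll;
   mongoc_cursor_t *cursor;
   bson_t filter = BSON_INITIALIZER;
   const bson_t *doc;

   mongoc_init ();
   client = mongoc_client_new ("mongodb://127.0.0.1/");
   coll = mongoc_client_get_collection (client, "db", "coll");
   cursor = mongoc_collection_find_with_opts (coll, &filter, NULL, NULL);

   while (mongoc_cursor_next (cursor, &doc)) {
      /* A non-zero id means the server still holds an open cursor */
      printf ("cursor id after this document: %" PRId64 "\n",
              mongoc_cursor_get_id (cursor));
   }

   mongoc_cursor_destroy (cursor);
   bson_destroy (&filter);
   mongoc_collection_destroy (coll);
   mongoc_client_destroy (client);
   mongoc_cleanup ();
   return 0;
}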
*/ static void php_phongo_cursor_free_session_if_exhausted(php_phongo_cursor_t* cursor) /* {{{ */ { if (mongoc_cursor_get_id(cursor->cursor)) { return; } if (!Z_ISUNDEF(cursor->session)) { zval_ptr_dtor(&cursor->session); ZVAL_UNDEF(&cursor->session); } } /* }}} */ static void php_phongo_cursor_free_current(php_phongo_cursor_t* cursor) /* {{{ */ { if (!Z_ISUNDEF(cursor->visitor_data.zchild)) { zval_ptr_dtor(&cursor->visitor_data.zchild); ZVAL_UNDEF(&cursor->visitor_data.zchild); } } /* }}} */ /* {{{ MongoDB\Driver\Cursor iterator handlers */ static void php_phongo_cursor_iterator_dtor(zend_object_iterator* iter TSRMLS_DC) /* {{{ */ { php_phongo_cursor_iterator* cursor_it = (php_phongo_cursor_iterator*) iter; if (!Z_ISUNDEF(cursor_it->intern.data)) { #if PHP_VERSION_ID >= 70000 zval_ptr_dtor(&cursor_it->intern.data); #else zval_ptr_dtor((zval**) &cursor_it->intern.data); cursor_it->intern.data = NULL; #endif } #if PHP_VERSION_ID < 70000 efree(cursor_it); #endif } /* }}} */ static int php_phongo_cursor_iterator_valid(zend_object_iterator* iter TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* cursor = ((php_phongo_cursor_iterator*) iter)->cursor; if (!Z_ISUNDEF(cursor->visitor_data.zchild)) { return SUCCESS; } return FAILURE; } /* }}} */ static void php_phongo_cursor_iterator_get_current_key(zend_object_iterator* iter, zval* key TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* cursor = ((php_phongo_cursor_iterator*) iter)->cursor; ZVAL_LONG(key, cursor->current); } /* }}} */ #if PHP_VERSION_ID < 70000 static void php_phongo_cursor_iterator_get_current_data(zend_object_iterator* iter, zval*** data TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* cursor = ((php_phongo_cursor_iterator*) iter)->cursor; *data = &cursor->visitor_data.zchild; } /* }}} */ #else static zval* php_phongo_cursor_iterator_get_current_data(zend_object_iterator* iter) /* {{{ */ { php_phongo_cursor_t* cursor = ((php_phongo_cursor_iterator*) iter)->cursor; return &cursor->visitor_data.zchild; } /* }}} */ #endif static void php_phongo_cursor_iterator_move_forward(zend_object_iterator* iter TSRMLS_DC) /* {{{ */ { php_phongo_cursor_iterator* cursor_it = (php_phongo_cursor_iterator*) iter; php_phongo_cursor_t* cursor = cursor_it->cursor; const bson_t* doc; php_phongo_cursor_free_current(cursor); /* If the cursor has already advanced, increment its position. Otherwise, * the first call to mongoc_cursor_next() will be made below and we should * leave its position at zero. */ if (cursor->advanced) { cursor->current++; } else { cursor->advanced = true; } if (mongoc_cursor_next(cursor->cursor, &doc)) { php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &cursor->visitor_data); } else { bson_error_t error = { 0 }; const bson_t* doc = NULL; if (mongoc_cursor_error_document(cursor->cursor, &error, &doc)) { /* Intentionally not destroying the cursor as it will happen * naturally now that there are no more results */ phongo_throw_exception_from_bson_error_t_and_reply(&error, doc TSRMLS_CC); } } php_phongo_cursor_free_session_if_exhausted(cursor); } /* }}} */ static void php_phongo_cursor_iterator_rewind(zend_object_iterator* iter TSRMLS_DC) /* {{{ */ { php_phongo_cursor_iterator* cursor_it = (php_phongo_cursor_iterator*) iter; php_phongo_cursor_t* cursor = cursor_it->cursor; const bson_t* doc; /* If the cursor was never advanced (e.g. 
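/* [Editorial sketch -- not part of the original sources.] When iteration
 * stops, the iterator code above distinguishes "no more results" from a
 * server error via mongoc_cursor_error_document(), which also exposes the raw
 * server reply. A standalone version of that check for an existing cursor: */
#include <mongoc/mongoc.h>
#include <stdio.h>

static void
drain_cursor (mongoc_cursor_t *cursor)
{
   const bson_t *doc;
   const bson_t *error_doc = NULL;
   bson_error_t error;

   while (mongoc_cursor_next (cursor, &doc)) {
      /* process doc */
   }

   if (mongoc_cursor_error_document (cursor, &error, &error_doc)) {
      char *json = error_doc ? bson_as_canonical_extended_json (error_doc, NULL) : NULL;
      fprintf (stderr, "cursor failed: %s\nserver reply: %s\n",
               error.message, json ? json : "(none)");
      bson_free (json);
   }
}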
command cursor), do so now */ if (!cursor->advanced) { cursor->advanced = true; if (!phongo_cursor_advance_and_check_for_error(cursor->cursor TSRMLS_CC)) { /* Exception should already have been thrown */ return; } } if (cursor->current > 0) { phongo_throw_exception(PHONGO_ERROR_LOGIC TSRMLS_CC, "Cursors cannot rewind after starting iteration"); return; } php_phongo_cursor_free_current(cursor); doc = mongoc_cursor_current(cursor->cursor); if (doc) { php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &cursor->visitor_data); } php_phongo_cursor_free_session_if_exhausted(cursor); } /* }}} */ static zend_object_iterator_funcs php_phongo_cursor_iterator_funcs = { php_phongo_cursor_iterator_dtor, php_phongo_cursor_iterator_valid, php_phongo_cursor_iterator_get_current_data, php_phongo_cursor_iterator_get_current_key, php_phongo_cursor_iterator_move_forward, php_phongo_cursor_iterator_rewind, NULL /* invalidate_current is not used */ }; static zend_object_iterator* php_phongo_cursor_get_iterator(zend_class_entry* ce, zval* object, int by_ref TSRMLS_DC) /* {{{ */ { php_phongo_cursor_iterator* cursor_it = NULL; php_phongo_cursor_t* cursor = Z_CURSOR_OBJ_P(object); if (by_ref) { zend_error(E_ERROR, "An iterator cannot be used with foreach by reference"); } if (cursor->got_iterator) { phongo_throw_exception(PHONGO_ERROR_LOGIC TSRMLS_CC, "Cursors cannot yield multiple iterators"); return NULL; } cursor->got_iterator = true; cursor_it = ecalloc(1, sizeof(php_phongo_cursor_iterator)); #if PHP_VERSION_ID >= 70000 zend_iterator_init(&cursor_it->intern); #endif #if PHP_VERSION_ID >= 70000 ZVAL_COPY(&cursor_it->intern.data, object); #else Z_ADDREF_P(object); cursor_it->intern.data = (void*) object; #endif cursor_it->intern.funcs = &php_phongo_cursor_iterator_funcs; cursor_it->cursor = cursor; /* cursor_it->current should already be allocated to zero */ php_phongo_cursor_free_current(cursor_it->cursor); return &cursor_it->intern; } /* }}} */ /* }}} */ /* {{{ proto void MongoDB\Driver\Cursor::setTypeMap(array $typemap) Sets a type map to use for BSON unserialization */ static PHP_METHOD(Cursor, setTypeMap) { php_phongo_cursor_t* intern; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; zval* typemap = NULL; bool restore_current_element = false; intern = Z_CURSOR_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a!", &typemap) == FAILURE) { return; } if (!php_phongo_bson_typemap_to_state(typemap, &state.map TSRMLS_CC)) { return; } /* Check if the existing element needs to be freed before we overwrite * visitor_data, which contains the only reference to it. */ if (!Z_ISUNDEF(intern->visitor_data.zchild)) { php_phongo_cursor_free_current(intern); restore_current_element = true; } php_phongo_bson_typemap_dtor(&intern->visitor_data.map); intern->visitor_data = state; /* If the cursor has a current element, we just freed it and should restore * it with a new type map applied. 
*/ if (restore_current_element && mongoc_cursor_current(intern->cursor)) { const bson_t* doc = mongoc_cursor_current(intern->cursor); php_phongo_bson_to_zval_ex(bson_get_data(doc), doc->len, &intern->visitor_data); } } /* }}} */ static int php_phongo_cursor_to_array_apply(zend_object_iterator* iter, void* puser TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval* data; zval* return_value = (zval*) puser; data = iter->funcs->get_current_data(iter TSRMLS_CC); if (EG(exception)) { return ZEND_HASH_APPLY_STOP; } if (Z_ISUNDEF_P(data)) { return ZEND_HASH_APPLY_STOP; } Z_TRY_ADDREF_P(data); add_next_index_zval(return_value, data); #else zval** data; zval* return_value = (zval*) puser; iter->funcs->get_current_data(iter, &data TSRMLS_CC); if (EG(exception)) { return ZEND_HASH_APPLY_STOP; } if (data == NULL || *data == NULL) { return ZEND_HASH_APPLY_STOP; } Z_ADDREF_PP(data); add_next_index_zval(return_value, *data); #endif return ZEND_HASH_APPLY_KEEP; } /* }}} */ /* {{{ proto array MongoDB\Driver\Cursor::toArray() Returns an array of all result documents for this cursor */ static PHP_METHOD(Cursor, toArray) { if (zend_parse_parameters_none() == FAILURE) { return; } array_init(return_value); if (spl_iterator_apply(getThis(), php_phongo_cursor_to_array_apply, (void*) return_value TSRMLS_CC) != SUCCESS) { zval_dtor(return_value); RETURN_NULL(); } } /* }}} */ /* {{{ proto MongoDB\Driver\CursorId MongoDB\Driver\Cursor::getId() Returns the CursorId for this cursor */ static PHP_METHOD(Cursor, getId) { php_phongo_cursor_t* intern; intern = Z_CURSOR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } php_phongo_cursor_id_new_from_id(return_value, mongoc_cursor_get_id(intern->cursor) TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Server MongoDB\Driver\Cursor::getServer() Returns the Server object to which this cursor is attached */ static PHP_METHOD(Cursor, getServer) { php_phongo_cursor_t* intern; intern = Z_CURSOR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } phongo_server_init(return_value, intern->client, intern->server_id TSRMLS_CC); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\Cursor::isDead() Checks if a cursor is still alive */ static PHP_METHOD(Cursor, isDead) { php_phongo_cursor_t* intern; intern = Z_CURSOR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(!mongoc_cursor_more(intern->cursor)); } /* }}} */ /* {{{ MongoDB\Driver\Cursor function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Cursor_setTypeMap, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, typemap, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Cursor_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_cursor_me[] = { /* clang-format off */ PHP_ME(Cursor, setTypeMap, ai_Cursor_setTypeMap, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Cursor, toArray, ai_Cursor_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Cursor, getId, ai_Cursor_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Cursor, getServer, ai_Cursor_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Cursor, isDead, ai_Cursor_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Cursor_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Cursor_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Cursor object handlers */ static zend_object_handlers php_phongo_handler_cursor; static void 
php_phongo_cursor_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* intern = Z_OBJ_CURSOR(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->cursor) { mongoc_cursor_destroy(intern->cursor); } if (intern->database) { efree(intern->database); } if (intern->collection) { efree(intern->collection); } if (!Z_ISUNDEF(intern->query)) { zval_ptr_dtor(&intern->query); } if (!Z_ISUNDEF(intern->command)) { zval_ptr_dtor(&intern->command); } if (!Z_ISUNDEF(intern->read_preference)) { zval_ptr_dtor(&intern->read_preference); } if (!Z_ISUNDEF(intern->session)) { zval_ptr_dtor(&intern->session); } php_phongo_bson_typemap_dtor(&intern->visitor_data.map); php_phongo_cursor_free_current(intern); #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_cursor_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_cursor_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_cursor; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_cursor_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_cursor; return retval; } #endif } /* }}} */ static HashTable* php_phongo_cursor_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* intern; zval retval = ZVAL_STATIC_INIT; *is_temp = 1; intern = Z_CURSOR_OBJ_P(object); array_init_size(&retval, 10); if (intern->database) { ADD_ASSOC_STRING(&retval, "database", intern->database); } else { ADD_ASSOC_NULL_EX(&retval, "database"); } if (intern->collection) { ADD_ASSOC_STRING(&retval, "collection", intern->collection); } else { ADD_ASSOC_NULL_EX(&retval, "collection"); } if (!Z_ISUNDEF(intern->query)) { #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "query", &intern->query); Z_ADDREF(intern->query); #else ADD_ASSOC_ZVAL_EX(&retval, "query", intern->query); Z_ADDREF_P(intern->query); #endif } else { ADD_ASSOC_NULL_EX(&retval, "query"); } if (!Z_ISUNDEF(intern->command)) { #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "command", &intern->command); Z_ADDREF(intern->command); #else ADD_ASSOC_ZVAL_EX(&retval, "command", intern->command); Z_ADDREF_P(intern->command); #endif } else { ADD_ASSOC_NULL_EX(&retval, "command"); } if (!Z_ISUNDEF(intern->read_preference)) { #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "readPreference", &intern->read_preference); Z_ADDREF(intern->read_preference); #else ADD_ASSOC_ZVAL_EX(&retval, "readPreference", intern->read_preference); Z_ADDREF_P(intern->read_preference); #endif } else { ADD_ASSOC_NULL_EX(&retval, "readPreference"); } if (!Z_ISUNDEF(intern->session)) { #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "session", &intern->session); Z_ADDREF(intern->session); #else ADD_ASSOC_ZVAL_EX(&retval, "session", intern->session); Z_ADDREF_P(intern->session); #endif } else { ADD_ASSOC_NULL_EX(&retval, "session"); } ADD_ASSOC_BOOL_EX(&retval, "isDead", !mongoc_cursor_more(intern->cursor)); ADD_ASSOC_LONG_EX(&retval, "currentIndex", intern->current); if (!Z_ISUNDEF(intern->visitor_data.zchild)) { #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "currentDocument", &intern->visitor_data.zchild); 
Z_ADDREF(intern->visitor_data.zchild); #else ADD_ASSOC_ZVAL_EX(&retval, "currentDocument", intern->visitor_data.zchild); Z_ADDREF_P(intern->visitor_data.zchild); #endif } else { ADD_ASSOC_NULL_EX(&retval, "currentDocument"); } { #if PHP_VERSION_ID >= 70000 zval server; phongo_server_init(&server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", &server); #else zval* server = NULL; MAKE_STD_ZVAL(server); phongo_server_init(server, intern->client, intern->server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", server); #endif } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_cursor_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Cursor", php_phongo_cursor_me); php_phongo_cursor_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_cursor_ce->create_object = php_phongo_cursor_create_object; PHONGO_CE_FINAL(php_phongo_cursor_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_cursor_ce); php_phongo_cursor_ce->get_iterator = php_phongo_cursor_get_iterator; zend_class_implements(php_phongo_cursor_ce TSRMLS_CC, 1, php_phongo_cursor_interface_ce); memcpy(&php_phongo_handler_cursor, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_cursor.get_debug_info = php_phongo_cursor_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_cursor.free_obj = php_phongo_cursor_free_object; php_phongo_handler_cursor.offset = XtOffsetOf(php_phongo_cursor_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/CursorId.c0000644000076500000240000001052513572250757016436 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
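/* [Editorial sketch -- not part of the original sources.]
 * CursorId::__toString() in the file that follows renders a 64-bit cursor id
 * as a string with the PRId64 format macro, which the debug handler also uses
 * on 32-bit builds. The same formatting in plain C (the id value is
 * hypothetical): */
#include <inttypes.h>
#include <stdio.h>

int
main (void)
{
   int64_t cursor_id = 8135243949201473537LL;
   char buf[24];

   snprintf (buf, sizeof (buf), "%" PRId64, cursor_id);
   printf ("cursor id as string: %s\n", buf);
   return 0;
}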
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_cursorid_ce; /* {{{ proto string MongoDB\Driver\CursorId::__toString() Returns the string representation of the CursorId */ static PHP_METHOD(CursorId, __toString) { php_phongo_cursorid_t* intern; char* tmp; int tmp_len; intern = Z_CURSORID_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } tmp_len = spprintf(&tmp, 0, "%" PRId64, intern->id); PHONGO_RETVAL_STRINGL(tmp, tmp_len); efree(tmp); } /* }}} */ /* {{{ MongoDB\Driver\CursorId function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_CursorId_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_cursorid_me[] = { /* clang-format off */ PHP_ME(CursorId, __toString, ai_CursorId_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_CursorId_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_CursorId_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\CursorId object handlers */ static zend_object_handlers php_phongo_handler_cursorid; static void php_phongo_cursorid_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_cursorid_t* intern = Z_OBJ_CURSORID(object); zend_object_std_dtor(&intern->std TSRMLS_CC); #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_cursorid_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_cursorid_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_cursorid_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_cursorid; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_cursorid_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_cursorid; return retval; } #endif } /* }}} */ static HashTable* php_phongo_cursorid_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_cursorid_t* intern; zval retval = ZVAL_STATIC_INIT; *is_temp = 1; intern = Z_CURSORID_OBJ_P(object); array_init(&retval); #if SIZEOF_LONG == 4 { char tmp[24]; int tmp_len; tmp_len = snprintf(tmp, sizeof(tmp), "%" PRId64, intern->id); ADD_ASSOC_STRINGL(&retval, "id", tmp, tmp_len); } #else ADD_ASSOC_LONG_EX(&retval, "id", intern->id); #endif return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_cursorid_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "CursorId", php_phongo_cursorid_me); php_phongo_cursorid_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_cursorid_ce->create_object = php_phongo_cursorid_create_object; PHONGO_CE_FINAL(php_phongo_cursorid_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_cursorid_ce); memcpy(&php_phongo_handler_cursorid, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_cursorid.get_debug_info = php_phongo_cursorid_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_cursorid.free_obj = php_phongo_cursorid_free_object; php_phongo_handler_cursorid.offset = XtOffsetOf(php_phongo_cursorid_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 
4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/CursorInterface.c0000644000076500000240000000400713572250757020000 0ustar alcaeusstaff/* * Copyright 2018 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_cursor_interface_ce; /* {{{ MongoDB\BSON\CursorInterface function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_CursorInterface_setTypeMap, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, typemap, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_CursorInterface_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_cursor_interface_me[] = { /* clang-format off */ ZEND_ABSTRACT_ME(CursorInterface, getId, ai_CursorInterface_void) ZEND_ABSTRACT_ME(CursorInterface, getServer, ai_CursorInterface_void) ZEND_ABSTRACT_ME(CursorInterface, isDead, ai_CursorInterface_void) ZEND_ABSTRACT_ME(CursorInterface, setTypeMap, ai_CursorInterface_setTypeMap) ZEND_ABSTRACT_ME(CursorInterface, toArray, ai_CursorInterface_void) PHP_FE_END /* clang-format on */ }; /* }}} */ void php_phongo_cursor_interface_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "CursorInterface", php_phongo_cursor_interface_me); php_phongo_cursor_interface_ce = zend_register_internal_interface(&ce TSRMLS_CC); zend_class_implements(php_phongo_cursor_interface_ce TSRMLS_CC, 1, zend_ce_traversable); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Manager.c0000644000076500000240000007420113572250757016257 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_array_api.h" #include "phongo_compat.h" #include "php_phongo.h" #include "Session.h" #define PHONGO_MANAGER_URI_DEFAULT "mongodb://127.0.0.1/" /** * Manager abstracts a cluster of Server objects (i.e. socket connections). * * Typically, users will connect to a cluster using a URI, and the Manager will * perform tasks such as replica set discovery and create the necessary Server * objects. That said, it is also possible to create a Manager with an arbitrary * collection of Server objects using the static factory method (this can be * useful for testing or administration). 
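/* [Editorial sketch -- not part of the original sources.] The Manager class
 * described above wraps a single mongoc_client_t created from a connection
 * string, defaulting to "mongodb://127.0.0.1/". Creating and validating such
 * a client directly with libmongoc: */
#include <mongoc/mongoc.h>
#include <stdio.h>

int
main (void)
{
   const char *uri_string = "mongodb://127.0.0.1/";
   mongoc_uri_t *uri;
   mongoc_client_t *client;
   bson_error_t error;

   mongoc_init ();

   uri = mongoc_uri_new_with_error (uri_string, &error);
   if (!uri) {
      fprintf (stderr, "invalid URI: %s\n", error.message);
      return 1;
   }

   client = mongoc_client_new_from_uri (uri);

   mongoc_client_destroy (client);
   mongoc_uri_destroy (uri);
   mongoc_cleanup ();
   return 0;
}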
* * Operation methods do not take socket-level options (e.g. socketTimeoutMS). * Those options should be specified during construction. */ zend_class_entry* php_phongo_manager_ce; /* Checks if driverOptions contains a stream context resource in the "context" * key and incorporates any of its SSL options into the base array that did not * already exist (i.e. array union). The "context" key is then unset from the * base array. * * This handles the merging of any legacy SSL context options and also makes * driverOptions suitable for serialization by removing the resource zval. */ static bool php_phongo_manager_merge_context_options(zval* zdriverOptions TSRMLS_DC) /* {{{ */ { php_stream_context* context; zval * zcontext, *zcontextOptions; if (!php_array_existsc(zdriverOptions, "context")) { return true; } zcontext = php_array_fetchc(zdriverOptions, "context"); context = php_stream_context_from_zval(zcontext, 1); if (!context) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"context\" driver option is not a valid Stream-Context resource"); return false; } #if PHP_VERSION_ID >= 70000 zcontextOptions = php_array_fetchc_array(&context->options, "ssl"); #else zcontextOptions = php_array_fetchc_array(context->options, "ssl"); #endif if (!zcontextOptions) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Stream-Context resource does not contain \"ssl\" options array"); return false; } /* Perform array union (see: add_function() in zend_operators.c) */ #if PHP_VERSION_ID >= 70000 zend_hash_merge(Z_ARRVAL_P(zdriverOptions), Z_ARRVAL_P(zcontextOptions), zval_add_ref, 0); #else { zval* tmp; zend_hash_merge(Z_ARRVAL_P(zdriverOptions), Z_ARRVAL_P(zcontextOptions), (void (*)(void* pData)) zval_add_ref, (void*) &tmp, sizeof(zval*), 0); } #endif php_array_unsetc(zdriverOptions, "context"); return true; } /* }}} */ /* Prepare authMechanismProperties for BSON encoding by converting a boolean * value for the "CANONICALIZE_HOST_NAME" option to a string. * * Note: URI options are case-insensitive, so we must iterate through the * HashTable in order to detect options. 
*/ static void php_phongo_manager_prep_authmechanismproperties(zval* properties TSRMLS_DC) /* {{{ */ { HashTable* ht_data; if (Z_TYPE_P(properties) != IS_ARRAY && Z_TYPE_P(properties) != IS_OBJECT) { return; } ht_data = HASH_OF(properties); #if PHP_VERSION_ID >= 70000 { zend_string* string_key = NULL; zend_ulong num_key = 0; zval* property; ZEND_HASH_FOREACH_KEY_VAL(ht_data, num_key, string_key, property) { if (!string_key) { continue; } /* URI options are case-insensitive */ if (!strcasecmp(ZSTR_VAL(string_key), "CANONICALIZE_HOST_NAME")) { ZVAL_DEREF(property); if (Z_TYPE_P(property) != IS_STRING && zend_is_true(property)) { SEPARATE_ZVAL_NOREF(property); ZVAL_NEW_STR(property, zend_string_init(ZEND_STRL("true"), 0)); } } } ZEND_HASH_FOREACH_END(); } #else { HashPosition pos; zval** property; for ( zend_hash_internal_pointer_reset_ex(ht_data, &pos); zend_hash_get_current_data_ex(ht_data, (void**) &property, &pos) == SUCCESS; zend_hash_move_forward_ex(ht_data, &pos)) { char* string_key = NULL; uint string_key_len = 0; ulong num_key = 0; if (HASH_KEY_IS_STRING != zend_hash_get_current_key_ex(ht_data, &string_key, &string_key_len, &num_key, 0, &pos)) { continue; } /* URI options are case-insensitive */ if (!strcasecmp(string_key, "CANONICALIZE_HOST_NAME")) { if (Z_TYPE_PP(property) != IS_STRING && zend_is_true(*property)) { SEPARATE_ZVAL_IF_NOT_REF(property); Z_TYPE_PP(property) = IS_STRING; Z_STRVAL_PP(property) = estrndup("true", sizeof("true") - 1); Z_STRLEN_PP(property) = sizeof("true") - 1; } } } } #endif /* PHP_VERSION_ID >= 70000 */ return; } /* }}} */ /* Prepare URI options for BSON encoding. * * Read preference tag sets must be an array of documents. In order to ensure * that empty arrays serialize as empty documents, array elements will be * converted to objects. php_phongo_read_preference_tags_are_valid() handles * actual validation of the tag set structure. * * Auth mechanism properties must have string values, so a boolean true value * for the "CANONICALIZE_HOST_NAME" property will be converted to "true". * * Note: URI options are case-insensitive, so we must iterate through the * HashTable in order to detect options. 
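/* [Editorial sketch -- not part of the original sources.] The option
 * preparation helpers here match URI option names case-insensitively and
 * normalize a boolean CANONICALIZE_HOST_NAME value to the string "true". A
 * plain-C illustration of that normalization: */
#include <stdio.h>
#include <string.h>
#include <strings.h> /* strcasecmp */

int
main (void)
{
   const char *key = "canonicalize_host_name"; /* user-supplied, any case */
   int value_is_true_bool = 1;                 /* stand-in for a PHP boolean true */
   const char *normalized = NULL;

   if (strcasecmp (key, "CANONICALIZE_HOST_NAME") == 0 && value_is_true_bool) {
      normalized = "true"; /* auth mechanism properties must be strings */
   }

   printf ("normalized value: %s\n", normalized ? normalized : "(unchanged)");
   return 0;
}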
*/ static void php_phongo_manager_prep_uri_options(zval* options TSRMLS_DC) /* {{{ */ { HashTable* ht_data; if (Z_TYPE_P(options) != IS_ARRAY) { return; } ht_data = HASH_OF(options); #if PHP_VERSION_ID >= 70000 { zend_string* string_key = NULL; zend_ulong num_key = 0; zval* option; ZEND_HASH_FOREACH_KEY_VAL(ht_data, num_key, string_key, option) { if (!string_key) { continue; } if (!strcasecmp(ZSTR_VAL(string_key), MONGOC_URI_READPREFERENCETAGS)) { ZVAL_DEREF(option); SEPARATE_ZVAL_NOREF(option); php_phongo_read_preference_prep_tagsets(option TSRMLS_CC); continue; } if (!strcasecmp(ZSTR_VAL(string_key), MONGOC_URI_AUTHMECHANISMPROPERTIES)) { ZVAL_DEREF(option); SEPARATE_ZVAL_NOREF(option); php_phongo_manager_prep_authmechanismproperties(option TSRMLS_CC); continue; } } ZEND_HASH_FOREACH_END(); } #else { HashPosition pos; zval** option; for ( zend_hash_internal_pointer_reset_ex(ht_data, &pos); zend_hash_get_current_data_ex(ht_data, (void**) &option, &pos) == SUCCESS; zend_hash_move_forward_ex(ht_data, &pos)) { char* string_key = NULL; uint string_key_len = 0; ulong num_key = 0; if (HASH_KEY_IS_STRING != zend_hash_get_current_key_ex(ht_data, &string_key, &string_key_len, &num_key, 0, &pos)) { continue; } if (!strcasecmp(string_key, MONGOC_URI_READPREFERENCETAGS)) { SEPARATE_ZVAL_IF_NOT_REF(option); php_phongo_read_preference_prep_tagsets(*option TSRMLS_CC); continue; } if (!strcasecmp(string_key, MONGOC_URI_AUTHMECHANISMPROPERTIES)) { SEPARATE_ZVAL_IF_NOT_REF(option); php_phongo_manager_prep_authmechanismproperties(*option TSRMLS_CC); continue; } } } #endif return; } /* }}} */ /* Selects a server for an execute method. If "for_writes" is true, a primary * will be selected. Otherwise, a read preference will be used to select the * server. If zreadPreference is NULL, the client's read preference will be * used. If zsession is a session object in a sharded transaction, the session * will be checked whether it is pinned to a server. If so, that server will be * selected. Otherwise, server selection * * On success, server_id will be set and the function will return true; * otherwise, false is returned and an exception is thrown. */ static bool php_phongo_manager_select_server(bool for_writes, zval* zreadPreference, zval* zsession, mongoc_client_t* client, uint32_t* server_id TSRMLS_DC) /* {{{ */ { mongoc_server_description_t* selected_server; const mongoc_read_prefs_t* read_preference = NULL; bson_error_t error = { 0 }; if (zsession) { const mongoc_client_session_t* session = Z_SESSION_OBJ_P(zsession)->client_session; /* Attempt to fetch server pinned to session */ if (mongoc_client_session_get_server_id(session) > 0) { *server_id = mongoc_client_session_get_server_id(session); return true; } } if (!for_writes) { read_preference = zreadPreference ? 
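/* [Editorial sketch -- not part of the original sources.] readPreferenceTags
 * must serialize as an array of documents; in libmongoc the equivalent is a
 * read preference with tag sets. A standalone example: */
#include <mongoc/mongoc.h>

static mongoc_read_prefs_t *
make_tagged_read_prefs (void)
{
   /* [ { "dc": "ny" }, { } ]  -- prefer "ny", fall back to any member */
   bson_t *tags = BCON_NEW ("0", "{", "dc", BCON_UTF8 ("ny"), "}",
                            "1", "{", "}");
   mongoc_read_prefs_t *prefs = mongoc_read_prefs_new (MONGOC_READ_SECONDARY_PREFERRED);

   mongoc_read_prefs_set_tags (prefs, tags);
   bson_destroy (tags);
   return prefs;
}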
phongo_read_preference_from_zval(zreadPreference TSRMLS_CC) : mongoc_client_get_read_prefs(client); } selected_server = mongoc_client_select_server(client, for_writes, read_preference, &error); if (selected_server) { *server_id = mongoc_server_description_id(selected_server); mongoc_server_description_destroy(selected_server); return true; } /* Check for connection related exceptions */ if (!EG(exception)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); } return false; } /* }}} */ /* {{{ proto void MongoDB\Driver\Manager::__construct([string $uri = "mongodb://127.0.0.1/"[, array $options = array()[, array $driverOptions = array()]]]) Constructs a new Manager */ static PHP_METHOD(Manager, __construct) { php_phongo_manager_t* intern; zend_error_handling error_handling; char* uri_string = NULL; phongo_zpp_char_len uri_string_len = 0; zval* options = NULL; zval* driverOptions = NULL; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_MANAGER_OBJ_P(getThis()); /* Separate the options and driverOptions zvals, since we may end up * modifying them in php_phongo_manager_prep_uri_options() and * php_phongo_manager_merge_context_options() below, respectively. */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!a/!a/!", &uri_string, &uri_string_len, &options, &driverOptions) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); if (options) { php_phongo_manager_prep_uri_options(options TSRMLS_CC); } if (driverOptions && !php_phongo_manager_merge_context_options(driverOptions TSRMLS_CC)) { /* Exception should already have been thrown */ return; } phongo_manager_init(intern, uri_string ? 
uri_string : PHONGO_MANAGER_URI_DEFAULT, options, driverOptions TSRMLS_CC); if (intern->client) { php_phongo_set_monitoring_callbacks(intern->client); } } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]) Execute a Command */ static PHP_METHOD(Manager, executeCommand) { php_phongo_manager_t* intern; char* db; phongo_zpp_char_len db_len; zval* command; zval* options = NULL; bool free_options = false; zval* zreadPreference = NULL; zval* zsession = NULL; uint32_t server_id = 0; DECLARE_RETURN_VALUE_USED if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } intern = Z_MANAGER_OBJ_P(getThis()); options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC); if (!phongo_parse_session(options, intern->client, NULL, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } if (!php_phongo_manager_select_server(false, zreadPreference, zsession, intern->client, &server_id TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } phongo_execute_command(intern->client, PHONGO_COMMAND_RAW, db, command, options, server_id, return_value, return_value_used TSRMLS_CC); cleanup: if (free_options) { php_phongo_prep_legacy_option_free(options TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeReadCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]) Execute a ReadCommand */ static PHP_METHOD(Manager, executeReadCommand) { php_phongo_manager_t* intern; char* db; phongo_zpp_char_len db_len; zval* command; zval* options = NULL; zval* zreadPreference = NULL; uint32_t server_id = 0; zval* zsession = NULL; DECLARE_RETURN_VALUE_USED if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } intern = Z_MANAGER_OBJ_P(getThis()); if (!phongo_parse_session(options, intern->client, NULL, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ return; } if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) { /* Exception should already have been thrown */ return; } if (!php_phongo_manager_select_server(false, zreadPreference, zsession, intern->client, &server_id TSRMLS_CC)) { /* Exception should already have been thrown */ return; } phongo_execute_command(intern->client, PHONGO_COMMAND_READ, db, command, options, server_id, return_value, return_value_used TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]) Execute a WriteCommand */ static PHP_METHOD(Manager, executeWriteCommand) { php_phongo_manager_t* intern; char* db; phongo_zpp_char_len db_len; zval* command; zval* options = NULL; uint32_t server_id = 0; zval* zsession = NULL; DECLARE_RETURN_VALUE_USED if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } intern = Z_MANAGER_OBJ_P(getThis()); if (!phongo_parse_session(options, intern->client, NULL, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ return; } if (!php_phongo_manager_select_server(true, NULL, 
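/* for_writes=true with a NULL read preference: write commands are always routed to a writable (primary) server */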
zsession, intern->client, &server_id TSRMLS_CC)) { /* Exception should already have been thrown */ return; } phongo_execute_command(intern->client, PHONGO_COMMAND_WRITE, db, command, options, server_id, return_value, return_value_used TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeReadWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null]) Execute a ReadWriteCommand */ static PHP_METHOD(Manager, executeReadWriteCommand) { php_phongo_manager_t* intern; char* db; phongo_zpp_char_len db_len; zval* command; zval* options = NULL; uint32_t server_id = 0; zval* zsession = NULL; DECLARE_RETURN_VALUE_USED if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } intern = Z_MANAGER_OBJ_P(getThis()); if (!phongo_parse_session(options, intern->client, NULL, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ return; } if (!php_phongo_manager_select_server(true, NULL, zsession, intern->client, &server_id TSRMLS_CC)) { /* Exception should already have been thrown */ return; } phongo_execute_command(intern->client, PHONGO_COMMAND_READ_WRITE, db, command, options, server_id, return_value, return_value_used TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Manager::executeQuery(string $namespace, MongoDB\Driver\Query $query[, array $options = null]) Execute a Query */ static PHP_METHOD(Manager, executeQuery) { php_phongo_manager_t* intern; char* namespace; phongo_zpp_char_len namespace_len; zval* query; zval* options = NULL; bool free_options = false; zval* zreadPreference = NULL; uint32_t server_id = 0; zval* zsession = NULL; DECLARE_RETURN_VALUE_USED if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &query, php_phongo_query_ce, &options) == FAILURE) { return; } intern = Z_MANAGER_OBJ_P(getThis()); options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC); if (!phongo_parse_session(options, intern->client, NULL, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } if (!php_phongo_manager_select_server(false, zreadPreference, zsession, intern->client, &server_id TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } phongo_execute_query(intern->client, namespace, query, options, server_id, return_value, return_value_used TSRMLS_CC); cleanup: if (free_options) { php_phongo_prep_legacy_option_free(options TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\WriteResult MongoDB\Driver\Manager::executeBulkWrite(string $namespace, MongoDB\Driver\BulkWrite $zbulk[, array $options = null]) Executes a BulkWrite (i.e. 
any number of insert, update, and delete ops) */ static PHP_METHOD(Manager, executeBulkWrite) { php_phongo_manager_t* intern; char* namespace; phongo_zpp_char_len namespace_len; zval* zbulk; php_phongo_bulkwrite_t* bulk; zval* options = NULL; bool free_options = false; uint32_t server_id = 0; zval* zsession = NULL; DECLARE_RETURN_VALUE_USED if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &zbulk, php_phongo_bulkwrite_ce, &options) == FAILURE) { return; } intern = Z_MANAGER_OBJ_P(getThis()); bulk = Z_BULKWRITE_OBJ_P(zbulk); options = php_phongo_prep_legacy_option(options, "writeConcern", &free_options TSRMLS_CC); if (!phongo_parse_session(options, intern->client, NULL, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ return; } if (!php_phongo_manager_select_server(true, NULL, zsession, intern->client, &server_id TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } phongo_execute_bulk_write(intern->client, namespace, bulk, options, server_id, return_value, return_value_used TSRMLS_CC); cleanup: if (free_options) { php_phongo_prep_legacy_option_free(options TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\ReadConcern MongoDB\Driver\Manager::getReadConcern() Returns the ReadConcern associated with this Manager */ static PHP_METHOD(Manager, getReadConcern) { php_phongo_manager_t* intern; DECLARE_RETURN_VALUE_USED intern = Z_MANAGER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if (return_value_used) { phongo_readconcern_init(return_value, mongoc_client_get_read_concern(intern->client) TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\ReadPreference MongoDB\Driver\Manager::getReadPreference() Returns the ReadPreference associated with this Manager */ static PHP_METHOD(Manager, getReadPreference) { php_phongo_manager_t* intern; DECLARE_RETURN_VALUE_USED intern = Z_MANAGER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if (return_value_used) { phongo_readpreference_init(return_value, mongoc_client_get_read_prefs(intern->client) TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\Server[] MongoDB\Driver\Manager::getServers() Returns the Servers associated with this Manager */ static PHP_METHOD(Manager, getServers) { php_phongo_manager_t* intern; mongoc_server_description_t** sds; size_t i, n = 0; intern = Z_MANAGER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } sds = mongoc_client_get_server_descriptions(intern->client, &n); array_init_size(return_value, n); for (i = 0; i < n; i++) { #if PHP_VERSION_ID >= 70000 zval obj; phongo_server_init(&obj, intern->client, mongoc_server_description_id(sds[i]) TSRMLS_CC); add_next_index_zval(return_value, &obj); #else zval* obj = NULL; MAKE_STD_ZVAL(obj); phongo_server_init(obj, intern->client, mongoc_server_description_id(sds[i]) TSRMLS_CC); add_next_index_zval(return_value, obj); #endif } mongoc_server_descriptions_destroy_all(sds, n); } /* }}} */ /* {{{ proto MongoDB\Driver\WriteConcern MongoDB\Driver\Manager::getWriteConcern() Returns the WriteConcern associated with this Manager */ static PHP_METHOD(Manager, getWriteConcern) { php_phongo_manager_t* intern; DECLARE_RETURN_VALUE_USED intern = Z_MANAGER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if (return_value_used) { phongo_writeconcern_init(return_value, mongoc_client_get_write_concern(intern->client) TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\Server 
MongoDB\Driver\Manager::selectServer(MongoDB\Driver\ReadPreference $readPreference) Returns a suitable Server for the given ReadPreference */ static PHP_METHOD(Manager, selectServer) { php_phongo_manager_t* intern; zval* zreadPreference = NULL; uint32_t server_id = 0; intern = Z_MANAGER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &zreadPreference, php_phongo_readpreference_ce) == FAILURE) { return; } if (!php_phongo_manager_select_server(false, zreadPreference, NULL, intern->client, &server_id TSRMLS_CC)) { /* Exception should already have been thrown */ return; } phongo_server_init(return_value, intern->client, server_id TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Session MongoDB\Driver\Manager::startSession([array $options = null]) Returns a new client session */ static PHP_METHOD(Manager, startSession) { php_phongo_manager_t* intern; zval* options = NULL; mongoc_session_opt_t* cs_opts = NULL; mongoc_client_session_t* cs; bson_error_t error = { 0 }; mongoc_transaction_opt_t* txn_opts = NULL; intern = Z_MANAGER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|a!", &options) == FAILURE) { return; } if (options && php_array_existsc(options, "causalConsistency")) { cs_opts = mongoc_session_opts_new(); mongoc_session_opts_set_causal_consistency(cs_opts, php_array_fetchc_bool(options, "causalConsistency")); } if (options && php_array_existsc(options, "defaultTransactionOptions")) { zval* txn_options = php_array_fetchc(options, "defaultTransactionOptions"); /* Throw an exception and return if the "defaultTransactionOptions" value is not an array */ if (Z_TYPE_P(txn_options) != IS_ARRAY) { phongo_throw_exception( PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"defaultTransactionOptions\" option to be an array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(txn_options)); goto cleanup; } /* Parse transaction options */ txn_opts = php_mongodb_session_parse_transaction_options(txn_options TSRMLS_CC); /* If an exception is thrown while parsing, the txn_opts struct is also * NULL, so no need to free it here */ if (EG(exception)) { goto cleanup; } /* If the options are non-empty, add them to the client session opts struct */ if (txn_opts) { if (!cs_opts) { cs_opts = mongoc_session_opts_new(); } mongoc_session_opts_set_default_transaction_opts(cs_opts, txn_opts); mongoc_transaction_opts_destroy(txn_opts); } } cs = mongoc_client_start_session(intern->client, cs_opts, &error); if (cs) { phongo_session_init(return_value, cs TSRMLS_CC); } else { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); } cleanup: if (cs_opts) { mongoc_session_opts_destroy(cs_opts); } } /* }}} */ /* {{{ MongoDB\Driver\Manager function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Manager___construct, 0, 0, 0) ZEND_ARG_INFO(0, uri) ZEND_ARG_ARRAY_INFO(0, options, 0) ZEND_ARG_ARRAY_INFO(0, driverOptions, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeCommand, 0, 0, 2) ZEND_ARG_INFO(0, db) ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0) ZEND_ARG_INFO(0, options) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeRWCommand, 0, 0, 2) ZEND_ARG_INFO(0, db) ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0) ZEND_ARG_ARRAY_INFO(0, options, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeQuery, 0, 0, 2) ZEND_ARG_INFO(0, namespace) ZEND_ARG_OBJ_INFO(0, zquery, MongoDB\\Driver\\Query, 0) ZEND_ARG_INFO(0, options) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Manager_executeBulkWrite, 0, 0, 2) ZEND_ARG_INFO(0, namespace)
ZEND_ARG_OBJ_INFO(0, zbulk, MongoDB\\Driver\\BulkWrite, 0) ZEND_ARG_INFO(0, options) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Manager_selectServer, 0, 0, 1) ZEND_ARG_OBJ_INFO(0, readPreference, MongoDB\\Driver\\ReadPreference, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Manager_startSession, 0, 0, 0) ZEND_ARG_ARRAY_INFO(0, options, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Manager_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_manager_me[] = { /* clang-format off */ PHP_ME(Manager, __construct, ai_Manager___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, executeCommand, ai_Manager_executeCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, executeReadCommand, ai_Manager_executeRWCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, executeWriteCommand, ai_Manager_executeRWCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, executeReadWriteCommand, ai_Manager_executeCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, executeQuery, ai_Manager_executeQuery, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, executeBulkWrite, ai_Manager_executeBulkWrite, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, getReadConcern, ai_Manager_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, getReadPreference, ai_Manager_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, getServers, ai_Manager_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, getWriteConcern, ai_Manager_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, selectServer, ai_Manager_selectServer, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Manager, startSession, ai_Manager_startSession, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Manager_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Manager object handlers */ static zend_object_handlers php_phongo_handler_manager; static void php_phongo_manager_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_manager_t* intern = Z_OBJ_MANAGER(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->client) { MONGOC_DEBUG("Not destroying persistent client for Manager"); intern->client = NULL; } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_manager_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_manager_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_manager_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_manager; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_manager_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_manager; return retval; } #endif } /* }}} */ static HashTable* php_phongo_manager_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_manager_t* intern; mongoc_server_description_t** sds; size_t i, n = 0; zval retval = ZVAL_STATIC_INIT; ZVAL_RETVAL_TYPE cluster; *is_temp = 1; intern = Z_MANAGER_OBJ_P(object); array_init_size(&retval, 2); ADD_ASSOC_STRING(&retval, "uri", mongoc_uri_get_string(mongoc_client_get_uri(intern->client))); sds = mongoc_client_get_server_descriptions(intern->client, &n); #if PHP_VERSION_ID >= 70000 array_init_size(&cluster, 
n); for (i = 0; i < n; i++) { zval obj; php_phongo_server_to_zval(&obj, sds[i]); add_next_index_zval(&cluster, &obj); } ADD_ASSOC_ZVAL_EX(&retval, "cluster", &cluster); #else MAKE_STD_ZVAL(cluster); array_init_size(cluster, n); for (i = 0; i < n; i++) { zval* obj = NULL; MAKE_STD_ZVAL(obj); php_phongo_server_to_zval(obj, sds[i]); add_next_index_zval(cluster, obj); } ADD_ASSOC_ZVAL_EX(&retval, "cluster", cluster); #endif mongoc_server_descriptions_destroy_all(sds, n); return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_manager_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Manager", php_phongo_manager_me); php_phongo_manager_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_manager_ce->create_object = php_phongo_manager_create_object; PHONGO_CE_FINAL(php_phongo_manager_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_manager_ce); memcpy(&php_phongo_handler_manager, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_manager.get_debug_info = php_phongo_manager_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_manager.free_obj = php_phongo_manager_free_object; php_phongo_handler_manager.offset = XtOffsetOf(php_phongo_manager_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Query.c0000644000076500000240000005112013572250757016005 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_array_api.h" #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_query_ce; /* Appends a string field into the BSON options. Returns true on success; * otherwise, false is returned and an exception is thrown. */ static bool php_phongo_query_opts_append_string(bson_t* opts, const char* opts_key, zval* zarr, const char* zarr_key TSRMLS_DC) /* {{{ */ { zval* value = php_array_fetch(zarr, zarr_key); if (Z_TYPE_P(value) != IS_STRING) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" %s to be string, %s given", zarr_key, zarr_key[0] == '$' ? "modifier" : "option", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(value)); return false; } if (!bson_append_utf8(opts, opts_key, strlen(opts_key), Z_STRVAL_P(value), Z_STRLEN_P(value))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", opts_key); return false; } return true; } /* }}} */ /* Appends a document field for the given opts document and key. Returns true on * success; otherwise, false is returned and an exception is thrown. 
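 *
 * For example (illustrative values): a "projection" option of ["x" => 1] is
 * converted with php_phongo_zval_to_bson(), rejected if it contains empty keys,
 * and then appended to the query options as an embedded document.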
*/ static bool php_phongo_query_opts_append_document(bson_t* opts, const char* opts_key, zval* zarr, const char* zarr_key TSRMLS_DC) /* {{{ */ { zval* value = php_array_fetch(zarr, zarr_key); bson_t b = BSON_INITIALIZER; if (Z_TYPE_P(value) != IS_OBJECT && Z_TYPE_P(value) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"%s\" %s to be array or object, %s given", zarr_key, zarr_key[0] == '$' ? "modifier" : "option", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(value)); return false; } php_phongo_zval_to_bson(value, PHONGO_BSON_NONE, &b, NULL TSRMLS_CC); if (EG(exception)) { bson_destroy(&b); return false; } if (!bson_validate(&b, BSON_VALIDATE_EMPTY_KEYS, NULL)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot use empty keys in \"%s\" %s", zarr_key, zarr_key[0] == '$' ? "modifier" : "option"); bson_destroy(&b); return false; } if (!BSON_APPEND_DOCUMENT(opts, opts_key, &b)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", opts_key); bson_destroy(&b); return false; } bson_destroy(&b); return true; } /* }}} */ #define PHONGO_QUERY_OPT_BOOL_EX(opt, zarr, key, deprecated) \ if ((zarr) && php_array_existsc((zarr), (key))) { \ if ((deprecated)) { \ php_error_docref(NULL TSRMLS_CC, E_DEPRECATED, "The \"%s\" option is deprecated and will be removed in a future release", key); \ } \ if (!BSON_APPEND_BOOL(intern->opts, (opt), php_array_fetchc_bool((zarr), (key)))) { \ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \ return false; \ } \ } #define PHONGO_QUERY_OPT_BOOL(opt, zarr, key) PHONGO_QUERY_OPT_BOOL_EX((opt), (zarr), (key), 0) #define PHONGO_QUERY_OPT_BOOL_DEPRECATED(opt, zarr, key) PHONGO_QUERY_OPT_BOOL_EX((opt), (zarr), (key), 1) #define PHONGO_QUERY_OPT_DOCUMENT(opt, zarr, key) \ if ((zarr) && php_array_existsc((zarr), (key))) { \ if (!php_phongo_query_opts_append_document(intern->opts, (opt), (zarr), (key) TSRMLS_CC)) { \ return false; \ } \ } /* Note: handling of integer options will depend on SIZEOF_ZEND_LONG and we * are not converting strings to 64-bit integers for 32-bit platforms. */ #define PHONGO_QUERY_OPT_INT64_EX(opt, zarr, key, deprecated) \ if ((zarr) && php_array_existsc((zarr), (key))) { \ if ((deprecated)) { \ php_error_docref(NULL TSRMLS_CC, E_DEPRECATED, "The \"%s\" option is deprecated and will be removed in a future release", key); \ } \ if (!BSON_APPEND_INT64(intern->opts, (opt), php_array_fetchc_long((zarr), (key)))) { \ phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"%s\" option", (opt)); \ return false; \ } \ } #define PHONGO_QUERY_OPT_INT64(opt, zarr, key) PHONGO_QUERY_OPT_INT64_EX((opt), (zarr), (key), 0) #define PHONGO_QUERY_OPT_INT64_DEPRECATED(opt, zarr, key) PHONGO_QUERY_OPT_INT64_EX((opt), (zarr), (key), 1) #define PHONGO_QUERY_OPT_STRING(opt, zarr, key) \ if ((zarr) && php_array_existsc((zarr), (key))) { \ if (!php_phongo_query_opts_append_string(intern->opts, (opt), (zarr), (key) TSRMLS_CC)) { \ return false; \ } \ } /* Initialize the "hint" option. Returns true on success; otherwise, false is * returned and an exception is thrown. * * The "hint" option (or "$hint" modifier) must be a string or document. Check * for both types and merge into BSON options accordingly. */ static bool php_phongo_query_init_hint(php_phongo_query_t* intern, zval* options, zval* modifiers TSRMLS_DC) /* {{{ */ { /* The "hint" option (or "$hint" modifier) must be a string or document. 
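 * (Illustrative: ["hint" => "x_1"] is appended as a UTF-8 string, while
 * ["hint" => ["x" => 1]] is appended as an embedded document.)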
* Check for both types and merge into BSON options accordingly. */ if (php_array_existsc(options, "hint")) { zend_uchar type = Z_TYPE_P(php_array_fetchc(options, "hint")); if (type == IS_STRING) { PHONGO_QUERY_OPT_STRING("hint", options, "hint"); } else if (type == IS_OBJECT || type == IS_ARRAY) { PHONGO_QUERY_OPT_DOCUMENT("hint", options, "hint"); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"hint\" option to be string, array, or object, %s given", zend_get_type_by_const(type)); return false; } } else if (modifiers && php_array_existsc(modifiers, "$hint")) { zend_uchar type = Z_TYPE_P(php_array_fetchc(modifiers, "$hint")); if (type == IS_STRING) { PHONGO_QUERY_OPT_STRING("hint", modifiers, "$hint"); } else if (type == IS_OBJECT || type == IS_ARRAY) { PHONGO_QUERY_OPT_DOCUMENT("hint", modifiers, "$hint"); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"$hint\" modifier to be string, array, or object, %s given", zend_get_type_by_const(type)); return false; } } return true; } /* }}} */ /* Initialize the "limit" and "singleBatch" options. Returns true on success; * otherwise, false is returned and an exception is thrown. * * mongoc_collection_find_with_opts() requires a non-negative limit. For * backwards compatibility, a negative limit should be set as a positive value * and default singleBatch to true. */ static bool php_phongo_query_init_limit_and_singlebatch(php_phongo_query_t* intern, zval* options TSRMLS_DC) /* {{{ */ { if (php_array_existsc(options, "limit") && php_array_fetchc_long(options, "limit") < 0) { phongo_long limit = php_array_fetchc_long(options, "limit"); if (!BSON_APPEND_INT64(intern->opts, "limit", -limit)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"limit\" option"); return false; } if (php_array_existsc(options, "singleBatch") && !php_array_fetchc_bool(options, "singleBatch")) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Negative \"limit\" option conflicts with false \"singleBatch\" option"); return false; } else { if (!BSON_APPEND_BOOL(intern->opts, "singleBatch", true)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"singleBatch\" option"); return false; } } } else { PHONGO_QUERY_OPT_INT64("limit", options, "limit"); PHONGO_QUERY_OPT_BOOL("singleBatch", options, "singleBatch"); } return true; } /* }}} */ /* Initialize the "readConcern" option. Returns true on success; otherwise, * false is returned and an exception is thrown. * * The "readConcern" option should be a MongoDB\Driver\ReadConcern instance, * which must be converted to a mongoc_read_concern_t. */ static bool php_phongo_query_init_readconcern(php_phongo_query_t* intern, zval* options TSRMLS_DC) /* {{{ */ { if (php_array_existsc(options, "readConcern")) { zval* read_concern = php_array_fetchc(options, "readConcern"); if (Z_TYPE_P(read_concern) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(read_concern), php_phongo_readconcern_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_readconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(read_concern)); return false; } intern->read_concern = mongoc_read_concern_copy(phongo_read_concern_from_zval(read_concern TSRMLS_CC)); } return true; } /* }}} */ /* Initialize the "maxAwaitTimeMS" option. Returns true on success; otherwise, * false is returned and an exception is thrown. 
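 * (Note: this option only has an effect on tailable cursors created with the
 * "awaitData" option, where it bounds how long the server waits for new
 * results on each getMore.)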
* * The "maxAwaitTimeMS" option is assigned to the cursor after query execution * via mongoc_cursor_set_max_await_time_ms(). */ static bool php_phongo_query_init_max_await_time_ms(php_phongo_query_t* intern, zval* options TSRMLS_DC) /* {{{ */ { if (php_array_existsc(options, "maxAwaitTimeMS")) { int64_t max_await_time_ms = php_array_fetchc_long(options, "maxAwaitTimeMS"); if (max_await_time_ms < 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be >= 0, %" PRId64 " given", max_await_time_ms); return false; } if (max_await_time_ms > UINT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxAwaitTimeMS\" option to be <= %" PRIu32 ", %" PRId64 " given", UINT32_MAX, max_await_time_ms); return false; } intern->max_await_time_ms = (uint32_t) max_await_time_ms; } return true; } /* }}} */ /* Initializes the php_phongo_query_t from filter and options arguments. This * function will fall back to a modifier in the absence of a top-level option * (where applicable). */ static bool php_phongo_query_init(php_phongo_query_t* intern, zval* filter, zval* options TSRMLS_DC) /* {{{ */ { zval* modifiers = NULL; intern->filter = bson_new(); intern->opts = bson_new(); php_phongo_zval_to_bson(filter, PHONGO_BSON_NONE, intern->filter, NULL TSRMLS_CC); /* Note: if any exceptions are thrown, we can simply return as PHP will * invoke php_phongo_query_free_object to destruct the object. */ if (EG(exception)) { return false; } if (!bson_validate(intern->filter, BSON_VALIDATE_EMPTY_KEYS, NULL)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot use empty keys in filter document"); return false; } if (!options) { return true; } if (php_array_existsc(options, "modifiers")) { modifiers = php_array_fetchc(options, "modifiers"); if (Z_TYPE_P(modifiers) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"modifiers\" option to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(modifiers)); return false; } } PHONGO_QUERY_OPT_BOOL("allowPartialResults", options, "allowPartialResults") else PHONGO_QUERY_OPT_BOOL("allowPartialResults", options, "partial"); PHONGO_QUERY_OPT_BOOL("awaitData", options, "awaitData"); PHONGO_QUERY_OPT_INT64("batchSize", options, "batchSize"); PHONGO_QUERY_OPT_DOCUMENT("collation", options, "collation"); PHONGO_QUERY_OPT_STRING("comment", options, "comment") else PHONGO_QUERY_OPT_STRING("comment", modifiers, "$comment"); PHONGO_QUERY_OPT_BOOL("exhaust", options, "exhaust"); PHONGO_QUERY_OPT_DOCUMENT("max", options, "max") else PHONGO_QUERY_OPT_DOCUMENT("max", modifiers, "$max"); PHONGO_QUERY_OPT_INT64_DEPRECATED("maxScan", options, "maxScan") else PHONGO_QUERY_OPT_INT64_DEPRECATED("maxScan", modifiers, "$maxScan"); PHONGO_QUERY_OPT_INT64("maxTimeMS", options, "maxTimeMS") else PHONGO_QUERY_OPT_INT64("maxTimeMS", modifiers, "$maxTimeMS"); PHONGO_QUERY_OPT_DOCUMENT("min", options, "min") else PHONGO_QUERY_OPT_DOCUMENT("min", modifiers, "$min"); PHONGO_QUERY_OPT_BOOL("noCursorTimeout", options, "noCursorTimeout"); PHONGO_QUERY_OPT_BOOL("oplogReplay", options, "oplogReplay"); PHONGO_QUERY_OPT_DOCUMENT("projection", options, "projection"); PHONGO_QUERY_OPT_BOOL("returnKey", options, "returnKey") else PHONGO_QUERY_OPT_BOOL("returnKey", modifiers, "$returnKey"); PHONGO_QUERY_OPT_BOOL("showRecordId", options, "showRecordId") else PHONGO_QUERY_OPT_BOOL("showRecordId", modifiers, "$showDiskLoc"); PHONGO_QUERY_OPT_INT64("skip", options, "skip"); 
PHONGO_QUERY_OPT_DOCUMENT("sort", options, "sort") else PHONGO_QUERY_OPT_DOCUMENT("sort", modifiers, "$orderby"); PHONGO_QUERY_OPT_BOOL_DEPRECATED("snapshot", options, "snapshot") else PHONGO_QUERY_OPT_BOOL_DEPRECATED("snapshot", modifiers, "$snapshot"); PHONGO_QUERY_OPT_BOOL("tailable", options, "tailable"); /* The "$explain" modifier should be converted to an "explain" option, which * libmongoc will later convert back to a modifier for the OP_QUERY code * path. This modifier will be ignored for the find command code path. */ PHONGO_QUERY_OPT_BOOL("explain", modifiers, "$explain"); if (!php_phongo_query_init_hint(intern, options, modifiers TSRMLS_CC)) { return false; } if (!php_phongo_query_init_limit_and_singlebatch(intern, options TSRMLS_CC)) { return false; } if (!php_phongo_query_init_readconcern(intern, options TSRMLS_CC)) { return false; } if (!php_phongo_query_init_max_await_time_ms(intern, options TSRMLS_CC)) { return false; } return true; } /* }}} */ #undef PHONGO_QUERY_OPT_BOOL #undef PHONGO_QUERY_OPT_DOCUMENT #undef PHONGO_QUERY_OPT_INT64 #undef PHONGO_QUERY_OPT_STRING /* {{{ proto void MongoDB\Driver\Query::__construct(array|object $filter[, array $options = array()]) Constructs a new Query */ static PHP_METHOD(Query, __construct) { php_phongo_query_t* intern; zend_error_handling error_handling; zval* filter; zval* options = NULL; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_QUERY_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A|a!", &filter, &options) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); php_phongo_query_init(intern, filter, options TSRMLS_CC); } /* }}} */ /* {{{ MongoDB\Driver\Query function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Query___construct, 0, 0, 1) ZEND_ARG_INFO(0, filter) ZEND_ARG_ARRAY_INFO(0, options, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Query_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_query_me[] = { /* clang-format off */ PHP_ME(Query, __construct, ai_Query___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Query_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Query object handlers */ static zend_object_handlers php_phongo_handler_query; static void php_phongo_query_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_query_t* intern = Z_OBJ_QUERY(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->filter) { bson_clear(&intern->filter); } if (intern->opts) { bson_clear(&intern->opts); } if (intern->read_concern) { mongoc_read_concern_destroy(intern->read_concern); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_query_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_query_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_query_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_query; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_query_free_object, NULL TSRMLS_CC); retval.handlers = 
&php_phongo_handler_query; return retval; } #endif } /* }}} */ static HashTable* php_phongo_query_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_query_t* intern; zval retval = ZVAL_STATIC_INIT; *is_temp = 1; intern = Z_QUERY_OBJ_P(object); array_init_size(&retval, 3); /* Avoid using PHONGO_TYPEMAP_NATIVE_ARRAY for decoding filter and opts * documents so that users can differentiate BSON arrays and documents. */ if (intern->filter) { #if PHP_VERSION_ID >= 70000 zval zv; #else zval* zv; #endif php_phongo_bson_to_zval(bson_get_data(intern->filter), intern->filter->len, &zv); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "filter", &zv); #else ADD_ASSOC_ZVAL_EX(&retval, "filter", zv); #endif } else { ADD_ASSOC_NULL_EX(&retval, "filter"); } if (intern->opts) { #if PHP_VERSION_ID >= 70000 zval zv; #else zval* zv; #endif php_phongo_bson_to_zval(bson_get_data(intern->opts), intern->opts->len, &zv); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "options", &zv); #else ADD_ASSOC_ZVAL_EX(&retval, "options", zv); #endif } else { ADD_ASSOC_NULL_EX(&retval, "options"); } if (intern->read_concern) { #if PHP_VERSION_ID >= 70000 zval read_concern; php_phongo_read_concern_to_zval(&read_concern, intern->read_concern); ADD_ASSOC_ZVAL_EX(&retval, "readConcern", &read_concern); #else zval* read_concern = NULL; MAKE_STD_ZVAL(read_concern); php_phongo_read_concern_to_zval(read_concern, intern->read_concern); ADD_ASSOC_ZVAL_EX(&retval, "readConcern", read_concern); #endif } else { ADD_ASSOC_NULL_EX(&retval, "readConcern"); } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_query_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Query", php_phongo_query_me); php_phongo_query_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_query_ce->create_object = php_phongo_query_create_object; PHONGO_CE_FINAL(php_phongo_query_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_query_ce); memcpy(&php_phongo_handler_query, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_query.get_debug_info = php_phongo_query_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_query.free_obj = php_phongo_query_free_object; php_phongo_handler_query.offset = XtOffsetOf(php_phongo_query_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/ReadConcern.c0000644000076500000240000002351213572250757017067 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_readconcern_ce; /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. 
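 * (Illustrative: the props HashTable typically comes from
 * ReadConcern::__set_state(), e.g. array("level" => "majority"); only a string
 * "level" field is recognized.)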
*/ static bool php_phongo_readconcern_init_from_hash(php_phongo_readconcern_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval* level; intern->read_concern = mongoc_read_concern_new(); if ((level = zend_hash_str_find(props, "level", sizeof("level") - 1))) { if (Z_TYPE_P(level) == IS_STRING) { mongoc_read_concern_set_level(intern->read_concern, Z_STRVAL_P(level)); return true; } phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"level\" string field", ZSTR_VAL(php_phongo_readconcern_ce->name)); goto failure; } #else zval** level; intern->read_concern = mongoc_read_concern_new(); if (zend_hash_find(props, "level", sizeof("level"), (void**) &level) == SUCCESS) { if (Z_TYPE_PP(level) == IS_STRING) { mongoc_read_concern_set_level(intern->read_concern, Z_STRVAL_PP(level)); return true; } phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"level\" string field", ZSTR_VAL(php_phongo_readconcern_ce->name)); goto failure; } #endif return true; failure: mongoc_read_concern_destroy(intern->read_concern); intern->read_concern = NULL; return false; } /* }}} */ /* {{{ proto void MongoDB\Driver\ReadConcern::__construct([string $level]) Constructs a new ReadConcern */ static PHP_METHOD(ReadConcern, __construct) { php_phongo_readconcern_t* intern; zend_error_handling error_handling; char* level = NULL; phongo_zpp_char_len level_len = 0; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_READCONCERN_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|s!", &level, &level_len) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); intern->read_concern = mongoc_read_concern_new(); if (level) { mongoc_read_concern_set_level(intern->read_concern, level); } } /* }}} */ /* {{{ proto void MongoDB\BSON\ReadConcern::__set_state(array $properties) */ static PHP_METHOD(ReadConcern, __set_state) { php_phongo_readconcern_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_readconcern_ce); intern = Z_READCONCERN_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_readconcern_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string|null MongoDB\Driver\ReadConcern::getLevel() Returns the ReadConcern "level" option */ static PHP_METHOD(ReadConcern, getLevel) { php_phongo_readconcern_t* intern; const char* level; intern = Z_READCONCERN_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } level = mongoc_read_concern_get_level(intern->read_concern); if (level) { PHONGO_RETURN_STRING(level); } RETURN_NULL(); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\ReadConcern::isDefault() Returns whether the read concern has not been modified (i.e. constructed without a level or from a Manager with no read concern URI options). 
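 * (Illustrative: a ReadConcern constructed with no arguments reports
 * isDefault() as true, while one constructed with ReadConcern::MAJORITY
 * reports false.)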
*/ static PHP_METHOD(ReadConcern, isDefault) { php_phongo_readconcern_t* intern; intern = Z_READCONCERN_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(mongoc_read_concern_is_default(intern->read_concern)); } /* }}} */ static HashTable* php_phongo_read_concern_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_readconcern_t* intern; HashTable* props; const char* level; intern = Z_READCONCERN_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 1); if (!intern->read_concern) { return props; } level = mongoc_read_concern_get_level(intern->read_concern); if (level) { #if PHP_VERSION_ID >= 70000 zval z_level; ZVAL_STRING(&z_level, level); zend_hash_str_update(props, "level", sizeof("level") - 1, &z_level); #else zval* z_level; MAKE_STD_ZVAL(z_level); ZVAL_STRING(z_level, level, 1); zend_hash_update(props, "level", sizeof("level"), &z_level, sizeof(z_level), NULL); #endif } return props; } /* }}} */ /* {{{ proto array MongoDB\Driver\ReadConcern::bsonSerialize() */ static PHP_METHOD(ReadConcern, bsonSerialize) { if (zend_parse_parameters_none() == FAILURE) { return; } ZVAL_ARR(return_value, php_phongo_read_concern_get_properties_hash(getThis(), true TSRMLS_CC)); convert_to_object(return_value); } /* }}} */ /* {{{ MongoDB\Driver\ReadConcern function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_ReadConcern___construct, 0, 0, 0) ZEND_ARG_INFO(0, level) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_ReadConcern___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_ReadConcern_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_readconcern_me[] = { /* clang-format off */ PHP_ME(ReadConcern, __construct, ai_ReadConcern___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ReadConcern, __set_state, ai_ReadConcern___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(ReadConcern, getLevel, ai_ReadConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ReadConcern, isDefault, ai_ReadConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ReadConcern, bsonSerialize, ai_ReadConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\ReadConcern object handlers */ static zend_object_handlers php_phongo_handler_readconcern; static void php_phongo_readconcern_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_readconcern_t* intern = Z_OBJ_READCONCERN(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } if (intern->read_concern) { mongoc_read_concern_destroy(intern->read_concern); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } static phongo_create_object_retval php_phongo_readconcern_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_readconcern_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_readconcern_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_readconcern; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_readconcern_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_readconcern; return retval; } #endif } /* }}} */ static HashTable* 
php_phongo_readconcern_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_read_concern_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_readconcern_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_read_concern_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ void php_phongo_readconcern_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "ReadConcern", php_phongo_readconcern_me); php_phongo_readconcern_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_readconcern_ce->create_object = php_phongo_readconcern_create_object; PHONGO_CE_FINAL(php_phongo_readconcern_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_readconcern_ce); zend_class_implements(php_phongo_readconcern_ce TSRMLS_CC, 1, php_phongo_serializable_ce); memcpy(&php_phongo_handler_readconcern, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_readconcern.get_debug_info = php_phongo_readconcern_get_debug_info; php_phongo_handler_readconcern.get_properties = php_phongo_readconcern_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_readconcern.free_obj = php_phongo_readconcern_free_object; php_phongo_handler_readconcern.offset = XtOffsetOf(php_phongo_readconcern_t, std); #endif zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("LOCAL"), ZEND_STRL(MONGOC_READ_CONCERN_LEVEL_LOCAL) TSRMLS_CC); zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("MAJORITY"), ZEND_STRL(MONGOC_READ_CONCERN_LEVEL_MAJORITY) TSRMLS_CC); zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("LINEARIZABLE"), ZEND_STRL(MONGOC_READ_CONCERN_LEVEL_LINEARIZABLE) TSRMLS_CC); zend_declare_class_constant_stringl(php_phongo_readconcern_ce, ZEND_STRL("AVAILABLE"), ZEND_STRL(MONGOC_READ_CONCERN_LEVEL_AVAILABLE) TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/ReadPreference.c0000644000076500000240000005720413572250757017563 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_array_api.h" #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_readpreference_ce; /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. 
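 * (Illustrative: the props HashTable typically comes from
 * ReadPreference::__set_state(), e.g. array("mode" => "secondaryPreferred",
 * "tags" => array(array("dc" => "ny")), "maxStalenessSeconds" => 90); "mode" is
 * required and is validated before the optional fields.)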
*/ static bool php_phongo_readpreference_init_from_hash(php_phongo_readpreference_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval *mode, *tagSets, *maxStalenessSeconds; if ((mode = zend_hash_str_find(props, "mode", sizeof("mode") - 1)) && Z_TYPE_P(mode) == IS_STRING) { if (strcasecmp(Z_STRVAL_P(mode), "primary") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY); } else if (strcasecmp(Z_STRVAL_P(mode), "primaryPreferred") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY_PREFERRED); } else if (strcasecmp(Z_STRVAL_P(mode), "secondary") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY); } else if (strcasecmp(Z_STRVAL_P(mode), "secondaryPreferred") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY_PREFERRED); } else if (strcasecmp(Z_STRVAL_P(mode), "nearest") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_NEAREST); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires specific values for \"mode\" string field", ZSTR_VAL(php_phongo_readpreference_ce->name)); return false; } } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"mode\" field to be string", ZSTR_VAL(php_phongo_readpreference_ce->name)); return false; } if ((tagSets = zend_hash_str_find(props, "tags", sizeof("tags") - 1))) { if (Z_TYPE_P(tagSets) == IS_ARRAY) { bson_t* tags = bson_new(); php_phongo_read_preference_prep_tagsets(tagSets TSRMLS_CC); php_phongo_zval_to_bson(tagSets, PHONGO_BSON_NONE, (bson_t*) tags, NULL TSRMLS_CC); if (!php_phongo_read_preference_tags_are_valid(tags)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"tags\" array field to have zero or more documents", ZSTR_VAL(php_phongo_readpreference_ce->name)); bson_destroy(tags); goto failure; } if (!bson_empty(tags) && (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"tags\" array field to not be present with \"primary\" mode", ZSTR_VAL(php_phongo_readpreference_ce->name)); bson_destroy(tags); goto failure; } mongoc_read_prefs_set_tags(intern->read_preference, tags); bson_destroy(tags); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"tags\" field to be array", ZSTR_VAL(php_phongo_readpreference_ce->name)); goto failure; } } if ((maxStalenessSeconds = zend_hash_str_find(props, "maxStalenessSeconds", sizeof("maxStalenessSeconds") - 1))) { if (Z_TYPE_P(maxStalenessSeconds) == IS_LONG) { if (Z_LVAL_P(maxStalenessSeconds) < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" integer field to be >= %d", ZSTR_VAL(php_phongo_readpreference_ce->name), MONGOC_SMALLEST_MAX_STALENESS_SECONDS); goto failure; } if (Z_LVAL_P(maxStalenessSeconds) > INT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" integer field to be <= %" PRId32, ZSTR_VAL(php_phongo_readpreference_ce->name), INT32_MAX); goto failure; } if (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" array field to not be 
present with \"primary\" mode", ZSTR_VAL(php_phongo_readpreference_ce->name)); goto failure; } mongoc_read_prefs_set_max_staleness_seconds(intern->read_preference, Z_LVAL_P(maxStalenessSeconds)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" field to be integer", ZSTR_VAL(php_phongo_readpreference_ce->name)); goto failure; } } #else zval **mode, **tagSets, **maxStalenessSeconds; if (zend_hash_find(props, "mode", sizeof("mode"), (void**) &mode) == SUCCESS && Z_TYPE_PP(mode) == IS_STRING) { if (strcasecmp(Z_STRVAL_PP(mode), "primary") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY); } else if (strcasecmp(Z_STRVAL_PP(mode), "primaryPreferred") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY_PREFERRED); } else if (strcasecmp(Z_STRVAL_PP(mode), "secondary") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY); } else if (strcasecmp(Z_STRVAL_PP(mode), "secondaryPreferred") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY_PREFERRED); } else if (strcasecmp(Z_STRVAL_PP(mode), "nearest") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_NEAREST); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires specific values for \"mode\" string field", ZSTR_VAL(php_phongo_readpreference_ce->name)); return false; } } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"mode\" field to be string", ZSTR_VAL(php_phongo_readpreference_ce->name)); return false; } if (zend_hash_find(props, "tags", sizeof("tags"), (void**) &tagSets) == SUCCESS) { if (Z_TYPE_PP(tagSets) == IS_ARRAY) { bson_t* tags = bson_new(); php_phongo_read_preference_prep_tagsets(*tagSets TSRMLS_CC); php_phongo_zval_to_bson(*tagSets, PHONGO_BSON_NONE, (bson_t*) tags, NULL TSRMLS_CC); if (!php_phongo_read_preference_tags_are_valid(tags)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"tags\" array field to have zero or more documents", ZSTR_VAL(php_phongo_readpreference_ce->name)); bson_destroy(tags); goto failure; } if (!bson_empty(tags) && (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"tags\" array field to not be present with \"primary\" mode", ZSTR_VAL(php_phongo_readpreference_ce->name)); bson_destroy(tags); goto failure; } mongoc_read_prefs_set_tags(intern->read_preference, tags); bson_destroy(tags); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"tags\" field to be array", ZSTR_VAL(php_phongo_readpreference_ce->name)); goto failure; } } if (zend_hash_find(props, "maxStalenessSeconds", sizeof("maxStalenessSeconds"), (void**) &maxStalenessSeconds) == SUCCESS) { if (Z_TYPE_PP(maxStalenessSeconds) == IS_LONG) { if (Z_LVAL_PP(maxStalenessSeconds) < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" integer field to be >= %d", ZSTR_VAL(php_phongo_readpreference_ce->name), MONGOC_SMALLEST_MAX_STALENESS_SECONDS); goto failure; } if (Z_LVAL_PP(maxStalenessSeconds) > INT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" integer field to be <= %" 
PRId32, ZSTR_VAL(php_phongo_readpreference_ce->name), INT32_MAX); goto failure; } if (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" array field to not be present with \"primary\" mode", ZSTR_VAL(php_phongo_readpreference_ce->name)); goto failure; } mongoc_read_prefs_set_max_staleness_seconds(intern->read_preference, Z_LVAL_PP(maxStalenessSeconds)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"maxStalenessSeconds\" field to be integer", ZSTR_VAL(php_phongo_readpreference_ce->name)); goto failure; } } #endif return true; failure: mongoc_read_prefs_destroy(intern->read_preference); intern->read_preference = NULL; return false; } /* }}} */ /* {{{ proto void MongoDB\Driver\ReadPreference::__construct(int|string $mode[, array $tagSets = array()[, array $options = array()]]) Constructs a new ReadPreference */ static PHP_METHOD(ReadPreference, __construct) { php_phongo_readpreference_t* intern; zend_error_handling error_handling; zval* mode; zval* tagSets = NULL; zval* options = NULL; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_READPREFERENCE_OBJ_P(getThis()); /* Separate the tagSets zval, since we may end up modifying it in * php_phongo_read_preference_prep_tagsets() below. */ if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z|a/!a!", &mode, &tagSets, &options) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); if (Z_TYPE_P(mode) == IS_LONG) { switch (Z_LVAL_P(mode)) { case MONGOC_READ_PRIMARY: case MONGOC_READ_SECONDARY: case MONGOC_READ_PRIMARY_PREFERRED: case MONGOC_READ_SECONDARY_PREFERRED: case MONGOC_READ_NEAREST: intern->read_preference = mongoc_read_prefs_new(Z_LVAL_P(mode)); break; default: phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Invalid mode: %" PHONGO_LONG_FORMAT, Z_LVAL_P(mode)); return; } } else if (Z_TYPE_P(mode) == IS_STRING) { if (strcasecmp(Z_STRVAL_P(mode), "primary") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY); } else if (strcasecmp(Z_STRVAL_P(mode), "primaryPreferred") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_PRIMARY_PREFERRED); } else if (strcasecmp(Z_STRVAL_P(mode), "secondary") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY); } else if (strcasecmp(Z_STRVAL_P(mode), "secondaryPreferred") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_SECONDARY_PREFERRED); } else if (strcasecmp(Z_STRVAL_P(mode), "nearest") == 0) { intern->read_preference = mongoc_read_prefs_new(MONGOC_READ_NEAREST); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Invalid mode: '%s'", Z_STRVAL_P(mode)); return; } } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected mode to be integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(mode)); return; } if (tagSets) { bson_t* tags = bson_new(); php_phongo_read_preference_prep_tagsets(tagSets TSRMLS_CC); php_phongo_zval_to_bson(tagSets, PHONGO_BSON_NONE, (bson_t*) tags, NULL TSRMLS_CC); if (!php_phongo_read_preference_tags_are_valid(tags)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "tagSets must be an array of zero or more documents"); bson_destroy(tags); 
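/* The partially built tags document has been freed; the exception set above is reported to the caller */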
return; } if (!bson_empty(tags) && (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "tagSets may not be used with primary mode"); bson_destroy(tags); return; } mongoc_read_prefs_set_tags(intern->read_preference, tags); bson_destroy(tags); } if (options && php_array_exists(options, "maxStalenessSeconds")) { phongo_long maxStalenessSeconds = php_array_fetchc_long(options, "maxStalenessSeconds"); if (maxStalenessSeconds != MONGOC_NO_MAX_STALENESS) { if (maxStalenessSeconds < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be >= %d, %" PHONGO_LONG_FORMAT " given", MONGOC_SMALLEST_MAX_STALENESS_SECONDS, maxStalenessSeconds); return; } if (maxStalenessSeconds > INT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be <= %" PRId32 ", %" PHONGO_LONG_FORMAT " given", INT32_MAX, maxStalenessSeconds); return; } if (mongoc_read_prefs_get_mode(intern->read_preference) == MONGOC_READ_PRIMARY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "maxStalenessSeconds may not be used with primary mode"); return; } } mongoc_read_prefs_set_max_staleness_seconds(intern->read_preference, maxStalenessSeconds); } if (!mongoc_read_prefs_is_valid(intern->read_preference)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Read preference is not valid"); return; } } /* }}} */ /* {{{ proto void MongoDB\BSON\ReadPreference::__set_state(array $properties) */ static PHP_METHOD(ReadPreference, __set_state) { php_phongo_readpreference_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_readpreference_ce); intern = Z_READPREFERENCE_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_readpreference_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto integer MongoDB\Driver\ReadPreference::getMaxStalenessSeconds() Returns the ReadPreference maxStalenessSeconds value */ static PHP_METHOD(ReadPreference, getMaxStalenessSeconds) { php_phongo_readpreference_t* intern; intern = Z_READPREFERENCE_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(mongoc_read_prefs_get_max_staleness_seconds(intern->read_preference)); } /* }}} */ /* {{{ proto integer MongoDB\Driver\ReadPreference::getMode() Returns the ReadPreference mode */ static PHP_METHOD(ReadPreference, getMode) { php_phongo_readpreference_t* intern; intern = Z_READPREFERENCE_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(mongoc_read_prefs_get_mode(intern->read_preference)); } /* }}} */ /* {{{ proto array MongoDB\Driver\ReadPreference::getTagSets() Returns the ReadPreference tag sets */ static PHP_METHOD(ReadPreference, getTagSets) { php_phongo_readpreference_t* intern; const bson_t* tags; intern = Z_READPREFERENCE_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } tags = mongoc_read_prefs_get_tags(intern->read_preference); if (tags->len) { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; /* Use native arrays for debugging output */ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; php_phongo_bson_to_zval_ex(bson_get_data(tags), tags->len, &state); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 
1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } else { RETURN_NULL(); } } /* }}} */ static HashTable* php_phongo_readpreference_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_readpreference_t* intern; HashTable* props; const char* modeString = NULL; const bson_t* tags; mongoc_read_mode_t mode; intern = Z_READPREFERENCE_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 3); if (!intern->read_preference) { return props; } tags = mongoc_read_prefs_get_tags(intern->read_preference); mode = mongoc_read_prefs_get_mode(intern->read_preference); switch (mode) { case MONGOC_READ_PRIMARY: modeString = "primary"; break; case MONGOC_READ_PRIMARY_PREFERRED: modeString = "primaryPreferred"; break; case MONGOC_READ_SECONDARY: modeString = "secondary"; break; case MONGOC_READ_SECONDARY_PREFERRED: modeString = "secondaryPreferred"; break; case MONGOC_READ_NEAREST: modeString = "nearest"; break; default: /* Do nothing */ break; } if (modeString) { #if PHP_VERSION_ID >= 70000 zval z_mode; ZVAL_STRING(&z_mode, modeString); zend_hash_str_update(props, "mode", sizeof("mode") - 1, &z_mode); #else zval* z_mode; MAKE_STD_ZVAL(z_mode); ZVAL_STRING(z_mode, modeString, 1); zend_hash_update(props, "mode", sizeof("mode"), &z_mode, sizeof(z_mode), NULL); #endif } if (!bson_empty0(tags)) { /* Use PHONGO_TYPEMAP_NATIVE_ARRAY for the root type since tags is an * array; however, inner documents and arrays can use the default. */ php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; php_phongo_bson_to_zval_ex(bson_get_data(tags), tags->len, &state); #if PHP_VERSION_ID >= 70000 Z_ADDREF(state.zchild); zend_hash_str_update(props, "tags", sizeof("tags") - 1, &state.zchild); #else Z_ADDREF_P(state.zchild); zend_hash_update(props, "tags", sizeof("tags"), &state.zchild, sizeof(state.zchild), NULL); #endif zval_ptr_dtor(&state.zchild); } if (mongoc_read_prefs_get_max_staleness_seconds(intern->read_preference) != MONGOC_NO_MAX_STALENESS) { long maxStalenessSeconds = mongoc_read_prefs_get_max_staleness_seconds(intern->read_preference); #if PHP_VERSION_ID >= 70000 zval z_max_ss; ZVAL_LONG(&z_max_ss, maxStalenessSeconds); zend_hash_str_update(props, "maxStalenessSeconds", sizeof("maxStalenessSeconds") - 1, &z_max_ss); #else zval* z_max_ss; MAKE_STD_ZVAL(z_max_ss); ZVAL_LONG(z_max_ss, maxStalenessSeconds); zend_hash_update(props, "maxStalenessSeconds", sizeof("maxStalenessSeconds"), &z_max_ss, sizeof(z_max_ss), NULL); #endif } return props; } /* }}} */ /* {{{ proto array MongoDB\Driver\ReadPreference::bsonSerialize() */ static PHP_METHOD(ReadPreference, bsonSerialize) { if (zend_parse_parameters_none() == FAILURE) { return; } ZVAL_ARR(return_value, php_phongo_readpreference_get_properties_hash(getThis(), true TSRMLS_CC)); convert_to_object(return_value); } /* }}} */ /* {{{ MongoDB\Driver\ReadPreference function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_ReadPreference___construct, 0, 0, 1) ZEND_ARG_INFO(0, mode) ZEND_ARG_ARRAY_INFO(0, tagSets, 1) ZEND_ARG_ARRAY_INFO(0, options, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_ReadPreference___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_ReadPreference_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_readpreference_me[] = { /* clang-format off */ PHP_ME(ReadPreference, __construct, ai_ReadPreference___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ReadPreference, __set_state, 
ai_ReadPreference___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(ReadPreference, getMaxStalenessSeconds, ai_ReadPreference_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ReadPreference, getMode, ai_ReadPreference_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ReadPreference, getTagSets, ai_ReadPreference_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(ReadPreference, bsonSerialize, ai_ReadPreference_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\ReadPreference object handlers */ static zend_object_handlers php_phongo_handler_readpreference; static void php_phongo_readpreference_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_readpreference_t* intern = Z_OBJ_READPREFERENCE(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } if (intern->read_preference) { mongoc_read_prefs_destroy(intern->read_preference); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_readpreference_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_readpreference_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_readpreference_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_readpreference; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_readpreference_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_readpreference; return retval; } #endif } /* }}} */ static HashTable* php_phongo_readpreference_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_readpreference_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_readpreference_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_readpreference_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ /* }}} */ void php_phongo_readpreference_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "ReadPreference", php_phongo_readpreference_me); php_phongo_readpreference_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_readpreference_ce->create_object = php_phongo_readpreference_create_object; PHONGO_CE_FINAL(php_phongo_readpreference_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_readpreference_ce); zend_class_implements(php_phongo_readpreference_ce TSRMLS_CC, 1, php_phongo_serializable_ce); memcpy(&php_phongo_handler_readpreference, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_readpreference.get_debug_info = php_phongo_readpreference_get_debug_info; php_phongo_handler_readpreference.get_properties = php_phongo_readpreference_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_readpreference.free_obj = php_phongo_readpreference_free_object; php_phongo_handler_readpreference.offset = XtOffsetOf(php_phongo_readpreference_t, std); #endif zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_PRIMARY"), MONGOC_READ_PRIMARY TSRMLS_CC); zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_PRIMARY_PREFERRED"), MONGOC_READ_PRIMARY_PREFERRED TSRMLS_CC); 
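/* Illustrative aside (not part of the upstream source): the ReadPreference
 * constructor and accessors above are thin wrappers over libmongoc's
 * mongoc_read_prefs_t API. The disabled block below is a minimal sketch of the
 * same build-and-validate flow, using only libmongoc calls that already appear
 * in this file; the example_build_read_prefs helper name is hypothetical. */
#if 0
static bool example_build_read_prefs(void)
{
	bson_t               tags  = BSON_INITIALIZER;
	mongoc_read_prefs_t *prefs = mongoc_read_prefs_new(MONGOC_READ_SECONDARY_PREFERRED);
	bool                 valid;

	/* Tag sets and maxStalenessSeconds are rejected for primary mode by the
	 * constructor above; for non-primary modes they are simply forwarded. */
	mongoc_read_prefs_set_tags(prefs, &tags);
	mongoc_read_prefs_set_max_staleness_seconds(prefs, 90);

	/* Mirrors the final mongoc_read_prefs_is_valid() check in __construct */
	valid = mongoc_read_prefs_is_valid(prefs);

	mongoc_read_prefs_destroy(prefs);
	bson_destroy(&tags);
	return valid;
}
#endif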
zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_SECONDARY"), MONGOC_READ_SECONDARY TSRMLS_CC); zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_SECONDARY_PREFERRED"), MONGOC_READ_SECONDARY_PREFERRED TSRMLS_CC); zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("RP_NEAREST"), MONGOC_READ_NEAREST TSRMLS_CC); zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("NO_MAX_STALENESS"), MONGOC_NO_MAX_STALENESS TSRMLS_CC); zend_declare_class_constant_long(php_phongo_readpreference_ce, ZEND_STRL("SMALLEST_MAX_STALENESS_SECONDS"), MONGOC_SMALLEST_MAX_STALENESS_SECONDS TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Server.c0000644000076500000240000005344613572250757016163 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" zend_class_entry* php_phongo_server_ce; /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])) Executes a Command on this Server */ static PHP_METHOD(Server, executeCommand) { php_phongo_server_t* intern; char* db; phongo_zpp_char_len db_len; zval* command; zval* options = NULL; bool free_options = false; DECLARE_RETURN_VALUE_USED intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC); phongo_execute_command(intern->client, PHONGO_COMMAND_RAW, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC); if (free_options) { php_phongo_prep_legacy_option_free(options TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeReadCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])) Executes a ReadCommand on this Server */ static PHP_METHOD(Server, executeReadCommand) { php_phongo_server_t* intern; char* db; phongo_zpp_char_len db_len; zval* command; zval* options = NULL; DECLARE_RETURN_VALUE_USED intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } phongo_execute_command(intern->client, PHONGO_COMMAND_READ, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])) Executes a WriteCommand on this Server */ static PHP_METHOD(Server, executeWriteCommand) { php_phongo_server_t* intern; char* db; 
phongo_zpp_char_len db_len; zval* command; zval* options = NULL; DECLARE_RETURN_VALUE_USED intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } phongo_execute_command(intern->client, PHONGO_COMMAND_WRITE, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeReadWriteCommand(string $db, MongoDB\Driver\Command $command[, array $options = null])) Executes a ReadWriteCommand on this Server */ static PHP_METHOD(Server, executeReadWriteCommand) { php_phongo_server_t* intern; char* db; phongo_zpp_char_len db_len; zval* command; zval* options = NULL; DECLARE_RETURN_VALUE_USED intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|a!", &db, &db_len, &command, php_phongo_command_ce, &options) == FAILURE) { return; } phongo_execute_command(intern->client, PHONGO_COMMAND_READ_WRITE, db, command, options, intern->server_id, return_value, return_value_used TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Cursor MongoDB\Driver\Server::executeQuery(string $namespace, MongoDB\Driver\Query $query[, array $options = null])) Executes a Query on this Server */ static PHP_METHOD(Server, executeQuery) { php_phongo_server_t* intern; char* namespace; phongo_zpp_char_len namespace_len; zval* query; zval* options = NULL; bool free_options = false; DECLARE_RETURN_VALUE_USED intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &query, php_phongo_query_ce, &options) == FAILURE) { return; } options = php_phongo_prep_legacy_option(options, "readPreference", &free_options TSRMLS_CC); phongo_execute_query(intern->client, namespace, query, options, intern->server_id, return_value, return_value_used TSRMLS_CC); if (free_options) { php_phongo_prep_legacy_option_free(options TSRMLS_CC); } } /* }}} */ /* {{{ proto MongoDB\Driver\WriteResult MongoDB\Driver\Server::executeBulkWrite(string $namespace, MongoDB\Driver\BulkWrite $zbulk[, array $options = null]) Executes a BulkWrite (i.e. 
any number of insert, update, and delete ops) on this Server */ static PHP_METHOD(Server, executeBulkWrite) { php_phongo_server_t* intern; char* namespace; phongo_zpp_char_len namespace_len; zval* zbulk; php_phongo_bulkwrite_t* bulk; zval* options = NULL; bool free_options = false; DECLARE_RETURN_VALUE_USED intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "sO|z!", &namespace, &namespace_len, &zbulk, php_phongo_bulkwrite_ce, &options, php_phongo_writeconcern_ce) == FAILURE) { return; } bulk = Z_BULKWRITE_OBJ_P(zbulk); options = php_phongo_prep_legacy_option(options, "writeConcern", &free_options TSRMLS_CC); phongo_execute_bulk_write(intern->client, namespace, bulk, options, intern->server_id, return_value, return_value_used TSRMLS_CC); if (free_options) { php_phongo_prep_legacy_option_free(options TSRMLS_CC); } } /* }}} */ /* {{{ proto string MongoDB\Driver\Server::getHost() Returns the hostname for this Server */ static PHP_METHOD(Server, getHost) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { PHONGO_RETVAL_STRING(mongoc_server_description_host(sd)->host); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto array MongoDB\Driver\Server::getTags() Returns the currently configured tags for this Server */ static PHP_METHOD(Server, getTags) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { const bson_t* is_master = mongoc_server_description_ismaster(sd); bson_iter_t iter; if (bson_iter_init_find(&iter, is_master, "tags") && BSON_ITER_HOLDS_DOCUMENT(&iter)) { const uint8_t* bytes; uint32_t len; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; bson_iter_document(&iter, &len, &bytes); if (!php_phongo_bson_to_zval_ex(bytes, len, &state)) { /* Exception should already have been thrown */ zval_ptr_dtor(&state.zchild); mongoc_server_description_destroy(sd); return; } mongoc_server_description_destroy(sd); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } array_init(return_value); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto array MongoDB\Driver\Server::getInfo() Returns the last isMaster result document for this Server */ static PHP_METHOD(Server, getInfo) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { const bson_t* is_master = mongoc_server_description_ismaster(sd); php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; if (!php_phongo_bson_to_zval_ex(bson_get_data(is_master), is_master->len, &state)) { /* Exception should already have been thrown */ 
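/* Illustrative aside (not part of the upstream source): getTags() and
 * getInfo() read fields out of the cached "ismaster" document exposed by
 * mongoc_server_description_ismaster(). The disabled block below sketches the
 * underlying libbson iteration pattern with calls already used in this file;
 * the example_ismaster_has_tags helper name is hypothetical. */
#if 0
static bool example_ismaster_has_tags(const mongoc_server_description_t *sd)
{
	bson_iter_t   iter;
	const bson_t *is_master = mongoc_server_description_ismaster(sd);

	/* Locate the optional "tags" subdocument inside the ismaster reply */
	return bson_iter_init_find(&iter, is_master, "tags") &&
	       BSON_ITER_HOLDS_DOCUMENT(&iter);
}
#endif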
zval_ptr_dtor(&state.zchild); mongoc_server_description_destroy(sd); return; } mongoc_server_description_destroy(sd); #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto integer MongoDB\Driver\Server::getLatency() Returns the last measured latency for this Server */ static PHP_METHOD(Server, getLatency) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { RETVAL_LONG((phongo_long) mongoc_server_description_round_trip_time(sd)); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto integer MongoDB\Driver\Server::getPort() Returns the port for this Server */ static PHP_METHOD(Server, getPort) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { RETVAL_LONG(mongoc_server_description_host(sd)->port); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto integer MongoDB\Driver\Server::getType() Returns the node type of this Server */ static PHP_METHOD(Server, getType) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { RETVAL_LONG(php_phongo_server_description_type(sd)); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\Server::isPrimary() Returns whether this Server is a primary member of a replica set */ static PHP_METHOD(Server, isPrimary) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { RETVAL_BOOL(!strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_PRIMARY].name)); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\Server::isSecondary() Returns whether this Server is a secondary member of a replica set */ static PHP_METHOD(Server, isSecondary) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { RETVAL_BOOL(!strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_SECONDARY].name)); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\Server::isArbiter() Returns 
whether this Server is an arbiter member of a replica set */ static PHP_METHOD(Server, isArbiter) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { RETVAL_BOOL(!strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_ARBITER].name)); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\Server::isHidden() Returns whether this Server is a hidden member of a replica set */ static PHP_METHOD(Server, isHidden) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { bson_iter_t iter; RETVAL_BOOL(bson_iter_init_find_case(&iter, mongoc_server_description_ismaster(sd), "hidden") && bson_iter_as_bool(&iter)); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\Server::isPassive() Returns whether this Server is a passive member of a replica set */ static PHP_METHOD(Server, isPassive) { php_phongo_server_t* intern; mongoc_server_description_t* sd; intern = Z_SERVER_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if ((sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { bson_iter_t iter; RETVAL_BOOL(bson_iter_init_find_case(&iter, mongoc_server_description_ismaster(sd), "passive") && bson_iter_as_bool(&iter)); mongoc_server_description_destroy(sd); return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); } /* }}} */ /* {{{ MongoDB\Driver\Server function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeCommand, 0, 0, 2) ZEND_ARG_INFO(0, db) ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0) ZEND_ARG_INFO(0, options) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeRWCommand, 0, 0, 2) ZEND_ARG_INFO(0, db) ZEND_ARG_OBJ_INFO(0, command, MongoDB\\Driver\\Command, 0) ZEND_ARG_ARRAY_INFO(0, options, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeQuery, 0, 0, 2) ZEND_ARG_INFO(0, namespace) ZEND_ARG_OBJ_INFO(0, zquery, MongoDB\\Driver\\Query, 0) ZEND_ARG_INFO(0, options) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Server_executeBulkWrite, 0, 0, 2) ZEND_ARG_INFO(0, namespace) ZEND_ARG_OBJ_INFO(0, zbulk, MongoDB\\Driver\\BulkWrite, 0) ZEND_ARG_INFO(0, options) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Server_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_server_me[] = { /* clang-format off */ PHP_ME(Server, executeCommand, ai_Server_executeCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, executeReadCommand, ai_Server_executeRWCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, executeWriteCommand, ai_Server_executeRWCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, executeReadWriteCommand, ai_Server_executeRWCommand, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, executeQuery, ai_Server_executeQuery, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, executeBulkWrite, ai_Server_executeBulkWrite, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) 
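/* Illustrative aside (not part of the upstream source): isHidden() and
 * isPassive() above both reduce to a case-insensitive boolean lookup in the
 * server's ismaster document. The disabled block below is a minimal sketch of
 * that lookup, reusing only libbson calls already present in this file; the
 * example_ismaster_flag helper name is hypothetical. */
#if 0
static bool example_ismaster_flag(const mongoc_server_description_t *sd, const char *flag)
{
	bson_iter_t iter;

	/* e.g. flag = "hidden" or "passive"; a missing key simply yields false */
	return bson_iter_init_find_case(&iter, mongoc_server_description_ismaster(sd), flag) &&
	       bson_iter_as_bool(&iter);
}
#endif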
PHP_ME(Server, getHost, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, getTags, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, getInfo, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, getLatency, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, getPort, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, getType, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, isPrimary, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, isSecondary, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, isArbiter, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, isHidden, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Server, isPassive, ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Server_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Server_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Server object handlers */ static zend_object_handlers php_phongo_handler_server; static int php_phongo_server_compare_objects(zval* o1, zval* o2 TSRMLS_DC) /* {{{ */ { php_phongo_server_t* intern1; php_phongo_server_t* intern2; mongoc_server_description_t *sd1, *sd2; int retval = 0; intern1 = Z_SERVER_OBJ_P(o1); intern2 = Z_SERVER_OBJ_P(o2); sd1 = mongoc_client_get_server_description(intern1->client, intern1->server_id); sd2 = mongoc_client_get_server_description(intern2->client, intern2->server_id); if (sd1 && sd2) { retval = strcasecmp(mongoc_server_description_host(sd1)->host_and_port, mongoc_server_description_host(sd2)->host_and_port); } else { phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description(s)"); } if (sd1) { mongoc_server_description_destroy(sd1); } if (sd2) { mongoc_server_description_destroy(sd2); } return retval; } /* }}} */ static void php_phongo_server_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_server_t* intern = Z_OBJ_SERVER(object); zend_object_std_dtor(&intern->std TSRMLS_CC); #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_server_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_server_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_server_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_server; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_server_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_server; return retval; } #endif } /* }}} */ static HashTable* php_phongo_server_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_server_t* intern = NULL; zval retval = ZVAL_STATIC_INIT; mongoc_server_description_t* sd; *is_temp = 1; intern = Z_SERVER_OBJ_P(object); if (!(sd = mongoc_client_get_server_description(intern->client, intern->server_id))) { phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to get server description"); return NULL; } php_phongo_server_to_zval(&retval, sd); mongoc_server_description_destroy(sd); return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void 
php_phongo_server_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Server", php_phongo_server_me); php_phongo_server_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_server_ce->create_object = php_phongo_server_create_object; PHONGO_CE_FINAL(php_phongo_server_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_server_ce); memcpy(&php_phongo_handler_server, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_server.compare_objects = php_phongo_server_compare_objects; php_phongo_handler_server.get_debug_info = php_phongo_server_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_server.free_obj = php_phongo_server_free_object; php_phongo_handler_server.offset = XtOffsetOf(php_phongo_server_t, std); #endif zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_UNKNOWN"), PHONGO_SERVER_UNKNOWN TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_STANDALONE"), PHONGO_SERVER_STANDALONE TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_MONGOS"), PHONGO_SERVER_MONGOS TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_POSSIBLE_PRIMARY"), PHONGO_SERVER_POSSIBLE_PRIMARY TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_PRIMARY"), PHONGO_SERVER_RS_PRIMARY TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_SECONDARY"), PHONGO_SERVER_RS_SECONDARY TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_ARBITER"), PHONGO_SERVER_RS_ARBITER TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_OTHER"), PHONGO_SERVER_RS_OTHER TSRMLS_CC); zend_declare_class_constant_long(php_phongo_server_ce, ZEND_STRL("TYPE_RS_GHOST"), PHONGO_SERVER_RS_GHOST TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Session.c0000644000076500000240000005212513572250757016331 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" #include "php_array_api.h" #include "Session.h" zend_class_entry* php_phongo_session_ce; #define SESSION_CHECK_LIVELINESS(i, m) \ if (!(i)->client_session) { \ phongo_throw_exception( \ PHONGO_ERROR_LOGIC TSRMLS_CC, \ "Cannot call '%s', as the session has already been ended.", \ (m)); \ return; \ } static bool php_phongo_session_get_timestamp_parts(zval* obj, uint32_t* timestamp, uint32_t* increment TSRMLS_DC) { bool retval = false; #if PHP_VERSION_ID >= 70000 zval ztimestamp = ZVAL_STATIC_INIT; zval zincrement = ZVAL_STATIC_INIT; zend_call_method_with_0_params(obj, NULL, NULL, "getTimestamp", &ztimestamp); if (Z_ISUNDEF(ztimestamp) || EG(exception)) { goto cleanup; } zend_call_method_with_0_params(obj, NULL, NULL, "getIncrement", &zincrement); if (Z_ISUNDEF(zincrement) || EG(exception)) { goto cleanup; } *timestamp = Z_LVAL(ztimestamp); *increment = Z_LVAL(zincrement); #else zval* ztimestamp = NULL; zval* zincrement = NULL; zend_call_method_with_0_params(&obj, NULL, NULL, "getTimestamp", &ztimestamp); if (Z_ISUNDEF(ztimestamp) || EG(exception)) { goto cleanup; } zend_call_method_with_0_params(&obj, NULL, NULL, "getIncrement", &zincrement); if (Z_ISUNDEF(zincrement) || EG(exception)) { goto cleanup; } *timestamp = Z_LVAL_P(ztimestamp); *increment = Z_LVAL_P(zincrement); #endif retval = true; cleanup: if (!Z_ISUNDEF(ztimestamp)) { zval_ptr_dtor(&ztimestamp); } if (!Z_ISUNDEF(zincrement)) { zval_ptr_dtor(&zincrement); } return retval; } /* {{{ proto void MongoDB\Driver\Session::advanceClusterTime(array|object $clusterTime) Advances the cluster time for this Session */ static PHP_METHOD(Session, advanceClusterTime) { php_phongo_session_t* intern; zval* zcluster_time; bson_t cluster_time = BSON_INITIALIZER; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "advanceClusterTime") if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "A", &zcluster_time) == FAILURE) { return; } php_phongo_zval_to_bson(zcluster_time, PHONGO_BSON_NONE, &cluster_time, NULL TSRMLS_CC); /* An exception may be thrown during BSON conversion */ if (EG(exception)) { goto cleanup; } mongoc_client_session_advance_cluster_time(intern->client_session, &cluster_time); cleanup: bson_destroy(&cluster_time); } /* }}} */ /* {{{ proto void MongoDB\Driver\Session::advanceOperationTime(MongoDB\BSON\TimestampInterface $timestamp) Advances the operation time for this Session */ static PHP_METHOD(Session, advanceOperationTime) { php_phongo_session_t* intern; zval* ztimestamp; uint32_t timestamp = 0; uint32_t increment = 0; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "advanceOperationTime") if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "O", &ztimestamp, php_phongo_timestamp_interface_ce) == FAILURE) { return; } if (!php_phongo_session_get_timestamp_parts(ztimestamp, ×tamp, &increment TSRMLS_CC)) { return; } mongoc_client_session_advance_operation_time(intern->client_session, timestamp, increment); } /* }}} */ /* {{{ proto object|null MongoDB\Driver\Session::getClusterTime() Returns the cluster time for this Session */ static PHP_METHOD(Session, getClusterTime) { php_phongo_session_t* intern; const bson_t* cluster_time; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "getClusterTime") if (zend_parse_parameters_none() == FAILURE) { return; } cluster_time = 
mongoc_client_session_get_cluster_time(intern->client_session); if (!cluster_time) { RETURN_NULL(); } if (!php_phongo_bson_to_zval_ex(bson_get_data(cluster_time), cluster_time->len, &state)) { /* Exception should already have been thrown */ zval_ptr_dtor(&state.zchild); return; } #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } /* }}} */ /* {{{ proto object MongoDB\Driver\Session::getLogicalSessionId() Returns the logical session ID for this Session */ static PHP_METHOD(Session, getLogicalSessionId) { php_phongo_session_t* intern; const bson_t* lsid; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "getLogicalSessionId") if (zend_parse_parameters_none() == FAILURE) { return; } lsid = mongoc_client_session_get_lsid(intern->client_session); if (!php_phongo_bson_to_zval_ex(bson_get_data(lsid), lsid->len, &state)) { /* Exception should already have been thrown */ zval_ptr_dtor(&state.zchild); return; } #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&state.zchild, 0, 1); #else RETURN_ZVAL(state.zchild, 0, 1); #endif } /* }}} */ /* {{{ proto MongoDB\BSON\Timestamp|null MongoDB\Driver\Session::getOperationTime() Returns the operation time for this Session */ static PHP_METHOD(Session, getOperationTime) { php_phongo_session_t* intern; uint32_t timestamp, increment; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "getOperationTime") if (zend_parse_parameters_none() == FAILURE) { return; } mongoc_client_session_get_operation_time(intern->client_session, ×tamp, &increment); /* mongoc_client_session_get_operation_time() returns 0 for both parts if * the session has not been used. According to the causal consistency spec, * the operation time for an unused session is null. */ if (timestamp == 0 && increment == 0) { RETURN_NULL(); } php_phongo_new_timestamp_from_increment_and_timestamp(return_value, increment, timestamp TSRMLS_CC); } /* }}} */ /* {{{ proto MongoDB\Driver\Server|null MongoDB\Driver\Session::getServer() Returns the server this session is pinned to */ static PHP_METHOD(Session, getServer) { php_phongo_session_t* intern; uint32_t server_id = 0; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "getServer") if (zend_parse_parameters_none() == FAILURE) { return; } server_id = mongoc_client_session_get_server_id(intern->client_session); /* For sessions without a pinned server, 0 is returned. */ if (!server_id) { RETURN_NULL(); } phongo_server_init(return_value, mongoc_client_session_get_client(intern->client_session), server_id TSRMLS_CC); } /* }}} */ /* Creates a opts structure from an array optionally containing an RP, RC, * WC object, and/or maxCommitTimeMS int. Returns NULL if no options were found, * or there was an invalid option. If there was an invalid option or structure, * an exception will be thrown too. */ mongoc_transaction_opt_t* php_mongodb_session_parse_transaction_options(zval* options TSRMLS_DC) { mongoc_transaction_opt_t* opts = NULL; if (php_array_existsc(options, "maxCommitTimeMS")) { int64_t max_commit_time_ms = php_array_fetchc_long(options, "maxCommitTimeMS"); if (max_commit_time_ms < 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxCommitTimeMS\" option to be >= 0, %" PRId64 " given", max_commit_time_ms); /* Freeing opts is not needed here, as it can't be set yet. 
The * code is here to keep it consistent with the others in case more * options are added before this one. */ if (opts) { mongoc_transaction_opts_destroy(opts); } return NULL; } if (max_commit_time_ms > UINT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"maxCommitTimeMS\" option to be <= %" PRIu32 ", %" PRId64 " given", UINT32_MAX, max_commit_time_ms); /* Freeing opts is not needed here, as it can't be set yet. The * code is here to keep it consistent with the others in case more * options are added before this one. */ if (opts) { mongoc_transaction_opts_destroy(opts); } return NULL; } if (!opts) { opts = mongoc_transaction_opts_new(); } mongoc_transaction_opts_set_max_commit_time_ms(opts, max_commit_time_ms); } if (php_array_existsc(options, "readConcern")) { zval* read_concern = php_array_fetchc(options, "readConcern"); if (Z_TYPE_P(read_concern) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(read_concern), php_phongo_readconcern_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_readconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(read_concern)); if (opts) { mongoc_transaction_opts_destroy(opts); } return NULL; } if (!opts) { opts = mongoc_transaction_opts_new(); } mongoc_transaction_opts_set_read_concern(opts, phongo_read_concern_from_zval(read_concern TSRMLS_CC)); } if (php_array_existsc(options, "readPreference")) { zval* read_preference = php_array_fetchc(options, "readPreference"); if (Z_TYPE_P(read_preference) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(read_preference), php_phongo_readpreference_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readPreference\" option to be %s, %s given", ZSTR_VAL(php_phongo_readpreference_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(read_preference)); if (opts) { mongoc_transaction_opts_destroy(opts); } return NULL; } if (!opts) { opts = mongoc_transaction_opts_new(); } mongoc_transaction_opts_set_read_prefs(opts, phongo_read_preference_from_zval(read_preference TSRMLS_CC)); } if (php_array_existsc(options, "writeConcern")) { zval* write_concern = php_array_fetchc(options, "writeConcern"); if (Z_TYPE_P(write_concern) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(write_concern), php_phongo_writeconcern_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"writeConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_writeconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(write_concern)); if (opts) { mongoc_transaction_opts_destroy(opts); } return NULL; } if (!opts) { opts = mongoc_transaction_opts_new(); } mongoc_transaction_opts_set_write_concern(opts, phongo_write_concern_from_zval(write_concern TSRMLS_CC)); } return opts; } /* {{{ proto void MongoDB\Driver\Session::startTransaction([array $options = null]) Starts a new transaction */ static PHP_METHOD(Session, startTransaction) { php_phongo_session_t* intern; zval* options = NULL; mongoc_transaction_opt_t* txn_options = NULL; bson_error_t error; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "startTransaction") if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "|a!", &options) == FAILURE) { return; } if (options) { txn_options = php_mongodb_session_parse_transaction_options(options TSRMLS_CC); } if (EG(exception)) { return; } if (!mongoc_client_session_start_transaction(intern->client_session, txn_options, &error)) { 
phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); } if (txn_options) { mongoc_transaction_opts_destroy(txn_options); } } /* }}} */ /* {{{ proto void MongoDB\Driver\Session::commitTransaction(void) Commits an existing transaction */ static PHP_METHOD(Session, commitTransaction) { php_phongo_session_t* intern; bson_error_t error; bson_t reply; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "commitTransaction") if (zend_parse_parameters_none() == FAILURE) { return; } if (!mongoc_client_session_commit_transaction(intern->client_session, &reply, &error)) { phongo_throw_exception_from_bson_error_t_and_reply(&error, &reply TSRMLS_CC); bson_destroy(&reply); } } /* }}} */ /* {{{ proto void MongoDB\Driver\Session::abortTransaction(void) Aborts (rolls back) an existing transaction */ static PHP_METHOD(Session, abortTransaction) { php_phongo_session_t* intern; bson_error_t error; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "abortTransaction") if (zend_parse_parameters_none() == FAILURE) { return; } if (!mongoc_client_session_abort_transaction(intern->client_session, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); } } /* }}} */ /* {{{ proto void MongoDB\Driver\Session::endSession(void) Ends the session, and a running transaction if active */ static PHP_METHOD(Session, endSession) { php_phongo_session_t* intern; intern = Z_SESSION_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } mongoc_client_session_destroy(intern->client_session); intern->client_session = NULL; } /* }}} */ /* {{{ proto void MongoDB\Driver\Session::isInTransaction(void) Returns whether a multi-document transaction is in progress */ static PHP_METHOD(Session, isInTransaction) { php_phongo_session_t* intern; intern = Z_SESSION_OBJ_P(getThis()); SESSION_CHECK_LIVELINESS(intern, "isInTransaction") if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(mongoc_client_session_in_transaction(intern->client_session)); } /* }}} */ /* {{{ MongoDB\Driver\Session function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_Session_advanceClusterTime, 0, 0, 1) ZEND_ARG_INFO(0, clusterTime) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Session_advanceOperationTime, 0, 0, 1) ZEND_ARG_INFO(0, timestamp) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Session_startTransaction, 0, 0, 0) ZEND_ARG_ARRAY_INFO(0, options, 1) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_Session_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_session_me[] = { /* clang-format off */ PHP_ME(Session, abortTransaction, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, advanceClusterTime, ai_Session_advanceClusterTime, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, advanceOperationTime, ai_Session_advanceOperationTime, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, commitTransaction, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, endSession, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, getClusterTime, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, getLogicalSessionId, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, getOperationTime, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, getServer, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, isInTransaction, ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(Session, startTransaction, ai_Session_startTransaction, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) 
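/* Illustrative aside (not part of the upstream source): the transaction
 * methods above map one-to-one onto libmongoc's client-session transaction
 * API. The disabled block below is a minimal sketch of that lifecycle, using
 * only libmongoc calls already invoked in this file; the
 * example_run_transaction helper name is hypothetical. */
#if 0
static bool example_run_transaction(mongoc_client_session_t *cs)
{
	bson_error_t error;
	bson_t       reply;
	bool         ok;

	/* NULL opts corresponds to Session::startTransaction() without options */
	if (!mongoc_client_session_start_transaction(cs, NULL, &error)) {
		return false;
	}

	/* ... execute commands/queries bound to this session here ... */

	/* commitTransaction() surfaces both the error and the server reply */
	ok = mongoc_client_session_commit_transaction(cs, &reply, &error);
	bson_destroy(&reply);

	return ok;
}
#endif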
ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_Session_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_Session_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\Session object handlers */ static zend_object_handlers php_phongo_handler_session; static void php_phongo_session_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_session_t* intern = Z_OBJ_SESSION(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->client_session) { mongoc_client_session_destroy(intern->client_session); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_session_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_session_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_session_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_session; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_session_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_session; return retval; } #endif } /* }}} */ static HashTable* php_phongo_session_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_session_t* intern = NULL; const mongoc_session_opt_t* cs_opts; zval retval = ZVAL_STATIC_INIT; *is_temp = 1; intern = Z_SESSION_OBJ_P(object); array_init(&retval); if (intern->client_session) { const bson_t* lsid; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; /* Use native arrays for debugging output */ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; lsid = mongoc_client_session_get_lsid(intern->client_session); php_phongo_bson_to_zval_ex(bson_get_data(lsid), lsid->len, &state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "logicalSessionId", &state.zchild); #else ADD_ASSOC_ZVAL_EX(&retval, "logicalSessionId", state.zchild); #endif } else { ADD_ASSOC_NULL_EX(&retval, "logicalSessionId"); } if (intern->client_session) { const bson_t* cluster_time; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; /* Use native arrays for debugging output */ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; cluster_time = mongoc_client_session_get_cluster_time(intern->client_session); if (cluster_time) { php_phongo_bson_to_zval_ex(bson_get_data(cluster_time), cluster_time->len, &state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "clusterTime", &state.zchild); #else ADD_ASSOC_ZVAL_EX(&retval, "clusterTime", state.zchild); #endif } else { ADD_ASSOC_NULL_EX(&retval, "clusterTime"); } } else { ADD_ASSOC_NULL_EX(&retval, "clusterTime"); } if (intern->client_session) { cs_opts = mongoc_client_session_get_opts(intern->client_session); ADD_ASSOC_BOOL_EX(&retval, "causalConsistency", mongoc_session_opts_get_causal_consistency(cs_opts)); } else { ADD_ASSOC_NULL_EX(&retval, "causalConsistency"); } if (intern->client_session) { uint32_t timestamp, increment; mongoc_client_session_get_operation_time(intern->client_session, ×tamp, &increment); if (timestamp && increment) { #if PHP_VERSION_ID >= 70000 zval ztimestamp; 
php_phongo_new_timestamp_from_increment_and_timestamp(&ztimestamp, increment, timestamp TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "operationTime", &ztimestamp); #else zval* ztimestamp; MAKE_STD_ZVAL(ztimestamp); php_phongo_new_timestamp_from_increment_and_timestamp(ztimestamp, increment, timestamp TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "operationTime", ztimestamp); #endif } else { ADD_ASSOC_NULL_EX(&retval, "operationTime"); } } else { ADD_ASSOC_NULL_EX(&retval, "operationTime"); } if (intern->client_session) { uint32_t server_id = mongoc_client_session_get_server_id(intern->client_session); if (server_id) { #if PHP_VERSION_ID >= 70000 zval server; phongo_server_init(&server, mongoc_client_session_get_client(intern->client_session), server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", &server); #else zval* server = NULL; MAKE_STD_ZVAL(server); phongo_server_init(server, mongoc_client_session_get_client(intern->client_session), server_id TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "server", server); #endif } else { ADD_ASSOC_NULL_EX(&retval, "server"); } } else { ADD_ASSOC_NULL_EX(&retval, "server"); } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_session_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "Session", php_phongo_session_me); php_phongo_session_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_session_ce->create_object = php_phongo_session_create_object; PHONGO_CE_FINAL(php_phongo_session_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_session_ce); memcpy(&php_phongo_handler_session, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_session.get_debug_info = php_phongo_session_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_session.free_obj = php_phongo_session_free_object; php_phongo_handler_session.offset = XtOffsetOf(php_phongo_session_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/Session.h0000644000076500000240000000145713572250757016340 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef PHP_MONGODB_DRIVER_SESSION_H #define PHP_MONGODB_DRIVER_SESSION_H mongoc_transaction_opt_t* php_mongodb_session_parse_transaction_options(zval* txnOptions TSRMLS_DC); #endif /* PHP_MONGODB_DRIVER_SESSION_H */ mongodb-1.6.1/src/MongoDB/WriteConcern.c0000644000076500000240000004363713572250757017320 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_writeconcern_ce; /* Initialize the object from a HashTable and return whether it was successful. * An exception will be thrown on error. */ static bool php_phongo_writeconcern_init_from_hash(php_phongo_writeconcern_t* intern, HashTable* props TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval *w, *wtimeout, *j; intern->write_concern = mongoc_write_concern_new(); if ((w = zend_hash_str_find(props, "w", sizeof("w") - 1))) { if (Z_TYPE_P(w) == IS_LONG) { if (Z_LVAL_P(w) < -3) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"w\" integer field to be >= -3", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } mongoc_write_concern_set_w(intern->write_concern, Z_LVAL_P(w)); } else if (Z_TYPE_P(w) == IS_STRING) { if (strcmp(Z_STRVAL_P(w), PHONGO_WRITE_CONCERN_W_MAJORITY) == 0) { mongoc_write_concern_set_w(intern->write_concern, MONGOC_WRITE_CONCERN_W_MAJORITY); } else { mongoc_write_concern_set_wtag(intern->write_concern, Z_STRVAL_P(w)); } } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"w\" field to be integer or string", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } } if ((wtimeout = zend_hash_str_find(props, "wtimeout", sizeof("wtimeout") - 1))) { if (Z_TYPE_P(wtimeout) == IS_LONG) { if (Z_LVAL_P(wtimeout) < 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"wtimeout\" integer field to be >= 0", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } if (Z_LVAL_P(wtimeout) > INT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"wtimeout\" integer field to be <= %" PRId32, ZSTR_VAL(php_phongo_writeconcern_ce->name), INT32_MAX); goto failure; } mongoc_write_concern_set_wtimeout_int64(intern->write_concern, (int64_t) Z_LVAL_P(wtimeout)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"wtimeout\" field to be integer", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } } if ((j = zend_hash_str_find(props, "j", sizeof("j") - 1))) { if (Z_TYPE_P(j) == IS_TRUE || Z_TYPE_P(j) == IS_FALSE) { mongoc_write_concern_set_journal(intern->write_concern, zend_is_true(j)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"j\" field to be boolean", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } } #else zval **w, **wtimeout, **j; intern->write_concern = mongoc_write_concern_new(); if (zend_hash_find(props, "w", sizeof("w"), (void**) &w) == SUCCESS) { if (Z_TYPE_PP(w) == IS_LONG) { if (Z_LVAL_PP(w) < -3) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"w\" integer field to be >= -3", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } mongoc_write_concern_set_w(intern->write_concern, Z_LVAL_PP(w)); } else if (Z_TYPE_PP(w) == IS_STRING) { if (strcmp(Z_STRVAL_PP(w), PHONGO_WRITE_CONCERN_W_MAJORITY) == 0) { mongoc_write_concern_set_w(intern->write_concern, MONGOC_WRITE_CONCERN_W_MAJORITY); } else { mongoc_write_concern_set_wtag(intern->write_concern, Z_STRVAL_PP(w)); } } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s 
initialization requires \"w\" field to be integer or string", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } } if (zend_hash_find(props, "wtimeout", sizeof("wtimeout"), (void**) &wtimeout) == SUCCESS) { if (Z_TYPE_PP(wtimeout) == IS_LONG) { if (Z_LVAL_PP(wtimeout) < 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"wtimeout\" integer field to be >= 0", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } if (Z_LVAL_PP(wtimeout) > INT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"wtimeout\" integer field to be <= %" PRId32, ZSTR_VAL(php_phongo_writeconcern_ce->name), INT32_MAX); goto failure; } mongoc_write_concern_set_wtimeout_int64(intern->write_concern, (int64_t) Z_LVAL_PP(wtimeout)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"wtimeout\" field to be integer", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } } if (zend_hash_find(props, "j", sizeof("j"), (void**) &j) == SUCCESS) { if (Z_TYPE_PP(j) == IS_BOOL) { mongoc_write_concern_set_journal(intern->write_concern, Z_BVAL_PP(j)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s initialization requires \"j\" field to be boolean", ZSTR_VAL(php_phongo_writeconcern_ce->name)); goto failure; } } #endif return true; failure: mongoc_write_concern_destroy(intern->write_concern); intern->write_concern = NULL; return false; } /* }}} */ /* {{{ proto void MongoDB\Driver\WriteConcern::__construct(integer|string $w[, integer $wtimeout[, boolean $journal]]) Constructs a new WriteConcern */ static PHP_METHOD(WriteConcern, __construct) { php_phongo_writeconcern_t* intern; zend_error_handling error_handling; zval * w, *journal; phongo_long wtimeout = 0; zend_replace_error_handling(EH_THROW, phongo_exception_from_phongo_domain(PHONGO_ERROR_INVALID_ARGUMENT), &error_handling TSRMLS_CC); intern = Z_WRITECONCERN_OBJ_P(getThis()); if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "z|lz", &w, &wtimeout, &journal) == FAILURE) { zend_restore_error_handling(&error_handling TSRMLS_CC); return; } zend_restore_error_handling(&error_handling TSRMLS_CC); intern->write_concern = mongoc_write_concern_new(); if (Z_TYPE_P(w) == IS_LONG) { if (Z_LVAL_P(w) < -3) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected w to be >= -3, %ld given", Z_LVAL_P(w)); return; } mongoc_write_concern_set_w(intern->write_concern, Z_LVAL_P(w)); } else if (Z_TYPE_P(w) == IS_STRING) { if (strcmp(Z_STRVAL_P(w), PHONGO_WRITE_CONCERN_W_MAJORITY) == 0) { mongoc_write_concern_set_w(intern->write_concern, MONGOC_WRITE_CONCERN_W_MAJORITY); } else { mongoc_write_concern_set_wtag(intern->write_concern, Z_STRVAL_P(w)); } } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected w to be integer or string, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(w)); return; } switch (ZEND_NUM_ARGS()) { case 3: if (Z_TYPE_P(journal) != IS_NULL) { #ifdef ZEND_ENGINE_3 mongoc_write_concern_set_journal(intern->write_concern, zend_is_true(journal)); #else mongoc_write_concern_set_journal(intern->write_concern, Z_BVAL_P(journal)); #endif } /* fallthrough */ case 2: if (wtimeout < 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected wtimeout to be >= 0, %" PHONGO_LONG_FORMAT " given", wtimeout); return; } if (wtimeout > INT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected wtimeout to be <= %" 
PRId32 ", %" PHONGO_LONG_FORMAT " given", INT32_MAX, wtimeout); return; } mongoc_write_concern_set_wtimeout_int64(intern->write_concern, (int64_t) wtimeout); } } /* }}} */ /* {{{ proto void MongoDB\BSON\WriteConcern::__set_state(array $properties) */ static PHP_METHOD(WriteConcern, __set_state) { php_phongo_writeconcern_t* intern; HashTable* props; zval* array; if (zend_parse_parameters(ZEND_NUM_ARGS() TSRMLS_CC, "a", &array) == FAILURE) { RETURN_FALSE; } object_init_ex(return_value, php_phongo_writeconcern_ce); intern = Z_WRITECONCERN_OBJ_P(return_value); props = Z_ARRVAL_P(array); php_phongo_writeconcern_init_from_hash(intern, props TSRMLS_CC); } /* }}} */ /* {{{ proto string|integer|null MongoDB\Driver\WriteConcern::getW() Returns the WriteConcern "w" option */ static PHP_METHOD(WriteConcern, getW) { php_phongo_writeconcern_t* intern; const char* wtag; intern = Z_WRITECONCERN_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } wtag = mongoc_write_concern_get_wtag(intern->write_concern); if (wtag) { PHONGO_RETURN_STRING(wtag); } if (mongoc_write_concern_get_wmajority(intern->write_concern)) { PHONGO_RETURN_STRING(PHONGO_WRITE_CONCERN_W_MAJORITY); } if (mongoc_write_concern_get_w(intern->write_concern) != MONGOC_WRITE_CONCERN_W_DEFAULT) { RETURN_LONG(mongoc_write_concern_get_w(intern->write_concern)); } RETURN_NULL(); } /* }}} */ /* {{{ proto integer MongoDB\Driver\WriteConcern::getWtimeout() Returns the WriteConcern "wtimeout" option */ static PHP_METHOD(WriteConcern, getWtimeout) { php_phongo_writeconcern_t* intern; intern = Z_WRITECONCERN_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } /* Note: PHP currently enforces that wimeoutMS is a 32-bit integer, so * casting will never truncate the value. This may change with PHPC-1411. */ RETURN_LONG((int32_t) mongoc_write_concern_get_wtimeout_int64(intern->write_concern)); } /* }}} */ /* {{{ proto null|boolean MongoDB\Driver\WriteConcern::getJournal() Returns the WriteConcern "journal" option */ static PHP_METHOD(WriteConcern, getJournal) { php_phongo_writeconcern_t* intern; intern = Z_WRITECONCERN_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if (mongoc_write_concern_journal_is_set(intern->write_concern)) { RETURN_BOOL(mongoc_write_concern_get_journal(intern->write_concern)); } RETURN_NULL(); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\WriteConcern::isDefault() Returns whether the write concern has not been modified (i.e. from a Manager with no write concern URI options). */ static PHP_METHOD(WriteConcern, isDefault) { php_phongo_writeconcern_t* intern; intern = Z_WRITECONCERN_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(mongoc_write_concern_is_default(intern->write_concern)); } /* }}} */ static HashTable* php_phongo_write_concern_get_properties_hash(zval* object, bool is_debug TSRMLS_DC) /* {{{ */ { php_phongo_writeconcern_t* intern; HashTable* props; const char* wtag; int32_t w; int32_t wtimeout; intern = Z_WRITECONCERN_OBJ_P(object); PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, 4); if (!intern->write_concern) { return props; } wtag = mongoc_write_concern_get_wtag(intern->write_concern); w = mongoc_write_concern_get_w(intern->write_concern); /* Note: PHP currently enforces that wimeoutMS is a 32-bit integer, so * casting will never truncate the value. This may change with PHPC-1411. 
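 * The property hash assembled below omits a wtimeout of 0, which is what an
 * unset option reports.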
*/ wtimeout = (int32_t) mongoc_write_concern_get_wtimeout_int64(intern->write_concern); #if PHP_VERSION_ID >= 70000 { zval z_w; if (wtag) { ZVAL_STRING(&z_w, wtag); zend_hash_str_update(props, "w", sizeof("w") - 1, &z_w); } else if (mongoc_write_concern_get_wmajority(intern->write_concern)) { ZVAL_STRING(&z_w, PHONGO_WRITE_CONCERN_W_MAJORITY); zend_hash_str_update(props, "w", sizeof("w") - 1, &z_w); } else if (w != MONGOC_WRITE_CONCERN_W_DEFAULT) { ZVAL_LONG(&z_w, w); zend_hash_str_update(props, "w", sizeof("w") - 1, &z_w); } if (mongoc_write_concern_journal_is_set(intern->write_concern)) { zval z_j; ZVAL_BOOL(&z_j, mongoc_write_concern_get_journal(intern->write_concern)); zend_hash_str_update(props, "j", sizeof("j") - 1, &z_j); } if (wtimeout != 0) { zval z_wtimeout; ZVAL_LONG(&z_wtimeout, wtimeout); zend_hash_str_update(props, "wtimeout", sizeof("wtimeout") - 1, &z_wtimeout); } #else { zval* z_w; if (wtag) { MAKE_STD_ZVAL(z_w); ZVAL_STRING(z_w, wtag, 1); zend_hash_update(props, "w", sizeof("w"), &z_w, sizeof(z_w), NULL); } else if (mongoc_write_concern_get_wmajority(intern->write_concern)) { MAKE_STD_ZVAL(z_w); ZVAL_STRING(z_w, PHONGO_WRITE_CONCERN_W_MAJORITY, 1); zend_hash_update(props, "w", sizeof("w"), &z_w, sizeof(z_w), NULL); } else if (w != MONGOC_WRITE_CONCERN_W_DEFAULT) { MAKE_STD_ZVAL(z_w); ZVAL_LONG(z_w, w); zend_hash_update(props, "w", sizeof("w"), &z_w, sizeof(z_w), NULL); } if (mongoc_write_concern_journal_is_set(intern->write_concern)) { zval* z_j; MAKE_STD_ZVAL(z_j); ZVAL_BOOL(z_j, mongoc_write_concern_get_journal(intern->write_concern)); zend_hash_update(props, "j", sizeof("j"), &z_j, sizeof(z_j), NULL); } if (wtimeout != 0) { zval* z_wtimeout; MAKE_STD_ZVAL(z_wtimeout); ZVAL_LONG(z_wtimeout, wtimeout); zend_hash_update(props, "wtimeout", sizeof("wtimeout"), &z_wtimeout, sizeof(z_wtimeout), NULL); } #endif } return props; } /* }}} */ /* {{{ proto array MongoDB\Driver\WriteConcern::bsonSerialize() */ static PHP_METHOD(WriteConcern, bsonSerialize) { if (zend_parse_parameters_none() == FAILURE) { return; } ZVAL_ARR(return_value, php_phongo_write_concern_get_properties_hash(getThis(), true TSRMLS_CC)); convert_to_object(return_value); } /* }}} */ /* {{{ MongoDB\Driver\WriteConcern function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_WriteConcern___construct, 0, 0, 1) ZEND_ARG_INFO(0, w) ZEND_ARG_INFO(0, wtimeout) ZEND_ARG_INFO(0, journal) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_WriteConcern___set_state, 0, 0, 1) ZEND_ARG_ARRAY_INFO(0, properties, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(ai_WriteConcern_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_writeconcern_me[] = { /* clang-format off */ PHP_ME(WriteConcern, __construct, ai_WriteConcern___construct, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteConcern, __set_state, ai_WriteConcern___set_state, ZEND_ACC_PUBLIC | ZEND_ACC_STATIC) PHP_ME(WriteConcern, getW, ai_WriteConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteConcern, getWtimeout, ai_WriteConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteConcern, getJournal, ai_WriteConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteConcern, isDefault, ai_WriteConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteConcern, bsonSerialize, ai_WriteConcern_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\WriteConcern object handlers */ static zend_object_handlers php_phongo_handler_writeconcern; static void php_phongo_writeconcern_free_object(phongo_free_object_arg* 
object TSRMLS_DC) /* {{{ */ { php_phongo_writeconcern_t* intern = Z_OBJ_WRITECONCERN(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->properties) { zend_hash_destroy(intern->properties); FREE_HASHTABLE(intern->properties); } if (intern->write_concern) { mongoc_write_concern_destroy(intern->write_concern); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_writeconcern_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_writeconcern_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_writeconcern_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_writeconcern; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_writeconcern_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_writeconcern; return retval; } #endif } /* }}} */ static HashTable* php_phongo_writeconcern_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { *is_temp = 1; return php_phongo_write_concern_get_properties_hash(object, true TSRMLS_CC); } /* }}} */ static HashTable* php_phongo_writeconcern_get_properties(zval* object TSRMLS_DC) /* {{{ */ { return php_phongo_write_concern_get_properties_hash(object, false TSRMLS_CC); } /* }}} */ void php_phongo_writeconcern_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "WriteConcern", php_phongo_writeconcern_me); php_phongo_writeconcern_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_writeconcern_ce->create_object = php_phongo_writeconcern_create_object; PHONGO_CE_FINAL(php_phongo_writeconcern_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_writeconcern_ce); zend_class_implements(php_phongo_writeconcern_ce TSRMLS_CC, 1, php_phongo_serializable_ce); memcpy(&php_phongo_handler_writeconcern, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_writeconcern.get_debug_info = php_phongo_writeconcern_get_debug_info; php_phongo_handler_writeconcern.get_properties = php_phongo_writeconcern_get_properties; #if PHP_VERSION_ID >= 70000 php_phongo_handler_writeconcern.free_obj = php_phongo_writeconcern_free_object; php_phongo_handler_writeconcern.offset = XtOffsetOf(php_phongo_writeconcern_t, std); #endif zend_declare_class_constant_stringl(php_phongo_writeconcern_ce, ZEND_STRL("MAJORITY"), ZEND_STRL(PHONGO_WRITE_CONCERN_W_MAJORITY) TSRMLS_CC); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/WriteConcernError.c0000644000076500000240000001353713572250757020326 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_writeconcernerror_ce; /* {{{ proto integer MongoDB\Driver\WriteConcernError::getCode() Returns the MongoDB error code */ static PHP_METHOD(WriteConcernError, getCode) { php_phongo_writeconcernerror_t* intern; intern = Z_WRITECONCERNERROR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->code); } /* }}} */ /* {{{ proto mixed MongoDB\Driver\WriteConcernError::getInfo() Returns additional metadata for the error */ static PHP_METHOD(WriteConcernError, getInfo) { php_phongo_writeconcernerror_t* intern; intern = Z_WRITECONCERNERROR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if (!Z_ISUNDEF(intern->info)) { #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&intern->info, 1, 0); #else RETURN_ZVAL(intern->info, 1, 0); #endif } } /* }}} */ /* {{{ proto string MongoDB\Driver\WriteConcernError::getMessage() Returns the actual error message from the server */ static PHP_METHOD(WriteConcernError, getMessage) { php_phongo_writeconcernerror_t* intern; intern = Z_WRITECONCERNERROR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETURN_STRING(intern->message); } /* }}} */ /* {{{ MongoDB\Driver\WriteConcernError function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_WriteConcernError_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_writeconcernerror_me[] = { /* clang-format off */ PHP_ME(WriteConcernError, getCode, ai_WriteConcernError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteConcernError, getInfo, ai_WriteConcernError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteConcernError, getMessage, ai_WriteConcernError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_WriteConcernError_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_WriteConcernError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\WriteConcernError object handlers */ static zend_object_handlers php_phongo_handler_writeconcernerror; static void php_phongo_writeconcernerror_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_writeconcernerror_t* intern = Z_OBJ_WRITECONCERNERROR(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->message) { efree(intern->message); } if (!Z_ISUNDEF(intern->info)) { zval_ptr_dtor(&intern->info); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_writeconcernerror_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_writeconcernerror_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_writeconcernerror_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_writeconcernerror; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_writeconcernerror_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_writeconcernerror; return retval; } #endif } /* }}} */ static HashTable* php_phongo_writeconcernerror_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { 
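	/* Assemble the debug-info array exposed by var_dump(): the server-reported
	 * "message" and "code", plus the optional "info" document (null when absent). */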
php_phongo_writeconcernerror_t* intern; zval retval = ZVAL_STATIC_INIT; *is_temp = 1; intern = Z_WRITECONCERNERROR_OBJ_P(object); array_init_size(&retval, 3); ADD_ASSOC_STRING(&retval, "message", intern->message); ADD_ASSOC_LONG_EX(&retval, "code", intern->code); if (!Z_ISUNDEF(intern->info)) { #if PHP_VERSION_ID >= 70000 Z_ADDREF(intern->info); ADD_ASSOC_ZVAL_EX(&retval, "info", &intern->info); #else Z_ADDREF_P(intern->info); ADD_ASSOC_ZVAL_EX(&retval, "info", intern->info); #endif } else { ADD_ASSOC_NULL_EX(&retval, "info"); } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_writeconcernerror_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "WriteConcernError", php_phongo_writeconcernerror_me); php_phongo_writeconcernerror_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_writeconcernerror_ce->create_object = php_phongo_writeconcernerror_create_object; PHONGO_CE_FINAL(php_phongo_writeconcernerror_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_writeconcernerror_ce); memcpy(&php_phongo_handler_writeconcernerror, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_writeconcernerror.get_debug_info = php_phongo_writeconcernerror_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_writeconcernerror.free_obj = php_phongo_writeconcernerror_free_object; php_phongo_handler_writeconcernerror.offset = XtOffsetOf(php_phongo_writeconcernerror_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/WriteError.c0000644000076500000240000001375313572250757017016 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "phongo_compat.h" #include "php_phongo.h" zend_class_entry* php_phongo_writeerror_ce; /* {{{ proto integer MongoDB\Driver\WriteError::getCode() Returns the MongoDB error code */ static PHP_METHOD(WriteError, getCode) { php_phongo_writeerror_t* intern; intern = Z_WRITEERROR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->code); } /* }}} */ /* {{{ proto integer MongoDB\Driver\WriteError::getIndex() Returns the index of the operation in the BulkWrite to which this WriteError corresponds. 
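   Indexes are zero-based and correspond to the order in which operations were added to the BulkWrite.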
*/ static PHP_METHOD(WriteError, getIndex) { php_phongo_writeerror_t* intern; intern = Z_WRITEERROR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_LONG(intern->index); } /* }}} */ /* {{{ proto string MongoDB\Driver\WriteError::getMessage() Returns the actual error message from the server */ static PHP_METHOD(WriteError, getMessage) { php_phongo_writeerror_t* intern; intern = Z_WRITEERROR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_RETURN_STRING(intern->message); } /* }}} */ /* {{{ proto mixed MongoDB\Driver\WriteError::getInfo() Returns additional metadata for the error */ static PHP_METHOD(WriteError, getInfo) { php_phongo_writeerror_t* intern; intern = Z_WRITEERROR_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } if (!Z_ISUNDEF(intern->info)) { #if PHP_VERSION_ID >= 70000 RETURN_ZVAL(&intern->info, 1, 0); #else RETURN_ZVAL(intern->info, 1, 0); #endif } } /* }}} */ /* {{{ MongoDB\Driver\WriteError function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_WriteError_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_writeerror_me[] = { /* clang-format off */ PHP_ME(WriteError, getCode, ai_WriteError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteError, getIndex, ai_WriteError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteError, getMessage, ai_WriteError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteError, getInfo, ai_WriteError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_WriteError_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_WriteError_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\WriteError object handlers */ static zend_object_handlers php_phongo_handler_writeerror; static void php_phongo_writeerror_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_writeerror_t* intern = Z_OBJ_WRITEERROR(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if (intern->message) { efree(intern->message); } if (!Z_ISUNDEF(intern->info)) { zval_ptr_dtor(&intern->info); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_writeerror_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_writeerror_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_writeerror_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_writeerror; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_writeerror_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_writeerror; return retval; } #endif } /* }}} */ static HashTable* php_phongo_writeerror_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_writeerror_t* intern; zval retval = ZVAL_STATIC_INIT; *is_temp = 1; intern = Z_WRITEERROR_OBJ_P(object); array_init_size(&retval, 3); ADD_ASSOC_STRING(&retval, "message", intern->message); ADD_ASSOC_LONG_EX(&retval, "code", intern->code); ADD_ASSOC_LONG_EX(&retval, "index", intern->index); if (!Z_ISUNDEF(intern->info)) { #if PHP_VERSION_ID >= 70000 Z_ADDREF(intern->info); ADD_ASSOC_ZVAL_EX(&retval, "info", &intern->info); #else 
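	/* PHP 5: intern->info is a zval pointer, so take a reference before adding it to the debug array */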
Z_ADDREF_P(intern->info); ADD_ASSOC_ZVAL_EX(&retval, "info", intern->info); #endif } else { ADD_ASSOC_NULL_EX(&retval, "info"); } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_writeerror_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "WriteError", php_phongo_writeerror_me); php_phongo_writeerror_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_writeerror_ce->create_object = php_phongo_writeerror_create_object; PHONGO_CE_FINAL(php_phongo_writeerror_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_writeerror_ce); memcpy(&php_phongo_handler_writeerror, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_writeerror.get_debug_info = php_phongo_writeerror_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_writeerror.free_obj = php_phongo_writeerror_free_object; php_phongo_handler_writeerror.offset = XtOffsetOf(php_phongo_writeerror_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/MongoDB/WriteResult.c0000644000076500000240000003706313572250757017203 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_array_api.h" #include "phongo_compat.h" #include "php_phongo.h" #include "php_bson.h" #define PHONGO_WRITERESULT_RETURN_LONG_FROM_BSON_INT32(iter, bson, key) \ if (bson_iter_init_find((iter), (bson), (key)) && BSON_ITER_HOLDS_INT32((iter))) { \ RETURN_LONG(bson_iter_int32((iter))); \ } zend_class_entry* php_phongo_writeresult_ce; static bool php_phongo_writeresult_get_writeconcernerror(php_phongo_writeresult_t* intern, zval* return_value TSRMLS_DC) /* {{{ */ { bson_iter_t iter, child; #if PHP_VERSION_ID >= 70000 zval writeconcernerror; #else zval* writeconcernerror = NULL; #endif ZVAL_NULL(return_value); if (bson_iter_init_find(&iter, intern->reply, "writeConcernErrors") && BSON_ITER_HOLDS_ARRAY(&iter) && bson_iter_recurse(&iter, &child)) { while (bson_iter_next(&child)) { bson_t cbson; uint32_t len; const uint8_t* data; if (!BSON_ITER_HOLDS_DOCUMENT(&child)) { continue; } bson_iter_document(&child, &len, &data); if (!bson_init_static(&cbson, data, len)) { continue; } #if PHP_VERSION_ID >= 70000 if (!phongo_writeconcernerror_init(&writeconcernerror, &cbson TSRMLS_CC)) { zval_ptr_dtor(&writeconcernerror); return false; } ZVAL_ZVAL(return_value, &writeconcernerror, 1, 1); #else MAKE_STD_ZVAL(writeconcernerror); if (!phongo_writeconcernerror_init(writeconcernerror, &cbson TSRMLS_CC)) { zval_ptr_dtor(&writeconcernerror); return false; } ZVAL_ZVAL(return_value, writeconcernerror, 1, 1); #endif return true; } } return true; } /* }}} */ static bool php_phongo_writeresult_get_writeerrors(php_phongo_writeresult_t* intern, zval* return_value TSRMLS_DC) /* {{{ */ { bson_iter_t iter, child; array_init(return_value); if (bson_iter_init_find(&iter, intern->reply, "writeErrors") && BSON_ITER_HOLDS_ARRAY(&iter) && bson_iter_recurse(&iter, &child)) { while (bson_iter_next(&child)) { bson_t cbson; uint32_t len; const uint8_t* data; #if PHP_VERSION_ID >= 70000 zval writeerror; #else zval* writeerror = NULL; #endif if (!BSON_ITER_HOLDS_DOCUMENT(&child)) { continue; } bson_iter_document(&child, &len, &data); if (!bson_init_static(&cbson, data, len)) { continue; } #if PHP_VERSION_ID >= 70000 if (!phongo_writeerror_init(&writeerror, &cbson TSRMLS_CC)) { zval_ptr_dtor(&writeerror); continue; } add_next_index_zval(return_value, &writeerror); #else MAKE_STD_ZVAL(writeerror); if (!phongo_writeerror_init(writeerror, &cbson TSRMLS_CC)) { zval_ptr_dtor(&writeerror); continue; } add_next_index_zval(return_value, writeerror); #endif } } return true; } /* }}} */ /* {{{ proto integer|null MongoDB\Driver\WriteResult::getInsertedCount() Returns the number of documents that were inserted */ static PHP_METHOD(WriteResult, getInsertedCount) { bson_iter_t iter; php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_WRITERESULT_RETURN_LONG_FROM_BSON_INT32(&iter, intern->reply, "nInserted"); } /* }}} */ /* {{{ proto integer|null MongoDB\Driver\WriteResult::getMatchedCount() Returns the number of documents that matched the update criteria */ static PHP_METHOD(WriteResult, getMatchedCount) { bson_iter_t iter; php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_WRITERESULT_RETURN_LONG_FROM_BSON_INT32(&iter, intern->reply, "nMatched"); } /* }}} */ /* {{{ proto integer|null MongoDB\Driver\WriteResult::getModifiedCount() Returns the number of documents that were actually modified by an 
update */ static PHP_METHOD(WriteResult, getModifiedCount) { bson_iter_t iter; php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_WRITERESULT_RETURN_LONG_FROM_BSON_INT32(&iter, intern->reply, "nModified"); } /* }}} */ /* {{{ proto integer|null MongoDB\Driver\WriteResult::getDeletedCount() Returns the number of documents that were deleted */ static PHP_METHOD(WriteResult, getDeletedCount) { bson_iter_t iter; php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_WRITERESULT_RETURN_LONG_FROM_BSON_INT32(&iter, intern->reply, "nRemoved"); } /* }}} */ /* {{{ proto integer|null MongoDB\Driver\WriteResult::getUpsertedCount() Returns the number of documents that were upserted */ static PHP_METHOD(WriteResult, getUpsertedCount) { bson_iter_t iter; php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } PHONGO_WRITERESULT_RETURN_LONG_FROM_BSON_INT32(&iter, intern->reply, "nUpserted"); } /* }}} */ /* {{{ proto MongoDB\Driver\Server MongoDB\Driver\WriteResult::getServer() Returns the Server from which the result originated */ static PHP_METHOD(WriteResult, getServer) { php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } phongo_server_init(return_value, intern->client, intern->server_id TSRMLS_CC); } /* }}} */ /* {{{ proto array MongoDB\Driver\WriteResult::getUpsertedIds() Returns the identifiers generated by the server for upsert operations. */ static PHP_METHOD(WriteResult, getUpsertedIds) { bson_iter_t iter, child; php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } array_init(return_value); if (bson_iter_init_find(&iter, intern->reply, "upserted") && BSON_ITER_HOLDS_ARRAY(&iter) && bson_iter_recurse(&iter, &child)) { while (bson_iter_next(&child)) { uint32_t data_len; const uint8_t* data = NULL; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; /* Use PHONGO_TYPEMAP_NATIVE_ARRAY for the root type so we can * easily access the "index" and "_id" fields. 
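	 * Each element of the server's "upserted" array is a document of the form
	 * { "index": <position of the operation in the bulk>, "_id": <upserted id> },
	 * which is why both fields can be read with the php_array_fetch helpers below.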
*/ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; if (!BSON_ITER_HOLDS_DOCUMENT(&child)) { continue; } bson_iter_document(&child, &data_len, &data); if (php_phongo_bson_to_zval_ex(data, data_len, &state)) { #if PHP_VERSION_ID >= 70000 zval* zid = php_array_fetchc(&state.zchild, "_id"); add_index_zval(return_value, php_array_fetchc_long(&state.zchild, "index"), zid); zval_add_ref(zid); #else zval* zid = php_array_fetchc(state.zchild, "_id"); add_index_zval(return_value, php_array_fetchc_long(state.zchild, "index"), zid); zval_add_ref(&zid); #endif } zval_ptr_dtor(&state.zchild); } } } /* }}} */ /* {{{ proto WriteConcernError MongoDB\Driver\WriteResult::getWriteConcernError() Return any write concern error that occurred */ static PHP_METHOD(WriteResult, getWriteConcernError) { php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } php_phongo_writeresult_get_writeconcernerror(intern, return_value TSRMLS_CC); } /* }}} */ /* {{{ proto WriteError[] MongoDB\Driver\WriteResult::getWriteErrors() Returns any write errors that occurred */ static PHP_METHOD(WriteResult, getWriteErrors) { php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } php_phongo_writeresult_get_writeerrors(intern, return_value TSRMLS_CC); } /* }}} */ /* {{{ proto boolean MongoDB\Driver\WriteResult::isAcknowledged() Returns whether the write operation was acknowledged (based on the write concern). */ static PHP_METHOD(WriteResult, isAcknowledged) { php_phongo_writeresult_t* intern; intern = Z_WRITERESULT_OBJ_P(getThis()); if (zend_parse_parameters_none() == FAILURE) { return; } RETURN_BOOL(mongoc_write_concern_is_acknowledged(intern->write_concern)); } /* }}} */ /* {{{ MongoDB\Driver\WriteResult function entries */ ZEND_BEGIN_ARG_INFO_EX(ai_WriteResult_void, 0, 0, 0) ZEND_END_ARG_INFO() static zend_function_entry php_phongo_writeresult_me[] = { /* clang-format off */ PHP_ME(WriteResult, getInsertedCount, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getMatchedCount, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getModifiedCount, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getDeletedCount, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getUpsertedCount, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getServer, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getUpsertedIds, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getWriteConcernError, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, getWriteErrors, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_ME(WriteResult, isAcknowledged, ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) ZEND_NAMED_ME(__construct, PHP_FN(MongoDB_disabled___construct), ai_WriteResult_void, ZEND_ACC_PRIVATE | ZEND_ACC_FINAL) ZEND_NAMED_ME(__wakeup, PHP_FN(MongoDB_disabled___wakeup), ai_WriteResult_void, ZEND_ACC_PUBLIC | ZEND_ACC_FINAL) PHP_FE_END /* clang-format on */ }; /* }}} */ /* {{{ MongoDB\Driver\WriteResult object handlers */ static zend_object_handlers php_phongo_handler_writeresult; static void php_phongo_writeresult_free_object(phongo_free_object_arg* object TSRMLS_DC) /* {{{ */ { php_phongo_writeresult_t* intern = Z_OBJ_WRITERESULT(object); zend_object_std_dtor(&intern->std TSRMLS_CC); if 
(intern->reply) { bson_destroy(intern->reply); } if (intern->write_concern) { mongoc_write_concern_destroy(intern->write_concern); } #if PHP_VERSION_ID < 70000 efree(intern); #endif } /* }}} */ static phongo_create_object_retval php_phongo_writeresult_create_object(zend_class_entry* class_type TSRMLS_DC) /* {{{ */ { php_phongo_writeresult_t* intern = NULL; intern = PHONGO_ALLOC_OBJECT_T(php_phongo_writeresult_t, class_type); zend_object_std_init(&intern->std, class_type TSRMLS_CC); object_properties_init(&intern->std, class_type); #if PHP_VERSION_ID >= 70000 intern->std.handlers = &php_phongo_handler_writeresult; return &intern->std; #else { zend_object_value retval; retval.handle = zend_objects_store_put(intern, (zend_objects_store_dtor_t) zend_objects_destroy_object, php_phongo_writeresult_free_object, NULL TSRMLS_CC); retval.handlers = &php_phongo_handler_writeresult; return retval; } #endif } /* }}} */ static HashTable* php_phongo_writeresult_get_debug_info(zval* object, int* is_temp TSRMLS_DC) /* {{{ */ { php_phongo_writeresult_t* intern; zval retval = ZVAL_STATIC_INIT; bson_iter_t iter; intern = Z_WRITERESULT_OBJ_P(object); *is_temp = 1; array_init_size(&retval, 9); #define PHONGO_WRITERESULT_SCP(field) \ if (bson_iter_init_find(&iter, intern->reply, (field)) && BSON_ITER_HOLDS_INT32(&iter)) { \ ADD_ASSOC_LONG_EX(&retval, (field), bson_iter_int32(&iter)); \ } else { \ ADD_ASSOC_NULL_EX(&retval, (field)); \ } PHONGO_WRITERESULT_SCP("nInserted"); PHONGO_WRITERESULT_SCP("nMatched"); PHONGO_WRITERESULT_SCP("nModified"); PHONGO_WRITERESULT_SCP("nRemoved"); PHONGO_WRITERESULT_SCP("nUpserted"); #undef PHONGO_WRITERESULT_SCP if (bson_iter_init_find(&iter, intern->reply, "upserted") && BSON_ITER_HOLDS_ARRAY(&iter)) { uint32_t len; const uint8_t* data; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; /* Use native arrays for debugging output */ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; bson_iter_array(&iter, &len, &data); php_phongo_bson_to_zval_ex(data, len, &state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(&retval, "upsertedIds", &state.zchild); #else ADD_ASSOC_ZVAL_EX(&retval, "upsertedIds", state.zchild); #endif } else { #if PHP_VERSION_ID >= 70000 zval upsertedIds; array_init(&upsertedIds); ADD_ASSOC_ZVAL_EX(&retval, "upsertedIds", &upsertedIds); #else zval* upsertedIds = NULL; MAKE_STD_ZVAL(upsertedIds); array_init(upsertedIds); ADD_ASSOC_ZVAL_EX(&retval, "upsertedIds", upsertedIds); #endif } { #if PHP_VERSION_ID >= 70000 zval writeerrors; php_phongo_writeresult_get_writeerrors(intern, &writeerrors TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "writeErrors", &writeerrors); #else zval* writeerrors = NULL; MAKE_STD_ZVAL(writeerrors); php_phongo_writeresult_get_writeerrors(intern, writeerrors TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "writeErrors", writeerrors); #endif } { #if PHP_VERSION_ID >= 70000 zval writeconcernerror; php_phongo_writeresult_get_writeconcernerror(intern, &writeconcernerror TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "writeConcernError", &writeconcernerror); #else zval* writeconcernerror = NULL; MAKE_STD_ZVAL(writeconcernerror); php_phongo_writeresult_get_writeconcernerror(intern, writeconcernerror TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "writeConcernError", writeconcernerror); #endif } if (intern->write_concern) { #if PHP_VERSION_ID >= 70000 zval write_concern; phongo_writeconcern_init(&write_concern, intern->write_concern); ADD_ASSOC_ZVAL_EX(&retval, "writeConcern", &write_concern); #else zval* 
write_concern = NULL; MAKE_STD_ZVAL(write_concern); phongo_writeconcern_init(write_concern, intern->write_concern TSRMLS_CC); ADD_ASSOC_ZVAL_EX(&retval, "writeConcern", write_concern); #endif } else { ADD_ASSOC_NULL_EX(&retval, "writeConcern"); } return Z_ARRVAL(retval); } /* }}} */ /* }}} */ void php_phongo_writeresult_init_ce(INIT_FUNC_ARGS) /* {{{ */ { zend_class_entry ce; INIT_NS_CLASS_ENTRY(ce, "MongoDB\\Driver", "WriteResult", php_phongo_writeresult_me); php_phongo_writeresult_ce = zend_register_internal_class(&ce TSRMLS_CC); php_phongo_writeresult_ce->create_object = php_phongo_writeresult_create_object; PHONGO_CE_FINAL(php_phongo_writeresult_ce); PHONGO_CE_DISABLE_SERIALIZATION(php_phongo_writeresult_ce); memcpy(&php_phongo_handler_writeresult, phongo_get_std_object_handlers(), sizeof(zend_object_handlers)); php_phongo_handler_writeresult.get_debug_info = php_phongo_writeresult_get_debug_info; #if PHP_VERSION_ID >= 70000 php_phongo_handler_writeresult.free_obj = php_phongo_writeresult_free_object; php_phongo_handler_writeresult.offset = XtOffsetOf(php_phongo_writeresult_t, std); #endif } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/contrib/php_array_api.h0000644000076500000240000005101513572250757017701 0ustar alcaeusstaff/* +----------------------------------------------------------------------+ | PHP Version 7 | +----------------------------------------------------------------------+ | Copyright (c) 1997-2018 The PHP Group | +----------------------------------------------------------------------+ | This source file is subject to version 3.01 of the PHP license, | | that is bundled with this package in the file LICENSE, and is | | available through the world-wide-web at the following url: | | http://www.php.net/license/3_01.txt | | If you did not receive a copy of the PHP license and are unable to | | obtain it through the world-wide-web, please send a note to | | license@php.net so we can mail you a copy immediately. | +----------------------------------------------------------------------+ | Author: Sara Golemon (pollita@php.net) | +----------------------------------------------------------------------+ */ #ifndef PHP_ARRAY_API_H #define PHP_ARRAY_API_H #include "zend.h" #include "zend_execute.h" #include "zend_API.h" #include "zend_operators.h" #include "zend_hash.h" #include "zend_list.h" #ifdef ZEND_ENGINE_3 # define PAA_LENGTH_ADJ(l) (l) # define PAA_SYM_EXISTS zend_symtable_str_exists # define PAA_SYM_DEL zend_symtable_str_del # define PAA_LONG zend_long # define PAA_ULONG zend_ulong #else # define PAA_LENGTH_ADJ(l) (l+1) # define PAA_SYM_EXISTS zend_symtable_exists # define PAA_SYM_DEL zend_symtable_del # define PAA_LONG long # define PAA_ULONG ulong #endif /** * All APIs in this file follow a general format: * * php_array_{$verb}{$modifier}_{$type}(zval *zarr, ...) * * $verb is one of: * exists - Boolean check whether the array offset exists * fetch - Retrieve the value at $zarr[$key] * unset - Delete the named offset from the array * * $modifier specifies what type of offset (key) is being used: * - NULL terminated string variable, unknown length * l - NULL terminated string variable, known length * l_safe - String variable of known length, not necessarily NULL terminated * n - Long (integer) offset * c - NULL terminated string literal (e.g. 
"foo" rather than foo) * z - zval* offset, type should be NULL, BOOL, LONG, DOUBLE, or STRING * * $type is specific to the "fetch" verb: * - Fetch a zval* of any type * bool - Fetch a zend_bool (converting as needed) * long - Fetch a long (converting as needed) * double - Fetch a double (converting as needed) * string - Fetch a string (converting as needed, caller may need to free) * array - Fetch an array (no conversion from other types) * object - Fetch an object (no conversion, type spec optional) * resource - Fetch a resource (no conversion, type spec mandatory) * * See the specific subsection for additional details */ /* isset($zarr[$key]) - Check for the existence of a key within an array * * zend_bool php_array_exists(zval *zarr, const char *key) * zend_bool php_array_existsc(zval *zarr, const char *litstr) * zend_bool php_array_existsl(zval *zarr, const char *key, int key_len) * zend_bool php_array_existsl_safe(zval *zarr, const char *key, int key_len) * zend_bool php_array_existsn(zval *zarr, unsigned long idx) * zend_bool php_array_existsz(zval *zarr, zval *key) */ static inline zend_bool php_array_exists(zval *zarr, const char *key) { return PAA_SYM_EXISTS(Z_ARRVAL_P(zarr), key, PAA_LENGTH_ADJ(strlen(key))); } #define php_array_existsc(zarr, litstr) \ PAA_SYM_EXISTS(Z_ARRVAL_P(zarr), litstr, PAA_LENGTH_ADJ(sizeof(litstr) - 1)) #define php_array_existsl(zarr, key, len) \ PAA_SYM_EXISTS(Z_ARRVAL_P(zarr), key, PAA_LENGTH_ADJ(len)) static inline zend_bool php_array_existsl_safe(zval *zarr, const char *key, int key_len) { #ifdef ZEND_ENGINE_3 zend_string *keystr = zend_string_init(key, key_len, 0); zend_bool ret = zend_symtable_exists(Z_ARRVAL_P(zarr), keystr); zend_string_release(keystr); #else char *k = estrndup(key, key_len); zend_bool ret = zend_symtable_exists(Z_ARRVAL_P(zarr), k, key_len + 1); efree(k); #endif return ret; } #define php_array_existsn(zarr, idx) \ zend_hash_index_exists(Z_ARRVAL_P(zarr), idx) static inline zend_bool php_array_existsz(zval *zarr, zval *key) { switch (Z_TYPE_P(key)) { case IS_NULL: return php_array_existsc(zarr, ""); #ifdef ZEND_ENGINE_3 case IS_FALSE: return zend_hash_index_exists(Z_ARRVAL_P(zarr), 0); case IS_TRUE: return zend_hash_index_exists(Z_ARRVAL_P(zarr), 1); #else case IS_BOOL: /* fallthrough */ #endif case IS_LONG: return zend_hash_index_exists(Z_ARRVAL_P(zarr), Z_LVAL_P(key)); case IS_DOUBLE: return zend_hash_index_exists(Z_ARRVAL_P(zarr), zend_dval_to_lval(Z_DVAL_P(key))); case IS_STRING: return php_array_existsl(zarr, Z_STRVAL_P(key), Z_STRLEN_P(key)); default: return 0; } } /* =$zarr[$key] - Fetch a zval (or appropriate type) from an array * * Methods returning pointers yield NULL on key not existing, * others yield 0, false, etc... as appropriate. * Callers needing to distinguish empty scalars from non-existent * scalars should use php_array_exists*() or fetch the zval then convert. * * If the type of the value does not match what is requested * it will be implicitly converted (if possible). * * See each type section for specific prototypes * * php_array_fetch* - Fetch a zval * php_array_fetch*_bool - Fetch a boolean * php_array_fetch*_long - Fetch a long * php_array_fetch*_double - Fetch a double * php_array_fetch*_string - Fetch a string (must be efree()'d by caller) * php_array_fetch*_array - Fetch an array * php_array_fetch*_resource - Fetch a resource or a specific type * php_array_fetch*_object - Fetch an object * * For each result type, there are six key forms: * php_array_fetch_T(zval *zarr, const char *key, ...) 
* NULL terminated string key * php_array_fetchc_T(zval *zarr, const char *litkey, ...) * String literal key * php_array_fetchl_T(zval *zarr, const char *key, int key_len, ...) * NULL terminated string key of known length * php_array_fetchl_safe_T(zval *zarr, const char *key, int key_len, ...) * String key of known length, may not be NULL terminated * php_array_fetchn_T(zval *zarr, unsigned long idx, ...) * Numeric key * php_array_fetchz_T(zval *zarr, zval *key, ...) * zval* key */ /* Fetch zval* * * zval *php_array_fetch(zval *zarr, const char *key) * zval *php_array_fetchl(zval *zarr, const char *key, int key_len) * zval *php_array_fetchl_safe(zval *zarr, const char *key, int key_len) * zval *php_array_fetchn(zval *zarr, unsigned long idx) * zval *php_array_fetchc(zval *zarr, const char *litstr) * zval *php_array_fetchz(zval *zarr, zval *key) */ static inline zval *php_array_fetchl(zval *zarr, const char *key, int key_len) { #ifdef ZEND_ENGINE_3 return zend_symtable_str_find(Z_ARRVAL_P(zarr), key, key_len); #else zval **ppzval; if (FAILURE == zend_symtable_find(Z_ARRVAL_P(zarr), key, key_len + 1, (void**)&ppzval)) { return NULL; } return *ppzval; #endif } static inline zval *php_array_fetch(zval *zarr, const char *key) { return php_array_fetchl(zarr, key, strlen(key)); } #define php_array_fetchc(zarr, litstr) php_array_fetchl(zarr, litstr, sizeof(litstr)-1) static inline zval *php_array_fetchl_safe(zval *zarr, const char *key, int key_len) { #ifdef ZEND_ENGINE_3 zend_string *keystr = zend_string_init(key, key_len, 0); zval *ret = zend_symtable_find(Z_ARRVAL_P(zarr), keystr); zend_string_release(keystr); #else char *k = estrndup(key, key_len); zval *ret = php_array_fetchl(zarr, k, key_len); efree(k); #endif return ret; } static inline zval *php_array_fetchn(zval *zarr, PAA_ULONG idx) { #ifdef ZEND_ENGINE_3 return zend_hash_index_find(Z_ARRVAL_P(zarr), idx); #else zval **ppzval; if (FAILURE == zend_hash_index_find(Z_ARRVAL_P(zarr), idx, (void**)&ppzval)) { return NULL; } return *ppzval; #endif } static inline zval *php_array_fetchz(zval *zarr, zval *key) { switch (Z_TYPE_P(key)) { case IS_NULL: return php_array_fetchn(zarr, 0); #ifdef ZEND_ENGINE_3 case IS_FALSE: return php_array_fetchn(zarr, 0); case IS_TRUE: return php_array_fetchn(zarr, 1); #else case IS_BOOL: /* fallthrough */ #endif case IS_LONG: return php_array_fetchn(zarr, Z_LVAL_P(key)); case IS_DOUBLE: return php_array_fetchn(zarr, (long)Z_DVAL_P(key)); case IS_STRING: return php_array_fetchl(zarr, Z_STRVAL_P(key), Z_STRLEN_P(key)); default: return NULL; } } #define PHP_ARRAY_FETCH_TYPE_MAP(ctype, ztype) \ static inline ctype php_array_fetch_##ztype(zval *zarr, const char *key) \ { return php_array_zval_to_##ztype(php_array_fetch(zarr, key)); } \ static inline ctype php_array_fetchl_##ztype(zval *zarr, const char *key, int key_len) \ { return php_array_zval_to_##ztype(php_array_fetchl(zarr, key, key_len)); } \ static inline ctype php_array_fetchl_safe_##ztype(zval *zarr, const char *key, int key_len) \ { return php_array_zval_to_##ztype(php_array_fetchl_safe(zarr, key, key_len)); } \ static inline ctype php_array_fetchn_##ztype(zval *zarr, PAA_ULONG idx) \ { return php_array_zval_to_##ztype(php_array_fetchn(zarr, idx)); } \ static inline ctype php_array_fetchz_##ztype(zval *zarr, zval *key) \ { return php_array_zval_to_##ztype(php_array_fetchz(zarr, key)); } /* Fetch zend_bool * * zend_bool php_array_fetch_bool(zval *zarr, const char *key) * zend_bool php_array_fetchl_bool(zval *zarr, const char *key, int key_len) * zend_bool 
php_array_fetchl_safe_bool(zval *zarr, const char *key, int key_len) * zend_bool php_array_fetchn_bool(zval *zarr, unsigned long idx) * zend_bool php_array_fetchc_bool(zval *zarr, const char *litstr) * zend_bool php_array_fetchz_bool(zval *zarr, zval *key) */ static inline zend_bool php_array_zval_to_bool(zval *z) { return z && zend_is_true(z); } PHP_ARRAY_FETCH_TYPE_MAP(zend_bool, bool) #define php_array_fetchc_bool(zarr, litstr) \ php_array_zval_to_bool(php_array_fetchc(zarr, litstr)) /* Fetch long * * long php_array_fetch_long(zval *zarr, const char *key) * long php_array_fetchl_long(zval *zarr, const char *key, int key_len) * long php_array_fetchl_safe_long(zval *zarr, const char *key, int key_len) * long php_array_fetchn_long(zval *zarr, unsigned long idx) * long php_array_fetchc_long(zval *zarr, const char *litstr) * long php_array_fetchz_long(zval *zarr, zval *key) */ static inline PAA_LONG php_array_zval_to_long(zval *z) { if (!z) { return 0; } switch(Z_TYPE_P(z)) { case IS_NULL: return 0; #ifdef ZEND_ENGINE_3 case IS_FALSE: return 0; case IS_TRUE: return 1; #else case IS_BOOL: return Z_BVAL_P(z); #endif case IS_LONG: return Z_LVAL_P(z); default: { zval c = *z; zval_copy_ctor(&c); convert_to_long(&c); return Z_LVAL(c); } } } PHP_ARRAY_FETCH_TYPE_MAP(PAA_LONG, long) #define php_array_fetchc_long(zarr, litstr) \ php_array_zval_to_long(php_array_fetchc(zarr, litstr)) /* Fetch double * * double php_array_fetch_double(zval *zarr, const char *key) * double php_array_fetchl_double(zval *zarr, const char *key, int key_len) * double php_array_fetchl_safe_double(zval *zarr, const char *key, int key_len) * double php_array_fetchn_double(zval *zarr, unsigned long idx) * double php_array_fetchc_double(zval *zarr, const char *litstr) * double php_array_fetchz_double(zval *zarr, zval *key) */ static inline double php_array_zval_to_double(zval *z) { if (!z) { return 0.0; } switch (Z_TYPE_P(z)) { case IS_NULL: return 0.0; #ifdef ZEND_ENGINE_3 case IS_FALSE: return 0.0; case IS_TRUE: return 1.0; #else case IS_BOOL: return (double)Z_BVAL_P(z); #endif case IS_LONG: return (double)Z_LVAL_P(z); case IS_DOUBLE: return Z_DVAL_P(z); default: { zval c = *z; zval_copy_ctor(&c); convert_to_double(&c); return Z_DVAL(c); } } } PHP_ARRAY_FETCH_TYPE_MAP(double, double) #define php_array_fetchc_double(zarr, litstr) \ php_array_zval_to_double(php_array_fetchc(zarr, litstr)) /* Fetch string * * If the pfree is set to 1 on exit, then the return value is owned by the caller * and must be efree()'d once it is no longer in use. * * plen is populated with the binary safe length of the string returned. * * char *php_array_fetch_string(zval *zarr, const char *key, int *plen, zend_bool *pfree) * char *php_array_fetchl_string(zval *zarr, const char *key, int key_len, int *plen, zend_bool *pfree) * char *php_array_fetchl_safe_string(zval *zarr, const char *key, int key_len, int *plen, zend_bool *pfree) * char *php_array_fetchn_string(zval *zarr, unsigned long idx, int *plen, zend_bool *pfree) * char *php_array_fetchc_string(zval *zarr, const char *litstr, int *plen, zend_bool *pfree) * char *php_array_fetchz_string(zval *zarr, zval *key, int *plen, zend_bool *pfree) */ static inline char *php_array_zval_to_string(zval *z, int *plen, zend_bool *pfree) { *plen = 0; *pfree = 0; if (!z) { return NULL; } switch (Z_TYPE_P(z)) { case IS_NULL: return (char *)""; case IS_STRING: *plen = Z_STRLEN_P(z); return Z_STRVAL_P(z); default: { zval c = *z; zval_copy_ctor(&c); convert_to_string(&c); #ifdef ZEND_ENGINE_3 *pfree = ! 
IS_INTERNED(Z_STR(c)); #else *pfree = ! IS_INTERNED(Z_STRVAL(c)); #endif *plen = Z_STRLEN(c); return Z_STRVAL(c); } } } #define php_array_fetch_string(zarr, key, plen, pfree) \ php_array_zval_to_string(php_array_fetch(zarr, key), plen, pfree) #define php_array_fetchl_string(zarr, key, key_len, plen, pfree) \ php_array_zval_to_string(php_array_fetchl(zarr, key, key_len), plen, pfree) #define php_array_fetchl_safe_string(zarr, key, key_len, plen, pfree) \ php_array_zval_to_string(php_array_fetchl_safe(zarr, key, key_len), plen, pfree) #define php_array_fetchn_string(zarr, idx, plen, pfree) \ php_array_zval_to_string(php_array_fetchn(zarr, idx), plen, pfree) #define php_array_fetchc_string(zarr, litstr, plen, pfree) \ php_array_zval_to_string(php_array_fetchc(zarr, litstr), plen, pfree) #define php_array_fetchz_string(zarr, key, plen, pfree) \ php_array_zval_to_string(php_array_fetchz(zarr, key), plen, pfree) /* Fetch array * * No implicit conversion is performed. * * If the value is an array, then that zval is returned, * otherwise NULL is returned. * * zval *php_array_fetch_array(zval *zarr, const char *key) * zval *php_array_fetchl_array(zval *zarr, const char *key, int key_len) * zval *php_array_fetchl_safe_array(zval *zarr, const char *key, int key_len) * zval *php_array_fetchn_array(zval *zarr, unsigned long idx) * zval *php_array_fetchc_array(zval *zarr, const char *litstr) * zval *php_array_fetchz_array(zval *zarr, zval *key) */ static inline zval *php_array_zval_to_array(zval *zarr) { return (zarr && (Z_TYPE_P(zarr) == IS_ARRAY)) ? zarr : NULL; } PHP_ARRAY_FETCH_TYPE_MAP(zval*, array) #define php_array_fetchc_array(zarr, litstr) \ php_array_zval_to_array(php_array_fetchc(zarr, litstr)) /* count($arr) - Count number of elements in the array * * int php_array_count(zval *arr) */ #define php_array_count(zarr) zend_hash_num_elements(Z_ARRVAL_P(zarr)) /* Fetch resource * * No implicit conversion is performed. * * If the value is a resource of the named type, * then the pointer for it is returned, * otherwise NULL is returned. * * To test for multiple resource types (e.g. 'stream' and 'persistent stream') * Fetch a generic zval* and use Zend's ZEND_FETCH_RESOURCE() macro. 
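 *
 * Illustrative usage only (zoptions and le_mything are hypothetical: a zval*
 * holding an array and a list-entry type registered by the calling extension);
 * NULL is returned when the key is absent or holds a different resource type:
 *
 *   void *ptr = php_array_fetchc_resource(zoptions, "handle", le_mything);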
* * zval *php_array_fetch_resource(zval *zarr, const char *key, int le) * zval *php_array_fetchl_resource(zval *zarr, const char *key, int key_len, int le) * zval *php_array_fetchl_safe_resource(zval *zarr, const char *key, int key_len, int le) * zval *php_array_fetchn_resource(zval *zarr, unsigned long idx, int le) * zval *php_array_fetchc_resource(zval *zarr, const char *litstr, int le) * zval *php_array_fetchz_resource(zval *zarr, zval *key, int le) */ static inline void *php_array_zval_to_resource(zval *z, int le TSRMLS_DC) { #ifdef ZEND_ENGINE_3 return zend_fetch_resource_ex(z, NULL, le); #else void *ret; int rtype; if (!z || Z_TYPE_P(z) != IS_RESOURCE) { return NULL; } ret = zend_list_find(Z_RESVAL_P(z), &rtype); if (!ret || (rtype != le)) { return NULL; } return ret; #endif } #define php_array_fetch_resource(zarr, key, le) \ php_array_zval_to_resource(php_array_fetch(zarr, key), le TSRMLS_CC) #define php_array_fetchl_resource(zarr, key, key_len, le) \ php_array_zval_to_resource(php_array_fetchl(zarr, key, key_len), le TSRMLS_CC) #define php_array_fetchl_safe_resource(zarr, key, key_len, le) \ php_array_zval_to_resource(php_array_fetchl_safe(zarr, key, key_len), le TSRMLS_CC) #define php_array_fetchn_resource(zarr, idx, le) \ php_array_zval_to_resource(php_array_fetchn(zarr, idx), le TSRMLS_CC) #define php_array_fetchc_resource(zarr, litstr, le) \ php_array_zval_to_resource(php_array_fetchc(zarr, litstr), le TSRMLS_CC) #define php_array_fetchz_resource(zarr, key, le) \ php_array_zval_to_resource(php_array_fetchz(zarr, key), le TSRMLS_CC) /* Fetch Object * * Fetch an object of a specific or non-specific type (pass ce == NULL) * * No implicit conversion is performed * * zval *php_array_fetch_object(zval *zarr, const char *key, zend_class_entry *ce) * zval *php_array_fetchl_object(zval *zarr, const char *key, int key_len, zend_class_entry *ce) * zval *php_array_fetchl_safe_object(zval *zarr, const char *key, int key_len, zend_class_entry *ce) * zval *php_array_fetchn_object(zval *zarr, unsigned long idx, zend_class_entry *ce) * zval *php_array_fetchc_object(zval *zarr, const char *litstr, zend_class_entry *ce) * zval *php_array_fetchz_object(zval *zarr, zval *key, zend_class_entry *ce) */ static inline zval *php_array_zval_to_object(zval *z, zend_class_entry *ce TSRMLS_DC) { return (z && (Z_TYPE_P(z) == IS_OBJECT) && ((!ce) || instanceof_function(Z_OBJCE_P(z), ce TSRMLS_CC))) ? 
z : NULL; } #define php_array_fetch_object(zarr, key, ce) \ php_array_zval_to_object(php_array_fetch(zarr, key), ce TSRMLS_CC) #define php_array_fetchl_object(zarr, key, len, ce) \ php_array_zval_to_object(php_array_fetchl(zarr, key, len), ce TSRMLS_CC) #define php_array_fetchl_safe_object(zarr, key, len, ce) \ php_array_zval_to_object(php_array_fetchl_safe(zarr, key, len), ce TSRMLS_CC) #define php_array_fetchn_object(zarr, idx, ce) \ php_array_zval_to_object(php_array_fetchn(zarr, idx), ce TSRMLS_CC) #define php_array_fetchc_object(zarr, litstr, ce) \ php_array_zval_to_object(php_array_fetchc(zarr, litstr), ce TSRMLS_CC) #define php_array_fetchz_object(zarr, key, ce) \ php_array_zval_to_object(php_array_fetchz(zarr, key), ce TSRMLS_CC) /* unset($zarr[$key]) - Erase a key from an array * * void php_array_unset(zval *zarr, const char *key) * void php_array_unsetl(zval *zarr, const char *key, int key_len) * void php_array_unsetl_safe(zval *zarr, const char *key, int key_len) * void php_array_unsetn(zval *zarr, long idx) * void php_array_unsetc(zval *zarr, const char *litstr) * void php_array_unsetz(zval *zarr, zval *key) */ static inline void php_array_unset(zval *zarr, const char *key) { PAA_SYM_DEL(Z_ARRVAL_P(zarr), key, PAA_LENGTH_ADJ(strlen(key))); } #define php_array_unsetl(zarr, key, len) \ PAA_SYM_DEL(Z_ARRVAL_P(zarr), key, PAA_LENGTH_ADJ(len)) static inline void php_array_unsetl_safe(zval *zarr, const char *key, int key_len) { char *k = estrndup(key, key_len); PAA_SYM_DEL(Z_ARRVAL_P(zarr), k, PAA_LENGTH_ADJ(key_len)); efree(k); } #define php_array_unsetn(zarr, idx) \ zend_symtable_index_del(Z_ARRVAL_P(zarr), idx) #define php_array_unsetc(zarr, litstr) \ PAA_SYM_DEL(Z_ARRVAL_P(zarr), litstr, PAA_LENGTH_ADJ(sizeof(litstr) - 1)) static inline void php_array_unsetz(zval *zarr, zval *key) { switch (Z_TYPE_P(key)) { case IS_NULL: zend_hash_index_del(Z_ARRVAL_P(zarr), 0); return; #ifdef ZEND_ENGINE_3 case IS_FALSE: zend_hash_index_del(Z_ARRVAL_P(zarr), 0); return; case IS_TRUE: zend_hash_index_del(Z_ARRVAL_P(zarr), 1); return; #else case IS_BOOL: /* fallthrough */ #endif case IS_LONG: zend_hash_index_del(Z_ARRVAL_P(zarr), Z_LVAL_P(key)); return; case IS_DOUBLE: zend_hash_index_del(Z_ARRVAL_P(zarr), (long)Z_DVAL_P(key)); break; case IS_STRING: php_array_unsetl(zarr, Z_STRVAL_P(key), Z_STRLEN_P(key)); break; } } #endif /* PHP_ARRAY_API_H */ mongodb-1.6.1/src/libmongoc/src/common/common-b64-private.h0000644000076500000240000000167413572250757023012 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "common-prelude.h" #ifndef COMMON_B64_PRIVATE_H #define COMMON_B64_PRIVATE_H #include int bson_b64_ntop (uint8_t const *src, size_t srclength, char *target, size_t targsize); int bson_b64_pton (char const *src, uint8_t *target, size_t targsize); #endif /* COMMON_B64_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/common/common-b64.c0000644000076500000240000004350013572250757021327 0ustar alcaeusstaff/* * Copyright (c) 1996, 1998 by Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS * ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE * CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS * SOFTWARE. */ /* * Portions Copyright (c) 1995 by International Business Machines, Inc. * * International Business Machines, Inc. (hereinafter called IBM) grants * permission under its copyrights to use, copy, modify, and distribute this * Software with or without fee, provided that the above copyright notice and * all paragraphs of this notice appear in all copies, and that the name of IBM * not be used in connection with the marketing of any product incorporating * the Software or modifications thereof, without specific, written prior * permission. * * To the extent it has a right to do so, IBM grants an immunity from suit * under its patents, if any, for the use, sale or manufacture of products to * the extent that such products are used for performing Domain Name System * dynamic updates in TCP/IP networks by means of the Software. No immunity is * granted for any product per se or for any other function of any product. * * THE SOFTWARE IS PROVIDED "AS IS", AND IBM DISCLAIMS ALL WARRANTIES, * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE. IN NO EVENT SHALL IBM BE LIABLE FOR ANY SPECIAL, * DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE, EVEN * IF IBM IS APPRISED OF THE POSSIBILITY OF SUCH DAMAGES. */ #include "bson/bson.h" #include "common-b64-private.h" #define Assert(Cond) \ if (!(Cond)) \ abort () static const char Base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; static const char Pad64 = '='; /* (From RFC1521 and draft-ietf-dnssec-secext-03.txt) * The following encoding technique is taken from RFC 1521 by Borenstein * and Freed. It is reproduced here in a slightly edited form for * convenience. * * A 65-character subset of US-ASCII is used, enabling 6 bits to be * represented per printable character. (The extra 65th character, "=", * is used to signify a special processing function.) * * The encoding process represents 24-bit groups of input bits as output * strings of 4 encoded characters. Proceeding from left to right, a * 24-bit input group is formed by concatenating 3 8-bit input groups. 
* These 24 bits are then treated as 4 concatenated 6-bit groups, each * of which is translated into a single digit in the base64 alphabet. * * Each 6-bit group is used as an index into an array of 64 printable * characters. The character referenced by the index is placed in the * output string. * * Table 1: The Base64 Alphabet * * Value Encoding Value Encoding Value Encoding Value Encoding * 0 A 17 R 34 i 51 z * 1 B 18 S 35 j 52 0 * 2 C 19 T 36 k 53 1 * 3 D 20 U 37 l 54 2 * 4 E 21 V 38 m 55 3 * 5 F 22 W 39 n 56 4 * 6 G 23 X 40 o 57 5 * 7 H 24 Y 41 p 58 6 * 8 I 25 Z 42 q 59 7 * 9 J 26 a 43 r 60 8 * 10 K 27 b 44 s 61 9 * 11 L 28 c 45 t 62 + * 12 M 29 d 46 u 63 / * 13 N 30 e 47 v * 14 O 31 f 48 w (pad) = * 15 P 32 g 49 x * 16 Q 33 h 50 y * * Special processing is performed if fewer than 24 bits are available * at the end of the data being encoded. A full encoding quantum is * always completed at the end of a quantity. When fewer than 24 input * bits are available in an input group, zero bits are added (on the * right) to form an integral number of 6-bit groups. Padding at the * end of the data is performed using the '=' character. * * Since all base64 input is an integral number of octets, only the * following cases can arise: * * (1) the final quantum of encoding input is an integral * multiple of 24 bits; here, the final unit of encoded * output will be an integral multiple of 4 characters * with no "=" padding, * (2) the final quantum of encoding input is exactly 8 bits; * here, the final unit of encoded output will be two * characters followed by two "=" padding characters, or * (3) the final quantum of encoding input is exactly 16 bits; * here, the final unit of encoded output will be three * characters followed by one "=" padding character. */ int bson_b64_ntop (uint8_t const *src, size_t srclength, char *target, size_t targsize) { size_t datalength = 0; uint8_t input[3]; uint8_t output[4]; size_t i; while (2 < srclength) { input[0] = *src++; input[1] = *src++; input[2] = *src++; srclength -= 3; output[0] = input[0] >> 2; output[1] = ((input[0] & 0x03) << 4) + (input[1] >> 4); output[2] = ((input[1] & 0x0f) << 2) + (input[2] >> 6); output[3] = input[2] & 0x3f; Assert (output[0] < 64); Assert (output[1] < 64); Assert (output[2] < 64); Assert (output[3] < 64); if (datalength + 4 > targsize) { return -1; } target[datalength++] = Base64[output[0]]; target[datalength++] = Base64[output[1]]; target[datalength++] = Base64[output[2]]; target[datalength++] = Base64[output[3]]; } /* Now we worry about padding. */ if (0 != srclength) { /* Get what's left. */ input[0] = input[1] = input[2] = '\0'; for (i = 0; i < srclength; i++) { input[i] = *src++; } output[0] = input[0] >> 2; output[1] = ((input[0] & 0x03) << 4) + (input[1] >> 4); output[2] = ((input[1] & 0x0f) << 2) + (input[2] >> 6); Assert (output[0] < 64); Assert (output[1] < 64); Assert (output[2] < 64); if (datalength + 4 > targsize) { return -1; } target[datalength++] = Base64[output[0]]; target[datalength++] = Base64[output[1]]; if (srclength == 1) { target[datalength++] = Pad64; } else { target[datalength++] = Base64[output[2]]; } target[datalength++] = Pad64; } if (datalength >= targsize) { return -1; } target[datalength] = '\0'; /* Returned value doesn't count \0. */ return (int) datalength; } /* (From RFC1521 and draft-ietf-dnssec-secext-03.txt) The following encoding technique is taken from RFC 1521 by Borenstein and Freed. It is reproduced here in a slightly edited form for convenience. 
A 65-character subset of US-ASCII is used, enabling 6 bits to be represented per printable character. (The extra 65th character, "=", is used to signify a special processing function.) The encoding process represents 24-bit groups of input bits as output strings of 4 encoded characters. Proceeding from left to right, a 24-bit input group is formed by concatenating 3 8-bit input groups. These 24 bits are then treated as 4 concatenated 6-bit groups, each of which is translated into a single digit in the base64 alphabet. Each 6-bit group is used as an index into an array of 64 printable characters. The character referenced by the index is placed in the output string. Table 1: The Base64 Alphabet Value Encoding Value Encoding Value Encoding Value Encoding 0 A 17 R 34 i 51 z 1 B 18 S 35 j 52 0 2 C 19 T 36 k 53 1 3 D 20 U 37 l 54 2 4 E 21 V 38 m 55 3 5 F 22 W 39 n 56 4 6 G 23 X 40 o 57 5 7 H 24 Y 41 p 58 6 8 I 25 Z 42 q 59 7 9 J 26 a 43 r 60 8 10 K 27 b 44 s 61 9 11 L 28 c 45 t 62 + 12 M 29 d 46 u 63 / 13 N 30 e 47 v 14 O 31 f 48 w (pad) = 15 P 32 g 49 x 16 Q 33 h 50 y Special processing is performed if fewer than 24 bits are available at the end of the data being encoded. A full encoding quantum is always completed at the end of a quantity. When fewer than 24 input bits are available in an input group, zero bits are added (on the right) to form an integral number of 6-bit groups. Padding at the end of the data is performed using the '=' character. Since all base64 input is an integral number of octets, only the following cases can arise: (1) the final quantum of encoding input is an integral multiple of 24 bits; here, the final unit of encoded output will be an integral multiple of 4 characters with no "=" padding, (2) the final quantum of encoding input is exactly 8 bits; here, the final unit of encoded output will be two characters followed by two "=" padding characters, or (3) the final quantum of encoding input is exactly 16 bits; here, the final unit of encoded output will be three characters followed by one "=" padding character. */ /* skips all whitespace anywhere. converts characters, four at a time, starting at (or after) src from base - 64 numbers into three 8 bit bytes in the target area. it returns the number of data bytes stored at the target, or -1 on error. 
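 
   illustrative round trip (a sketch, not from the original source; the
   buffers are simply sized "large enough" here):
 
     char b64[16];
     uint8_t raw[16];
     bson_b64_ntop ((const uint8_t *) "hello", 5, b64, sizeof b64);   encodes "aGVsbG8="
     int n = bson_b64_pton (b64, raw, sizeof raw);                    n == 5, raw holds "hello"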
*/ static uint8_t mongoc_b64rmap[256]; static const uint8_t mongoc_b64rmap_special = 0xf0; static const uint8_t mongoc_b64rmap_end = 0xfd; static const uint8_t mongoc_b64rmap_space = 0xfe; static const uint8_t mongoc_b64rmap_invalid = 0xff; /* initializing the reverse map isn't thread safe, do it in pthread_once */ #if defined(BSON_OS_UNIX) #include #define mongoc_common_once_t pthread_once_t #define mongoc_common_once pthread_once #define MONGOC_COMMON_ONCE_FUN(n) void n (void) #define MONGOC_COMMON_ONCE_RETURN return #define MONGOC_COMMON_ONCE_INIT PTHREAD_ONCE_INIT #else #define mongoc_common_once_t INIT_ONCE #define MONGOC_COMMON_ONCE_INIT INIT_ONCE_STATIC_INIT #define mongoc_common_once(o, c) InitOnceExecuteOnce (o, c, NULL, NULL) #define MONGOC_COMMON_ONCE_FUN(n) \ BOOL CALLBACK n (PINIT_ONCE _ignored_a, PVOID _ignored_b, PVOID *_ignored_c) #define MONGOC_COMMON_ONCE_RETURN return true #endif static MONGOC_COMMON_ONCE_FUN (bson_b64_initialize_rmap) { int i; unsigned char ch; /* Null: end of string, stop parsing */ mongoc_b64rmap[0] = mongoc_b64rmap_end; for (i = 1; i < 256; ++i) { ch = (unsigned char) i; /* Whitespaces */ if (isspace (ch)) mongoc_b64rmap[i] = mongoc_b64rmap_space; /* Padding: stop parsing */ else if (ch == Pad64) mongoc_b64rmap[i] = mongoc_b64rmap_end; /* Non-base64 char */ else mongoc_b64rmap[i] = mongoc_b64rmap_invalid; } /* Fill reverse mapping for base64 chars */ for (i = 0; Base64[i] != '\0'; ++i) mongoc_b64rmap[(uint8_t) Base64[i]] = i; MONGOC_COMMON_ONCE_RETURN; } static int mongoc_b64_pton_do (char const *src, uint8_t *target, size_t targsize) { int tarindex, state, ch; uint8_t ofs; state = 0; tarindex = 0; while (1) { ch = *src++; ofs = mongoc_b64rmap[ch]; if (ofs >= mongoc_b64rmap_special) { /* Ignore whitespaces */ if (ofs == mongoc_b64rmap_space) continue; /* End of base64 characters */ if (ofs == mongoc_b64rmap_end) break; /* A non-base64 character. */ return (-1); } switch (state) { case 0: if ((size_t) tarindex >= targsize) return (-1); target[tarindex] = ofs << 2; state = 1; break; case 1: if ((size_t) tarindex + 1 >= targsize) return (-1); target[tarindex] |= ofs >> 4; target[tarindex + 1] = (ofs & 0x0f) << 4; tarindex++; state = 2; break; case 2: if ((size_t) tarindex + 1 >= targsize) return (-1); target[tarindex] |= ofs >> 2; target[tarindex + 1] = (ofs & 0x03) << 6; tarindex++; state = 3; break; case 3: if ((size_t) tarindex >= targsize) return (-1); target[tarindex] |= ofs; tarindex++; state = 0; break; default: abort (); } } /* * We are done decoding Base-64 chars. Let's see if we ended * on a byte boundary, and/or with erroneous trailing characters. */ if (ch == Pad64) { /* We got a pad char. */ ch = *src++; /* Skip it, get next. */ switch (state) { case 0: /* Invalid = in first position */ case 1: /* Invalid = in second position */ return (-1); case 2: /* Valid, means one byte of info */ /* Skip any number of spaces. */ for ((void) NULL; ch != '\0'; ch = *src++) if (mongoc_b64rmap[ch] != mongoc_b64rmap_space) break; /* Make sure there is another trailing = sign. */ if (ch != Pad64) return (-1); ch = *src++; /* Skip the = */ /* Fall through to "single trailing =" case. */ /* FALLTHROUGH */ case 3: /* Valid, means two bytes of info */ /* * We know this char is an =. Is there anything but * whitespace after it? */ for ((void) NULL; ch != '\0'; ch = *src++) if (mongoc_b64rmap[ch] != mongoc_b64rmap_space) return (-1); /* * Now make sure for cases 2 and 3 that the "extra" * bits that slopped past the last full byte were * zeros. 
If we don't check them, they become a * subliminal channel. */ if (target[tarindex] != 0) return (-1); default: break; } } else { /* * We ended by seeing the end of the string. Make sure we * have no partial bytes lying around. */ if (state != 0) return (-1); } return (tarindex); } static int mongoc_b64_pton_len (char const *src) { int tarindex, state, ch; uint8_t ofs; state = 0; tarindex = 0; while (1) { ch = *src++; ofs = mongoc_b64rmap[ch]; if (ofs >= mongoc_b64rmap_special) { /* Ignore whitespaces */ if (ofs == mongoc_b64rmap_space) continue; /* End of base64 characters */ if (ofs == mongoc_b64rmap_end) break; /* A non-base64 character. */ return (-1); } switch (state) { case 0: state = 1; break; case 1: tarindex++; state = 2; break; case 2: tarindex++; state = 3; break; case 3: tarindex++; state = 0; break; default: abort (); } } /* * We are done decoding Base-64 chars. Let's see if we ended * on a byte boundary, and/or with erroneous trailing characters. */ if (ch == Pad64) { /* We got a pad char. */ ch = *src++; /* Skip it, get next. */ switch (state) { case 0: /* Invalid = in first position */ case 1: /* Invalid = in second position */ return (-1); case 2: /* Valid, means one byte of info */ /* Skip any number of spaces. */ for ((void) NULL; ch != '\0'; ch = *src++) if (mongoc_b64rmap[ch] != mongoc_b64rmap_space) break; /* Make sure there is another trailing = sign. */ if (ch != Pad64) return (-1); ch = *src++; /* Skip the = */ /* Fall through to "single trailing =" case. */ /* FALLTHROUGH */ case 3: /* Valid, means two bytes of info */ /* * We know this char is an =. Is there anything but * whitespace after it? */ for ((void) NULL; ch != '\0'; ch = *src++) if (mongoc_b64rmap[ch] != mongoc_b64rmap_space) return (-1); default: break; } } else { /* * We ended by seeing the end of the string. Make sure we * have no partial bytes lying around. */ if (state != 0) return (-1); } return (tarindex); } int bson_b64_pton (char const *src, uint8_t *target, size_t targsize) { static mongoc_common_once_t once = MONGOC_COMMON_ONCE_INIT; mongoc_common_once (&once, bson_b64_initialize_rmap); if (target) return mongoc_b64_pton_do (src, target, targsize); else return mongoc_b64_pton_len (src); } mongodb-1.6.1/src/libmongoc/src/common/common-md5-private.h0000644000076500000240000000170413572250757023076 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common-prelude.h" #ifndef COMMON_MD5_PRIVATE_H #define COMMON_MD5_PRIVATE_H #include "bson/bson.h" BSON_BEGIN_DECLS void _bson_md5_init (bson_md5_t *pms); void _bson_md5_append (bson_md5_t *pms, const uint8_t *data, uint32_t nbytes); void _bson_md5_finish (bson_md5_t *pms, uint8_t digest[16]); BSON_END_DECLS #endif /* COMMON_MD5_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/common/common-md5.c0000644000076500000240000003100013572250757021411 0ustar alcaeusstaff/* Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. 
This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgement in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. L. Peter Deutsch ghost@aladdin.com */ /* $Id: md5.c,v 1.6 2002/04/13 19:20:28 lpd Exp $ */ /* Independent implementation of MD5 (RFC 1321). This code implements the MD5 Algorithm defined in RFC 1321, whose text is available at http://www.ietf.org/rfc/rfc1321.txt The code is derived from the text of the RFC, including the test suite (section A.5) but excluding the rest of Appendix A. It does not include any code or documentation that is identified in the RFC as being copyrighted. The original and principal author of md5.c is L. Peter Deutsch . Other authors are noted in the change history that follows (in reverse chronological order): 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order either statically or dynamically; added missing #include in library. 2002-03-11 lpd Corrected argument list for main(), and added int return type, in test program and T value program. 2002-02-21 lpd Added missing #include in test program. 2000-07-03 lpd Patched to eliminate warnings about "constant is unsigned in ANSI C, signed in traditional"; made test program self-checking. 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5). 1999-05-03 lpd Original version. */ /* * The following MD5 implementation has been modified to use types as * specified in libbson. 
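 *
 * Typical use of the private helpers declared in common-md5-private.h
 * (an illustrative sketch, not part of the original file):
 *
 *   bson_md5_t md5;
 *   uint8_t digest[16];
 *
 *   _bson_md5_init (&md5);
 *   _bson_md5_append (&md5, (const uint8_t *) "abc", 3);
 *   _bson_md5_finish (&md5, digest);
 *   (digest now holds the 16-byte MD5 of "abc")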
*/ #include #include "common-md5-private.h" #undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */ #if BSON_BYTE_ORDER == BSON_BIG_ENDIAN #define BYTE_ORDER 1 #else #define BYTE_ORDER -1 #endif #define T_MASK ((uint32_t) ~0) #define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87) #define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9) #define T3 0x242070db #define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111) #define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050) #define T6 0x4787c62a #define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec) #define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe) #define T9 0x698098d8 #define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850) #define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e) #define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841) #define T13 0x6b901122 #define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c) #define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71) #define T16 0x49b40821 #define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d) #define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf) #define T19 0x265e5a51 #define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855) #define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2) #define T22 0x02441453 #define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e) #define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437) #define T25 0x21e1cde6 #define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829) #define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278) #define T28 0x455a14ed #define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa) #define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07) #define T31 0x676f02d9 #define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375) #define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd) #define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e) #define T35 0x6d9d6122 #define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3) #define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb) #define T38 0x4bdecfa9 #define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f) #define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f) #define T41 0x289b7ec6 #define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805) #define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a) #define T44 0x04881d05 #define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6) #define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a) #define T47 0x1fa27cf8 #define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a) #define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb) #define T50 0x432aff97 #define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58) #define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6) #define T53 0x655b59c3 #define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d) #define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82) #define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e) #define T57 0x6fa87e4f #define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f) #define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb) #define T60 0x4e0811a1 #define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d) #define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca) #define T63 0x2ad7d2bb #define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e) static void bson_md5_process (bson_md5_t *md5, const uint8_t *data) { uint32_t a = md5->abcd[0]; uint32_t b = md5->abcd[1]; uint32_t c = md5->abcd[2]; uint32_t d = md5->abcd[3]; uint32_t t; #if BYTE_ORDER > 0 /* Define storage only for big-endian CPUs. */ uint32_t X[16]; #else /* Define storage for little-endian or both types of CPUs. */ uint32_t xbuf[16]; const uint32_t *X; #endif { #if BYTE_ORDER == 0 /* * Determine dynamically whether this is a big-endian or * little-endian machine, since we can use a more efficient * algorithm on the latter. 
*/ static const int w = 1; if (*((const uint8_t *) &w)) /* dynamic little-endian */ #endif #if BYTE_ORDER <= 0 /* little-endian */ { /* * On little-endian machines, we can process properly aligned * data without copying it. */ if (!((data - (const uint8_t *) 0) & 3)) { /* data are properly aligned */ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wcast-align" #endif X = (const uint32_t *) data; #ifdef __clang__ #pragma clang diagnostic pop #endif } else { /* not aligned */ memcpy (xbuf, data, sizeof (xbuf)); X = xbuf; } } #endif #if BYTE_ORDER == 0 else /* dynamic big-endian */ #endif #if BYTE_ORDER >= 0 /* big-endian */ { /* * On big-endian machines, we must arrange the bytes in the * right order. */ const uint8_t *xp = data; int i; #if BYTE_ORDER == 0 X = xbuf; /* (dynamic only) */ #else #define xbuf X /* (static only) */ #endif for (i = 0; i < 16; ++i, xp += 4) xbuf[i] = xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24); } #endif } #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n)))) /* Round 1. */ /* Let [abcd k s i] denote the operation a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */ #define F(x, y, z) (((x) & (y)) | (~(x) & (z))) #define SET(a, b, c, d, k, s, Ti) \ t = a + F (b, c, d) + X[k] + Ti; \ a = ROTATE_LEFT (t, s) + b /* Do the following 16 operations. */ SET (a, b, c, d, 0, 7, T1); SET (d, a, b, c, 1, 12, T2); SET (c, d, a, b, 2, 17, T3); SET (b, c, d, a, 3, 22, T4); SET (a, b, c, d, 4, 7, T5); SET (d, a, b, c, 5, 12, T6); SET (c, d, a, b, 6, 17, T7); SET (b, c, d, a, 7, 22, T8); SET (a, b, c, d, 8, 7, T9); SET (d, a, b, c, 9, 12, T10); SET (c, d, a, b, 10, 17, T11); SET (b, c, d, a, 11, 22, T12); SET (a, b, c, d, 12, 7, T13); SET (d, a, b, c, 13, 12, T14); SET (c, d, a, b, 14, 17, T15); SET (b, c, d, a, 15, 22, T16); #undef SET /* Round 2. */ /* Let [abcd k s i] denote the operation a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */ #define G(x, y, z) (((x) & (z)) | ((y) & ~(z))) #define SET(a, b, c, d, k, s, Ti) \ t = a + G (b, c, d) + X[k] + Ti; \ a = ROTATE_LEFT (t, s) + b /* Do the following 16 operations. */ SET (a, b, c, d, 1, 5, T17); SET (d, a, b, c, 6, 9, T18); SET (c, d, a, b, 11, 14, T19); SET (b, c, d, a, 0, 20, T20); SET (a, b, c, d, 5, 5, T21); SET (d, a, b, c, 10, 9, T22); SET (c, d, a, b, 15, 14, T23); SET (b, c, d, a, 4, 20, T24); SET (a, b, c, d, 9, 5, T25); SET (d, a, b, c, 14, 9, T26); SET (c, d, a, b, 3, 14, T27); SET (b, c, d, a, 8, 20, T28); SET (a, b, c, d, 13, 5, T29); SET (d, a, b, c, 2, 9, T30); SET (c, d, a, b, 7, 14, T31); SET (b, c, d, a, 12, 20, T32); #undef SET /* Round 3. */ /* Let [abcd k s t] denote the operation a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */ #define H(x, y, z) ((x) ^ (y) ^ (z)) #define SET(a, b, c, d, k, s, Ti) \ t = a + H (b, c, d) + X[k] + Ti; \ a = ROTATE_LEFT (t, s) + b /* Do the following 16 operations. */ SET (a, b, c, d, 5, 4, T33); SET (d, a, b, c, 8, 11, T34); SET (c, d, a, b, 11, 16, T35); SET (b, c, d, a, 14, 23, T36); SET (a, b, c, d, 1, 4, T37); SET (d, a, b, c, 4, 11, T38); SET (c, d, a, b, 7, 16, T39); SET (b, c, d, a, 10, 23, T40); SET (a, b, c, d, 13, 4, T41); SET (d, a, b, c, 0, 11, T42); SET (c, d, a, b, 3, 16, T43); SET (b, c, d, a, 6, 23, T44); SET (a, b, c, d, 9, 4, T45); SET (d, a, b, c, 12, 11, T46); SET (c, d, a, b, 15, 16, T47); SET (b, c, d, a, 2, 23, T48); #undef SET /* Round 4. */ /* Let [abcd k s t] denote the operation a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). 
*/ #define I(x, y, z) ((y) ^ ((x) | ~(z))) #define SET(a, b, c, d, k, s, Ti) \ t = a + I (b, c, d) + X[k] + Ti; \ a = ROTATE_LEFT (t, s) + b /* Do the following 16 operations. */ SET (a, b, c, d, 0, 6, T49); SET (d, a, b, c, 7, 10, T50); SET (c, d, a, b, 14, 15, T51); SET (b, c, d, a, 5, 21, T52); SET (a, b, c, d, 12, 6, T53); SET (d, a, b, c, 3, 10, T54); SET (c, d, a, b, 10, 15, T55); SET (b, c, d, a, 1, 21, T56); SET (a, b, c, d, 8, 6, T57); SET (d, a, b, c, 15, 10, T58); SET (c, d, a, b, 6, 15, T59); SET (b, c, d, a, 13, 21, T60); SET (a, b, c, d, 4, 6, T61); SET (d, a, b, c, 11, 10, T62); SET (c, d, a, b, 2, 15, T63); SET (b, c, d, a, 9, 21, T64); #undef SET /* Then perform the following additions. (That is increment each of the four registers by the value it had before this block was started.) */ md5->abcd[0] += a; md5->abcd[1] += b; md5->abcd[2] += c; md5->abcd[3] += d; } void _bson_md5_init (bson_md5_t *pms) { pms->count[0] = pms->count[1] = 0; pms->abcd[0] = 0x67452301; pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476; pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301; pms->abcd[3] = 0x10325476; } void _bson_md5_append (bson_md5_t *pms, const uint8_t *data, uint32_t nbytes) { const uint8_t *p = data; int left = nbytes; int offset = (pms->count[0] >> 3) & 63; uint32_t nbits = (uint32_t) (nbytes << 3); if (nbytes <= 0) return; /* Update the message length. */ pms->count[1] += nbytes >> 29; pms->count[0] += nbits; if (pms->count[0] < nbits) pms->count[1]++; /* Process an initial partial block. */ if (offset) { int copy = (offset + nbytes > 64 ? 64 - offset : nbytes); memcpy (pms->buf + offset, p, copy); if (offset + copy < 64) return; p += copy; left -= copy; bson_md5_process (pms, pms->buf); } /* Process full blocks. */ for (; left >= 64; p += 64, left -= 64) bson_md5_process (pms, p); /* Process a final partial block. */ if (left) memcpy (pms->buf, p, left); } void _bson_md5_finish (bson_md5_t *pms, uint8_t digest[16]) { static const uint8_t pad[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; uint8_t data[8]; int i; /* Save the length before padding. */ for (i = 0; i < 8; ++i) data[i] = (uint8_t) (pms->count[i >> 2] >> ((i & 3) << 3)); /* Pad to 56 bytes mod 64. */ _bson_md5_append (pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1); /* Append the length. */ _bson_md5_append (pms, data, sizeof (data)); for (i = 0; i < 16; ++i) digest[i] = (uint8_t) (pms->abcd[i >> 2] >> ((i & 3) << 3)); } mongodb-1.6.1/src/libmongoc/src/common/common-prelude.h0000644000076500000240000000144513572250757022403 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION) && \ !defined(BSON_COMPILATION) && !defined(BSON_INSIDE) #error "Only or can be included directly." 
#endifmongodb-1.6.1/src/libmongoc/src/common/common-thread-private.h0000644000076500000240000000431413572250757023660 0ustar alcaeusstaff/* * Copyright 2013-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common-prelude.h" #ifndef COMMON_THREAD_PRIVATE_H #define COMMON_THREAD_PRIVATE_H #define BSON_INSIDE #include "bson/bson-compat.h" #include "bson/bson-config.h" #include "bson/bson-macros.h" #undef BSON_INSIDE BSON_BEGIN_DECLS #if defined(BSON_OS_UNIX) #include #define BSON_ONCE_FUN(n) void n (void) #define BSON_ONCE_RETURN return #define BSON_ONCE_INIT PTHREAD_ONCE_INIT #define bson_mutex_destroy pthread_mutex_destroy #define bson_mutex_init(_n) pthread_mutex_init ((_n), NULL) #define bson_mutex_lock pthread_mutex_lock #define bson_mutex_t pthread_mutex_t #define bson_mutex_unlock pthread_mutex_unlock #define bson_once pthread_once #define bson_once_t pthread_once_t #define bson_thread_create(_t, _f, _d) pthread_create ((_t), NULL, (_f), (_d)) #define bson_thread_join(_n) pthread_join ((_n), NULL) #define bson_thread_t pthread_t #else #define BSON_ONCE_FUN(n) \ BOOL CALLBACK n (PINIT_ONCE _ignored_a, PVOID _ignored_b, PVOID *_ignored_c) #define BSON_ONCE_INIT INIT_ONCE_STATIC_INIT #define BSON_ONCE_RETURN return true #define bson_mutex_destroy DeleteCriticalSection #define bson_mutex_init InitializeCriticalSection #define bson_mutex_lock EnterCriticalSection #define bson_mutex_t CRITICAL_SECTION #define bson_mutex_unlock LeaveCriticalSection #define bson_once(o, c) InitOnceExecuteOnce (o, c, NULL, NULL) #define bson_once_t INIT_ONCE #define bson_thread_create(_t, _f, _d) \ (!(*(_t) = CreateThread (NULL, 0, (void *) _f, _d, 0, NULL))) #define bson_thread_join(_n) WaitForSingleObject ((_n), INFINITE) #define bson_thread_t HANDLE #endif BSON_END_DECLS #endif /* COMMON_THREAD_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bcon.c0000644000076500000240000006545713572250757022276 0ustar alcaeusstaff/* * @file bcon.c * @brief BCON (BSON C Object Notation) Implementation */ /* Copyright 2009-2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "bson/bcon.h" #include "bson/bson-config.h" /* These stack manipulation macros are used to manage append recursion in * bcon_append_ctx_va(). 
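 *
 * For example (an illustrative call, not taken from this file), in
 *
 *   bcon_append (bson, "a", "[", BCON_INT32 (1), "]", (void *) NULL);
 *
 * the "[" token pushes an array frame on this stack, the int32 is appended
 * under the generated key "0", and "]" pops the frame again.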
They take care of some awkward dereference rules (the * real bson object isn't in the stack, but accessed by pointer) and add in run * time asserts to make sure we don't blow the stack in either direction */ #define STACK_ELE(_delta, _name) (ctx->stack[(_delta) + ctx->n]._name) #define STACK_BSON(_delta) \ (((_delta) + ctx->n) == 0 ? bson : &STACK_ELE (_delta, bson)) #define STACK_ITER(_delta) \ (((_delta) + ctx->n) == 0 ? &root_iter : &STACK_ELE (_delta, iter)) #define STACK_BSON_PARENT STACK_BSON (-1) #define STACK_BSON_CHILD STACK_BSON (0) #define STACK_ITER_PARENT STACK_ITER (-1) #define STACK_ITER_CHILD STACK_ITER (0) #define STACK_I STACK_ELE (0, i) #define STACK_IS_ARRAY STACK_ELE (0, is_array) #define STACK_PUSH_ARRAY(statement) \ do { \ BSON_ASSERT (ctx->n < (BCON_STACK_MAX - 1)); \ ctx->n++; \ STACK_I = 0; \ STACK_IS_ARRAY = 1; \ statement; \ } while (0) #define STACK_PUSH_DOC(statement) \ do { \ BSON_ASSERT (ctx->n < (BCON_STACK_MAX - 1)); \ ctx->n++; \ STACK_IS_ARRAY = 0; \ statement; \ } while (0) #define STACK_POP_ARRAY(statement) \ do { \ BSON_ASSERT (STACK_IS_ARRAY); \ BSON_ASSERT (ctx->n != 0); \ statement; \ ctx->n--; \ } while (0) #define STACK_POP_DOC(statement) \ do { \ BSON_ASSERT (!STACK_IS_ARRAY); \ BSON_ASSERT (ctx->n != 0); \ statement; \ ctx->n--; \ } while (0) /* This is a landing pad union for all of the types we can process with bcon. * We need actual storage for this to capture the return value of va_arg, which * takes multiple calls to get everything we need for some complex types */ typedef union bcon_append { char *UTF8; double DOUBLE; bson_t *DOCUMENT; bson_t *ARRAY; bson_t *BCON; struct { bson_subtype_t subtype; uint8_t *binary; uint32_t length; } BIN; bson_oid_t *OID; bool BOOL; int64_t DATE_TIME; struct { char *regex; char *flags; } REGEX; struct { char *collection; bson_oid_t *oid; } DBPOINTER; const char *CODE; char *SYMBOL; struct { const char *js; bson_t *scope; } CODEWSCOPE; int32_t INT32; struct { uint32_t timestamp; uint32_t increment; } TIMESTAMP; int64_t INT64; bson_decimal128_t *DECIMAL128; const bson_iter_t *ITER; } bcon_append_t; /* same as bcon_append_t. Some extra symbols and varying types that handle the * differences between bson_append and bson_iter */ typedef union bcon_extract { bson_type_t TYPE; bson_iter_t *ITER; const char *key; const char **UTF8; double *DOUBLE; bson_t *DOCUMENT; bson_t *ARRAY; struct { bson_subtype_t *subtype; const uint8_t **binary; uint32_t *length; } BIN; const bson_oid_t **OID; bool *BOOL; int64_t *DATE_TIME; struct { const char **regex; const char **flags; } REGEX; struct { const char **collection; const bson_oid_t **oid; } DBPOINTER; const char **CODE; const char **SYMBOL; struct { const char **js; bson_t *scope; } CODEWSCOPE; int32_t *INT32; struct { uint32_t *timestamp; uint32_t *increment; } TIMESTAMP; int64_t *INT64; bson_decimal128_t *DECIMAL128; } bcon_extract_t; static const char *gBconMagic = "BCON_MAGIC"; static const char *gBconeMagic = "BCONE_MAGIC"; const char * bson_bcon_magic (void) { return gBconMagic; } const char * bson_bcone_magic (void) { return gBconeMagic; } static void _noop (void) { } /* appends val to the passed bson object. 
Meant to be a super simple dispatch * table */ static void _bcon_append_single (bson_t *bson, bcon_type_t type, const char *key, bcon_append_t *val) { switch ((int) type) { case BCON_TYPE_UTF8: BSON_ASSERT (bson_append_utf8 (bson, key, -1, val->UTF8, -1)); break; case BCON_TYPE_DOUBLE: BSON_ASSERT (bson_append_double (bson, key, -1, val->DOUBLE)); break; case BCON_TYPE_BIN: { BSON_ASSERT (bson_append_binary ( bson, key, -1, val->BIN.subtype, val->BIN.binary, val->BIN.length)); break; } case BCON_TYPE_UNDEFINED: BSON_ASSERT (bson_append_undefined (bson, key, -1)); break; case BCON_TYPE_OID: BSON_ASSERT (bson_append_oid (bson, key, -1, val->OID)); break; case BCON_TYPE_BOOL: BSON_ASSERT (bson_append_bool (bson, key, -1, (bool) val->BOOL)); break; case BCON_TYPE_DATE_TIME: BSON_ASSERT (bson_append_date_time (bson, key, -1, val->DATE_TIME)); break; case BCON_TYPE_NULL: BSON_ASSERT (bson_append_null (bson, key, -1)); break; case BCON_TYPE_REGEX: { BSON_ASSERT ( bson_append_regex (bson, key, -1, val->REGEX.regex, val->REGEX.flags)); break; } case BCON_TYPE_DBPOINTER: { BSON_ASSERT (bson_append_dbpointer ( bson, key, -1, val->DBPOINTER.collection, val->DBPOINTER.oid)); break; } case BCON_TYPE_CODE: BSON_ASSERT (bson_append_code (bson, key, -1, val->CODE)); break; case BCON_TYPE_SYMBOL: BSON_ASSERT (bson_append_symbol (bson, key, -1, val->SYMBOL, -1)); break; case BCON_TYPE_CODEWSCOPE: BSON_ASSERT (bson_append_code_with_scope ( bson, key, -1, val->CODEWSCOPE.js, val->CODEWSCOPE.scope)); break; case BCON_TYPE_INT32: BSON_ASSERT (bson_append_int32 (bson, key, -1, val->INT32)); break; case BCON_TYPE_TIMESTAMP: { BSON_ASSERT (bson_append_timestamp ( bson, key, -1, val->TIMESTAMP.timestamp, val->TIMESTAMP.increment)); break; } case BCON_TYPE_INT64: BSON_ASSERT (bson_append_int64 (bson, key, -1, val->INT64)); break; case BCON_TYPE_DECIMAL128: BSON_ASSERT (bson_append_decimal128 (bson, key, -1, val->DECIMAL128)); break; case BCON_TYPE_MAXKEY: BSON_ASSERT (bson_append_maxkey (bson, key, -1)); break; case BCON_TYPE_MINKEY: BSON_ASSERT (bson_append_minkey (bson, key, -1)); break; case BCON_TYPE_ARRAY: { BSON_ASSERT (bson_append_array (bson, key, -1, val->ARRAY)); break; } case BCON_TYPE_DOCUMENT: { BSON_ASSERT (bson_append_document (bson, key, -1, val->DOCUMENT)); break; } case BCON_TYPE_ITER: BSON_ASSERT (bson_append_iter (bson, key, -1, val->ITER)); break; default: BSON_ASSERT (0); break; } } #define CHECK_TYPE(_type) \ do { \ if (bson_iter_type (iter) != (_type)) { \ return false; \ } \ } while (0) /* extracts the value under the iterator and writes it to val. returns false * if the iterator type doesn't match the token type. * * There are two magic tokens: * * BCONE_SKIP - * Let's us verify that a key has a type, without caring about its value. * This allows for wider declarative BSON verification * * BCONE_ITER - * Returns the underlying iterator. This could allow for more complicated, * procedural verification (if a parameter could have multiple types). 
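 *
 * e.g. (an illustrative call, not taken from this file)
 *
 *   bcon_extract (doc, "count", BCONE_SKIP (BSON_TYPE_INT32), (void *) NULL)
 *
 * returns true when "count" exists and holds an int32, without copying the
 * value out.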
* */ static bool _bcon_extract_single (const bson_iter_t *iter, bcon_type_t type, bcon_extract_t *val) { switch ((int) type) { case BCON_TYPE_UTF8: CHECK_TYPE (BSON_TYPE_UTF8); *val->UTF8 = bson_iter_utf8 (iter, NULL); break; case BCON_TYPE_DOUBLE: CHECK_TYPE (BSON_TYPE_DOUBLE); *val->DOUBLE = bson_iter_double (iter); break; case BCON_TYPE_BIN: CHECK_TYPE (BSON_TYPE_BINARY); bson_iter_binary ( iter, val->BIN.subtype, val->BIN.length, val->BIN.binary); break; case BCON_TYPE_UNDEFINED: CHECK_TYPE (BSON_TYPE_UNDEFINED); break; case BCON_TYPE_OID: CHECK_TYPE (BSON_TYPE_OID); *val->OID = bson_iter_oid (iter); break; case BCON_TYPE_BOOL: CHECK_TYPE (BSON_TYPE_BOOL); *val->BOOL = bson_iter_bool (iter); break; case BCON_TYPE_DATE_TIME: CHECK_TYPE (BSON_TYPE_DATE_TIME); *val->DATE_TIME = bson_iter_date_time (iter); break; case BCON_TYPE_NULL: CHECK_TYPE (BSON_TYPE_NULL); break; case BCON_TYPE_REGEX: CHECK_TYPE (BSON_TYPE_REGEX); *val->REGEX.regex = bson_iter_regex (iter, val->REGEX.flags); break; case BCON_TYPE_DBPOINTER: CHECK_TYPE (BSON_TYPE_DBPOINTER); bson_iter_dbpointer ( iter, NULL, val->DBPOINTER.collection, val->DBPOINTER.oid); break; case BCON_TYPE_CODE: CHECK_TYPE (BSON_TYPE_CODE); *val->CODE = bson_iter_code (iter, NULL); break; case BCON_TYPE_SYMBOL: CHECK_TYPE (BSON_TYPE_SYMBOL); *val->SYMBOL = bson_iter_symbol (iter, NULL); break; case BCON_TYPE_CODEWSCOPE: { const uint8_t *buf; uint32_t len; CHECK_TYPE (BSON_TYPE_CODEWSCOPE); *val->CODEWSCOPE.js = bson_iter_codewscope (iter, NULL, &len, &buf); BSON_ASSERT (bson_init_static (val->CODEWSCOPE.scope, buf, len)); break; } case BCON_TYPE_INT32: CHECK_TYPE (BSON_TYPE_INT32); *val->INT32 = bson_iter_int32 (iter); break; case BCON_TYPE_TIMESTAMP: CHECK_TYPE (BSON_TYPE_TIMESTAMP); bson_iter_timestamp ( iter, val->TIMESTAMP.timestamp, val->TIMESTAMP.increment); break; case BCON_TYPE_INT64: CHECK_TYPE (BSON_TYPE_INT64); *val->INT64 = bson_iter_int64 (iter); break; case BCON_TYPE_DECIMAL128: CHECK_TYPE (BSON_TYPE_DECIMAL128); BSON_ASSERT (bson_iter_decimal128 (iter, val->DECIMAL128)); break; case BCON_TYPE_MAXKEY: CHECK_TYPE (BSON_TYPE_MAXKEY); break; case BCON_TYPE_MINKEY: CHECK_TYPE (BSON_TYPE_MINKEY); break; case BCON_TYPE_ARRAY: { const uint8_t *buf; uint32_t len; CHECK_TYPE (BSON_TYPE_ARRAY); bson_iter_array (iter, &len, &buf); BSON_ASSERT (bson_init_static (val->ARRAY, buf, len)); break; } case BCON_TYPE_DOCUMENT: { const uint8_t *buf; uint32_t len; CHECK_TYPE (BSON_TYPE_DOCUMENT); bson_iter_document (iter, &len, &buf); BSON_ASSERT (bson_init_static (val->DOCUMENT, buf, len)); break; } case BCON_TYPE_SKIP: CHECK_TYPE (val->TYPE); break; case BCON_TYPE_ITER: memcpy (val->ITER, iter, sizeof *iter); break; default: BSON_ASSERT (0); break; } return true; } /* Consumes ap, storing output values into u and returning the type of the * captured token. * * The basic workflow goes like this: * * 1. Look at the current arg. It will be a char * * a. If it's a NULL, we're done processing. * b. If it's BCON_MAGIC (a symbol with storage in this module) * I. The next token is the type * II. The type specifies how many args to eat and their types * c. Otherwise it's either recursion related or a raw string * I. If the first byte is '{', '}', '[', or ']' pass back an * appropriate recursion token * II. 
If not, just call it a UTF8 token and pass that back */ static bcon_type_t _bcon_append_tokenize (va_list *ap, bcon_append_t *u) { char *mark; bcon_type_t type; mark = va_arg (*ap, char *); BSON_ASSERT (mark != BCONE_MAGIC); if (mark == NULL) { type = BCON_TYPE_END; } else if (mark == BCON_MAGIC) { type = va_arg (*ap, bcon_type_t); switch ((int) type) { case BCON_TYPE_UTF8: u->UTF8 = va_arg (*ap, char *); break; case BCON_TYPE_DOUBLE: u->DOUBLE = va_arg (*ap, double); break; case BCON_TYPE_DOCUMENT: u->DOCUMENT = va_arg (*ap, bson_t *); break; case BCON_TYPE_ARRAY: u->ARRAY = va_arg (*ap, bson_t *); break; case BCON_TYPE_BIN: u->BIN.subtype = va_arg (*ap, bson_subtype_t); u->BIN.binary = va_arg (*ap, uint8_t *); u->BIN.length = va_arg (*ap, uint32_t); break; case BCON_TYPE_UNDEFINED: break; case BCON_TYPE_OID: u->OID = va_arg (*ap, bson_oid_t *); break; case BCON_TYPE_BOOL: u->BOOL = va_arg (*ap, int); break; case BCON_TYPE_DATE_TIME: u->DATE_TIME = va_arg (*ap, int64_t); break; case BCON_TYPE_NULL: break; case BCON_TYPE_REGEX: u->REGEX.regex = va_arg (*ap, char *); u->REGEX.flags = va_arg (*ap, char *); break; case BCON_TYPE_DBPOINTER: u->DBPOINTER.collection = va_arg (*ap, char *); u->DBPOINTER.oid = va_arg (*ap, bson_oid_t *); break; case BCON_TYPE_CODE: u->CODE = va_arg (*ap, char *); break; case BCON_TYPE_SYMBOL: u->SYMBOL = va_arg (*ap, char *); break; case BCON_TYPE_CODEWSCOPE: u->CODEWSCOPE.js = va_arg (*ap, char *); u->CODEWSCOPE.scope = va_arg (*ap, bson_t *); break; case BCON_TYPE_INT32: u->INT32 = va_arg (*ap, int32_t); break; case BCON_TYPE_TIMESTAMP: u->TIMESTAMP.timestamp = va_arg (*ap, uint32_t); u->TIMESTAMP.increment = va_arg (*ap, uint32_t); break; case BCON_TYPE_INT64: u->INT64 = va_arg (*ap, int64_t); break; case BCON_TYPE_DECIMAL128: u->DECIMAL128 = va_arg (*ap, bson_decimal128_t *); break; case BCON_TYPE_MAXKEY: break; case BCON_TYPE_MINKEY: break; case BCON_TYPE_BCON: u->BCON = va_arg (*ap, bson_t *); break; case BCON_TYPE_ITER: u->ITER = va_arg (*ap, const bson_iter_t *); break; default: BSON_ASSERT (0); break; } } else { switch (mark[0]) { case '{': type = BCON_TYPE_DOC_START; break; case '}': type = BCON_TYPE_DOC_END; break; case '[': type = BCON_TYPE_ARRAY_START; break; case ']': type = BCON_TYPE_ARRAY_END; break; default: type = BCON_TYPE_UTF8; u->UTF8 = mark; break; } } return type; } /* Consumes ap, storing output values into u and returning the type of the * captured token. * * The basic workflow goes like this: * * 1. Look at the current arg. It will be a char * * a. If it's a NULL, we're done processing. * b. If it's BCONE_MAGIC (a symbol with storage in this module) * I. The next token is the type * II. The type specifies how many args to eat and their types * c. Otherwise it's either recursion related or a raw string * I. If the first byte is '{', '}', '[', or ']' pass back an * appropriate recursion token * II. 
If not, just call it a UTF8 token and pass that back */ static bcon_type_t _bcon_extract_tokenize (va_list *ap, bcon_extract_t *u) { char *mark; bcon_type_t type; mark = va_arg (*ap, char *); BSON_ASSERT (mark != BCON_MAGIC); if (mark == NULL) { type = BCON_TYPE_END; } else if (mark == BCONE_MAGIC) { type = va_arg (*ap, bcon_type_t); switch ((int) type) { case BCON_TYPE_UTF8: u->UTF8 = va_arg (*ap, const char **); break; case BCON_TYPE_DOUBLE: u->DOUBLE = va_arg (*ap, double *); break; case BCON_TYPE_DOCUMENT: u->DOCUMENT = va_arg (*ap, bson_t *); break; case BCON_TYPE_ARRAY: u->ARRAY = va_arg (*ap, bson_t *); break; case BCON_TYPE_BIN: u->BIN.subtype = va_arg (*ap, bson_subtype_t *); u->BIN.binary = va_arg (*ap, const uint8_t **); u->BIN.length = va_arg (*ap, uint32_t *); break; case BCON_TYPE_UNDEFINED: break; case BCON_TYPE_OID: u->OID = va_arg (*ap, const bson_oid_t **); break; case BCON_TYPE_BOOL: u->BOOL = va_arg (*ap, bool *); break; case BCON_TYPE_DATE_TIME: u->DATE_TIME = va_arg (*ap, int64_t *); break; case BCON_TYPE_NULL: break; case BCON_TYPE_REGEX: u->REGEX.regex = va_arg (*ap, const char **); u->REGEX.flags = va_arg (*ap, const char **); break; case BCON_TYPE_DBPOINTER: u->DBPOINTER.collection = va_arg (*ap, const char **); u->DBPOINTER.oid = va_arg (*ap, const bson_oid_t **); break; case BCON_TYPE_CODE: u->CODE = va_arg (*ap, const char **); break; case BCON_TYPE_SYMBOL: u->SYMBOL = va_arg (*ap, const char **); break; case BCON_TYPE_CODEWSCOPE: u->CODEWSCOPE.js = va_arg (*ap, const char **); u->CODEWSCOPE.scope = va_arg (*ap, bson_t *); break; case BCON_TYPE_INT32: u->INT32 = va_arg (*ap, int32_t *); break; case BCON_TYPE_TIMESTAMP: u->TIMESTAMP.timestamp = va_arg (*ap, uint32_t *); u->TIMESTAMP.increment = va_arg (*ap, uint32_t *); break; case BCON_TYPE_INT64: u->INT64 = va_arg (*ap, int64_t *); break; case BCON_TYPE_DECIMAL128: u->DECIMAL128 = va_arg (*ap, bson_decimal128_t *); break; case BCON_TYPE_MAXKEY: break; case BCON_TYPE_MINKEY: break; case BCON_TYPE_SKIP: u->TYPE = va_arg (*ap, bson_type_t); break; case BCON_TYPE_ITER: u->ITER = va_arg (*ap, bson_iter_t *); break; default: BSON_ASSERT (0); break; } } else { switch (mark[0]) { case '{': type = BCON_TYPE_DOC_START; break; case '}': type = BCON_TYPE_DOC_END; break; case '[': type = BCON_TYPE_ARRAY_START; break; case ']': type = BCON_TYPE_ARRAY_END; break; default: type = BCON_TYPE_RAW; u->key = mark; break; } } return type; } /* This trivial utility function is useful for concatenating a bson object onto * the end of another, ignoring the keys from the source bson object and * continuing to use and increment the keys from the source. It's only useful * when called from bcon_append_ctx_va */ static void _bson_concat_array (bson_t *dest, const bson_t *src, bcon_append_ctx_t *ctx) { bson_iter_t iter; const char *key; char i_str[16]; bool r; r = bson_iter_init (&iter, src); if (!r) { fprintf (stderr, "Invalid BSON document, possible memory coruption.\n"); return; } STACK_I--; while (bson_iter_next (&iter)) { bson_uint32_to_string (STACK_I, &key, i_str, sizeof i_str); STACK_I++; BSON_ASSERT (bson_append_iter (dest, key, -1, &iter)); } } /* Append_ctx_va consumes the va_list until NULL is found, appending into bson * as tokens are found. It can receive or return an in-progress bson object * via the ctx param. It can also operate on the middle of a va_list, and so * can be wrapped inside of another varargs function. 
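 *
 * A wrapper would look roughly like this (illustrative sketch; bcon_append ()
 * further down is exactly such a wrapper):
 *
 *   static void my_append (bson_t *bson, ...)
 *   {
 *      va_list ap;
 *      bcon_append_ctx_t ctx;
 *
 *      bcon_append_ctx_init (&ctx);
 *      va_start (ap, bson);
 *      bcon_append_ctx_va (bson, &ctx, &ap);
 *      va_end (ap);
 *   }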
* * Note that passing in a va_list that isn't perferectly formatted for BCON * ingestion will almost certainly result in undefined behavior * * The workflow relies on the passed ctx object, which holds a stack of bson * objects, along with metadata (if the emedded layer is an array, and which * element it is on if so). We iterate, generating tokens from the va_list, * until we reach an END token. If any errors occur, we just blow up (the * var_args stuff is already incredibly fragile to mistakes, and we have no way * of introspecting, so just don't screw it up). * * There are also a few STACK_* macros in here which manimpulate ctx that are * defined up top. * */ void bcon_append_ctx_va (bson_t *bson, bcon_append_ctx_t *ctx, va_list *ap) { bcon_type_t type; const char *key; char i_str[16]; bcon_append_t u = {0}; while (1) { if (STACK_IS_ARRAY) { bson_uint32_to_string (STACK_I, &key, i_str, sizeof i_str); STACK_I++; } else { type = _bcon_append_tokenize (ap, &u); if (type == BCON_TYPE_END) { return; } if (type == BCON_TYPE_DOC_END) { STACK_POP_DOC ( bson_append_document_end (STACK_BSON_PARENT, STACK_BSON_CHILD)); continue; } if (type == BCON_TYPE_BCON) { bson_concat (STACK_BSON_CHILD, u.BCON); continue; } BSON_ASSERT (type == BCON_TYPE_UTF8); key = u.UTF8; } type = _bcon_append_tokenize (ap, &u); BSON_ASSERT (type != BCON_TYPE_END); switch ((int) type) { case BCON_TYPE_BCON: BSON_ASSERT (STACK_IS_ARRAY); _bson_concat_array (STACK_BSON_CHILD, u.BCON, ctx); break; case BCON_TYPE_DOC_START: STACK_PUSH_DOC (bson_append_document_begin ( STACK_BSON_PARENT, key, -1, STACK_BSON_CHILD)); break; case BCON_TYPE_DOC_END: STACK_POP_DOC ( bson_append_document_end (STACK_BSON_PARENT, STACK_BSON_CHILD)); break; case BCON_TYPE_ARRAY_START: STACK_PUSH_ARRAY (bson_append_array_begin ( STACK_BSON_PARENT, key, -1, STACK_BSON_CHILD)); break; case BCON_TYPE_ARRAY_END: STACK_POP_ARRAY ( bson_append_array_end (STACK_BSON_PARENT, STACK_BSON_CHILD)); break; default: _bcon_append_single (STACK_BSON_CHILD, type, key, &u); break; } } } /* extract_ctx_va consumes the va_list until NULL is found, extracting values * as tokens are found. It can receive or return an in-progress bson object * via the ctx param. It can also operate on the middle of a va_list, and so * can be wrapped inside of another varargs function. * * Note that passing in a va_list that isn't perferectly formatted for BCON * ingestion will almost certainly result in undefined behavior * * The workflow relies on the passed ctx object, which holds a stack of iterator * objects, along with metadata (if the emedded layer is an array, and which * element it is on if so). We iterate, generating tokens from the va_list, * until we reach an END token. If any errors occur, we just blow up (the * var_args stuff is already incredibly fragile to mistakes, and we have no way * of introspecting, so just don't screw it up). * * There are also a few STACK_* macros in here which manimpulate ctx that are * defined up top. * * The function returns true if all tokens could be successfully matched, false * otherwise. 
* */ bool bcon_extract_ctx_va (bson_t *bson, bcon_extract_ctx_t *ctx, va_list *ap) { bcon_type_t type; const char *key; bson_iter_t root_iter; bson_iter_t current_iter; char i_str[16]; bcon_extract_t u = {0}; BSON_ASSERT (bson_iter_init (&root_iter, bson)); while (1) { if (STACK_IS_ARRAY) { bson_uint32_to_string (STACK_I, &key, i_str, sizeof i_str); STACK_I++; } else { type = _bcon_extract_tokenize (ap, &u); if (type == BCON_TYPE_END) { return true; } if (type == BCON_TYPE_DOC_END) { STACK_POP_DOC (_noop ()); continue; } BSON_ASSERT (type == BCON_TYPE_RAW); key = u.key; } type = _bcon_extract_tokenize (ap, &u); BSON_ASSERT (type != BCON_TYPE_END); if (type == BCON_TYPE_DOC_END) { STACK_POP_DOC (_noop ()); } else if (type == BCON_TYPE_ARRAY_END) { STACK_POP_ARRAY (_noop ()); } else { memcpy (¤t_iter, STACK_ITER_CHILD, sizeof current_iter); if (!bson_iter_find (¤t_iter, key)) { return false; } switch ((int) type) { case BCON_TYPE_DOC_START: if (bson_iter_type (¤t_iter) != BSON_TYPE_DOCUMENT) { return false; } STACK_PUSH_DOC ( bson_iter_recurse (¤t_iter, STACK_ITER_CHILD)); break; case BCON_TYPE_ARRAY_START: if (bson_iter_type (¤t_iter) != BSON_TYPE_ARRAY) { return false; } STACK_PUSH_ARRAY ( bson_iter_recurse (¤t_iter, STACK_ITER_CHILD)); break; default: if (!_bcon_extract_single (¤t_iter, type, &u)) { return false; } break; } } } } void bcon_extract_ctx_init (bcon_extract_ctx_t *ctx) { ctx->n = 0; ctx->stack[0].is_array = false; } bool bcon_extract (bson_t *bson, ...) { va_list ap; bcon_extract_ctx_t ctx; bool r; bcon_extract_ctx_init (&ctx); va_start (ap, bson); r = bcon_extract_ctx_va (bson, &ctx, &ap); va_end (ap); return r; } void bcon_append (bson_t *bson, ...) { va_list ap; bcon_append_ctx_t ctx; bcon_append_ctx_init (&ctx); va_start (ap, bson); bcon_append_ctx_va (bson, &ctx, &ap); va_end (ap); } void bcon_append_ctx (bson_t *bson, bcon_append_ctx_t *ctx, ...) { va_list ap; va_start (ap, ctx); bcon_append_ctx_va (bson, ctx, &ap); va_end (ap); } void bcon_extract_ctx (bson_t *bson, bcon_extract_ctx_t *ctx, ...) { va_list ap; va_start (ap, ctx); bcon_extract_ctx_va (bson, ctx, &ap); va_end (ap); } void bcon_append_ctx_init (bcon_append_ctx_t *ctx) { ctx->n = 0; ctx->stack[0].is_array = 0; } bson_t * bcon_new (void *unused, ...) { va_list ap; bcon_append_ctx_t ctx; bson_t *bson; bcon_append_ctx_init (&ctx); bson = bson_new (); va_start (ap, unused); bcon_append_ctx_va (bson, &ctx, &ap); va_end (ap); return bson; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bcon.h0000644000076500000240000002533613572250757022273 0ustar alcaeusstaff/* * @file bcon.h * @brief BCON (BSON C Object Notation) Declarations */ #include "bson/bson-prelude.h" /* Copyright 2009-2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef BCON_H_ #define BCON_H_ #include "bson/bson.h" BSON_BEGIN_DECLS #define BCON_STACK_MAX 100 #define BCON_ENSURE_DECLARE(fun, type) \ static BSON_INLINE type bcon_ensure_##fun (type _t) \ { \ return _t; \ } #define BCON_ENSURE(fun, val) bcon_ensure_##fun (val) #define BCON_ENSURE_STORAGE(fun, val) bcon_ensure_##fun (&(val)) BCON_ENSURE_DECLARE (const_char_ptr, const char *) BCON_ENSURE_DECLARE (const_char_ptr_ptr, const char **) BCON_ENSURE_DECLARE (double, double) BCON_ENSURE_DECLARE (double_ptr, double *) BCON_ENSURE_DECLARE (const_bson_ptr, const bson_t *) BCON_ENSURE_DECLARE (bson_ptr, bson_t *) BCON_ENSURE_DECLARE (subtype, bson_subtype_t) BCON_ENSURE_DECLARE (subtype_ptr, bson_subtype_t *) BCON_ENSURE_DECLARE (const_uint8_ptr, const uint8_t *) BCON_ENSURE_DECLARE (const_uint8_ptr_ptr, const uint8_t **) BCON_ENSURE_DECLARE (uint32, uint32_t) BCON_ENSURE_DECLARE (uint32_ptr, uint32_t *) BCON_ENSURE_DECLARE (const_oid_ptr, const bson_oid_t *) BCON_ENSURE_DECLARE (const_oid_ptr_ptr, const bson_oid_t **) BCON_ENSURE_DECLARE (int32, int32_t) BCON_ENSURE_DECLARE (int32_ptr, int32_t *) BCON_ENSURE_DECLARE (int64, int64_t) BCON_ENSURE_DECLARE (int64_ptr, int64_t *) BCON_ENSURE_DECLARE (const_decimal128_ptr, const bson_decimal128_t *) BCON_ENSURE_DECLARE (bool, bool) BCON_ENSURE_DECLARE (bool_ptr, bool *) BCON_ENSURE_DECLARE (bson_type, bson_type_t) BCON_ENSURE_DECLARE (bson_iter_ptr, bson_iter_t *) BCON_ENSURE_DECLARE (const_bson_iter_ptr, const bson_iter_t *) #define BCON_UTF8(_val) \ BCON_MAGIC, BCON_TYPE_UTF8, BCON_ENSURE (const_char_ptr, (_val)) #define BCON_DOUBLE(_val) \ BCON_MAGIC, BCON_TYPE_DOUBLE, BCON_ENSURE (double, (_val)) #define BCON_DOCUMENT(_val) \ BCON_MAGIC, BCON_TYPE_DOCUMENT, BCON_ENSURE (const_bson_ptr, (_val)) #define BCON_ARRAY(_val) \ BCON_MAGIC, BCON_TYPE_ARRAY, BCON_ENSURE (const_bson_ptr, (_val)) #define BCON_BIN(_subtype, _binary, _length) \ BCON_MAGIC, BCON_TYPE_BIN, BCON_ENSURE (subtype, (_subtype)), \ BCON_ENSURE (const_uint8_ptr, (_binary)), \ BCON_ENSURE (uint32, (_length)) #define BCON_UNDEFINED BCON_MAGIC, BCON_TYPE_UNDEFINED #define BCON_OID(_val) \ BCON_MAGIC, BCON_TYPE_OID, BCON_ENSURE (const_oid_ptr, (_val)) #define BCON_BOOL(_val) BCON_MAGIC, BCON_TYPE_BOOL, BCON_ENSURE (bool, (_val)) #define BCON_DATE_TIME(_val) \ BCON_MAGIC, BCON_TYPE_DATE_TIME, BCON_ENSURE (int64, (_val)) #define BCON_NULL BCON_MAGIC, BCON_TYPE_NULL #define BCON_REGEX(_regex, _flags) \ BCON_MAGIC, BCON_TYPE_REGEX, BCON_ENSURE (const_char_ptr, (_regex)), \ BCON_ENSURE (const_char_ptr, (_flags)) #define BCON_DBPOINTER(_collection, _oid) \ BCON_MAGIC, BCON_TYPE_DBPOINTER, \ BCON_ENSURE (const_char_ptr, (_collection)), \ BCON_ENSURE (const_oid_ptr, (_oid)) #define BCON_CODE(_val) \ BCON_MAGIC, BCON_TYPE_CODE, BCON_ENSURE (const_char_ptr, (_val)) #define BCON_SYMBOL(_val) \ BCON_MAGIC, BCON_TYPE_SYMBOL, BCON_ENSURE (const_char_ptr, (_val)) #define BCON_CODEWSCOPE(_js, _scope) \ BCON_MAGIC, BCON_TYPE_CODEWSCOPE, BCON_ENSURE (const_char_ptr, (_js)), \ BCON_ENSURE (const_bson_ptr, (_scope)) #define BCON_INT32(_val) \ BCON_MAGIC, BCON_TYPE_INT32, BCON_ENSURE (int32, (_val)) #define BCON_TIMESTAMP(_timestamp, _increment) \ BCON_MAGIC, BCON_TYPE_TIMESTAMP, BCON_ENSURE (int32, (_timestamp)), \ BCON_ENSURE (int32, (_increment)) #define BCON_INT64(_val) \ BCON_MAGIC, BCON_TYPE_INT64, BCON_ENSURE (int64, (_val)) #define BCON_DECIMAL128(_val) \ BCON_MAGIC, BCON_TYPE_DECIMAL128, BCON_ENSURE (const_decimal128_ptr, (_val)) #define BCON_MAXKEY BCON_MAGIC, BCON_TYPE_MAXKEY #define BCON_MINKEY 
BCON_MAGIC, BCON_TYPE_MINKEY #define BCON(_val) \ BCON_MAGIC, BCON_TYPE_BCON, BCON_ENSURE (const_bson_ptr, (_val)) #define BCON_ITER(_val) \ BCON_MAGIC, BCON_TYPE_ITER, BCON_ENSURE (const_bson_iter_ptr, (_val)) #define BCONE_UTF8(_val) \ BCONE_MAGIC, BCON_TYPE_UTF8, BCON_ENSURE_STORAGE (const_char_ptr_ptr, (_val)) #define BCONE_DOUBLE(_val) \ BCONE_MAGIC, BCON_TYPE_DOUBLE, BCON_ENSURE_STORAGE (double_ptr, (_val)) #define BCONE_DOCUMENT(_val) \ BCONE_MAGIC, BCON_TYPE_DOCUMENT, BCON_ENSURE_STORAGE (bson_ptr, (_val)) #define BCONE_ARRAY(_val) \ BCONE_MAGIC, BCON_TYPE_ARRAY, BCON_ENSURE_STORAGE (bson_ptr, (_val)) #define BCONE_BIN(subtype, binary, length) \ BCONE_MAGIC, BCON_TYPE_BIN, BCON_ENSURE_STORAGE (subtype_ptr, (subtype)), \ BCON_ENSURE_STORAGE (const_uint8_ptr_ptr, (binary)), \ BCON_ENSURE_STORAGE (uint32_ptr, (length)) #define BCONE_UNDEFINED BCONE_MAGIC, BCON_TYPE_UNDEFINED #define BCONE_OID(_val) \ BCONE_MAGIC, BCON_TYPE_OID, BCON_ENSURE_STORAGE (const_oid_ptr_ptr, (_val)) #define BCONE_BOOL(_val) \ BCONE_MAGIC, BCON_TYPE_BOOL, BCON_ENSURE_STORAGE (bool_ptr, (_val)) #define BCONE_DATE_TIME(_val) \ BCONE_MAGIC, BCON_TYPE_DATE_TIME, BCON_ENSURE_STORAGE (int64_ptr, (_val)) #define BCONE_NULL BCONE_MAGIC, BCON_TYPE_NULL #define BCONE_REGEX(_regex, _flags) \ BCONE_MAGIC, BCON_TYPE_REGEX, \ BCON_ENSURE_STORAGE (const_char_ptr_ptr, (_regex)), \ BCON_ENSURE_STORAGE (const_char_ptr_ptr, (_flags)) #define BCONE_DBPOINTER(_collection, _oid) \ BCONE_MAGIC, BCON_TYPE_DBPOINTER, \ BCON_ENSURE_STORAGE (const_char_ptr_ptr, (_collection)), \ BCON_ENSURE_STORAGE (const_oid_ptr_ptr, (_oid)) #define BCONE_CODE(_val) \ BCONE_MAGIC, BCON_TYPE_CODE, BCON_ENSURE_STORAGE (const_char_ptr_ptr, (_val)) #define BCONE_SYMBOL(_val) \ BCONE_MAGIC, BCON_TYPE_SYMBOL, \ BCON_ENSURE_STORAGE (const_char_ptr_ptr, (_val)) #define BCONE_CODEWSCOPE(_js, _scope) \ BCONE_MAGIC, BCON_TYPE_CODEWSCOPE, \ BCON_ENSURE_STORAGE (const_char_ptr_ptr, (_js)), \ BCON_ENSURE_STORAGE (bson_ptr, (_scope)) #define BCONE_INT32(_val) \ BCONE_MAGIC, BCON_TYPE_INT32, BCON_ENSURE_STORAGE (int32_ptr, (_val)) #define BCONE_TIMESTAMP(_timestamp, _increment) \ BCONE_MAGIC, BCON_TYPE_TIMESTAMP, \ BCON_ENSURE_STORAGE (int32_ptr, (_timestamp)), \ BCON_ENSURE_STORAGE (int32_ptr, (_increment)) #define BCONE_INT64(_val) \ BCONE_MAGIC, BCON_TYPE_INT64, BCON_ENSURE_STORAGE (int64_ptr, (_val)) #define BCONE_DECIMAL128(_val) \ BCONE_MAGIC, BCON_TYPE_DECIMAL128, \ BCON_ENSURE_STORAGE (const_decimal128_ptr, (_val)) #define BCONE_MAXKEY BCONE_MAGIC, BCON_TYPE_MAXKEY #define BCONE_MINKEY BCONE_MAGIC, BCON_TYPE_MINKEY #define BCONE_SKIP(_val) \ BCONE_MAGIC, BCON_TYPE_SKIP, BCON_ENSURE (bson_type, (_val)) #define BCONE_ITER(_val) \ BCONE_MAGIC, BCON_TYPE_ITER, BCON_ENSURE_STORAGE (bson_iter_ptr, (_val)) #define BCON_MAGIC bson_bcon_magic () #define BCONE_MAGIC bson_bcone_magic () typedef enum { BCON_TYPE_UTF8, BCON_TYPE_DOUBLE, BCON_TYPE_DOCUMENT, BCON_TYPE_ARRAY, BCON_TYPE_BIN, BCON_TYPE_UNDEFINED, BCON_TYPE_OID, BCON_TYPE_BOOL, BCON_TYPE_DATE_TIME, BCON_TYPE_NULL, BCON_TYPE_REGEX, BCON_TYPE_DBPOINTER, BCON_TYPE_CODE, BCON_TYPE_SYMBOL, BCON_TYPE_CODEWSCOPE, BCON_TYPE_INT32, BCON_TYPE_TIMESTAMP, BCON_TYPE_INT64, BCON_TYPE_DECIMAL128, BCON_TYPE_MAXKEY, BCON_TYPE_MINKEY, BCON_TYPE_BCON, BCON_TYPE_ARRAY_START, BCON_TYPE_ARRAY_END, BCON_TYPE_DOC_START, BCON_TYPE_DOC_END, BCON_TYPE_END, BCON_TYPE_RAW, BCON_TYPE_SKIP, BCON_TYPE_ITER, BCON_TYPE_ERROR, } bcon_type_t; typedef struct bcon_append_ctx_frame { int i; bool is_array; bson_t bson; } bcon_append_ctx_frame_t; 
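/*
 * Typical use of the macros above goes through BCON_NEW, BCON_APPEND and
 * BCON_EXTRACT (defined further down in this header). A minimal sketch
 * follows; the document keys, values and variable names are illustrative
 * only, not part of libbson:
 *
 *    bson_t *doc;
 *    const char *name = NULL;
 *    int32_t age = 0;
 *
 *    doc = BCON_NEW ("name", BCON_UTF8 ("Alice"), "age", BCON_INT32 (30));
 *
 *    if (BCON_EXTRACT (doc, "name", BCONE_UTF8 (name), "age", BCONE_INT32 (age))) {
 *       printf ("%s is %d\n", name, (int) age);
 *    }
 *
 *    bson_destroy (doc);
 */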
typedef struct bcon_extract_ctx_frame { int i; bool is_array; bson_iter_t iter; } bcon_extract_ctx_frame_t; typedef struct _bcon_append_ctx_t { bcon_append_ctx_frame_t stack[BCON_STACK_MAX]; int n; } bcon_append_ctx_t; typedef struct _bcon_extract_ctx_t { bcon_extract_ctx_frame_t stack[BCON_STACK_MAX]; int n; } bcon_extract_ctx_t; BSON_EXPORT (void) bcon_append (bson_t *bson, ...) BSON_GNUC_NULL_TERMINATED; BSON_EXPORT (void) bcon_append_ctx (bson_t *bson, bcon_append_ctx_t *ctx, ...) BSON_GNUC_NULL_TERMINATED; BSON_EXPORT (void) bcon_append_ctx_va (bson_t *bson, bcon_append_ctx_t *ctx, va_list *va); BSON_EXPORT (void) bcon_append_ctx_init (bcon_append_ctx_t *ctx); BSON_EXPORT (void) bcon_extract_ctx_init (bcon_extract_ctx_t *ctx); BSON_EXPORT (void) bcon_extract_ctx (bson_t *bson, bcon_extract_ctx_t *ctx, ...) BSON_GNUC_NULL_TERMINATED; BSON_EXPORT (bool) bcon_extract_ctx_va (bson_t *bson, bcon_extract_ctx_t *ctx, va_list *ap); BSON_EXPORT (bool) bcon_extract (bson_t *bson, ...) BSON_GNUC_NULL_TERMINATED; BSON_EXPORT (bool) bcon_extract_va (bson_t *bson, bcon_extract_ctx_t *ctx, ...) BSON_GNUC_NULL_TERMINATED; BSON_EXPORT (bson_t *) bcon_new (void *unused, ...) BSON_GNUC_NULL_TERMINATED; /** * The bcon_..() functions are all declared with __attribute__((sentinel)). * * From GCC manual for "sentinel": "A valid NULL in this context is defined as * zero with any pointer type. If your system defines the NULL macro with an * integer type then you need to add an explicit cast." * Case in point: GCC on Solaris (at least) */ #define BCON_APPEND(_bson, ...) \ bcon_append ((_bson), __VA_ARGS__, (void *) NULL) #define BCON_APPEND_CTX(_bson, _ctx, ...) \ bcon_append_ctx ((_bson), (_ctx), __VA_ARGS__, (void *) NULL) #define BCON_EXTRACT(_bson, ...) \ bcon_extract ((_bson), __VA_ARGS__, (void *) NULL) #define BCON_EXTRACT_CTX(_bson, _ctx, ...) \ bcon_extract ((_bson), (_ctx), __VA_ARGS__, (void *) NULL) #define BCON_NEW(...) bcon_new (NULL, __VA_ARGS__, (void *) NULL) BSON_EXPORT (const char *) bson_bcon_magic (void) BSON_GNUC_PURE; BSON_EXPORT (const char *) bson_bcone_magic (void) BSON_GNUC_PURE; BSON_END_DECLS #endif mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-atomic.c0000644000076500000240000000323213572250757023547 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-atomic.h" /* * We should only ever hit these on non-Windows systems, for which we require * pthread support. Therefore, we will avoid making a threading portability * for threads here and just use pthreads directly. 
*/ #ifdef __BSON_NEED_BARRIER #include static pthread_mutex_t gBarrier = PTHREAD_MUTEX_INITIALIZER; void bson_memory_barrier (void) { pthread_mutex_lock (&gBarrier); pthread_mutex_unlock (&gBarrier); } #endif #ifdef __BSON_NEED_ATOMIC_32 #include static pthread_mutex_t gSync32 = PTHREAD_MUTEX_INITIALIZER; int32_t bson_atomic_int_add (volatile int32_t *p, int32_t n) { int ret; pthread_mutex_lock (&gSync32); *p += n; ret = *p; pthread_mutex_unlock (&gSync32); return ret; } #endif #ifdef __BSON_NEED_ATOMIC_64 #include static pthread_mutex_t gSync64 = PTHREAD_MUTEX_INITIALIZER; int64_t bson_atomic_int64_add (volatile int64_t *p, int64_t n) { int64_t ret; pthread_mutex_lock (&gSync64); *p += n; ret = *p; pthread_mutex_unlock (&gSync64); return ret; } #endif mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-atomic.h0000644000076500000240000000550313572250757023557 0ustar alcaeusstaff/* * Copyright 2013-2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_ATOMIC_H #define BSON_ATOMIC_H #include "bson/bson-config.h" #include "bson/bson-compat.h" #include "bson/bson-macros.h" BSON_BEGIN_DECLS #if defined(__sun) && defined(__SVR4) /* Solaris */ #include #define bson_atomic_int_add(p, v) \ atomic_add_32_nv ((volatile uint32_t *) p, (v)) #define bson_atomic_int64_add(p, v) \ atomic_add_64_nv ((volatile uint64_t *) p, (v)) #elif defined(_WIN32) /* MSVC/MinGW */ #define bson_atomic_int_add(p, v) \ (InterlockedExchangeAdd ((volatile LONG *) (p), (LONG) (v)) + (LONG) (v)) #define bson_atomic_int64_add(p, v) \ (InterlockedExchangeAdd64 ((volatile LONGLONG *) (p), (LONGLONG) (v)) + \ (LONGLONG) (v)) #else #ifdef BSON_HAVE_ATOMIC_32_ADD_AND_FETCH #define bson_atomic_int_add(p, v) __sync_add_and_fetch ((p), (v)) #else #define __BSON_NEED_ATOMIC_32 #endif #ifdef BSON_HAVE_ATOMIC_64_ADD_AND_FETCH #if BSON_GNUC_IS_VERSION(4, 1) /* * GCC 4.1 on i386 can generate buggy 64-bit atomic increment. * So we will work around with a fallback. * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=40693 */ #define __BSON_NEED_ATOMIC_64 #else #define bson_atomic_int64_add(p, v) \ __sync_add_and_fetch ((volatile int64_t *) (p), (int64_t) (v)) #endif #else #define __BSON_NEED_ATOMIC_64 #endif #endif #ifdef __BSON_NEED_ATOMIC_32 BSON_EXPORT (int32_t) bson_atomic_int_add (volatile int32_t *p, int32_t n); #endif #ifdef __BSON_NEED_ATOMIC_64 BSON_EXPORT (int64_t) bson_atomic_int64_add (volatile int64_t *p, int64_t n); #endif #if defined(_WIN32) #define bson_memory_barrier() MemoryBarrier () #elif defined(__GNUC__) #if BSON_GNUC_CHECK_VERSION(4, 1) #define bson_memory_barrier() __sync_synchronize () #else #warning "GCC Pre-4.1 discovered, using inline assembly for memory barrier." 
#define bson_memory_barrier() __asm__ volatile("" ::: "memory") #endif #elif defined(__SUNPRO_C) #include #define bson_memory_barrier() __machine_rw_barrier () #elif defined(__xlC__) #define bson_memory_barrier() __sync () #else #define __BSON_NEED_BARRIER 1 #warning "Unknown compiler, using lock for compiler barrier." BSON_EXPORT (void) bson_memory_barrier (void); #endif BSON_END_DECLS #endif /* BSON_ATOMIC_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-clock.c0000644000076500000240000000765513572250757023403 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef __APPLE__ #include #include #include #include #endif #include "bson/bson-config.h" #include "bson/bson-compat.h" #if defined(BSON_HAVE_CLOCK_GETTIME) #include #include #endif #include "bson/bson-clock.h" /* *-------------------------------------------------------------------------- * * bson_gettimeofday -- * * A wrapper around gettimeofday() with fallback support for Windows. * * Returns: * 0 if successful. * * Side effects: * @tv is set. * *-------------------------------------------------------------------------- */ int bson_gettimeofday (struct timeval *tv) /* OUT */ { #if defined(_WIN32) #if defined(_MSC_VER) #define DELTA_EPOCH_IN_MICROSEC 11644473600000000Ui64 #else #define DELTA_EPOCH_IN_MICROSEC 11644473600000000ULL #endif FILETIME ft; uint64_t tmp = 0; /* * The const value is shamelessly stolen from * http://www.boost.org/doc/libs/1_55_0/boost/chrono/detail/inlined/win/chrono.hpp * * File times are the number of 100 nanosecond intervals elapsed since * 12:00 am Jan 1, 1601 UTC. I haven't check the math particularly hard * * ... good luck */ if (tv) { GetSystemTimeAsFileTime (&ft); /* pull out of the filetime into a 64 bit uint */ tmp |= ft.dwHighDateTime; tmp <<= 32; tmp |= ft.dwLowDateTime; /* convert from 100's of nanosecs to microsecs */ tmp /= 10; /* adjust to unix epoch */ tmp -= DELTA_EPOCH_IN_MICROSEC; tv->tv_sec = (long) (tmp / 1000000UL); tv->tv_usec = (long) (tmp % 1000000UL); } return 0; #else return gettimeofday (tv, NULL); #endif } /* *-------------------------------------------------------------------------- * * bson_get_monotonic_time -- * * Returns the monotonic system time, if available. A best effort is * made to use the monotonic clock. However, some systems may not * support such a feature. * * Returns: * The monotonic clock in microseconds. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int64_t bson_get_monotonic_time (void) { #if defined(BSON_HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC) struct timespec ts; /* ts.tv_sec may be a four-byte integer on 32 bit machines, so cast to * int64_t to avoid truncation. 
*/ clock_gettime (CLOCK_MONOTONIC, &ts); return (((int64_t) ts.tv_sec * 1000000) + (ts.tv_nsec / 1000)); #elif defined(__APPLE__) static mach_timebase_info_data_t info = {0}; static double ratio = 0.0; if (!info.denom) { /* the value from mach_absolute_time () * info.numer / info.denom * is in nano seconds. So we have to divid by 1000.0 to get micro * seconds*/ mach_timebase_info (&info); ratio = (double) info.numer / (double) info.denom / 1000.0; } return mach_absolute_time () * ratio; #elif defined(_WIN32) /* Despite it's name, this is in milliseconds! */ int64_t ticks = GetTickCount64 (); return (ticks * 1000); #elif defined(__hpux__) int64_t nanosec = gethrtime (); return (nanosec / 1000UL); #else #warning "Monotonic clock is not yet supported on your platform." struct timeval tv; bson_gettimeofday (&tv); return ((int64_t) tv.tv_sec * 1000000) + tv.tv_usec; #endif } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-clock.h0000644000076500000240000000165413572250757023401 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_CLOCK_H #define BSON_CLOCK_H #include "bson/bson-compat.h" #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS BSON_EXPORT (int64_t) bson_get_monotonic_time (void); BSON_EXPORT (int) bson_gettimeofday (struct timeval *tv); BSON_END_DECLS #endif /* BSON_CLOCK_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-compat.h0000644000076500000240000000712113572250757023564 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson-prelude.h" #ifndef BSON_COMPAT_H #define BSON_COMPAT_H #if defined(__MINGW32__) #if defined(__USE_MINGW_ANSI_STDIO) #if __USE_MINGW_ANSI_STDIO < 1 #error "__USE_MINGW_ANSI_STDIO > 0 is required for correct PRI* macros" #endif #else #define __USE_MINGW_ANSI_STDIO 1 #endif #endif #include "bson/bson-config.h" #include "bson/bson-macros.h" #ifdef BSON_OS_WIN32 #if defined(_WIN32_WINNT) && (_WIN32_WINNT < 0x0600) #undef _WIN32_WINNT #endif #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x0600 #endif #ifndef NOMINMAX #define NOMINMAX #endif #include #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #include #undef WIN32_LEAN_AND_MEAN #else #include #endif #include #include #endif #ifdef BSON_OS_UNIX #include #include #endif #include "bson/bson-macros.h" #include #include #include #include #include #include #include #include #include #include #include BSON_BEGIN_DECLS #if !defined(_MSC_VER) || (_MSC_VER >= 1800) #include #endif #ifdef _MSC_VER #ifndef __cplusplus /* benign redefinition of type */ #pragma warning(disable : 4142) #ifndef _SSIZE_T_DEFINED #define _SSIZE_T_DEFINED typedef SSIZE_T ssize_t; #endif #ifndef _SIZE_T_DEFINED #define _SIZE_T_DEFINED typedef SIZE_T size_t; #endif #pragma warning(default : 4142) #else /* * MSVC++ does not include ssize_t, just size_t. * So we need to synthesize that as well. */ #pragma warning(disable : 4142) #ifndef _SSIZE_T_DEFINED #define _SSIZE_T_DEFINED typedef SSIZE_T ssize_t; #endif #pragma warning(default : 4142) #endif #ifndef PRIi32 #define PRIi32 "d" #endif #ifndef PRId32 #define PRId32 "d" #endif #ifndef PRIu32 #define PRIu32 "u" #endif #ifndef PRIi64 #define PRIi64 "I64i" #endif #ifndef PRId64 #define PRId64 "I64i" #endif #ifndef PRIu64 #define PRIu64 "I64u" #endif #endif #if defined(__MINGW32__) && !defined(INIT_ONCE_STATIC_INIT) #define INIT_ONCE_STATIC_INIT RTL_RUN_ONCE_INIT typedef RTL_RUN_ONCE INIT_ONCE; #endif #ifdef BSON_HAVE_STDBOOL_H #include #elif !defined(__bool_true_false_are_defined) #ifndef __cplusplus typedef signed char bool; #define false 0 #define true 1 #endif #define __bool_true_false_are_defined 1 #endif #if defined(__GNUC__) #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1) #define bson_sync_synchronize() __sync_synchronize () #elif defined(__i386__) || defined(__i486__) || defined(__i586__) || \ defined(__i686__) || defined(__x86_64__) #define bson_sync_synchronize() asm volatile("mfence" ::: "memory") #else #define bson_sync_synchronize() asm volatile("sync" ::: "memory") #endif #elif defined(_MSC_VER) #define bson_sync_synchronize() MemoryBarrier () #endif #if !defined(va_copy) && defined(__va_copy) #define va_copy(dst, src) __va_copy (dst, src) #endif #if !defined(va_copy) #define va_copy(dst, src) ((dst) = (src)) #endif BSON_END_DECLS #endif /* BSON_COMPAT_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-config.h0000644000076500000240000000630513572250757023551 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #if !defined(BSON_INSIDE) && !defined(BSON_COMPILATION) #error "Only can be included directly." #endif #ifndef BSON_CONFIG_H #define BSON_CONFIG_H /* * Define to 1234 for Little Endian, 4321 for Big Endian. */ #define BSON_BYTE_ORDER 1234 /* * Define to 1 if you have stdbool.h */ #define BSON_HAVE_STDBOOL_H 1 #if BSON_HAVE_STDBOOL_H != 1 # undef BSON_HAVE_STDBOOL_H #endif /* * Define to 1 for POSIX-like systems, 2 for Windows. */ #define BSON_OS 1 /* * Define to 1 if we have access to GCC 32-bit atomic builtins. * While this requires GCC 4.1+ in most cases, it is also architecture * dependent. For example, some PPC or ARM systems may not have it even * if it is a recent GCC version. */ #define BSON_HAVE_ATOMIC_32_ADD_AND_FETCH 1 #if BSON_HAVE_ATOMIC_32_ADD_AND_FETCH != 1 # undef BSON_HAVE_ATOMIC_32_ADD_AND_FETCH #endif /* * Similarly, define to 1 if we have access to GCC 64-bit atomic builtins. */ #define BSON_HAVE_ATOMIC_64_ADD_AND_FETCH 1 #if BSON_HAVE_ATOMIC_64_ADD_AND_FETCH != 1 # undef BSON_HAVE_ATOMIC_64_ADD_AND_FETCH #endif /* * Define to 1 if you have clock_gettime() available. */ #define BSON_HAVE_CLOCK_GETTIME 1 #if BSON_HAVE_CLOCK_GETTIME != 1 # undef BSON_HAVE_CLOCK_GETTIME #endif /* * Define to 1 if you have strings.h available on your platform. */ #define BSON_HAVE_STRINGS_H 1 #if BSON_HAVE_STRINGS_H != 1 # undef BSON_HAVE_STRINGS_H #endif /* * Define to 1 if you have strnlen available on your platform. */ #define BSON_HAVE_STRNLEN 1 #if BSON_HAVE_STRNLEN != 1 # undef BSON_HAVE_STRNLEN #endif /* * Define to 1 if you have snprintf available on your platform. */ #define BSON_HAVE_SNPRINTF 1 #if BSON_HAVE_SNPRINTF != 1 # undef BSON_HAVE_SNPRINTF #endif /* * Define to 1 if you have gmtime_r available on your platform. */ #define BSON_HAVE_GMTIME_R 1 #if BSON_HAVE_GMTIME_R != 1 # undef BSON_HAVE_GMTIME_R #endif /* * Define to 1 if you have reallocf available on your platform. */ #define BSON_HAVE_REALLOCF 1 #if BSON_HAVE_REALLOCF != 1 # undef BSON_HAVE_REALLOCF #endif /* * Define to 1 if you have struct timespec available on your platform. */ #define BSON_HAVE_TIMESPEC 1 #if BSON_HAVE_TIMESPEC != 1 # undef BSON_HAVE_TIMESPEC #endif /* * Define to 1 if you want extra aligned types in libbson */ #define BSON_EXTRA_ALIGN 0 #if BSON_EXTRA_ALIGN != 1 # undef BSON_EXTRA_ALIGN #endif /* * Define to 1 if you have SYS_gettid syscall */ #define BSON_HAVE_SYSCALL_TID 0 #if BSON_HAVE_SYSCALL_TID != 1 # undef BSON_HAVE_SYSCALL_TID #endif #define BSON_HAVE_RAND_R 1 #if BSON_HAVE_RAND_R != 1 # undef BSON_HAVE_RAND_R #endif #endif /* BSON_CONFIG_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-config.h.in0000644000076500000240000000676413572250757024167 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined(BSON_INSIDE) && !defined(BSON_COMPILATION) #error "Only can be included directly." #endif #ifndef BSON_CONFIG_H #define BSON_CONFIG_H /* * Define to 1234 for Little Endian, 4321 for Big Endian. 
*/ #define BSON_BYTE_ORDER @BSON_BYTE_ORDER@ /* * Define to 1 if you have stdbool.h */ #define BSON_HAVE_STDBOOL_H @BSON_HAVE_STDBOOL_H@ #if BSON_HAVE_STDBOOL_H != 1 # undef BSON_HAVE_STDBOOL_H #endif /* * Define to 1 for POSIX-like systems, 2 for Windows. */ #define BSON_OS @BSON_OS@ /* * Define to 1 if we have access to GCC 32-bit atomic builtins. * While this requires GCC 4.1+ in most cases, it is also architecture * dependent. For example, some PPC or ARM systems may not have it even * if it is a recent GCC version. */ #define BSON_HAVE_ATOMIC_32_ADD_AND_FETCH @BSON_HAVE_ATOMIC_32_ADD_AND_FETCH@ #if BSON_HAVE_ATOMIC_32_ADD_AND_FETCH != 1 # undef BSON_HAVE_ATOMIC_32_ADD_AND_FETCH #endif /* * Similarly, define to 1 if we have access to GCC 64-bit atomic builtins. */ #define BSON_HAVE_ATOMIC_64_ADD_AND_FETCH @BSON_HAVE_ATOMIC_64_ADD_AND_FETCH@ #if BSON_HAVE_ATOMIC_64_ADD_AND_FETCH != 1 # undef BSON_HAVE_ATOMIC_64_ADD_AND_FETCH #endif /* * Define to 1 if you have clock_gettime() available. */ #define BSON_HAVE_CLOCK_GETTIME @BSON_HAVE_CLOCK_GETTIME@ #if BSON_HAVE_CLOCK_GETTIME != 1 # undef BSON_HAVE_CLOCK_GETTIME #endif /* * Define to 1 if you have strings.h available on your platform. */ #define BSON_HAVE_STRINGS_H @BSON_HAVE_STRINGS_H@ #if BSON_HAVE_STRINGS_H != 1 # undef BSON_HAVE_STRINGS_H #endif /* * Define to 1 if you have strnlen available on your platform. */ #define BSON_HAVE_STRNLEN @BSON_HAVE_STRNLEN@ #if BSON_HAVE_STRNLEN != 1 # undef BSON_HAVE_STRNLEN #endif /* * Define to 1 if you have snprintf available on your platform. */ #define BSON_HAVE_SNPRINTF @BSON_HAVE_SNPRINTF@ #if BSON_HAVE_SNPRINTF != 1 # undef BSON_HAVE_SNPRINTF #endif /* * Define to 1 if you have gmtime_r available on your platform. */ #define BSON_HAVE_GMTIME_R @BSON_HAVE_GMTIME_R@ #if BSON_HAVE_GMTIME_R != 1 # undef BSON_HAVE_GMTIME_R #endif /* * Define to 1 if you have reallocf available on your platform. */ #define BSON_HAVE_REALLOCF @BSON_HAVE_REALLOCF@ #if BSON_HAVE_REALLOCF != 1 # undef BSON_HAVE_REALLOCF #endif /* * Define to 1 if you have struct timespec available on your platform. */ #define BSON_HAVE_TIMESPEC @BSON_HAVE_TIMESPEC@ #if BSON_HAVE_TIMESPEC != 1 # undef BSON_HAVE_TIMESPEC #endif /* * Define to 1 if you want extra aligned types in libbson */ #define BSON_EXTRA_ALIGN @BSON_EXTRA_ALIGN@ #if BSON_EXTRA_ALIGN != 1 # undef BSON_EXTRA_ALIGN #endif /* * Define to 1 if you have SYS_gettid syscall */ #define BSON_HAVE_SYSCALL_TID @BSON_HAVE_SYSCALL_TID@ #if BSON_HAVE_SYSCALL_TID != 1 # undef BSON_HAVE_SYSCALL_TID #endif #define BSON_HAVE_RAND_R @BSON_HAVE_RAND_R@ #if BSON_HAVE_RAND_R != 1 # undef BSON_HAVE_RAND_R #endif #endif /* BSON_CONFIG_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-context-private.h0000644000076500000240000000246013572250757025436 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson-prelude.h" #ifndef BSON_CONTEXT_PRIVATE_H #define BSON_CONTEXT_PRIVATE_H #include "bson/bson-context.h" #include "common-thread-private.h" BSON_BEGIN_DECLS struct _bson_context_t { /* flags are defined in bson_context_flags_t */ int flags; int32_t seq32; int64_t seq64; uint8_t rand[5]; uint16_t pid; void (*oid_set_seq32) (bson_context_t *context, bson_oid_t *oid); void (*oid_set_seq64) (bson_context_t *context, bson_oid_t *oid); /* this function pointer allows us to mock gethostname for testing. */ void (*gethostname) (char *out); }; void _bson_context_set_oid_rand (bson_context_t *context, bson_oid_t *oid); BSON_END_DECLS #endif /* BSON_CONTEXT_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-context.c0000644000076500000240000002621713572250757023767 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-compat.h" #include #include #include #include #include #include "bson/bson-atomic.h" #include "bson/bson-clock.h" #include "bson/bson-context.h" #include "bson/bson-context-private.h" #include "bson/bson-memory.h" #include "common-thread-private.h" #ifdef BSON_HAVE_SYSCALL_TID #include #endif #ifndef HOST_NAME_MAX #define HOST_NAME_MAX 256 #endif /* * Globals. */ static bson_context_t gContextDefault; static BSON_INLINE uint16_t _bson_getpid (void) { uint16_t pid; #ifdef BSON_OS_WIN32 DWORD real_pid; real_pid = GetCurrentProcessId (); pid = (real_pid & 0xFFFF) ^ ((real_pid >> 16) & 0xFFFF); #else pid = getpid (); #endif return pid; } /* *-------------------------------------------------------------------------- * * _bson_context_set_oid_seq32 -- * * 32-bit sequence generator, non-thread-safe version. * * Returns: * None. * * Side effects: * @oid is modified. * *-------------------------------------------------------------------------- */ static void _bson_context_set_oid_seq32 (bson_context_t *context, /* IN */ bson_oid_t *oid) /* OUT */ { uint32_t seq = context->seq32++; seq = BSON_UINT32_TO_BE (seq); memcpy (&oid->bytes[9], ((uint8_t *) &seq) + 1, 3); } /* *-------------------------------------------------------------------------- * * _bson_context_set_oid_seq32_threadsafe -- * * Thread-safe version of 32-bit sequence generator. * * Returns: * None. * * Side effects: * @oid is modified. * *-------------------------------------------------------------------------- */ static void _bson_context_set_oid_seq32_threadsafe (bson_context_t *context, /* IN */ bson_oid_t *oid) /* OUT */ { int32_t seq = bson_atomic_int_add (&context->seq32, 1); seq = BSON_UINT32_TO_BE (seq); memcpy (&oid->bytes[9], ((uint8_t *) &seq) + 1, 3); } /* *-------------------------------------------------------------------------- * * _bson_context_set_oid_seq64 -- * * 64-bit oid sequence generator, non-thread-safe version. * * Returns: * None. * * Side effects: * @oid is modified. 
* *-------------------------------------------------------------------------- */ static void _bson_context_set_oid_seq64 (bson_context_t *context, /* IN */ bson_oid_t *oid) /* OUT */ { uint64_t seq; BSON_ASSERT (context); BSON_ASSERT (oid); seq = BSON_UINT64_TO_BE (context->seq64++); memcpy (&oid->bytes[4], &seq, sizeof (seq)); } /* *-------------------------------------------------------------------------- * * _bson_context_set_oid_seq64_threadsafe -- * * Thread-safe 64-bit sequence generator. * * Returns: * None. * * Side effects: * @oid is modified. * *-------------------------------------------------------------------------- */ static void _bson_context_set_oid_seq64_threadsafe (bson_context_t *context, /* IN */ bson_oid_t *oid) /* OUT */ { int64_t seq = bson_atomic_int64_add (&context->seq64, 1); seq = BSON_UINT64_TO_BE (seq); memcpy (&oid->bytes[4], &seq, sizeof (seq)); } static void _bson_context_init_random (bson_context_t *context, bool init_sequence); /* *-------------------------------------------------------------------------- * * _bson_context_set_oid_rand -- * * Sets the process specific five byte random sequence in an oid. * * Returns: * None. * * Side effects: * @oid is modified. * *-------------------------------------------------------------------------- */ void _bson_context_set_oid_rand (bson_context_t *context, bson_oid_t *oid) { BSON_ASSERT (context); BSON_ASSERT (oid); if (context->flags & BSON_CONTEXT_DISABLE_PID_CACHE) { uint16_t pid = _bson_getpid (); if (pid != context->pid) { context->pid = pid; /* randomize the random bytes, not the sequence. */ _bson_context_init_random (context, false); } } memcpy (&oid->bytes[4], &context->rand, sizeof (context->rand)); } /* *-------------------------------------------------------------------------- * * _get_rand -- * * Gets a random four byte integer. Callers that will use the "rand" * function must call "srand" prior. * * Returns: * A random int32_t. * *-------------------------------------------------------------------------- */ static int32_t _get_rand (unsigned int *pseed) { int32_t result = 0; #ifndef BSON_HAVE_RAND_R /* ms's runtime is multithreaded by default, so no rand_r */ /* no rand_r on android either */ result = rand (); #elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \ defined(__OpenBSD__) arc4random_buf (&result, sizeof (result)); #else result = rand_r (pseed); #endif return result; } /* * -------------------------------------------------------------------------- * * _bson_context_get_hostname * * Gets the hostname of the machine, logs a warning on failure. "out" * must be an array of HOST_NAME_MAX bytes. 
* * -------------------------------------------------------------------------- */ static void _bson_context_get_hostname (char *out) { if (gethostname (out, HOST_NAME_MAX) != 0) { if (errno == ENAMETOOLONG) { fprintf (stderr, "hostname exceeds %d characters, truncating.", HOST_NAME_MAX); } else { fprintf (stderr, "unable to get hostname: %d", errno); } } out[HOST_NAME_MAX - 1] = '\0'; } static void _bson_context_init_random (bson_context_t *context, bool init_sequence) { int64_t rand_bytes; struct timeval tv; unsigned int seed = 0; char hostname[HOST_NAME_MAX]; char *ptr; int hostname_chars_left; /* * The seed consists of the following xor'd together: * - current time in seconds * - current time in milliseconds * - current pid * - current hostname */ bson_gettimeofday (&tv); seed ^= (unsigned int) tv.tv_sec; seed ^= (unsigned int) tv.tv_usec; seed ^= (unsigned int) context->pid; context->gethostname (hostname); hostname_chars_left = strlen (hostname); ptr = hostname; while (hostname_chars_left) { uint32_t hostname_chunk = 0; uint32_t to_copy = hostname_chars_left > 4 ? 4 : hostname_chars_left; memcpy (&hostname_chunk, ptr, to_copy); seed ^= (unsigned int) hostname_chunk; hostname_chars_left -= to_copy; ptr += to_copy; } #ifndef BSON_HAVE_RAND_R srand (seed); #endif /* Generate a seed for the random starting position of our increment * bytes and the five byte random number. */ if (init_sequence) { /* We mask off the last nibble so that the last digit of the OID will * start at zero. Just to be nice. */ context->seq32 = _get_rand (&seed) & 0x007FFFF0; } rand_bytes = _get_rand (&seed); rand_bytes <<= 32; rand_bytes |= _get_rand (&seed); /* Copy five random bytes, endianness does not matter. */ memcpy (&context->rand, (char *) &rand_bytes, sizeof (context->rand)); } static void _bson_context_init (bson_context_t *context, bson_context_flags_t flags) { context->flags = (int) flags; context->oid_set_seq32 = _bson_context_set_oid_seq32; context->oid_set_seq64 = _bson_context_set_oid_seq64; context->gethostname = _bson_context_get_hostname; if ((flags & BSON_CONTEXT_THREAD_SAFE)) { context->oid_set_seq32 = _bson_context_set_oid_seq32_threadsafe; context->oid_set_seq64 = _bson_context_set_oid_seq64_threadsafe; } context->pid = _bson_getpid (); _bson_context_init_random (context, true); } /* *-------------------------------------------------------------------------- * * bson_context_new -- * * Initializes a new context with the flags specified. * * In most cases, you want to call this with @flags set to * BSON_CONTEXT_NONE. * * If you are running on Linux, %BSON_CONTEXT_USE_TASK_ID can result * in a healthy speedup for multi-threaded scenarios. * * If you absolutely must have a single context for your application * and use more than one thread, then %BSON_CONTEXT_THREAD_SAFE should * be bitwise-or'd with your flags. This requires synchronization * between threads. * * If you expect your hostname to change often, you may consider * specifying %BSON_CONTEXT_DISABLE_HOST_CACHE so that gethostname() * is called for every OID generated. This is much slower. * * If you expect your pid to change without notice, such as from an * unexpected call to fork(), then specify * %BSON_CONTEXT_DISABLE_PID_CACHE. * * Returns: * A newly allocated bson_context_t that should be freed with * bson_context_destroy(). * * Side effects: * None. 
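 *
 * Example (a hedged sketch; bson_oid_init() is declared in bson-oid.h and
 * is assumed here, not shown in this file):
 *
 *    bson_context_t *ctx = bson_context_new (BSON_CONTEXT_NONE);
 *    bson_oid_t oid;
 *
 *    bson_oid_init (&oid, ctx);
 *    ...
 *    bson_context_destroy (ctx);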
* *-------------------------------------------------------------------------- */ bson_context_t * bson_context_new (bson_context_flags_t flags) { bson_context_t *context; context = bson_malloc0 (sizeof *context); _bson_context_init (context, flags); return context; } /* *-------------------------------------------------------------------------- * * bson_context_destroy -- * * Cleans up a bson_context_t and releases any associated resources. * This should be called when you are done using @context. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_context_destroy (bson_context_t *context) /* IN */ { bson_free (context); } static BSON_ONCE_FUN (_bson_context_init_default) { _bson_context_init ( &gContextDefault, (BSON_CONTEXT_THREAD_SAFE | BSON_CONTEXT_DISABLE_PID_CACHE)); BSON_ONCE_RETURN; } /* *-------------------------------------------------------------------------- * * bson_context_get_default -- * * Fetches the default, thread-safe implementation of #bson_context_t. * If you need faster generation, it is recommended you create your * own #bson_context_t with bson_context_new(). * * Returns: * A shared instance to the default #bson_context_t. This should not * be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bson_context_t * bson_context_get_default (void) { static bson_once_t once = BSON_ONCE_INIT; bson_once (&once, _bson_context_init_default); return &gContextDefault; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-context.h0000644000076500000240000000176513572250757023775 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_CONTEXT_H #define BSON_CONTEXT_H #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS BSON_EXPORT (bson_context_t *) bson_context_new (bson_context_flags_t flags); BSON_EXPORT (void) bson_context_destroy (bson_context_t *context); BSON_EXPORT (bson_context_t *) bson_context_get_default (void); BSON_END_DECLS #endif /* BSON_CONTEXT_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-decimal128.c0000644000076500000240000005613213572250757024133 0ustar alcaeusstaff /* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include "bson/bson-decimal128.h" #include "bson/bson-types.h" #include "bson/bson-macros.h" #include "bson/bson-string.h" #define BSON_DECIMAL128_EXPONENT_MAX 6111 #define BSON_DECIMAL128_EXPONENT_MIN -6176 #define BSON_DECIMAL128_EXPONENT_BIAS 6176 #define BSON_DECIMAL128_MAX_DIGITS 34 #define BSON_DECIMAL128_SET_NAN(dec) \ do { \ (dec).high = 0x7c00000000000000ull; \ (dec).low = 0; \ } while (0); #define BSON_DECIMAL128_SET_INF(dec, isneg) \ do { \ (dec).high = 0x7800000000000000ull + 0x8000000000000000ull * (isneg); \ (dec).low = 0; \ } while (0); /** * _bson_uint128_t: * * This struct represents a 128 bit integer. */ typedef struct { uint32_t parts[4]; /* 32-bit words stored high to low. */ } _bson_uint128_t; /** *------------------------------------------------------------------------------ * * _bson_uint128_divide1B -- * * This function divides a #_bson_uint128_t by 1000000000 (1 billion) and * computes the quotient and remainder. * * The remainder will contain 9 decimal digits for conversion to string. * * @value The #_bson_uint128_t operand. * @quotient A pointer to store the #_bson_uint128_t quotient. * @rem A pointer to store the #uint64_t remainder. * * Returns: * The quotient at @quotient and the remainder at @rem. * * Side effects: * None. * *------------------------------------------------------------------------------ */ static void _bson_uint128_divide1B (_bson_uint128_t value, /* IN */ _bson_uint128_t *quotient, /* OUT */ uint32_t *rem) /* OUT */ { const uint32_t DIVISOR = 1000 * 1000 * 1000; uint64_t _rem = 0; int i = 0; if (!value.parts[0] && !value.parts[1] && !value.parts[2] && !value.parts[3]) { *quotient = value; *rem = 0; return; } for (i = 0; i <= 3; i++) { _rem <<= 32; /* Adjust remainder to match value of next dividend */ _rem += value.parts[i]; /* Add the divided to _rem */ value.parts[i] = (uint32_t) (_rem / DIVISOR); _rem %= DIVISOR; /* Store the remainder */ } *quotient = value; *rem = (uint32_t) _rem; } /** *------------------------------------------------------------------------------ * * bson_decimal128_to_string -- * * This function converts a BID formatted decimal128 value to string, * accepting a &bson_decimal128_t as @dec. The string is stored at @str. * * @dec : The BID formatted decimal to convert. * @str : The output decimal128 string. At least %BSON_DECIMAL128_STRING *characters. * * Returns: * None. * * Side effects: * None. * *------------------------------------------------------------------------------ */ void bson_decimal128_to_string (const bson_decimal128_t *dec, /* IN */ char *str) /* OUT */ { uint32_t COMBINATION_MASK = 0x1f; /* Extract least significant 5 bits */ uint32_t EXPONENT_MASK = 0x3fff; /* Extract least significant 14 bits */ uint32_t COMBINATION_INFINITY = 30; /* Value of combination field for Inf */ uint32_t COMBINATION_NAN = 31; /* Value of combination field for NaN */ uint32_t EXPONENT_BIAS = 6176; /* decimal128 exponent bias */ char *str_out = str; /* output pointer in string */ char significand_str[35]; /* decoded significand digits */ /* Note: bits in this routine are referred to starting at 0, */ /* from the sign bit, towards the coefficient. 
*/ uint32_t high; /* bits 0 - 31 */ uint32_t midh; /* bits 32 - 63 */ uint32_t midl; /* bits 64 - 95 */ uint32_t low; /* bits 96 - 127 */ uint32_t combination; /* bits 1 - 5 */ uint32_t biased_exponent; /* decoded biased exponent (14 bits) */ uint32_t significand_digits = 0; /* the number of significand digits */ uint32_t significand[36] = {0}; /* the base-10 digits in the significand */ uint32_t *significand_read = significand; /* read pointer into significand */ int32_t exponent; /* unbiased exponent */ int32_t scientific_exponent; /* the exponent if scientific notation is * used */ bool is_zero = false; /* true if the number is zero */ uint8_t significand_msb; /* the most signifcant significand bits (50-46) */ _bson_uint128_t significand128; /* temporary storage for significand decoding */ size_t i; /* indexing variables */ int j, k; memset (significand_str, 0, sizeof (significand_str)); if ((int64_t) dec->high < 0) { /* negative */ *(str_out++) = '-'; } low = (uint32_t) dec->low, midl = (uint32_t) (dec->low >> 32), midh = (uint32_t) dec->high, high = (uint32_t) (dec->high >> 32); /* Decode combination field and exponent */ combination = (high >> 26) & COMBINATION_MASK; if (BSON_UNLIKELY ((combination >> 3) == 3)) { /* Check for 'special' values */ if (combination == COMBINATION_INFINITY) { /* Infinity */ strcpy (str_out, BSON_DECIMAL128_INF); return; } else if (combination == COMBINATION_NAN) { /* NaN */ /* str, not str_out, to erase the sign */ strcpy (str, BSON_DECIMAL128_NAN); /* we don't care about the NaN payload. */ return; } else { biased_exponent = (high >> 15) & EXPONENT_MASK; significand_msb = 0x8 + ((high >> 14) & 0x1); } } else { significand_msb = (high >> 14) & 0x7; biased_exponent = (high >> 17) & EXPONENT_MASK; } exponent = biased_exponent - EXPONENT_BIAS; /* Create string of significand digits */ /* Convert the 114-bit binary number represented by */ /* (high, midh, midl, low) to at most 34 decimal */ /* digits through modulo and division. */ significand128.parts[0] = (high & 0x3fff) + ((significand_msb & 0xf) << 14); significand128.parts[1] = midh; significand128.parts[2] = midl; significand128.parts[3] = low; if (significand128.parts[0] == 0 && significand128.parts[1] == 0 && significand128.parts[2] == 0 && significand128.parts[3] == 0) { is_zero = true; } else if (significand128.parts[0] >= (1 << 17)) { /* The significand is non-canonical or zero. * In order to preserve compatibility with the densely packed decimal * format, the maximum value for the significand of decimal128 is * 1e34 - 1. If the value is greater than 1e34 - 1, the IEEE 754 * standard dictates that the significand is interpreted as zero. */ is_zero = true; } else { for (k = 3; k >= 0; k--) { uint32_t least_digits = 0; _bson_uint128_divide1B ( significand128, &significand128, &least_digits); /* We now have the 9 least significant digits (in base 2). */ /* Convert and output to string. */ if (!least_digits) { continue; } for (j = 8; j >= 0; j--) { significand[k * 9 + j] = least_digits % 10; least_digits /= 10; } } } /* Output format options: */ /* Scientific - [-]d.dddE(+/-)dd or [-]dE(+/-)dd */ /* Regular - ddd.ddd */ if (is_zero) { significand_digits = 1; *significand_read = 0; } else { significand_digits = 36; while (!(*significand_read)) { significand_digits--; significand_read++; } } scientific_exponent = significand_digits - 1 + exponent; /* The scientific exponent checks are dictated by the string conversion * specification and are somewhat arbitrary cutoffs. 
* * We must check exponent > 0, because if this is the case, the number * has trailing zeros. However, we *cannot* output these trailing zeros, * because doing so would change the precision of the value, and would * change stored data if the string converted number is round tripped. */ if (scientific_exponent < -6 || exponent > 0) { /* Scientific format */ *(str_out++) = *(significand_read++) + '0'; significand_digits--; if (significand_digits) { *(str_out++) = '.'; } for (i = 0; i < significand_digits && (str_out - str) < 36; i++) { *(str_out++) = *(significand_read++) + '0'; } /* Exponent */ *(str_out++) = 'E'; bson_snprintf (str_out, 6, "%+d", scientific_exponent); } else { /* Regular format with no decimal place */ if (exponent >= 0) { for (i = 0; i < significand_digits && (str_out - str) < 36; i++) { *(str_out++) = *(significand_read++) + '0'; } *str_out = '\0'; } else { int32_t radix_position = significand_digits + exponent; if (radix_position > 0) { /* non-zero digits before radix */ for (i = 0; i < radix_position && (str_out - str) < BSON_DECIMAL128_STRING; i++) { *(str_out++) = *(significand_read++) + '0'; } } else { /* leading zero before radix point */ *(str_out++) = '0'; } *(str_out++) = '.'; while (radix_position++ < 0) { /* add leading zeros after radix */ *(str_out++) = '0'; } for (i = 0; (i < significand_digits - BSON_MAX (radix_position - 1, 0)) && (str_out - str) < BSON_DECIMAL128_STRING; i++) { *(str_out++) = *(significand_read++) + '0'; } *str_out = '\0'; } } } typedef struct { uint64_t high, low; } _bson_uint128_6464_t; /** *------------------------------------------------------------------------- * * mul64x64 -- * * This function multiplies two &uint64_t into a &_bson_uint128_6464_t. * * Returns: * The product of @left and @right. * * Side Effects: * None. * *------------------------------------------------------------------------- */ static void _mul_64x64 (uint64_t left, /* IN */ uint64_t right, /* IN */ _bson_uint128_6464_t *product) /* OUT */ { uint64_t left_high, left_low, right_high, right_low, product_high, product_mid, product_mid2, product_low; _bson_uint128_6464_t rt = {0}; if (!left && !right) { *product = rt; return; } left_high = left >> 32; left_low = (uint32_t) left; right_high = right >> 32; right_low = (uint32_t) right; product_high = left_high * right_high; product_mid = left_high * right_low; product_mid2 = left_low * right_high; product_low = left_low * right_low; product_high += product_mid >> 32; product_mid = (uint32_t) product_mid + product_mid2 + (product_low >> 32); product_high = product_high + (product_mid >> 32); product_low = (product_mid << 32) + (uint32_t) product_low; rt.high = product_high; rt.low = product_low; *product = rt; } /** *------------------------------------------------------------------------------ * * _dec128_tolower -- * * This function converts the ASCII character @c to lowercase. It is locale * insensitive (unlike the stdlib tolower). * * Returns: * The lowercased character. */ char _dec128_tolower (char c) { if (isupper (c)) { c += 32; } return c; } /** *------------------------------------------------------------------------------ * * _dec128_istreq -- * * This function compares the null-terminated *ASCII* strings @a and @b * for case-insensitive equality. * * Returns: * true if the strings are equal, false otherwise. */ bool _dec128_istreq (const char *a, /* IN */ const char *b /* IN */) { while (*a != '\0' || *b != '\0') { /* strings are different lengths. 
*/ if (*a == '\0' || *b == '\0') { return false; } if (_dec128_tolower (*a) != _dec128_tolower (*b)) { return false; } a++; b++; } return true; } /** *------------------------------------------------------------------------------ * * bson_decimal128_from_string -- * * This function converts @string in the format [+-]ddd[.]ddd[E][+-]dddd to * decimal128. Out of range values are converted to +/-Infinity. Invalid * strings are converted to NaN. * * If more digits are provided than the available precision allows, * round to the nearest expressable decimal128 with ties going to even will * occur. * * Note: @string must be ASCII only! * * Returns: * true on success, or false on failure. @dec will be NaN if @str was invalid * The &bson_decimal128_t converted from @string at @dec. * * Side effects: * None. * *------------------------------------------------------------------------------ */ bool bson_decimal128_from_string (const char *string, /* IN */ bson_decimal128_t *dec) /* OUT */ { return bson_decimal128_from_string_w_len (string, -1, dec); } /** *------------------------------------------------------------------------------ * * bson_decimal128_from_string_w_len -- * * This function converts @string in the format [+-]ddd[.]ddd[E][+-]dddd to * decimal128. Out of range values are converted to +/-Infinity. Invalid * strings are converted to NaN. @len is the length of the string, or -1 * meaning the string is null-terminated. * * If more digits are provided than the available precision allows, * round to the nearest expressable decimal128 with ties going to even will * occur. * * Note: @string must be ASCII only! * * Returns: * true on success, or false on failure. @dec will be NaN if @str was invalid * The &bson_decimal128_t converted from @string at @dec. * * Side effects: * None. * *------------------------------------------------------------------------------ */ bool bson_decimal128_from_string_w_len (const char *string, /* IN */ int len, /* IN */ bson_decimal128_t *dec) /* OUT */ { _bson_uint128_6464_t significand = {0}; const char *str_read = string; /* Read pointer for consuming str. */ /* Parsing state tracking */ bool is_negative = false; bool saw_radix = false; bool includes_sign = false; /* True if the input string contains a sign. 
*/ bool found_nonzero = false; size_t significant_digits = 0; /* Total number of significant digits * (no leading or trailing zero) */ size_t ndigits_read = 0; /* Total number of significand digits read */ size_t ndigits = 0; /* Total number of digits (no leading zeros) */ size_t radix_position = 0; /* The number of the digits after radix */ size_t first_nonzero = 0; /* The index of the first non-zero in *str* */ uint16_t digits[BSON_DECIMAL128_MAX_DIGITS] = {0}; uint16_t ndigits_stored = 0; /* The number of digits in digits */ uint16_t *digits_insert = digits; /* Insertion pointer for digits */ size_t first_digit = 0; /* The index of the first non-zero digit */ size_t last_digit = 0; /* The index of the last digit */ int32_t exponent = 0; uint64_t significand_high = 0; /* The high 17 digits of the significand */ uint64_t significand_low = 0; /* The low 17 digits of the significand */ uint16_t biased_exponent = 0; /* The biased exponent */ BSON_ASSERT (dec); dec->high = 0; dec->low = 0; if (*str_read == '+' || *str_read == '-') { is_negative = *(str_read++) == '-'; includes_sign = true; } /* Check for Infinity or NaN */ if (!isdigit (*str_read) && *str_read != '.') { if (_dec128_istreq (str_read, "inf") || _dec128_istreq (str_read, "infinity")) { BSON_DECIMAL128_SET_INF (*dec, is_negative); return true; } else if (_dec128_istreq (str_read, "nan")) { BSON_DECIMAL128_SET_NAN (*dec); return true; } BSON_DECIMAL128_SET_NAN (*dec); return false; } /* Read digits */ while (((isdigit (*str_read) || *str_read == '.')) && (len == -1 || str_read < string + len)) { if (*str_read == '.') { if (saw_radix) { BSON_DECIMAL128_SET_NAN (*dec); return false; } saw_radix = true; str_read++; continue; } if (ndigits_stored < 34) { if (*str_read != '0' || found_nonzero) { if (!found_nonzero) { first_nonzero = ndigits_read; } found_nonzero = true; *(digits_insert++) = *(str_read) - '0'; /* Only store 34 digits */ ndigits_stored++; } } if (found_nonzero) { ndigits++; } if (saw_radix) { radix_position++; } ndigits_read++; str_read++; } if (saw_radix && !ndigits_read) { BSON_DECIMAL128_SET_NAN (*dec); return false; } /* Read exponent if exists */ if (*str_read == 'e' || *str_read == 'E') { int nread = 0; #ifdef _MSC_VER #define SSCANF sscanf_s #else #define SSCANF sscanf #endif int read_exponent = SSCANF (++str_read, "%d%n", &exponent, &nread); str_read += nread; if (!read_exponent || nread == 0) { BSON_DECIMAL128_SET_NAN (*dec); return false; } #undef SSCANF } if ((len == -1 || str_read < string + len) && *str_read) { BSON_DECIMAL128_SET_NAN (*dec); return false; } /* Done reading input. 
*/ /* Find first non-zero digit in digits */ first_digit = 0; if (!ndigits_stored) { /* value is zero */ first_digit = 0; last_digit = 0; digits[0] = 0; ndigits = 1; ndigits_stored = 1; significant_digits = 0; } else { last_digit = ndigits_stored - 1; significant_digits = ndigits; /* Mark trailing zeros as non-significant */ while (string[first_nonzero + significant_digits - 1 + includes_sign + saw_radix] == '0') { significant_digits--; } } /* Normalization of exponent */ /* Correct exponent based on radix position, and shift significand as needed */ /* to represent user input */ /* Overflow prevention */ if (exponent <= radix_position && radix_position - exponent > (1 << 14)) { exponent = BSON_DECIMAL128_EXPONENT_MIN; } else { exponent -= radix_position; } /* Attempt to normalize the exponent */ while (exponent > BSON_DECIMAL128_EXPONENT_MAX) { /* Shift exponent to significand and decrease */ last_digit++; if (last_digit - first_digit > BSON_DECIMAL128_MAX_DIGITS) { /* The exponent is too great to shift into the significand. */ if (significant_digits == 0) { /* Value is zero, we are allowed to clamp the exponent. */ exponent = BSON_DECIMAL128_EXPONENT_MAX; break; } /* Overflow is not permitted, error. */ BSON_DECIMAL128_SET_NAN (*dec); return false; } exponent--; } while (exponent < BSON_DECIMAL128_EXPONENT_MIN || ndigits_stored < ndigits) { /* Shift last digit */ if (last_digit == 0) { /* underflow is not allowed, but zero clamping is */ if (significant_digits == 0) { exponent = BSON_DECIMAL128_EXPONENT_MIN; break; } BSON_DECIMAL128_SET_NAN (*dec); return false; } if (ndigits_stored < ndigits) { if (string[ndigits - 1 + includes_sign + saw_radix] - '0' != 0 && significant_digits != 0) { BSON_DECIMAL128_SET_NAN (*dec); return false; } ndigits--; /* adjust to match digits not stored */ } else { if (digits[last_digit] != 0) { /* Inexact rounding is not allowed. */ BSON_DECIMAL128_SET_NAN (*dec); return false; } last_digit--; /* adjust to round */ } if (exponent < BSON_DECIMAL128_EXPONENT_MAX) { exponent++; } else { BSON_DECIMAL128_SET_NAN (*dec); return false; } } /* Round */ /* We've normalized the exponent, but might still need to round. */ if (last_digit - first_digit + 1 < significant_digits) { uint8_t round_digit; /* There are non-zero digits after last_digit that need rounding. 
*/ /* We round to nearest, ties to even */ round_digit = string[first_nonzero + last_digit + includes_sign + saw_radix + 1] - '0'; if (round_digit != 0) { /* Inexact (non-zero) rounding is not allowed */ BSON_DECIMAL128_SET_NAN (*dec); return false; } } /* Encode significand */ significand_high = 0, /* The high 17 digits of the significand */ significand_low = 0; /* The low 17 digits of the significand */ if (significant_digits == 0) { /* read a zero */ significand_high = 0; significand_low = 0; } else if (last_digit - first_digit < 17) { size_t d_idx = first_digit; significand_low = digits[d_idx++]; for (; d_idx <= last_digit; d_idx++) { significand_low *= 10; significand_low += digits[d_idx]; significand_high = 0; } } else { size_t d_idx = first_digit; significand_high = digits[d_idx++]; for (; d_idx <= last_digit - 17; d_idx++) { significand_high *= 10; significand_high += digits[d_idx]; } significand_low = digits[d_idx++]; for (; d_idx <= last_digit; d_idx++) { significand_low *= 10; significand_low += digits[d_idx]; } } _mul_64x64 (significand_high, 100000000000000000ull, &significand); significand.low += significand_low; if (significand.low < significand_low) { significand.high += 1; } biased_exponent = (exponent + (int16_t) BSON_DECIMAL128_EXPONENT_BIAS); /* Encode combination, exponent, and significand. */ if ((significand.high >> 49) & 1) { /* Encode '11' into bits 1 to 3 */ dec->high |= (0x3ull << 61); dec->high |= (biased_exponent & 0x3fffull) << 47; dec->high |= significand.high & 0x7fffffffffffull; } else { dec->high |= (biased_exponent & 0x3fffull) << 49; dec->high |= significand.high & 0x1ffffffffffffull; } dec->low = significand.low; /* Encode sign */ if (is_negative) { dec->high |= 0x8000000000000000ull; } return true; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-decimal128.h0000644000076500000240000000307413572250757024135 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_DECIMAL128_H #define BSON_DECIMAL128_H #include #include "bson/bson-macros.h" #include "bson/bson-config.h" #include "bson/bson-types.h" /** * BSON_DECIMAL128_STRING: * * The length of a decimal128 string (with null terminator). * * 1 for the sign * 35 for digits and radix * 2 for exponent indicator and sign * 4 for exponent digits */ #define BSON_DECIMAL128_STRING 43 #define BSON_DECIMAL128_INF "Infinity" #define BSON_DECIMAL128_NAN "NaN" BSON_BEGIN_DECLS BSON_EXPORT (void) bson_decimal128_to_string (const bson_decimal128_t *dec, char *str); /* Note: @string must be ASCII characters only! */ BSON_EXPORT (bool) bson_decimal128_from_string (const char *string, bson_decimal128_t *dec); BSON_EXPORT (bool) bson_decimal128_from_string_w_len (const char *string, int len, bson_decimal128_t *dec); BSON_END_DECLS #endif /* BSON_DECIMAL128_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-endian.h0000644000076500000240000001440713572250757023544 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_ENDIAN_H #define BSON_ENDIAN_H #if defined(__sun) #include #endif #include "bson/bson-config.h" #include "bson/bson-macros.h" #include "bson/bson-compat.h" BSON_BEGIN_DECLS #define BSON_BIG_ENDIAN 4321 #define BSON_LITTLE_ENDIAN 1234 #if defined(__sun) #define BSON_UINT16_SWAP_LE_BE(v) BSWAP_16 ((uint16_t) v) #define BSON_UINT32_SWAP_LE_BE(v) BSWAP_32 ((uint32_t) v) #define BSON_UINT64_SWAP_LE_BE(v) BSWAP_64 ((uint64_t) v) #elif defined(__clang__) && defined(__clang_major__) && \ defined(__clang_minor__) && (__clang_major__ >= 3) && \ (__clang_minor__ >= 1) #if __has_builtin(__builtin_bswap16) #define BSON_UINT16_SWAP_LE_BE(v) __builtin_bswap16 (v) #endif #if __has_builtin(__builtin_bswap32) #define BSON_UINT32_SWAP_LE_BE(v) __builtin_bswap32 (v) #endif #if __has_builtin(__builtin_bswap64) #define BSON_UINT64_SWAP_LE_BE(v) __builtin_bswap64 (v) #endif #elif defined(__GNUC__) && (__GNUC__ >= 4) #if __GNUC__ > 4 || (defined(__GNUC_MINOR__) && __GNUC_MINOR__ >= 3) #define BSON_UINT32_SWAP_LE_BE(v) __builtin_bswap32 ((uint32_t) v) #define BSON_UINT64_SWAP_LE_BE(v) __builtin_bswap64 ((uint64_t) v) #endif #if __GNUC__ > 4 || (defined(__GNUC_MINOR__) && __GNUC_MINOR__ >= 8) #define BSON_UINT16_SWAP_LE_BE(v) __builtin_bswap16 ((uint32_t) v) #endif #endif #ifndef BSON_UINT16_SWAP_LE_BE #define BSON_UINT16_SWAP_LE_BE(v) __bson_uint16_swap_slow ((uint16_t) v) #endif #ifndef BSON_UINT32_SWAP_LE_BE #define BSON_UINT32_SWAP_LE_BE(v) __bson_uint32_swap_slow ((uint32_t) v) #endif #ifndef BSON_UINT64_SWAP_LE_BE #define BSON_UINT64_SWAP_LE_BE(v) __bson_uint64_swap_slow ((uint64_t) v) #endif #if BSON_BYTE_ORDER == BSON_LITTLE_ENDIAN #define BSON_UINT16_FROM_LE(v) ((uint16_t) v) #define BSON_UINT16_TO_LE(v) ((uint16_t) v) #define BSON_UINT16_FROM_BE(v) BSON_UINT16_SWAP_LE_BE (v) #define BSON_UINT16_TO_BE(v) BSON_UINT16_SWAP_LE_BE (v) #define BSON_UINT32_FROM_LE(v) ((uint32_t) v) #define BSON_UINT32_TO_LE(v) ((uint32_t) v) #define BSON_UINT32_FROM_BE(v) BSON_UINT32_SWAP_LE_BE (v) #define BSON_UINT32_TO_BE(v) BSON_UINT32_SWAP_LE_BE (v) #define BSON_UINT64_FROM_LE(v) ((uint64_t) v) #define BSON_UINT64_TO_LE(v) ((uint64_t) v) #define BSON_UINT64_FROM_BE(v) BSON_UINT64_SWAP_LE_BE (v) #define BSON_UINT64_TO_BE(v) BSON_UINT64_SWAP_LE_BE (v) #define BSON_DOUBLE_FROM_LE(v) ((double) v) #define BSON_DOUBLE_TO_LE(v) ((double) v) #elif BSON_BYTE_ORDER == BSON_BIG_ENDIAN #define BSON_UINT16_FROM_LE(v) BSON_UINT16_SWAP_LE_BE (v) #define BSON_UINT16_TO_LE(v) BSON_UINT16_SWAP_LE_BE (v) #define BSON_UINT16_FROM_BE(v) ((uint16_t) v) #define BSON_UINT16_TO_BE(v) ((uint16_t) v) #define BSON_UINT32_FROM_LE(v) BSON_UINT32_SWAP_LE_BE (v) #define BSON_UINT32_TO_LE(v) BSON_UINT32_SWAP_LE_BE (v) #define BSON_UINT32_FROM_BE(v) ((uint32_t) v) #define BSON_UINT32_TO_BE(v) ((uint32_t) v) #define BSON_UINT64_FROM_LE(v) BSON_UINT64_SWAP_LE_BE (v) #define BSON_UINT64_TO_LE(v) BSON_UINT64_SWAP_LE_BE (v) #define BSON_UINT64_FROM_BE(v) ((uint64_t) v) #define 
BSON_UINT64_TO_BE(v) ((uint64_t) v) #define BSON_DOUBLE_FROM_LE(v) (__bson_double_swap_slow (v)) #define BSON_DOUBLE_TO_LE(v) (__bson_double_swap_slow (v)) #else #error "The endianness of target architecture is unknown." #endif /* *-------------------------------------------------------------------------- * * __bson_uint16_swap_slow -- * * Fallback endianness conversion for 16-bit integers. * * Returns: * The endian swapped version. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static BSON_INLINE uint16_t __bson_uint16_swap_slow (uint16_t v) /* IN */ { return ((v & 0x00FF) << 8) | ((v & 0xFF00) >> 8); } /* *-------------------------------------------------------------------------- * * __bson_uint32_swap_slow -- * * Fallback endianness conversion for 32-bit integers. * * Returns: * The endian swapped version. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static BSON_INLINE uint32_t __bson_uint32_swap_slow (uint32_t v) /* IN */ { return ((v & 0x000000FFU) << 24) | ((v & 0x0000FF00U) << 8) | ((v & 0x00FF0000U) >> 8) | ((v & 0xFF000000U) >> 24); } /* *-------------------------------------------------------------------------- * * __bson_uint64_swap_slow -- * * Fallback endianness conversion for 64-bit integers. * * Returns: * The endian swapped version. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static BSON_INLINE uint64_t __bson_uint64_swap_slow (uint64_t v) /* IN */ { return ((v & 0x00000000000000FFULL) << 56) | ((v & 0x000000000000FF00ULL) << 40) | ((v & 0x0000000000FF0000ULL) << 24) | ((v & 0x00000000FF000000ULL) << 8) | ((v & 0x000000FF00000000ULL) >> 8) | ((v & 0x0000FF0000000000ULL) >> 24) | ((v & 0x00FF000000000000ULL) >> 40) | ((v & 0xFF00000000000000ULL) >> 56); } /* *-------------------------------------------------------------------------- * * __bson_double_swap_slow -- * * Fallback endianness conversion for double floating point. * * Returns: * The endian swapped version. * * Side effects: * None. * *-------------------------------------------------------------------------- */ BSON_STATIC_ASSERT2 (sizeof_uint64_t, sizeof (double) == sizeof (uint64_t)); static BSON_INLINE double __bson_double_swap_slow (double v) /* IN */ { uint64_t uv; memcpy (&uv, &v, sizeof (v)); uv = BSON_UINT64_SWAP_LE_BE (uv); memcpy (&v, &uv, sizeof (v)); return v; } BSON_END_DECLS #endif /* BSON_ENDIAN_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-error.c0000644000076500000240000000622413572250757023430 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include "bson/bson-compat.h" #include "bson/bson-config.h" #include "bson/bson-error.h" #include "bson/bson-memory.h" #include "bson/bson-string.h" #include "bson/bson-types.h" /* *-------------------------------------------------------------------------- * * bson_set_error -- * * Initializes @error using the parameters specified. * * @domain is an application specific error domain which should * describe which module initiated the error. Think of this as the * exception type. * * @code is the @domain specific error code. * * @format is used to generate the format string. It uses vsnprintf() * internally so the format should match what you would use there. * * Parameters: * @error: A #bson_error_t. * @domain: The error domain. * @code: The error code. * @format: A printf style format string. * * Returns: * None. * * Side effects: * @error is initialized. * *-------------------------------------------------------------------------- */ void bson_set_error (bson_error_t *error, /* OUT */ uint32_t domain, /* IN */ uint32_t code, /* IN */ const char *format, /* IN */ ...) /* IN */ { va_list args; if (error) { error->domain = domain; error->code = code; va_start (args, format); bson_vsnprintf (error->message, sizeof error->message, format, args); va_end (args); error->message[sizeof error->message - 1] = '\0'; } } /* *-------------------------------------------------------------------------- * * bson_strerror_r -- * * This is a reentrant safe macro for strerror. * * The resulting string may be stored in @buf. * * Returns: * A pointer to a static string or @buf. * * Side effects: * None. * *-------------------------------------------------------------------------- */ char * bson_strerror_r (int err_code, /* IN */ char *buf, /* IN */ size_t buflen) /* IN */ { static const char *unknown_msg = "Unknown error"; char *ret = NULL; #if defined(_WIN32) if (strerror_s (buf, buflen, err_code) != 0) { ret = buf; } #elif defined(__GNUC__) && defined(_GNU_SOURCE) ret = strerror_r (err_code, buf, buflen); #else /* XSI strerror_r */ if (strerror_r (err_code, buf, buflen) == 0) { ret = buf; } #endif if (!ret) { bson_strncpy (buf, unknown_msg, buflen); ret = buf; } return ret; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-error.h0000644000076500000240000000225213572250757023432 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_ERROR_H #define BSON_ERROR_H #include "bson/bson-compat.h" #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS #define BSON_ERROR_JSON 1 #define BSON_ERROR_READER 2 #define BSON_ERROR_INVALID 3 BSON_EXPORT (void) bson_set_error (bson_error_t *error, uint32_t domain, uint32_t code, const char *format, ...) 
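/* A minimal usage sketch (illustrative; MY_ERROR_DOMAIN, the code value 17,
 * and the variable "path" are hypothetical caller-side names, not part of
 * this header):
 *
 *    bson_error_t error;
 *
 *    bson_set_error (&error, MY_ERROR_DOMAIN, 17, "cannot open \"%s\"", path);
 *    fprintf (stderr, "%u.%u: %s\n", error.domain, error.code, error.message);
 */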
BSON_GNUC_PRINTF (4, 5); BSON_EXPORT (char *) bson_strerror_r (int err_code, char *buf, size_t buflen); BSON_END_DECLS #endif /* BSON_ERROR_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-iso8601-private.h0000644000076500000240000000251313572250757025062 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_ISO8601_PRIVATE_H #define BSON_ISO8601_PRIVATE_H #include "bson/bson-compat.h" #include "bson/bson-macros.h" #include "bson/bson-string.h" BSON_BEGIN_DECLS bool _bson_iso8601_date_parse (const char *str, int32_t len, int64_t *out, bson_error_t *error); /** * _bson_iso8601_date_format: * @msecs_since_epoch: A positive number of milliseconds since Jan 1, 1970. * @str: The string to append the ISO8601-formatted to. * * Appends a date formatted like "2012-12-24T12:15:30.500Z" to @str. */ void _bson_iso8601_date_format (int64_t msecs_since_epoch, bson_string_t *str); BSON_END_DECLS #endif /* BSON_ISO8601_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-iso8601.c0000644000076500000240000002102013572250757023377 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson-compat.h" #include "bson/bson-macros.h" #include "bson/bson-error.h" #include "bson/bson-iso8601-private.h" #include "bson/bson-json.h" #include "bson/bson-timegm-private.h" static bool get_tok (const char *terminals, const char **ptr, int32_t *remaining, const char **out, int32_t *out_len) { const char *terminal; bool found_terminal = false; if (!*remaining) { *out = ""; *out_len = 0; } *out = *ptr; *out_len = -1; for (; *remaining && !found_terminal; (*ptr)++, (*remaining)--, (*out_len)++) { for (terminal = terminals; *terminal; terminal++) { if (**ptr == *terminal) { found_terminal = true; break; } } } if (!found_terminal) { (*out_len)++; } return found_terminal; } static bool digits_only (const char *str, int32_t len) { int i; for (i = 0; i < len; i++) { if (!isdigit (str[i])) { return false; } } return true; } static bool parse_num (const char *str, int32_t len, int32_t digits, int32_t min, int32_t max, int32_t *out) { int i; int magnitude = 1; int32_t value = 0; if ((digits >= 0 && len != digits) || !digits_only (str, len)) { return false; } for (i = 1; i <= len; i++, magnitude *= 10) { value += (str[len - i] - '0') * magnitude; } if (value < min || value > max) { return false; } *out = value; return true; } bool _bson_iso8601_date_parse (const char *str, int32_t len, int64_t *out, bson_error_t *error) { const char *ptr; int32_t remaining = len; const char *year_ptr = NULL; const char *month_ptr = NULL; const char *day_ptr = NULL; const char *hour_ptr = NULL; const char *min_ptr = NULL; const char *sec_ptr = NULL; const char *millis_ptr = NULL; const char *tz_ptr = NULL; int32_t year_len = 0; int32_t month_len = 0; int32_t day_len = 0; int32_t hour_len = 0; int32_t min_len = 0; int32_t sec_len = 0; int32_t millis_len = 0; int32_t tz_len = 0; int32_t year; int32_t month; int32_t day; int32_t hour; int32_t min; int32_t sec = 0; int64_t millis = 0; int32_t tz_adjustment = 0; struct bson_tm posix_date = {0}; #define DATE_PARSE_ERR(msg) \ bson_set_error (error, \ BSON_ERROR_JSON, \ BSON_JSON_ERROR_READ_INVALID_PARAM, \ "Could not parse \"%s\" as date: " msg, \ str); \ return false #define DEFAULT_DATE_PARSE_ERR \ DATE_PARSE_ERR ("use ISO8601 format yyyy-mm-ddThh:mm plus timezone, either" \ " \"Z\" or like \"+0500\"") ptr = str; /* we have to match at least yyyy-mm-ddThh:mm */ if (!(get_tok ("-", &ptr, &remaining, &year_ptr, &year_len) && get_tok ("-", &ptr, &remaining, &month_ptr, &month_len) && get_tok ("T", &ptr, &remaining, &day_ptr, &day_len) && get_tok (":", &ptr, &remaining, &hour_ptr, &hour_len) && get_tok (":+-Z", &ptr, &remaining, &min_ptr, &min_len))) { DEFAULT_DATE_PARSE_ERR; } /* if the minute has a ':' at the end look for seconds */ if (min_ptr[min_len] == ':') { if (remaining < 2) { DATE_PARSE_ERR ("reached end of date while looking for seconds"); } get_tok (".+-Z", &ptr, &remaining, &sec_ptr, &sec_len); if (!sec_len) { DATE_PARSE_ERR ("minute ends in \":\" seconds is required"); } } /* if we had a second and it is followed by a '.' 
look for milliseconds */ if (sec_len && sec_ptr[sec_len] == '.') { if (remaining < 2) { DATE_PARSE_ERR ("reached end of date while looking for milliseconds"); } get_tok ("+-Z", &ptr, &remaining, &millis_ptr, &millis_len); if (!millis_len) { DATE_PARSE_ERR ("seconds ends in \".\", milliseconds is required"); } } /* backtrack by 1 to put ptr on the timezone */ ptr--; remaining++; get_tok ("", &ptr, &remaining, &tz_ptr, &tz_len); if (!parse_num (year_ptr, year_len, 4, -9999, 9999, &year)) { DATE_PARSE_ERR ("year must be an integer"); } /* values are as in struct tm */ year -= 1900; if (!parse_num (month_ptr, month_len, 2, 1, 12, &month)) { DATE_PARSE_ERR ("month must be an integer"); } /* values are as in struct tm */ month -= 1; if (!parse_num (day_ptr, day_len, 2, 1, 31, &day)) { DATE_PARSE_ERR ("day must be an integer"); } if (!parse_num (hour_ptr, hour_len, 2, 0, 23, &hour)) { DATE_PARSE_ERR ("hour must be an integer"); } if (!parse_num (min_ptr, min_len, 2, 0, 59, &min)) { DATE_PARSE_ERR ("minute must be an integer"); } if (sec_len && !parse_num (sec_ptr, sec_len, 2, 0, 60, &sec)) { DATE_PARSE_ERR ("seconds must be an integer"); } if (tz_len > 0) { if (tz_ptr[0] == 'Z' && tz_len == 1) { /* valid */ } else if (tz_ptr[0] == '+' || tz_ptr[0] == '-') { int32_t tz_hour; int32_t tz_min; if (tz_len != 5 || !digits_only (tz_ptr + 1, 4)) { DATE_PARSE_ERR ("could not parse timezone"); } if (!parse_num (tz_ptr + 1, 2, -1, -23, 23, &tz_hour)) { DATE_PARSE_ERR ("timezone hour must be at most 23"); } if (!parse_num (tz_ptr + 3, 2, -1, 0, 59, &tz_min)) { DATE_PARSE_ERR ("timezone minute must be at most 59"); } /* we inflect the meaning of a 'positive' timezone. Those are hours * we have to subtract, and vice versa */ tz_adjustment = (tz_ptr[0] == '-' ? 1 : -1) * ((tz_min * 60) + (tz_hour * 60 * 60)); if (!(tz_adjustment > -86400 && tz_adjustment < 86400)) { DATE_PARSE_ERR ("timezone offset must be less than 24 hours"); } } else { DATE_PARSE_ERR ("timezone is required"); } } if (millis_len > 0) { int i; int magnitude; millis = 0; if (millis_len > 3 || !digits_only (millis_ptr, millis_len)) { DATE_PARSE_ERR ("milliseconds must be an integer"); } for (i = 1, magnitude = 1; i <= millis_len; i++, magnitude *= 10) { millis += (millis_ptr[millis_len - i] - '0') * magnitude; } if (millis_len == 1) { millis *= 100; } else if (millis_len == 2) { millis *= 10; } if (millis < 0 || millis > 1000) { DATE_PARSE_ERR ("milliseconds must be at least 0 and less than 1000"); } } posix_date.tm_sec = sec; posix_date.tm_min = min; posix_date.tm_hour = hour; posix_date.tm_mday = day; posix_date.tm_mon = month; posix_date.tm_year = year; posix_date.tm_wday = 0; posix_date.tm_yday = 0; millis = 1000 * _bson_timegm (&posix_date) + millis; millis += tz_adjustment * 1000; *out = millis; return true; } void _bson_iso8601_date_format (int64_t msec_since_epoch, bson_string_t *str) { time_t t; int64_t msecs_part; char buf[64]; msecs_part = msec_since_epoch % 1000; t = (time_t) (msec_since_epoch / 1000); #ifdef BSON_HAVE_GMTIME_R { struct tm posix_date; gmtime_r (&t, &posix_date); strftime (buf, sizeof buf, "%Y-%m-%dT%H:%M:%S", &posix_date); } #elif defined(_MSC_VER) { /* Windows gmtime_s is thread-safe */ struct tm time_buf; gmtime_s (&time_buf, &t); strftime (buf, sizeof buf, "%Y-%m-%dT%H:%M:%S", &time_buf); } #else strftime (buf, sizeof buf, "%Y-%m-%dT%H:%M:%S", gmtime (&t)); #endif if (msecs_part) { bson_string_append_printf (str, "%s.%03" PRId64 "Z", buf, msecs_part); } else { bson_string_append (str, buf); bson_string_append_c 
(str, 'Z'); } } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-iter.c0000644000076500000240000017012013572250757023237 0ustar alcaeusstaff/* * Copyright 2013-2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-iter.h" #include "bson/bson-config.h" #include "bson/bson-decimal128.h" #include "bson-types.h" #define ITER_TYPE(i) ((bson_type_t) * ((i)->raw + (i)->type)) /* *-------------------------------------------------------------------------- * * bson_iter_init -- * * Initializes @iter to be used to iterate @bson. * * Returns: * true if bson_iter_t was initialized. otherwise false. * * Side effects: * @iter is initialized. * *-------------------------------------------------------------------------- */ bool bson_iter_init (bson_iter_t *iter, /* OUT */ const bson_t *bson) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (bson); if (BSON_UNLIKELY (bson->len < 5)) { memset (iter, 0, sizeof *iter); return false; } iter->raw = bson_get_data (bson); iter->len = bson->len; iter->off = 0; iter->type = 0; iter->key = 0; iter->d1 = 0; iter->d2 = 0; iter->d3 = 0; iter->d4 = 0; iter->next_off = 4; iter->err_off = 0; return true; } /* *-------------------------------------------------------------------------- * * bson_iter_init_from_data -- * * Initializes @iter to be used to iterate @data of length @length * * Returns: * true if bson_iter_t was initialized. otherwise false. * * Side effects: * @iter is initialized. * *-------------------------------------------------------------------------- */ bool bson_iter_init_from_data (bson_iter_t *iter, /* OUT */ const uint8_t *data, /* IN */ size_t length) /* IN */ { uint32_t len_le; BSON_ASSERT (iter); BSON_ASSERT (data); if (BSON_UNLIKELY ((length < 5) || (length > INT_MAX))) { memset (iter, 0, sizeof *iter); return false; } memcpy (&len_le, data, sizeof (len_le)); if (BSON_UNLIKELY ((size_t) BSON_UINT32_FROM_LE (len_le) != length)) { memset (iter, 0, sizeof *iter); return false; } if (BSON_UNLIKELY (data[length - 1])) { memset (iter, 0, sizeof *iter); return false; } iter->raw = (uint8_t *) data; iter->len = length; iter->off = 0; iter->type = 0; iter->key = 0; iter->d1 = 0; iter->d2 = 0; iter->d3 = 0; iter->d4 = 0; iter->next_off = 4; iter->err_off = 0; return true; } /* *-------------------------------------------------------------------------- * * bson_iter_recurse -- * * Creates a new sub-iter looking at the document or array that @iter * is currently pointing at. * * Returns: * true if successful and @child was initialized. * * Side effects: * @child is initialized. 
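 *
 *    A short usage sketch (illustrative; "doc" is an assumed const bson_t *
 *    that contains a subdocument under the key "subdoc"):
 *
 *       bson_iter_t iter;
 *       bson_iter_t child;
 *
 *       if (bson_iter_init_find (&iter, doc, "subdoc") &&
 *           BSON_ITER_HOLDS_DOCUMENT (&iter) &&
 *           bson_iter_recurse (&iter, &child)) {
 *          while (bson_iter_next (&child)) {
 *             printf ("%s\n", bson_iter_key (&child));
 *          }
 *       }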
* *-------------------------------------------------------------------------- */ bool bson_iter_recurse (const bson_iter_t *iter, /* IN */ bson_iter_t *child) /* OUT */ { const uint8_t *data = NULL; uint32_t len = 0; BSON_ASSERT (iter); BSON_ASSERT (child); if (ITER_TYPE (iter) == BSON_TYPE_DOCUMENT) { bson_iter_document (iter, &len, &data); } else if (ITER_TYPE (iter) == BSON_TYPE_ARRAY) { bson_iter_array (iter, &len, &data); } else { return false; } child->raw = data; child->len = len; child->off = 0; child->type = 0; child->key = 0; child->d1 = 0; child->d2 = 0; child->d3 = 0; child->d4 = 0; child->next_off = 4; child->err_off = 0; return true; } /* *-------------------------------------------------------------------------- * * bson_iter_init_find -- * * Initializes a #bson_iter_t and moves the iter to the first field * matching @key. * * Returns: * true if the field named @key was found; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_init_find (bson_iter_t *iter, /* INOUT */ const bson_t *bson, /* IN */ const char *key) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (bson); BSON_ASSERT (key); return bson_iter_init (iter, bson) && bson_iter_find (iter, key); } /* *-------------------------------------------------------------------------- * * bson_iter_init_find_w_len -- * * Initializes a #bson_iter_t and moves the iter to the first field * matching @key. * * Returns: * true if the field named @key was found; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_init_find_w_len (bson_iter_t *iter, /* INOUT */ const bson_t *bson, /* IN */ const char *key, /* IN */ int keylen) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (bson); BSON_ASSERT (key); return bson_iter_init (iter, bson) && bson_iter_find_w_len (iter, key, keylen); } /* *-------------------------------------------------------------------------- * * bson_iter_init_find_case -- * * A case-insensitive version of bson_iter_init_find(). * * Returns: * true if the field was found and @iter is observing that field. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_init_find_case (bson_iter_t *iter, /* INOUT */ const bson_t *bson, /* IN */ const char *key) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (bson); BSON_ASSERT (key); return bson_iter_init (iter, bson) && bson_iter_find_case (iter, key); } /* *-------------------------------------------------------------------------- * * bson_iter_find_w_len -- * * Searches through @iter starting from the current position for a key * matching @key. @keylen indicates the length of @key, or -1 to * determine the length with strlen(). * * Returns: * true if the field @key was found. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_find_w_len (bson_iter_t *iter, /* INOUT */ const char *key, /* IN */ int keylen) /* IN */ { const char *ikey; if (keylen < 0) { keylen = (int) strlen (key); } while (bson_iter_next (iter)) { ikey = bson_iter_key (iter); if ((0 == strncmp (key, ikey, keylen)) && (ikey[keylen] == '\0')) { return true; } } return false; } /* *-------------------------------------------------------------------------- * * bson_iter_find -- * * Searches through @iter starting from the current position for a key * matching @key. 
This is a case-sensitive search meaning "KEY" and * "key" would NOT match. * * Returns: * true if @key is found. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_find (bson_iter_t *iter, /* INOUT */ const char *key) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (key); return bson_iter_find_w_len (iter, key, -1); } /* *-------------------------------------------------------------------------- * * bson_iter_find_case -- * * Searches through @iter starting from the current position for a key * matching @key. This is a case-insensitive search meaning "KEY" and * "key" would match. * * Returns: * true if @key is found. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_find_case (bson_iter_t *iter, /* INOUT */ const char *key) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (key); while (bson_iter_next (iter)) { if (!bson_strcasecmp (key, bson_iter_key (iter))) { return true; } } return false; } /* *-------------------------------------------------------------------------- * * bson_iter_find_descendant -- * * Locates a descendant using the "parent.child.key" notation. This * operates similar to bson_iter_find() except that it can recurse * into children documents using the dot notation. * * Returns: * true if the descendant was found and @descendant was initialized. * * Side effects: * @descendant may be initialized. * *-------------------------------------------------------------------------- */ bool bson_iter_find_descendant (bson_iter_t *iter, /* INOUT */ const char *dotkey, /* IN */ bson_iter_t *descendant) /* OUT */ { bson_iter_t tmp; const char *dot; size_t sublen; BSON_ASSERT (iter); BSON_ASSERT (dotkey); BSON_ASSERT (descendant); if ((dot = strchr (dotkey, '.'))) { sublen = dot - dotkey; } else { sublen = strlen (dotkey); } if (bson_iter_find_w_len (iter, dotkey, (int) sublen)) { if (!dot) { *descendant = *iter; return true; } if (BSON_ITER_HOLDS_DOCUMENT (iter) || BSON_ITER_HOLDS_ARRAY (iter)) { if (bson_iter_recurse (iter, &tmp)) { return bson_iter_find_descendant (&tmp, dot + 1, descendant); } } } return false; } /* *-------------------------------------------------------------------------- * * bson_iter_key -- * * Retrieves the key of the current field. The resulting key is valid * while @iter is valid. * * Returns: * A string that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const char * bson_iter_key (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); return bson_iter_key_unsafe (iter); } /* *-------------------------------------------------------------------------- * * bson_iter_type -- * * Retrieves the type of the current field. It may be useful to check * the type using the BSON_ITER_HOLDS_*() macros. * * Returns: * A bson_type_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bson_type_t bson_iter_type (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); BSON_ASSERT (iter->raw); BSON_ASSERT (iter->len); return bson_iter_type_unsafe (iter); } /* *-------------------------------------------------------------------------- * * _bson_iter_next_internal -- * * Internal function to advance @iter to the next field and retrieve * the key and BSON type before error-checking. @next_keylen is * the key length of the next field being iterated or 0 if this is * not known. 
* * Return: * true if an element was decoded, else false. * * Side effects: * @key and @bson_type are set. * * If the return value is false: * - @iter is invalidated: @iter->raw is NULLed * - @unsupported is set to true if the bson type is unsupported * - otherwise if the BSON is corrupt, @iter->err_off is nonzero * - otherwise @bson_type is set to BSON_TYPE_EOD * *-------------------------------------------------------------------------- */ static bool _bson_iter_next_internal (bson_iter_t *iter, /* INOUT */ uint32_t next_keylen, /* IN */ const char **key, /* OUT */ uint32_t *bson_type, /* OUT */ bool *unsupported) /* OUT */ { const uint8_t *data; uint32_t o; unsigned int len; BSON_ASSERT (iter); *unsupported = false; if (!iter->raw) { *key = NULL; *bson_type = BSON_TYPE_EOD; return false; } data = iter->raw; len = iter->len; iter->off = iter->next_off; iter->type = iter->off; iter->key = iter->off + 1; iter->d1 = 0; iter->d2 = 0; iter->d3 = 0; iter->d4 = 0; if (next_keylen == 0) { /* iterate from start to end of NULL-terminated key string */ for (o = iter->key; o < len; o++) { if (!data[o]) { iter->d1 = ++o; goto fill_data_fields; } } } else { o = iter->key + next_keylen + 1; iter->d1 = o; goto fill_data_fields; } goto mark_invalid; fill_data_fields: *key = bson_iter_key_unsafe (iter); *bson_type = ITER_TYPE (iter); switch (*bson_type) { case BSON_TYPE_DATE_TIME: case BSON_TYPE_DOUBLE: case BSON_TYPE_INT64: case BSON_TYPE_TIMESTAMP: iter->next_off = o + 8; break; case BSON_TYPE_CODE: case BSON_TYPE_SYMBOL: case BSON_TYPE_UTF8: { uint32_t l; if ((o + 4) >= len) { iter->err_off = o; goto mark_invalid; } iter->d2 = o + 4; memcpy (&l, iter->raw + iter->d1, sizeof (l)); l = BSON_UINT32_FROM_LE (l); if (l > (len - (o + 4))) { iter->err_off = o; goto mark_invalid; } iter->next_off = o + 4 + l; /* * Make sure the string length includes the NUL byte. */ if (BSON_UNLIKELY ((l == 0) || (iter->next_off >= len))) { iter->err_off = o; goto mark_invalid; } /* * Make sure the last byte is a NUL byte. 
*/ if (BSON_UNLIKELY ((iter->raw + iter->d2)[l - 1] != '\0')) { iter->err_off = o + 4 + l - 1; goto mark_invalid; } } break; case BSON_TYPE_BINARY: { bson_subtype_t subtype; uint32_t l; if (o >= (len - 4)) { iter->err_off = o; goto mark_invalid; } iter->d2 = o + 4; iter->d3 = o + 5; memcpy (&l, iter->raw + iter->d1, sizeof (l)); l = BSON_UINT32_FROM_LE (l); if (l >= (len - o - 4)) { iter->err_off = o; goto mark_invalid; } subtype = *(iter->raw + iter->d2); if (subtype == BSON_SUBTYPE_BINARY_DEPRECATED) { int32_t binary_len; if (l < 4) { iter->err_off = o; goto mark_invalid; } /* subtype 2 has a redundant length header in the data */ memcpy (&binary_len, (iter->raw + iter->d3), sizeof (binary_len)); binary_len = BSON_UINT32_FROM_LE (binary_len); if (binary_len + 4 != l) { iter->err_off = iter->d3; goto mark_invalid; } } iter->next_off = o + 5 + l; } break; case BSON_TYPE_ARRAY: case BSON_TYPE_DOCUMENT: { uint32_t l; if (o >= (len - 4)) { iter->err_off = o; goto mark_invalid; } memcpy (&l, iter->raw + iter->d1, sizeof (l)); l = BSON_UINT32_FROM_LE (l); if ((l > len) || (l > (len - o))) { iter->err_off = o; goto mark_invalid; } iter->next_off = o + l; } break; case BSON_TYPE_OID: iter->next_off = o + 12; break; case BSON_TYPE_BOOL: { char val; if (iter->d1 >= len) { iter->err_off = o; goto mark_invalid; } memcpy (&val, iter->raw + iter->d1, 1); if (val != 0x00 && val != 0x01) { iter->err_off = o; goto mark_invalid; } iter->next_off = o + 1; } break; case BSON_TYPE_REGEX: { bool eor = false; bool eoo = false; for (; o < len; o++) { if (!data[o]) { iter->d2 = ++o; eor = true; break; } } if (!eor) { iter->err_off = iter->next_off; goto mark_invalid; } for (; o < len; o++) { if (!data[o]) { eoo = true; break; } } if (!eoo) { iter->err_off = iter->next_off; goto mark_invalid; } iter->next_off = o + 1; } break; case BSON_TYPE_DBPOINTER: { uint32_t l; if (o >= (len - 4)) { iter->err_off = o; goto mark_invalid; } iter->d2 = o + 4; memcpy (&l, iter->raw + iter->d1, sizeof (l)); l = BSON_UINT32_FROM_LE (l); /* Check valid string length. l counts '\0' but not 4 bytes for itself. 
*/ if (l == 0 || l > (len - o - 4)) { iter->err_off = o; goto mark_invalid; } if (*(iter->raw + o + l + 3)) { /* not null terminated */ iter->err_off = o + l + 3; goto mark_invalid; } iter->d3 = o + 4 + l; iter->next_off = o + 4 + l + 12; } break; case BSON_TYPE_CODEWSCOPE: { uint32_t l; uint32_t doclen; if ((len < 19) || (o >= (len - 14))) { iter->err_off = o; goto mark_invalid; } iter->d2 = o + 4; iter->d3 = o + 8; memcpy (&l, iter->raw + iter->d1, sizeof (l)); l = BSON_UINT32_FROM_LE (l); if ((l < 14) || (l >= (len - o))) { iter->err_off = o; goto mark_invalid; } iter->next_off = o + l; if (iter->next_off >= len) { iter->err_off = o; goto mark_invalid; } memcpy (&l, iter->raw + iter->d2, sizeof (l)); l = BSON_UINT32_FROM_LE (l); if (l == 0 || l >= (len - o - 4 - 4)) { iter->err_off = o; goto mark_invalid; } if ((o + 4 + 4 + l + 4) >= iter->next_off) { iter->err_off = o + 4; goto mark_invalid; } iter->d4 = o + 4 + 4 + l; memcpy (&doclen, iter->raw + iter->d4, sizeof (doclen)); doclen = BSON_UINT32_FROM_LE (doclen); if ((o + 4 + 4 + l + doclen) != iter->next_off) { iter->err_off = o + 4 + 4 + l; goto mark_invalid; } } break; case BSON_TYPE_INT32: iter->next_off = o + 4; break; case BSON_TYPE_DECIMAL128: iter->next_off = o + 16; break; case BSON_TYPE_MAXKEY: case BSON_TYPE_MINKEY: case BSON_TYPE_NULL: case BSON_TYPE_UNDEFINED: iter->next_off = o; break; default: *unsupported = true; /* FALL THROUGH */ case BSON_TYPE_EOD: iter->err_off = o; goto mark_invalid; } /* * Check to see if any of the field locations would overflow the * current BSON buffer. If so, set the error location to the offset * of where the field starts. */ if (iter->next_off >= len) { iter->err_off = o; goto mark_invalid; } iter->err_off = 0; return true; mark_invalid: iter->raw = NULL; iter->len = 0; iter->next_off = 0; return false; } /* *-------------------------------------------------------------------------- * * bson_iter_next -- * * Advances @iter to the next field of the underlying BSON document. * If all fields have been exhausted, then %false is returned. * * It is a programming error to use @iter after this function has * returned false. * * Returns: * true if the iter was advanced to the next record. * otherwise false and @iter should be considered invalid. * * Side effects: * @iter may be invalidated. * *-------------------------------------------------------------------------- */ bool bson_iter_next (bson_iter_t *iter) /* INOUT */ { uint32_t bson_type; const char *key; bool unsupported; return _bson_iter_next_internal (iter, 0, &key, &bson_type, &unsupported); } /* *-------------------------------------------------------------------------- * * bson_iter_binary -- * * Retrieves the BSON_TYPE_BINARY field. The subtype is stored in * @subtype. The length of @binary in bytes is stored in @binary_len. * * @binary should not be modified or freed and is only valid while * @iter's bson_t is valid and unmodified. * * Parameters: * @iter: A bson_iter_t * @subtype: A location for the binary subtype. * @binary_len: A location for the length of @binary. * @binary: A location for a pointer to the binary data. * * Returns: * None. * * Side effects: * None. 
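 *
 *    A short usage sketch (illustrative; assumes a local bson_iter_t iter
 *    positioned on a field that may hold binary data):
 *
 *       bson_subtype_t subtype;
 *       uint32_t binary_len;
 *       const uint8_t *binary;
 *
 *       if (BSON_ITER_HOLDS_BINARY (&iter)) {
 *          bson_iter_binary (&iter, &subtype, &binary_len, &binary);
 *       }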
* *-------------------------------------------------------------------------- */ void bson_iter_binary (const bson_iter_t *iter, /* IN */ bson_subtype_t *subtype, /* OUT */ uint32_t *binary_len, /* OUT */ const uint8_t **binary) /* OUT */ { bson_subtype_t backup; BSON_ASSERT (iter); BSON_ASSERT (!binary || binary_len); if (ITER_TYPE (iter) == BSON_TYPE_BINARY) { if (!subtype) { subtype = &backup; } *subtype = (bson_subtype_t) * (iter->raw + iter->d2); if (binary) { memcpy (binary_len, (iter->raw + iter->d1), sizeof (*binary_len)); *binary_len = BSON_UINT32_FROM_LE (*binary_len); *binary = iter->raw + iter->d3; if (*subtype == BSON_SUBTYPE_BINARY_DEPRECATED) { *binary_len -= sizeof (int32_t); *binary += sizeof (int32_t); } } return; } if (binary) { *binary = NULL; } if (binary_len) { *binary_len = 0; } if (subtype) { *subtype = BSON_SUBTYPE_BINARY; } } /* *-------------------------------------------------------------------------- * * bson_iter_bool -- * * Retrieves the current field of type BSON_TYPE_BOOL. * * Returns: * true or false, dependent on bson document. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_bool (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_BOOL) { return bson_iter_bool_unsafe (iter); } return false; } /* *-------------------------------------------------------------------------- * * bson_iter_as_bool -- * * If @iter is on a boolean field, returns the boolean. If it is on a * non-boolean field such as int32, int64, or double, it will convert * the value to a boolean. * * Zero is false, and non-zero is true. * * Returns: * true or false, dependent on field type. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_as_bool (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); switch ((int) ITER_TYPE (iter)) { case BSON_TYPE_BOOL: return bson_iter_bool (iter); case BSON_TYPE_DOUBLE: return !(bson_iter_double (iter) == 0.0); case BSON_TYPE_INT64: return !(bson_iter_int64 (iter) == 0); case BSON_TYPE_INT32: return !(bson_iter_int32 (iter) == 0); case BSON_TYPE_UTF8: return true; case BSON_TYPE_NULL: case BSON_TYPE_UNDEFINED: return false; default: return true; } } /* *-------------------------------------------------------------------------- * * bson_iter_double -- * * Retrieves the current field of type BSON_TYPE_DOUBLE. * * Returns: * A double. * * Side effects: * None. * *-------------------------------------------------------------------------- */ double bson_iter_double (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DOUBLE) { return bson_iter_double_unsafe (iter); } return 0; } /* *-------------------------------------------------------------------------- * * bson_iter_as_double -- * * If @iter is on a field of type BSON_TYPE_DOUBLE, * returns the double. If it is on an integer field * such as int32, int64, or bool, it will convert * the value to a double. * * * Returns: * A double. * * Side effects: * None. 
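 *
 *    For example (illustrative; assumes a local bson_iter_t iter positioned
 *    on an int32 field holding the value 3):
 *
 *       double d = bson_iter_as_double (&iter);   // d == 3.0
 *
 *    A bool field holding true converts to 1.0; unsupported types yield 0.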
* *-------------------------------------------------------------------------- */ double bson_iter_as_double (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); switch ((int) ITER_TYPE (iter)) { case BSON_TYPE_BOOL: return (double) bson_iter_bool (iter); case BSON_TYPE_DOUBLE: return bson_iter_double (iter); case BSON_TYPE_INT32: return (double) bson_iter_int32 (iter); case BSON_TYPE_INT64: return (double) bson_iter_int64 (iter); default: return 0; } } /* *-------------------------------------------------------------------------- * * bson_iter_int32 -- * * Retrieves the value of the field of type BSON_TYPE_INT32. * * Returns: * A 32-bit signed integer. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int32_t bson_iter_int32 (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_INT32) { return bson_iter_int32_unsafe (iter); } return 0; } /* *-------------------------------------------------------------------------- * * bson_iter_int64 -- * * Retrieves a 64-bit signed integer for the current BSON_TYPE_INT64 * field. * * Returns: * A 64-bit signed integer. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int64_t bson_iter_int64 (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_INT64) { return bson_iter_int64_unsafe (iter); } return 0; } /* *-------------------------------------------------------------------------- * * bson_iter_as_int64 -- * * If @iter is not an int64 field, it will try to convert the value to * an int64. Such field types include: * * - bool * - double * - int32 * * Returns: * An int64_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int64_t bson_iter_as_int64 (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); switch ((int) ITER_TYPE (iter)) { case BSON_TYPE_BOOL: return (int64_t) bson_iter_bool (iter); case BSON_TYPE_DOUBLE: return (int64_t) bson_iter_double (iter); case BSON_TYPE_INT64: return bson_iter_int64 (iter); case BSON_TYPE_INT32: return (int64_t) bson_iter_int32 (iter); default: return 0; } } /* *-------------------------------------------------------------------------- * * bson_iter_decimal128 -- * * This function retrieves the current field of type *%BSON_TYPE_DECIMAL128. * The result is valid while @iter is valid, and is stored in @dec. * * Returns: * * True on success, false on failure. * * Side Effects: * None. * *-------------------------------------------------------------------------- */ bool bson_iter_decimal128 (const bson_iter_t *iter, /* IN */ bson_decimal128_t *dec) /* OUT */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DECIMAL128) { bson_iter_decimal128_unsafe (iter, dec); return true; } return false; } /* *-------------------------------------------------------------------------- * * bson_iter_oid -- * * Retrieves the current field of type %BSON_TYPE_OID. The result is * valid while @iter is valid. * * Returns: * A bson_oid_t that should not be modified or freed. * * Side effects: * None. 
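 *
 *    A short usage sketch (illustrative; assumes a local bson_iter_t iter
 *    positioned on an ObjectId field such as "_id"):
 *
 *       char oidstr[25];
 *       const bson_oid_t *oid;
 *
 *       if (BSON_ITER_HOLDS_OID (&iter) && (oid = bson_iter_oid (&iter))) {
 *          bson_oid_to_string (oid, oidstr);
 *       }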
* *-------------------------------------------------------------------------- */ const bson_oid_t * bson_iter_oid (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_OID) { return bson_iter_oid_unsafe (iter); } return NULL; } /* *-------------------------------------------------------------------------- * * bson_iter_regex -- * * Fetches the current field from the iter which should be of type * BSON_TYPE_REGEX. * * Returns: * Regex from @iter. This should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const char * bson_iter_regex (const bson_iter_t *iter, /* IN */ const char **options) /* IN */ { const char *ret = NULL; const char *ret_options = NULL; BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_REGEX) { ret = (const char *) (iter->raw + iter->d1); ret_options = (const char *) (iter->raw + iter->d2); } if (options) { *options = ret_options; } return ret; } /* *-------------------------------------------------------------------------- * * bson_iter_utf8 -- * * Retrieves the current field of type %BSON_TYPE_UTF8 as a UTF-8 * encoded string. * * Parameters: * @iter: A bson_iter_t. * @length: A location for the length of the string. * * Returns: * A string that should not be modified or freed. * * Side effects: * @length will be set to the result strings length if non-NULL. * *-------------------------------------------------------------------------- */ const char * bson_iter_utf8 (const bson_iter_t *iter, /* IN */ uint32_t *length) /* OUT */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_UTF8) { if (length) { *length = bson_iter_utf8_len_unsafe (iter); } return (const char *) (iter->raw + iter->d2); } if (length) { *length = 0; } return NULL; } /* *-------------------------------------------------------------------------- * * bson_iter_dup_utf8 -- * * Copies the current UTF-8 element into a newly allocated string. The * string should be freed using bson_free() when the caller is * finished with it. * * Returns: * A newly allocated char* that should be freed with bson_free(). * * Side effects: * @length will be set to the result strings length if non-NULL. * *-------------------------------------------------------------------------- */ char * bson_iter_dup_utf8 (const bson_iter_t *iter, /* IN */ uint32_t *length) /* OUT */ { uint32_t local_length = 0; const char *str; char *ret = NULL; BSON_ASSERT (iter); if ((str = bson_iter_utf8 (iter, &local_length))) { ret = bson_malloc0 (local_length + 1); memcpy (ret, str, local_length); ret[local_length] = '\0'; } if (length) { *length = local_length; } return ret; } /* *-------------------------------------------------------------------------- * * bson_iter_code -- * * Retrieves the current field of type %BSON_TYPE_CODE. The length of * the resulting string is stored in @length. * * Parameters: * @iter: A bson_iter_t. * @length: A location for the code length. * * Returns: * A NUL-terminated string containing the code which should not be * modified or freed. * * Side effects: * None. 
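 *
 *    A short usage sketch (illustrative; assumes a local bson_iter_t iter):
 *
 *       uint32_t code_len = 0;
 *       const char *code = NULL;
 *
 *       if (BSON_ITER_HOLDS_CODE (&iter)) {
 *          code = bson_iter_code (&iter, &code_len);
 *       }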
* *-------------------------------------------------------------------------- */ const char * bson_iter_code (const bson_iter_t *iter, /* IN */ uint32_t *length) /* OUT */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_CODE) { if (length) { *length = bson_iter_utf8_len_unsafe (iter); } return (const char *) (iter->raw + iter->d2); } if (length) { *length = 0; } return NULL; } /* *-------------------------------------------------------------------------- * * bson_iter_codewscope -- * * Similar to bson_iter_code() but with a scope associated encoded as * a BSON document. @scope should not be modified or freed. It is * valid while @iter is valid. * * Parameters: * @iter: A #bson_iter_t. * @length: A location for the length of resulting string. * @scope_len: A location for the length of @scope. * @scope: A location for the scope encoded as BSON. * * Returns: * A NUL-terminated string that should not be modified or freed. * * Side effects: * @length is set to the resulting string length in bytes. * @scope_len is set to the length of @scope in bytes. * @scope is set to the scope documents buffer which can be * turned into a bson document with bson_init_static(). * *-------------------------------------------------------------------------- */ const char * bson_iter_codewscope (const bson_iter_t *iter, /* IN */ uint32_t *length, /* OUT */ uint32_t *scope_len, /* OUT */ const uint8_t **scope) /* OUT */ { uint32_t len; BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_CODEWSCOPE) { if (length) { memcpy (&len, iter->raw + iter->d2, sizeof (len)); /* The string length was checked > 0 in _bson_iter_next_internal. */ len = BSON_UINT32_FROM_LE (len); BSON_ASSERT (len > 0); *length = len - 1; } memcpy (&len, iter->raw + iter->d4, sizeof (len)); *scope_len = BSON_UINT32_FROM_LE (len); *scope = iter->raw + iter->d4; return (const char *) (iter->raw + iter->d3); } if (length) { *length = 0; } if (scope_len) { *scope_len = 0; } if (scope) { *scope = NULL; } return NULL; } /* *-------------------------------------------------------------------------- * * bson_iter_dbpointer -- * * Retrieves a BSON_TYPE_DBPOINTER field. @collection_len will be set * to the length of the collection name. The collection name will be * placed into @collection. The oid will be placed into @oid. * * @collection and @oid should not be modified. * * Parameters: * @iter: A #bson_iter_t. * @collection_len: A location for the length of @collection. * @collection: A location for the collection name. * @oid: A location for the oid. * * Returns: * None. * * Side effects: * @collection_len is set to the length of @collection in bytes * excluding the null byte. * @collection is set to the collection name, including a terminating * null byte. * @oid is initialized with the oid. 
* *-------------------------------------------------------------------------- */ void bson_iter_dbpointer (const bson_iter_t *iter, /* IN */ uint32_t *collection_len, /* OUT */ const char **collection, /* OUT */ const bson_oid_t **oid) /* OUT */ { BSON_ASSERT (iter); if (collection) { *collection = NULL; } if (oid) { *oid = NULL; } if (ITER_TYPE (iter) == BSON_TYPE_DBPOINTER) { if (collection_len) { memcpy ( collection_len, (iter->raw + iter->d1), sizeof (*collection_len)); *collection_len = BSON_UINT32_FROM_LE (*collection_len); if ((*collection_len) > 0) { (*collection_len)--; } } if (collection) { *collection = (const char *) (iter->raw + iter->d2); } if (oid) { *oid = (const bson_oid_t *) (iter->raw + iter->d3); } } } /* *-------------------------------------------------------------------------- * * bson_iter_symbol -- * * Retrieves the symbol of the current field of type BSON_TYPE_SYMBOL. * * Parameters: * @iter: A bson_iter_t. * @length: A location for the length of the symbol. * * Returns: * A string containing the symbol as UTF-8. The value should not be * modified or freed. * * Side effects: * @length is set to the resulting strings length in bytes, * excluding the null byte. * *-------------------------------------------------------------------------- */ const char * bson_iter_symbol (const bson_iter_t *iter, /* IN */ uint32_t *length) /* OUT */ { const char *ret = NULL; uint32_t ret_length = 0; BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_SYMBOL) { ret = (const char *) (iter->raw + iter->d2); ret_length = bson_iter_utf8_len_unsafe (iter); } if (length) { *length = ret_length; } return ret; } /* *-------------------------------------------------------------------------- * * bson_iter_date_time -- * * Fetches the number of milliseconds elapsed since the UNIX epoch. * This value can be negative as times before 1970 are valid. * * Returns: * A signed 64-bit integer containing the number of milliseconds. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int64_t bson_iter_date_time (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DATE_TIME) { return bson_iter_int64_unsafe (iter); } return 0; } /* *-------------------------------------------------------------------------- * * bson_iter_time_t -- * * Retrieves the current field of type BSON_TYPE_DATE_TIME as a * time_t. * * Returns: * A #time_t of the number of seconds since UNIX epoch in UTC. * * Side effects: * None. * *-------------------------------------------------------------------------- */ time_t bson_iter_time_t (const bson_iter_t *iter) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DATE_TIME) { return bson_iter_time_t_unsafe (iter); } return 0; } /* *-------------------------------------------------------------------------- * * bson_iter_timestamp -- * * Fetches the current field if it is a BSON_TYPE_TIMESTAMP. * * Parameters: * @iter: A #bson_iter_t. * @timestamp: a location for the timestamp. * @increment: A location for the increment. * * Returns: * None. * * Side effects: * @timestamp is initialized. * @increment is initialized. 
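 *
 *    A short usage sketch (illustrative; assumes a local bson_iter_t iter):
 *
 *       uint32_t ts = 0;
 *       uint32_t inc = 0;
 *
 *       if (BSON_ITER_HOLDS_TIMESTAMP (&iter)) {
 *          bson_iter_timestamp (&iter, &ts, &inc);
 *       }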
* *-------------------------------------------------------------------------- */ void bson_iter_timestamp (const bson_iter_t *iter, /* IN */ uint32_t *timestamp, /* OUT */ uint32_t *increment) /* OUT */ { uint64_t encoded; uint32_t ret_timestamp = 0; uint32_t ret_increment = 0; BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_TIMESTAMP) { memcpy (&encoded, iter->raw + iter->d1, sizeof (encoded)); encoded = BSON_UINT64_FROM_LE (encoded); ret_timestamp = (encoded >> 32) & 0xFFFFFFFF; ret_increment = encoded & 0xFFFFFFFF; } if (timestamp) { *timestamp = ret_timestamp; } if (increment) { *increment = ret_increment; } } /* *-------------------------------------------------------------------------- * * bson_iter_timeval -- * * Retrieves the current field of type BSON_TYPE_DATE_TIME and stores * it into the struct timeval provided. tv->tv_sec is set to the * number of seconds since the UNIX epoch in UTC. * * Since BSON_TYPE_DATE_TIME does not support fractions of a second, * tv->tv_usec will always be set to zero. * * Returns: * None. * * Side effects: * @tv is initialized. * *-------------------------------------------------------------------------- */ void bson_iter_timeval (const bson_iter_t *iter, /* IN */ struct timeval *tv) /* OUT */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DATE_TIME) { bson_iter_timeval_unsafe (iter, tv); return; } memset (tv, 0, sizeof *tv); } /** * bson_iter_document: * @iter: a bson_iter_t. * @document_len: A location for the document length. * @document: A location for a pointer to the document buffer. * */ /* *-------------------------------------------------------------------------- * * bson_iter_document -- * * Retrieves the data to the document BSON structure and stores the * length of the document buffer in @document_len and the document * buffer in @document. * * If you would like to iterate over the child contents, you might * consider creating a bson_t on the stack such as the following. It * allows you to call functions taking a const bson_t* only. * * bson_t b; * uint32_t len; * const uint8_t *data; * * bson_iter_document(iter, &len, &data); * * if (bson_init_static (&b, data, len)) { * ... * } * * There is no need to cleanup the bson_t structure as no data can be * modified in the process of its use (as it is static/const). * * Returns: * None. * * Side effects: * @document_len is initialized. * @document is initialized. * *-------------------------------------------------------------------------- */ void bson_iter_document (const bson_iter_t *iter, /* IN */ uint32_t *document_len, /* OUT */ const uint8_t **document) /* OUT */ { BSON_ASSERT (iter); BSON_ASSERT (document_len); BSON_ASSERT (document); *document = NULL; *document_len = 0; if (ITER_TYPE (iter) == BSON_TYPE_DOCUMENT) { memcpy (document_len, (iter->raw + iter->d1), sizeof (*document_len)); *document_len = BSON_UINT32_FROM_LE (*document_len); *document = (iter->raw + iter->d1); } } /** * bson_iter_array: * @iter: a #bson_iter_t. * @array_len: A location for the array length. * @array: A location for a pointer to the array buffer. */ /* *-------------------------------------------------------------------------- * * bson_iter_array -- * * Retrieves the data to the array BSON structure and stores the * length of the array buffer in @array_len and the array buffer in * @array. * * If you would like to iterate over the child contents, you might * consider creating a bson_t on the stack such as the following. It * allows you to call functions taking a const bson_t* only. 
* * bson_t b; * uint32_t len; * const uint8_t *data; * * bson_iter_array (iter, &len, &data); * * if (bson_init_static (&b, data, len)) { * ... * } * * There is no need to cleanup the #bson_t structure as no data can be * modified in the process of its use. * * Returns: * None. * * Side effects: * @array_len is initialized. * @array is initialized. * *-------------------------------------------------------------------------- */ void bson_iter_array (const bson_iter_t *iter, /* IN */ uint32_t *array_len, /* OUT */ const uint8_t **array) /* OUT */ { BSON_ASSERT (iter); BSON_ASSERT (array_len); BSON_ASSERT (array); *array = NULL; *array_len = 0; if (ITER_TYPE (iter) == BSON_TYPE_ARRAY) { memcpy (array_len, (iter->raw + iter->d1), sizeof (*array_len)); *array_len = BSON_UINT32_FROM_LE (*array_len); *array = (iter->raw + iter->d1); } } #define VISIT_FIELD(name) visitor->visit_##name && visitor->visit_##name #define VISIT_AFTER VISIT_FIELD (after) #define VISIT_BEFORE VISIT_FIELD (before) #define VISIT_CORRUPT \ if (visitor->visit_corrupt) \ visitor->visit_corrupt #define VISIT_DOUBLE VISIT_FIELD (double) #define VISIT_UTF8 VISIT_FIELD (utf8) #define VISIT_DOCUMENT VISIT_FIELD (document) #define VISIT_ARRAY VISIT_FIELD (array) #define VISIT_BINARY VISIT_FIELD (binary) #define VISIT_UNDEFINED VISIT_FIELD (undefined) #define VISIT_OID VISIT_FIELD (oid) #define VISIT_BOOL VISIT_FIELD (bool) #define VISIT_DATE_TIME VISIT_FIELD (date_time) #define VISIT_NULL VISIT_FIELD (null) #define VISIT_REGEX VISIT_FIELD (regex) #define VISIT_DBPOINTER VISIT_FIELD (dbpointer) #define VISIT_CODE VISIT_FIELD (code) #define VISIT_SYMBOL VISIT_FIELD (symbol) #define VISIT_CODEWSCOPE VISIT_FIELD (codewscope) #define VISIT_INT32 VISIT_FIELD (int32) #define VISIT_TIMESTAMP VISIT_FIELD (timestamp) #define VISIT_INT64 VISIT_FIELD (int64) #define VISIT_DECIMAL128 VISIT_FIELD (decimal128) #define VISIT_MAXKEY VISIT_FIELD (maxkey) #define VISIT_MINKEY VISIT_FIELD (minkey) bool bson_iter_visit_all (bson_iter_t *iter, /* INOUT */ const bson_visitor_t *visitor, /* IN */ void *data) /* IN */ { uint32_t bson_type; const char *key; bool unsupported; BSON_ASSERT (iter); BSON_ASSERT (visitor); while (_bson_iter_next_internal (iter, 0, &key, &bson_type, &unsupported)) { if (*key && !bson_utf8_validate (key, strlen (key), false)) { iter->err_off = iter->off; break; } if (VISIT_BEFORE (iter, key, data)) { return true; } switch (bson_type) { case BSON_TYPE_DOUBLE: if (VISIT_DOUBLE (iter, key, bson_iter_double (iter), data)) { return true; } break; case BSON_TYPE_UTF8: { uint32_t utf8_len; const char *utf8; utf8 = bson_iter_utf8 (iter, &utf8_len); if (!bson_utf8_validate (utf8, utf8_len, true)) { iter->err_off = iter->off; return true; } if (VISIT_UTF8 (iter, key, utf8_len, utf8, data)) { return true; } } break; case BSON_TYPE_DOCUMENT: { const uint8_t *docbuf = NULL; uint32_t doclen = 0; bson_t b; bson_iter_document (iter, &doclen, &docbuf); if (bson_init_static (&b, docbuf, doclen) && VISIT_DOCUMENT (iter, key, &b, data)) { return true; } } break; case BSON_TYPE_ARRAY: { const uint8_t *docbuf = NULL; uint32_t doclen = 0; bson_t b; bson_iter_array (iter, &doclen, &docbuf); if (bson_init_static (&b, docbuf, doclen) && VISIT_ARRAY (iter, key, &b, data)) { return true; } } break; case BSON_TYPE_BINARY: { const uint8_t *binary = NULL; bson_subtype_t subtype = BSON_SUBTYPE_BINARY; uint32_t binary_len = 0; bson_iter_binary (iter, &subtype, &binary_len, &binary); if (VISIT_BINARY (iter, key, subtype, binary_len, binary, data)) { return true; } 
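            /* Each VISIT_* macro expands to
             * "visitor->visit_x && visitor->visit_x (...)", so an unset
             * callback is simply skipped, while a callback returning true
             * aborts the iteration and makes bson_iter_visit_all() return
             * true. */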
} break; case BSON_TYPE_UNDEFINED: if (VISIT_UNDEFINED (iter, key, data)) { return true; } break; case BSON_TYPE_OID: if (VISIT_OID (iter, key, bson_iter_oid (iter), data)) { return true; } break; case BSON_TYPE_BOOL: if (VISIT_BOOL (iter, key, bson_iter_bool (iter), data)) { return true; } break; case BSON_TYPE_DATE_TIME: if (VISIT_DATE_TIME (iter, key, bson_iter_date_time (iter), data)) { return true; } break; case BSON_TYPE_NULL: if (VISIT_NULL (iter, key, data)) { return true; } break; case BSON_TYPE_REGEX: { const char *regex = NULL; const char *options = NULL; regex = bson_iter_regex (iter, &options); if (!bson_utf8_validate (regex, strlen (regex), true)) { iter->err_off = iter->off; return true; } if (VISIT_REGEX (iter, key, regex, options, data)) { return true; } } break; case BSON_TYPE_DBPOINTER: { uint32_t collection_len = 0; const char *collection = NULL; const bson_oid_t *oid = NULL; bson_iter_dbpointer (iter, &collection_len, &collection, &oid); if (!bson_utf8_validate (collection, collection_len, true)) { iter->err_off = iter->off; return true; } if (VISIT_DBPOINTER ( iter, key, collection_len, collection, oid, data)) { return true; } } break; case BSON_TYPE_CODE: { uint32_t code_len; const char *code; code = bson_iter_code (iter, &code_len); if (!bson_utf8_validate (code, code_len, true)) { iter->err_off = iter->off; return true; } if (VISIT_CODE (iter, key, code_len, code, data)) { return true; } } break; case BSON_TYPE_SYMBOL: { uint32_t symbol_len; const char *symbol; symbol = bson_iter_symbol (iter, &symbol_len); if (!bson_utf8_validate (symbol, symbol_len, true)) { iter->err_off = iter->off; return true; } if (VISIT_SYMBOL (iter, key, symbol_len, symbol, data)) { return true; } } break; case BSON_TYPE_CODEWSCOPE: { uint32_t length = 0; const char *code; const uint8_t *docbuf = NULL; uint32_t doclen = 0; bson_t b; code = bson_iter_codewscope (iter, &length, &doclen, &docbuf); if (!bson_utf8_validate (code, length, true)) { iter->err_off = iter->off; return true; } if (bson_init_static (&b, docbuf, doclen) && VISIT_CODEWSCOPE (iter, key, length, code, &b, data)) { return true; } } break; case BSON_TYPE_INT32: if (VISIT_INT32 (iter, key, bson_iter_int32 (iter), data)) { return true; } break; case BSON_TYPE_TIMESTAMP: { uint32_t timestamp; uint32_t increment; bson_iter_timestamp (iter, &timestamp, &increment); if (VISIT_TIMESTAMP (iter, key, timestamp, increment, data)) { return true; } } break; case BSON_TYPE_INT64: if (VISIT_INT64 (iter, key, bson_iter_int64 (iter), data)) { return true; } break; case BSON_TYPE_DECIMAL128: { bson_decimal128_t dec; bson_iter_decimal128 (iter, &dec); if (VISIT_DECIMAL128 (iter, key, &dec, data)) { return true; } } break; case BSON_TYPE_MAXKEY: if (VISIT_MAXKEY (iter, bson_iter_key_unsafe (iter), data)) { return true; } break; case BSON_TYPE_MINKEY: if (VISIT_MINKEY (iter, bson_iter_key_unsafe (iter), data)) { return true; } break; case BSON_TYPE_EOD: default: break; } if (VISIT_AFTER (iter, bson_iter_key_unsafe (iter), data)) { return true; } } if (iter->err_off) { if (unsupported && visitor->visit_unsupported_type && bson_utf8_validate (key, strlen (key), false)) { visitor->visit_unsupported_type (iter, key, bson_type, data); return false; } VISIT_CORRUPT (iter, data); } #undef VISIT_FIELD return false; } /* *-------------------------------------------------------------------------- * * bson_iter_overwrite_bool -- * * Overwrites the current BSON_TYPE_BOOLEAN field with a new value. * This is performed in-place and therefore no keys are moved.
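 *
 *       Example (illustrative; "doc" stands for any initialized bson_t
 *       containing a boolean field named "active"):
 *
 *          bson_iter_t iter;
 *
 *          if (bson_iter_init_find (&iter, doc, "active") &&
 *              BSON_ITER_HOLDS_BOOL (&iter)) {
 *             bson_iter_overwrite_bool (&iter, false);
 *          }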
* * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_iter_overwrite_bool (bson_iter_t *iter, /* IN */ bool value) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_BOOL) { memcpy ((void *) (iter->raw + iter->d1), &value, 1); } } void bson_iter_overwrite_oid (bson_iter_t *iter, const bson_oid_t *value) { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_OID) { memcpy ( (void *) (iter->raw + iter->d1), value->bytes, sizeof (value->bytes)); } } void bson_iter_overwrite_timestamp (bson_iter_t *iter, uint32_t timestamp, uint32_t increment) { uint64_t value; BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_TIMESTAMP) { value = ((((uint64_t) timestamp) << 32U) | ((uint64_t) increment)); value = BSON_UINT64_TO_LE (value); memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value)); } } void bson_iter_overwrite_date_time (bson_iter_t *iter, int64_t value) { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DATE_TIME) { value = BSON_UINT64_TO_LE (value); memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value)); } } /* *-------------------------------------------------------------------------- * * bson_iter_overwrite_int32 -- * * Overwrites the current BSON_TYPE_INT32 field with a new value. * This is performed in-place and therefore no keys are moved. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_iter_overwrite_int32 (bson_iter_t *iter, /* IN */ int32_t value) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_INT32) { #if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN value = BSON_UINT32_TO_LE (value); #endif memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value)); } } /* *-------------------------------------------------------------------------- * * bson_iter_overwrite_int64 -- * * Overwrites the current BSON_TYPE_INT64 field with a new value. * This is performed in-place and therefore no keys are moved. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_iter_overwrite_int64 (bson_iter_t *iter, /* IN */ int64_t value) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_INT64) { #if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN value = BSON_UINT64_TO_LE (value); #endif memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value)); } } /* *-------------------------------------------------------------------------- * * bson_iter_overwrite_double -- * * Overwrites the current BSON_TYPE_DOUBLE field with a new value. * This is performed in-place and therefore no keys are moved. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_iter_overwrite_double (bson_iter_t *iter, /* IN */ double value) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DOUBLE) { value = BSON_DOUBLE_TO_LE (value); memcpy ((void *) (iter->raw + iter->d1), &value, sizeof (value)); } } /* *-------------------------------------------------------------------------- * * bson_iter_overwrite_decimal128 -- * * Overwrites the current BSON_TYPE_DECIMAL128 field with a new value. * This is performed in-place and therefore no keys are moved. * * Returns: * None. * * Side effects: * None. 
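 *
 *       Example (illustrative; assumes @iter is positioned on a
 *       BSON_TYPE_DECIMAL128 element):
 *
 *          bson_decimal128_t dec;
 *
 *          if (bson_decimal128_from_string ("123.45", &dec)) {
 *             bson_iter_overwrite_decimal128 (&iter, &dec);
 *          }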
* *-------------------------------------------------------------------------- */ void bson_iter_overwrite_decimal128 (bson_iter_t *iter, /* IN */ const bson_decimal128_t *value) /* IN */ { BSON_ASSERT (iter); if (ITER_TYPE (iter) == BSON_TYPE_DECIMAL128) { #if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN uint64_t data[2]; data[0] = BSON_UINT64_TO_LE (value->low); data[1] = BSON_UINT64_TO_LE (value->high); memcpy ((void *) (iter->raw + iter->d1), data, sizeof (data)); #else memcpy ((void *) (iter->raw + iter->d1), value, sizeof (*value)); #endif } } /* *-------------------------------------------------------------------------- * * bson_iter_value -- * * Retrieves a bson_value_t containing the boxed value of the current * element. The result of this function valid until the state of * iter has been changed (through the use of bson_iter_next()). * * Returns: * A bson_value_t that should not be modified or freed. If you need * to hold on to the value, use bson_value_copy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ const bson_value_t * bson_iter_value (bson_iter_t *iter) /* IN */ { bson_value_t *value; BSON_ASSERT (iter); value = &iter->value; value->value_type = ITER_TYPE (iter); switch (value->value_type) { case BSON_TYPE_DOUBLE: value->value.v_double = bson_iter_double (iter); break; case BSON_TYPE_UTF8: value->value.v_utf8.str = (char *) bson_iter_utf8 (iter, &value->value.v_utf8.len); break; case BSON_TYPE_DOCUMENT: bson_iter_document (iter, &value->value.v_doc.data_len, (const uint8_t **) &value->value.v_doc.data); break; case BSON_TYPE_ARRAY: bson_iter_array (iter, &value->value.v_doc.data_len, (const uint8_t **) &value->value.v_doc.data); break; case BSON_TYPE_BINARY: bson_iter_binary (iter, &value->value.v_binary.subtype, &value->value.v_binary.data_len, (const uint8_t **) &value->value.v_binary.data); break; case BSON_TYPE_OID: bson_oid_copy (bson_iter_oid (iter), &value->value.v_oid); break; case BSON_TYPE_BOOL: value->value.v_bool = bson_iter_bool (iter); break; case BSON_TYPE_DATE_TIME: value->value.v_datetime = bson_iter_date_time (iter); break; case BSON_TYPE_REGEX: value->value.v_regex.regex = (char *) bson_iter_regex ( iter, (const char **) &value->value.v_regex.options); break; case BSON_TYPE_DBPOINTER: { const bson_oid_t *oid; bson_iter_dbpointer (iter, &value->value.v_dbpointer.collection_len, (const char **) &value->value.v_dbpointer.collection, &oid); bson_oid_copy (oid, &value->value.v_dbpointer.oid); break; } case BSON_TYPE_CODE: value->value.v_code.code = (char *) bson_iter_code (iter, &value->value.v_code.code_len); break; case BSON_TYPE_SYMBOL: value->value.v_symbol.symbol = (char *) bson_iter_symbol (iter, &value->value.v_symbol.len); break; case BSON_TYPE_CODEWSCOPE: value->value.v_codewscope.code = (char *) bson_iter_codewscope ( iter, &value->value.v_codewscope.code_len, &value->value.v_codewscope.scope_len, (const uint8_t **) &value->value.v_codewscope.scope_data); break; case BSON_TYPE_INT32: value->value.v_int32 = bson_iter_int32 (iter); break; case BSON_TYPE_TIMESTAMP: bson_iter_timestamp (iter, &value->value.v_timestamp.timestamp, &value->value.v_timestamp.increment); break; case BSON_TYPE_INT64: value->value.v_int64 = bson_iter_int64 (iter); break; case BSON_TYPE_DECIMAL128: bson_iter_decimal128 (iter, &(value->value.v_decimal128)); break; case BSON_TYPE_NULL: case BSON_TYPE_UNDEFINED: case BSON_TYPE_MAXKEY: case BSON_TYPE_MINKEY: break; case BSON_TYPE_EOD: default: return NULL; } return value; } 
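/*
 * Example use of bson_iter_value() (illustrative; "doc" stands for any
 * initialized bson_t). The returned pointer is only valid until the
 * iterator is advanced, so copy the value if it must outlive @iter:
 *
 *    bson_iter_t iter;
 *    const bson_value_t *v;
 *    bson_value_t copy;
 *
 *    if (bson_iter_init_find (&iter, doc, "field") &&
 *        (v = bson_iter_value (&iter))) {
 *       bson_value_copy (v, &copy);
 *       // ... use the copy after the iterator moves on ...
 *       bson_value_destroy (&copy);
 *    }
 */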
uint32_t bson_iter_key_len (const bson_iter_t *iter) { /* * f i e l d n a m e \0 _ * ^ ^ * | | * iter->key iter->d1 * */ BSON_ASSERT (iter->d1 > iter->key); return iter->d1 - iter->key - 1; } bool bson_iter_init_from_data_at_offset (bson_iter_t *iter, const uint8_t *data, size_t length, uint32_t offset, uint32_t keylen) { const char *key; uint32_t bson_type; bool unsupported; BSON_ASSERT (iter); BSON_ASSERT (data); if (BSON_UNLIKELY ((length < 5) || (length > INT_MAX))) { memset (iter, 0, sizeof *iter); return false; } iter->raw = (uint8_t *) data; iter->len = (uint32_t) length; iter->off = 0; iter->type = 0; iter->key = 0; iter->next_off = offset; iter->err_off = 0; if (!_bson_iter_next_internal ( iter, keylen, &key, &bson_type, &unsupported)) { memset (iter, 0, sizeof *iter); return false; } return true; } uint32_t bson_iter_offset (bson_iter_t *iter) { return iter->off; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-iter.h0000644000076500000240000003160513572250757023250 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_ITER_H #define BSON_ITER_H #include "bson/bson.h" #include "bson/bson-endian.h" #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS #define BSON_ITER_HOLDS_DOUBLE(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_DOUBLE) #define BSON_ITER_HOLDS_UTF8(iter) (bson_iter_type ((iter)) == BSON_TYPE_UTF8) #define BSON_ITER_HOLDS_DOCUMENT(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_DOCUMENT) #define BSON_ITER_HOLDS_ARRAY(iter) (bson_iter_type ((iter)) == BSON_TYPE_ARRAY) #define BSON_ITER_HOLDS_BINARY(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_BINARY) #define BSON_ITER_HOLDS_UNDEFINED(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_UNDEFINED) #define BSON_ITER_HOLDS_OID(iter) (bson_iter_type ((iter)) == BSON_TYPE_OID) #define BSON_ITER_HOLDS_BOOL(iter) (bson_iter_type ((iter)) == BSON_TYPE_BOOL) #define BSON_ITER_HOLDS_DATE_TIME(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_DATE_TIME) #define BSON_ITER_HOLDS_NULL(iter) (bson_iter_type ((iter)) == BSON_TYPE_NULL) #define BSON_ITER_HOLDS_REGEX(iter) (bson_iter_type ((iter)) == BSON_TYPE_REGEX) #define BSON_ITER_HOLDS_DBPOINTER(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_DBPOINTER) #define BSON_ITER_HOLDS_CODE(iter) (bson_iter_type ((iter)) == BSON_TYPE_CODE) #define BSON_ITER_HOLDS_SYMBOL(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_SYMBOL) #define BSON_ITER_HOLDS_CODEWSCOPE(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_CODEWSCOPE) #define BSON_ITER_HOLDS_INT32(iter) (bson_iter_type ((iter)) == BSON_TYPE_INT32) #define BSON_ITER_HOLDS_TIMESTAMP(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_TIMESTAMP) #define BSON_ITER_HOLDS_INT64(iter) (bson_iter_type ((iter)) == BSON_TYPE_INT64) #define BSON_ITER_HOLDS_DECIMAL128(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_DECIMAL128) #define BSON_ITER_HOLDS_MAXKEY(iter) \ (bson_iter_type ((iter)) == BSON_TYPE_MAXKEY) #define BSON_ITER_HOLDS_MINKEY(iter) \ (bson_iter_type 
((iter)) == BSON_TYPE_MINKEY) #define BSON_ITER_HOLDS_INT(iter) \ (BSON_ITER_HOLDS_INT32 (iter) || BSON_ITER_HOLDS_INT64 (iter)) #define BSON_ITER_HOLDS_NUMBER(iter) \ (BSON_ITER_HOLDS_INT (iter) || BSON_ITER_HOLDS_DOUBLE (iter)) #define BSON_ITER_IS_KEY(iter, key) \ (0 == strcmp ((key), bson_iter_key ((iter)))) BSON_EXPORT (const bson_value_t *) bson_iter_value (bson_iter_t *iter); /** * bson_iter_utf8_len_unsafe: * @iter: a bson_iter_t. * * Returns the length of a string currently pointed to by @iter. This performs * no validation so the is responsible for knowing the BSON is valid. Calling * bson_validate() is one way to do this ahead of time. */ static BSON_INLINE uint32_t bson_iter_utf8_len_unsafe (const bson_iter_t *iter) { int32_t val; memcpy (&val, iter->raw + iter->d1, sizeof (val)); val = BSON_UINT32_FROM_LE (val); return BSON_MAX (0, val - 1); } BSON_EXPORT (void) bson_iter_array (const bson_iter_t *iter, uint32_t *array_len, const uint8_t **array); BSON_EXPORT (void) bson_iter_binary (const bson_iter_t *iter, bson_subtype_t *subtype, uint32_t *binary_len, const uint8_t **binary); BSON_EXPORT (const char *) bson_iter_code (const bson_iter_t *iter, uint32_t *length); /** * bson_iter_code_unsafe: * @iter: A bson_iter_t. * @length: A location for the length of the resulting string. * * Like bson_iter_code() but performs no integrity checks. * * Returns: A string that should not be modified or freed. */ static BSON_INLINE const char * bson_iter_code_unsafe (const bson_iter_t *iter, uint32_t *length) { *length = bson_iter_utf8_len_unsafe (iter); return (const char *) (iter->raw + iter->d2); } BSON_EXPORT (const char *) bson_iter_codewscope (const bson_iter_t *iter, uint32_t *length, uint32_t *scope_len, const uint8_t **scope); BSON_EXPORT (void) bson_iter_dbpointer (const bson_iter_t *iter, uint32_t *collection_len, const char **collection, const bson_oid_t **oid); BSON_EXPORT (void) bson_iter_document (const bson_iter_t *iter, uint32_t *document_len, const uint8_t **document); BSON_EXPORT (double) bson_iter_double (const bson_iter_t *iter); BSON_EXPORT (double) bson_iter_as_double (const bson_iter_t *iter); /** * bson_iter_double_unsafe: * @iter: A bson_iter_t. * * Similar to bson_iter_double() but does not perform an integrity checking. * * Returns: A double. */ static BSON_INLINE double bson_iter_double_unsafe (const bson_iter_t *iter) { double val; memcpy (&val, iter->raw + iter->d1, sizeof (val)); return BSON_DOUBLE_FROM_LE (val); } BSON_EXPORT (bool) bson_iter_init (bson_iter_t *iter, const bson_t *bson); BSON_EXPORT (bool) bson_iter_init_from_data (bson_iter_t *iter, const uint8_t *data, size_t length); BSON_EXPORT (bool) bson_iter_init_find (bson_iter_t *iter, const bson_t *bson, const char *key); BSON_EXPORT (bool) bson_iter_init_find_w_len (bson_iter_t *iter, const bson_t *bson, const char *key, int keylen); BSON_EXPORT (bool) bson_iter_init_find_case (bson_iter_t *iter, const bson_t *bson, const char *key); BSON_EXPORT (bool) bson_iter_init_from_data_at_offset (bson_iter_t *iter, const uint8_t *data, size_t length, uint32_t offset, uint32_t keylen); BSON_EXPORT (int32_t) bson_iter_int32 (const bson_iter_t *iter); /** * bson_iter_int32_unsafe: * @iter: A bson_iter_t. * * Similar to bson_iter_int32() but with no integrity checking. * * Returns: A 32-bit signed integer. 
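 *
 * Example (illustrative): unless the document has already been validated,
 * prefer the checked accessor guarded by a type test:
 *
 *    if (BSON_ITER_HOLDS_INT32 (&iter)) {
 *       int32_t n = bson_iter_int32 (&iter);
 *    }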
*/ static BSON_INLINE int32_t bson_iter_int32_unsafe (const bson_iter_t *iter) { int32_t val; memcpy (&val, iter->raw + iter->d1, sizeof (val)); return BSON_UINT32_FROM_LE (val); } BSON_EXPORT (int64_t) bson_iter_int64 (const bson_iter_t *iter); BSON_EXPORT (int64_t) bson_iter_as_int64 (const bson_iter_t *iter); /** * bson_iter_int64_unsafe: * @iter: a bson_iter_t. * * Similar to bson_iter_int64() but without integrity checking. * * Returns: A 64-bit signed integer. */ static BSON_INLINE int64_t bson_iter_int64_unsafe (const bson_iter_t *iter) { int64_t val; memcpy (&val, iter->raw + iter->d1, sizeof (val)); return BSON_UINT64_FROM_LE (val); } BSON_EXPORT (bool) bson_iter_find (bson_iter_t *iter, const char *key); BSON_EXPORT (bool) bson_iter_find_w_len (bson_iter_t *iter, const char *key, int keylen); BSON_EXPORT (bool) bson_iter_find_case (bson_iter_t *iter, const char *key); BSON_EXPORT (bool) bson_iter_find_descendant (bson_iter_t *iter, const char *dotkey, bson_iter_t *descendant); BSON_EXPORT (bool) bson_iter_next (bson_iter_t *iter); BSON_EXPORT (const bson_oid_t *) bson_iter_oid (const bson_iter_t *iter); /** * bson_iter_oid_unsafe: * @iter: A #bson_iter_t. * * Similar to bson_iter_oid() but performs no integrity checks. * * Returns: A #bson_oid_t that should not be modified or freed. */ static BSON_INLINE const bson_oid_t * bson_iter_oid_unsafe (const bson_iter_t *iter) { return (const bson_oid_t *) (iter->raw + iter->d1); } BSON_EXPORT (bool) bson_iter_decimal128 (const bson_iter_t *iter, bson_decimal128_t *dec); /** * bson_iter_decimal128_unsafe: * @iter: A #bson_iter_t. * * Similar to bson_iter_decimal128() but performs no integrity checks. * * Returns: A #bson_decimal128_t. */ static BSON_INLINE void bson_iter_decimal128_unsafe (const bson_iter_t *iter, bson_decimal128_t *dec) { uint64_t low_le; uint64_t high_le; memcpy (&low_le, iter->raw + iter->d1, sizeof (low_le)); memcpy (&high_le, iter->raw + iter->d1 + 8, sizeof (high_le)); dec->low = BSON_UINT64_FROM_LE (low_le); dec->high = BSON_UINT64_FROM_LE (high_le); } BSON_EXPORT (const char *) bson_iter_key (const bson_iter_t *iter); BSON_EXPORT (uint32_t) bson_iter_key_len (const bson_iter_t *iter); /** * bson_iter_key_unsafe: * @iter: A bson_iter_t. * * Similar to bson_iter_key() but performs no integrity checking. * * Returns: A string that should not be modified or freed. */ static BSON_INLINE const char * bson_iter_key_unsafe (const bson_iter_t *iter) { return (const char *) (iter->raw + iter->key); } BSON_EXPORT (const char *) bson_iter_utf8 (const bson_iter_t *iter, uint32_t *length); /** * bson_iter_utf8_unsafe: * * Similar to bson_iter_utf8() but performs no integrity checking. * * Returns: A string that should not be modified or freed. */ static BSON_INLINE const char * bson_iter_utf8_unsafe (const bson_iter_t *iter, size_t *length) { *length = bson_iter_utf8_len_unsafe (iter); return (const char *) (iter->raw + iter->d2); } BSON_EXPORT (char *) bson_iter_dup_utf8 (const bson_iter_t *iter, uint32_t *length); BSON_EXPORT (int64_t) bson_iter_date_time (const bson_iter_t *iter); BSON_EXPORT (time_t) bson_iter_time_t (const bson_iter_t *iter); /** * bson_iter_time_t_unsafe: * @iter: A bson_iter_t. * * Similar to bson_iter_time_t() but performs no integrity checking. * * Returns: A time_t containing the number of seconds since UNIX epoch * in UTC. 
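 *
 * Example (illustrative; assumes @iter points at a BSON_TYPE_DATE_TIME
 * element): the same field can be read as milliseconds or whole seconds:
 *
 *    if (BSON_ITER_HOLDS_DATE_TIME (&iter)) {
 *       int64_t msec = bson_iter_date_time (&iter);
 *       time_t sec = bson_iter_time_t (&iter);
 *    }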
*/ static BSON_INLINE time_t bson_iter_time_t_unsafe (const bson_iter_t *iter) { return (time_t) (bson_iter_int64_unsafe (iter) / 1000UL); } BSON_EXPORT (void) bson_iter_timeval (const bson_iter_t *iter, struct timeval *tv); /** * bson_iter_timeval_unsafe: * @iter: A bson_iter_t. * @tv: A struct timeval. * * Similar to bson_iter_timeval() but performs no integrity checking. */ static BSON_INLINE void bson_iter_timeval_unsafe (const bson_iter_t *iter, struct timeval *tv) { int64_t value = bson_iter_int64_unsafe (iter); #ifdef BSON_OS_WIN32 tv->tv_sec = (long) (value / 1000); #else tv->tv_sec = (suseconds_t) (value / 1000); #endif tv->tv_usec = (value % 1000) * 1000; } BSON_EXPORT (void) bson_iter_timestamp (const bson_iter_t *iter, uint32_t *timestamp, uint32_t *increment); BSON_EXPORT (bool) bson_iter_bool (const bson_iter_t *iter); /** * bson_iter_bool_unsafe: * @iter: A bson_iter_t. * * Similar to bson_iter_bool() but performs no integrity checking. * * Returns: true or false. */ static BSON_INLINE bool bson_iter_bool_unsafe (const bson_iter_t *iter) { char val; memcpy (&val, iter->raw + iter->d1, 1); return !!val; } BSON_EXPORT (bool) bson_iter_as_bool (const bson_iter_t *iter); BSON_EXPORT (const char *) bson_iter_regex (const bson_iter_t *iter, const char **options); BSON_EXPORT (const char *) bson_iter_symbol (const bson_iter_t *iter, uint32_t *length); BSON_EXPORT (bson_type_t) bson_iter_type (const bson_iter_t *iter); /** * bson_iter_type_unsafe: * @iter: A bson_iter_t. * * Similar to bson_iter_type() but performs no integrity checking. * * Returns: A bson_type_t. */ static BSON_INLINE bson_type_t bson_iter_type_unsafe (const bson_iter_t *iter) { return (bson_type_t) (iter->raw + iter->type)[0]; } BSON_EXPORT (bool) bson_iter_recurse (const bson_iter_t *iter, bson_iter_t *child); BSON_EXPORT (void) bson_iter_overwrite_int32 (bson_iter_t *iter, int32_t value); BSON_EXPORT (void) bson_iter_overwrite_int64 (bson_iter_t *iter, int64_t value); BSON_EXPORT (void) bson_iter_overwrite_double (bson_iter_t *iter, double value); BSON_EXPORT (void) bson_iter_overwrite_decimal128 (bson_iter_t *iter, const bson_decimal128_t *value); BSON_EXPORT (void) bson_iter_overwrite_bool (bson_iter_t *iter, bool value); BSON_EXPORT (void) bson_iter_overwrite_oid (bson_iter_t *iter, const bson_oid_t *value); BSON_EXPORT (void) bson_iter_overwrite_timestamp (bson_iter_t *iter, uint32_t timestamp, uint32_t increment); BSON_EXPORT (void) bson_iter_overwrite_date_time (bson_iter_t *iter, int64_t value); BSON_EXPORT (bool) bson_iter_visit_all (bson_iter_t *iter, const bson_visitor_t *visitor, void *data); BSON_EXPORT (uint32_t) bson_iter_offset (bson_iter_t *iter); BSON_END_DECLS #endif /* BSON_ITER_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-json.c0000644000076500000240000022556013572250757023256 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include "bson/bson.h" #include "bson/bson-config.h" #include "bson/bson-json.h" #include "bson/bson-iso8601-private.h" #include "common-b64-private.h" #include "jsonsl/jsonsl.h" #ifdef _WIN32 #include #include #endif #ifndef _MSC_VER #include #endif #ifdef _MSC_VER #define SSCANF sscanf_s #else #define SSCANF sscanf #endif #define STACK_MAX 100 #define BSON_JSON_DEFAULT_BUF_SIZE (1 << 14) #define AT_LEAST_0(x) ((x) >= 0 ? (x) : 0) #define READ_STATE_ENUM(ENUM) BSON_JSON_##ENUM, #define GENERATE_STRING(STRING) #STRING, #define FOREACH_READ_STATE(RS) \ RS (REGULAR) \ RS (DONE) \ RS (ERROR) \ RS (IN_START_MAP) \ RS (IN_BSON_TYPE) \ RS (IN_BSON_TYPE_DATE_NUMBERLONG) \ RS (IN_BSON_TYPE_DATE_ENDMAP) \ RS (IN_BSON_TYPE_TIMESTAMP_STARTMAP) \ RS (IN_BSON_TYPE_TIMESTAMP_VALUES) \ RS (IN_BSON_TYPE_TIMESTAMP_ENDMAP) \ RS (IN_BSON_TYPE_REGEX_STARTMAP) \ RS (IN_BSON_TYPE_REGEX_VALUES) \ RS (IN_BSON_TYPE_REGEX_ENDMAP) \ RS (IN_BSON_TYPE_BINARY_VALUES) \ RS (IN_BSON_TYPE_BINARY_ENDMAP) \ RS (IN_BSON_TYPE_SCOPE_STARTMAP) \ RS (IN_BSON_TYPE_DBPOINTER_STARTMAP) \ RS (IN_SCOPE) \ RS (IN_DBPOINTER) typedef enum { FOREACH_READ_STATE (READ_STATE_ENUM) } bson_json_read_state_t; static const char *read_state_names[] = {FOREACH_READ_STATE (GENERATE_STRING)}; #define BSON_STATE_ENUM(ENUM) BSON_JSON_LF_##ENUM, #define FOREACH_BSON_STATE(BS) \ /* legacy {$regex: "...", $options: "..."} */ \ BS (REGEX) \ BS (OPTIONS) \ /* modern $regularExpression: {pattern: "...", options: "..."} */ \ BS (REGULAR_EXPRESSION_PATTERN) \ BS (REGULAR_EXPRESSION_OPTIONS) \ BS (CODE) \ BS (SCOPE) \ BS (OID) \ BS (BINARY) \ BS (TYPE) \ BS (DATE) \ BS (TIMESTAMP_T) \ BS (TIMESTAMP_I) \ BS (UNDEFINED) \ BS (MINKEY) \ BS (MAXKEY) \ BS (INT32) \ BS (INT64) \ BS (DOUBLE) \ BS (DECIMAL128) \ BS (DBPOINTER) \ BS (SYMBOL) \ BS (DBREF) typedef enum { FOREACH_BSON_STATE (BSON_STATE_ENUM) } bson_json_read_bson_state_t; static const char *bson_state_names[] = {FOREACH_BSON_STATE (GENERATE_STRING)}; typedef struct { uint8_t *buf; size_t n_bytes; size_t len; } bson_json_buf_t; typedef enum { BSON_JSON_FRAME_INITIAL = 0, BSON_JSON_FRAME_ARRAY, BSON_JSON_FRAME_DOC, BSON_JSON_FRAME_SCOPE, BSON_JSON_FRAME_DBPOINTER, } bson_json_frame_type_t; typedef struct { int i; bson_json_frame_type_t type; bool has_ref; bool has_id; bson_t bson; } bson_json_stack_frame_t; typedef union { struct { bool has_pattern; bool has_options; bool is_legacy; } regex; struct { bool has_oid; bson_oid_t oid; } oid; struct { bool has_binary; bool has_subtype; bson_subtype_t type; bool is_legacy; } binary; struct { bool has_date; int64_t date; } date; struct { bool has_t; bool has_i; uint32_t t; uint32_t i; } timestamp; struct { bool has_undefined; } undefined; struct { bool has_minkey; } minkey; struct { bool has_maxkey; } maxkey; struct { int32_t value; } v_int32; struct { int64_t value; } v_int64; struct { double value; } v_double; struct { bson_decimal128_t value; } v_decimal128; } bson_json_bson_data_t; /* collect info while parsing a {$code: "...", $scope: {...}} object */ typedef struct { bool has_code; bool has_scope; bool in_scope; bson_json_buf_t key_buf; bson_json_buf_t code_buf; } bson_json_code_t; static void _bson_json_code_cleanup (bson_json_code_t *code_data) { bson_free (code_data->key_buf.buf); bson_free (code_data->code_buf.buf); } typedef struct { bson_t *bson; bson_json_stack_frame_t stack[STACK_MAX]; int n; const char *key; bson_json_buf_t key_buf; bson_json_buf_t unescaped; bson_json_read_state_t read_state; 
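   /* read_state tracks the reader's position in the extended JSON state
    * machine (one of the FOREACH_READ_STATE values above); bson_state below
    * narrows that to the particular "$"-prefixed field being parsed. */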
bson_json_read_bson_state_t bson_state; bson_type_t bson_type; bson_json_buf_t bson_type_buf[3]; bson_json_bson_data_t bson_type_data; bson_json_code_t code_data; bson_json_buf_t dbpointer_key; } bson_json_reader_bson_t; typedef struct { void *data; bson_json_reader_cb cb; bson_json_destroy_cb dcb; uint8_t *buf; size_t buf_size; size_t bytes_read; size_t bytes_parsed; bool all_whitespace; } bson_json_reader_producer_t; struct _bson_json_reader_t { bson_json_reader_producer_t producer; bson_json_reader_bson_t bson; jsonsl_t json; ssize_t json_text_pos; bool should_reset; ssize_t advance; bson_json_buf_t tok_accumulator; bson_error_t *error; }; typedef struct { int fd; bool do_close; } bson_json_reader_handle_fd_t; /* forward decl */ static void _bson_json_save_map_key (bson_json_reader_bson_t *bson, const uint8_t *val, size_t len); static void _noop (void) { } #define STACK_ELE(_delta, _name) (bson->stack[(_delta) + bson->n]._name) #define STACK_BSON(_delta) \ (((_delta) + bson->n) == 0 ? bson->bson : &STACK_ELE (_delta, bson)) #define STACK_BSON_PARENT STACK_BSON (-1) #define STACK_BSON_CHILD STACK_BSON (0) #define STACK_I STACK_ELE (0, i) #define STACK_FRAME_TYPE STACK_ELE (0, type) #define STACK_IS_INITIAL (STACK_FRAME_TYPE == BSON_JSON_FRAME_INITIAL) #define STACK_IS_ARRAY (STACK_FRAME_TYPE == BSON_JSON_FRAME_ARRAY) #define STACK_IS_DOC (STACK_FRAME_TYPE == BSON_JSON_FRAME_DOC) #define STACK_IS_SCOPE (STACK_FRAME_TYPE == BSON_JSON_FRAME_SCOPE) #define STACK_IS_DBPOINTER (STACK_FRAME_TYPE == BSON_JSON_FRAME_DBPOINTER) #define FRAME_TYPE_HAS_BSON(_type) \ ((_type) == BSON_JSON_FRAME_SCOPE || (_type) == BSON_JSON_FRAME_DBPOINTER) #define STACK_HAS_BSON FRAME_TYPE_HAS_BSON (STACK_FRAME_TYPE) #define STACK_HAS_REF STACK_ELE (0, has_ref) #define STACK_HAS_ID STACK_ELE (0, has_id) #define STACK_PUSH(frame_type) \ do { \ if (bson->n >= (STACK_MAX - 1)) { \ return; \ } \ bson->n++; \ if (STACK_HAS_BSON) { \ if (FRAME_TYPE_HAS_BSON (frame_type)) { \ bson_reinit (STACK_BSON_CHILD); \ } else { \ bson_destroy (STACK_BSON_CHILD); \ } \ } else if (FRAME_TYPE_HAS_BSON (frame_type)) { \ bson_init (STACK_BSON_CHILD); \ } \ STACK_FRAME_TYPE = frame_type; \ } while (0) #define STACK_PUSH_ARRAY(statement) \ do { \ STACK_PUSH (BSON_JSON_FRAME_ARRAY); \ STACK_I = 0; \ if (bson->n != 0) { \ statement; \ } \ } while (0) #define STACK_PUSH_DOC(statement) \ do { \ STACK_PUSH (BSON_JSON_FRAME_DOC); \ STACK_HAS_REF = false; \ STACK_HAS_ID = false; \ if (bson->n != 0) { \ statement; \ } \ } while (0) #define STACK_PUSH_SCOPE \ do { \ STACK_PUSH (BSON_JSON_FRAME_SCOPE); \ bson->code_data.in_scope = true; \ } while (0) #define STACK_PUSH_DBPOINTER \ do { \ STACK_PUSH (BSON_JSON_FRAME_DBPOINTER); \ } while (0) #define STACK_POP_ARRAY(statement) \ do { \ if (!STACK_IS_ARRAY) { \ return; \ } \ if (bson->n < 0) { \ return; \ } \ if (bson->n > 0) { \ statement; \ } \ bson->n--; \ } while (0) #define STACK_POP_DOC(statement) \ do { \ if (STACK_IS_ARRAY) { \ return; \ } \ if (bson->n < 0) { \ return; \ } \ if (bson->n > 0) { \ statement; \ } \ bson->n--; \ } while (0) #define STACK_POP_SCOPE \ do { \ STACK_POP_DOC (_noop ()); \ bson->code_data.in_scope = false; \ } while (0); #define STACK_POP_DBPOINTER STACK_POP_DOC (_noop ()) #define BASIC_CB_PREAMBLE \ const char *key; \ size_t len; \ bson_json_reader_bson_t *bson = &reader->bson; \ _bson_json_read_fixup_key (bson); \ key = bson->key; \ len = bson->key_buf.len; #define BASIC_CB_BAIL_IF_NOT_NORMAL(_type) \ if (bson->read_state != BSON_JSON_REGULAR) { \ 
_bson_json_read_set_error (reader, \ "Invalid read of %s in state %s", \ (_type), \ read_state_names[bson->read_state]); \ return; \ } else if (!key) { \ _bson_json_read_set_error (reader, \ "Invalid read of %s without key in state %s", \ (_type), \ read_state_names[bson->read_state]); \ return; \ } #define HANDLE_OPTION(_key, _type, _state) \ (len == strlen (_key) && strncmp ((const char *) val, (_key), len) == 0) \ { \ if (bson->bson_type && bson->bson_type != (_type)) { \ _bson_json_read_set_error (reader, \ "Invalid key \"%s\". Looking for values " \ "for type \"%s\", got \"%s\"", \ (_key), \ _bson_json_type_name (bson->bson_type), \ _bson_json_type_name (_type)); \ return; \ } \ bson->bson_type = (_type); \ bson->bson_state = (_state); \ } static void _bson_json_read_set_error (bson_json_reader_t *reader, const char *fmt, ...) BSON_GNUC_PRINTF (2, 3); static void _bson_json_read_set_error (bson_json_reader_t *reader, /* IN */ const char *fmt, /* IN */ ...) { va_list ap; if (reader->error) { reader->error->domain = BSON_ERROR_JSON; reader->error->code = BSON_JSON_ERROR_READ_INVALID_PARAM; va_start (ap, fmt); bson_vsnprintf ( reader->error->message, sizeof reader->error->message, fmt, ap); va_end (ap); reader->error->message[sizeof reader->error->message - 1] = '\0'; } reader->bson.read_state = BSON_JSON_ERROR; jsonsl_stop (reader->json); } static void _bson_json_read_corrupt (bson_json_reader_t *reader, const char *fmt, ...) BSON_GNUC_PRINTF (2, 3); static void _bson_json_read_corrupt (bson_json_reader_t *reader, /* IN */ const char *fmt, /* IN */ ...) { va_list ap; if (reader->error) { reader->error->domain = BSON_ERROR_JSON; reader->error->code = BSON_JSON_ERROR_READ_CORRUPT_JS; va_start (ap, fmt); bson_vsnprintf ( reader->error->message, sizeof reader->error->message, fmt, ap); va_end (ap); reader->error->message[sizeof reader->error->message - 1] = '\0'; } reader->bson.read_state = BSON_JSON_ERROR; jsonsl_stop (reader->json); } static void _bson_json_buf_ensure (bson_json_buf_t *buf, /* IN */ size_t len) /* IN */ { if (buf->n_bytes < len) { bson_free (buf->buf); buf->n_bytes = bson_next_power_of_two (len); buf->buf = bson_malloc (buf->n_bytes); } } static void _bson_json_buf_set (bson_json_buf_t *buf, const void *from, size_t len) { _bson_json_buf_ensure (buf, len + 1); memcpy (buf->buf, from, len); buf->buf[len] = '\0'; buf->len = len; } static void _bson_json_buf_append (bson_json_buf_t *buf, const void *from, size_t len) { size_t len_with_null = len + 1; if (buf->len == 0) { _bson_json_buf_ensure (buf, len_with_null); } else if (buf->n_bytes < buf->len + len_with_null) { buf->n_bytes = bson_next_power_of_two (buf->len + len_with_null); buf->buf = bson_realloc (buf->buf, buf->n_bytes); } memcpy (buf->buf + buf->len, from, len); buf->len += len; buf->buf[buf->len] = '\0'; } static const char * _bson_json_type_name (bson_type_t type) { switch (type) { case BSON_TYPE_EOD: return "end of document"; case BSON_TYPE_DOUBLE: return "double"; case BSON_TYPE_UTF8: return "utf-8"; case BSON_TYPE_DOCUMENT: return "document"; case BSON_TYPE_ARRAY: return "array"; case BSON_TYPE_BINARY: return "binary"; case BSON_TYPE_UNDEFINED: return "undefined"; case BSON_TYPE_OID: return "objectid"; case BSON_TYPE_BOOL: return "bool"; case BSON_TYPE_DATE_TIME: return "datetime"; case BSON_TYPE_NULL: return "null"; case BSON_TYPE_REGEX: return "regex"; case BSON_TYPE_DBPOINTER: return "dbpointer"; case BSON_TYPE_CODE: return "code"; case BSON_TYPE_SYMBOL: return "symbol"; case BSON_TYPE_CODEWSCOPE: return 
"code with scope"; case BSON_TYPE_INT32: return "int32"; case BSON_TYPE_TIMESTAMP: return "timestamp"; case BSON_TYPE_INT64: return "int64"; case BSON_TYPE_DECIMAL128: return "decimal128"; case BSON_TYPE_MAXKEY: return "maxkey"; case BSON_TYPE_MINKEY: return "minkey"; default: return ""; } } static void _bson_json_read_fixup_key (bson_json_reader_bson_t *bson) /* IN */ { bson_json_read_state_t rs = bson->read_state; if (bson->n >= 0 && STACK_IS_ARRAY && rs == BSON_JSON_REGULAR) { _bson_json_buf_ensure (&bson->key_buf, 12); bson->key_buf.len = bson_uint32_to_string ( STACK_I, &bson->key, (char *) bson->key_buf.buf, 12); STACK_I++; } } static void _bson_json_read_null (bson_json_reader_t *reader) { BASIC_CB_PREAMBLE; BASIC_CB_BAIL_IF_NOT_NORMAL ("null"); bson_append_null (STACK_BSON_CHILD, key, (int) len); } static void _bson_json_read_boolean (bson_json_reader_t *reader, /* IN */ int val) /* IN */ { BASIC_CB_PREAMBLE; if (bson->read_state == BSON_JSON_IN_BSON_TYPE && bson->bson_state == BSON_JSON_LF_UNDEFINED) { bson->bson_type_data.undefined.has_undefined = true; return; } BASIC_CB_BAIL_IF_NOT_NORMAL ("boolean"); bson_append_bool (STACK_BSON_CHILD, key, (int) len, val); } /* sign is -1 or 1 */ static void _bson_json_read_integer (bson_json_reader_t *reader, uint64_t val, int64_t sign) { bson_json_read_state_t rs; bson_json_read_bson_state_t bs; BASIC_CB_PREAMBLE; if (sign == 1 && val > INT64_MAX) { _bson_json_read_set_error ( reader, "Number \"%" PRIu64 "\" is out of range", val); return; } else if (sign == -1 && val > ((uint64_t) INT64_MAX + 1)) { _bson_json_read_set_error ( reader, "Number \"-%" PRIu64 "\" is out of range", val); return; } rs = bson->read_state; bs = bson->bson_state; if (rs == BSON_JSON_REGULAR) { BASIC_CB_BAIL_IF_NOT_NORMAL ("integer"); if (val <= INT32_MAX || (sign == -1 && val <= (uint64_t) INT32_MAX + 1)) { bson_append_int32 ( STACK_BSON_CHILD, key, (int) len, (int) (val * sign)); } else if (sign == -1) { bson_append_int64 (STACK_BSON_CHILD, key, (int) len, (int64_t) -val); } else { bson_append_int64 (STACK_BSON_CHILD, key, (int) len, (int64_t) val); } } else if (rs == BSON_JSON_IN_BSON_TYPE || rs == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES) { switch (bs) { case BSON_JSON_LF_DATE: bson->bson_type_data.date.has_date = true; bson->bson_type_data.date.date = sign * val; break; case BSON_JSON_LF_TIMESTAMP_T: if (sign == -1) { _bson_json_read_set_error ( reader, "Invalid timestamp value: \"-%" PRIu64 "\"", val); return; } bson->bson_type_data.timestamp.has_t = true; bson->bson_type_data.timestamp.t = (uint32_t) val; break; case BSON_JSON_LF_TIMESTAMP_I: if (sign == -1) { _bson_json_read_set_error ( reader, "Invalid timestamp value: \"-%" PRIu64 "\"", val); return; } bson->bson_type_data.timestamp.has_i = true; bson->bson_type_data.timestamp.i = (uint32_t) val; break; case BSON_JSON_LF_MINKEY: if (sign == -1) { _bson_json_read_set_error ( reader, "Invalid MinKey value: \"-%" PRIu64 "\"", val); return; } else if (val != 1) { _bson_json_read_set_error ( reader, "Invalid MinKey value: \"%" PRIu64 "\"", val); } bson->bson_type_data.minkey.has_minkey = true; break; case BSON_JSON_LF_MAXKEY: if (sign == -1) { _bson_json_read_set_error ( reader, "Invalid MinKey value: \"-%" PRIu64 "\"", val); return; } else if (val != 1) { _bson_json_read_set_error ( reader, "Invalid MinKey value: \"%" PRIu64 "\"", val); } bson->bson_type_data.maxkey.has_maxkey = true; break; case BSON_JSON_LF_INT32: case BSON_JSON_LF_INT64: _bson_json_read_set_error ( reader, "Invalid state for integer read: %s, 
" "expected number as quoted string like \"123\"", bson_state_names[bs]); break; case BSON_JSON_LF_REGEX: case BSON_JSON_LF_OPTIONS: case BSON_JSON_LF_REGULAR_EXPRESSION_PATTERN: case BSON_JSON_LF_REGULAR_EXPRESSION_OPTIONS: case BSON_JSON_LF_CODE: case BSON_JSON_LF_SCOPE: case BSON_JSON_LF_OID: case BSON_JSON_LF_BINARY: case BSON_JSON_LF_TYPE: case BSON_JSON_LF_UNDEFINED: case BSON_JSON_LF_DOUBLE: case BSON_JSON_LF_DECIMAL128: case BSON_JSON_LF_DBPOINTER: case BSON_JSON_LF_SYMBOL: case BSON_JSON_LF_DBREF: default: _bson_json_read_set_error (reader, "Unexpected integer %s%" PRIu64 " in type \"%s\"", sign == -1 ? "-" : "", val, _bson_json_type_name (bson->bson_type)); } } else { _bson_json_read_set_error (reader, "Unexpected integer %s%" PRIu64 " in state \"%s\"", sign == -1 ? "-" : "", val, read_state_names[rs]); } } static bool _bson_json_parse_double (bson_json_reader_t *reader, const char *val, size_t vlen, double *d) { errno = 0; *d = strtod (val, NULL); #ifdef _MSC_VER /* Microsoft's strtod parses "NaN", "Infinity", "-Infinity" as 0 */ if (*d == 0.0) { if (!_strnicmp (val, "nan", vlen)) { #ifdef NAN *d = NAN; #else /* Visual Studio 2010 doesn't define NAN or INFINITY * https://msdn.microsoft.com/en-us/library/w22adx1s(v=vs.100).aspx */ unsigned long nan[2] = {0xffffffff, 0x7fffffff}; *d = *(double *) nan; #endif return true; } else if (!_strnicmp (val, "infinity", vlen)) { #ifdef INFINITY *d = INFINITY; #else unsigned long inf[2] = {0x00000000, 0x7ff00000}; *d = *(double *) inf; #endif return true; } else if (!_strnicmp (val, "-infinity", vlen)) { #ifdef INFINITY *d = -INFINITY; #else unsigned long inf[2] = {0x00000000, 0xfff00000}; *d = *(double *) inf; #endif return true; } } if ((*d == HUGE_VAL || *d == -HUGE_VAL) && errno == ERANGE) { _bson_json_read_set_error ( reader, "Number \"%.*s\" is out of range", (int) vlen, val); return false; } #else /* not MSVC - set err on overflow, but avoid err for infinity */ if ((*d == HUGE_VAL || *d == -HUGE_VAL) && errno == ERANGE && strncasecmp (val, "infinity", vlen) && strncasecmp (val, "-infinity", vlen)) { _bson_json_read_set_error ( reader, "Number \"%.*s\" is out of range", (int) vlen, val); return false; } #endif /* _MSC_VER */ return true; } static void _bson_json_read_double (bson_json_reader_t *reader, /* IN */ double val) /* IN */ { BASIC_CB_PREAMBLE; BASIC_CB_BAIL_IF_NOT_NORMAL ("double"); if (!bson_append_double (STACK_BSON_CHILD, key, (int) len, val)) { _bson_json_read_set_error (reader, "Cannot append double value %g", val); } } static bool _bson_json_read_int64_or_set_error (bson_json_reader_t *reader, /* IN */ const unsigned char *val, /* IN */ size_t vlen, /* IN */ int64_t *v64) /* OUT */ { bson_json_reader_bson_t *bson = &reader->bson; char *endptr = NULL; _bson_json_read_fixup_key (bson); errno = 0; *v64 = bson_ascii_strtoll ((const char *) val, &endptr, 10); if (((*v64 == INT64_MIN) || (*v64 == INT64_MAX)) && (errno == ERANGE)) { _bson_json_read_set_error (reader, "Number \"%s\" is out of range", val); return false; } if (endptr != ((const char *) val + vlen)) { _bson_json_read_set_error (reader, "Number \"%s\" is invalid", val); return false; } return true; } /* parse a value for "base64", "subType" or legacy "$binary" or "$type" */ static void _bson_json_parse_binary_elem (bson_json_reader_t *reader, const char *val_w_null, size_t vlen) { bson_json_read_bson_state_t bs; bson_json_bson_data_t *data; int binary_len; BASIC_CB_PREAMBLE; bs = bson->bson_state; data = &bson->bson_type_data; if (bs == BSON_JSON_LF_BINARY) { 
data->binary.has_binary = true; binary_len = bson_b64_pton (val_w_null, NULL, 0); if (binary_len < 0) { _bson_json_read_set_error ( reader, "Invalid input string \"%s\", looking for base64-encoded binary", val_w_null); } _bson_json_buf_ensure (&bson->bson_type_buf[0], (size_t) binary_len + 1); if (bson_b64_pton (val_w_null, bson->bson_type_buf[0].buf, (size_t) binary_len + 1) < 0) { _bson_json_read_set_error ( reader, "Invalid input string \"%s\", looking for base64-encoded binary", val_w_null); } bson->bson_type_buf[0].len = (size_t) binary_len; } else if (bs == BSON_JSON_LF_TYPE) { data->binary.has_subtype = true; if (SSCANF (val_w_null, "%02x", &data->binary.type) != 1) { if (!data->binary.is_legacy || data->binary.has_binary) { /* misformatted subtype, like {$binary: {base64: "", subType: "x"}}, * or legacy {$binary: "", $type: "x"} */ _bson_json_read_set_error ( reader, "Invalid input string \"%s\", looking for binary subtype", val_w_null); } else { /* actually a query operator: {x: {$type: "array"}}*/ bson->read_state = BSON_JSON_REGULAR; STACK_PUSH_DOC (bson_append_document_begin ( STACK_BSON_PARENT, key, (int) len, STACK_BSON_CHILD)); bson_append_utf8 (STACK_BSON_CHILD, "$type", 5, (const char *) val_w_null, (int) vlen); } } } } static void _bson_json_read_string (bson_json_reader_t *reader, /* IN */ const unsigned char *val, /* IN */ size_t vlen) /* IN */ { bson_json_read_state_t rs; bson_json_read_bson_state_t bs; BASIC_CB_PREAMBLE; rs = bson->read_state; bs = bson->bson_state; if (!bson_utf8_validate ((const char *) val, vlen, true /*allow null*/)) { _bson_json_read_corrupt (reader, "invalid bytes in UTF8 string"); return; } if (rs == BSON_JSON_REGULAR) { BASIC_CB_BAIL_IF_NOT_NORMAL ("string"); bson_append_utf8 ( STACK_BSON_CHILD, key, (int) len, (const char *) val, (int) vlen); } else if (rs == BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP || rs == BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP) { _bson_json_read_set_error (reader, "Invalid read of \"%s\" in state \"%s\"", val, read_state_names[rs]); } else if (rs == BSON_JSON_IN_BSON_TYPE_BINARY_VALUES) { const char *val_w_null; _bson_json_buf_set (&bson->bson_type_buf[2], val, vlen); val_w_null = (const char *) bson->bson_type_buf[2].buf; _bson_json_parse_binary_elem (reader, val_w_null, vlen); } else if (rs == BSON_JSON_IN_BSON_TYPE || rs == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES || rs == BSON_JSON_IN_BSON_TYPE_REGEX_VALUES || rs == BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) { const char *val_w_null; _bson_json_buf_set (&bson->bson_type_buf[2], val, vlen); val_w_null = (const char *) bson->bson_type_buf[2].buf; switch (bs) { case BSON_JSON_LF_REGEX: bson->bson_type_data.regex.is_legacy = true; /* FALL THROUGH */ case BSON_JSON_LF_REGULAR_EXPRESSION_PATTERN: bson->bson_type_data.regex.has_pattern = true; _bson_json_buf_set (&bson->bson_type_buf[0], val, vlen); break; case BSON_JSON_LF_OPTIONS: bson->bson_type_data.regex.is_legacy = true; /* FALL THROUGH */ case BSON_JSON_LF_REGULAR_EXPRESSION_OPTIONS: bson->bson_type_data.regex.has_options = true; _bson_json_buf_set (&bson->bson_type_buf[1], val, vlen); break; case BSON_JSON_LF_OID: if (vlen != 24) { goto BAD_PARSE; } bson->bson_type_data.oid.has_oid = true; bson_oid_init_from_string (&bson->bson_type_data.oid.oid, val_w_null); break; case BSON_JSON_LF_BINARY: case BSON_JSON_LF_TYPE: bson->bson_type_data.binary.is_legacy = true; _bson_json_parse_binary_elem (reader, val_w_null, vlen); break; case BSON_JSON_LF_INT32: { int64_t v64; if (!_bson_json_read_int64_or_set_error (reader, val, vlen, 
&v64)) { /* the error is set, return and let the reader exit */ return; } if (v64 < INT32_MIN || v64 > INT32_MAX) { goto BAD_PARSE; } if (bson->read_state == BSON_JSON_IN_BSON_TYPE) { bson->bson_type_data.v_int32.value = (int32_t) v64; } else { goto BAD_PARSE; } } break; case BSON_JSON_LF_INT64: { int64_t v64; if (!_bson_json_read_int64_or_set_error (reader, val, vlen, &v64)) { /* the error is set, return and let the reader exit */ return; } if (bson->read_state == BSON_JSON_IN_BSON_TYPE) { bson->bson_type_data.v_int64.value = v64; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) { bson->bson_type_data.date.has_date = true; bson->bson_type_data.date.date = v64; } else { goto BAD_PARSE; } } break; case BSON_JSON_LF_DOUBLE: { if (!_bson_json_parse_double (reader, (const char *) val, vlen, &bson->bson_type_data.v_double.value)) { /* the error is set, return and let the reader exit */ return; } } break; case BSON_JSON_LF_DATE: { int64_t v64; if (!_bson_iso8601_date_parse ( (char *) val, (int) vlen, &v64, reader->error)) { jsonsl_stop (reader->json); } else { bson->bson_type_data.date.has_date = true; bson->bson_type_data.date.date = v64; } } break; case BSON_JSON_LF_DECIMAL128: { bson_decimal128_t decimal128; bson_decimal128_from_string (val_w_null, &decimal128); if (bson->read_state == BSON_JSON_IN_BSON_TYPE) { bson->bson_type_data.v_decimal128.value = decimal128; } else { goto BAD_PARSE; } } break; case BSON_JSON_LF_CODE: _bson_json_buf_set (&bson->code_data.code_buf, val, vlen); break; case BSON_JSON_LF_SYMBOL: bson_append_symbol ( STACK_BSON_CHILD, key, (int) len, (const char *) val, (int) vlen); break; case BSON_JSON_LF_DBREF: /* the "$ref" of a {$ref: "...", $id: ... }, append normally */ bson_append_utf8 ( STACK_BSON_CHILD, key, (int) len, (const char *) val, (int) vlen); bson->read_state = BSON_JSON_REGULAR; break; case BSON_JSON_LF_SCOPE: case BSON_JSON_LF_TIMESTAMP_T: case BSON_JSON_LF_TIMESTAMP_I: case BSON_JSON_LF_UNDEFINED: case BSON_JSON_LF_MINKEY: case BSON_JSON_LF_MAXKEY: case BSON_JSON_LF_DBPOINTER: default: goto BAD_PARSE; } return; BAD_PARSE: _bson_json_read_set_error (reader, "Invalid input string \"%s\", looking for %s", val_w_null, bson_state_names[bs]); } else { _bson_json_read_set_error ( reader, "Invalid state to look for string: %s", read_state_names[rs]); } } static void _bson_json_read_start_map (bson_json_reader_t *reader) /* IN */ { BASIC_CB_PREAMBLE; if (bson->read_state == BSON_JSON_IN_BSON_TYPE) { if (bson->bson_state == BSON_JSON_LF_DATE) { bson->read_state = BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG; } else if (bson->bson_state == BSON_JSON_LF_BINARY) { bson->read_state = BSON_JSON_IN_BSON_TYPE_BINARY_VALUES; } else if (bson->bson_state == BSON_JSON_LF_TYPE) { /* special case, we started parsing {$type: {$numberInt: "2"}} and we * expected a legacy Binary format. now we see the second "{", so * backtrack and parse $type query operator. 
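       * (For example, {"x": {"$type": {"$numberInt": "2"}}} must be emitted
       * as an ordinary sub-document with a "$type" key rather than being
       * treated as the subtype of a legacy {"$binary": ..., "$type": ...}.)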
*/ bson->read_state = BSON_JSON_IN_START_MAP; STACK_PUSH_DOC (bson_append_document_begin ( STACK_BSON_PARENT, key, len, STACK_BSON_CHILD)); _bson_json_save_map_key (bson, (const uint8_t *) "$type", 5); } } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_STARTMAP) { bson->read_state = BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP) { bson->read_state = BSON_JSON_IN_SCOPE; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP) { bson->read_state = BSON_JSON_IN_DBPOINTER; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_STARTMAP) { bson->read_state = BSON_JSON_IN_BSON_TYPE_REGEX_VALUES; } else { bson->read_state = BSON_JSON_IN_START_MAP; } /* silence some warnings */ (void) len; (void) key; } static bool _is_known_key (const char *key, size_t len) { bool ret; #define IS_KEY(k) (len == strlen (k) && (0 == memcmp (k, key, len))) ret = (IS_KEY ("$regularExpression") || IS_KEY ("$regex") || IS_KEY ("$options") || IS_KEY ("$code") || IS_KEY ("$scope") || IS_KEY ("$oid") || IS_KEY ("$binary") || IS_KEY ("$type") || IS_KEY ("$date") || IS_KEY ("$undefined") || IS_KEY ("$maxKey") || IS_KEY ("$minKey") || IS_KEY ("$timestamp") || IS_KEY ("$numberInt") || IS_KEY ("$numberLong") || IS_KEY ("$numberDouble") || IS_KEY ("$numberDecimal") || IS_KEY ("$numberInt") || IS_KEY ("$numberLong") || IS_KEY ("$numberDouble") || IS_KEY ("$numberDecimal") || IS_KEY ("$dbPointer") || IS_KEY ("$symbol")); #undef IS_KEY return ret; } static void _bson_json_save_map_key (bson_json_reader_bson_t *bson, const uint8_t *val, size_t len) { _bson_json_buf_set (&bson->key_buf, val, len); bson->key = (const char *) bson->key_buf.buf; } static void _bson_json_read_code_or_scope_key (bson_json_reader_bson_t *bson, bool is_scope, const uint8_t *val, size_t len) { bson_json_code_t *code = &bson->code_data; if (code->in_scope) { /* we're reading something weirdly nested, e.g. we just read "$code" in * "$scope: {x: {$code: {}}}". just create the subdoc within the scope. */ bson->read_state = BSON_JSON_REGULAR; STACK_PUSH_DOC (bson_append_document_begin (STACK_BSON_PARENT, bson->key, (int) bson->key_buf.len, STACK_BSON_CHILD)); _bson_json_save_map_key (bson, val, len); } else { if (!bson->code_data.key_buf.len) { /* save the key, e.g. {"key": {"$code": "return x", "$scope":{"x":1}}}, * in case it is overwritten while parsing scope sub-object */ _bson_json_buf_set ( &bson->code_data.key_buf, bson->key_buf.buf, bson->key_buf.len); } if (is_scope) { bson->bson_type = BSON_TYPE_CODEWSCOPE; bson->read_state = BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP; bson->bson_state = BSON_JSON_LF_SCOPE; bson->code_data.has_scope = true; } else { bson->bson_type = BSON_TYPE_CODE; bson->bson_state = BSON_JSON_LF_CODE; bson->code_data.has_code = true; } } } static void _bson_json_bad_key_in_type (bson_json_reader_t *reader, /* IN */ const uint8_t *val) /* IN */ { bson_json_reader_bson_t *bson = &reader->bson; _bson_json_read_set_error ( reader, "Invalid key \"%s\". 
Looking for values for type \"%s\"", val, _bson_json_type_name (bson->bson_type)); } static void _bson_json_read_map_key (bson_json_reader_t *reader, /* IN */ const uint8_t *val, /* IN */ size_t len) /* IN */ { bson_json_reader_bson_t *bson = &reader->bson; if (!bson_utf8_validate ((const char *) val, len, true /* allow null */)) { _bson_json_read_corrupt (reader, "invalid bytes in UTF8 string"); return; } if (bson->read_state == BSON_JSON_IN_START_MAP) { if (len > 0 && val[0] == '$' && _is_known_key ((const char *) val, len) && bson->n >= 0 /* key is in subdocument */) { bson->read_state = BSON_JSON_IN_BSON_TYPE; bson->bson_type = (bson_type_t) 0; memset (&bson->bson_type_data, 0, sizeof bson->bson_type_data); } else { bson->read_state = BSON_JSON_REGULAR; STACK_PUSH_DOC (bson_append_document_begin (STACK_BSON_PARENT, bson->key, (int) bson->key_buf.len, STACK_BSON_CHILD)); } } else if (bson->read_state == BSON_JSON_IN_SCOPE) { /* we've read "key" in {$code: "", $scope: {key: ""}}*/ bson->read_state = BSON_JSON_REGULAR; STACK_PUSH_SCOPE; _bson_json_save_map_key (bson, val, len); } else if (bson->read_state == BSON_JSON_IN_DBPOINTER) { /* we've read "$ref" or "$id" in {$dbPointer: {$ref: ..., $id: ...}} */ bson->read_state = BSON_JSON_REGULAR; STACK_PUSH_DBPOINTER; _bson_json_save_map_key (bson, val, len); } if (bson->read_state == BSON_JSON_IN_BSON_TYPE) { if HANDLE_OPTION ("$regex", BSON_TYPE_REGEX, BSON_JSON_LF_REGEX) else if HANDLE_OPTION ("$options", BSON_TYPE_REGEX, BSON_JSON_LF_OPTIONS) else if HANDLE_OPTION ("$oid", BSON_TYPE_OID, BSON_JSON_LF_OID) else if HANDLE_OPTION ("$binary", BSON_TYPE_BINARY, BSON_JSON_LF_BINARY) else if HANDLE_OPTION ("$type", BSON_TYPE_BINARY, BSON_JSON_LF_TYPE) else if HANDLE_OPTION ("$date", BSON_TYPE_DATE_TIME, BSON_JSON_LF_DATE) else if HANDLE_OPTION ( "$undefined", BSON_TYPE_UNDEFINED, BSON_JSON_LF_UNDEFINED) else if HANDLE_OPTION ("$minKey", BSON_TYPE_MINKEY, BSON_JSON_LF_MINKEY) else if HANDLE_OPTION ("$maxKey", BSON_TYPE_MAXKEY, BSON_JSON_LF_MAXKEY) else if HANDLE_OPTION ("$numberInt", BSON_TYPE_INT32, BSON_JSON_LF_INT32) else if HANDLE_OPTION ("$numberLong", BSON_TYPE_INT64, BSON_JSON_LF_INT64) else if HANDLE_OPTION ("$numberDouble", BSON_TYPE_DOUBLE, BSON_JSON_LF_DOUBLE) else if HANDLE_OPTION ("$symbol", BSON_TYPE_SYMBOL, BSON_JSON_LF_SYMBOL) else if HANDLE_OPTION ( "$numberDecimal", BSON_TYPE_DECIMAL128, BSON_JSON_LF_DECIMAL128) else if (!strcmp ("$timestamp", (const char *) val)) { bson->bson_type = BSON_TYPE_TIMESTAMP; bson->read_state = BSON_JSON_IN_BSON_TYPE_TIMESTAMP_STARTMAP; } else if (!strcmp ("$regularExpression", (const char *) val)) { bson->bson_type = BSON_TYPE_REGEX; bson->read_state = BSON_JSON_IN_BSON_TYPE_REGEX_STARTMAP; } else if (!strcmp ("$dbPointer", (const char *) val)) { /* start parsing "key": {"$dbPointer": {...}}, save "key" for later */ _bson_json_buf_set ( &bson->dbpointer_key, bson->key_buf.buf, bson->key_buf.len); bson->bson_type = BSON_TYPE_DBPOINTER; bson->read_state = BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP; } else if (!strcmp ("$code", (const char *) val)) { _bson_json_read_code_or_scope_key ( bson, false /* is_scope */, val, len); } else if (!strcmp ("$scope", (const char *) val)) { _bson_json_read_code_or_scope_key ( bson, true /* is_scope */, val, len); } else { _bson_json_bad_key_in_type (reader, val); } } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) { if HANDLE_OPTION ("$numberLong", BSON_TYPE_DATE_TIME, BSON_JSON_LF_INT64) else { _bson_json_bad_key_in_type (reader, val); } } else if 
(bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES) { if HANDLE_OPTION ("t", BSON_TYPE_TIMESTAMP, BSON_JSON_LF_TIMESTAMP_T) else if HANDLE_OPTION ("i", BSON_TYPE_TIMESTAMP, BSON_JSON_LF_TIMESTAMP_I) else { _bson_json_bad_key_in_type (reader, val); } } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_VALUES) { if HANDLE_OPTION ( "pattern", BSON_TYPE_REGEX, BSON_JSON_LF_REGULAR_EXPRESSION_PATTERN) else if HANDLE_OPTION ( "options", BSON_TYPE_REGEX, BSON_JSON_LF_REGULAR_EXPRESSION_OPTIONS) else { _bson_json_bad_key_in_type (reader, val); } } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_BINARY_VALUES) { if HANDLE_OPTION ("base64", BSON_TYPE_BINARY, BSON_JSON_LF_BINARY) else if HANDLE_OPTION ("subType", BSON_TYPE_BINARY, BSON_JSON_LF_TYPE) else { _bson_json_bad_key_in_type (reader, val); } } else { _bson_json_save_map_key (bson, val, len); /* in x: {$ref: "collection", $id: {$oid: "..."}, $db: "..." } */ if (bson->n > 0) { if (!strcmp ("$ref", (const char *) val)) { STACK_HAS_REF = true; bson->read_state = BSON_JSON_IN_BSON_TYPE; bson->bson_state = BSON_JSON_LF_DBREF; } else if (!strcmp ("$id", (const char *) val)) { STACK_HAS_ID = true; } else if (!strcmp ("$db", (const char *) val)) { bson->read_state = BSON_JSON_IN_BSON_TYPE; bson->bson_state = BSON_JSON_LF_DBREF; } } } } static void _bson_json_read_append_binary (bson_json_reader_t *reader, /* IN */ bson_json_reader_bson_t *bson) /* IN */ { bson_json_bson_data_t *data = &bson->bson_type_data; if (data->binary.is_legacy) { if (!data->binary.has_binary) { _bson_json_read_set_error ( reader, "Missing \"$binary\" after \"$type\" reading type \"binary\""); return; } else if (!data->binary.has_subtype) { _bson_json_read_set_error ( reader, "Missing \"$type\" after \"$binary\" reading type \"binary\""); return; } } else { if (!data->binary.has_binary) { _bson_json_read_set_error ( reader, "Missing \"base64\" after \"subType\" reading type \"binary\""); return; } else if (!data->binary.has_subtype) { _bson_json_read_set_error ( reader, "Missing \"subType\" after \"base64\" reading type \"binary\""); return; } } if (!bson_append_binary (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, data->binary.type, bson->bson_type_buf[0].buf, (uint32_t) bson->bson_type_buf[0].len)) { _bson_json_read_set_error (reader, "Error storing binary data"); } } static void _bson_json_read_append_regex (bson_json_reader_t *reader, /* IN */ bson_json_reader_bson_t *bson) /* IN */ { bson_json_bson_data_t *data = &bson->bson_type_data; if (data->regex.is_legacy) { if (!data->regex.has_pattern) { _bson_json_read_set_error (reader, "Missing \"$regex\" after \"$options\""); return; } if (!data->regex.has_options) { _bson_json_read_set_error (reader, "Missing \"$options\" after \"$regex\""); return; } } else if (!data->regex.has_pattern) { _bson_json_read_set_error ( reader, "Missing \"pattern\" after \"options\" in regular expression"); return; } else if (!data->regex.has_options) { _bson_json_read_set_error ( reader, "Missing \"options\" after \"pattern\" in regular expression"); return; } if (!bson_append_regex (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, (char *) bson->bson_type_buf[0].buf, (char *) bson->bson_type_buf[1].buf)) { _bson_json_read_set_error (reader, "Error storing regex"); } } static void _bson_json_read_append_code (bson_json_reader_t *reader, /* IN */ bson_json_reader_bson_t *bson) /* IN */ { bson_json_code_t *code_data; char *code = NULL; bson_t *scope = NULL; bool r; code_data = &bson->code_data; BSON_ASSERT 
(!code_data->in_scope); if (!code_data->has_code) { _bson_json_read_set_error (reader, "Missing $code after $scope"); return; } code = (char *) code_data->code_buf.buf; if (code_data->has_scope) { scope = STACK_BSON (1); } /* creates BSON "code" elem, or "code with scope" if scope is not NULL */ r = bson_append_code_with_scope (STACK_BSON_CHILD, (const char *) code_data->key_buf.buf, (int) code_data->key_buf.len, code, scope); if (!r) { _bson_json_read_set_error (reader, "Error storing Javascript code"); } /* keep the buffer but truncate it */ code_data->key_buf.len = 0; code_data->has_code = code_data->has_scope = false; } static void _bson_json_read_append_dbpointer (bson_json_reader_t *reader, /* IN */ bson_json_reader_bson_t *bson) /* IN */ { bson_t *db_pointer; bson_iter_t iter; const char *ns = NULL; const bson_oid_t *oid = NULL; bool r; BSON_ASSERT (reader->bson.dbpointer_key.buf); db_pointer = STACK_BSON (1); if (!bson_iter_init (&iter, db_pointer)) { _bson_json_read_set_error (reader, "Error storing DBPointer"); return; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "$id")) { if (!BSON_ITER_HOLDS_OID (&iter)) { _bson_json_read_set_error ( reader, "$dbPointer.$id must be like {\"$oid\": ...\"}"); return; } oid = bson_iter_oid (&iter); } else if (!strcmp (bson_iter_key (&iter), "$ref")) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) { _bson_json_read_set_error ( reader, "$dbPointer.$ref must be a string like \"db.collection\""); return; } ns = bson_iter_utf8 (&iter, NULL); } else { _bson_json_read_set_error (reader, "$dbPointer contains invalid key: \"%s\"", bson_iter_key (&iter)); return; } } if (!oid || !ns) { _bson_json_read_set_error (reader, "$dbPointer requires both $id and $ref"); return; } r = bson_append_dbpointer (STACK_BSON_CHILD, (char *) reader->bson.dbpointer_key.buf, (int) reader->bson.dbpointer_key.len, ns, oid); if (!r) { _bson_json_read_set_error (reader, "Error storing DBPointer"); } } static void _bson_json_read_append_oid (bson_json_reader_t *reader, /* IN */ bson_json_reader_bson_t *bson) /* IN */ { if (!bson_append_oid (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, &bson->bson_type_data.oid.oid)) { _bson_json_read_set_error (reader, "Error storing ObjectId"); } } static void _bson_json_read_append_date_time (bson_json_reader_t *reader, /* IN */ bson_json_reader_bson_t *bson) /* IN */ { if (!bson_append_date_time (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, bson->bson_type_data.date.date)) { _bson_json_read_set_error (reader, "Error storing datetime"); } } static void _bson_json_read_append_timestamp (bson_json_reader_t *reader, /* IN */ bson_json_reader_bson_t *bson) /* IN */ { if (!bson->bson_type_data.timestamp.has_t) { _bson_json_read_set_error ( reader, "Missing t after $timestamp in BSON_TYPE_TIMESTAMP"); return; } else if (!bson->bson_type_data.timestamp.has_i) { _bson_json_read_set_error ( reader, "Missing i after $timestamp in BSON_TYPE_TIMESTAMP"); return; } bson_append_timestamp (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, bson->bson_type_data.timestamp.t, bson->bson_type_data.timestamp.i); } static void _bad_extended_json (bson_json_reader_t *reader) { _bson_json_read_corrupt (reader, "Invalid MongoDB extended JSON"); } static void _bson_json_read_end_map (bson_json_reader_t *reader) /* IN */ { bson_json_reader_bson_t *bson = &reader->bson; bool r = true; if (bson->read_state == BSON_JSON_IN_START_MAP) { bson->read_state = BSON_JSON_REGULAR; STACK_PUSH_DOC (bson_append_document_begin (STACK_BSON_PARENT, 
bson->key, (int) bson->key_buf.len, STACK_BSON_CHILD)); } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_SCOPE_STARTMAP) { bson->read_state = BSON_JSON_REGULAR; STACK_PUSH_SCOPE; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP) { /* we've read last "}" in "{$dbPointer: {$id: ..., $ref: ...}}" */ _bson_json_read_append_dbpointer (reader, bson); bson->read_state = BSON_JSON_REGULAR; return; } if (bson->read_state == BSON_JSON_IN_BSON_TYPE) { if (!bson->key) { /* invalid, like {$numberLong: "1"} at the document top level */ _bad_extended_json (reader); return; } bson->read_state = BSON_JSON_REGULAR; switch (bson->bson_type) { case BSON_TYPE_REGEX: _bson_json_read_append_regex (reader, bson); break; case BSON_TYPE_CODE: case BSON_TYPE_CODEWSCOPE: /* we've read the closing "}" in "{$code: ..., $scope: ...}" */ _bson_json_read_append_code (reader, bson); break; case BSON_TYPE_OID: _bson_json_read_append_oid (reader, bson); break; case BSON_TYPE_BINARY: _bson_json_read_append_binary (reader, bson); break; case BSON_TYPE_DATE_TIME: _bson_json_read_append_date_time (reader, bson); break; case BSON_TYPE_UNDEFINED: r = bson_append_undefined ( STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len); break; case BSON_TYPE_MINKEY: r = bson_append_minkey ( STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len); break; case BSON_TYPE_MAXKEY: r = bson_append_maxkey ( STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len); break; case BSON_TYPE_INT32: r = bson_append_int32 (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, bson->bson_type_data.v_int32.value); break; case BSON_TYPE_INT64: r = bson_append_int64 (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, bson->bson_type_data.v_int64.value); break; case BSON_TYPE_DOUBLE: r = bson_append_double (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, bson->bson_type_data.v_double.value); break; case BSON_TYPE_DECIMAL128: r = bson_append_decimal128 (STACK_BSON_CHILD, bson->key, (int) bson->key_buf.len, &bson->bson_type_data.v_decimal128.value); break; case BSON_TYPE_DBPOINTER: /* shouldn't set type to DBPointer unless inside $dbPointer: {...} */ _bson_json_read_set_error ( reader, "Internal error: shouldn't be in state BSON_TYPE_DBPOINTER"); break; case BSON_TYPE_SYMBOL: break; case BSON_TYPE_EOD: case BSON_TYPE_UTF8: case BSON_TYPE_DOCUMENT: case BSON_TYPE_ARRAY: case BSON_TYPE_BOOL: case BSON_TYPE_NULL: case BSON_TYPE_TIMESTAMP: default: _bson_json_read_set_error ( reader, "Internal error: can't parse JSON wrapper for type \"%s\"", _bson_json_type_name (bson->bson_type)); break; } if (!r) { _bson_json_read_set_error ( reader, "Cannot append value at end of JSON object for key %s", bson->key); } } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_VALUES) { if (!bson->key) { _bad_extended_json (reader); return; } bson->read_state = BSON_JSON_IN_BSON_TYPE_TIMESTAMP_ENDMAP; _bson_json_read_append_timestamp (reader, bson); return; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_VALUES) { if (!bson->key) { _bad_extended_json (reader); return; } bson->read_state = BSON_JSON_IN_BSON_TYPE_REGEX_ENDMAP; _bson_json_read_append_regex (reader, bson); return; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_BINARY_VALUES) { if (!bson->key) { _bad_extended_json (reader); return; } bson->read_state = BSON_JSON_IN_BSON_TYPE_BINARY_ENDMAP; _bson_json_read_append_binary (reader, bson); return; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_TIMESTAMP_ENDMAP) { bson->read_state = BSON_JSON_REGULAR; } else 
if (bson->read_state == BSON_JSON_IN_BSON_TYPE_REGEX_ENDMAP) { bson->read_state = BSON_JSON_REGULAR; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_BINARY_ENDMAP) { bson->read_state = BSON_JSON_REGULAR; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DATE_NUMBERLONG) { if (!bson->key) { _bad_extended_json (reader); return; } bson->read_state = BSON_JSON_IN_BSON_TYPE_DATE_ENDMAP; _bson_json_read_append_date_time (reader, bson); return; } else if (bson->read_state == BSON_JSON_IN_BSON_TYPE_DATE_ENDMAP) { bson->read_state = BSON_JSON_REGULAR; } else if (bson->read_state == BSON_JSON_REGULAR) { if (STACK_IS_SCOPE) { bson->read_state = BSON_JSON_IN_BSON_TYPE; bson->bson_type = BSON_TYPE_CODE; STACK_POP_SCOPE; } else if (STACK_IS_DBPOINTER) { bson->read_state = BSON_JSON_IN_BSON_TYPE_DBPOINTER_STARTMAP; STACK_POP_DBPOINTER; } else { if (STACK_HAS_ID != STACK_HAS_REF) { _bson_json_read_set_error ( reader, "%s", "DBRef object must have both $ref and $id keys"); } STACK_POP_DOC ( bson_append_document_end (STACK_BSON_PARENT, STACK_BSON_CHILD)); } if (bson->n == -1) { bson->read_state = BSON_JSON_DONE; } } else if (bson->read_state == BSON_JSON_IN_SCOPE) { /* empty $scope */ BSON_ASSERT (bson->code_data.has_scope); STACK_PUSH_SCOPE; STACK_POP_SCOPE; bson->read_state = BSON_JSON_IN_BSON_TYPE; bson->bson_type = BSON_TYPE_CODE; } else if (bson->read_state == BSON_JSON_IN_DBPOINTER) { /* empty $dbPointer??? */ _bson_json_read_set_error (reader, "Empty $dbPointer"); } else { _bson_json_read_set_error ( reader, "Invalid state \"%s\"", read_state_names[bson->read_state]); } } static void _bson_json_read_start_array (bson_json_reader_t *reader) /* IN */ { const char *key; size_t len; bson_json_reader_bson_t *bson = &reader->bson; if (bson->read_state != BSON_JSON_REGULAR) { _bson_json_read_set_error (reader, "Invalid read of \"[\" in state \"%s\"", read_state_names[bson->read_state]); return; } if (bson->n == -1) { STACK_PUSH_ARRAY (_noop ()); } else { _bson_json_read_fixup_key (bson); key = bson->key; len = bson->key_buf.len; STACK_PUSH_ARRAY (bson_append_array_begin ( STACK_BSON_PARENT, key, (int) len, STACK_BSON_CHILD)); } } static void _bson_json_read_end_array (bson_json_reader_t *reader) /* IN */ { bson_json_reader_bson_t *bson = &reader->bson; if (bson->read_state != BSON_JSON_REGULAR) { _bson_json_read_set_error (reader, "Invalid read of \"]\" in state \"%s\"", read_state_names[bson->read_state]); return; } STACK_POP_ARRAY ( bson_append_array_end (STACK_BSON_PARENT, STACK_BSON_CHILD)); if (bson->n == -1) { bson->read_state = BSON_JSON_DONE; } } /* put unescaped text in reader->bson.unescaped, or set reader->error. * json_text has length len and it is not null-terminated. 
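 * (illustrative example: for the 11-byte escaped input hello\u0021 the
 * unescaped buffer receives the 6 bytes hello! plus a terminating NUL,
 * and unescaped.len is set to 6)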
*/ static bool _bson_json_unescape (bson_json_reader_t *reader, struct jsonsl_state_st *state, const char *json_text, ssize_t len) { bson_json_reader_bson_t *reader_bson; jsonsl_error_t err; reader_bson = &reader->bson; /* add 1 for NULL */ _bson_json_buf_ensure (&reader_bson->unescaped, (size_t) len + 1); /* length of unescaped str is always <= len */ reader_bson->unescaped.len = jsonsl_util_unescape ( json_text, (char *) reader_bson->unescaped.buf, (size_t) len, NULL, &err); if (err != JSONSL_ERROR_SUCCESS) { bson_set_error (reader->error, BSON_ERROR_JSON, BSON_JSON_ERROR_READ_CORRUPT_JS, "error near position %d: \"%s\"", (int) state->pos_begin, jsonsl_strerror (err)); return false; } reader_bson->unescaped.buf[reader_bson->unescaped.len] = '\0'; return true; } /* read the buffered JSON plus new data, and fill out @len with its length */ static const char * _get_json_text (jsonsl_t json, /* IN */ struct jsonsl_state_st *state, /* IN */ const char *buf /* IN */, ssize_t *len /* OUT */) { bson_json_reader_t *reader; ssize_t bytes_available; reader = (bson_json_reader_t *) json->data; BSON_ASSERT (state->pos_cur > state->pos_begin); *len = (ssize_t) (state->pos_cur - state->pos_begin); bytes_available = buf - json->base; if (*len <= bytes_available) { /* read directly from stream, not from saved JSON */ return buf - (size_t) *len; } else { /* combine saved text with new data from the jsonsl_t */ ssize_t append = buf - json->base; if (append > 0) { _bson_json_buf_append ( &reader->tok_accumulator, buf - append, (size_t) append); } return (const char *) reader->tok_accumulator.buf; } } static void _push_callback (jsonsl_t json, jsonsl_action_t action, struct jsonsl_state_st *state, const char *buf) { bson_json_reader_t *reader = (bson_json_reader_t *) json->data; switch (state->type) { case JSONSL_T_STRING: case JSONSL_T_HKEY: case JSONSL_T_SPECIAL: case JSONSL_T_UESCAPE: reader->json_text_pos = state->pos_begin; break; case JSONSL_T_OBJECT: _bson_json_read_start_map (reader); break; case JSONSL_T_LIST: _bson_json_read_start_array (reader); break; default: break; } } static void _pop_callback (jsonsl_t json, jsonsl_action_t action, struct jsonsl_state_st *state, const char *buf) { bson_json_reader_t *reader; bson_json_reader_bson_t *reader_bson; ssize_t len; double d; const char *obj_text; reader = (bson_json_reader_t *) json->data; reader_bson = &reader->bson; switch (state->type) { case JSONSL_T_HKEY: case JSONSL_T_STRING: obj_text = _get_json_text (json, state, buf, &len); BSON_ASSERT (obj_text[0] == '"'); /* remove start/end quotes, replace backslash-escapes, null-terminate */ /* you'd think it would be faster to check if state->nescapes > 0 first, * but tests show no improvement */ if (!_bson_json_unescape (reader, state, obj_text + 1, len - 1)) { /* reader->error is set */ jsonsl_stop (json); break; } if (state->type == JSONSL_T_HKEY) { _bson_json_read_map_key ( reader, reader_bson->unescaped.buf, reader_bson->unescaped.len); } else { _bson_json_read_string ( reader, reader_bson->unescaped.buf, reader_bson->unescaped.len); } break; case JSONSL_T_OBJECT: _bson_json_read_end_map (reader); break; case JSONSL_T_LIST: _bson_json_read_end_array (reader); break; case JSONSL_T_SPECIAL: obj_text = _get_json_text (json, state, buf, &len); if (state->special_flags & JSONSL_SPECIALf_NUMNOINT) { if (_bson_json_parse_double (reader, obj_text, (size_t) len, &d)) { _bson_json_read_double (reader, d); } } else if (state->special_flags & JSONSL_SPECIALf_NUMERIC) { /* jsonsl puts the unsigned value in 
state->nelem */ _bson_json_read_integer ( reader, state->nelem, state->special_flags & JSONSL_SPECIALf_SIGNED ? -1 : 1); } else if (state->special_flags & JSONSL_SPECIALf_BOOLEAN) { _bson_json_read_boolean (reader, obj_text[0] == 't' ? 1 : 0); } else if (state->special_flags & JSONSL_SPECIALf_NULL) { _bson_json_read_null (reader); } break; default: break; } reader->json_text_pos = -1; reader->tok_accumulator.len = 0; } static int _error_callback (jsonsl_t json, jsonsl_error_t err, struct jsonsl_state_st *state, char *errat) { bson_json_reader_t *reader = (bson_json_reader_t *) json->data; if (err == JSONSL_ERROR_CANT_INSERT && *errat == '{') { /* start the next document */ reader->should_reset = true; reader->advance = errat - json->base; return 0; } bson_set_error (reader->error, BSON_ERROR_JSON, BSON_JSON_ERROR_READ_CORRUPT_JS, "Got parse error at \"%c\", position %d: \"%s\"", *errat, (int) json->pos, jsonsl_strerror (err)); return 0; } /* *-------------------------------------------------------------------------- * * bson_json_reader_read -- * * Read the next json document from @reader and write its value * into @bson. @bson will be allocated as part of this process. * * @bson MUST be initialized before calling this function as it * will not be initialized automatically. The reasoning for this * is so that you can chain together bson_json_reader_t with * other components like bson_writer_t. * * Returns: * 1 if successful and data was read. * 0 if successful and no data was read. * -1 if there was an error and @error is set. * * Side effects: * @error may be set. * *-------------------------------------------------------------------------- */ int bson_json_reader_read (bson_json_reader_t *reader, /* IN */ bson_t *bson, /* IN */ bson_error_t *error) /* OUT */ { bson_json_reader_producer_t *p; ssize_t start_pos; ssize_t r; ssize_t buf_offset; ssize_t accum; bson_error_t error_tmp; int ret = 0; BSON_ASSERT (reader); BSON_ASSERT (bson); p = &reader->producer; reader->bson.bson = bson; reader->bson.n = -1; reader->bson.read_state = BSON_JSON_REGULAR; reader->error = error ? error : &error_tmp; memset (reader->error, 0, sizeof (bson_error_t)); for (;;) { start_pos = reader->json->pos; if (p->bytes_read > 0) { /* leftover data from previous JSON doc in the stream */ r = p->bytes_read; } else { /* read a chunk of bytes by executing the callback */ r = p->cb (p->data, p->buf, p->buf_size); } if (r < 0) { if (error) { bson_set_error (error, BSON_ERROR_JSON, BSON_JSON_ERROR_READ_CB_FAILURE, "reader cb failed"); } ret = -1; goto cleanup; } else if (r == 0) { break; } else { ret = 1; p->bytes_read = (size_t) r; jsonsl_feed (reader->json, (const jsonsl_char_t *) p->buf, (size_t) r); if (reader->should_reset) { /* end of a document */ jsonsl_reset (reader->json); reader->should_reset = false; /* advance past already-parsed data */ memmove (p->buf, p->buf + reader->advance, r - reader->advance); p->bytes_read -= reader->advance; ret = 1; goto cleanup; } if (reader->error->domain) { ret = -1; goto cleanup; } /* accumulate a key or string value */ if (reader->json_text_pos != -1) { if (reader->json_text_pos < reader->json->pos) { accum = BSON_MIN (reader->json->pos - reader->json_text_pos, r); /* if this chunk stopped mid-token, buf_offset is how far into * our current chunk the token begins. 
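 * (worked example, for illustration: if this chunk began at stream
 * position 95 and the token began at position 100, buf_offset is 5; if
 * the token began in an earlier chunk, the difference is negative and
 * AT_LEAST_0 clamps it to 0, those earlier bytes having already been
 * copied into tok_accumulator)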
*/ buf_offset = AT_LEAST_0 (reader->json_text_pos - start_pos); _bson_json_buf_append (&reader->tok_accumulator, p->buf + buf_offset, (size_t) accum); } } p->bytes_read = 0; } } cleanup: if (ret == 1 && reader->bson.read_state != BSON_JSON_DONE) { /* data ended in the middle */ _bson_json_read_corrupt (reader, "%s", "Incomplete JSON"); return -1; } return ret; } bson_json_reader_t * bson_json_reader_new (void *data, /* IN */ bson_json_reader_cb cb, /* IN */ bson_json_destroy_cb dcb, /* IN */ bool allow_multiple, /* unused */ size_t buf_size) /* IN */ { bson_json_reader_t *r; bson_json_reader_producer_t *p; r = bson_malloc0 (sizeof *r); r->json = jsonsl_new (STACK_MAX); r->json->error_callback = _error_callback; r->json->action_callback_PUSH = _push_callback; r->json->action_callback_POP = _pop_callback; r->json->data = r; r->json_text_pos = -1; jsonsl_enable_all_callbacks (r->json); p = &r->producer; p->data = data; p->cb = cb; p->dcb = dcb; p->buf_size = buf_size ? buf_size : BSON_JSON_DEFAULT_BUF_SIZE; p->buf = bson_malloc (p->buf_size); return r; } void bson_json_reader_destroy (bson_json_reader_t *reader) /* IN */ { int i; bson_json_reader_producer_t *p; bson_json_reader_bson_t *b; if (!reader) { return; } p = &reader->producer; b = &reader->bson; if (reader->producer.dcb) { reader->producer.dcb (reader->producer.data); } bson_free (p->buf); bson_free (b->key_buf.buf); bson_free (b->unescaped.buf); bson_free (b->dbpointer_key.buf); /* destroy each bson_t initialized in parser stack frames */ for (i = 1; i < STACK_MAX; i++) { if (b->stack[i].type == BSON_JSON_FRAME_INITIAL) { /* highest the stack grew */ break; } if (FRAME_TYPE_HAS_BSON (b->stack[i].type)) { bson_destroy (&b->stack[i].bson); } } for (i = 0; i < 3; i++) { bson_free (b->bson_type_buf[i].buf); } _bson_json_code_cleanup (&b->code_data); jsonsl_destroy (reader->json); bson_free (reader->tok_accumulator.buf); bson_free (reader); } typedef struct { const uint8_t *data; size_t len; size_t bytes_parsed; } bson_json_data_reader_t; static ssize_t _bson_json_data_reader_cb (void *_ctx, uint8_t *buf, size_t len) { size_t bytes; bson_json_data_reader_t *ctx = (bson_json_data_reader_t *) _ctx; if (!ctx->data) { return -1; } bytes = BSON_MIN (len, ctx->len - ctx->bytes_parsed); memcpy (buf, ctx->data + ctx->bytes_parsed, bytes); ctx->bytes_parsed += bytes; return bytes; } bson_json_reader_t * bson_json_data_reader_new (bool allow_multiple, /* IN */ size_t size) /* IN */ { bson_json_data_reader_t *dr = bson_malloc0 (sizeof *dr); return bson_json_reader_new ( dr, &_bson_json_data_reader_cb, &bson_free, allow_multiple, size); } void bson_json_data_reader_ingest (bson_json_reader_t *reader, /* IN */ const uint8_t *data, /* IN */ size_t len) /* IN */ { bson_json_data_reader_t *ctx = (bson_json_data_reader_t *) reader->producer.data; ctx->data = data; ctx->len = len; ctx->bytes_parsed = 0; } bson_t * bson_new_from_json (const uint8_t *data, /* IN */ ssize_t len, /* IN */ bson_error_t *error) /* OUT */ { bson_json_reader_t *reader; bson_t *bson; int r; BSON_ASSERT (data); if (len < 0) { len = (ssize_t) strlen ((const char *) data); } bson = bson_new (); reader = bson_json_data_reader_new (false, BSON_JSON_DEFAULT_BUF_SIZE); bson_json_data_reader_ingest (reader, data, len); r = bson_json_reader_read (reader, bson, error); bson_json_reader_destroy (reader); if (r == 0) { bson_set_error (error, BSON_ERROR_JSON, BSON_JSON_ERROR_READ_INVALID_PARAM, "Empty JSON string"); } if (r != 1) { bson_destroy (bson); return NULL; } return bson; } bool 
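/* A minimal usage sketch of the reader API above (illustrative only: the
 * file name is hypothetical and error handling is abbreviated). Each call
 * to bson_json_reader_read () reads the next document into @bson without
 * reinitializing it, so the caller re-initializes the bson_t between
 * documents:
 *
 *    bson_json_reader_t *reader;
 *    bson_error_t error;
 *    bson_t doc = BSON_INITIALIZER;
 *    int r;
 *
 *    reader = bson_json_reader_new_from_file ("docs.json", &error);
 *    if (reader) {
 *       while ((r = bson_json_reader_read (reader, &doc, &error)) == 1) {
 *          char *str = bson_as_canonical_extended_json (&doc, NULL);
 *          printf ("%s\n", str);
 *          bson_free (str);
 *          bson_reinit (&doc);
 *       }
 *       if (r < 0) {
 *          fprintf (stderr, "%s\n", error.message);
 *       }
 *       bson_json_reader_destroy (reader);
 *    }
 *    bson_destroy (&doc);
 *
 * For a single in-memory string, including extended JSON such as
 * {"x": {"$numberLong": "123"}}, bson_new_from_json () above is the
 * simpler entry point. */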
bson_init_from_json (bson_t *bson, /* OUT */ const char *data, /* IN */ ssize_t len, /* IN */ bson_error_t *error) /* OUT */ { bson_json_reader_t *reader; int r; BSON_ASSERT (bson); BSON_ASSERT (data); if (len < 0) { len = strlen (data); } bson_init (bson); reader = bson_json_data_reader_new (false, BSON_JSON_DEFAULT_BUF_SIZE); bson_json_data_reader_ingest (reader, (const uint8_t *) data, len); r = bson_json_reader_read (reader, bson, error); bson_json_reader_destroy (reader); if (r == 0) { bson_set_error (error, BSON_ERROR_JSON, BSON_JSON_ERROR_READ_INVALID_PARAM, "Empty JSON string"); } if (r != 1) { bson_destroy (bson); return false; } return true; } static void _bson_json_reader_handle_fd_destroy (void *handle) /* IN */ { bson_json_reader_handle_fd_t *fd = handle; if (fd) { if ((fd->fd != -1) && fd->do_close) { #ifdef _WIN32 _close (fd->fd); #else close (fd->fd); #endif } bson_free (fd); } } static ssize_t _bson_json_reader_handle_fd_read (void *handle, /* IN */ uint8_t *buf, /* IN */ size_t len) /* IN */ { bson_json_reader_handle_fd_t *fd = handle; ssize_t ret = -1; if (fd && (fd->fd != -1)) { again: #ifdef BSON_OS_WIN32 ret = _read (fd->fd, buf, (unsigned int) len); #else ret = read (fd->fd, buf, len); #endif if ((ret == -1) && (errno == EAGAIN)) { goto again; } } return ret; } bson_json_reader_t * bson_json_reader_new_from_fd (int fd, /* IN */ bool close_on_destroy) /* IN */ { bson_json_reader_handle_fd_t *handle; BSON_ASSERT (fd != -1); handle = bson_malloc0 (sizeof *handle); handle->fd = fd; handle->do_close = close_on_destroy; return bson_json_reader_new (handle, _bson_json_reader_handle_fd_read, _bson_json_reader_handle_fd_destroy, true, BSON_JSON_DEFAULT_BUF_SIZE); } bson_json_reader_t * bson_json_reader_new_from_file (const char *path, /* IN */ bson_error_t *error) /* OUT */ { char errmsg_buf[BSON_ERROR_BUFFER_SIZE]; char *errmsg; int fd = -1; BSON_ASSERT (path); #ifdef BSON_OS_WIN32 _sopen_s (&fd, path, (_O_RDONLY | _O_BINARY), _SH_DENYNO, _S_IREAD); #else fd = open (path, O_RDONLY); #endif if (fd == -1) { errmsg = bson_strerror_r (errno, errmsg_buf, sizeof errmsg_buf); bson_set_error ( error, BSON_ERROR_READER, BSON_ERROR_READER_BADFD, "%s", errmsg); return NULL; } return bson_json_reader_new_from_fd (fd, true); } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-json.h0000644000076500000240000000413313572250757023252 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson-prelude.h" #ifndef BSON_JSON_H #define BSON_JSON_H #include "bson/bson.h" BSON_BEGIN_DECLS typedef struct _bson_json_reader_t bson_json_reader_t; typedef enum { BSON_JSON_ERROR_READ_CORRUPT_JS = 1, BSON_JSON_ERROR_READ_INVALID_PARAM, BSON_JSON_ERROR_READ_CB_FAILURE, } bson_json_error_code_t; typedef ssize_t (*bson_json_reader_cb) (void *handle, uint8_t *buf, size_t count); typedef void (*bson_json_destroy_cb) (void *handle); BSON_EXPORT (bson_json_reader_t *) bson_json_reader_new (void *data, bson_json_reader_cb cb, bson_json_destroy_cb dcb, bool allow_multiple, size_t buf_size); BSON_EXPORT (bson_json_reader_t *) bson_json_reader_new_from_fd (int fd, bool close_on_destroy); BSON_EXPORT (bson_json_reader_t *) bson_json_reader_new_from_file (const char *filename, bson_error_t *error); BSON_EXPORT (void) bson_json_reader_destroy (bson_json_reader_t *reader); BSON_EXPORT (int) bson_json_reader_read (bson_json_reader_t *reader, bson_t *bson, bson_error_t *error); BSON_EXPORT (bson_json_reader_t *) bson_json_data_reader_new (bool allow_multiple, size_t size); BSON_EXPORT (void) bson_json_data_reader_ingest (bson_json_reader_t *reader, const uint8_t *data, size_t len); BSON_END_DECLS #endif /* BSON_JSON_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-keys.c0000644000076500000240000002235213572250757023252 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "bson/bson-keys.h" #include "bson/bson-string.h" static const char *gUint32Strs[] = { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "100", "101", "102", "103", "104", "105", "106", "107", "108", "109", "110", "111", "112", "113", "114", "115", "116", "117", "118", "119", "120", "121", "122", "123", "124", "125", "126", "127", "128", "129", "130", "131", "132", "133", "134", "135", "136", "137", "138", "139", "140", "141", "142", "143", "144", "145", "146", "147", "148", "149", "150", "151", "152", "153", "154", "155", "156", "157", "158", "159", "160", "161", "162", "163", "164", "165", "166", "167", "168", "169", "170", "171", "172", "173", "174", "175", "176", "177", "178", "179", "180", "181", "182", "183", "184", "185", "186", "187", "188", "189", "190", "191", "192", "193", "194", "195", "196", "197", "198", "199", "200", "201", "202", "203", "204", "205", "206", "207", "208", "209", "210", "211", "212", "213", "214", "215", "216", "217", "218", "219", "220", "221", "222", "223", "224", "225", "226", "227", "228", "229", "230", "231", "232", "233", "234", "235", "236", "237", "238", "239", "240", "241", "242", "243", "244", "245", "246", "247", "248", "249", "250", "251", "252", "253", "254", "255", "256", "257", "258", "259", "260", "261", "262", "263", "264", "265", "266", "267", "268", "269", "270", "271", "272", "273", "274", "275", "276", "277", "278", "279", "280", "281", "282", "283", "284", "285", "286", "287", "288", "289", "290", "291", "292", "293", "294", "295", "296", "297", "298", "299", "300", "301", "302", "303", "304", "305", "306", "307", "308", "309", "310", "311", "312", "313", "314", "315", "316", "317", "318", "319", "320", "321", "322", "323", "324", "325", "326", "327", "328", "329", "330", "331", "332", "333", "334", "335", "336", "337", "338", "339", "340", "341", "342", "343", "344", "345", "346", "347", "348", "349", "350", "351", "352", "353", "354", "355", "356", "357", "358", "359", "360", "361", "362", "363", "364", "365", "366", "367", "368", "369", "370", "371", "372", "373", "374", "375", "376", "377", "378", "379", "380", "381", "382", "383", "384", "385", "386", "387", "388", "389", "390", "391", "392", "393", "394", "395", "396", "397", "398", "399", "400", "401", "402", "403", "404", "405", "406", "407", "408", "409", "410", "411", "412", "413", "414", "415", "416", "417", "418", "419", "420", "421", "422", "423", "424", "425", "426", "427", "428", "429", "430", "431", "432", "433", "434", "435", "436", "437", "438", "439", "440", "441", "442", "443", "444", "445", "446", "447", "448", "449", "450", "451", "452", "453", "454", "455", "456", "457", "458", "459", "460", "461", "462", "463", "464", "465", "466", "467", "468", "469", "470", "471", "472", "473", "474", "475", "476", "477", "478", "479", "480", "481", "482", "483", "484", "485", "486", "487", "488", "489", "490", "491", "492", "493", "494", "495", "496", "497", "498", "499", "500", "501", "502", "503", "504", "505", "506", "507", 
"508", "509", "510", "511", "512", "513", "514", "515", "516", "517", "518", "519", "520", "521", "522", "523", "524", "525", "526", "527", "528", "529", "530", "531", "532", "533", "534", "535", "536", "537", "538", "539", "540", "541", "542", "543", "544", "545", "546", "547", "548", "549", "550", "551", "552", "553", "554", "555", "556", "557", "558", "559", "560", "561", "562", "563", "564", "565", "566", "567", "568", "569", "570", "571", "572", "573", "574", "575", "576", "577", "578", "579", "580", "581", "582", "583", "584", "585", "586", "587", "588", "589", "590", "591", "592", "593", "594", "595", "596", "597", "598", "599", "600", "601", "602", "603", "604", "605", "606", "607", "608", "609", "610", "611", "612", "613", "614", "615", "616", "617", "618", "619", "620", "621", "622", "623", "624", "625", "626", "627", "628", "629", "630", "631", "632", "633", "634", "635", "636", "637", "638", "639", "640", "641", "642", "643", "644", "645", "646", "647", "648", "649", "650", "651", "652", "653", "654", "655", "656", "657", "658", "659", "660", "661", "662", "663", "664", "665", "666", "667", "668", "669", "670", "671", "672", "673", "674", "675", "676", "677", "678", "679", "680", "681", "682", "683", "684", "685", "686", "687", "688", "689", "690", "691", "692", "693", "694", "695", "696", "697", "698", "699", "700", "701", "702", "703", "704", "705", "706", "707", "708", "709", "710", "711", "712", "713", "714", "715", "716", "717", "718", "719", "720", "721", "722", "723", "724", "725", "726", "727", "728", "729", "730", "731", "732", "733", "734", "735", "736", "737", "738", "739", "740", "741", "742", "743", "744", "745", "746", "747", "748", "749", "750", "751", "752", "753", "754", "755", "756", "757", "758", "759", "760", "761", "762", "763", "764", "765", "766", "767", "768", "769", "770", "771", "772", "773", "774", "775", "776", "777", "778", "779", "780", "781", "782", "783", "784", "785", "786", "787", "788", "789", "790", "791", "792", "793", "794", "795", "796", "797", "798", "799", "800", "801", "802", "803", "804", "805", "806", "807", "808", "809", "810", "811", "812", "813", "814", "815", "816", "817", "818", "819", "820", "821", "822", "823", "824", "825", "826", "827", "828", "829", "830", "831", "832", "833", "834", "835", "836", "837", "838", "839", "840", "841", "842", "843", "844", "845", "846", "847", "848", "849", "850", "851", "852", "853", "854", "855", "856", "857", "858", "859", "860", "861", "862", "863", "864", "865", "866", "867", "868", "869", "870", "871", "872", "873", "874", "875", "876", "877", "878", "879", "880", "881", "882", "883", "884", "885", "886", "887", "888", "889", "890", "891", "892", "893", "894", "895", "896", "897", "898", "899", "900", "901", "902", "903", "904", "905", "906", "907", "908", "909", "910", "911", "912", "913", "914", "915", "916", "917", "918", "919", "920", "921", "922", "923", "924", "925", "926", "927", "928", "929", "930", "931", "932", "933", "934", "935", "936", "937", "938", "939", "940", "941", "942", "943", "944", "945", "946", "947", "948", "949", "950", "951", "952", "953", "954", "955", "956", "957", "958", "959", "960", "961", "962", "963", "964", "965", "966", "967", "968", "969", "970", "971", "972", "973", "974", "975", "976", "977", "978", "979", "980", "981", "982", "983", "984", "985", "986", "987", "988", "989", "990", "991", "992", "993", "994", "995", "996", "997", "998", "999"}; /* *-------------------------------------------------------------------------- * * bson_uint32_to_string -- * 
* Converts @value to a string. * * If @value is from 0 to 1000, it will use a constant string in the * data section of the library. * * If not, a string will be formatted using @str and snprintf(). This * is much slower, of course and therefore we try to optimize it out. * * @strptr will always be set. It will either point to @str or a * constant string. You will want to use this as your key. * * Parameters: * @value: A #uint32_t to convert to string. * @strptr: (out): A pointer to the resulting string. * @str: (out): Storage for a string made with snprintf. * @size: Size of @str. * * Returns: * The number of bytes in the resulting string. * * Side effects: * None. * *-------------------------------------------------------------------------- */ size_t bson_uint32_to_string (uint32_t value, /* IN */ const char **strptr, /* OUT */ char *str, /* OUT */ size_t size) /* IN */ { if (value < 1000) { *strptr = gUint32Strs[value]; if (value < 10) { return 1; } else if (value < 100) { return 2; } else { return 3; } } *strptr = str; return bson_snprintf (str, size, "%u", value); } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-keys.h0000644000076500000240000000171213572250757023254 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_KEYS_H #define BSON_KEYS_H #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS BSON_EXPORT (size_t) bson_uint32_to_string (uint32_t value, const char **strptr, char *str, size_t size); BSON_END_DECLS #endif /* BSON_KEYS_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-macros.h0000644000076500000240000001611713572250757023572 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_MACROS_H #define BSON_MACROS_H #include #ifdef __cplusplus #include #endif #include "bson/bson-config.h" #if BSON_OS == 1 #define BSON_OS_UNIX #elif BSON_OS == 2 #define BSON_OS_WIN32 #else #error "Unknown operating system." 
#endif #ifdef __cplusplus #define BSON_BEGIN_DECLS extern "C" { #define BSON_END_DECLS } #else #define BSON_BEGIN_DECLS #define BSON_END_DECLS #endif #if defined(__GNUC__) #define BSON_GNUC_CHECK_VERSION(major, minor) \ ((__GNUC__ > (major)) || \ ((__GNUC__ == (major)) && (__GNUC_MINOR__ >= (minor)))) #else #define BSON_GNUC_CHECK_VERSION(major, minor) 0 #endif #if defined(__GNUC__) #define BSON_GNUC_IS_VERSION(major, minor) \ ((__GNUC__ == (major)) && (__GNUC_MINOR__ == (minor))) #else #define BSON_GNUC_IS_VERSION(major, minor) 0 #endif /* Decorate public functions: * - if BSON_STATIC, we're compiling a program that uses libbson as a static * library, don't decorate functions * - else if BSON_COMPILATION, we're compiling a static or shared libbson, mark * public functions for export from the shared lib (which has no effect on * the static lib) * - else, we're compiling a program that uses libbson as a shared library, * mark public functions as DLL imports for Microsoft Visual C */ #ifdef _MSC_VER /* * Microsoft Visual C */ #ifdef BSON_STATIC #define BSON_API #elif defined(BSON_COMPILATION) #define BSON_API __declspec(dllexport) #else #define BSON_API __declspec(dllimport) #endif #define BSON_CALL __cdecl #elif defined(__GNUC__) /* * GCC */ #ifdef BSON_STATIC #define BSON_API #elif defined(BSON_COMPILATION) #define BSON_API __attribute__ ((visibility ("default"))) #else #define BSON_API #endif #define BSON_CALL #else /* * Other compilers */ #define BSON_API #define BSON_CALL #endif #define BSON_EXPORT(type) BSON_API type BSON_CALL #ifdef MIN #define BSON_MIN MIN #elif defined(__cplusplus) #define BSON_MIN(a, b) ((std::min) (a, b)) #elif defined(_MSC_VER) #define BSON_MIN(a, b) ((a) < (b) ? (a) : (b)) #else #define BSON_MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif #ifdef MAX #define BSON_MAX MAX #elif defined(__cplusplus) #define BSON_MAX(a, b) ((std::max) (a, b)) #elif defined(_MSC_VER) #define BSON_MAX(a, b) ((a) > (b) ? (a) : (b)) #else #define BSON_MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif #ifdef ABS #define BSON_ABS ABS #else #define BSON_ABS(a) (((a) < 0) ? ((a) * -1) : (a)) #endif #ifdef _MSC_VER #ifdef _WIN64 #define BSON_ALIGN_OF_PTR 8 #else #define BSON_ALIGN_OF_PTR 4 #endif #else #define BSON_ALIGN_OF_PTR (sizeof (void *)) #endif #ifdef BSON_EXTRA_ALIGN #if defined(_MSC_VER) #define BSON_ALIGNED_BEGIN(_N) __declspec(align (_N)) #define BSON_ALIGNED_END(_N) #else #define BSON_ALIGNED_BEGIN(_N) #define BSON_ALIGNED_END(_N) __attribute__ ((aligned (_N))) #endif #else #if defined(_MSC_VER) #define BSON_ALIGNED_BEGIN(_N) __declspec(align (BSON_ALIGN_OF_PTR)) #define BSON_ALIGNED_END(_N) #else #define BSON_ALIGNED_BEGIN(_N) #define BSON_ALIGNED_END(_N) \ __attribute__ ( \ (aligned ((_N) > BSON_ALIGN_OF_PTR ? 
BSON_ALIGN_OF_PTR : (_N)))) #endif #endif #define bson_str_empty(s) (!s[0]) #define bson_str_empty0(s) (!s || !s[0]) #if defined(_WIN32) #define BSON_FUNC __FUNCTION__ #elif defined(__STDC_VERSION__) && __STDC_VERSION__ < 199901L #define BSON_FUNC __FUNCTION__ #else #define BSON_FUNC __func__ #endif #define BSON_ASSERT(test) \ do { \ if (!(BSON_LIKELY (test))) { \ fprintf (stderr, \ "%s:%d %s(): precondition failed: %s\n", \ __FILE__, \ __LINE__, \ BSON_FUNC, \ #test); \ abort (); \ } \ } while (0) /* obsolete macros, preserved for compatibility */ #define BSON_STATIC_ASSERT(s) BSON_STATIC_ASSERT_ (s, __LINE__) #define BSON_STATIC_ASSERT_JOIN(a, b) BSON_STATIC_ASSERT_JOIN2 (a, b) #define BSON_STATIC_ASSERT_JOIN2(a, b) a##b #define BSON_STATIC_ASSERT_(s, l) \ typedef char BSON_STATIC_ASSERT_JOIN (static_assert_test_, \ __LINE__)[(s) ? 1 : -1] /* modern macros */ #define BSON_STATIC_ASSERT2(_name, _s) \ BSON_STATIC_ASSERT2_ (_s, __LINE__, _name) #define BSON_STATIC_ASSERT_JOIN3(_a, _b, _name) \ BSON_STATIC_ASSERT_JOIN4 (_a, _b, _name) #define BSON_STATIC_ASSERT_JOIN4(_a, _b, _name) _a##_b##_name #define BSON_STATIC_ASSERT2_(_s, _l, _name) \ typedef char BSON_STATIC_ASSERT_JOIN3 ( \ static_assert_test_, __LINE__, _name)[(_s) ? 1 : -1] #if defined(__GNUC__) #define BSON_GNUC_PURE __attribute__ ((pure)) #define BSON_GNUC_WARN_UNUSED_RESULT __attribute__ ((warn_unused_result)) #else #define BSON_GNUC_PURE #define BSON_GNUC_WARN_UNUSED_RESULT #endif #if BSON_GNUC_CHECK_VERSION(4, 0) && !defined(_WIN32) #define BSON_GNUC_NULL_TERMINATED __attribute__ ((sentinel)) #define BSON_GNUC_INTERNAL __attribute__ ((visibility ("hidden"))) #else #define BSON_GNUC_NULL_TERMINATED #define BSON_GNUC_INTERNAL #endif #if defined(__GNUC__) #define BSON_LIKELY(x) __builtin_expect (!!(x), 1) #define BSON_UNLIKELY(x) __builtin_expect (!!(x), 0) #else #define BSON_LIKELY(v) v #define BSON_UNLIKELY(v) v #endif #if defined(__clang__) #define BSON_GNUC_PRINTF(f, v) __attribute__ ((format (printf, f, v))) #elif BSON_GNUC_CHECK_VERSION(4, 4) #define BSON_GNUC_PRINTF(f, v) __attribute__ ((format (gnu_printf, f, v))) #else #define BSON_GNUC_PRINTF(f, v) #endif #if defined(__LP64__) || defined(_LP64) #define BSON_WORD_SIZE 64 #else #define BSON_WORD_SIZE 32 #endif #if defined(_MSC_VER) #define BSON_INLINE __inline #else #define BSON_INLINE __inline__ #endif #ifdef _MSC_VER #define BSON_ENSURE_ARRAY_PARAM_SIZE(_n) #define BSON_TYPEOF decltype #else #define BSON_ENSURE_ARRAY_PARAM_SIZE(_n) static(_n) #define BSON_TYPEOF typeof #endif #if BSON_GNUC_CHECK_VERSION(3, 1) #define BSON_GNUC_DEPRECATED __attribute__ ((__deprecated__)) #else #define BSON_GNUC_DEPRECATED #endif #if BSON_GNUC_CHECK_VERSION(4, 5) #define BSON_GNUC_DEPRECATED_FOR(f) \ __attribute__ ((deprecated ("Use " #f " instead"))) #else #define BSON_GNUC_DEPRECATED_FOR(f) BSON_GNUC_DEPRECATED #endif #endif /* BSON_MACROS_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-md5.c0000644000076500000240000000057513572250757022767 0ustar alcaeusstaff#include "bson/bson-compat.h" #include "bson/bson-md5.h" #include "common-md5-private.h" void bson_md5_init (bson_md5_t *pms) { _bson_md5_init (pms); } void bson_md5_append (bson_md5_t *pms, const uint8_t *data, uint32_t nbytes) { _bson_md5_append (pms, data, nbytes); } void bson_md5_finish (bson_md5_t *pms, uint8_t digest[16]) { _bson_md5_finish (pms, digest); } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-md5.h0000644000076500000240000000545213572250757022773 0ustar alcaeusstaff/* Copyright (C) 1999, 2002 
Aladdin Enterprises. All rights reserved. This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgement in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. L. Peter Deutsch ghost@aladdin.com */ /* $Id: md5.h,v 1.4 2002/04/13 19:20:28 lpd Exp $ */ /* Independent implementation of MD5 (RFC 1321). This code implements the MD5 Algorithm defined in RFC 1321, whose text is available at http://www.ietf.org/rfc/rfc1321.txt The code is derived from the text of the RFC, including the test suite (section A.5) but excluding the rest of Appendix A. It does not include any code or documentation that is identified in the RFC as being copyrighted. The original and principal author of md5.h is L. Peter Deutsch . Other authors are noted in the change history that follows (in reverse chronological order): 2002-04-13 lpd Removed support for non-ANSI compilers; removed references to Ghostscript; clarified derivation from RFC 1321; now handles byte order either statically or dynamically. 1999-11-04 lpd Edited comments slightly for automatic TOC extraction. 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5); added conditionalization for C++ compilation from Martin Purschke . 1999-05-03 lpd Original version. */ /* * The following MD5 implementation has been modified to use types as * specified in libbson. */ #include "bson/bson-prelude.h" #ifndef BSON_MD5_H #define BSON_MD5_H #include "bson/bson-endian.h" BSON_BEGIN_DECLS typedef struct { uint32_t count[2]; /* message length in bits, lsw first */ uint32_t abcd[4]; /* digest buffer */ uint8_t buf[64]; /* accumulate block */ } bson_md5_t; BSON_EXPORT (void) bson_md5_init (bson_md5_t *pms) BSON_GNUC_DEPRECATED; BSON_EXPORT (void) bson_md5_append (bson_md5_t *pms, const uint8_t *data, uint32_t nbytes) BSON_GNUC_DEPRECATED; BSON_EXPORT (void) bson_md5_finish (bson_md5_t *pms, uint8_t digest[16]) BSON_GNUC_DEPRECATED; BSON_END_DECLS #endif /* BSON_MD5_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-memory.c0000644000076500000240000001600313572250757023603 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include "bson/bson-atomic.h" #include "bson/bson-config.h" #include "bson/bson-memory.h" static bson_mem_vtable_t gMemVtable = { malloc, calloc, #ifdef BSON_HAVE_REALLOCF reallocf, #else realloc, #endif free, }; /* *-------------------------------------------------------------------------- * * bson_malloc -- * * Allocates @num_bytes of memory and returns a pointer to it. If * malloc failed to allocate the memory, abort() is called. * * Libbson does not try to handle OOM conditions as it is beyond the * scope of this library to handle so appropriately. * * Parameters: * @num_bytes: The number of bytes to allocate. * * Returns: * A pointer if successful; otherwise abort() is called and this * function will never return. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void * bson_malloc (size_t num_bytes) /* IN */ { void *mem = NULL; if (BSON_LIKELY (num_bytes)) { if (BSON_UNLIKELY (!(mem = gMemVtable.malloc (num_bytes)))) { fprintf (stderr, "Failure to allocate memory in bson_malloc(). errno: %d.\n", errno); abort (); } } return mem; } /* *-------------------------------------------------------------------------- * * bson_malloc0 -- * * Like bson_malloc() except the memory is zeroed first. This is * similar to calloc() except that abort() is called in case of * failure to allocate memory. * * Parameters: * @num_bytes: The number of bytes to allocate. * * Returns: * A pointer if successful; otherwise abort() is called and this * function will never return. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void * bson_malloc0 (size_t num_bytes) /* IN */ { void *mem = NULL; if (BSON_LIKELY (num_bytes)) { if (BSON_UNLIKELY (!(mem = gMemVtable.calloc (1, num_bytes)))) { fprintf (stderr, "Failure to allocate memory in bson_malloc0(). errno: %d.\n", errno); abort (); } } return mem; } /* *-------------------------------------------------------------------------- * * bson_realloc -- * * This function behaves similar to realloc() except that if there is * a failure abort() is called. * * Parameters: * @mem: The memory to realloc, or NULL. * @num_bytes: The size of the new allocation or 0 to free. * * Returns: * The new allocation if successful; otherwise abort() is called and * this function never returns. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void * bson_realloc (void *mem, /* IN */ size_t num_bytes) /* IN */ { /* * Not all platforms are guaranteed to free() the memory if a call to * realloc() with a size of zero occurs. Windows, Linux, and FreeBSD do, * however, OS X does not. */ if (BSON_UNLIKELY (num_bytes == 0)) { gMemVtable.free (mem); return NULL; } mem = gMemVtable.realloc (mem, num_bytes); if (BSON_UNLIKELY (!mem)) { fprintf (stderr, "Failure to re-allocate memory in bson_realloc(). errno: %d.\n", errno); abort (); } return mem; } /* *-------------------------------------------------------------------------- * * bson_realloc_ctx -- * * This wraps bson_realloc and provides a compatible api for similar * functions with a context * * Parameters: * @mem: The memory to realloc, or NULL. * @num_bytes: The size of the new allocation or 0 to free. * @ctx: Ignored * * Returns: * The new allocation if successful; otherwise abort() is called and * this function never returns. * * Side effects: * None. 
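 *
 *       Usage sketch (illustrative; the variable names are placeholders):
 *       because this signature matches bson_realloc_func, the function can
 *       be passed to APIs that expect a context-aware reallocator, e.g.
 *
 *          doc = bson_new_from_buffer (&buf, &buf_len,
 *                                      bson_realloc_ctx, NULL);
 *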
* *-------------------------------------------------------------------------- */ void * bson_realloc_ctx (void *mem, /* IN */ size_t num_bytes, /* IN */ void *ctx) /* IN */ { return bson_realloc (mem, num_bytes); } /* *-------------------------------------------------------------------------- * * bson_free -- * * Frees @mem using the underlying allocator. * * Currently, this only calls free() directly, but that is subject to * change. * * Parameters: * @mem: An allocation to free. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_free (void *mem) /* IN */ { gMemVtable.free (mem); } /* *-------------------------------------------------------------------------- * * bson_zero_free -- * * Frees @mem using the underlying allocator. @size bytes of @mem will * be zeroed before freeing the memory. This is useful in scenarios * where @mem contains passwords or other sensitive information. * * Parameters: * @mem: An allocation to free. * @size: The number of bytes in @mem. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_zero_free (void *mem, /* IN */ size_t size) /* IN */ { if (BSON_LIKELY (mem)) { memset (mem, 0, size); gMemVtable.free (mem); } } /* *-------------------------------------------------------------------------- * * bson_mem_set_vtable -- * * This function will change our allocation vtable. * * It is imperative that this is called at the beginning of the * process before any memory has been allocated by the default * allocator. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_mem_set_vtable (const bson_mem_vtable_t *vtable) { BSON_ASSERT (vtable); if (!vtable->malloc || !vtable->calloc || !vtable->realloc || !vtable->free) { fprintf (stderr, "Failure to install BSON vtable, " "missing functions.\n"); return; } gMemVtable = *vtable; } void bson_mem_restore_vtable (void) { bson_mem_vtable_t vtable = { malloc, calloc, #ifdef BSON_HAVE_REALLOCF reallocf, #else realloc, #endif free, }; bson_mem_set_vtable (&vtable); } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-memory.h0000644000076500000240000000311713572250757023612 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson-prelude.h" #ifndef BSON_MEMORY_H #define BSON_MEMORY_H #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS typedef void *(*bson_realloc_func) (void *mem, size_t num_bytes, void *ctx); typedef struct _bson_mem_vtable_t { void *(*malloc) (size_t num_bytes); void *(*calloc) (size_t n_members, size_t num_bytes); void *(*realloc) (void *mem, size_t num_bytes); void (*free) (void *mem); void *padding[4]; } bson_mem_vtable_t; BSON_EXPORT (void) bson_mem_set_vtable (const bson_mem_vtable_t *vtable); BSON_EXPORT (void) bson_mem_restore_vtable (void); BSON_EXPORT (void *) bson_malloc (size_t num_bytes); BSON_EXPORT (void *) bson_malloc0 (size_t num_bytes); BSON_EXPORT (void *) bson_realloc (void *mem, size_t num_bytes); BSON_EXPORT (void *) bson_realloc_ctx (void *mem, size_t num_bytes, void *ctx); BSON_EXPORT (void) bson_free (void *mem); BSON_EXPORT (void) bson_zero_free (void *mem, size_t size); BSON_END_DECLS #endif /* BSON_MEMORY_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-oid.c0000644000076500000240000002172313572250757023053 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-compat.h" #include #include #include #include #include "bson/bson-context-private.h" #include "bson/bson-oid.h" #include "bson/bson-string.h" /* * This table contains an array of two character pairs for every possible * uint8_t. It is used as a lookup table when encoding a bson_oid_t * to hex formatted ASCII. Performing two characters at a time roughly * reduces the number of operations by one-half. 
*/ static const uint16_t gHexCharPairs[] = { #if BSON_BYTE_ORDER == BSON_BIG_ENDIAN 12336, 12337, 12338, 12339, 12340, 12341, 12342, 12343, 12344, 12345, 12385, 12386, 12387, 12388, 12389, 12390, 12592, 12593, 12594, 12595, 12596, 12597, 12598, 12599, 12600, 12601, 12641, 12642, 12643, 12644, 12645, 12646, 12848, 12849, 12850, 12851, 12852, 12853, 12854, 12855, 12856, 12857, 12897, 12898, 12899, 12900, 12901, 12902, 13104, 13105, 13106, 13107, 13108, 13109, 13110, 13111, 13112, 13113, 13153, 13154, 13155, 13156, 13157, 13158, 13360, 13361, 13362, 13363, 13364, 13365, 13366, 13367, 13368, 13369, 13409, 13410, 13411, 13412, 13413, 13414, 13616, 13617, 13618, 13619, 13620, 13621, 13622, 13623, 13624, 13625, 13665, 13666, 13667, 13668, 13669, 13670, 13872, 13873, 13874, 13875, 13876, 13877, 13878, 13879, 13880, 13881, 13921, 13922, 13923, 13924, 13925, 13926, 14128, 14129, 14130, 14131, 14132, 14133, 14134, 14135, 14136, 14137, 14177, 14178, 14179, 14180, 14181, 14182, 14384, 14385, 14386, 14387, 14388, 14389, 14390, 14391, 14392, 14393, 14433, 14434, 14435, 14436, 14437, 14438, 14640, 14641, 14642, 14643, 14644, 14645, 14646, 14647, 14648, 14649, 14689, 14690, 14691, 14692, 14693, 14694, 24880, 24881, 24882, 24883, 24884, 24885, 24886, 24887, 24888, 24889, 24929, 24930, 24931, 24932, 24933, 24934, 25136, 25137, 25138, 25139, 25140, 25141, 25142, 25143, 25144, 25145, 25185, 25186, 25187, 25188, 25189, 25190, 25392, 25393, 25394, 25395, 25396, 25397, 25398, 25399, 25400, 25401, 25441, 25442, 25443, 25444, 25445, 25446, 25648, 25649, 25650, 25651, 25652, 25653, 25654, 25655, 25656, 25657, 25697, 25698, 25699, 25700, 25701, 25702, 25904, 25905, 25906, 25907, 25908, 25909, 25910, 25911, 25912, 25913, 25953, 25954, 25955, 25956, 25957, 25958, 26160, 26161, 26162, 26163, 26164, 26165, 26166, 26167, 26168, 26169, 26209, 26210, 26211, 26212, 26213, 26214 #else 12336, 12592, 12848, 13104, 13360, 13616, 13872, 14128, 14384, 14640, 24880, 25136, 25392, 25648, 25904, 26160, 12337, 12593, 12849, 13105, 13361, 13617, 13873, 14129, 14385, 14641, 24881, 25137, 25393, 25649, 25905, 26161, 12338, 12594, 12850, 13106, 13362, 13618, 13874, 14130, 14386, 14642, 24882, 25138, 25394, 25650, 25906, 26162, 12339, 12595, 12851, 13107, 13363, 13619, 13875, 14131, 14387, 14643, 24883, 25139, 25395, 25651, 25907, 26163, 12340, 12596, 12852, 13108, 13364, 13620, 13876, 14132, 14388, 14644, 24884, 25140, 25396, 25652, 25908, 26164, 12341, 12597, 12853, 13109, 13365, 13621, 13877, 14133, 14389, 14645, 24885, 25141, 25397, 25653, 25909, 26165, 12342, 12598, 12854, 13110, 13366, 13622, 13878, 14134, 14390, 14646, 24886, 25142, 25398, 25654, 25910, 26166, 12343, 12599, 12855, 13111, 13367, 13623, 13879, 14135, 14391, 14647, 24887, 25143, 25399, 25655, 25911, 26167, 12344, 12600, 12856, 13112, 13368, 13624, 13880, 14136, 14392, 14648, 24888, 25144, 25400, 25656, 25912, 26168, 12345, 12601, 12857, 13113, 13369, 13625, 13881, 14137, 14393, 14649, 24889, 25145, 25401, 25657, 25913, 26169, 12385, 12641, 12897, 13153, 13409, 13665, 13921, 14177, 14433, 14689, 24929, 25185, 25441, 25697, 25953, 26209, 12386, 12642, 12898, 13154, 13410, 13666, 13922, 14178, 14434, 14690, 24930, 25186, 25442, 25698, 25954, 26210, 12387, 12643, 12899, 13155, 13411, 13667, 13923, 14179, 14435, 14691, 24931, 25187, 25443, 25699, 25955, 26211, 12388, 12644, 12900, 13156, 13412, 13668, 13924, 14180, 14436, 14692, 24932, 25188, 25444, 25700, 25956, 26212, 12389, 12645, 12901, 13157, 13413, 13669, 13925, 14181, 14437, 14693, 24933, 25189, 25445, 25701, 25957, 
26213, 12390, 12646, 12902, 13158, 13414, 13670, 13926, 14182, 14438, 14694, 24934, 25190, 25446, 25702, 25958, 26214 #endif }; void bson_oid_init_sequence (bson_oid_t *oid, /* OUT */ bson_context_t *context) /* IN */ { uint32_t now = (uint32_t) (time (NULL)); if (!context) { context = bson_context_get_default (); } now = BSON_UINT32_TO_BE (now); memcpy (&oid->bytes[0], &now, sizeof (now)); context->oid_set_seq64 (context, oid); } void bson_oid_init (bson_oid_t *oid, /* OUT */ bson_context_t *context) /* IN */ { uint32_t now = (uint32_t) (time (NULL)); BSON_ASSERT (oid); if (!context) { context = bson_context_get_default (); } now = BSON_UINT32_TO_BE (now); memcpy (&oid->bytes[0], &now, sizeof (now)); _bson_context_set_oid_rand (context, oid); context->oid_set_seq32 (context, oid); } void bson_oid_init_from_data (bson_oid_t *oid, /* OUT */ const uint8_t *data) /* IN */ { BSON_ASSERT (oid); BSON_ASSERT (data); memcpy (oid, data, 12); } void bson_oid_init_from_string (bson_oid_t *oid, /* OUT */ const char *str) /* IN */ { BSON_ASSERT (oid); BSON_ASSERT (str); bson_oid_init_from_string_unsafe (oid, str); } time_t bson_oid_get_time_t (const bson_oid_t *oid) /* IN */ { BSON_ASSERT (oid); return bson_oid_get_time_t_unsafe (oid); } void bson_oid_to_string (const bson_oid_t *oid, /* IN */ char str[BSON_ENSURE_ARRAY_PARAM_SIZE (25)]) /* OUT */ { #if !defined(__i386__) && !defined(__x86_64__) && !defined(_M_IX86) && \ !defined(_M_X64) BSON_ASSERT (oid); BSON_ASSERT (str); bson_snprintf (str, 25, "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", oid->bytes[0], oid->bytes[1], oid->bytes[2], oid->bytes[3], oid->bytes[4], oid->bytes[5], oid->bytes[6], oid->bytes[7], oid->bytes[8], oid->bytes[9], oid->bytes[10], oid->bytes[11]); #else uint16_t *dst; uint8_t *id = (uint8_t *) oid; BSON_ASSERT (oid); BSON_ASSERT (str); dst = (uint16_t *) (void *) str; dst[0] = gHexCharPairs[id[0]]; dst[1] = gHexCharPairs[id[1]]; dst[2] = gHexCharPairs[id[2]]; dst[3] = gHexCharPairs[id[3]]; dst[4] = gHexCharPairs[id[4]]; dst[5] = gHexCharPairs[id[5]]; dst[6] = gHexCharPairs[id[6]]; dst[7] = gHexCharPairs[id[7]]; dst[8] = gHexCharPairs[id[8]]; dst[9] = gHexCharPairs[id[9]]; dst[10] = gHexCharPairs[id[10]]; dst[11] = gHexCharPairs[id[11]]; str[24] = '\0'; #endif } uint32_t bson_oid_hash (const bson_oid_t *oid) /* IN */ { BSON_ASSERT (oid); return bson_oid_hash_unsafe (oid); } int bson_oid_compare (const bson_oid_t *oid1, /* IN */ const bson_oid_t *oid2) /* IN */ { BSON_ASSERT (oid1); BSON_ASSERT (oid2); return bson_oid_compare_unsafe (oid1, oid2); } bool bson_oid_equal (const bson_oid_t *oid1, /* IN */ const bson_oid_t *oid2) /* IN */ { BSON_ASSERT (oid1); BSON_ASSERT (oid2); return bson_oid_equal_unsafe (oid1, oid2); } void bson_oid_copy (const bson_oid_t *src, /* IN */ bson_oid_t *dst) /* OUT */ { BSON_ASSERT (src); BSON_ASSERT (dst); bson_oid_copy_unsafe (src, dst); } bool bson_oid_is_valid (const char *str, /* IN */ size_t length) /* IN */ { size_t i; BSON_ASSERT (str); if ((length == 25) && (str[24] == '\0')) { length = 24; } if (length == 24) { for (i = 0; i < length; i++) { switch (str[i]) { case '0': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': case '9': case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': break; default: return false; } } return true; } return false; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-oid.h0000644000076500000240000001371213572250757023057 0ustar alcaeusstaff/* * Copyright 2013 
MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_OID_H #define BSON_OID_H #include #include "bson/bson-context.h" #include "bson/bson-macros.h" #include "bson/bson-types.h" #include "bson/bson-endian.h" BSON_BEGIN_DECLS BSON_EXPORT (int) bson_oid_compare (const bson_oid_t *oid1, const bson_oid_t *oid2); BSON_EXPORT (void) bson_oid_copy (const bson_oid_t *src, bson_oid_t *dst); BSON_EXPORT (bool) bson_oid_equal (const bson_oid_t *oid1, const bson_oid_t *oid2); BSON_EXPORT (bool) bson_oid_is_valid (const char *str, size_t length); BSON_EXPORT (time_t) bson_oid_get_time_t (const bson_oid_t *oid); BSON_EXPORT (uint32_t) bson_oid_hash (const bson_oid_t *oid); BSON_EXPORT (void) bson_oid_init (bson_oid_t *oid, bson_context_t *context); BSON_EXPORT (void) bson_oid_init_from_data (bson_oid_t *oid, const uint8_t *data); BSON_EXPORT (void) bson_oid_init_from_string (bson_oid_t *oid, const char *str); BSON_EXPORT (void) bson_oid_init_sequence (bson_oid_t *oid, bson_context_t *context) BSON_GNUC_DEPRECATED; BSON_EXPORT (void) bson_oid_to_string (const bson_oid_t *oid, char str[25]); /** * bson_oid_compare_unsafe: * @oid1: A bson_oid_t. * @oid2: A bson_oid_t. * * Performs a qsort() style comparison between @oid1 and @oid2. * * This function is meant to be as fast as possible and therefore performs * no argument validation. That is the callers responsibility. * * Returns: An integer < 0 if @oid1 is less than @oid2. Zero if they are equal. * An integer > 0 if @oid1 is greater than @oid2. */ static BSON_INLINE int bson_oid_compare_unsafe (const bson_oid_t *oid1, const bson_oid_t *oid2) { return memcmp (oid1, oid2, sizeof *oid1); } /** * bson_oid_equal_unsafe: * @oid1: A bson_oid_t. * @oid2: A bson_oid_t. * * Checks the equality of @oid1 and @oid2. * * This function is meant to be as fast as possible and therefore performs * no checks for argument validity. That is the callers responsibility. * * Returns: true if @oid1 and @oid2 are equal; otherwise false. */ static BSON_INLINE bool bson_oid_equal_unsafe (const bson_oid_t *oid1, const bson_oid_t *oid2) { return !memcmp (oid1, oid2, sizeof *oid1); } /** * bson_oid_hash_unsafe: * @oid: A bson_oid_t. * * This function performs a DJB style hash upon the bytes contained in @oid. * The result is a hash key suitable for use in a hashtable. * * This function is meant to be as fast as possible and therefore performs no * validation of arguments. The caller is responsible to ensure they are * passing valid arguments. * * Returns: A uint32_t containing a hash code. */ static BSON_INLINE uint32_t bson_oid_hash_unsafe (const bson_oid_t *oid) { uint32_t hash = 5381; uint32_t i; for (i = 0; i < sizeof oid->bytes; i++) { hash = ((hash << 5) + hash) + oid->bytes[i]; } return hash; } /** * bson_oid_copy_unsafe: * @src: A bson_oid_t to copy from. * @dst: A bson_oid_t to copy into. * * Copies the contents of @src into @dst. This function is meant to be as * fast as possible and therefore performs no argument checking. 
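/*
 * Illustrative sketch (not part of libbson): generating an ObjectId,
 * rendering it as 24 hex characters, and round-tripping it through the
 * string form with the functions declared in this header. Assumes the
 * installed <bson/bson.h> header layout.
 */
#include <stdio.h>
#include <string.h>
#include <bson/bson.h>

int
main (void)
{
   bson_oid_t oid, parsed;
   char str[25]; /* 24 hex characters plus the trailing NUL */

   bson_oid_init (&oid, NULL); /* NULL selects the default context */
   bson_oid_to_string (&oid, str);
   printf ("generated %s (timestamp %ld)\n",
           str,
           (long) bson_oid_get_time_t (&oid));

   if (bson_oid_is_valid (str, strlen (str))) {
      bson_oid_init_from_string (&parsed, str);
      printf ("round-trip equal: %s\n",
              bson_oid_equal (&oid, &parsed) ? "yes" : "no");
   }
   return 0;
}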
It is the * callers responsibility to ensure they are passing valid data into the * function. */ static BSON_INLINE void bson_oid_copy_unsafe (const bson_oid_t *src, bson_oid_t *dst) { memcpy (dst, src, sizeof *src); } /** * bson_oid_parse_hex_char: * @hex: A character to parse to its integer value. * * This function contains a jump table to return the integer value for a * character containing a hexadecimal value (0-9, a-f, A-F). If the character * is not a hexadecimal character then zero is returned. * * Returns: An integer between 0 and 15. */ static BSON_INLINE uint8_t bson_oid_parse_hex_char (char hex) { switch (hex) { case '0': return 0; case '1': return 1; case '2': return 2; case '3': return 3; case '4': return 4; case '5': return 5; case '6': return 6; case '7': return 7; case '8': return 8; case '9': return 9; case 'a': case 'A': return 0xa; case 'b': case 'B': return 0xb; case 'c': case 'C': return 0xc; case 'd': case 'D': return 0xd; case 'e': case 'E': return 0xe; case 'f': case 'F': return 0xf; default: return 0; } } /** * bson_oid_init_from_string_unsafe: * @oid: A bson_oid_t to store the result. * @str: A 24-character hexadecimal encoded string. * * Parses a string containing 24 hexadecimal encoded bytes into a bson_oid_t. * This function is meant to be as fast as possible and inlined into your * code. For that purpose, the function does not perform any sort of bounds * checking and it is the callers responsibility to ensure they are passing * valid input to the function. */ static BSON_INLINE void bson_oid_init_from_string_unsafe (bson_oid_t *oid, const char *str) { int i; for (i = 0; i < 12; i++) { oid->bytes[i] = ((bson_oid_parse_hex_char (str[2 * i]) << 4) | (bson_oid_parse_hex_char (str[2 * i + 1]))); } } /** * bson_oid_get_time_t_unsafe: * @oid: A bson_oid_t. * * Fetches the time @oid was generated. * * Returns: A time_t containing the UNIX timestamp of generation. */ static BSON_INLINE time_t bson_oid_get_time_t_unsafe (const bson_oid_t *oid) { uint32_t t; memcpy (&t, oid, sizeof (t)); return BSON_UINT32_FROM_BE (t); } BSON_END_DECLS #endif /* BSON_OID_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-prelude.h0000644000076500000240000000132013572250757023734 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined(BSON_INSIDE) && !defined(BSON_COMPILATION) #error "Only can be included directly." #endifmongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-private.h0000644000076500000240000000615613572250757023762 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
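/*
 * Minimal standalone sketch of the pattern bson_oid_init_from_string_unsafe()
 * applies to each of the twelve byte pairs above: two hex digits combine into
 * one byte via (high << 4) | low. The helper names here are hypothetical.
 */
#include <stdint.h>

static uint8_t
parse_hex_nibble (char hex)
{
   if (hex >= '0' && hex <= '9') {
      return (uint8_t) (hex - '0');
   }
   if (hex >= 'a' && hex <= 'f') {
      return (uint8_t) (hex - 'a' + 10);
   }
   if (hex >= 'A' && hex <= 'F') {
      return (uint8_t) (hex - 'A' + 10);
   }
   return 0; /* non-hex input yields zero, as in bson_oid_parse_hex_char() */
}

static uint8_t
parse_hex_byte (const char *two_chars)
{
   return (uint8_t) ((parse_hex_nibble (two_chars[0]) << 4) |
                     parse_hex_nibble (two_chars[1]));
}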
* See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_PRIVATE_H #define BSON_PRIVATE_H #include "bson/bson-macros.h" #include "bson/bson-memory.h" #include "bson/bson-types.h" #if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) #define BEGIN_IGNORE_DEPRECATIONS \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\"") #define END_IGNORE_DEPRECATIONS _Pragma ("GCC diagnostic pop") #elif defined(__clang__) #define BEGIN_IGNORE_DEPRECATIONS \ _Pragma ("clang diagnostic push") \ _Pragma ("clang diagnostic ignored \"-Wdeprecated-declarations\"") #define END_IGNORE_DEPRECATIONS _Pragma ("clang diagnostic pop") #else #define BEGIN_IGNORE_DEPRECATIONS #define END_IGNORE_DEPRECATIONS #endif BSON_BEGIN_DECLS typedef enum { BSON_FLAG_NONE = 0, BSON_FLAG_INLINE = (1 << 0), BSON_FLAG_STATIC = (1 << 1), BSON_FLAG_RDONLY = (1 << 2), BSON_FLAG_CHILD = (1 << 3), BSON_FLAG_IN_CHILD = (1 << 4), BSON_FLAG_NO_FREE = (1 << 5), } bson_flags_t; #ifdef BSON_MEMCHECK #define BSON_INLINE_DATA_SIZE (120 - sizeof (char *)) #else #define BSON_INLINE_DATA_SIZE 120 #endif BSON_ALIGNED_BEGIN (128) typedef struct { bson_flags_t flags; uint32_t len; #ifdef BSON_MEMCHECK char *canary; #endif uint8_t data[BSON_INLINE_DATA_SIZE]; } bson_impl_inline_t BSON_ALIGNED_END (128); BSON_STATIC_ASSERT2 (impl_inline_t, sizeof (bson_impl_inline_t) == 128); BSON_ALIGNED_BEGIN (128) typedef struct { bson_flags_t flags; /* flags describing the bson_t */ /* len is part of the public bson_t declaration. It is not * exposed through an accessor function. Plus, it's redundant since * BSON self describes the length in the first four bytes of the * buffer. */ uint32_t len; /* length of bson document in bytes */ bson_t *parent; /* parent bson if a child */ uint32_t depth; /* Subdocument depth. */ uint8_t **buf; /* pointer to buffer pointer */ size_t *buflen; /* pointer to buffer length */ size_t offset; /* our offset inside *buf */ uint8_t *alloc; /* buffer that we own. */ size_t alloclen; /* length of buffer that we own. */ bson_realloc_func realloc; /* our realloc implementation */ void *realloc_func_ctx; /* context for our realloc func */ } bson_impl_alloc_t BSON_ALIGNED_END (128); BSON_STATIC_ASSERT2 (impl_alloc_t, sizeof (bson_impl_alloc_t) <= 128); #define BSON_REGEX_OPTIONS_SORTED "ilmsux" BSON_END_DECLS #endif /* BSON_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-reader.c0000644000076500000240000004605713572250757023551 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson.h" #include #include #ifdef BSON_OS_WIN32 #include #include #endif #include #include #include #include #include "bson/bson-reader.h" #include "bson/bson-memory.h" typedef enum { BSON_READER_HANDLE = 1, BSON_READER_DATA = 2, } bson_reader_type_t; typedef struct { bson_reader_type_t type; void *handle; bool done : 1; bool failed : 1; size_t end; size_t len; size_t offset; size_t bytes_read; bson_t inline_bson; uint8_t *data; bson_reader_read_func_t read_func; bson_reader_destroy_func_t destroy_func; } bson_reader_handle_t; typedef struct { int fd; bool do_close; } bson_reader_handle_fd_t; typedef struct { bson_reader_type_t type; const uint8_t *data; size_t length; size_t offset; bson_t inline_bson; } bson_reader_data_t; /* *-------------------------------------------------------------------------- * * _bson_reader_handle_fill_buffer -- * * Attempt to read as much as possible until the underlying buffer * in @reader is filled or we have reached end-of-stream or * read failure. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static void _bson_reader_handle_fill_buffer (bson_reader_handle_t *reader) /* IN */ { ssize_t ret; /* * Handle first read specially. */ if ((!reader->done) && (!reader->offset) && (!reader->end)) { ret = reader->read_func (reader->handle, &reader->data[0], reader->len); if (ret <= 0) { reader->done = true; return; } reader->bytes_read += ret; reader->end = ret; return; } /* * Move valid data to head. */ memmove (&reader->data[0], &reader->data[reader->offset], reader->end - reader->offset); reader->end = reader->end - reader->offset; reader->offset = 0; /* * Read in data to fill the buffer. */ ret = reader->read_func ( reader->handle, &reader->data[reader->end], reader->len - reader->end); if (ret <= 0) { reader->done = true; reader->failed = (ret < 0); } else { reader->bytes_read += ret; reader->end += ret; } BSON_ASSERT (reader->offset == 0); BSON_ASSERT (reader->end <= reader->len); } /* *-------------------------------------------------------------------------- * * bson_reader_new_from_handle -- * * Allocates and initializes a new bson_reader_t using the opaque * handle provided. * * Parameters: * @handle: an opaque handle to use to read data. * @rf: a function to perform reads on @handle. * @df: a function to release @handle, or NULL. * * Returns: * A newly allocated bson_reader_t if successful, otherwise NULL. * Free the successful result with bson_reader_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ bson_reader_t * bson_reader_new_from_handle (void *handle, bson_reader_read_func_t rf, bson_reader_destroy_func_t df) { bson_reader_handle_t *real; BSON_ASSERT (handle); BSON_ASSERT (rf); real = bson_malloc0 (sizeof *real); real->type = BSON_READER_HANDLE; real->data = bson_malloc0 (1024); real->handle = handle; real->len = 1024; real->offset = 0; bson_reader_set_read_func ((bson_reader_t *) real, rf); if (df) { bson_reader_set_destroy_func ((bson_reader_t *) real, df); } _bson_reader_handle_fill_buffer (real); return (bson_reader_t *) real; } /* *-------------------------------------------------------------------------- * * _bson_reader_handle_fd_destroy -- * * Cleanup allocations associated with state created in * bson_reader_new_from_fd(). * * Returns: * None. * * Side effects: * None. 
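/*
 * Illustrative sketch (not part of libbson): wrapping a stdio FILE * in a
 * bson_reader_t via bson_reader_new_from_handle(). The read callback must
 * behave like read(): a positive byte count on success, 0 at end of stream,
 * -1 on failure. "documents.bson" is a hypothetical input file; assumes the
 * installed <bson/bson.h> header layout.
 */
#include <stdio.h>
#include <bson/bson.h>

static ssize_t
file_read (void *handle, void *buf, size_t count)
{
   size_t n = fread (buf, 1, count, (FILE *) handle);

   if (n == 0 && ferror ((FILE *) handle)) {
      return -1;
   }
   return (ssize_t) n;
}

static void
file_destroy (void *handle)
{
   fclose ((FILE *) handle);
}

int
main (void)
{
   FILE *fp = fopen ("documents.bson", "rb");
   bson_reader_t *reader;
   const bson_t *doc;

   if (!fp) {
      return 1;
   }
   reader = bson_reader_new_from_handle (fp, file_read, file_destroy);
   while ((doc = bson_reader_read (reader, NULL))) {
      printf ("read a document of %u bytes\n", doc->len);
   }
   bson_reader_destroy (reader); /* invokes file_destroy (fp) */
   return 0;
}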
* *-------------------------------------------------------------------------- */ static void _bson_reader_handle_fd_destroy (void *handle) /* IN */ { bson_reader_handle_fd_t *fd = handle; if (fd) { if ((fd->fd != -1) && fd->do_close) { #ifdef _WIN32 _close (fd->fd); #else close (fd->fd); #endif } bson_free (fd); } } /* *-------------------------------------------------------------------------- * * _bson_reader_handle_fd_read -- * * Perform read on opaque handle created in * bson_reader_new_from_fd(). * * The underlying file descriptor is read from the current position * using the bson_reader_handle_fd_t allocated. * * Returns: * -1 on failure. * 0 on end of stream. * Greater than zero on success. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static ssize_t _bson_reader_handle_fd_read (void *handle, /* IN */ void *buf, /* IN */ size_t len) /* IN */ { bson_reader_handle_fd_t *fd = handle; ssize_t ret = -1; if (fd && (fd->fd != -1)) { again: #ifdef BSON_OS_WIN32 ret = _read (fd->fd, buf, (unsigned int) len); #else ret = read (fd->fd, buf, len); #endif if ((ret == -1) && (errno == EAGAIN)) { goto again; } } return ret; } /* *-------------------------------------------------------------------------- * * bson_reader_new_from_fd -- * * Create a new bson_reader_t using the file-descriptor provided. * * Parameters: * @fd: a libc style file-descriptor. * @close_on_destroy: if close() should be called on @fd when * bson_reader_destroy() is called. * * Returns: * A newly allocated bson_reader_t on success; otherwise NULL. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bson_reader_t * bson_reader_new_from_fd (int fd, /* IN */ bool close_on_destroy) /* IN */ { bson_reader_handle_fd_t *handle; BSON_ASSERT (fd != -1); handle = bson_malloc0 (sizeof *handle); handle->fd = fd; handle->do_close = close_on_destroy; return bson_reader_new_from_handle ( handle, _bson_reader_handle_fd_read, _bson_reader_handle_fd_destroy); } /** * bson_reader_set_read_func: * @reader: A bson_reader_t. * * Note that @reader must be initialized by bson_reader_init_from_handle(), or * data * will be destroyed. */ /* *-------------------------------------------------------------------------- * * bson_reader_set_read_func -- * * Set the read func to be provided for @reader. * * You probably want to use bson_reader_new_from_handle() or * bson_reader_new_from_fd() instead. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_reader_set_read_func (bson_reader_t *reader, /* IN */ bson_reader_read_func_t func) /* IN */ { bson_reader_handle_t *real = (bson_reader_handle_t *) reader; BSON_ASSERT (reader->type == BSON_READER_HANDLE); real->read_func = func; } /* *-------------------------------------------------------------------------- * * bson_reader_set_destroy_func -- * * Set the function to cleanup state when @reader is destroyed. * * You probably want bson_reader_new_from_fd() or * bson_reader_new_from_handle() instead. * * Returns: * None. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ void bson_reader_set_destroy_func (bson_reader_t *reader, /* IN */ bson_reader_destroy_func_t func) /* IN */ { bson_reader_handle_t *real = (bson_reader_handle_t *) reader; BSON_ASSERT (reader->type == BSON_READER_HANDLE); real->destroy_func = func; } /* *-------------------------------------------------------------------------- * * _bson_reader_handle_grow_buffer -- * * Grow the buffer to the next power of two. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static void _bson_reader_handle_grow_buffer (bson_reader_handle_t *reader) /* IN */ { size_t size; size = reader->len * 2; reader->data = bson_realloc (reader->data, size); reader->len = size; } /* *-------------------------------------------------------------------------- * * _bson_reader_handle_tell -- * * Tell the current position within the underlying file-descriptor. * * Returns: * An off_t containing the current offset. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static off_t _bson_reader_handle_tell (bson_reader_handle_t *reader) /* IN */ { off_t off; off = (off_t) reader->bytes_read; off -= (off_t) reader->end; off += (off_t) reader->offset; return off; } /* *-------------------------------------------------------------------------- * * _bson_reader_handle_read -- * * Read the next chunk of data from the underlying file descriptor * and return a bson_t which should not be modified. * * There was a failure if NULL is returned and @reached_eof is * not set to true. * * Returns: * NULL on failure or end of stream. * * Side effects: * @reached_eof is set if non-NULL. * *-------------------------------------------------------------------------- */ static const bson_t * _bson_reader_handle_read (bson_reader_handle_t *reader, /* IN */ bool *reached_eof) /* IN */ { int32_t blen; if (reached_eof) { *reached_eof = false; } while (!reader->done) { if ((reader->end - reader->offset) < 4) { _bson_reader_handle_fill_buffer (reader); continue; } memcpy (&blen, &reader->data[reader->offset], sizeof blen); blen = BSON_UINT32_FROM_LE (blen); if (blen < 5) { return NULL; } if (blen > (int32_t) (reader->end - reader->offset)) { if (blen > (int32_t) reader->len) { _bson_reader_handle_grow_buffer (reader); } _bson_reader_handle_fill_buffer (reader); continue; } if (!bson_init_static (&reader->inline_bson, &reader->data[reader->offset], (uint32_t) blen)) { return NULL; } reader->offset += blen; return &reader->inline_bson; } if (reached_eof) { *reached_eof = reader->done && !reader->failed; } return NULL; } /* *-------------------------------------------------------------------------- * * bson_reader_new_from_data -- * * Allocates and initializes a new bson_reader_t that reads the memory * provided as a stream of BSON documents. * * Parameters: * @data: A buffer to read BSON documents from. * @length: The length of @data. * * Returns: * A newly allocated bson_reader_t that should be freed with * bson_reader_destroy(). * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ bson_reader_t * bson_reader_new_from_data (const uint8_t *data, /* IN */ size_t length) /* IN */ { bson_reader_data_t *real; BSON_ASSERT (data); real = (bson_reader_data_t *) bson_malloc0 (sizeof *real); real->type = BSON_READER_DATA; real->data = data; real->length = length; real->offset = 0; return (bson_reader_t *) real; } /* *-------------------------------------------------------------------------- * * _bson_reader_data_read -- * * Read the next document from the underlying buffer. * * Returns: * NULL on failure or end of stream. * a bson_t which should not be modified. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static const bson_t * _bson_reader_data_read (bson_reader_data_t *reader, /* IN */ bool *reached_eof) /* IN */ { int32_t blen; if (reached_eof) { *reached_eof = false; } if ((reader->offset + 4) < reader->length) { memcpy (&blen, &reader->data[reader->offset], sizeof blen); blen = BSON_UINT32_FROM_LE (blen); if (blen < 5) { return NULL; } if (blen > (int32_t) (reader->length - reader->offset)) { return NULL; } if (!bson_init_static (&reader->inline_bson, &reader->data[reader->offset], (uint32_t) blen)) { return NULL; } reader->offset += blen; return &reader->inline_bson; } if (reached_eof) { *reached_eof = (reader->offset == reader->length); } return NULL; } /* *-------------------------------------------------------------------------- * * _bson_reader_data_tell -- * * Tell the current position in the underlying buffer. * * Returns: * An off_t of the current offset. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static off_t _bson_reader_data_tell (bson_reader_data_t *reader) /* IN */ { return (off_t) reader->offset; } /* *-------------------------------------------------------------------------- * * bson_reader_destroy -- * * Release a bson_reader_t created with bson_reader_new_from_data(), * bson_reader_new_from_fd(), or bson_reader_new_from_handle(). * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_reader_destroy (bson_reader_t *reader) /* IN */ { if (!reader) { return; } switch (reader->type) { case 0: break; case BSON_READER_HANDLE: { bson_reader_handle_t *handle = (bson_reader_handle_t *) reader; if (handle->destroy_func) { handle->destroy_func (handle->handle); } bson_free (handle->data); } break; case BSON_READER_DATA: break; default: fprintf (stderr, "No such reader type: %02x\n", reader->type); break; } reader->type = 0; bson_free (reader); } /* *-------------------------------------------------------------------------- * * bson_reader_read -- * * Reads the next bson_t in the underlying memory or storage. The * resulting bson_t should not be modified or freed. You may copy it * and iterate over it. Functions that take a const bson_t* are safe * to use. * * This structure does not survive calls to bson_reader_read() or * bson_reader_destroy() as it uses memory allocated by the reader or * underlying storage/memory. * * If NULL is returned then @reached_eof will be set to true if the * end of the file or buffer was reached. This indicates if there was * an error parsing the document stream. * * Returns: * A const bson_t that should not be modified or freed. * NULL on failure or end of stream. * * Side effects: * @reached_eof is set if non-NULL. 
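/*
 * Illustrative sketch (not part of libbson): iterating documents held in a
 * contiguous memory buffer with bson_reader_new_from_data(). A NULL return
 * while reached_eof is still false indicates a truncated or corrupt stream.
 * The dump_buffer() helper is hypothetical; assumes <bson/bson.h>.
 */
#include <stdio.h>
#include <bson/bson.h>

void
dump_buffer (const uint8_t *data, size_t length)
{
   bson_reader_t *reader = bson_reader_new_from_data (data, length);
   const bson_t *doc;
   bool reached_eof = false;

   while ((doc = bson_reader_read (reader, &reached_eof))) {
      char *json = bson_as_canonical_extended_json (doc, NULL);
      printf ("%s\n", json);
      bson_free (json);
   }
   if (!reached_eof) {
      fprintf (stderr, "stopped before the end of the buffer\n");
   }
   bson_reader_destroy (reader);
}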
* *-------------------------------------------------------------------------- */ const bson_t * bson_reader_read (bson_reader_t *reader, /* IN */ bool *reached_eof) /* OUT */ { BSON_ASSERT (reader); switch (reader->type) { case BSON_READER_HANDLE: return _bson_reader_handle_read ((bson_reader_handle_t *) reader, reached_eof); case BSON_READER_DATA: return _bson_reader_data_read ((bson_reader_data_t *) reader, reached_eof); default: fprintf (stderr, "No such reader type: %02x\n", reader->type); break; } return NULL; } /* *-------------------------------------------------------------------------- * * bson_reader_tell -- * * Return the current position in the underlying reader. This will * always be at the beginning of a bson document or end of file. * * Returns: * An off_t containing the current offset. * * Side effects: * None. * *-------------------------------------------------------------------------- */ off_t bson_reader_tell (bson_reader_t *reader) /* IN */ { BSON_ASSERT (reader); switch (reader->type) { case BSON_READER_HANDLE: return _bson_reader_handle_tell ((bson_reader_handle_t *) reader); case BSON_READER_DATA: return _bson_reader_data_tell ((bson_reader_data_t *) reader); default: fprintf (stderr, "No such reader type: %02x\n", reader->type); return -1; } } /* *-------------------------------------------------------------------------- * * bson_reader_new_from_file -- * * A convenience function to open a file containing sequential * bson documents and read them using bson_reader_t. * * Returns: * A new bson_reader_t if successful, otherwise NULL and * @error is set. Free the non-NULL result with * bson_reader_destroy(). * * Side effects: * @error may be set. * *-------------------------------------------------------------------------- */ bson_reader_t * bson_reader_new_from_file (const char *path, /* IN */ bson_error_t *error) /* OUT */ { char errmsg_buf[BSON_ERROR_BUFFER_SIZE]; char *errmsg; int fd; BSON_ASSERT (path); #ifdef BSON_OS_WIN32 if (_sopen_s (&fd, path, (_O_RDONLY | _O_BINARY), _SH_DENYNO, 0) != 0) { fd = -1; } #else fd = open (path, O_RDONLY); #endif if (fd == -1) { errmsg = bson_strerror_r (errno, errmsg_buf, sizeof errmsg_buf); bson_set_error ( error, BSON_ERROR_READER, BSON_ERROR_READER_BADFD, "%s", errmsg); return NULL; } return bson_reader_new_from_fd (fd, true); } /* *-------------------------------------------------------------------------- * * bson_reader_reset -- * * Restore the reader to its initial state. Valid only for readers * created with bson_reader_new_from_data. * *-------------------------------------------------------------------------- */ void bson_reader_reset (bson_reader_t *reader) { bson_reader_data_t *real = (bson_reader_data_t *) reader; if (real->type != BSON_READER_DATA) { fprintf (stderr, "Reader type cannot be reset\n"); return; } real->offset = 0; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-reader.h0000644000076500000240000000644413572250757023552 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
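/*
 * Illustrative sketch (not part of libbson): the bson_reader_new_from_file()
 * convenience path, with the bson_error_t populated on open failure and
 * bson_reader_tell() reporting the offset after each document.
 * "documents.bson" is a hypothetical path; assumes <bson/bson.h>.
 */
#include <stdio.h>
#include <bson/bson.h>

int
main (void)
{
   bson_error_t error;
   bson_reader_t *reader = bson_reader_new_from_file ("documents.bson", &error);
   const bson_t *doc;

   if (!reader) {
      fprintf (stderr, "could not open file: %s\n", error.message);
      return 1;
   }
   while ((doc = bson_reader_read (reader, NULL))) {
      printf ("next document starts at offset %lld\n",
              (long long) bson_reader_tell (reader));
   }
   bson_reader_destroy (reader);
   return 0;
}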
* See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_READER_H #define BSON_READER_H #include "bson/bson-compat.h" #include "bson/bson-oid.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS #define BSON_ERROR_READER_BADFD 1 /* *-------------------------------------------------------------------------- * * bson_reader_read_func_t -- * * This function is a callback used by bson_reader_t to read the * next chunk of data from the underlying opaque file descriptor. * * This function is meant to operate similar to the read() function * as part of libc on UNIX-like systems. * * Parameters: * @handle: The handle to read from. * @buf: The buffer to read into. * @count: The number of bytes to read. * * Returns: * 0 for end of stream. * -1 for read failure. * Greater than zero for number of bytes read into @buf. * * Side effects: * None. * *-------------------------------------------------------------------------- */ typedef ssize_t (*bson_reader_read_func_t) (void *handle, /* IN */ void *buf, /* IN */ size_t count); /* IN */ /* *-------------------------------------------------------------------------- * * bson_reader_destroy_func_t -- * * Destroy callback to release any resources associated with the * opaque handle. * * Parameters: * @handle: the handle provided to bson_reader_new_from_handle(). * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ typedef void (*bson_reader_destroy_func_t) (void *handle); /* IN */ BSON_EXPORT (bson_reader_t *) bson_reader_new_from_handle (void *handle, bson_reader_read_func_t rf, bson_reader_destroy_func_t df); BSON_EXPORT (bson_reader_t *) bson_reader_new_from_fd (int fd, bool close_on_destroy); BSON_EXPORT (bson_reader_t *) bson_reader_new_from_file (const char *path, bson_error_t *error); BSON_EXPORT (bson_reader_t *) bson_reader_new_from_data (const uint8_t *data, size_t length); BSON_EXPORT (void) bson_reader_destroy (bson_reader_t *reader); BSON_EXPORT (void) bson_reader_set_read_func (bson_reader_t *reader, bson_reader_read_func_t func); BSON_EXPORT (void) bson_reader_set_destroy_func (bson_reader_t *reader, bson_reader_destroy_func_t func); BSON_EXPORT (const bson_t *) bson_reader_read (bson_reader_t *reader, bool *reached_eof); BSON_EXPORT (off_t) bson_reader_tell (bson_reader_t *reader); BSON_EXPORT (void) bson_reader_reset (bson_reader_t *reader); BSON_END_DECLS #endif /* BSON_READER_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-string.c0000644000076500000240000004065013572250757023606 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include "bson/bson-compat.h" #include "bson/bson-string.h" #include "bson/bson-memory.h" #include "bson/bson-utf8.h" #ifdef BSON_HAVE_STRINGS_H #include #else #include #endif /* *-------------------------------------------------------------------------- * * bson_string_new -- * * Create a new bson_string_t. * * bson_string_t is a power-of-2 allocation growing string. Every * time data is appended the next power of two size is chosen for * the allocation. Pretty standard stuff. * * It is UTF-8 aware through the use of bson_string_append_unichar(). * The proper UTF-8 character sequence will be used. * * Parameters: * @str: a string to copy or NULL. * * Returns: * A newly allocated bson_string_t that should be freed with * bson_string_free(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ bson_string_t * bson_string_new (const char *str) /* IN */ { bson_string_t *ret; ret = bson_malloc0 (sizeof *ret); ret->len = str ? (int) strlen (str) : 0; ret->alloc = ret->len + 1; if (!bson_is_power_of_two (ret->alloc)) { ret->alloc = (uint32_t) bson_next_power_of_two ((size_t) ret->alloc); } BSON_ASSERT (ret->alloc >= 1); ret->str = bson_malloc (ret->alloc); if (str) { memcpy (ret->str, str, ret->len); } ret->str[ret->len] = '\0'; ret->str[ret->len] = '\0'; return ret; } char * bson_string_free (bson_string_t *string, /* IN */ bool free_segment) /* IN */ { char *ret = NULL; BSON_ASSERT (string); if (!free_segment) { ret = string->str; } else { bson_free (string->str); } bson_free (string); return ret; } /* *-------------------------------------------------------------------------- * * bson_string_append -- * * Append the UTF-8 string @str to @string. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_string_append (bson_string_t *string, /* IN */ const char *str) /* IN */ { uint32_t len; BSON_ASSERT (string); BSON_ASSERT (str); len = (uint32_t) strlen (str); if ((string->alloc - string->len - 1) < len) { string->alloc += len; if (!bson_is_power_of_two (string->alloc)) { string->alloc = (uint32_t) bson_next_power_of_two ((size_t) string->alloc); } string->str = bson_realloc (string->str, string->alloc); } memcpy (string->str + string->len, str, len); string->len += len; string->str[string->len] = '\0'; } /* *-------------------------------------------------------------------------- * * bson_string_append_c -- * * Append the ASCII character @c to @string. * * Do not use this if you are working with UTF-8 sequences, * use bson_string_append_unichar(). * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_string_append_c (bson_string_t *string, /* IN */ char c) /* IN */ { char cc[2]; BSON_ASSERT (string); if (BSON_UNLIKELY (string->alloc == (string->len + 1))) { cc[0] = c; cc[1] = '\0'; bson_string_append (string, cc); return; } string->str[string->len++] = c; string->str[string->len] = '\0'; } /* *-------------------------------------------------------------------------- * * bson_string_append_unichar -- * * Append the bson_unichar_t @unichar to the string @string. * * Returns: * None. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ void bson_string_append_unichar (bson_string_t *string, /* IN */ bson_unichar_t unichar) /* IN */ { uint32_t len; char str[8]; BSON_ASSERT (string); BSON_ASSERT (unichar); bson_utf8_from_unichar (unichar, str, &len); if (len <= 6) { str[len] = '\0'; bson_string_append (string, str); } } /* *-------------------------------------------------------------------------- * * bson_string_append_printf -- * * Format a string according to @format and append it to @string. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_string_append_printf (bson_string_t *string, const char *format, ...) { va_list args; char *ret; BSON_ASSERT (string); BSON_ASSERT (format); va_start (args, format); ret = bson_strdupv_printf (format, args); va_end (args); bson_string_append (string, ret); bson_free (ret); } /* *-------------------------------------------------------------------------- * * bson_string_truncate -- * * Truncate the string @string to @len bytes. * * The underlying memory will be released via realloc() down to * the minimum required size specified by @len. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_string_truncate (bson_string_t *string, /* IN */ uint32_t len) /* IN */ { uint32_t alloc; BSON_ASSERT (string); BSON_ASSERT (len < INT_MAX); alloc = len + 1; if (alloc < 16) { alloc = 16; } if (!bson_is_power_of_two (alloc)) { alloc = (uint32_t) bson_next_power_of_two ((size_t) alloc); } string->str = bson_realloc (string->str, alloc); string->alloc = alloc; string->len = len; string->str[string->len] = '\0'; } /* *-------------------------------------------------------------------------- * * bson_strdup -- * * Portable strdup(). * * Returns: * A newly allocated string that should be freed with bson_free(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ char * bson_strdup (const char *str) /* IN */ { long len; char *out; if (!str) { return NULL; } len = (long) strlen (str); out = bson_malloc (len + 1); if (!out) { return NULL; } memcpy (out, str, len + 1); return out; } /* *-------------------------------------------------------------------------- * * bson_strdupv_printf -- * * Like bson_strdup_printf() but takes a va_list. * * Returns: * A newly allocated string that should be freed with bson_free(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ char * bson_strdupv_printf (const char *format, /* IN */ va_list args) /* IN */ { va_list my_args; char *buf; int len = 32; int n; BSON_ASSERT (format); buf = bson_malloc0 (len); while (true) { va_copy (my_args, args); n = bson_vsnprintf (buf, len, format, my_args); va_end (my_args); if (n > -1 && n < len) { return buf; } if (n > -1) { len = n + 1; } else { len *= 2; } buf = bson_realloc (buf, len); } } /* *-------------------------------------------------------------------------- * * bson_strdup_printf -- * * Convenience function that formats a string according to @format * and returns a copy of it. * * Returns: * A newly created string that should be freed with bson_free(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ char * bson_strdup_printf (const char *format, /* IN */ ...) 
/* IN */ { va_list args; char *ret; BSON_ASSERT (format); va_start (args, format); ret = bson_strdupv_printf (format, args); va_end (args); return ret; } /* *-------------------------------------------------------------------------- * * bson_strndup -- * * A portable strndup(). * * Returns: * A newly allocated string that should be freed with bson_free(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ char * bson_strndup (const char *str, /* IN */ size_t n_bytes) /* IN */ { char *ret; BSON_ASSERT (str); ret = bson_malloc (n_bytes + 1); bson_strncpy (ret, str, n_bytes + 1); return ret; } /* *-------------------------------------------------------------------------- * * bson_strfreev -- * * Frees each string in a NULL terminated array of strings. * This also frees the underlying array. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_strfreev (char **str) /* IN */ { int i; if (str) { for (i = 0; str[i]; i++) bson_free (str[i]); bson_free (str); } } /* *-------------------------------------------------------------------------- * * bson_strnlen -- * * A portable strnlen(). * * Returns: * The length of @s up to @maxlen. * * Side effects: * None. * *-------------------------------------------------------------------------- */ size_t bson_strnlen (const char *s, /* IN */ size_t maxlen) /* IN */ { #ifdef BSON_HAVE_STRNLEN return strnlen (s, maxlen); #else size_t i; for (i = 0; i < maxlen; i++) { if (s[i] == '\0') { return i; } } return maxlen; #endif } /* *-------------------------------------------------------------------------- * * bson_strncpy -- * * A portable strncpy. * * Copies @src into @dst, which must be @size bytes or larger. * The result is guaranteed to be \0 terminated. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_strncpy (char *dst, /* IN */ const char *src, /* IN */ size_t size) /* IN */ { if (size == 0) { return; } #ifdef _MSC_VER strncpy_s (dst, size, src, _TRUNCATE); #else strncpy (dst, src, size); dst[size - 1] = '\0'; #endif } /* *-------------------------------------------------------------------------- * * bson_vsnprintf -- * * A portable vsnprintf. * * If more than @size bytes are required (exluding the null byte), * then @size bytes will be written to @string and the return value * is the number of bytes required. * * This function will always return a NULL terminated string. * * Returns: * The number of bytes required for @format excluding the null byte. * * Side effects: * @str is initialized with the formatted string. * *-------------------------------------------------------------------------- */ int bson_vsnprintf (char *str, /* IN */ size_t size, /* IN */ const char *format, /* IN */ va_list ap) /* IN */ { #ifdef _MSC_VER int r = -1; BSON_ASSERT (str); if (size == 0) { return 0; } r = _vsnprintf_s (str, size, _TRUNCATE, format, ap); if (r == -1) { r = _vscprintf (format, ap); } str[size - 1] = '\0'; return r; #else int r; BSON_ASSERT (str); if (size == 0) { return 0; } r = vsnprintf (str, size, format, ap); str[size - 1] = '\0'; return r; #endif } /* *-------------------------------------------------------------------------- * * bson_snprintf -- * * A portable snprintf. * * If @format requires more than @size bytes, then @size bytes are * written and the result is the number of bytes required (excluding * the null byte). 
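/*
 * Illustrative sketch (not part of libbson): the portable copy and length
 * helpers above. Unlike plain strncpy(), bson_strncpy() always
 * NUL-terminates the destination. Assumes <bson/bson.h>.
 */
#include <stdio.h>
#include <bson/bson.h>

int
main (void)
{
   char dst[8];
   char *prefix;

   bson_strncpy (dst, "a rather long source string", sizeof dst);
   printf ("copied \"%s\" (length %zu)\n", dst, bson_strnlen (dst, sizeof dst));

   prefix = bson_strndup ("abcdefgh", 3); /* copies "abc" */
   printf ("prefix: %s\n", prefix);
   bson_free (prefix);
   return 0;
}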
* * This function will always return a NULL terminated string. * * Returns: * The number of bytes required for @format. * * Side effects: * @str is initialized. * *-------------------------------------------------------------------------- */ int bson_snprintf (char *str, /* IN */ size_t size, /* IN */ const char *format, /* IN */ ...) { int r; va_list ap; BSON_ASSERT (str); va_start (ap, format); r = bson_vsnprintf (str, size, format, ap); va_end (ap); return r; } /* *-------------------------------------------------------------------------- * * bson_ascii_strtoll -- * * A portable strtoll. * * Convert a string to a 64-bit signed integer according to the given * @base, which must be 16, 10, or 8. Leading whitespace will be ignored. * * If base is 0 is passed in, the base is inferred from the string's * leading characters. Base-16 numbers start with "0x" or "0X", base-8 * numbers start with "0", base-10 numbers start with a digit from 1 to 9. * * If @e is not NULL, it will be assigned the address of the first invalid * character of @s, or its null terminating byte if the entire string was * valid. * * If an invalid value is encountered, errno will be set to EINVAL and * zero will be returned. If the number is out of range, errno is set to * ERANGE and LLONG_MAX or LLONG_MIN is returned. * * Returns: * The result of the conversion. * * Side effects: * errno will be set on error. * *-------------------------------------------------------------------------- */ int64_t bson_ascii_strtoll (const char *s, char **e, int base) { char *tok = (char *) s; char *digits_start; char c; int64_t number = 0; int64_t sign = 1; int64_t cutoff; int64_t cutlim; errno = 0; if (!s) { errno = EINVAL; return 0; } c = *tok; while (isspace (c)) { c = *++tok; } if (c == '-') { sign = -1; c = *++tok; } else if (c == '+') { c = *++tok; } else if (!isdigit (c)) { errno = EINVAL; return 0; } /* from here down, inspired by NetBSD's strtoll */ if ((base == 0 || base == 16) && c == '0' && (tok[1] == 'x' || tok[1] == 'X')) { tok += 2; c = *tok; base = 16; } if (base == 0) { base = c == '0' ? 8 : 10; } /* Cutoff is the greatest magnitude we'll be able to multiply by base without * range error. If the current number is past cutoff and we see valid digit, * fail. If the number is *equal* to cutoff, then the next digit must be less * than cutlim, otherwise fail. */ cutoff = sign == -1 ? INT64_MIN : INT64_MAX; cutlim = (int) (cutoff % base); cutoff /= base; if (sign == -1) { if (cutlim > 0) { cutlim -= base; cutoff += 1; } cutlim = -cutlim; } digits_start = tok; while ((c = *tok)) { if (isdigit (c)) { c -= '0'; } else if (isalpha (c)) { c -= isupper (c) ? 'A' - 10 : 'a' - 10; } else { /* end of number string */ break; } if (c >= base) { break; } if (sign == -1) { if (number < cutoff || (number == cutoff && c > cutlim)) { number = INT64_MIN; errno = ERANGE; break; } else { number *= base; number -= c; } } else { if (number > cutoff || (number == cutoff && c > cutlim)) { number = INT64_MAX; errno = ERANGE; break; } else { number *= base; number += c; } } tok++; } /* did we parse any digits at all? */ if (e != NULL && tok > digits_start) { *e = tok; } return number; } int bson_strcasecmp (const char *s1, const char *s2) { #ifdef BSON_OS_WIN32 return _stricmp (s1, s2); #else return strcasecmp (s1, s2); #endif } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-string.h0000644000076500000240000000452513572250757023614 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. 
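/*
 * Illustrative sketch (not part of libbson): parsing integers with
 * bson_ascii_strtoll(). Passing base 0 infers the base from the prefix, and
 * @e is left pointing at the first character that was not consumed.
 * Assumes <bson/bson.h>.
 */
#include <errno.h>
#include <stdio.h>
#include <bson/bson.h>

int
main (void)
{
   char *end = NULL;
   int64_t dec, hex;

   errno = 0;
   dec = bson_ascii_strtoll ("  -12345xyz", &end, 10);
   printf ("parsed %lld, rest \"%s\", errno %d\n", (long long) dec, end, errno);

   hex = bson_ascii_strtoll ("0x7fff", &end, 0); /* base inferred as 16 */
   printf ("parsed %lld\n", (long long) hex);

   printf ("bson_strcasecmp: %d\n", bson_strcasecmp ("BSON", "bson"));
   return 0;
}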
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_STRING_H #define BSON_STRING_H #include #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS typedef struct { char *str; uint32_t len; uint32_t alloc; } bson_string_t; BSON_EXPORT (bson_string_t *) bson_string_new (const char *str); BSON_EXPORT (char *) bson_string_free (bson_string_t *string, bool free_segment); BSON_EXPORT (void) bson_string_append (bson_string_t *string, const char *str); BSON_EXPORT (void) bson_string_append_c (bson_string_t *string, char str); BSON_EXPORT (void) bson_string_append_unichar (bson_string_t *string, bson_unichar_t unichar); BSON_EXPORT (void) bson_string_append_printf (bson_string_t *string, const char *format, ...) BSON_GNUC_PRINTF (2, 3); BSON_EXPORT (void) bson_string_truncate (bson_string_t *string, uint32_t len); BSON_EXPORT (char *) bson_strdup (const char *str); BSON_EXPORT (char *) bson_strdup_printf (const char *format, ...) BSON_GNUC_PRINTF (1, 2); BSON_EXPORT (char *) bson_strdupv_printf (const char *format, va_list args) BSON_GNUC_PRINTF (1, 0); BSON_EXPORT (char *) bson_strndup (const char *str, size_t n_bytes); BSON_EXPORT (void) bson_strncpy (char *dst, const char *src, size_t size); BSON_EXPORT (int) bson_vsnprintf (char *str, size_t size, const char *format, va_list ap) BSON_GNUC_PRINTF (3, 0); BSON_EXPORT (int) bson_snprintf (char *str, size_t size, const char *format, ...) BSON_GNUC_PRINTF (3, 4); BSON_EXPORT (void) bson_strfreev (char **strv); BSON_EXPORT (size_t) bson_strnlen (const char *s, size_t maxlen); BSON_EXPORT (int64_t) bson_ascii_strtoll (const char *str, char **endptr, int base); BSON_EXPORT (int) bson_strcasecmp (const char *s1, const char *s2); BSON_END_DECLS #endif /* BSON_STRING_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-timegm-private.h0000644000076500000240000000300613572250757025231 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson-prelude.h" #ifndef BSON_TIMEGM_PRIVATE_H #define BSON_TIMEGM_PRIVATE_H #include "bson/bson-compat.h" #include "bson/bson-macros.h" BSON_BEGIN_DECLS /* avoid system-dependent struct tm definitions */ struct bson_tm { int64_t tm_sec; /* seconds after the minute [0-60] */ int64_t tm_min; /* minutes after the hour [0-59] */ int64_t tm_hour; /* hours since midnight [0-23] */ int64_t tm_mday; /* day of the month [1-31] */ int64_t tm_mon; /* months since January [0-11] */ int64_t tm_year; /* years since 1900 */ int64_t tm_wday; /* days since Sunday [0-6] */ int64_t tm_yday; /* days since January 1 [0-365] */ int64_t tm_isdst; /* Daylight Savings Time flag */ int64_t tm_gmtoff; /* offset from CUT in seconds */ char *tm_zone; /* timezone abbreviation */ }; int64_t _bson_timegm (struct bson_tm *const tmp); BSON_END_DECLS #endif /* BSON_TIMEGM_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-timegm.c0000644000076500000240000005537613572250757023575 0ustar alcaeusstaff/* ** The original version of this file is in the public domain, so clarified as of ** 1996-06-05 by Arthur David Olson. */ /* ** Leap second handling from Bradley White. ** POSIX-style TZ environment variable handling from Guy Harris. ** Updated to use int64_t's instead of system-dependent definitions of int64_t ** and struct tm by A. Jesse Jiryu Davis for MongoDB, Inc. */ #include "bson/bson-compat.h" #include "bson/bson-macros.h" #include "bson/bson-timegm-private.h" #include "errno.h" #include "string.h" #include /* for INT64_MAX and INT64_MIN */ /* Unlike 's isdigit, this also works if c < 0 | c > UCHAR_MAX. */ #define is_digit(c) ((unsigned) (c) - '0' <= 9) #if 2 < __GNUC__ + (96 <= __GNUC_MINOR__) #define ATTRIBUTE_CONST __attribute__ ((const)) #define ATTRIBUTE_PURE __attribute__ ((__pure__)) #define ATTRIBUTE_FORMAT(spec) __attribute__ ((__format__ spec)) #else #define ATTRIBUTE_CONST /* empty */ #define ATTRIBUTE_PURE /* empty */ #define ATTRIBUTE_FORMAT(spec) /* empty */ #endif #if !defined _Noreturn && \ (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112) #if 2 < __GNUC__ + (8 <= __GNUC_MINOR__) #define _Noreturn __attribute__((__noreturn__)) #else #define _Noreturn #endif #endif #if (!defined(__STDC_VERSION__) || __STDC_VERSION__ < 199901) && \ !defined restrict #define restrict /* empty */ #endif #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunknown-pragmas" #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wshift-negative-value" #endif /* The minimum and maximum finite time values. */ static int64_t const time_t_min = INT64_MIN; static int64_t const time_t_max = INT64_MAX; #ifdef __clang__ #pragma clang diagnostic pop #pragma clang diagnostic pop #endif #ifndef TZ_MAX_TIMES #define TZ_MAX_TIMES 2000 #endif /* !defined TZ_MAX_TIMES */ #ifndef TZ_MAX_TYPES /* This must be at least 17 for Europe/Samara and Europe/Vilnius. 
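/*
 * Illustrative sketch: converting a broken-down UTC time to seconds since
 * the Unix epoch with the private _bson_timegm() helper declared above.
 * bson-timegm-private.h is internal, so this only compiles inside the
 * libbson source tree; tm_year counts from 1900 and tm_mon is zero-based,
 * matching struct tm conventions.
 */
#include <stdio.h>
#include "bson/bson-timegm-private.h"

int
main (void)
{
   struct bson_tm tm = {0};

   tm.tm_year = 2019 - 1900; /* years since 1900 */
   tm.tm_mon = 11;           /* December (months run 0-11) */
   tm.tm_mday = 5;
   tm.tm_hour = 12;

   printf ("seconds since epoch: %lld\n", (long long) _bson_timegm (&tm));
   return 0;
}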
*/ #define TZ_MAX_TYPES 256 /* Limited by what (unsigned char)'s can hold */ #endif /* !defined TZ_MAX_TYPES */ #ifndef TZ_MAX_CHARS #define TZ_MAX_CHARS 50 /* Maximum number of abbreviation characters */ /* (limited by what unsigned chars can hold) */ #endif /* !defined TZ_MAX_CHARS */ #ifndef TZ_MAX_LEAPS #define TZ_MAX_LEAPS 50 /* Maximum number of leap second corrections */ #endif /* !defined TZ_MAX_LEAPS */ #define SECSPERMIN 60 #define MINSPERHOUR 60 #define HOURSPERDAY 24 #define DAYSPERWEEK 7 #define DAYSPERNYEAR 365 #define DAYSPERLYEAR 366 #define SECSPERHOUR (SECSPERMIN * MINSPERHOUR) #define SECSPERDAY ((int_fast32_t) SECSPERHOUR * HOURSPERDAY) #define MONSPERYEAR 12 #define TM_SUNDAY 0 #define TM_MONDAY 1 #define TM_TUESDAY 2 #define TM_WEDNESDAY 3 #define TM_THURSDAY 4 #define TM_FRIDAY 5 #define TM_SATURDAY 6 #define TM_JANUARY 0 #define TM_FEBRUARY 1 #define TM_MARCH 2 #define TM_APRIL 3 #define TM_MAY 4 #define TM_JUNE 5 #define TM_JULY 6 #define TM_AUGUST 7 #define TM_SEPTEMBER 8 #define TM_OCTOBER 9 #define TM_NOVEMBER 10 #define TM_DECEMBER 11 #define TM_YEAR_BASE 1900 #define EPOCH_YEAR 1970 #define EPOCH_WDAY TM_THURSDAY #define isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0)) /* ** Since everything in isleap is modulo 400 (or a factor of 400), we know that ** isleap(y) == isleap(y % 400) ** and so ** isleap(a + b) == isleap((a + b) % 400) ** or ** isleap(a + b) == isleap(a % 400 + b % 400) ** This is true even if % means modulo rather than Fortran remainder ** (which is allowed by C89 but not C99). ** We use this to avoid addition overflow problems. */ #define isleap_sum(a, b) isleap ((a) % 400 + (b) % 400) #ifndef TZ_ABBR_MAX_LEN #define TZ_ABBR_MAX_LEN 16 #endif /* !defined TZ_ABBR_MAX_LEN */ #ifndef TZ_ABBR_CHAR_SET #define TZ_ABBR_CHAR_SET \ "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 :+-._" #endif /* !defined TZ_ABBR_CHAR_SET */ #ifndef TZ_ABBR_ERR_CHAR #define TZ_ABBR_ERR_CHAR '_' #endif /* !defined TZ_ABBR_ERR_CHAR */ #ifndef WILDABBR /* ** Someone might make incorrect use of a time zone abbreviation: ** 1. They might reference tzname[0] before calling tzset (explicitly ** or implicitly). ** 2. They might reference tzname[1] before calling tzset (explicitly ** or implicitly). ** 3. They might reference tzname[1] after setting to a time zone ** in which Daylight Saving Time is never observed. ** 4. They might reference tzname[0] after setting to a time zone ** in which Standard Time is never observed. ** 5. They might reference tm.TM_ZONE after calling offtime. ** What's best to do in the above cases is open to debate; ** for now, we just set things up so that in any of the five cases ** WILDABBR is used. Another possibility: initialize tzname[0] to the ** string "tzname[0] used before set", and similarly for the other cases. ** And another: initialize tzname[0] to "ERA", with an explanation in the ** manual page of what this "time zone abbreviation" means (doing this so ** that tzname[0] has the "normal" length of three characters). 
*/ #define WILDABBR " " #endif /* !defined WILDABBR */ #ifdef TM_ZONE static const char wildabbr[] = WILDABBR; static const char gmt[] = "GMT"; #endif struct ttinfo { /* time type information */ int_fast32_t tt_gmtoff; /* UT offset in seconds */ int tt_isdst; /* used to set tm_isdst */ int tt_abbrind; /* abbreviation list index */ int tt_ttisstd; /* true if transition is std time */ int tt_ttisgmt; /* true if transition is UT */ }; struct lsinfo { /* leap second information */ int64_t ls_trans; /* transition time */ int_fast64_t ls_corr; /* correction to apply */ }; #define BIGGEST(a, b) (((a) > (b)) ? (a) : (b)) #ifdef TZNAME_MAX #define MY_TZNAME_MAX TZNAME_MAX #endif /* defined TZNAME_MAX */ #ifndef TZNAME_MAX #define MY_TZNAME_MAX 255 #endif /* !defined TZNAME_MAX */ struct state { int leapcnt; int timecnt; int typecnt; int charcnt; int goback; int goahead; int64_t ats[TZ_MAX_TIMES]; unsigned char types[TZ_MAX_TIMES]; struct ttinfo ttis[TZ_MAX_TYPES]; char chars[BIGGEST (TZ_MAX_CHARS + 1, (2 * (MY_TZNAME_MAX + 1)))]; struct lsinfo lsis[TZ_MAX_LEAPS]; int defaulttype; /* for early times or if no transitions */ }; struct rule { int r_type; /* type of rule--see below */ int r_day; /* day number of rule */ int r_week; /* week number of rule */ int r_mon; /* month number of rule */ int_fast32_t r_time; /* transition time of rule */ }; #define JULIAN_DAY 0 /* Jn - Julian day */ #define DAY_OF_YEAR 1 /* n - day of year */ #define MONTH_NTH_DAY_OF_WEEK 2 /* Mm.n.d - month, week, day of week */ /* ** Prototypes for static functions. */ static void gmtload (struct state *sp); static struct bson_tm * gmtsub (const int64_t *timep, int_fast32_t offset, struct bson_tm *tmp); static int64_t increment_overflow (int64_t *number, int64_t delta); static int64_t leaps_thru_end_of (int64_t y) ATTRIBUTE_PURE; static int64_t increment_overflow32 (int_fast32_t *number, int64_t delta); static int64_t normalize_overflow32 (int_fast32_t *tensptr, int64_t *unitsptr, int64_t base); static int64_t normalize_overflow (int64_t *tensptr, int64_t *unitsptr, int64_t base); static int64_t time1 (struct bson_tm *tmp, struct bson_tm *(*funcp) (const int64_t *, int_fast32_t, struct bson_tm *), int_fast32_t offset); static int64_t time2 (struct bson_tm *tmp, struct bson_tm *(*funcp) (const int64_t *, int_fast32_t, struct bson_tm *), int_fast32_t offset, int64_t *okayp); static int64_t time2sub (struct bson_tm *tmp, struct bson_tm *(*funcp) (const int64_t *, int_fast32_t, struct bson_tm *), int_fast32_t offset, int64_t *okayp, int64_t do_norm_secs); static struct bson_tm * timesub (const int64_t *timep, int_fast32_t offset, const struct state *sp, struct bson_tm *tmp); static int64_t tmcomp (const struct bson_tm *atmp, const struct bson_tm *btmp); static struct state gmtmem; #define gmtptr (&gmtmem) static int gmt_is_set; static const int mon_lengths[2][MONSPERYEAR] = { {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; static const int year_lengths[2] = {DAYSPERNYEAR, DAYSPERLYEAR}; static void gmtload (struct state *const sp) { memset (sp, 0, sizeof (struct state)); sp->typecnt = 1; sp->charcnt = 4; sp->chars[0] = 'G'; sp->chars[1] = 'M'; sp->chars[2] = 'T'; } /* ** gmtsub is to gmtime as localsub is to localtime. 
*/ static struct bson_tm * gmtsub (const int64_t *const timep, const int_fast32_t offset, struct bson_tm *const tmp) { register struct bson_tm *result; if (!gmt_is_set) { gmt_is_set = true; gmtload (gmtptr); } result = timesub (timep, offset, gmtptr, tmp); #ifdef TM_ZONE /* ** Could get fancy here and deliver something such as ** "UT+xxxx" or "UT-xxxx" if offset is non-zero, ** but this is no time for a treasure hunt. */ tmp->TM_ZONE = offset ? wildabbr : gmtptr ? gmtptr->chars : gmt; #endif /* defined TM_ZONE */ return result; } /* ** Return the number of leap years through the end of the given year ** where, to make the math easy, the answer for year zero is defined as zero. */ static int64_t leaps_thru_end_of (register const int64_t y) { return (y >= 0) ? (y / 4 - y / 100 + y / 400) : -(leaps_thru_end_of (-(y + 1)) + 1); } static struct bson_tm * timesub (const int64_t *const timep, const int_fast32_t offset, register const struct state *const sp, register struct bson_tm *const tmp) { register const struct lsinfo *lp; register int64_t tdays; register int64_t idays; /* unsigned would be so 2003 */ register int_fast64_t rem; int64_t y; register const int *ip; register int_fast64_t corr; register int64_t hit; register int64_t i; corr = 0; hit = 0; i = (sp == NULL) ? 0 : sp->leapcnt; while (--i >= 0) { lp = &sp->lsis[i]; if (*timep >= lp->ls_trans) { if (*timep == lp->ls_trans) { hit = ((i == 0 && lp->ls_corr > 0) || lp->ls_corr > sp->lsis[i - 1].ls_corr); if (hit) while (i > 0 && sp->lsis[i].ls_trans == sp->lsis[i - 1].ls_trans + 1 && sp->lsis[i].ls_corr == sp->lsis[i - 1].ls_corr + 1) { ++hit; --i; } } corr = lp->ls_corr; break; } } y = EPOCH_YEAR; tdays = *timep / SECSPERDAY; rem = *timep - tdays * SECSPERDAY; while (tdays < 0 || tdays >= year_lengths[isleap (y)]) { int64_t newy; register int64_t tdelta; register int64_t idelta; register int64_t leapdays; tdelta = tdays / DAYSPERLYEAR; idelta = tdelta; if (idelta == 0) idelta = (tdays < 0) ? -1 : 1; newy = y; if (increment_overflow (&newy, idelta)) return NULL; leapdays = leaps_thru_end_of (newy - 1) - leaps_thru_end_of (y - 1); tdays -= ((int64_t) newy - y) * DAYSPERNYEAR; tdays -= leapdays; y = newy; } { register int_fast32_t seconds; seconds = (int_fast32_t) (tdays * SECSPERDAY); tdays = seconds / SECSPERDAY; rem += seconds - tdays * SECSPERDAY; } /* ** Given the range, we can now fearlessly cast... */ idays = (int64_t) tdays; rem += offset - corr; while (rem < 0) { rem += SECSPERDAY; --idays; } while (rem >= SECSPERDAY) { rem -= SECSPERDAY; ++idays; } while (idays < 0) { if (increment_overflow (&y, -1)) return NULL; idays += year_lengths[isleap (y)]; } while (idays >= year_lengths[isleap (y)]) { idays -= year_lengths[isleap (y)]; if (increment_overflow (&y, 1)) return NULL; } tmp->tm_year = y; if (increment_overflow (&tmp->tm_year, -TM_YEAR_BASE)) return NULL; tmp->tm_yday = idays; /* ** The "extra" mods below avoid overflow problems. */ tmp->tm_wday = EPOCH_WDAY + ((y - EPOCH_YEAR) % DAYSPERWEEK) * (DAYSPERNYEAR % DAYSPERWEEK) + leaps_thru_end_of (y - 1) - leaps_thru_end_of (EPOCH_YEAR - 1) + idays; tmp->tm_wday %= DAYSPERWEEK; if (tmp->tm_wday < 0) tmp->tm_wday += DAYSPERWEEK; tmp->tm_hour = (int64_t) (rem / SECSPERHOUR); rem %= SECSPERHOUR; tmp->tm_min = (int64_t) (rem / SECSPERMIN); /* ** A positive leap second requires a special ** representation. This uses "... ??:59:60" et seq. 
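** (For example, the leap second inserted at the end of 1998 is rendered
** as 23:59:60 rather than rolling over to 00:00:00.)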
*/ tmp->tm_sec = (int64_t) (rem % SECSPERMIN) + hit; ip = mon_lengths[isleap (y)]; for (tmp->tm_mon = 0; idays >= ip[tmp->tm_mon]; ++(tmp->tm_mon)) idays -= ip[tmp->tm_mon]; tmp->tm_mday = (int64_t) (idays + 1); tmp->tm_isdst = 0; #ifdef TM_GMTOFF tmp->TM_GMTOFF = offset; #endif /* defined TM_GMTOFF */ return tmp; } /* ** Adapted from code provided by Robert Elz, who writes: ** The "best" way to do mktime I think is based on an idea of Bob ** Kridle's (so its said...) from a long time ago. ** It does a binary search of the int64_t space. Since int64_t's are ** just 32 bits, its a max of 32 iterations (even at 64 bits it ** would still be very reasonable). */ #ifndef WRONG #define WRONG (-1) #endif /* !defined WRONG */ /* ** Normalize logic courtesy Paul Eggert. */ static int64_t increment_overflow (int64_t *const ip, int64_t j) { register int64_t const i = *ip; /* ** If i >= 0 there can only be overflow if i + j > INT_MAX ** or if j > INT_MAX - i; given i >= 0, INT_MAX - i cannot overflow. ** If i < 0 there can only be overflow if i + j < INT_MIN ** or if j < INT_MIN - i; given i < 0, INT_MIN - i cannot overflow. */ if ((i >= 0) ? (j > INT_MAX - i) : (j < INT_MIN - i)) return true; *ip += j; return false; } static int64_t increment_overflow32 (int_fast32_t *const lp, int64_t const m) { register int_fast32_t const l = *lp; if ((l >= 0) ? (m > INT_FAST32_MAX - l) : (m < INT_FAST32_MIN - l)) return true; *lp += m; return false; } static int64_t normalize_overflow (int64_t *const tensptr, int64_t *const unitsptr, const int64_t base) { register int64_t tensdelta; tensdelta = (*unitsptr >= 0) ? (*unitsptr / base) : (-1 - (-1 - *unitsptr) / base); *unitsptr -= tensdelta * base; return increment_overflow (tensptr, tensdelta); } static int64_t normalize_overflow32 (int_fast32_t *const tensptr, int64_t *const unitsptr, const int64_t base) { register int64_t tensdelta; tensdelta = (*unitsptr >= 0) ? (*unitsptr / base) : (-1 - (-1 - *unitsptr) / base); *unitsptr -= tensdelta * base; return increment_overflow32 (tensptr, tensdelta); } static int64_t tmcomp (register const struct bson_tm *const atmp, register const struct bson_tm *const btmp) { register int64_t result; if (atmp->tm_year != btmp->tm_year) return atmp->tm_year < btmp->tm_year ? -1 : 1; if ((result = (atmp->tm_mon - btmp->tm_mon)) == 0 && (result = (atmp->tm_mday - btmp->tm_mday)) == 0 && (result = (atmp->tm_hour - btmp->tm_hour)) == 0 && (result = (atmp->tm_min - btmp->tm_min)) == 0) result = atmp->tm_sec - btmp->tm_sec; return result; } static int64_t time2sub (struct bson_tm *const tmp, struct bson_tm *(*const funcp) (const int64_t *, int_fast32_t, struct bson_tm *), const int_fast32_t offset, int64_t *const okayp, const int64_t do_norm_secs) { register const struct state *sp; register int64_t dir; register int64_t i, j; register int64_t saved_seconds; register int_fast32_t li; register int64_t lo; register int64_t hi; int_fast32_t y; int64_t newt; int64_t t; struct bson_tm yourtm, mytm; *okayp = false; yourtm = *tmp; if (do_norm_secs) { if (normalize_overflow (&yourtm.tm_min, &yourtm.tm_sec, SECSPERMIN)) return WRONG; } if (normalize_overflow (&yourtm.tm_hour, &yourtm.tm_min, MINSPERHOUR)) return WRONG; if (normalize_overflow (&yourtm.tm_mday, &yourtm.tm_hour, HOURSPERDAY)) return WRONG; y = (int_fast32_t) yourtm.tm_year; if (normalize_overflow32 (&y, &yourtm.tm_mon, MONSPERYEAR)) return WRONG; /* ** Turn y into an actual year number for now. ** It is converted back to an offset from TM_YEAR_BASE later. 
*/ if (increment_overflow32 (&y, TM_YEAR_BASE)) return WRONG; while (yourtm.tm_mday <= 0) { if (increment_overflow32 (&y, -1)) return WRONG; li = y + (1 < yourtm.tm_mon); yourtm.tm_mday += year_lengths[isleap (li)]; } while (yourtm.tm_mday > DAYSPERLYEAR) { li = y + (1 < yourtm.tm_mon); yourtm.tm_mday -= year_lengths[isleap (li)]; if (increment_overflow32 (&y, 1)) return WRONG; } for (;;) { i = mon_lengths[isleap (y)][yourtm.tm_mon]; if (yourtm.tm_mday <= i) break; yourtm.tm_mday -= i; if (++yourtm.tm_mon >= MONSPERYEAR) { yourtm.tm_mon = 0; if (increment_overflow32 (&y, 1)) return WRONG; } } if (increment_overflow32 (&y, -TM_YEAR_BASE)) return WRONG; yourtm.tm_year = y; if (yourtm.tm_year != y) return WRONG; if (yourtm.tm_sec >= 0 && yourtm.tm_sec < SECSPERMIN) saved_seconds = 0; else if (y + TM_YEAR_BASE < EPOCH_YEAR) { /* ** We can't set tm_sec to 0, because that might push the ** time below the minimum representable time. ** Set tm_sec to 59 instead. ** This assumes that the minimum representable time is ** not in the same minute that a leap second was deleted from, ** which is a safer assumption than using 58 would be. */ if (increment_overflow (&yourtm.tm_sec, 1 - SECSPERMIN)) return WRONG; saved_seconds = yourtm.tm_sec; yourtm.tm_sec = SECSPERMIN - 1; } else { saved_seconds = yourtm.tm_sec; yourtm.tm_sec = 0; } /* ** Do a binary search. */ lo = INT64_MIN; hi = INT64_MAX; for (;;) { t = lo / 2 + hi / 2; if (t < lo) t = lo; else if (t > hi) t = hi; if ((*funcp) (&t, offset, &mytm) == NULL) { /* ** Assume that t is too extreme to be represented in ** a struct bson_tm; arrange things so that it is less ** extreme on the next pass. */ dir = (t > 0) ? 1 : -1; } else dir = tmcomp (&mytm, &yourtm); if (dir != 0) { if (t == lo) { if (t == time_t_max) return WRONG; ++t; ++lo; } else if (t == hi) { if (t == time_t_min) return WRONG; --t; --hi; } if (lo > hi) return WRONG; if (dir > 0) hi = t; else lo = t; continue; } if (yourtm.tm_isdst < 0 || mytm.tm_isdst == yourtm.tm_isdst) break; /* ** Right time, wrong type. ** Hunt for right time, right type. ** It's okay to guess wrong since the guess ** gets checked. */ sp = (const struct state *) gmtptr; if (sp == NULL) return WRONG; for (i = sp->typecnt - 1; i >= 0; --i) { if (sp->ttis[i].tt_isdst != yourtm.tm_isdst) continue; for (j = sp->typecnt - 1; j >= 0; --j) { if (sp->ttis[j].tt_isdst == yourtm.tm_isdst) continue; newt = t + sp->ttis[j].tt_gmtoff - sp->ttis[i].tt_gmtoff; if ((*funcp) (&newt, offset, &mytm) == NULL) continue; if (tmcomp (&mytm, &yourtm) != 0) continue; if (mytm.tm_isdst != yourtm.tm_isdst) continue; /* ** We have a match. */ t = newt; goto label; } } return WRONG; } label: newt = t + saved_seconds; if ((newt < t) != (saved_seconds < 0)) return WRONG; t = newt; if ((*funcp) (&t, offset, tmp)) *okayp = true; return t; } static int64_t time2 (struct bson_tm *const tmp, struct bson_tm *(*const funcp) (const int64_t *, int_fast32_t, struct bson_tm *), const int_fast32_t offset, int64_t *const okayp) { int64_t t; /* ** First try without normalization of seconds ** (in case tm_sec contains a value associated with a leap second). ** If that fails, try with normalization of seconds. */ t = time2sub (tmp, funcp, offset, okayp, false); return *okayp ? 
t : time2sub (tmp, funcp, offset, okayp, true); } static int64_t time1 (struct bson_tm *const tmp, struct bson_tm *(*const funcp) (const int64_t *, int_fast32_t, struct bson_tm *), const int_fast32_t offset) { register int64_t t; register const struct state *sp; register int64_t samei, otheri; register int64_t sameind, otherind; register int64_t i; register int64_t nseen; int64_t seen[TZ_MAX_TYPES]; int64_t types[TZ_MAX_TYPES]; int64_t okay; if (tmp == NULL) { errno = EINVAL; return WRONG; } if (tmp->tm_isdst > 1) tmp->tm_isdst = 1; t = time2 (tmp, funcp, offset, &okay); if (okay) return t; if (tmp->tm_isdst < 0) #ifdef PCTS /* ** POSIX Conformance Test Suite code courtesy Grant Sullivan. */ tmp->tm_isdst = 0; /* reset to std and try again */ #else return t; #endif /* !defined PCTS */ /* ** We're supposed to assume that somebody took a time of one type ** and did some math on it that yielded a "struct tm" that's bad. ** We try to divine the type they started from and adjust to the ** type they need. */ sp = (const struct state *) gmtptr; if (sp == NULL) return WRONG; for (i = 0; i < sp->typecnt; ++i) seen[i] = false; nseen = 0; for (i = sp->timecnt - 1; i >= 0; --i) if (!seen[sp->types[i]]) { seen[sp->types[i]] = true; types[nseen++] = sp->types[i]; } for (sameind = 0; sameind < nseen; ++sameind) { samei = types[sameind]; if (sp->ttis[samei].tt_isdst != tmp->tm_isdst) continue; for (otherind = 0; otherind < nseen; ++otherind) { otheri = types[otherind]; if (sp->ttis[otheri].tt_isdst == tmp->tm_isdst) continue; tmp->tm_sec += sp->ttis[otheri].tt_gmtoff - sp->ttis[samei].tt_gmtoff; tmp->tm_isdst = !tmp->tm_isdst; t = time2 (tmp, funcp, offset, &okay); if (okay) return t; tmp->tm_sec -= sp->ttis[otheri].tt_gmtoff - sp->ttis[samei].tt_gmtoff; tmp->tm_isdst = !tmp->tm_isdst; } } return WRONG; } int64_t _bson_timegm (struct bson_tm *const tmp) { if (tmp != NULL) tmp->tm_isdst = 0; return time1 (tmp, gmtsub, 0L); } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-types.h0000644000076500000240000004235313572250757023453 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_TYPES_H #define BSON_TYPES_H #include #include #include "bson/bson-macros.h" #include "bson/bson-config.h" #include "bson/bson-compat.h" #include "bson/bson-endian.h" BSON_BEGIN_DECLS /* *-------------------------------------------------------------------------- * * bson_unichar_t -- * * bson_unichar_t provides an unsigned 32-bit type for containing * unicode characters. When iterating UTF-8 sequences, this should * be used to avoid losing the high-bits of non-ascii characters. * * See also: * bson_string_append_unichar() * *-------------------------------------------------------------------------- */ typedef uint32_t bson_unichar_t; /** * bson_context_flags_t: * * This enumeration is used to configure a bson_context_t. * * %BSON_CONTEXT_NONE: Use default options. * %BSON_CONTEXT_THREAD_SAFE: Context will be called from multiple threads. 
* %BSON_CONTEXT_DISABLE_PID_CACHE: Call getpid() instead of caching the * result of getpid() when initializing the context. * %BSON_CONTEXT_DISABLE_HOST_CACHE: Call gethostname() instead of caching the * result of gethostname() when initializing the context. */ typedef enum { BSON_CONTEXT_NONE = 0, BSON_CONTEXT_THREAD_SAFE = (1 << 0), BSON_CONTEXT_DISABLE_HOST_CACHE = (1 << 1), BSON_CONTEXT_DISABLE_PID_CACHE = (1 << 2), #ifdef BSON_HAVE_SYSCALL_TID BSON_CONTEXT_USE_TASK_ID = (1 << 3), #endif } bson_context_flags_t; /** * bson_context_t: * * This structure manages context for the bson library. It handles * configuration for thread-safety and other performance related requirements. * Consumers will create a context and may use multiple under a variety of * situations. * * If your program calls fork(), you should initialize a new bson_context_t * using bson_context_init(). * * If you are using threading, it is suggested that you use a bson_context_t * per thread for best performance. Alternatively, you can initialize the * bson_context_t with BSON_CONTEXT_THREAD_SAFE, although a performance penalty * will be incurred. * * Many functions will require that you provide a bson_context_t such as OID * generation. * * This structure is opaque in that you cannot see the contents of the * structure. However, it is stack allocatable in that enough padding is * provided in _bson_context_t to hold the structure. */ typedef struct _bson_context_t bson_context_t; /** * bson_t: * * This structure manages a buffer whose contents are a properly formatted * BSON document. You may perform various transforms on the BSON documents. * Additionally, it can be iterated over using bson_iter_t. * * See bson_iter_init() for iterating the contents of a bson_t. * * When building a bson_t structure using the various append functions, * memory allocations may occur. That is performed using power of two * allocations and realloc(). * * See http://bsonspec.org for the BSON document spec. * * This structure is meant to fit in two sequential 64-byte cachelines. */ #ifdef BSON_MEMCHECK BSON_ALIGNED_BEGIN (128) typedef struct _bson_t { uint32_t flags; /* Internal flags for the bson_t. */ uint32_t len; /* Length of BSON data. */ char *canary; /* For valgrind check */ uint8_t padding[120 - sizeof (char*)]; } bson_t BSON_ALIGNED_END (128); #else BSON_ALIGNED_BEGIN (128) typedef struct _bson_t { uint32_t flags; /* Internal flags for the bson_t. */ uint32_t len; /* Length of BSON data. */ uint8_t padding[120]; /* Padding for stack allocation. */ } bson_t BSON_ALIGNED_END (128); #endif /** * BSON_INITIALIZER: * * This macro can be used to initialize a #bson_t structure on the stack * without calling bson_init(). * * |[ * bson_t b = BSON_INITIALIZER; * ]| */ #ifdef BSON_MEMCHECK #define BSON_INITIALIZER \ { \ 3, 5, \ bson_malloc (1), \ { \ 5 \ }, \ } #else #define BSON_INITIALIZER \ { \ 3, 5, \ { \ 5 \ } \ } #endif BSON_STATIC_ASSERT2 (bson_t, sizeof (bson_t) == 128); /** * bson_oid_t: * * This structure contains the binary form of a BSON Object Id as specified * on http://bsonspec.org. If you would like the bson_oid_t in string form * see bson_oid_to_string() or bson_oid_to_string_r(). */ typedef struct { uint8_t bytes[12]; } bson_oid_t; BSON_STATIC_ASSERT2 (oid_t, sizeof (bson_oid_t) == 12); /** * bson_decimal128_t: * * @high The high-order bytes of the decimal128. This field contains sign, * combination bits, exponent, and part of the coefficient continuation. * @low The low-order bytes of the decimal128. 
This field contains the second * part of the coefficient continuation. * * This structure is a boxed type containing the value for the BSON decimal128 * type. The structure stores the 128 bits such that they correspond to the * native format for the IEEE decimal128 type, if it is implemented. **/ typedef struct { #if BSON_BYTE_ORDER == BSON_LITTLE_ENDIAN uint64_t low; uint64_t high; #elif BSON_BYTE_ORDER == BSON_BIG_ENDIAN uint64_t high; uint64_t low; #endif } bson_decimal128_t; /** * bson_validate_flags_t: * * This enumeration is used for validation of BSON documents. It allows * selective control on what you wish to validate. * * %BSON_VALIDATE_NONE: No additional validation occurs. * %BSON_VALIDATE_UTF8: Check that strings are valid UTF-8. * %BSON_VALIDATE_DOLLAR_KEYS: Check that keys do not start with $. * %BSON_VALIDATE_DOT_KEYS: Check that keys do not contain a period. * %BSON_VALIDATE_UTF8_ALLOW_NULL: Allow NUL bytes in UTF-8 text. * %BSON_VALIDATE_EMPTY_KEYS: Prohibit zero-length field names */ typedef enum { BSON_VALIDATE_NONE = 0, BSON_VALIDATE_UTF8 = (1 << 0), BSON_VALIDATE_DOLLAR_KEYS = (1 << 1), BSON_VALIDATE_DOT_KEYS = (1 << 2), BSON_VALIDATE_UTF8_ALLOW_NULL = (1 << 3), BSON_VALIDATE_EMPTY_KEYS = (1 << 4), } bson_validate_flags_t; /** * bson_type_t: * * This enumeration contains all of the possible types within a BSON document. * Use bson_iter_type() to fetch the type of a field while iterating over it. */ typedef enum { BSON_TYPE_EOD = 0x00, BSON_TYPE_DOUBLE = 0x01, BSON_TYPE_UTF8 = 0x02, BSON_TYPE_DOCUMENT = 0x03, BSON_TYPE_ARRAY = 0x04, BSON_TYPE_BINARY = 0x05, BSON_TYPE_UNDEFINED = 0x06, BSON_TYPE_OID = 0x07, BSON_TYPE_BOOL = 0x08, BSON_TYPE_DATE_TIME = 0x09, BSON_TYPE_NULL = 0x0A, BSON_TYPE_REGEX = 0x0B, BSON_TYPE_DBPOINTER = 0x0C, BSON_TYPE_CODE = 0x0D, BSON_TYPE_SYMBOL = 0x0E, BSON_TYPE_CODEWSCOPE = 0x0F, BSON_TYPE_INT32 = 0x10, BSON_TYPE_TIMESTAMP = 0x11, BSON_TYPE_INT64 = 0x12, BSON_TYPE_DECIMAL128 = 0x13, BSON_TYPE_MAXKEY = 0x7F, BSON_TYPE_MINKEY = 0xFF, } bson_type_t; /** * bson_subtype_t: * * This enumeration contains the various subtypes that may be used in a binary * field. See http://bsonspec.org for more information. */ typedef enum { BSON_SUBTYPE_BINARY = 0x00, BSON_SUBTYPE_FUNCTION = 0x01, BSON_SUBTYPE_BINARY_DEPRECATED = 0x02, BSON_SUBTYPE_UUID_DEPRECATED = 0x03, BSON_SUBTYPE_UUID = 0x04, BSON_SUBTYPE_MD5 = 0x05, BSON_SUBTYPE_USER = 0x80, } bson_subtype_t; /* *-------------------------------------------------------------------------- * * bson_value_t -- * * A boxed type to contain various bson_type_t types. 
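 *
 *       A minimal usage sketch (assuming an initialized bson_iter_t named
 *       iter positioned on a field; bson_iter_value() borrows the value,
 *       so the copy below owns its own memory):
 *
 *       |[
 *       bson_value_t copy;
 *
 *       bson_value_copy (bson_iter_value (&iter), &copy);
 *       // ... inspect copy.value_type and copy.value ...
 *       bson_value_destroy (&copy);
 *       ]|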
* * See also: * bson_value_copy() * bson_value_destroy() * *-------------------------------------------------------------------------- */ BSON_ALIGNED_BEGIN (8) typedef struct _bson_value_t { bson_type_t value_type; int32_t padding; union { bson_oid_t v_oid; int64_t v_int64; int32_t v_int32; int8_t v_int8; double v_double; bool v_bool; int64_t v_datetime; struct { uint32_t timestamp; uint32_t increment; } v_timestamp; struct { char *str; uint32_t len; } v_utf8; struct { uint8_t *data; uint32_t data_len; } v_doc; struct { uint8_t *data; uint32_t data_len; bson_subtype_t subtype; } v_binary; struct { char *regex; char *options; } v_regex; struct { char *collection; uint32_t collection_len; bson_oid_t oid; } v_dbpointer; struct { char *code; uint32_t code_len; } v_code; struct { char *code; uint8_t *scope_data; uint32_t code_len; uint32_t scope_len; } v_codewscope; struct { char *symbol; uint32_t len; } v_symbol; bson_decimal128_t v_decimal128; } value; } bson_value_t BSON_ALIGNED_END (8); /** * bson_iter_t: * * This structure manages iteration over a bson_t structure. It keeps track * of the location of the current key and value within the buffer. Using the * various functions to get the value of the iter will read from these * locations. * * This structure is safe to discard on the stack. No cleanup is necessary * after using it. */ BSON_ALIGNED_BEGIN (128) typedef struct { const uint8_t *raw; /* The raw buffer being iterated. */ uint32_t len; /* The length of raw. */ uint32_t off; /* The offset within the buffer. */ uint32_t type; /* The offset of the type byte. */ uint32_t key; /* The offset of the key byte. */ uint32_t d1; /* The offset of the first data byte. */ uint32_t d2; /* The offset of the second data byte. */ uint32_t d3; /* The offset of the third data byte. */ uint32_t d4; /* The offset of the fourth data byte. */ uint32_t next_off; /* The offset of the next field. */ uint32_t err_off; /* The offset of the error. */ bson_value_t value; /* Internal value for various state. */ } bson_iter_t BSON_ALIGNED_END (128); /** * bson_reader_t: * * This structure is used to iterate over a sequence of BSON documents. It * allows for them to be iterated with the possibility of no additional * memory allocations under certain circumstances such as reading from an * incoming mongo packet. */ BSON_ALIGNED_BEGIN (BSON_ALIGN_OF_PTR) typedef struct { uint32_t type; /*< private >*/ } bson_reader_t BSON_ALIGNED_END (BSON_ALIGN_OF_PTR); /** * bson_visitor_t: * * This structure contains a series of pointers that can be executed for * each field of a BSON document based on the field type. * * For example, if an int32 field is found, visit_int32 will be called. * * When visiting each field using bson_iter_visit_all(), you may provide a * data pointer that will be provided with each callback. This might be useful * if you are marshaling to another language. * * You may pre-maturely stop the visitation of fields by returning true in your * visitor. Returning false will continue visitation to further fields. 
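 *
 * A sketch of a visitor that only watches int32 fields (the callback and
 * counter names are illustrative, and an initialized bson_iter_t named
 * iter is assumed):
 *
 * |[
 * static bool
 * count_int32 (const bson_iter_t *iter, const char *key, int32_t v, void *data)
 * {
 *    (*(int *) data)++;
 *    return false; // keep visiting
 * }
 *
 * bson_visitor_t visitor = {0};
 * int count = 0;
 *
 * visitor.visit_int32 = count_int32;
 * bson_iter_visit_all (&iter, &visitor, &count);
 * ]|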
*/ BSON_ALIGNED_BEGIN (8) typedef struct { /* run before / after descending into a document */ bool (*visit_before) (const bson_iter_t *iter, const char *key, void *data); bool (*visit_after) (const bson_iter_t *iter, const char *key, void *data); /* corrupt BSON, or unsupported type and visit_unsupported_type not set */ void (*visit_corrupt) (const bson_iter_t *iter, void *data); /* normal bson field callbacks */ bool (*visit_double) (const bson_iter_t *iter, const char *key, double v_double, void *data); bool (*visit_utf8) (const bson_iter_t *iter, const char *key, size_t v_utf8_len, const char *v_utf8, void *data); bool (*visit_document) (const bson_iter_t *iter, const char *key, const bson_t *v_document, void *data); bool (*visit_array) (const bson_iter_t *iter, const char *key, const bson_t *v_array, void *data); bool (*visit_binary) (const bson_iter_t *iter, const char *key, bson_subtype_t v_subtype, size_t v_binary_len, const uint8_t *v_binary, void *data); /* normal field with deprecated "Undefined" BSON type */ bool (*visit_undefined) (const bson_iter_t *iter, const char *key, void *data); bool (*visit_oid) (const bson_iter_t *iter, const char *key, const bson_oid_t *v_oid, void *data); bool (*visit_bool) (const bson_iter_t *iter, const char *key, bool v_bool, void *data); bool (*visit_date_time) (const bson_iter_t *iter, const char *key, int64_t msec_since_epoch, void *data); bool (*visit_null) (const bson_iter_t *iter, const char *key, void *data); bool (*visit_regex) (const bson_iter_t *iter, const char *key, const char *v_regex, const char *v_options, void *data); bool (*visit_dbpointer) (const bson_iter_t *iter, const char *key, size_t v_collection_len, const char *v_collection, const bson_oid_t *v_oid, void *data); bool (*visit_code) (const bson_iter_t *iter, const char *key, size_t v_code_len, const char *v_code, void *data); bool (*visit_symbol) (const bson_iter_t *iter, const char *key, size_t v_symbol_len, const char *v_symbol, void *data); bool (*visit_codewscope) (const bson_iter_t *iter, const char *key, size_t v_code_len, const char *v_code, const bson_t *v_scope, void *data); bool (*visit_int32) (const bson_iter_t *iter, const char *key, int32_t v_int32, void *data); bool (*visit_timestamp) (const bson_iter_t *iter, const char *key, uint32_t v_timestamp, uint32_t v_increment, void *data); bool (*visit_int64) (const bson_iter_t *iter, const char *key, int64_t v_int64, void *data); bool (*visit_maxkey) (const bson_iter_t *iter, const char *key, void *data); bool (*visit_minkey) (const bson_iter_t *iter, const char *key, void *data); /* if set, called instead of visit_corrupt when an apparently valid BSON * includes an unrecognized field type (reading future version of BSON) */ void (*visit_unsupported_type) (const bson_iter_t *iter, const char *key, uint32_t type_code, void *data); bool (*visit_decimal128) (const bson_iter_t *iter, const char *key, const bson_decimal128_t *v_decimal128, void *data); void *padding[7]; } bson_visitor_t BSON_ALIGNED_END (8); #define BSON_ERROR_BUFFER_SIZE 504 BSON_ALIGNED_BEGIN (8) typedef struct _bson_error_t { uint32_t domain; uint32_t code; char message[BSON_ERROR_BUFFER_SIZE]; } bson_error_t BSON_ALIGNED_END (8); BSON_STATIC_ASSERT2 (error_t, sizeof (bson_error_t) == 512); /** * bson_next_power_of_two: * @v: A 32-bit unsigned integer of required bytes. * * Determines the next larger power of two for the value of @v * in a constant number of operations. * * It is up to the caller to guarantee this will not overflow. 
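 *
 * For example, an input of 5 yields 8, an input of 64 yields 64, and an
 * input of 100 yields 128.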
* * Returns: The next power of 2 from @v. */ static BSON_INLINE size_t bson_next_power_of_two (size_t v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v |= v >> 16; #if BSON_WORD_SIZE == 64 v |= v >> 32; #endif v++; return v; } static BSON_INLINE bool bson_is_power_of_two (uint32_t v) { return ((v != 0) && ((v & (v - 1)) == 0)); } BSON_END_DECLS #endif /* BSON_TYPES_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-utf8.c0000644000076500000240000002632513572250757023171 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "bson/bson-memory.h" #include "bson/bson-string.h" #include "bson/bson-utf8.h" /* *-------------------------------------------------------------------------- * * _bson_utf8_get_sequence -- * * Determine the sequence length of the first UTF-8 character in * @utf8. The sequence length is stored in @seq_length and the mask * for the first character is stored in @first_mask. * * Returns: * None. * * Side effects: * @seq_length is set. * @first_mask is set. * *-------------------------------------------------------------------------- */ static BSON_INLINE void _bson_utf8_get_sequence (const char *utf8, /* IN */ uint8_t *seq_length, /* OUT */ uint8_t *first_mask) /* OUT */ { unsigned char c = *(const unsigned char *) utf8; uint8_t m; uint8_t n; /* * See the following[1] for a description of what the given multi-byte * sequences will be based on the bits set of the first byte. We also need * to mask the first byte based on that. All subsequent bytes are masked * against 0x3F. * * [1] http://www.joelonsoftware.com/articles/Unicode.html */ if ((c & 0x80) == 0) { n = 1; m = 0x7F; } else if ((c & 0xE0) == 0xC0) { n = 2; m = 0x1F; } else if ((c & 0xF0) == 0xE0) { n = 3; m = 0x0F; } else if ((c & 0xF8) == 0xF0) { n = 4; m = 0x07; } else { n = 0; m = 0; } *seq_length = n; *first_mask = m; } /* *-------------------------------------------------------------------------- * * bson_utf8_validate -- * * Validates that @utf8 is a valid UTF-8 string. Note that we only * support UTF-8 characters which have sequence length less than or equal * to 4 bytes (RFC 3629). * * If @allow_null is true, then \0 is allowed within @utf8_len bytes * of @utf8. Generally, this is bad practice since the main point of * UTF-8 strings is that they can be used with strlen() and friends. * However, some languages such as Python can send UTF-8 encoded * strings with NUL's in them. * * Parameters: * @utf8: A UTF-8 encoded string. * @utf8_len: The length of @utf8 in bytes. * @allow_null: If \0 is allowed within @utf8, exclusing trailing \0. * * Returns: * true if @utf8 is valid UTF-8. otherwise false. * * Side effects: * None. 
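 *
 * Example:
 *       A minimal sketch validating a NUL-terminated string ("caf\xc3\xa9"
 *       is "café" encoded as UTF-8; strlen() from <string.h> is assumed):
 *
 *          const char *s = "caf\xc3\xa9";
 *
 *          if (!bson_utf8_validate (s, strlen (s), false)) {
 *             // reject the input
 *          }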
* *-------------------------------------------------------------------------- */ bool bson_utf8_validate (const char *utf8, /* IN */ size_t utf8_len, /* IN */ bool allow_null) /* IN */ { bson_unichar_t c; uint8_t first_mask; uint8_t seq_length; unsigned i; unsigned j; BSON_ASSERT (utf8); for (i = 0; i < utf8_len; i += seq_length) { _bson_utf8_get_sequence (&utf8[i], &seq_length, &first_mask); /* * Ensure we have a valid multi-byte sequence length. */ if (!seq_length) { return false; } /* * Ensure we have enough bytes left. */ if ((utf8_len - i) < seq_length) { return false; } /* * Also calculate the next char as a unichar so we can * check code ranges for non-shortest form. */ c = utf8[i] & first_mask; /* * Check the high-bits for each additional sequence byte. */ for (j = i + 1; j < (i + seq_length); j++) { c = (c << 6) | (utf8[j] & 0x3F); if ((utf8[j] & 0xC0) != 0x80) { return false; } } /* * Check for NULL bytes afterwards. * * Hint: if you want to optimize this function, starting here to do * this in the same pass as the data above would probably be a good * idea. You would add a branch into the inner loop, but save possibly * on cache-line bouncing on larger strings. Just a thought. */ if (!allow_null) { for (j = 0; j < seq_length; j++) { if (((i + j) > utf8_len) || !utf8[i + j]) { return false; } } } /* * Code point won't fit in utf-16, not allowed. */ if (c > 0x0010FFFF) { return false; } /* * Byte is in reserved range for UTF-16 high-marks * for surrogate pairs. */ if ((c & 0xFFFFF800) == 0xD800) { return false; } /* * Check non-shortest form unicode. */ switch (seq_length) { case 1: if (c <= 0x007F) { continue; } return false; case 2: if ((c >= 0x0080) && (c <= 0x07FF)) { continue; } else if (c == 0) { /* Two-byte representation for NULL. */ if (!allow_null) { return false; } continue; } return false; case 3: if (((c >= 0x0800) && (c <= 0x0FFF)) || ((c >= 0x1000) && (c <= 0xFFFF))) { continue; } return false; case 4: if (((c >= 0x10000) && (c <= 0x3FFFF)) || ((c >= 0x40000) && (c <= 0xFFFFF)) || ((c >= 0x100000) && (c <= 0x10FFFF))) { continue; } return false; default: return false; } } return true; } /* *-------------------------------------------------------------------------- * * bson_utf8_escape_for_json -- * * Allocates a new string matching @utf8 except that special * characters in JSON will be escaped. The resulting string is also * UTF-8 encoded. * * Both " and \ characters will be escaped. Additionally, if a NUL * byte is found before @utf8_len bytes, it will be converted to the * two byte UTF-8 sequence. * * Parameters: * @utf8: A UTF-8 encoded string. * @utf8_len: The length of @utf8 in bytes or -1 if NUL terminated. * * Returns: * A newly allocated string that should be freed with bson_free(). * * Side effects: * None. 
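 *
 * Example:
 *       A short sketch; the result is heap-allocated and must be released
 *       with bson_free():
 *
 *          char *escaped = bson_utf8_escape_for_json ("say \"hi\"\n", -1);
 *
 *          if (escaped) {
 *             // escaped now reads: say \"hi\"\n
 *             bson_free (escaped);
 *          }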
* *-------------------------------------------------------------------------- */ char * bson_utf8_escape_for_json (const char *utf8, /* IN */ ssize_t utf8_len) /* IN */ { bson_unichar_t c; bson_string_t *str; bool length_provided = true; const char *end; BSON_ASSERT (utf8); str = bson_string_new (NULL); if (utf8_len < 0) { length_provided = false; utf8_len = strlen (utf8); } end = utf8 + utf8_len; while (utf8 < end) { c = bson_utf8_get_char (utf8); switch (c) { case '\\': case '"': bson_string_append_c (str, '\\'); bson_string_append_unichar (str, c); break; case '\b': bson_string_append (str, "\\b"); break; case '\f': bson_string_append (str, "\\f"); break; case '\n': bson_string_append (str, "\\n"); break; case '\r': bson_string_append (str, "\\r"); break; case '\t': bson_string_append (str, "\\t"); break; default: if (c < ' ') { bson_string_append_printf (str, "\\u%04x", (unsigned) c); } else { bson_string_append_unichar (str, c); } break; } if (c) { utf8 = bson_utf8_next_char (utf8); } else { if (length_provided && !*utf8) { /* we escaped nil as '\u0000', now advance past it */ utf8++; } else { /* invalid UTF-8 */ bson_string_free (str, true); return NULL; } } } return bson_string_free (str, false); } /* *-------------------------------------------------------------------------- * * bson_utf8_get_char -- * * Fetches the next UTF-8 character from the UTF-8 sequence. * * Parameters: * @utf8: A string containing validated UTF-8. * * Returns: * A 32-bit bson_unichar_t reprsenting the multi-byte sequence. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bson_unichar_t bson_utf8_get_char (const char *utf8) /* IN */ { bson_unichar_t c; uint8_t mask; uint8_t num; int i; BSON_ASSERT (utf8); _bson_utf8_get_sequence (utf8, &num, &mask); c = (*utf8) & mask; for (i = 1; i < num; i++) { c = (c << 6) | (utf8[i] & 0x3F); } return c; } /* *-------------------------------------------------------------------------- * * bson_utf8_next_char -- * * Returns an incremented pointer to the beginning of the next * multi-byte sequence in @utf8. * * Parameters: * @utf8: A string containing validated UTF-8. * * Returns: * An incremented pointer in @utf8. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const char * bson_utf8_next_char (const char *utf8) /* IN */ { uint8_t mask; uint8_t num; BSON_ASSERT (utf8); _bson_utf8_get_sequence (utf8, &num, &mask); return utf8 + num; } /* *-------------------------------------------------------------------------- * * bson_utf8_from_unichar -- * * Converts the unichar to a sequence of utf8 bytes and stores those * in @utf8. The number of bytes in the sequence are stored in @len. * * Parameters: * @unichar: A bson_unichar_t. * @utf8: A location for the multi-byte sequence. * @len: A location for number of bytes stored in @utf8. * * Returns: * None. * * Side effects: * @utf8 is set. * @len is set. 
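 *
 * Example:
 *       A small sketch encoding U+20AC (the euro sign), which occupies
 *       three bytes in UTF-8:
 *
 *          char buf[6];
 *          uint32_t len;
 *
 *          bson_utf8_from_unichar (0x20AC, buf, &len);
 *          // len is now 3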
* *-------------------------------------------------------------------------- */ void bson_utf8_from_unichar (bson_unichar_t unichar, /* IN */ char utf8[BSON_ENSURE_ARRAY_PARAM_SIZE (6)], /* OUT */ uint32_t *len) /* OUT */ { BSON_ASSERT (utf8); BSON_ASSERT (len); if (unichar <= 0x7F) { utf8[0] = unichar; *len = 1; } else if (unichar <= 0x7FF) { *len = 2; utf8[0] = 0xC0 | ((unichar >> 6) & 0x3F); utf8[1] = 0x80 | ((unichar) &0x3F); } else if (unichar <= 0xFFFF) { *len = 3; utf8[0] = 0xE0 | ((unichar >> 12) & 0xF); utf8[1] = 0x80 | ((unichar >> 6) & 0x3F); utf8[2] = 0x80 | ((unichar) &0x3F); } else if (unichar <= 0x1FFFFF) { *len = 4; utf8[0] = 0xF0 | ((unichar >> 18) & 0x7); utf8[1] = 0x80 | ((unichar >> 12) & 0x3F); utf8[2] = 0x80 | ((unichar >> 6) & 0x3F); utf8[3] = 0x80 | ((unichar) &0x3F); } else { *len = 0; } } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-utf8.h0000644000076500000240000000226413572250757023172 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_UTF8_H #define BSON_UTF8_H #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS BSON_EXPORT (bool) bson_utf8_validate (const char *utf8, size_t utf8_len, bool allow_null); BSON_EXPORT (char *) bson_utf8_escape_for_json (const char *utf8, ssize_t utf8_len); BSON_EXPORT (bson_unichar_t) bson_utf8_get_char (const char *utf8); BSON_EXPORT (const char *) bson_utf8_next_char (const char *utf8); BSON_EXPORT (void) bson_utf8_from_unichar (bson_unichar_t unichar, char utf8[6], uint32_t *len); BSON_END_DECLS #endif /* BSON_UTF8_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-value.c0000644000076500000240000001465013572250757023415 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "bson/bson-memory.h" #include "bson/bson-string.h" #include "bson/bson-value.h" #include "bson/bson-oid.h" void bson_value_copy (const bson_value_t *src, /* IN */ bson_value_t *dst) /* OUT */ { BSON_ASSERT (src); BSON_ASSERT (dst); dst->value_type = src->value_type; switch (src->value_type) { case BSON_TYPE_DOUBLE: dst->value.v_double = src->value.v_double; break; case BSON_TYPE_UTF8: dst->value.v_utf8.len = src->value.v_utf8.len; dst->value.v_utf8.str = bson_malloc (src->value.v_utf8.len + 1); memcpy ( dst->value.v_utf8.str, src->value.v_utf8.str, dst->value.v_utf8.len); dst->value.v_utf8.str[dst->value.v_utf8.len] = '\0'; break; case BSON_TYPE_DOCUMENT: case BSON_TYPE_ARRAY: dst->value.v_doc.data_len = src->value.v_doc.data_len; dst->value.v_doc.data = bson_malloc (src->value.v_doc.data_len); memcpy (dst->value.v_doc.data, src->value.v_doc.data, dst->value.v_doc.data_len); break; case BSON_TYPE_BINARY: dst->value.v_binary.subtype = src->value.v_binary.subtype; dst->value.v_binary.data_len = src->value.v_binary.data_len; dst->value.v_binary.data = bson_malloc (src->value.v_binary.data_len); if (dst->value.v_binary.data_len) { memcpy (dst->value.v_binary.data, src->value.v_binary.data, dst->value.v_binary.data_len); } break; case BSON_TYPE_OID: bson_oid_copy (&src->value.v_oid, &dst->value.v_oid); break; case BSON_TYPE_BOOL: dst->value.v_bool = src->value.v_bool; break; case BSON_TYPE_DATE_TIME: dst->value.v_datetime = src->value.v_datetime; break; case BSON_TYPE_REGEX: dst->value.v_regex.regex = bson_strdup (src->value.v_regex.regex); dst->value.v_regex.options = bson_strdup (src->value.v_regex.options); break; case BSON_TYPE_DBPOINTER: dst->value.v_dbpointer.collection_len = src->value.v_dbpointer.collection_len; dst->value.v_dbpointer.collection = bson_malloc (src->value.v_dbpointer.collection_len + 1); memcpy (dst->value.v_dbpointer.collection, src->value.v_dbpointer.collection, dst->value.v_dbpointer.collection_len); dst->value.v_dbpointer.collection[dst->value.v_dbpointer.collection_len] = '\0'; bson_oid_copy (&src->value.v_dbpointer.oid, &dst->value.v_dbpointer.oid); break; case BSON_TYPE_CODE: dst->value.v_code.code_len = src->value.v_code.code_len; dst->value.v_code.code = bson_malloc (src->value.v_code.code_len + 1); memcpy (dst->value.v_code.code, src->value.v_code.code, dst->value.v_code.code_len); dst->value.v_code.code[dst->value.v_code.code_len] = '\0'; break; case BSON_TYPE_SYMBOL: dst->value.v_symbol.len = src->value.v_symbol.len; dst->value.v_symbol.symbol = bson_malloc (src->value.v_symbol.len + 1); memcpy (dst->value.v_symbol.symbol, src->value.v_symbol.symbol, dst->value.v_symbol.len); dst->value.v_symbol.symbol[dst->value.v_symbol.len] = '\0'; break; case BSON_TYPE_CODEWSCOPE: dst->value.v_codewscope.code_len = src->value.v_codewscope.code_len; dst->value.v_codewscope.code = bson_malloc (src->value.v_codewscope.code_len + 1); memcpy (dst->value.v_codewscope.code, src->value.v_codewscope.code, dst->value.v_codewscope.code_len); dst->value.v_codewscope.code[dst->value.v_codewscope.code_len] = '\0'; dst->value.v_codewscope.scope_len = src->value.v_codewscope.scope_len; dst->value.v_codewscope.scope_data = bson_malloc (src->value.v_codewscope.scope_len); memcpy (dst->value.v_codewscope.scope_data, src->value.v_codewscope.scope_data, dst->value.v_codewscope.scope_len); break; case BSON_TYPE_INT32: dst->value.v_int32 = src->value.v_int32; break; case BSON_TYPE_TIMESTAMP: dst->value.v_timestamp.timestamp = src->value.v_timestamp.timestamp; 
dst->value.v_timestamp.increment = src->value.v_timestamp.increment; break; case BSON_TYPE_INT64: dst->value.v_int64 = src->value.v_int64; break; case BSON_TYPE_DECIMAL128: dst->value.v_decimal128 = src->value.v_decimal128; break; case BSON_TYPE_UNDEFINED: case BSON_TYPE_NULL: case BSON_TYPE_MAXKEY: case BSON_TYPE_MINKEY: break; case BSON_TYPE_EOD: default: BSON_ASSERT (false); return; } } void bson_value_destroy (bson_value_t *value) /* IN */ { if (!value) { return; } switch (value->value_type) { case BSON_TYPE_UTF8: bson_free (value->value.v_utf8.str); break; case BSON_TYPE_DOCUMENT: case BSON_TYPE_ARRAY: bson_free (value->value.v_doc.data); break; case BSON_TYPE_BINARY: bson_free (value->value.v_binary.data); break; case BSON_TYPE_REGEX: bson_free (value->value.v_regex.regex); bson_free (value->value.v_regex.options); break; case BSON_TYPE_DBPOINTER: bson_free (value->value.v_dbpointer.collection); break; case BSON_TYPE_CODE: bson_free (value->value.v_code.code); break; case BSON_TYPE_SYMBOL: bson_free (value->value.v_symbol.symbol); break; case BSON_TYPE_CODEWSCOPE: bson_free (value->value.v_codewscope.code); bson_free (value->value.v_codewscope.scope_data); break; case BSON_TYPE_DOUBLE: case BSON_TYPE_UNDEFINED: case BSON_TYPE_OID: case BSON_TYPE_BOOL: case BSON_TYPE_DATE_TIME: case BSON_TYPE_NULL: case BSON_TYPE_INT32: case BSON_TYPE_TIMESTAMP: case BSON_TYPE_INT64: case BSON_TYPE_DECIMAL128: case BSON_TYPE_MAXKEY: case BSON_TYPE_MINKEY: case BSON_TYPE_EOD: default: break; } } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-value.h0000644000076500000240000000165413572250757023422 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_VALUE_H #define BSON_VALUE_H #include "bson/bson-macros.h" #include "bson/bson-types.h" BSON_BEGIN_DECLS BSON_EXPORT (void) bson_value_copy (const bson_value_t *src, bson_value_t *dst); BSON_EXPORT (void) bson_value_destroy (bson_value_t *value); BSON_END_DECLS #endif /* BSON_VALUE_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-version-functions.c0000644000076500000240000000320013572250757025761 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-version.h" #include "bson/bson-version-functions.h" /** * bson_get_major_version: * * Helper function to return the runtime major version of the library. 
*/ int bson_get_major_version (void) { return BSON_MAJOR_VERSION; } /** * bson_get_minor_version: * * Helper function to return the runtime minor version of the library. */ int bson_get_minor_version (void) { return BSON_MINOR_VERSION; } /** * bson_get_micro_version: * * Helper function to return the runtime micro version of the library. */ int bson_get_micro_version (void) { return BSON_MICRO_VERSION; } /** * bson_get_version: * * Helper function to return the runtime string version of the library. */ const char * bson_get_version (void) { return BSON_VERSION_S; } /** * bson_check_version: * * True if libmongoc's version is greater than or equal to the required * version. */ bool bson_check_version (int required_major, int required_minor, int required_micro) { return BSON_CHECK_VERSION (required_major, required_minor, required_micro); } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-version-functions.h0000644000076500000240000000211313572250757025770 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_VERSION_FUNCTIONS_H #define BSON_VERSION_FUNCTIONS_H #include "bson/bson-types.h" BSON_BEGIN_DECLS BSON_EXPORT (int) bson_get_major_version (void); BSON_EXPORT (int) bson_get_minor_version (void); BSON_EXPORT (int) bson_get_micro_version (void); BSON_EXPORT (const char *) bson_get_version (void); BSON_EXPORT (bool) bson_check_version (int required_major, int required_minor, int required_micro); BSON_END_DECLS #endif /* BSON_VERSION_FUNCTIONS_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-version.h0000644000076500000240000000453113572250757023770 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined (BSON_INSIDE) && !defined (BSON_COMPILATION) #error "Only can be included directly." #endif #ifndef BSON_VERSION_H #define BSON_VERSION_H /** * BSON_MAJOR_VERSION: * * BSON major version component (e.g. 1 if %BSON_VERSION is 1.2.3) */ #define BSON_MAJOR_VERSION (1) /** * BSON_MINOR_VERSION: * * BSON minor version component (e.g. 2 if %BSON_VERSION is 1.2.3) */ #define BSON_MINOR_VERSION (15) /** * BSON_MICRO_VERSION: * * BSON micro version component (e.g. 3 if %BSON_VERSION is 1.2.3) */ #define BSON_MICRO_VERSION (2) /** * BSON_PRERELEASE_VERSION: * * BSON prerelease version component (e.g. pre if %BSON_VERSION is 1.2.3-pre) */ #define BSON_PRERELEASE_VERSION () /** * BSON_VERSION: * * BSON version. 
*/ #define BSON_VERSION (1.15.2) /** * BSON_VERSION_S: * * BSON version, encoded as a string, useful for printing and * concatenation. */ #define BSON_VERSION_S "1.15.2" /** * BSON_VERSION_HEX: * * BSON version, encoded as an hexadecimal number, useful for * integer comparisons. */ #define BSON_VERSION_HEX (BSON_MAJOR_VERSION << 24 | \ BSON_MINOR_VERSION << 16 | \ BSON_MICRO_VERSION << 8) /** * BSON_CHECK_VERSION: * @major: required major version * @minor: required minor version * @micro: required micro version * * Compile-time version checking. Evaluates to %TRUE if the version * of BSON is greater than the required one. */ #define BSON_CHECK_VERSION(major,minor,micro) \ (BSON_MAJOR_VERSION > (major) || \ (BSON_MAJOR_VERSION == (major) && BSON_MINOR_VERSION > (minor)) || \ (BSON_MAJOR_VERSION == (major) && BSON_MINOR_VERSION == (minor) && \ BSON_MICRO_VERSION >= (micro))) #endif /* BSON_VERSION_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-version.h.in0000644000076500000240000000467213572250757024403 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined (BSON_INSIDE) && !defined (BSON_COMPILATION) #error "Only can be included directly." #endif #ifndef BSON_VERSION_H #define BSON_VERSION_H /** * BSON_MAJOR_VERSION: * * BSON major version component (e.g. 1 if %BSON_VERSION is 1.2.3) */ #define BSON_MAJOR_VERSION (@BSON_MAJOR_VERSION@) /** * BSON_MINOR_VERSION: * * BSON minor version component (e.g. 2 if %BSON_VERSION is 1.2.3) */ #define BSON_MINOR_VERSION (@BSON_MINOR_VERSION@) /** * BSON_MICRO_VERSION: * * BSON micro version component (e.g. 3 if %BSON_VERSION is 1.2.3) */ #define BSON_MICRO_VERSION (@BSON_MICRO_VERSION@) /** * BSON_PRERELEASE_VERSION: * * BSON prerelease version component (e.g. pre if %BSON_VERSION is 1.2.3-pre) */ #define BSON_PRERELEASE_VERSION (@BSON_PRERELEASE_VERSION@) /** * BSON_VERSION: * * BSON version. */ #define BSON_VERSION (@BSON_VERSION@) /** * BSON_VERSION_S: * * BSON version, encoded as a string, useful for printing and * concatenation. */ #define BSON_VERSION_S "@BSON_VERSION@" /** * BSON_VERSION_HEX: * * BSON version, encoded as an hexadecimal number, useful for * integer comparisons. */ #define BSON_VERSION_HEX (BSON_MAJOR_VERSION << 24 | \ BSON_MINOR_VERSION << 16 | \ BSON_MICRO_VERSION << 8) /** * BSON_CHECK_VERSION: * @major: required major version * @minor: required minor version * @micro: required micro version * * Compile-time version checking. Evaluates to %TRUE if the version * of BSON is greater than the required one. */ #define BSON_CHECK_VERSION(major,minor,micro) \ (BSON_MAJOR_VERSION > (major) || \ (BSON_MAJOR_VERSION == (major) && BSON_MINOR_VERSION > (minor)) || \ (BSON_MAJOR_VERSION == (major) && BSON_MINOR_VERSION == (minor) && \ BSON_MICRO_VERSION >= (micro))) #endif /* BSON_VERSION_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-writer.c0000644000076500000240000001531113572250757023610 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-private.h" #include "bson/bson-writer.h" struct _bson_writer_t { bool ready; uint8_t **buf; size_t *buflen; size_t offset; bson_realloc_func realloc_func; void *realloc_func_ctx; bson_t b; }; /* *-------------------------------------------------------------------------- * * bson_writer_new -- * * Creates a new instance of bson_writer_t using the buffer, length, * offset, and realloc() function supplied. * * The caller is expected to clean up the structure when finished * using bson_writer_destroy(). * * Parameters: * @buf: (inout): A pointer to a target buffer. * @buflen: (inout): A pointer to the buffer length. * @offset: The offset in the target buffer to start from. * @realloc_func: A realloc() style function or NULL. * * Returns: * A newly allocated bson_writer_t that should be freed with * bson_writer_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ bson_writer_t * bson_writer_new (uint8_t **buf, /* IN */ size_t *buflen, /* IN */ size_t offset, /* IN */ bson_realloc_func realloc_func, /* IN */ void *realloc_func_ctx) /* IN */ { bson_writer_t *writer; writer = bson_malloc0 (sizeof *writer); writer->buf = buf; writer->buflen = buflen; writer->offset = offset; writer->realloc_func = realloc_func; writer->realloc_func_ctx = realloc_func_ctx; writer->ready = true; return writer; } /* *-------------------------------------------------------------------------- * * bson_writer_destroy -- * * Cleanup after @writer and release any allocated memory. Note that * the buffer supplied to bson_writer_new() is NOT freed from this * method. The caller is responsible for that. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_writer_destroy (bson_writer_t *writer) /* IN */ { bson_free (writer); } /* *-------------------------------------------------------------------------- * * bson_writer_get_length -- * * Fetches the current length of the content written by the buffer * (including the initial offset). This includes a partly written * document currently being written. * * This is useful if you want to check to see if you've passed a given * memory boundary that cannot be sent in a packet. See * bson_writer_rollback() to abort the current document being written. * * Returns: * The number of bytes written plus initial offset. * * Side effects: * None. * *-------------------------------------------------------------------------- */ size_t bson_writer_get_length (bson_writer_t *writer) /* IN */ { return writer->offset + writer->b.len; } /* *-------------------------------------------------------------------------- * * bson_writer_begin -- * * Begins writing a new document. The caller may use the bson * structure to write out a new BSON document. When completed, the * caller must call either bson_writer_end() or * bson_writer_rollback(). * * Parameters: * @writer: A bson_writer_t. 
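 *
 * Editorial usage sketch (added for this annotated listing; not upstream
 * documentation). It illustrates the "end or rollback" contract described
 * above, assuming @writer was created with bson_writer_new() and that doc
 * and i are in scope; MAX_BATCH_BYTES is a caller-chosen limit, not a
 * libbson constant.
 *
 *    bson_writer_begin (writer, &doc);
 *    BSON_APPEND_INT32 (doc, "i", i);
 *    if (bson_writer_get_length (writer) > MAX_BATCH_BYTES) {
 *       bson_writer_rollback (writer);    discard the in-progress document
 *    } else {
 *       bson_writer_end (writer);         commit it to the shared buffer
 *    }
 *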
* @bson: (out): A location for a bson_t*. * * Returns: * true if the underlying realloc was successful; otherwise false. * * Side effects: * @bson is initialized if true is returned. * *-------------------------------------------------------------------------- */ bool bson_writer_begin (bson_writer_t *writer, /* IN */ bson_t **bson) /* OUT */ { bson_impl_alloc_t *b; bool grown = false; BSON_ASSERT (writer); BSON_ASSERT (writer->ready); BSON_ASSERT (bson); writer->ready = false; memset (&writer->b, 0, sizeof (bson_t)); b = (bson_impl_alloc_t *) &writer->b; b->flags = BSON_FLAG_STATIC | BSON_FLAG_NO_FREE; b->len = 5; b->parent = NULL; b->buf = writer->buf; b->buflen = writer->buflen; b->offset = writer->offset; b->alloc = NULL; b->alloclen = 0; b->realloc = writer->realloc_func; b->realloc_func_ctx = writer->realloc_func_ctx; while ((writer->offset + writer->b.len) > *writer->buflen) { if (!writer->realloc_func) { memset (&writer->b, 0, sizeof (bson_t)); writer->ready = true; return false; } grown = true; if (!*writer->buflen) { *writer->buflen = 64; } else { (*writer->buflen) *= 2; } } if (grown) { *writer->buf = writer->realloc_func ( *writer->buf, *writer->buflen, writer->realloc_func_ctx); } memset ((*writer->buf) + writer->offset + 1, 0, 5); (*writer->buf)[writer->offset] = 5; *bson = &writer->b; return true; } /* *-------------------------------------------------------------------------- * * bson_writer_end -- * * Complete writing of a bson_writer_t to the buffer supplied. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_writer_end (bson_writer_t *writer) /* IN */ { BSON_ASSERT (writer); BSON_ASSERT (!writer->ready); writer->offset += writer->b.len; memset (&writer->b, 0, sizeof (bson_t)); writer->ready = true; } /* *-------------------------------------------------------------------------- * * bson_writer_rollback -- * * Abort the appending of the current bson_t to the memory region * managed by @writer. This is useful if you detected that you went * past a particular memory limit. For example, MongoDB has 48MB * message limits. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void bson_writer_rollback (bson_writer_t *writer) /* IN */ { BSON_ASSERT (writer); if (writer->b.len) { memset (&writer->b, 0, sizeof (bson_t)); } writer->ready = true; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson-writer.h0000644000076500000240000000337413572250757023623 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson-prelude.h" #ifndef BSON_WRITER_H #define BSON_WRITER_H #include "bson/bson.h" BSON_BEGIN_DECLS /** * bson_writer_t: * * The bson_writer_t structure is a helper for writing a series of BSON * documents to a single malloc() buffer. You can provide a realloc() style * function to grow the buffer as you go. 
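 *
 * Editorial usage sketch (added for this annotated listing, not part of the
 * upstream header): a minimal end-to-end use of the writer API declared
 * below, packing several documents back to back into one growable buffer.
 *
 *    uint8_t *buf = NULL;
 *    size_t buflen = 0;
 *    bson_writer_t *writer;
 *    bson_t *doc;
 *    int i;
 *
 *    writer = bson_writer_new (&buf, &buflen, 0, bson_realloc_ctx, NULL);
 *    for (i = 0; i < 3; i++) {
 *       bson_writer_begin (writer, &doc);
 *       BSON_APPEND_INT32 (doc, "i", i);
 *       bson_writer_end (writer);
 *    }
 *    bson_writer_destroy (writer);   buf now holds three documents;
 *    bson_free (buf);                the caller owns and frees it
 *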
* * This is useful if you want to build a series of BSON documents right into * the target buffer for an outgoing packet. The offset parameter allows you to * start at an offset of the target buffer. */ typedef struct _bson_writer_t bson_writer_t; BSON_EXPORT (bson_writer_t *) bson_writer_new (uint8_t **buf, size_t *buflen, size_t offset, bson_realloc_func realloc_func, void *realloc_func_ctx); BSON_EXPORT (void) bson_writer_destroy (bson_writer_t *writer); BSON_EXPORT (size_t) bson_writer_get_length (bson_writer_t *writer); BSON_EXPORT (bool) bson_writer_begin (bson_writer_t *writer, bson_t **bson); BSON_EXPORT (void) bson_writer_end (bson_writer_t *writer); BSON_EXPORT (void) bson_writer_rollback (bson_writer_t *writer); BSON_END_DECLS #endif /* BSON_WRITER_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson.c0000644000076500000240000025670013572250757022307 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson.h" #include "bson/bson-config.h" #include "bson/bson-private.h" #include "bson/bson-string.h" #include "bson/bson-iso8601-private.h" #include "common-b64-private.h" #include #include #ifndef BSON_MAX_RECURSION #define BSON_MAX_RECURSION 200 #endif typedef enum { BSON_VALIDATE_PHASE_START, BSON_VALIDATE_PHASE_TOP, BSON_VALIDATE_PHASE_LF_REF_KEY, BSON_VALIDATE_PHASE_LF_REF_UTF8, BSON_VALIDATE_PHASE_LF_ID_KEY, BSON_VALIDATE_PHASE_LF_DB_KEY, BSON_VALIDATE_PHASE_LF_DB_UTF8, BSON_VALIDATE_PHASE_NOT_DBREF, } bson_validate_phase_t; typedef enum { BSON_JSON_MODE_LEGACY, BSON_JSON_MODE_CANONICAL, BSON_JSON_MODE_RELAXED, } bson_json_mode_t; /* * Structures. */ typedef struct { bson_validate_flags_t flags; ssize_t err_offset; bson_validate_phase_t phase; bson_error_t error; } bson_validate_state_t; typedef struct { uint32_t count; bool keys; ssize_t *err_offset; uint32_t depth; bson_string_t *str; bson_json_mode_t mode; } bson_json_state_t; /* * Forward declarations. */ static bool _bson_as_json_visit_array (const bson_iter_t *iter, const char *key, const bson_t *v_array, void *data); static bool _bson_as_json_visit_document (const bson_iter_t *iter, const char *key, const bson_t *v_document, void *data); static char * _bson_as_json_visit_all (const bson_t *bson, size_t *length, bson_json_mode_t mode); /* * Globals. */ static const uint8_t gZero; /* *-------------------------------------------------------------------------- * * _bson_impl_inline_grow -- * * Document growth implementation for documents that currently * contain stack based buffers. The document may be switched to * a malloc based buffer. * * Returns: * true if successful; otherwise false indicating INT_MAX overflow. * * Side effects: * None. 
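 *
 * Editorial note (added for this annotated listing): callers never invoke
 * this helper directly; the switch from the stack ("inline") buffer to a
 * heap allocation happens transparently while appending. A small sketch,
 * assuming the roughly 120-byte inline capacity used elsewhere in this
 * file:
 *
 *    bson_t b;
 *    char key[16];
 *    int i;
 *
 *    bson_init (&b);                     starts on the inline buffer
 *    for (i = 0; i < 64; i++) {
 *       snprintf (key, sizeof key, "k%d", i);
 *       BSON_APPEND_INT32 (&b, key, i);  moves to malloc once the data
 *    }                                   outgrows the inline capacity
 *    bson_destroy (&b);
 *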
* *-------------------------------------------------------------------------- */ static bool _bson_impl_inline_grow (bson_impl_inline_t *impl, /* IN */ size_t size) /* IN */ { bson_impl_alloc_t *alloc = (bson_impl_alloc_t *) impl; uint8_t *data; size_t req; if (((size_t) impl->len + size) <= sizeof impl->data) { return true; } req = bson_next_power_of_two (impl->len + size); if (req <= INT32_MAX) { data = bson_malloc (req); memcpy (data, impl->data, impl->len); #ifdef BSON_MEMCHECK bson_free (impl->canary); #endif alloc->flags &= ~BSON_FLAG_INLINE; alloc->parent = NULL; alloc->depth = 0; alloc->buf = &alloc->alloc; alloc->buflen = &alloc->alloclen; alloc->offset = 0; alloc->alloc = data; alloc->alloclen = req; alloc->realloc = bson_realloc_ctx; alloc->realloc_func_ctx = NULL; return true; } return false; } /* *-------------------------------------------------------------------------- * * _bson_impl_alloc_grow -- * * Document growth implementation for documents containing malloc * based buffers. * * Returns: * true if successful; otherwise false indicating INT_MAX overflow. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _bson_impl_alloc_grow (bson_impl_alloc_t *impl, /* IN */ size_t size) /* IN */ { size_t req; /* * Determine how many bytes we need for this document in the buffer * including necessary trailing bytes for parent documents. */ req = (impl->offset + impl->len + size + impl->depth); if (req <= *impl->buflen) { return true; } req = bson_next_power_of_two (req); if ((req <= INT32_MAX) && impl->realloc) { *impl->buf = impl->realloc (*impl->buf, req, impl->realloc_func_ctx); *impl->buflen = req; return true; } return false; } /* *-------------------------------------------------------------------------- * * _bson_grow -- * * Grows the bson_t structure to be large enough to contain @size * bytes. * * Returns: * true if successful, false if the size would overflow. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _bson_grow (bson_t *bson, /* IN */ uint32_t size) /* IN */ { if ((bson->flags & BSON_FLAG_INLINE)) { return _bson_impl_inline_grow ((bson_impl_inline_t *) bson, size); } return _bson_impl_alloc_grow ((bson_impl_alloc_t *) bson, size); } /* *-------------------------------------------------------------------------- * * _bson_data -- * * A helper function to return the contents of the bson document * taking into account the polymorphic nature of bson_t. * * Returns: * A buffer which should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static BSON_INLINE uint8_t * _bson_data (const bson_t *bson) /* IN */ { if ((bson->flags & BSON_FLAG_INLINE)) { return ((bson_impl_inline_t *) bson)->data; } else { bson_impl_alloc_t *impl = (bson_impl_alloc_t *) bson; return (*impl->buf) + impl->offset; } } /* *-------------------------------------------------------------------------- * * _bson_encode_length -- * * Helper to encode the length of the bson_t in the first 4 bytes * of the bson document. Little endian format is used as specified * by bsonspec. * * Returns: * None. * * Side effects: * None. 
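 *
 * Editorial sketch (added for this annotated listing): the effect of this
 * helper is observable through the public API, since the first four bytes
 * returned by bson_get_data() always hold the little-endian total length.
 *
 *    bson_t b = BSON_INITIALIZER;
 *    const uint8_t *data;
 *    uint32_t header;
 *
 *    BSON_APPEND_INT32 (&b, "x", 1);
 *    data = bson_get_data (&b);
 *    header = (uint32_t) data[0] | ((uint32_t) data[1] << 8) |
 *             ((uint32_t) data[2] << 16) | ((uint32_t) data[3] << 24);
 *    BSON_ASSERT (header == b.len);
 *    bson_destroy (&b);
 *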
* *-------------------------------------------------------------------------- */ static BSON_INLINE void _bson_encode_length (bson_t *bson) /* IN */ { #if BSON_BYTE_ORDER == BSON_LITTLE_ENDIAN memcpy (_bson_data (bson), &bson->len, sizeof (bson->len)); #else uint32_t length_le = BSON_UINT32_TO_LE (bson->len); memcpy (_bson_data (bson), &length_le, sizeof (length_le)); #endif } /* *-------------------------------------------------------------------------- * * _bson_append_va -- * * Appends the length,buffer pairs to the bson_t. @n_bytes is an * optimization to perform one array growth rather than many small * growths. * * @bson: A bson_t * @n_bytes: The number of bytes to append to the document. * @n_pairs: The number of length,buffer pairs. * @first_len: Length of first buffer. * @first_data: First buffer. * @args: va_list of additional tuples. * * Returns: * true if the bytes were appended successfully. * false if it bson would overflow INT_MAX. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static BSON_INLINE bool _bson_append_va (bson_t *bson, /* IN */ uint32_t n_bytes, /* IN */ uint32_t n_pairs, /* IN */ uint32_t first_len, /* IN */ const uint8_t *first_data, /* IN */ va_list args) /* IN */ { const uint8_t *data; uint32_t data_len; uint8_t *buf; BSON_ASSERT (!(bson->flags & BSON_FLAG_IN_CHILD)); BSON_ASSERT (!(bson->flags & BSON_FLAG_RDONLY)); if (BSON_UNLIKELY (!_bson_grow (bson, n_bytes))) { return false; } data = first_data; data_len = first_len; buf = _bson_data (bson) + bson->len - 1; do { n_pairs--; memcpy (buf, data, data_len); bson->len += data_len; buf += data_len; if (n_pairs) { data_len = va_arg (args, uint32_t); data = va_arg (args, const uint8_t *); } } while (n_pairs); _bson_encode_length (bson); *buf = '\0'; return true; } /* *-------------------------------------------------------------------------- * * _bson_append -- * * Variadic function to append length,buffer pairs to a bson_t. If the * append would cause the bson_t to overflow a 32-bit length, it will * return false and no append will have occurred. * * Parameters: * @bson: A bson_t. * @n_pairs: Number of length,buffer pairs. * @n_bytes: the total number of bytes being appended. * @first_len: Length of first buffer. * @first_data: First buffer. * * Returns: * true if successful; otherwise false indicating INT_MAX overflow. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _bson_append (bson_t *bson, /* IN */ uint32_t n_pairs, /* IN */ uint32_t n_bytes, /* IN */ uint32_t first_len, /* IN */ const uint8_t *first_data, /* IN */ ...) { va_list args; bool ok; BSON_ASSERT (n_pairs); BSON_ASSERT (first_len); BSON_ASSERT (first_data); /* * Check to see if this append would overflow 32-bit signed integer. I know * what you're thinking. BSON uses a signed 32-bit length field? Yeah. It * does. */ if (BSON_UNLIKELY (n_bytes > (BSON_MAX_SIZE - bson->len))) { return false; } va_start (args, first_data); ok = _bson_append_va (bson, n_bytes, n_pairs, first_len, first_data, args); va_end (args); return ok; } /* *-------------------------------------------------------------------------- * * _bson_append_bson_begin -- * * Begin appending a subdocument or subarray to the document using * the key provided by @key. * * If @key_length is < 0, then strlen() will be called on @key * to determine the length. * * @key_type MUST be either BSON_TYPE_DOCUMENT or BSON_TYPE_ARRAY. 
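 *
 * Editorial note (added for this annotated listing): this helper is static;
 * callers reach it through bson_append_document_begin() and
 * bson_append_array_begin() below. A minimal sketch of that public path:
 *
 *    bson_t parent = BSON_INITIALIZER;
 *    bson_t child;
 *
 *    bson_append_document_begin (&parent, "address", -1, &child);
 *    BSON_APPEND_UTF8 (&child, "city", "Reykjavik");
 *    bson_append_document_end (&parent, &child);
 *    bson_destroy (&parent);
 *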
* * Returns: * true if successful; otherwise false indicating INT_MAX overflow. * * Side effects: * @child is initialized if true is returned. * *-------------------------------------------------------------------------- */ static bool _bson_append_bson_begin (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ bson_type_t child_type, /* IN */ bson_t *child) /* OUT */ { const uint8_t type = child_type; const uint8_t empty[5] = {5}; bson_impl_alloc_t *aparent = (bson_impl_alloc_t *) bson; bson_impl_alloc_t *achild = (bson_impl_alloc_t *) child; BSON_ASSERT (!(bson->flags & BSON_FLAG_RDONLY)); BSON_ASSERT (!(bson->flags & BSON_FLAG_IN_CHILD)); BSON_ASSERT (key); BSON_ASSERT ((child_type == BSON_TYPE_DOCUMENT) || (child_type == BSON_TYPE_ARRAY)); BSON_ASSERT (child); if (key_length < 0) { key_length = (int) strlen (key); } /* * If the parent is an inline bson_t, then we need to convert * it to a heap allocated buffer. This makes extending buffers * of child bson documents much simpler logic, as they can just * realloc the *buf pointer. */ if ((bson->flags & BSON_FLAG_INLINE)) { BSON_ASSERT (bson->len <= 120); if (!_bson_grow (bson, 128 - bson->len)) { return false; } BSON_ASSERT (!(bson->flags & BSON_FLAG_INLINE)); } /* * Append the type and key for the field. */ if (!_bson_append (bson, 4, (1 + key_length + 1 + 5), 1, &type, key_length, key, 1, &gZero, 5, empty)) { return false; } /* * Mark the document as working on a child document so that no * further modifications can happen until the caller has called * bson_append_{document,array}_end(). */ bson->flags |= BSON_FLAG_IN_CHILD; /* * Initialize the child bson_t structure and point it at the parents * buffers. This allows us to realloc directly from the child without * walking up to the parent bson_t. */ achild->flags = (BSON_FLAG_CHILD | BSON_FLAG_NO_FREE | BSON_FLAG_STATIC); if ((bson->flags & BSON_FLAG_CHILD)) { achild->depth = ((bson_impl_alloc_t *) bson)->depth + 1; } else { achild->depth = 1; } achild->parent = bson; achild->buf = aparent->buf; achild->buflen = aparent->buflen; achild->offset = aparent->offset + aparent->len - 1 - 5; achild->len = 5; achild->alloc = NULL; achild->alloclen = 0; achild->realloc = aparent->realloc; achild->realloc_func_ctx = aparent->realloc_func_ctx; return true; } /* *-------------------------------------------------------------------------- * * _bson_append_bson_end -- * * Complete a call to _bson_append_bson_begin. * * Returns: * true if successful. * * Side effects: * @child is destroyed and no longer valid after calling this * function. * *-------------------------------------------------------------------------- */ static bool _bson_append_bson_end (bson_t *bson, /* IN */ bson_t *child) /* IN */ { BSON_ASSERT (bson); BSON_ASSERT ((bson->flags & BSON_FLAG_IN_CHILD)); BSON_ASSERT (!(child->flags & BSON_FLAG_IN_CHILD)); /* * Unmark the IN_CHILD flag. */ bson->flags &= ~BSON_FLAG_IN_CHILD; /* * Now that we are done building the sub-document, add the size to the * parent, not including the default 5 byte empty document already added. */ bson->len = (bson->len + child->len - 5); /* * Ensure we have a \0 byte at the end and proper length encoded at * the beginning of the document. */ _bson_data (bson)[bson->len - 1] = '\0'; _bson_encode_length (bson); return true; } /* *-------------------------------------------------------------------------- * * bson_append_array_begin -- * * Start appending a new array. * * Use @child to append to the data area for the given field. 
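 *
 * Editorial usage sketch (added for this annotated listing); array elements
 * are keyed by their decimal index, as required by the BSON spec:
 *
 *    bson_t doc = BSON_INITIALIZER;
 *    bson_t ar;
 *
 *    bson_append_array_begin (&doc, "tags", -1, &ar);
 *    BSON_APPEND_UTF8 (&ar, "0", "red");
 *    BSON_APPEND_UTF8 (&ar, "1", "green");
 *    bson_append_array_end (&doc, &ar);
 *    bson_destroy (&doc);
 *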
* * It is a programming error to call any other bson function on * @bson until bson_append_array_end() has been called. It is * valid to call bson_append*() functions on @child. * * This function is useful to allow building nested documents using * a single buffer owned by the top-level bson document. * * Returns: * true if successful; otherwise false and @child is invalid. * * Side effects: * @child is initialized if true is returned. * *-------------------------------------------------------------------------- */ bool bson_append_array_begin (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ bson_t *child) /* IN */ { BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (child); return _bson_append_bson_begin ( bson, key, key_length, BSON_TYPE_ARRAY, child); } /* *-------------------------------------------------------------------------- * * bson_append_array_end -- * * Complete a call to bson_append_array_begin(). * * It is safe to append other fields to @bson after calling this * function. * * Returns: * true if successful. * * Side effects: * @child is invalid after calling this function. * *-------------------------------------------------------------------------- */ bool bson_append_array_end (bson_t *bson, /* IN */ bson_t *child) /* IN */ { BSON_ASSERT (bson); BSON_ASSERT (child); return _bson_append_bson_end (bson, child); } /* *-------------------------------------------------------------------------- * * bson_append_document_begin -- * * Start appending a new document. * * Use @child to append to the data area for the given field. * * It is a programming error to call any other bson function on * @bson until bson_append_document_end() has been called. It is * valid to call bson_append*() functions on @child. * * This function is useful to allow building nested documents using * a single buffer owned by the top-level bson document. * * Returns: * true if successful; otherwise false and @child is invalid. * * Side effects: * @child is initialized if true is returned. * *-------------------------------------------------------------------------- */ bool bson_append_document_begin (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ bson_t *child) /* IN */ { BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (child); return _bson_append_bson_begin ( bson, key, key_length, BSON_TYPE_DOCUMENT, child); } /* *-------------------------------------------------------------------------- * * bson_append_document_end -- * * Complete a call to bson_append_document_begin(). * * It is safe to append new fields to @bson after calling this * function, if true is returned. * * Returns: * true if successful; otherwise false indicating INT_MAX overflow. * * Side effects: * @child is destroyed and invalid after calling this function. * *-------------------------------------------------------------------------- */ bool bson_append_document_end (bson_t *bson, /* IN */ bson_t *child) /* IN */ { BSON_ASSERT (bson); BSON_ASSERT (child); return _bson_append_bson_end (bson, child); } /* *-------------------------------------------------------------------------- * * bson_append_array -- * * Append an array to @bson. * * Generally, bson_append_array_begin() will result in faster code * since few buffers need to be malloced. * * Returns: * true if successful; otherwise false indicating INT_MAX overflow. * * Side effects: * None. 
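 *
 * Editorial usage sketch (added for this annotated listing): copying a
 * separately built array whose keys are "0", "1", ... as the pedantic
 * first-element check in the function body below expects.
 *
 *    bson_t ar = BSON_INITIALIZER;
 *    bson_t doc = BSON_INITIALIZER;
 *
 *    BSON_APPEND_INT32 (&ar, "0", 10);
 *    BSON_APPEND_INT32 (&ar, "1", 20);
 *    bson_append_array (&doc, "scores", -1, &ar);
 *    bson_destroy (&ar);
 *    bson_destroy (&doc);
 *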
* *-------------------------------------------------------------------------- */ bool bson_append_array (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ const bson_t *array) /* IN */ { static const uint8_t type = BSON_TYPE_ARRAY; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (array); if (key_length < 0) { key_length = (int) strlen (key); } /* * Let's be a bit pedantic and ensure the array has properly formatted key * names. We will verify this simply by checking the first element for "0" * if the array is non-empty. */ if (array && !bson_empty (array)) { bson_iter_t iter; if (bson_iter_init (&iter, array) && bson_iter_next (&iter)) { if (0 != strcmp ("0", bson_iter_key (&iter))) { fprintf (stderr, "%s(): invalid array detected. first element of array " "parameter is not \"0\".\n", BSON_FUNC); } } } return _bson_append (bson, 4, (1 + key_length + 1 + array->len), 1, &type, key_length, key, 1, &gZero, array->len, _bson_data (array)); } /* *-------------------------------------------------------------------------- * * bson_append_binary -- * * Append binary data to @bson. The field will have the * BSON_TYPE_BINARY type. * * Parameters: * @subtype: the BSON Binary Subtype. See bsonspec.org for more * information. * @binary: a pointer to the raw binary data. * @length: the size of @binary in bytes. * * Returns: * true if successful; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_append_binary (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ bson_subtype_t subtype, /* IN */ const uint8_t *binary, /* IN */ uint32_t length) /* IN */ { static const uint8_t type = BSON_TYPE_BINARY; uint32_t length_le; uint32_t deprecated_length_le; uint8_t subtype8 = 0; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (binary); if (key_length < 0) { key_length = (int) strlen (key); } subtype8 = subtype; if (subtype == BSON_SUBTYPE_BINARY_DEPRECATED) { length_le = BSON_UINT32_TO_LE (length + 4); deprecated_length_le = BSON_UINT32_TO_LE (length); return _bson_append (bson, 7, (1 + key_length + 1 + 4 + 1 + 4 + length), 1, &type, key_length, key, 1, &gZero, 4, &length_le, 1, &subtype8, 4, &deprecated_length_le, length, binary); } else { length_le = BSON_UINT32_TO_LE (length); return _bson_append (bson, 6, (1 + key_length + 1 + 4 + 1 + length), 1, &type, key_length, key, 1, &gZero, 4, &length_le, 1, &subtype8, length, binary); } } /* *-------------------------------------------------------------------------- * * bson_append_bool -- * * Append a new field to @bson with the name @key. The value is * a boolean indicated by @value. * * Returns: * true if successful; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_append_bool (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ bool value) /* IN */ { static const uint8_t type = BSON_TYPE_BOOL; uint8_t abyte = !!value; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } return _bson_append (bson, 4, (1 + key_length + 1 + 1), 1, &type, key_length, key, 1, &gZero, 1, &abyte); } /* *-------------------------------------------------------------------------- * * bson_append_code -- * * Append a new field to @bson containing javascript code. * * @javascript MUST be a zero terminated UTF-8 string. It MUST NOT * containing embedded \0 characters. 
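 *
 * Editorial usage sketch (added for this annotated listing): a single
 * JavaScript field with no scope; see bson_append_code_with_scope() below
 * when a scope document is needed.
 *
 *    bson_t doc = BSON_INITIALIZER;
 *
 *    bson_append_code (&doc, "finalize", -1,
 *                      "function (key, value) { return value; }");
 *    bson_destroy (&doc);
 *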
* * Returns: * true if successful; otherwise false. * * Side effects: * None. * * See also: * bson_append_code_with_scope(). * *-------------------------------------------------------------------------- */ bool bson_append_code (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ const char *javascript) /* IN */ { static const uint8_t type = BSON_TYPE_CODE; uint32_t length; uint32_t length_le; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (javascript); if (key_length < 0) { key_length = (int) strlen (key); } length = (int) strlen (javascript) + 1; length_le = BSON_UINT32_TO_LE (length); return _bson_append (bson, 5, (1 + key_length + 1 + 4 + length), 1, &type, key_length, key, 1, &gZero, 4, &length_le, length, javascript); } /* *-------------------------------------------------------------------------- * * bson_append_code_with_scope -- * * Append a new field to @bson containing javascript code with * supplied scope. * * Returns: * true if successful; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_append_code_with_scope (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ const char *javascript, /* IN */ const bson_t *scope) /* IN */ { static const uint8_t type = BSON_TYPE_CODEWSCOPE; uint32_t codews_length_le; uint32_t codews_length; uint32_t js_length_le; uint32_t js_length; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (javascript); if (scope == NULL) { return bson_append_code (bson, key, key_length, javascript); } if (key_length < 0) { key_length = (int) strlen (key); } js_length = (int) strlen (javascript) + 1; js_length_le = BSON_UINT32_TO_LE (js_length); codews_length = 4 + 4 + js_length + scope->len; codews_length_le = BSON_UINT32_TO_LE (codews_length); return _bson_append (bson, 7, (1 + key_length + 1 + 4 + 4 + js_length + scope->len), 1, &type, key_length, key, 1, &gZero, 4, &codews_length_le, 4, &js_length_le, js_length, javascript, scope->len, _bson_data (scope)); } /* *-------------------------------------------------------------------------- * * bson_append_dbpointer -- * * This BSON data type is DEPRECATED. * * Append a BSON dbpointer field to @bson. * * Returns: * true if successful; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool bson_append_dbpointer (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ const char *collection, /* IN */ const bson_oid_t *oid) { static const uint8_t type = BSON_TYPE_DBPOINTER; uint32_t length; uint32_t length_le; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (collection); BSON_ASSERT (oid); if (key_length < 0) { key_length = (int) strlen (key); } length = (int) strlen (collection) + 1; length_le = BSON_UINT32_TO_LE (length); return _bson_append (bson, 6, (1 + key_length + 1 + 4 + length + 12), 1, &type, key_length, key, 1, &gZero, 4, &length_le, length, collection, 12, oid); } /* *-------------------------------------------------------------------------- * * bson_append_document -- * * Append a new field to @bson containing a BSON document. * * In general, using bson_append_document_begin() results in faster * code and less memory fragmentation. * * Returns: * true if successful; otherwise false. * * Side effects: * None. * * See also: * bson_append_document_begin(). 
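 *
 * Editorial usage sketch (added for this annotated listing): embedding a
 * separately built document; contrast with the begin/end variant above,
 * which writes into the parent's buffer directly.
 *
 *    bson_t sub = BSON_INITIALIZER;
 *    bson_t doc = BSON_INITIALIZER;
 *
 *    BSON_APPEND_UTF8 (&sub, "city", "Reykjavik");
 *    bson_append_document (&doc, "address", -1, &sub);
 *    bson_destroy (&sub);
 *    bson_destroy (&doc);
 *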
* *-------------------------------------------------------------------------- */ bool bson_append_document (bson_t *bson, /* IN */ const char *key, /* IN */ int key_length, /* IN */ const bson_t *value) /* IN */ { static const uint8_t type = BSON_TYPE_DOCUMENT; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (value); if (key_length < 0) { key_length = (int) strlen (key); } return _bson_append (bson, 4, (1 + key_length + 1 + value->len), 1, &type, key_length, key, 1, &gZero, value->len, _bson_data (value)); } bool bson_append_double (bson_t *bson, const char *key, int key_length, double value) { static const uint8_t type = BSON_TYPE_DOUBLE; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } #if BSON_BYTE_ORDER == BSON_BIG_ENDIAN value = BSON_DOUBLE_TO_LE (value); #endif return _bson_append (bson, 4, (1 + key_length + 1 + 8), 1, &type, key_length, key, 1, &gZero, 8, &value); } bool bson_append_int32 (bson_t *bson, const char *key, int key_length, int32_t value) { static const uint8_t type = BSON_TYPE_INT32; uint32_t value_le; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } value_le = BSON_UINT32_TO_LE (value); return _bson_append (bson, 4, (1 + key_length + 1 + 4), 1, &type, key_length, key, 1, &gZero, 4, &value_le); } bool bson_append_int64 (bson_t *bson, const char *key, int key_length, int64_t value) { static const uint8_t type = BSON_TYPE_INT64; uint64_t value_le; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } value_le = BSON_UINT64_TO_LE (value); return _bson_append (bson, 4, (1 + key_length + 1 + 8), 1, &type, key_length, key, 1, &gZero, 8, &value_le); } bool bson_append_decimal128 (bson_t *bson, const char *key, int key_length, const bson_decimal128_t *value) { static const uint8_t type = BSON_TYPE_DECIMAL128; uint64_t value_le[2]; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (value); if (key_length < 0) { key_length = (int) strlen (key); } value_le[0] = BSON_UINT64_TO_LE (value->low); value_le[1] = BSON_UINT64_TO_LE (value->high); return _bson_append (bson, 4, (1 + key_length + 1 + 16), 1, &type, key_length, key, 1, &gZero, 16, value_le); } bool bson_append_iter (bson_t *bson, const char *key, int key_length, const bson_iter_t *iter) { bool ret = false; BSON_ASSERT (bson); BSON_ASSERT (iter); if (!key) { key = bson_iter_key (iter); key_length = -1; } switch (bson_iter_type_unsafe (iter)) { case BSON_TYPE_EOD: return false; case BSON_TYPE_DOUBLE: ret = bson_append_double (bson, key, key_length, bson_iter_double (iter)); break; case BSON_TYPE_UTF8: { uint32_t len = 0; const char *str; str = bson_iter_utf8 (iter, &len); ret = bson_append_utf8 (bson, key, key_length, str, len); } break; case BSON_TYPE_DOCUMENT: { const uint8_t *buf = NULL; uint32_t len = 0; bson_t doc; bson_iter_document (iter, &len, &buf); if (bson_init_static (&doc, buf, len)) { ret = bson_append_document (bson, key, key_length, &doc); bson_destroy (&doc); } } break; case BSON_TYPE_ARRAY: { const uint8_t *buf = NULL; uint32_t len = 0; bson_t doc; bson_iter_array (iter, &len, &buf); if (bson_init_static (&doc, buf, len)) { ret = bson_append_array (bson, key, key_length, &doc); bson_destroy (&doc); } } break; case BSON_TYPE_BINARY: { const uint8_t *binary = NULL; bson_subtype_t subtype = BSON_SUBTYPE_BINARY; uint32_t len = 0; bson_iter_binary (iter, &subtype, &len, &binary); ret = bson_append_binary (bson, key, key_length, subtype, binary, len); } break; case 
BSON_TYPE_UNDEFINED: ret = bson_append_undefined (bson, key, key_length); break; case BSON_TYPE_OID: ret = bson_append_oid (bson, key, key_length, bson_iter_oid (iter)); break; case BSON_TYPE_BOOL: ret = bson_append_bool (bson, key, key_length, bson_iter_bool (iter)); break; case BSON_TYPE_DATE_TIME: ret = bson_append_date_time ( bson, key, key_length, bson_iter_date_time (iter)); break; case BSON_TYPE_NULL: ret = bson_append_null (bson, key, key_length); break; case BSON_TYPE_REGEX: { const char *regex; const char *options; regex = bson_iter_regex (iter, &options); ret = bson_append_regex (bson, key, key_length, regex, options); } break; case BSON_TYPE_DBPOINTER: { const bson_oid_t *oid; uint32_t len; const char *collection; bson_iter_dbpointer (iter, &len, &collection, &oid); ret = bson_append_dbpointer (bson, key, key_length, collection, oid); } break; case BSON_TYPE_CODE: { uint32_t len; const char *code; code = bson_iter_code (iter, &len); ret = bson_append_code (bson, key, key_length, code); } break; case BSON_TYPE_SYMBOL: { uint32_t len; const char *symbol; symbol = bson_iter_symbol (iter, &len); ret = bson_append_symbol (bson, key, key_length, symbol, len); } break; case BSON_TYPE_CODEWSCOPE: { const uint8_t *scope = NULL; uint32_t scope_len = 0; uint32_t len = 0; const char *javascript = NULL; bson_t doc; javascript = bson_iter_codewscope (iter, &len, &scope_len, &scope); if (bson_init_static (&doc, scope, scope_len)) { ret = bson_append_code_with_scope ( bson, key, key_length, javascript, &doc); bson_destroy (&doc); } } break; case BSON_TYPE_INT32: ret = bson_append_int32 (bson, key, key_length, bson_iter_int32 (iter)); break; case BSON_TYPE_TIMESTAMP: { uint32_t ts; uint32_t inc; bson_iter_timestamp (iter, &ts, &inc); ret = bson_append_timestamp (bson, key, key_length, ts, inc); } break; case BSON_TYPE_INT64: ret = bson_append_int64 (bson, key, key_length, bson_iter_int64 (iter)); break; case BSON_TYPE_DECIMAL128: { bson_decimal128_t dec; if (!bson_iter_decimal128 (iter, &dec)) { return false; } ret = bson_append_decimal128 (bson, key, key_length, &dec); } break; case BSON_TYPE_MAXKEY: ret = bson_append_maxkey (bson, key, key_length); break; case BSON_TYPE_MINKEY: ret = bson_append_minkey (bson, key, key_length); break; default: break; } return ret; } bool bson_append_maxkey (bson_t *bson, const char *key, int key_length) { static const uint8_t type = BSON_TYPE_MAXKEY; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } return _bson_append ( bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero); } bool bson_append_minkey (bson_t *bson, const char *key, int key_length) { static const uint8_t type = BSON_TYPE_MINKEY; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } return _bson_append ( bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero); } bool bson_append_null (bson_t *bson, const char *key, int key_length) { static const uint8_t type = BSON_TYPE_NULL; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } return _bson_append ( bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero); } bool bson_append_oid (bson_t *bson, const char *key, int key_length, const bson_oid_t *value) { static const uint8_t type = BSON_TYPE_OID; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (value); if (key_length < 0) { key_length = (int) strlen (key); } return _bson_append (bson, 4, (1 + key_length + 1 + 12), 1, &type, 
key_length, key, 1, &gZero, 12, value); } /* *-------------------------------------------------------------------------- * * _bson_append_regex_options_sorted -- * * Helper to append regex options to a buffer in a sorted order. * Any duplicate or unsupported options will be ignored. * * Parameters: * @buffer: Buffer to which sorted options will be appended * @options: Regex options * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static BSON_INLINE void _bson_append_regex_options_sorted (bson_string_t *buffer, /* IN */ const char *options) /* IN */ { const char *c; for (c = BSON_REGEX_OPTIONS_SORTED; *c; c++) { if (strchr (options, *c)) { bson_string_append_c (buffer, *c); } } } bool bson_append_regex (bson_t *bson, const char *key, int key_length, const char *regex, const char *options) { return bson_append_regex_w_len (bson, key, key_length, regex, -1, options); } bool bson_append_regex_w_len (bson_t *bson, const char *key, int key_length, const char *regex, int regex_length, const char *options) { static const uint8_t type = BSON_TYPE_REGEX; bson_string_t *options_sorted; bool r; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } if (regex_length < 0) { regex_length = (int) strlen (regex); } if (!regex) { regex = ""; } if (!options) { options = ""; } options_sorted = bson_string_new (NULL); _bson_append_regex_options_sorted (options_sorted, options); r = _bson_append ( bson, 6, (1 + key_length + 1 + regex_length + 1 + options_sorted->len + 1), 1, &type, key_length, key, 1, &gZero, regex_length, regex, 1, &gZero, options_sorted->len + 1, options_sorted->str); bson_string_free (options_sorted, true); return r; } bool bson_append_utf8 ( bson_t *bson, const char *key, int key_length, const char *value, int length) { static const uint8_t type = BSON_TYPE_UTF8; uint32_t length_le; BSON_ASSERT (bson); BSON_ASSERT (key); if (BSON_UNLIKELY (!value)) { return bson_append_null (bson, key, key_length); } if (BSON_UNLIKELY (key_length < 0)) { key_length = (int) strlen (key); } if (BSON_UNLIKELY (length < 0)) { length = (int) strlen (value); } length_le = BSON_UINT32_TO_LE (length + 1); return _bson_append (bson, 6, (1 + key_length + 1 + 4 + length + 1), 1, &type, key_length, key, 1, &gZero, 4, &length_le, length, value, 1, &gZero); } bool bson_append_symbol ( bson_t *bson, const char *key, int key_length, const char *value, int length) { static const uint8_t type = BSON_TYPE_SYMBOL; uint32_t length_le; BSON_ASSERT (bson); BSON_ASSERT (key); if (!value) { return bson_append_null (bson, key, key_length); } if (key_length < 0) { key_length = (int) strlen (key); } if (length < 0) { length = (int) strlen (value); } length_le = BSON_UINT32_TO_LE (length + 1); return _bson_append (bson, 6, (1 + key_length + 1 + 4 + length + 1), 1, &type, key_length, key, 1, &gZero, 4, &length_le, length, value, 1, &gZero); } bool bson_append_time_t (bson_t *bson, const char *key, int key_length, time_t value) { #ifdef BSON_OS_WIN32 struct timeval tv = {(long) value, 0}; #else struct timeval tv = {value, 0}; #endif BSON_ASSERT (bson); BSON_ASSERT (key); return bson_append_timeval (bson, key, key_length, &tv); } bool bson_append_timestamp (bson_t *bson, const char *key, int key_length, uint32_t timestamp, uint32_t increment) { static const uint8_t type = BSON_TYPE_TIMESTAMP; uint64_t value; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } value = 
((((uint64_t) timestamp) << 32) | ((uint64_t) increment)); value = BSON_UINT64_TO_LE (value); return _bson_append (bson, 4, (1 + key_length + 1 + 8), 1, &type, key_length, key, 1, &gZero, 8, &value); } bool bson_append_now_utc (bson_t *bson, const char *key, int key_length) { BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (key_length >= -1); return bson_append_time_t (bson, key, key_length, time (NULL)); } bool bson_append_date_time (bson_t *bson, const char *key, int key_length, int64_t value) { static const uint8_t type = BSON_TYPE_DATE_TIME; uint64_t value_le; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } value_le = BSON_UINT64_TO_LE (value); return _bson_append (bson, 4, (1 + key_length + 1 + 8), 1, &type, key_length, key, 1, &gZero, 8, &value_le); } bool bson_append_timeval (bson_t *bson, const char *key, int key_length, struct timeval *value) { uint64_t unix_msec; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (value); unix_msec = (((uint64_t) value->tv_sec) * 1000UL) + (value->tv_usec / 1000UL); return bson_append_date_time (bson, key, key_length, unix_msec); } bool bson_append_undefined (bson_t *bson, const char *key, int key_length) { static const uint8_t type = BSON_TYPE_UNDEFINED; BSON_ASSERT (bson); BSON_ASSERT (key); if (key_length < 0) { key_length = (int) strlen (key); } return _bson_append ( bson, 3, (1 + key_length + 1), 1, &type, key_length, key, 1, &gZero); } bool bson_append_value (bson_t *bson, const char *key, int key_length, const bson_value_t *value) { bson_t local; bool ret = false; BSON_ASSERT (bson); BSON_ASSERT (key); BSON_ASSERT (value); switch (value->value_type) { case BSON_TYPE_DOUBLE: ret = bson_append_double (bson, key, key_length, value->value.v_double); break; case BSON_TYPE_UTF8: ret = bson_append_utf8 (bson, key, key_length, value->value.v_utf8.str, value->value.v_utf8.len); break; case BSON_TYPE_DOCUMENT: if (bson_init_static ( &local, value->value.v_doc.data, value->value.v_doc.data_len)) { ret = bson_append_document (bson, key, key_length, &local); bson_destroy (&local); } break; case BSON_TYPE_ARRAY: if (bson_init_static ( &local, value->value.v_doc.data, value->value.v_doc.data_len)) { ret = bson_append_array (bson, key, key_length, &local); bson_destroy (&local); } break; case BSON_TYPE_BINARY: ret = bson_append_binary (bson, key, key_length, value->value.v_binary.subtype, value->value.v_binary.data, value->value.v_binary.data_len); break; case BSON_TYPE_UNDEFINED: ret = bson_append_undefined (bson, key, key_length); break; case BSON_TYPE_OID: ret = bson_append_oid (bson, key, key_length, &value->value.v_oid); break; case BSON_TYPE_BOOL: ret = bson_append_bool (bson, key, key_length, value->value.v_bool); break; case BSON_TYPE_DATE_TIME: ret = bson_append_date_time (bson, key, key_length, value->value.v_datetime); break; case BSON_TYPE_NULL: ret = bson_append_null (bson, key, key_length); break; case BSON_TYPE_REGEX: ret = bson_append_regex (bson, key, key_length, value->value.v_regex.regex, value->value.v_regex.options); break; case BSON_TYPE_DBPOINTER: ret = bson_append_dbpointer (bson, key, key_length, value->value.v_dbpointer.collection, &value->value.v_dbpointer.oid); break; case BSON_TYPE_CODE: ret = bson_append_code (bson, key, key_length, value->value.v_code.code); break; case BSON_TYPE_SYMBOL: ret = bson_append_symbol (bson, key, key_length, value->value.v_symbol.symbol, value->value.v_symbol.len); break; case BSON_TYPE_CODEWSCOPE: if (bson_init_static (&local, 
value->value.v_codewscope.scope_data, value->value.v_codewscope.scope_len)) { ret = bson_append_code_with_scope ( bson, key, key_length, value->value.v_codewscope.code, &local); bson_destroy (&local); } break; case BSON_TYPE_INT32: ret = bson_append_int32 (bson, key, key_length, value->value.v_int32); break; case BSON_TYPE_TIMESTAMP: ret = bson_append_timestamp (bson, key, key_length, value->value.v_timestamp.timestamp, value->value.v_timestamp.increment); break; case BSON_TYPE_INT64: ret = bson_append_int64 (bson, key, key_length, value->value.v_int64); break; case BSON_TYPE_DECIMAL128: ret = bson_append_decimal128 ( bson, key, key_length, &(value->value.v_decimal128)); break; case BSON_TYPE_MAXKEY: ret = bson_append_maxkey (bson, key, key_length); break; case BSON_TYPE_MINKEY: ret = bson_append_minkey (bson, key, key_length); break; case BSON_TYPE_EOD: default: break; } return ret; } void bson_init (bson_t *bson) { bson_impl_inline_t *impl = (bson_impl_inline_t *) bson; BSON_ASSERT (bson); #ifdef BSON_MEMCHECK impl->canary = bson_malloc (1); #endif impl->flags = BSON_FLAG_INLINE | BSON_FLAG_STATIC; impl->len = 5; impl->data[0] = 5; impl->data[1] = 0; impl->data[2] = 0; impl->data[3] = 0; impl->data[4] = 0; } void bson_reinit (bson_t *bson) { uint8_t *data; BSON_ASSERT (bson); data = _bson_data (bson); bson->len = 5; data[0] = 5; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; } bool bson_init_static (bson_t *bson, const uint8_t *data, size_t length) { bson_impl_alloc_t *impl = (bson_impl_alloc_t *) bson; uint32_t len_le; BSON_ASSERT (bson); BSON_ASSERT (data); if ((length < 5) || (length > INT_MAX)) { return false; } memcpy (&len_le, data, sizeof (len_le)); if ((size_t) BSON_UINT32_FROM_LE (len_le) != length) { return false; } if (data[length - 1]) { return false; } impl->flags = BSON_FLAG_STATIC | BSON_FLAG_RDONLY; impl->len = (uint32_t) length; impl->parent = NULL; impl->depth = 0; impl->buf = &impl->alloc; impl->buflen = &impl->alloclen; impl->offset = 0; impl->alloc = (uint8_t *) data; impl->alloclen = length; impl->realloc = NULL; impl->realloc_func_ctx = NULL; return true; } bson_t * bson_new (void) { bson_impl_inline_t *impl; bson_t *bson; bson = bson_malloc (sizeof *bson); impl = (bson_impl_inline_t *) bson; impl->flags = BSON_FLAG_INLINE; impl->len = 5; #ifdef BSON_MEMCHECK impl->canary = bson_malloc (1); #endif impl->data[0] = 5; impl->data[1] = 0; impl->data[2] = 0; impl->data[3] = 0; impl->data[4] = 0; return bson; } bson_t * bson_sized_new (size_t size) { bson_impl_alloc_t *impl_a; bson_t *b; BSON_ASSERT (size <= INT32_MAX); b = bson_malloc (sizeof *b); impl_a = (bson_impl_alloc_t *) b; if (size <= BSON_INLINE_DATA_SIZE) { bson_init (b); b->flags &= ~BSON_FLAG_STATIC; } else { impl_a->flags = BSON_FLAG_NONE; impl_a->len = 5; impl_a->parent = NULL; impl_a->depth = 0; impl_a->buf = &impl_a->alloc; impl_a->buflen = &impl_a->alloclen; impl_a->offset = 0; impl_a->alloclen = BSON_MAX (5, size); impl_a->alloc = bson_malloc (impl_a->alloclen); impl_a->alloc[0] = 5; impl_a->alloc[1] = 0; impl_a->alloc[2] = 0; impl_a->alloc[3] = 0; impl_a->alloc[4] = 0; impl_a->realloc = bson_realloc_ctx; impl_a->realloc_func_ctx = NULL; } return b; } bson_t * bson_new_from_data (const uint8_t *data, size_t length) { uint32_t len_le; bson_t *bson; BSON_ASSERT (data); if ((length < 5) || (length > INT_MAX) || data[length - 1]) { return NULL; } memcpy (&len_le, data, sizeof (len_le)); if (length != (size_t) BSON_UINT32_FROM_LE (len_le)) { return NULL; } bson = bson_sized_new (length); memcpy 
(_bson_data (bson), data, length); bson->len = (uint32_t) length; return bson; } bson_t * bson_new_from_buffer (uint8_t **buf, size_t *buf_len, bson_realloc_func realloc_func, void *realloc_func_ctx) { bson_impl_alloc_t *impl; uint32_t len_le; uint32_t length; bson_t *bson; BSON_ASSERT (buf); BSON_ASSERT (buf_len); if (!realloc_func) { realloc_func = bson_realloc_ctx; } bson = bson_malloc0 (sizeof *bson); impl = (bson_impl_alloc_t *) bson; if (!*buf) { length = 5; len_le = BSON_UINT32_TO_LE (length); *buf_len = 5; *buf = realloc_func (*buf, *buf_len, realloc_func_ctx); memcpy (*buf, &len_le, sizeof (len_le)); (*buf)[4] = '\0'; } else { if ((*buf_len < 5) || (*buf_len > INT_MAX)) { bson_free (bson); return NULL; } memcpy (&len_le, *buf, sizeof (len_le)); length = BSON_UINT32_FROM_LE (len_le); } if ((*buf)[length - 1]) { bson_free (bson); return NULL; } impl->flags = BSON_FLAG_NO_FREE; impl->len = length; impl->buf = buf; impl->buflen = buf_len; impl->realloc = realloc_func; impl->realloc_func_ctx = realloc_func_ctx; return bson; } bson_t * bson_copy (const bson_t *bson) { const uint8_t *data; BSON_ASSERT (bson); data = _bson_data (bson); return bson_new_from_data (data, bson->len); } void bson_copy_to (const bson_t *src, bson_t *dst) { const uint8_t *data; bson_impl_alloc_t *adst; size_t len; BSON_ASSERT (src); BSON_ASSERT (dst); if ((src->flags & BSON_FLAG_INLINE)) { #ifdef BSON_MEMCHECK dst->len = src->len; dst->canary = malloc (1); memcpy (dst->padding, src->padding, sizeof dst->padding); #else memcpy (dst, src, sizeof *dst); #endif dst->flags = (BSON_FLAG_STATIC | BSON_FLAG_INLINE); return; } data = _bson_data (src); len = bson_next_power_of_two ((size_t) src->len); adst = (bson_impl_alloc_t *) dst; adst->flags = BSON_FLAG_STATIC; adst->len = src->len; adst->parent = NULL; adst->depth = 0; adst->buf = &adst->alloc; adst->buflen = &adst->alloclen; adst->offset = 0; adst->alloc = bson_malloc (len); adst->alloclen = len; adst->realloc = bson_realloc_ctx; adst->realloc_func_ctx = NULL; memcpy (adst->alloc, data, src->len); } static bool should_ignore (const char *first_exclude, va_list args, const char *name) { bool ret = false; const char *exclude = first_exclude; va_list args_copy; va_copy (args_copy, args); do { if (!strcmp (name, exclude)) { ret = true; break; } } while ((exclude = va_arg (args_copy, const char *))); va_end (args_copy); return ret; } void bson_copy_to_excluding_noinit_va (const bson_t *src, bson_t *dst, const char *first_exclude, va_list args) { bson_iter_t iter; if (bson_iter_init (&iter, src)) { while (bson_iter_next (&iter)) { if (!should_ignore (first_exclude, args, bson_iter_key (&iter))) { if (!bson_append_iter (dst, NULL, 0, &iter)) { /* * This should not be able to happen since we are copying * from within a valid bson_t. */ BSON_ASSERT (false); return; } } } } } void bson_copy_to_excluding (const bson_t *src, bson_t *dst, const char *first_exclude, ...) { va_list args; BSON_ASSERT (src); BSON_ASSERT (dst); BSON_ASSERT (first_exclude); bson_init (dst); va_start (args, first_exclude); bson_copy_to_excluding_noinit_va (src, dst, first_exclude, args); va_end (args); } void bson_copy_to_excluding_noinit (const bson_t *src, bson_t *dst, const char *first_exclude, ...) 
{ va_list args; BSON_ASSERT (src); BSON_ASSERT (dst); BSON_ASSERT (first_exclude); va_start (args, first_exclude); bson_copy_to_excluding_noinit_va (src, dst, first_exclude, args); va_end (args); } void bson_destroy (bson_t *bson) { if (!bson) { return; } if (!(bson->flags & (BSON_FLAG_RDONLY | BSON_FLAG_INLINE | BSON_FLAG_NO_FREE))) { bson_free (*((bson_impl_alloc_t *) bson)->buf); } #ifdef BSON_MEMCHECK if (bson->flags & BSON_FLAG_INLINE) { bson_free (bson->canary); } #endif if (!(bson->flags & BSON_FLAG_STATIC)) { bson_free (bson); } } uint8_t * bson_reserve_buffer (bson_t *bson, uint32_t size) { if (bson->flags & (BSON_FLAG_CHILD | BSON_FLAG_IN_CHILD | BSON_FLAG_RDONLY)) { return NULL; } if (!_bson_grow (bson, size)) { return NULL; } if (bson->flags & BSON_FLAG_INLINE) { /* bson_grow didn't spill over */ ((bson_impl_inline_t *) bson)->len = size; } else { ((bson_impl_alloc_t *) bson)->len = size; } return _bson_data (bson); } bool bson_steal (bson_t *dst, bson_t *src) { bson_impl_inline_t *src_inline; bson_impl_inline_t *dst_inline; bson_impl_alloc_t *alloc; BSON_ASSERT (dst); BSON_ASSERT (src); bson_init (dst); if (src->flags & (BSON_FLAG_CHILD | BSON_FLAG_IN_CHILD | BSON_FLAG_RDONLY)) { return false; } if (src->flags & BSON_FLAG_INLINE) { src_inline = (bson_impl_inline_t *) src; dst_inline = (bson_impl_inline_t *) dst; dst_inline->len = src_inline->len; memcpy (dst_inline->data, src_inline->data, sizeof src_inline->data); /* for consistency, src is always invalid after steal, even if inline */ src->len = 0; #ifdef BSON_MEMCHECK bson_free (src->canary); #endif } else { #ifdef BSON_MEMCHECK bson_free (dst->canary); #endif memcpy (dst, src, sizeof (bson_t)); alloc = (bson_impl_alloc_t *) dst; alloc->flags |= BSON_FLAG_STATIC; alloc->buf = &alloc->alloc; alloc->buflen = &alloc->alloclen; } if (!(src->flags & BSON_FLAG_STATIC)) { bson_free (src); } else { /* src is invalid after steal */ src->len = 0; } return true; } uint8_t * bson_destroy_with_steal (bson_t *bson, bool steal, uint32_t *length) { uint8_t *ret = NULL; BSON_ASSERT (bson); if (length) { *length = bson->len; } if (!steal) { bson_destroy (bson); return NULL; } if ((bson->flags & (BSON_FLAG_CHILD | BSON_FLAG_IN_CHILD | BSON_FLAG_RDONLY))) { /* Do nothing */ } else if ((bson->flags & BSON_FLAG_INLINE)) { bson_impl_inline_t *inl; inl = (bson_impl_inline_t *) bson; ret = bson_malloc (bson->len); memcpy (ret, inl->data, bson->len); } else { bson_impl_alloc_t *alloc; alloc = (bson_impl_alloc_t *) bson; ret = *alloc->buf; *alloc->buf = NULL; } bson_destroy (bson); return ret; } const uint8_t * bson_get_data (const bson_t *bson) { BSON_ASSERT (bson); return _bson_data (bson); } uint32_t bson_count_keys (const bson_t *bson) { uint32_t count = 0; bson_iter_t iter; BSON_ASSERT (bson); if (bson_iter_init (&iter, bson)) { while (bson_iter_next (&iter)) { count++; } } return count; } bool bson_has_field (const bson_t *bson, const char *key) { bson_iter_t iter; bson_iter_t child; BSON_ASSERT (bson); BSON_ASSERT (key); if (NULL != strchr (key, '.')) { return (bson_iter_init (&iter, bson) && bson_iter_find_descendant (&iter, key, &child)); } return bson_iter_init_find (&iter, bson, key); } int bson_compare (const bson_t *bson, const bson_t *other) { const uint8_t *data1; const uint8_t *data2; size_t len1; size_t len2; int64_t ret; data1 = _bson_data (bson) + 4; len1 = bson->len - 4; data2 = _bson_data (other) + 4; len2 = other->len - 4; if (len1 == len2) { return memcmp (data1, data2, len1); } ret = memcmp (data1, data2, BSON_MIN (len1, 
len2)); if (ret == 0) { ret = (int64_t) (len1 - len2); } return (ret < 0) ? -1 : (ret > 0); } bool bson_equal (const bson_t *bson, const bson_t *other) { return !bson_compare (bson, other); } static bool _bson_as_json_visit_utf8 (const bson_iter_t *iter, const char *key, size_t v_utf8_len, const char *v_utf8, void *data) { bson_json_state_t *state = data; char *escaped; escaped = bson_utf8_escape_for_json (v_utf8, v_utf8_len); if (escaped) { bson_string_append (state->str, "\""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\""); bson_free (escaped); return false; } return true; } static bool _bson_as_json_visit_int32 (const bson_iter_t *iter, const char *key, int32_t v_int32, void *data) { bson_json_state_t *state = data; if (state->mode == BSON_JSON_MODE_CANONICAL) { bson_string_append_printf ( state->str, "{ \"$numberInt\" : \"%" PRId32 "\" }", v_int32); } else { bson_string_append_printf (state->str, "%" PRId32, v_int32); } return false; } static bool _bson_as_json_visit_int64 (const bson_iter_t *iter, const char *key, int64_t v_int64, void *data) { bson_json_state_t *state = data; if (state->mode == BSON_JSON_MODE_CANONICAL) { bson_string_append_printf ( state->str, "{ \"$numberLong\" : \"%" PRId64 "\"}", v_int64); } else { bson_string_append_printf (state->str, "%" PRId64, v_int64); } return false; } static bool _bson_as_json_visit_decimal128 (const bson_iter_t *iter, const char *key, const bson_decimal128_t *value, void *data) { bson_json_state_t *state = data; char decimal128_string[BSON_DECIMAL128_STRING]; bson_decimal128_to_string (value, decimal128_string); bson_string_append (state->str, "{ \"$numberDecimal\" : \""); bson_string_append (state->str, decimal128_string); bson_string_append (state->str, "\" }"); return false; } static bool _bson_as_json_visit_double (const bson_iter_t *iter, const char *key, double v_double, void *data) { bson_json_state_t *state = data; bson_string_t *str = state->str; uint32_t start_len; bool legacy; /* Determine if legacy (i.e. unwrapped) output should be used. Relaxed mode * will use this for nan and inf values, which we check manually since old * platforms may not have isinf or isnan. 
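 *
 * Editorial sketch (added for this annotated listing) of how the three JSON
 * modes are selected through the public API; the trailing annotations show
 * roughly how a double is rendered in each mode and are illustrative only.
 *
 *    bson_t d = BSON_INITIALIZER;
 *    char *legacy, *canonical, *relaxed;
 *
 *    BSON_APPEND_DOUBLE (&d, "x", 3.0);
 *    legacy = bson_as_json (&d, NULL);                        "x" : 3.0
 *    canonical = bson_as_canonical_extended_json (&d, NULL);  wrapped in
 *                                                             "$numberDouble"
 *    relaxed = bson_as_relaxed_extended_json (&d, NULL);      3.0, but NaN and
 *                                                             Infinity stay wrapped
 *    bson_free (legacy);
 *    bson_free (canonical);
 *    bson_free (relaxed);
 *    bson_destroy (&d);
 *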
*/ legacy = state->mode == BSON_JSON_MODE_LEGACY || (state->mode == BSON_JSON_MODE_RELAXED && !(v_double != v_double || v_double * 0 != 0)); if (!legacy) { bson_string_append (state->str, "{ \"$numberDouble\" : \""); } if (!legacy && v_double != v_double) { bson_string_append (str, "NaN"); } else if (!legacy && v_double * 0 != 0) { if (v_double > 0) { bson_string_append (str, "Infinity"); } else { bson_string_append (str, "-Infinity"); } } else { start_len = str->len; bson_string_append_printf (str, "%.20g", v_double); /* ensure trailing ".0" to distinguish "3" from "3.0" */ if (strspn (&str->str[start_len], "0123456789-") == str->len - start_len) { bson_string_append (str, ".0"); } } if (!legacy) { bson_string_append (state->str, "\" }"); } return false; } static bool _bson_as_json_visit_undefined (const bson_iter_t *iter, const char *key, void *data) { bson_json_state_t *state = data; bson_string_append (state->str, "{ \"$undefined\" : true }"); return false; } static bool _bson_as_json_visit_null (const bson_iter_t *iter, const char *key, void *data) { bson_json_state_t *state = data; bson_string_append (state->str, "null"); return false; } static bool _bson_as_json_visit_oid (const bson_iter_t *iter, const char *key, const bson_oid_t *oid, void *data) { bson_json_state_t *state = data; char str[25]; bson_oid_to_string (oid, str); bson_string_append (state->str, "{ \"$oid\" : \""); bson_string_append (state->str, str); bson_string_append (state->str, "\" }"); return false; } static bool _bson_as_json_visit_binary (const bson_iter_t *iter, const char *key, bson_subtype_t v_subtype, size_t v_binary_len, const uint8_t *v_binary, void *data) { bson_json_state_t *state = data; size_t b64_len; char *b64; b64_len = (v_binary_len / 3 + 1) * 4 + 1; b64 = bson_malloc0 (b64_len); BSON_ASSERT (bson_b64_ntop (v_binary, v_binary_len, b64, b64_len) != -1); if (state->mode == BSON_JSON_MODE_CANONICAL || state->mode == BSON_JSON_MODE_RELAXED) { bson_string_append (state->str, "{ \"$binary\" : { \"base64\": \""); bson_string_append (state->str, b64); bson_string_append (state->str, "\", \"subType\" : \""); bson_string_append_printf (state->str, "%02x", v_subtype); bson_string_append (state->str, "\" } }"); } else { bson_string_append (state->str, "{ \"$binary\" : \""); bson_string_append (state->str, b64); bson_string_append (state->str, "\", \"$type\" : \""); bson_string_append_printf (state->str, "%02x", v_subtype); bson_string_append (state->str, "\" }"); } bson_free (b64); return false; } static bool _bson_as_json_visit_bool (const bson_iter_t *iter, const char *key, bool v_bool, void *data) { bson_json_state_t *state = data; bson_string_append (state->str, v_bool ? 
"true" : "false"); return false; } static bool _bson_as_json_visit_date_time (const bson_iter_t *iter, const char *key, int64_t msec_since_epoch, void *data) { bson_json_state_t *state = data; if (state->mode == BSON_JSON_MODE_CANONICAL || (state->mode == BSON_JSON_MODE_RELAXED && msec_since_epoch < 0)) { bson_string_append (state->str, "{ \"$date\" : { \"$numberLong\" : \""); bson_string_append_printf (state->str, "%" PRId64, msec_since_epoch); bson_string_append (state->str, "\" } }"); } else if (state->mode == BSON_JSON_MODE_RELAXED) { bson_string_append (state->str, "{ \"$date\" : \""); _bson_iso8601_date_format (msec_since_epoch, state->str); bson_string_append (state->str, "\" }"); } else { bson_string_append (state->str, "{ \"$date\" : "); bson_string_append_printf (state->str, "%" PRId64, msec_since_epoch); bson_string_append (state->str, " }"); } return false; } static bool _bson_as_json_visit_regex (const bson_iter_t *iter, const char *key, const char *v_regex, const char *v_options, void *data) { bson_json_state_t *state = data; char *escaped; escaped = bson_utf8_escape_for_json (v_regex, -1); if (!escaped) { return true; } if (state->mode == BSON_JSON_MODE_CANONICAL || state->mode == BSON_JSON_MODE_RELAXED) { bson_string_append (state->str, "{ \"$regularExpression\" : { \"pattern\" : \""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\", \"options\" : \""); _bson_append_regex_options_sorted (state->str, v_options); bson_string_append (state->str, "\" } }"); } else { bson_string_append (state->str, "{ \"$regex\" : \""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\", \"$options\" : \""); _bson_append_regex_options_sorted (state->str, v_options); bson_string_append (state->str, "\" }"); } bson_free (escaped); return false; } static bool _bson_as_json_visit_timestamp (const bson_iter_t *iter, const char *key, uint32_t v_timestamp, uint32_t v_increment, void *data) { bson_json_state_t *state = data; bson_string_append (state->str, "{ \"$timestamp\" : { \"t\" : "); bson_string_append_printf (state->str, "%u", v_timestamp); bson_string_append (state->str, ", \"i\" : "); bson_string_append_printf (state->str, "%u", v_increment); bson_string_append (state->str, " } }"); return false; } static bool _bson_as_json_visit_dbpointer (const bson_iter_t *iter, const char *key, size_t v_collection_len, const char *v_collection, const bson_oid_t *v_oid, void *data) { bson_json_state_t *state = data; char *escaped; char str[25]; escaped = bson_utf8_escape_for_json (v_collection, -1); if (!escaped) { return true; } if (state->mode == BSON_JSON_MODE_CANONICAL || state->mode == BSON_JSON_MODE_RELAXED) { bson_string_append (state->str, "{ \"$dbPointer\" : { \"$ref\" : \""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\""); if (v_oid) { bson_oid_to_string (v_oid, str); bson_string_append (state->str, ", \"$id\" : { \"$oid\" : \""); bson_string_append (state->str, str); bson_string_append (state->str, "\" }"); } bson_string_append (state->str, " } }"); } else { bson_string_append (state->str, "{ \"$ref\" : \""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\""); if (v_oid) { bson_oid_to_string (v_oid, str); bson_string_append (state->str, ", \"$id\" : \""); bson_string_append (state->str, str); bson_string_append (state->str, "\""); } bson_string_append (state->str, " }"); } bson_free (escaped); return false; } static bool _bson_as_json_visit_minkey (const bson_iter_t *iter, const 
char *key, void *data) { bson_json_state_t *state = data; bson_string_append (state->str, "{ \"$minKey\" : 1 }"); return false; } static bool _bson_as_json_visit_maxkey (const bson_iter_t *iter, const char *key, void *data) { bson_json_state_t *state = data; bson_string_append (state->str, "{ \"$maxKey\" : 1 }"); return false; } static bool _bson_as_json_visit_before (const bson_iter_t *iter, const char *key, void *data) { bson_json_state_t *state = data; char *escaped; if (state->count) { bson_string_append (state->str, ", "); } if (state->keys) { escaped = bson_utf8_escape_for_json (key, -1); if (escaped) { bson_string_append (state->str, "\""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\" : "); bson_free (escaped); } else { return true; } } state->count++; return false; } static void _bson_as_json_visit_corrupt (const bson_iter_t *iter, void *data) { *(((bson_json_state_t *) data)->err_offset) = iter->off; } static bool _bson_as_json_visit_code (const bson_iter_t *iter, const char *key, size_t v_code_len, const char *v_code, void *data) { bson_json_state_t *state = data; char *escaped; escaped = bson_utf8_escape_for_json (v_code, v_code_len); if (!escaped) { return true; } bson_string_append (state->str, "{ \"$code\" : \""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\" }"); bson_free (escaped); return false; } static bool _bson_as_json_visit_symbol (const bson_iter_t *iter, const char *key, size_t v_symbol_len, const char *v_symbol, void *data) { bson_json_state_t *state = data; char *escaped; escaped = bson_utf8_escape_for_json (v_symbol, v_symbol_len); if (!escaped) { return true; } if (state->mode == BSON_JSON_MODE_CANONICAL || state->mode == BSON_JSON_MODE_RELAXED) { bson_string_append (state->str, "{ \"$symbol\" : \""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\" }"); } else { bson_string_append (state->str, "\""); bson_string_append (state->str, escaped); bson_string_append (state->str, "\""); } bson_free (escaped); return false; } static bool _bson_as_json_visit_codewscope (const bson_iter_t *iter, const char *key, size_t v_code_len, const char *v_code, const bson_t *v_scope, void *data) { bson_json_state_t *state = data; char *code_escaped; char *scope; code_escaped = bson_utf8_escape_for_json (v_code, v_code_len); if (!code_escaped) { return true; } /* Encode scope with the same mode */ scope = _bson_as_json_visit_all (v_scope, NULL, state->mode); if (!scope) { bson_free (code_escaped); return true; } bson_string_append (state->str, "{ \"$code\" : \""); bson_string_append (state->str, code_escaped); bson_string_append (state->str, "\", \"$scope\" : "); bson_string_append (state->str, scope); bson_string_append (state->str, " }"); bson_free (code_escaped); bson_free (scope); return false; } static const bson_visitor_t bson_as_json_visitors = { _bson_as_json_visit_before, NULL, /* visit_after */ _bson_as_json_visit_corrupt, _bson_as_json_visit_double, _bson_as_json_visit_utf8, _bson_as_json_visit_document, _bson_as_json_visit_array, _bson_as_json_visit_binary, _bson_as_json_visit_undefined, _bson_as_json_visit_oid, _bson_as_json_visit_bool, _bson_as_json_visit_date_time, _bson_as_json_visit_null, _bson_as_json_visit_regex, _bson_as_json_visit_dbpointer, _bson_as_json_visit_code, _bson_as_json_visit_symbol, _bson_as_json_visit_codewscope, _bson_as_json_visit_int32, _bson_as_json_visit_timestamp, _bson_as_json_visit_int64, _bson_as_json_visit_maxkey, _bson_as_json_visit_minkey, NULL, 
/* visit_unsupported_type */ _bson_as_json_visit_decimal128, }; static bool _bson_as_json_visit_document (const bson_iter_t *iter, const char *key, const bson_t *v_document, void *data) { bson_json_state_t *state = data; bson_json_state_t child_state = {0, true, state->err_offset}; bson_iter_t child; if (state->depth >= BSON_MAX_RECURSION) { bson_string_append (state->str, "{ ... }"); return false; } if (bson_iter_init (&child, v_document)) { child_state.str = bson_string_new ("{ "); child_state.depth = state->depth + 1; child_state.mode = state->mode; if (bson_iter_visit_all (&child, &bson_as_json_visitors, &child_state)) { bson_string_free (child_state.str, true); return true; } bson_string_append (child_state.str, " }"); bson_string_append (state->str, child_state.str->str); bson_string_free (child_state.str, true); } return false; } static bool _bson_as_json_visit_array (const bson_iter_t *iter, const char *key, const bson_t *v_array, void *data) { bson_json_state_t *state = data; bson_json_state_t child_state = {0, false, state->err_offset}; bson_iter_t child; if (state->depth >= BSON_MAX_RECURSION) { bson_string_append (state->str, "{ ... }"); return false; } if (bson_iter_init (&child, v_array)) { child_state.str = bson_string_new ("[ "); child_state.depth = state->depth + 1; child_state.mode = state->mode; if (bson_iter_visit_all (&child, &bson_as_json_visitors, &child_state)) { bson_string_free (child_state.str, true); return true; } bson_string_append (child_state.str, " ]"); bson_string_append (state->str, child_state.str->str); bson_string_free (child_state.str, true); } return false; } static char * _bson_as_json_visit_all (const bson_t *bson, size_t *length, bson_json_mode_t mode) { bson_json_state_t state; bson_iter_t iter; ssize_t err_offset = -1; BSON_ASSERT (bson); if (length) { *length = 0; } if (bson_empty0 (bson)) { if (length) { *length = 3; } return bson_strdup ("{ }"); } if (!bson_iter_init (&iter, bson)) { return NULL; } state.count = 0; state.keys = true; state.str = bson_string_new ("{ "); state.depth = 0; state.err_offset = &err_offset; state.mode = mode; if (bson_iter_visit_all (&iter, &bson_as_json_visitors, &state) || err_offset != -1) { /* * We were prematurely exited due to corruption or failed visitor. */ bson_string_free (state.str, true); if (length) { *length = 0; } return NULL; } bson_string_append (state.str, " }"); if (length) { *length = state.str->len; } return bson_string_free (state.str, false); } char * bson_as_canonical_extended_json (const bson_t *bson, size_t *length) { return _bson_as_json_visit_all (bson, length, BSON_JSON_MODE_CANONICAL); } char * bson_as_json (const bson_t *bson, size_t *length) { return _bson_as_json_visit_all (bson, length, BSON_JSON_MODE_LEGACY); } char * bson_as_relaxed_extended_json (const bson_t *bson, size_t *length) { return _bson_as_json_visit_all (bson, length, BSON_JSON_MODE_RELAXED); } char * bson_array_as_json (const bson_t *bson, size_t *length) { bson_json_state_t state; bson_iter_t iter; ssize_t err_offset = -1; BSON_ASSERT (bson); if (length) { *length = 0; } if (bson_empty0 (bson)) { if (length) { *length = 3; } return bson_strdup ("[ ]"); } if (!bson_iter_init (&iter, bson)) { return NULL; } state.count = 0; state.keys = false; state.str = bson_string_new ("[ "); state.depth = 0; state.err_offset = &err_offset; state.mode = BSON_JSON_MODE_LEGACY; if (bson_iter_visit_all (&iter, &bson_as_json_visitors, &state) || err_offset != -1) { /* * We were prematurely exited due to corruption or failed visitor. 
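 * In that case the partially built string is freed and NULL is returned, matching the behaviour of _bson_as_json_visit_all.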
*/ bson_string_free (state.str, true); if (length) { *length = 0; } return NULL; } bson_string_append (state.str, " ]"); if (length) { *length = state.str->len; } return bson_string_free (state.str, false); } #define VALIDATION_ERR(_flag, _msg, ...) \ bson_set_error (&state->error, BSON_ERROR_INVALID, _flag, _msg, __VA_ARGS__) static bool _bson_iter_validate_utf8 (const bson_iter_t *iter, const char *key, size_t v_utf8_len, const char *v_utf8, void *data) { bson_validate_state_t *state = data; bool allow_null; if ((state->flags & BSON_VALIDATE_UTF8)) { allow_null = !!(state->flags & BSON_VALIDATE_UTF8_ALLOW_NULL); if (!bson_utf8_validate (v_utf8, v_utf8_len, allow_null)) { state->err_offset = iter->off; VALIDATION_ERR ( BSON_VALIDATE_UTF8, "invalid utf8 string for key \"%s\"", key); return true; } } if ((state->flags & BSON_VALIDATE_DOLLAR_KEYS)) { if (state->phase == BSON_VALIDATE_PHASE_LF_REF_UTF8) { state->phase = BSON_VALIDATE_PHASE_LF_ID_KEY; } else if (state->phase == BSON_VALIDATE_PHASE_LF_DB_UTF8) { state->phase = BSON_VALIDATE_PHASE_NOT_DBREF; } } return false; } static void _bson_iter_validate_corrupt (const bson_iter_t *iter, void *data) { bson_validate_state_t *state = data; state->err_offset = iter->err_off; VALIDATION_ERR (BSON_VALIDATE_NONE, "%s", "corrupt BSON"); } static bool _bson_iter_validate_before (const bson_iter_t *iter, const char *key, void *data) { bson_validate_state_t *state = data; if ((state->flags & BSON_VALIDATE_EMPTY_KEYS)) { if (key[0] == '\0') { state->err_offset = iter->off; VALIDATION_ERR (BSON_VALIDATE_EMPTY_KEYS, "%s", "empty key"); return true; } } if ((state->flags & BSON_VALIDATE_DOLLAR_KEYS)) { if (key[0] == '$') { if (state->phase == BSON_VALIDATE_PHASE_LF_REF_KEY && strcmp (key, "$ref") == 0) { state->phase = BSON_VALIDATE_PHASE_LF_REF_UTF8; } else if (state->phase == BSON_VALIDATE_PHASE_LF_ID_KEY && strcmp (key, "$id") == 0) { state->phase = BSON_VALIDATE_PHASE_LF_DB_KEY; } else if (state->phase == BSON_VALIDATE_PHASE_LF_DB_KEY && strcmp (key, "$db") == 0) { state->phase = BSON_VALIDATE_PHASE_LF_DB_UTF8; } else { state->err_offset = iter->off; VALIDATION_ERR (BSON_VALIDATE_DOLLAR_KEYS, "keys cannot begin with \"$\": \"%s\"", key); return true; } } else if (state->phase == BSON_VALIDATE_PHASE_LF_ID_KEY || state->phase == BSON_VALIDATE_PHASE_LF_REF_UTF8 || state->phase == BSON_VALIDATE_PHASE_LF_DB_UTF8) { state->err_offset = iter->off; VALIDATION_ERR (BSON_VALIDATE_DOLLAR_KEYS, "invalid key within DBRef subdocument: \"%s\"", key); return true; } else { state->phase = BSON_VALIDATE_PHASE_NOT_DBREF; } } if ((state->flags & BSON_VALIDATE_DOT_KEYS)) { if (strstr (key, ".")) { state->err_offset = iter->off; VALIDATION_ERR ( BSON_VALIDATE_DOT_KEYS, "keys cannot contain \".\": \"%s\"", key); return true; } } return false; } static bool _bson_iter_validate_codewscope (const bson_iter_t *iter, const char *key, size_t v_code_len, const char *v_code, const bson_t *v_scope, void *data) { bson_validate_state_t *state = data; size_t offset = 0; if (!bson_validate (v_scope, state->flags, &offset)) { state->err_offset = iter->off + offset; VALIDATION_ERR (BSON_VALIDATE_NONE, "%s", "corrupt code-with-scope"); return false; } return true; } static bool _bson_iter_validate_document (const bson_iter_t *iter, const char *key, const bson_t *v_document, void *data); static const bson_visitor_t bson_validate_funcs = { _bson_iter_validate_before, NULL, /* visit_after */ _bson_iter_validate_corrupt, NULL, /* visit_double */ _bson_iter_validate_utf8, 
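/* visit_utf8; _bson_iter_validate_document below is reused for both visit_document and visit_array */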
_bson_iter_validate_document, _bson_iter_validate_document, /* visit_array */ NULL, /* visit_binary */ NULL, /* visit_undefined */ NULL, /* visit_oid */ NULL, /* visit_bool */ NULL, /* visit_date_time */ NULL, /* visit_null */ NULL, /* visit_regex */ NULL, /* visit_dbpoint */ NULL, /* visit_code */ NULL, /* visit_symbol */ _bson_iter_validate_codewscope, }; static bool _bson_iter_validate_document (const bson_iter_t *iter, const char *key, const bson_t *v_document, void *data) { bson_validate_state_t *state = data; bson_iter_t child; bson_validate_phase_t phase = state->phase; if (!bson_iter_init (&child, v_document)) { state->err_offset = iter->off; return true; } if (state->phase == BSON_VALIDATE_PHASE_START) { state->phase = BSON_VALIDATE_PHASE_TOP; } else { state->phase = BSON_VALIDATE_PHASE_LF_REF_KEY; } (void) bson_iter_visit_all (&child, &bson_validate_funcs, state); if (state->phase == BSON_VALIDATE_PHASE_LF_ID_KEY || state->phase == BSON_VALIDATE_PHASE_LF_REF_UTF8 || state->phase == BSON_VALIDATE_PHASE_LF_DB_UTF8) { if (state->err_offset <= 0) { state->err_offset = iter->off; } return true; } state->phase = phase; return false; } static void _bson_validate_internal (const bson_t *bson, bson_validate_state_t *state) { bson_iter_t iter; state->err_offset = -1; state->phase = BSON_VALIDATE_PHASE_START; memset (&state->error, 0, sizeof state->error); if (!bson_iter_init (&iter, bson)) { state->err_offset = 0; VALIDATION_ERR (BSON_VALIDATE_NONE, "%s", "corrupt BSON"); } else { _bson_iter_validate_document (&iter, NULL, bson, state); } } bool bson_validate (const bson_t *bson, bson_validate_flags_t flags, size_t *offset) { bson_validate_state_t state; state.flags = flags; _bson_validate_internal (bson, &state); if (state.err_offset > 0 && offset) { *offset = (size_t) state.err_offset; } return state.err_offset < 0; } bool bson_validate_with_error (const bson_t *bson, bson_validate_flags_t flags, bson_error_t *error) { bson_validate_state_t state; state.flags = flags; _bson_validate_internal (bson, &state); if (state.err_offset > 0 && error) { memcpy (error, &state.error, sizeof *error); } return state.err_offset < 0; } bool bson_concat (bson_t *dst, const bson_t *src) { BSON_ASSERT (dst); BSON_ASSERT (src); if (!bson_empty (src)) { return _bson_append ( dst, 1, src->len - 5, src->len - 5, _bson_data (src) + 4); } return true; } mongodb-1.6.1/src/libmongoc/src/libbson/src/bson/bson.h0000644000076500000240000010167513572250757022314 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef BSON_H #define BSON_H #define BSON_INSIDE #include "bson/bson-compat.h" #include #include #include "bson/bson-macros.h" #include "bson/bson-config.h" #include "bson/bson-atomic.h" #include "bson/bson-context.h" #include "bson/bson-clock.h" #include "bson/bson-decimal128.h" #include "bson/bson-error.h" #include "bson/bson-iter.h" #include "bson/bson-json.h" #include "bson/bson-keys.h" #include "bson/bson-md5.h" #include "bson/bson-memory.h" #include "bson/bson-oid.h" #include "bson/bson-reader.h" #include "bson/bson-string.h" #include "bson/bson-types.h" #include "bson/bson-utf8.h" #include "bson/bson-value.h" #include "bson/bson-version.h" #include "bson/bson-version-functions.h" #include "bson/bson-writer.h" #include "bson/bcon.h" #undef BSON_INSIDE BSON_BEGIN_DECLS /** * bson_empty: * @b: a bson_t. * * Checks to see if @b is an empty BSON document. An empty BSON document is * a 5 byte document which contains the length (4 bytes) and a single NUL * byte indicating end of fields. */ #define bson_empty(b) (((b)->len == 5) || !bson_get_data ((b))[4]) /** * bson_empty0: * * Like bson_empty() but treats NULL the same as an empty bson_t document. */ #define bson_empty0(b) (!(b) || bson_empty (b)) /** * bson_clear: * * Easily free a bson document and set it to NULL. Use like: * * bson_t *doc = bson_new(); * bson_clear (&doc); * BSON_ASSERT (doc == NULL); */ #define bson_clear(bptr) \ do { \ if (*(bptr)) { \ bson_destroy (*(bptr)); \ *(bptr) = NULL; \ } \ } while (0) /** * BSON_MAX_SIZE: * * The maximum size in bytes of a BSON document. */ #define BSON_MAX_SIZE ((size_t) ((1U << 31) - 1)) #define BSON_APPEND_ARRAY(b, key, val) \ bson_append_array (b, key, (int) strlen (key), val) #define BSON_APPEND_ARRAY_BEGIN(b, key, child) \ bson_append_array_begin (b, key, (int) strlen (key), child) #define BSON_APPEND_BINARY(b, key, subtype, val, len) \ bson_append_binary (b, key, (int) strlen (key), subtype, val, len) #define BSON_APPEND_BOOL(b, key, val) \ bson_append_bool (b, key, (int) strlen (key), val) #define BSON_APPEND_CODE(b, key, val) \ bson_append_code (b, key, (int) strlen (key), val) #define BSON_APPEND_CODE_WITH_SCOPE(b, key, val, scope) \ bson_append_code_with_scope (b, key, (int) strlen (key), val, scope) #define BSON_APPEND_DBPOINTER(b, key, coll, oid) \ bson_append_dbpointer (b, key, (int) strlen (key), coll, oid) #define BSON_APPEND_DOCUMENT_BEGIN(b, key, child) \ bson_append_document_begin (b, key, (int) strlen (key), child) #define BSON_APPEND_DOUBLE(b, key, val) \ bson_append_double (b, key, (int) strlen (key), val) #define BSON_APPEND_DOCUMENT(b, key, val) \ bson_append_document (b, key, (int) strlen (key), val) #define BSON_APPEND_INT32(b, key, val) \ bson_append_int32 (b, key, (int) strlen (key), val) #define BSON_APPEND_INT64(b, key, val) \ bson_append_int64 (b, key, (int) strlen (key), val) #define BSON_APPEND_MINKEY(b, key) \ bson_append_minkey (b, key, (int) strlen (key)) #define BSON_APPEND_DECIMAL128(b, key, val) \ bson_append_decimal128 (b, key, (int) strlen (key), val) #define BSON_APPEND_MAXKEY(b, key) \ bson_append_maxkey (b, key, (int) strlen (key)) #define BSON_APPEND_NULL(b, key) bson_append_null (b, key, (int) strlen (key)) #define BSON_APPEND_OID(b, key, val) \ bson_append_oid (b, key, (int) strlen (key), val) #define BSON_APPEND_REGEX(b, key, val, opt) \ bson_append_regex (b, key, (int) strlen (key), val, opt) #define BSON_APPEND_UTF8(b, key, val) \ bson_append_utf8 (b, key, (int) strlen (key), val, (int) strlen (val)) #define BSON_APPEND_SYMBOL(b, key, 
val) \ bson_append_symbol (b, key, (int) strlen (key), val, (int) strlen (val)) #define BSON_APPEND_TIME_T(b, key, val) \ bson_append_time_t (b, key, (int) strlen (key), val) #define BSON_APPEND_TIMEVAL(b, key, val) \ bson_append_timeval (b, key, (int) strlen (key), val) #define BSON_APPEND_DATE_TIME(b, key, val) \ bson_append_date_time (b, key, (int) strlen (key), val) #define BSON_APPEND_TIMESTAMP(b, key, val, inc) \ bson_append_timestamp (b, key, (int) strlen (key), val, inc) #define BSON_APPEND_UNDEFINED(b, key) \ bson_append_undefined (b, key, (int) strlen (key)) #define BSON_APPEND_VALUE(b, key, val) \ bson_append_value (b, key, (int) strlen (key), (val)) /** * bson_new: * * Allocates a new bson_t structure. Call the various bson_append_*() * functions to add fields to the bson. You can iterate the bson_t at any * time using a bson_iter_t and bson_iter_init(). * * Returns: A newly allocated bson_t that should be freed with bson_destroy(). */ BSON_EXPORT (bson_t *) bson_new (void); BSON_EXPORT (bson_t *) bson_new_from_json (const uint8_t *data, ssize_t len, bson_error_t *error); BSON_EXPORT (bool) bson_init_from_json (bson_t *bson, const char *data, ssize_t len, bson_error_t *error); /** * bson_init_static: * @b: A pointer to a bson_t. * @data: The data buffer to use. * @length: The length of @data. * * Initializes a bson_t using @data and @length. This is ideal if you would * like to use a stack allocation for your bson and do not need to grow the * buffer. @data must be valid for the life of @b. * * Returns: true if initialized successfully; otherwise false. */ BSON_EXPORT (bool) bson_init_static (bson_t *b, const uint8_t *data, size_t length); /** * bson_init: * @b: A pointer to a bson_t. * * Initializes a bson_t for use. This function is useful to those that want a * stack allocated bson_t. The usefulness of a stack allocated bson_t is * marginal as the target buffer for content will still require heap * allocations. It can help reduce heap fragmentation on allocators that do * not employ SLAB/magazine semantics. * * You must call bson_destroy() with @b to release resources when you are done * using @b. */ BSON_EXPORT (void) bson_init (bson_t *b); /** * bson_reinit: * @b: (inout): A bson_t. * * This is equivalent to calling bson_destroy() and bson_init() on a #bson_t. * However, it will try to persist the existing malloc'd buffer if one exists. * This is useful in cases where you want to reduce malloc overhead while * building many documents. */ BSON_EXPORT (void) bson_reinit (bson_t *b); /** * bson_new_from_data: * @data: A buffer containing a serialized bson document. * @length: The length of the document in bytes. * * Creates a new bson_t structure using the data provided. @data should contain * at least @length bytes that can be copied into the new bson_t structure. * * Returns: A newly allocated bson_t that should be freed with bson_destroy(). * If the first four bytes (little-endian) of data do not match @length, * then NULL will be returned. */ BSON_EXPORT (bson_t *) bson_new_from_data (const uint8_t *data, size_t length); /** * bson_new_from_buffer: * @buf: A pointer to a buffer containing a serialized bson document. * @buf_len: The length of the buffer in bytes. * @realloc_fun: a realloc like function * @realloc_fun_ctx: a context for the realloc function * * Creates a new bson_t structure using the data provided. @buf should contain * a bson document, or null pointer should be passed for new allocations. 
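 * A minimal usage sketch (illustrative only; it assumes libbson's stock bson_realloc_ctx helper as the realloc function):
 *
 *    uint8_t *buf = NULL;
 *    size_t buf_len = 0;
 *    bson_t *doc = bson_new_from_buffer (&buf, &buf_len, bson_realloc_ctx, NULL);
 *    BSON_APPEND_INT32 (doc, "answer", 42);
 *    bson_destroy (doc);
 *    bson_free (buf);
 *
 * The buffer is reallocated through @realloc_fun as the document grows, and the caller remains responsible for freeing it, as noted below.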
* * Returns: A newly allocated bson_t that should be freed with bson_destroy(). * The underlying buffer will be used and not be freed in destroy. */ BSON_EXPORT (bson_t *) bson_new_from_buffer (uint8_t **buf, size_t *buf_len, bson_realloc_func realloc_func, void *realloc_func_ctx); /** * bson_sized_new: * @size: A size_t containing the number of bytes to allocate. * * This will allocate a new bson_t with enough bytes to hold a buffer * sized @size. @size must be smaller than INT_MAX bytes. * * Returns: A newly allocated bson_t that should be freed with bson_destroy(). */ BSON_EXPORT (bson_t *) bson_sized_new (size_t size); /** * bson_copy: * @bson: A bson_t. * * Copies @bson into a newly allocated bson_t. You must call bson_destroy() * when you are done with the resulting value to free its resources. * * Returns: A newly allocated bson_t that should be free'd with bson_destroy() */ BSON_EXPORT (bson_t *) bson_copy (const bson_t *bson); /** * bson_copy_to: * @src: The source bson_t. * @dst: The destination bson_t. * * Initializes @dst and copies the content from @src into @dst. */ BSON_EXPORT (void) bson_copy_to (const bson_t *src, bson_t *dst); /** * bson_copy_to_excluding: * @src: A bson_t. * @dst: A bson_t to initialize and copy into. * @first_exclude: First field name to exclude. * * Copies @src into @dst excluding any field that is provided. * This is handy for situations when you need to remove one or * more fields in a bson_t. Note that bson_init() will be called * on dst. */ BSON_EXPORT (void) bson_copy_to_excluding (const bson_t *src, bson_t *dst, const char *first_exclude, ...) BSON_GNUC_NULL_TERMINATED BSON_GNUC_DEPRECATED_FOR (bson_copy_to_excluding_noinit); /** * bson_copy_to_excluding_noinit: * @src: A bson_t. * @dst: A bson_t to initialize and copy into. * @first_exclude: First field name to exclude. * * The same as bson_copy_to_excluding, but does not call bson_init() * on the dst. This version should be preferred in new code, but the * old function is left for backwards compatibility. */ BSON_EXPORT (void) bson_copy_to_excluding_noinit (const bson_t *src, bson_t *dst, const char *first_exclude, ...) BSON_GNUC_NULL_TERMINATED; BSON_EXPORT (void) bson_copy_to_excluding_noinit_va (const bson_t *src, bson_t *dst, const char *first_exclude, va_list args); /** * bson_destroy: * @bson: A bson_t. * * Frees the resources associated with @bson. */ BSON_EXPORT (void) bson_destroy (bson_t *bson); BSON_EXPORT (uint8_t *) bson_reserve_buffer (bson_t *bson, uint32_t size); BSON_EXPORT (bool) bson_steal (bson_t *dst, bson_t *src); /** * bson_destroy_with_steal: * @bson: A #bson_t. * @steal: If ownership of the data buffer should be transferred to caller. * @length: (out): location for the length of the buffer. * * Destroys @bson similar to calling bson_destroy() except that the underlying * buffer will be returned and ownership transferred to the caller if @steal * is non-zero. * * If length is non-NULL, the length of @bson will be stored in @length. * * It is a programming error to call this function with any bson that has * been initialized static, or is being used to create a subdocument with * functions such as bson_append_document_begin() or bson_append_array_begin(). * * Returns: a buffer owned by the caller if @steal is true. Otherwise NULL. * If there was an error, NULL is returned. */ BSON_EXPORT (uint8_t *) bson_destroy_with_steal (bson_t *bson, bool steal, uint32_t *length); /** * bson_get_data: * @bson: A bson_t. 
* * Fetched the data buffer for @bson of @bson->len bytes in length. * * Returns: A buffer that should not be modified or freed. */ BSON_EXPORT (const uint8_t *) bson_get_data (const bson_t *bson); /** * bson_count_keys: * @bson: A bson_t. * * Counts the number of elements found in @bson. */ BSON_EXPORT (uint32_t) bson_count_keys (const bson_t *bson); /** * bson_has_field: * @bson: A bson_t. * @key: The key to lookup. * * Checks to see if @bson contains a field named @key. * * This function is case-sensitive. * * Returns: true if @key exists in @bson; otherwise false. */ BSON_EXPORT (bool) bson_has_field (const bson_t *bson, const char *key); /** * bson_compare: * @bson: A bson_t. * @other: A bson_t. * * Compares @bson to @other in a qsort() style comparison. * See qsort() for information on how this function works. * * Returns: Less than zero, zero, or greater than zero. */ BSON_EXPORT (int) bson_compare (const bson_t *bson, const bson_t *other); /* * bson_compare: * @bson: A bson_t. * @other: A bson_t. * * Checks to see if @bson and @other are equal. * * Returns: true if equal; otherwise false. */ BSON_EXPORT (bool) bson_equal (const bson_t *bson, const bson_t *other); /** * bson_validate: * @bson: A bson_t. * @offset: A location for the error offset. * * Validates a BSON document by walking through the document and inspecting * the fields for valid content. * * Returns: true if @bson is valid; otherwise false and @offset is set. */ BSON_EXPORT (bool) bson_validate (const bson_t *bson, bson_validate_flags_t flags, size_t *offset); /** * bson_validate_with_error: * @bson: A bson_t. * @error: A location for the error info. * * Validates a BSON document by walking through the document and inspecting * the fields for valid content. * * Returns: true if @bson is valid; otherwise false and @error is filled out. */ BSON_EXPORT (bool) bson_validate_with_error (const bson_t *bson, bson_validate_flags_t flags, bson_error_t *error); /** * bson_as_canonical_extended_json: * @bson: A bson_t. * @length: A location for the string length, or NULL. * * Creates a new string containing @bson in canonical extended JSON format, * conforming to the MongoDB Extended JSON Spec: * * github.com/mongodb/specifications/blob/master/source/extended-json.rst * * The caller is responsible for freeing the resulting string. If @length is * non-NULL, then the length of the resulting string will be placed in @length. * * See http://docs.mongodb.org/manual/reference/mongodb-extended-json/ for * more information on extended JSON. * * Returns: A newly allocated string that should be freed with bson_free(). */ BSON_EXPORT (char *) bson_as_canonical_extended_json (const bson_t *bson, size_t *length); /** * bson_as_json: * @bson: A bson_t. * @length: A location for the string length, or NULL. * * Creates a new string containing @bson in libbson's legacy JSON format. * Superseded by bson_as_canonical_extended_json and * bson_as_relaxed_extended_json. The caller is * responsible for freeing the resulting string. If @length is non-NULL, then * the length of the resulting string will be placed in @length. * * Returns: A newly allocated string that should be freed with bson_free(). */ BSON_EXPORT (char *) bson_as_json (const bson_t *bson, size_t *length); /** * bson_as_relaxed_extended_json: * @bson: A bson_t. * @length: A location for the string length, or NULL. 
* * Creates a new string containing @bson in relaxed extended JSON format, * conforming to the MongoDB Extended JSON Spec: * * github.com/mongodb/specifications/blob/master/source/extended-json.rst * * The caller is responsible for freeing the resulting string. If @length is * non-NULL, then the length of the resulting string will be placed in @length. * * See http://docs.mongodb.org/manual/reference/mongodb-extended-json/ for * more information on extended JSON. * * Returns: A newly allocated string that should be freed with bson_free(). */ BSON_EXPORT (char *) bson_as_relaxed_extended_json (const bson_t *bson, size_t *length); /* like bson_as_json() but for outermost arrays. */ BSON_EXPORT (char *) bson_array_as_json (const bson_t *bson, size_t *length); BSON_EXPORT (bool) bson_append_value (bson_t *bson, const char *key, int key_length, const bson_value_t *value); /** * bson_append_array: * @bson: A bson_t. * @key: The key for the field. * @array: A bson_t containing the array. * * Appends a BSON array to @bson. BSON arrays are like documents where the * key is the string version of the index. For example, the first item of the * array would have the key "0". The second item would have the index "1". * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_array (bson_t *bson, const char *key, int key_length, const bson_t *array); /** * bson_append_binary: * @bson: A bson_t to append. * @key: The key for the field. * @subtype: The bson_subtype_t of the binary. * @binary: The binary buffer to append. * @length: The length of @binary. * * Appends a binary buffer to the BSON document. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_binary (bson_t *bson, const char *key, int key_length, bson_subtype_t subtype, const uint8_t *binary, uint32_t length); /** * bson_append_bool: * @bson: A bson_t. * @key: The key for the field. * @value: The boolean value. * * Appends a new field to @bson of type BSON_TYPE_BOOL. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_bool (bson_t *bson, const char *key, int key_length, bool value); /** * bson_append_code: * @bson: A bson_t. * @key: The key for the document. * @javascript: JavaScript code to be executed. * * Appends a field of type BSON_TYPE_CODE to the BSON document. @javascript * should contain a script in javascript to be executed. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_code (bson_t *bson, const char *key, int key_length, const char *javascript); /** * bson_append_code_with_scope: * @bson: A bson_t. * @key: The key for the document. * @javascript: JavaScript code to be executed. * @scope: A bson_t containing the scope for @javascript. * * Appends a field of type BSON_TYPE_CODEWSCOPE to the BSON document. * @javascript should contain a script in javascript to be executed. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_code_with_scope (bson_t *bson, const char *key, int key_length, const char *javascript, const bson_t *scope); /** * bson_append_dbpointer: * @bson: A bson_t. * @key: The key for the field. * @collection: The collection name. * @oid: The oid to the reference. * * Appends a new field of type BSON_TYPE_DBPOINTER. This datum type is * deprecated in the BSON spec and should not be used in new code. 
* * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_dbpointer (bson_t *bson, const char *key, int key_length, const char *collection, const bson_oid_t *oid); /** * bson_append_double: * @bson: A bson_t. * @key: The key for the field. * * Appends a new field to @bson of the type BSON_TYPE_DOUBLE. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_double (bson_t *bson, const char *key, int key_length, double value); /** * bson_append_document: * @bson: A bson_t. * @key: The key for the field. * @value: A bson_t containing the subdocument. * * Appends a new field to @bson of the type BSON_TYPE_DOCUMENT. * The documents contents will be copied into @bson. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_document (bson_t *bson, const char *key, int key_length, const bson_t *value); /** * bson_append_document_begin: * @bson: A bson_t. * @key: The key for the field. * @key_length: The length of @key in bytes not including NUL or -1 * if @key_length is NUL terminated. * @child: A location to an uninitialized bson_t. * * Appends a new field named @key to @bson. The field is, however, * incomplete. @child will be initialized so that you may add fields to the * child document. Child will use a memory buffer owned by @bson and * therefore grow the parent buffer as additional space is used. This allows * a single malloc'd buffer to be used when building documents which can help * reduce memory fragmentation. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_document_begin (bson_t *bson, const char *key, int key_length, bson_t *child); /** * bson_append_document_end: * @bson: A bson_t. * @child: A bson_t supplied to bson_append_document_begin(). * * Finishes the appending of a document to a @bson. @child is considered * disposed after this call and should not be used any further. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_document_end (bson_t *bson, bson_t *child); /** * bson_append_array_begin: * @bson: A bson_t. * @key: The key for the field. * @key_length: The length of @key in bytes not including NUL or -1 * if @key_length is NUL terminated. * @child: A location to an uninitialized bson_t. * * Appends a new field named @key to @bson. The field is, however, * incomplete. @child will be initialized so that you may add fields to the * child array. Child will use a memory buffer owned by @bson and * therefore grow the parent buffer as additional space is used. This allows * a single malloc'd buffer to be used when building arrays which can help * reduce memory fragmentation. * * The type of @child will be BSON_TYPE_ARRAY and therefore the keys inside * of it MUST be "0", "1", etc. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_array_begin (bson_t *bson, const char *key, int key_length, bson_t *child); /** * bson_append_array_end: * @bson: A bson_t. * @child: A bson_t supplied to bson_append_array_begin(). * * Finishes the appending of a array to a @bson. @child is considered * disposed after this call and should not be used any further. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_array_end (bson_t *bson, bson_t *child); /** * bson_append_int32: * @bson: A bson_t. 
* @key: The key for the field. * @value: The int32_t 32-bit integer value. * * Appends a new field of type BSON_TYPE_INT32 to @bson. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_int32 (bson_t *bson, const char *key, int key_length, int32_t value); /** * bson_append_int64: * @bson: A bson_t. * @key: The key for the field. * @value: The int64_t 64-bit integer value. * * Appends a new field of type BSON_TYPE_INT64 to @bson. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_int64 (bson_t *bson, const char *key, int key_length, int64_t value); /** * bson_append_decimal128: * @bson: A bson_t. * @key: The key for the field. * @value: The bson_decimal128_t decimal128 value. * * Appends a new field of type BSON_TYPE_DECIMAL128 to @bson. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_decimal128 (bson_t *bson, const char *key, int key_length, const bson_decimal128_t *value); /** * bson_append_iter: * @bson: A bson_t to append to. * @key: The key name or %NULL to take current key from @iter. * @key_length: The key length or -1 to use strlen(). * @iter: The iter located on the position of the element to append. * * Appends a new field to @bson that is equivalent to the field currently * pointed to by @iter. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_iter (bson_t *bson, const char *key, int key_length, const bson_iter_t *iter); /** * bson_append_minkey: * @bson: A bson_t. * @key: The key for the field. * * Appends a new field of type BSON_TYPE_MINKEY to @bson. This is a special * type that compares lower than all other possible BSON element values. * * See http://bsonspec.org for more information on this type. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_minkey (bson_t *bson, const char *key, int key_length); /** * bson_append_maxkey: * @bson: A bson_t. * @key: The key for the field. * * Appends a new field of type BSON_TYPE_MAXKEY to @bson. This is a special * type that compares higher than all other possible BSON element values. * * See http://bsonspec.org for more information on this type. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_maxkey (bson_t *bson, const char *key, int key_length); /** * bson_append_null: * @bson: A bson_t. * @key: The key for the field. * * Appends a new field to @bson with NULL for the value. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_null (bson_t *bson, const char *key, int key_length); /** * bson_append_oid: * @bson: A bson_t. * @key: The key for the field. * @oid: bson_oid_t. * * Appends a new field to the @bson of type BSON_TYPE_OID using the contents of * @oid. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_oid (bson_t *bson, const char *key, int key_length, const bson_oid_t *oid); /** * bson_append_regex: * @bson: A bson_t. * @key: The key of the field. * @regex: The regex to append to the bson. * @options: Options for @regex. * * Appends a new field to @bson of type BSON_TYPE_REGEX. @regex should * be the regex string. @options should contain the options for the regex. * * Valid options for @options are: * * 'i' for case-insensitive. * 'm' for multiple matching. 
* 'x' for verbose mode. * 'l' to make \w and \W locale dependent. * 's' for dotall mode ('.' matches everything) * 'u' to make \w and \W match unicode. * * For more detailed information about BSON regex elements, see bsonspec.org. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_regex (bson_t *bson, const char *key, int key_length, const char *regex, const char *options); /** * bson_append_regex: * @bson: A bson_t. * @key: The key of the field. * @key_length: The length of the key string. * @regex: The regex to append to the bson. * @regex_length: The length of the regex string. * @options: Options for @regex. * * Appends a new field to @bson of type BSON_TYPE_REGEX. @regex should * be the regex string. @options should contain the options for the regex. * * Valid options for @options are: * * 'i' for case-insensitive. * 'm' for multiple matching. * 'x' for verbose mode. * 'l' to make \w and \W locale dependent. * 's' for dotall mode ('.' matches everything) * 'u' to make \w and \W match unicode. * * For more detailed information about BSON regex elements, see bsonspec.org. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_regex_w_len (bson_t *bson, const char *key, int key_length, const char *regex, int regex_length, const char *options); /** * bson_append_utf8: * @bson: A bson_t. * @key: The key for the field. * @value: A UTF-8 encoded string. * @length: The length of @value or -1 if it is NUL terminated. * * Appends a new field to @bson using @key as the key and @value as the UTF-8 * encoded value. * * It is the callers responsibility to ensure @value is valid UTF-8. You can * use bson_utf8_validate() to perform this check. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_utf8 (bson_t *bson, const char *key, int key_length, const char *value, int length); /** * bson_append_symbol: * @bson: A bson_t. * @key: The key for the field. * @value: The symbol as a string. * @length: The length of @value or -1 if NUL-terminated. * * Appends a new field to @bson of type BSON_TYPE_SYMBOL. This BSON type is * deprecated and should not be used in new code. * * See http://bsonspec.org for more information on this type. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_symbol (bson_t *bson, const char *key, int key_length, const char *value, int length); /** * bson_append_time_t: * @bson: A bson_t. * @key: The key for the field. * @value: A time_t. * * Appends a BSON_TYPE_DATE_TIME field to @bson using the time_t @value for the * number of seconds since UNIX epoch in UTC. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_time_t (bson_t *bson, const char *key, int key_length, time_t value); /** * bson_append_timeval: * @bson: A bson_t. * @key: The key for the field. * @value: A struct timeval containing the date and time. * * Appends a BSON_TYPE_DATE_TIME field to @bson using the struct timeval * provided. The time is persisted in milliseconds since the UNIX epoch in UTC. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_timeval (bson_t *bson, const char *key, int key_length, struct timeval *value); /** * bson_append_date_time: * @bson: A bson_t. * @key: The key for the field. * @key_length: The length of @key in bytes or -1 if \0 terminated. 
* @value: The number of milliseconds elapsed since UNIX epoch. * * Appends a new field to @bson of type BSON_TYPE_DATE_TIME. * * Returns: true if successful; otherwise false. */ BSON_EXPORT (bool) bson_append_date_time (bson_t *bson, const char *key, int key_length, int64_t value); /** * bson_append_now_utc: * @bson: A bson_t. * @key: The key for the field. * @key_length: The length of @key or -1 if it is NULL terminated. * * Appends a BSON_TYPE_DATE_TIME field to @bson using the current time in UTC * as the field value. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_now_utc (bson_t *bson, const char *key, int key_length); /** * bson_append_timestamp: * @bson: A bson_t. * @key: The key for the field. * @timestamp: 4 byte timestamp. * @increment: 4 byte increment for timestamp. * * Appends a field of type BSON_TYPE_TIMESTAMP to @bson. This is a special type * used by MongoDB replication and sharding. If you need generic time and date * fields use bson_append_time_t() or bson_append_timeval(). * * Setting @increment and @timestamp to zero has special semantics. See * http://bsonspec.org for more information on this field type. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_timestamp (bson_t *bson, const char *key, int key_length, uint32_t timestamp, uint32_t increment); /** * bson_append_undefined: * @bson: A bson_t. * @key: The key for the field. * * Appends a field of type BSON_TYPE_UNDEFINED. This type is deprecated in the * spec and should not be used for new code. However, it is provided for those * needing to interact with legacy systems. * * Returns: true if successful; false if append would overflow max size. */ BSON_EXPORT (bool) bson_append_undefined (bson_t *bson, const char *key, int key_length); BSON_EXPORT (bool) bson_concat (bson_t *dst, const bson_t *src); BSON_END_DECLS #endif /* BSON_H */ mongodb-1.6.1/src/libmongoc/src/libbson/src/jsonsl/jsonsl.c0000644000076500000240000015021313572250757023215 0ustar alcaeusstaff/* Copyright (C) 2012-2015 Mark Nunberg. * * See included LICENSE file for license details. 
*/ #include "jsonsl.h" #include "bson/bson-memory.h" #include #include #ifdef JSONSL_USE_METRICS #define XMETRICS \ X(STRINGY_INSIGNIFICANT) \ X(STRINGY_SLOWPATH) \ X(ALLOWED_WHITESPACE) \ X(QUOTE_FASTPATH) \ X(SPECIAL_FASTPATH) \ X(SPECIAL_WSPOP) \ X(SPECIAL_SLOWPATH) \ X(GENERIC) \ X(STRUCTURAL_TOKEN) \ X(SPECIAL_SWITCHFIRST) \ X(STRINGY_CATCH) \ X(NUMBER_FASTPATH) \ X(ESCAPES) \ X(TOTAL) \ struct jsonsl_metrics_st { #define X(m) \ unsigned long metric_##m; XMETRICS #undef X }; static struct jsonsl_metrics_st GlobalMetrics = { 0 }; static unsigned long GenericCounter[0x100] = { 0 }; static unsigned long StringyCatchCounter[0x100] = { 0 }; #define INCR_METRIC(m) \ GlobalMetrics.metric_##m++; #define INCR_GENERIC(c) \ INCR_METRIC(GENERIC); \ GenericCounter[c]++; \ #define INCR_STRINGY_CATCH(c) \ INCR_METRIC(STRINGY_CATCH); \ StringyCatchCounter[c]++; JSONSL_API void jsonsl_dump_global_metrics(void) { int ii; printf("JSONSL Metrics:\n"); #define X(m) \ printf("\t%-30s %20lu (%0.2f%%)\n", #m, GlobalMetrics.metric_##m, \ (float)((float)(GlobalMetrics.metric_##m/(float)GlobalMetrics.metric_TOTAL)) * 100); XMETRICS #undef X printf("Generic Characters:\n"); for (ii = 0; ii < 0xff; ii++) { if (GenericCounter[ii]) { printf("\t[ %c ] %lu\n", ii, GenericCounter[ii]); } } printf("Weird string loop\n"); for (ii = 0; ii < 0xff; ii++) { if (StringyCatchCounter[ii]) { printf("\t[ %c ] %lu\n", ii, StringyCatchCounter[ii]); } } } #else #define INCR_METRIC(m) #define INCR_GENERIC(c) #define INCR_STRINGY_CATCH(c) JSONSL_API void jsonsl_dump_global_metrics(void) { } #endif /* JSONSL_USE_METRICS */ #define CASE_DIGITS \ case '1': \ case '2': \ case '3': \ case '4': \ case '5': \ case '6': \ case '7': \ case '8': \ case '9': \ case '0': static unsigned extract_special(unsigned); static int is_special_end(unsigned); static int is_allowed_whitespace(unsigned); static int is_allowed_escape(unsigned); static int is_simple_char(unsigned); static char get_escape_equiv(unsigned); JSONSL_API jsonsl_t jsonsl_new(int nlevels) { unsigned int ii; struct jsonsl_st * jsn; if (nlevels < 2) { return NULL; } jsn = (struct jsonsl_st *) bson_malloc0(sizeof (*jsn) + ( (nlevels-1) * sizeof (struct jsonsl_state_st) ) ); jsn->levels_max = (unsigned int) nlevels; jsn->max_callback_level = UINT_MAX; jsonsl_reset(jsn); for (ii = 0; ii < jsn->levels_max; ii++) { jsn->stack[ii].level = ii; } return jsn; } JSONSL_API void jsonsl_reset(jsonsl_t jsn) { jsn->tok_last = 0; jsn->can_insert = 1; jsn->pos = 0; jsn->level = 0; jsn->stopfl = 0; jsn->in_escape = 0; jsn->expecting = 0; } JSONSL_API void jsonsl_destroy(jsonsl_t jsn) { if (jsn) { bson_free(jsn); } } #define FASTPARSE_EXHAUSTED 1 #define FASTPARSE_BREAK 0 /* * This function is meant to accelerate string parsing, reducing the main loop's * check if we are indeed a string. * * @param jsn the parser * @param[in,out] bytes_p A pointer to the current buffer (i.e. current position) * @param[in,out] nbytes_p A pointer to the current size of the buffer * @return true if all bytes have been exhausted (and thus the main loop can * return), false if a special character was examined which requires greater * examination. 
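 * In practice this means the scanning loop hands control back to jsonsl_feed only when it reaches a character that needs further handling, typically a closing quote or a backslash.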
*/ static int jsonsl__str_fastparse(jsonsl_t jsn, const jsonsl_uchar_t **bytes_p, size_t *nbytes_p) { const jsonsl_uchar_t *bytes = *bytes_p; const jsonsl_uchar_t *end; for (end = bytes + *nbytes_p; bytes != end; bytes++) { if ( #ifdef JSONSL_USE_WCHAR *bytes >= 0x100 || #endif /* JSONSL_USE_WCHAR */ (is_simple_char(*bytes))) { INCR_METRIC(TOTAL); INCR_METRIC(STRINGY_INSIGNIFICANT); } else { /* Once we're done here, re-calculate the position variables */ jsn->pos += (bytes - *bytes_p); *nbytes_p -= (bytes - *bytes_p); *bytes_p = bytes; return FASTPARSE_BREAK; } } /* Once we're done here, re-calculate the position variables */ jsn->pos += (bytes - *bytes_p); return FASTPARSE_EXHAUSTED; } /* Functions exactly like str_fastparse, except it also accepts a 'state' * argument, since the number's value is updated in the state. */ static int jsonsl__num_fastparse(jsonsl_t jsn, const jsonsl_uchar_t **bytes_p, size_t *nbytes_p, struct jsonsl_state_st *state) { int exhausted = 1; size_t nbytes = *nbytes_p; const jsonsl_uchar_t *bytes = *bytes_p; for (; nbytes; nbytes--, bytes++) { jsonsl_uchar_t c = *bytes; if (isdigit(c)) { INCR_METRIC(TOTAL); INCR_METRIC(NUMBER_FASTPATH); state->nelem = (state->nelem * 10) + (c - 0x30); } else { exhausted = 0; break; } } jsn->pos += (*nbytes_p - nbytes); if (exhausted) { return FASTPARSE_EXHAUSTED; } *nbytes_p = nbytes; *bytes_p = bytes; return FASTPARSE_BREAK; } JSONSL_API void jsonsl_feed(jsonsl_t jsn, const jsonsl_char_t *bytes, size_t nbytes) { #define INVOKE_ERROR(eb) \ if (jsn->error_callback(jsn, JSONSL_ERROR_##eb, state, (char*)c)) { \ goto GT_AGAIN; \ } \ return; #define STACK_PUSH \ if (jsn->level >= (levels_max-1)) { \ jsn->error_callback(jsn, JSONSL_ERROR_LEVELS_EXCEEDED, state, (char*)c); \ return; \ } \ state = jsn->stack + (++jsn->level); \ state->ignore_callback = jsn->stack[jsn->level-1].ignore_callback; \ state->pos_begin = jsn->pos; #define STACK_POP_NOPOS \ state->pos_cur = jsn->pos; \ state = jsn->stack + (--jsn->level); #define STACK_POP \ STACK_POP_NOPOS; \ state->pos_cur = jsn->pos; #define CALLBACK_AND_POP_NOPOS(T) \ state->pos_cur = jsn->pos; \ DO_CALLBACK(T, POP); \ state->nescapes = 0; \ state = jsn->stack + (--jsn->level); #define CALLBACK_AND_POP(T) \ CALLBACK_AND_POP_NOPOS(T); \ state->pos_cur = jsn->pos; #define SPECIAL_POP \ CALLBACK_AND_POP(SPECIAL); \ jsn->expecting = 0; \ jsn->tok_last = 0; \ #define CUR_CHAR (*(jsonsl_uchar_t*)c) #define DO_CALLBACK(T, action) \ if (jsn->call_##T && \ jsn->max_callback_level > state->level && \ state->ignore_callback == 0) { \ \ if (jsn->action_callback_##action) { \ jsn->action_callback_##action(jsn, JSONSL_ACTION_##action, state, (jsonsl_char_t*)c); \ } else if (jsn->action_callback) { \ jsn->action_callback(jsn, JSONSL_ACTION_##action, state, (jsonsl_char_t*)c); \ } \ if (jsn->stopfl) { return; } \ } /** * Verifies that we are able to insert the (non-string) item into a hash. 
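 * Inside an object a bare (non-string) value is only legal when a key has just been parsed, i.e. state->nelem is odd; otherwise HKEY_EXPECTED is raised.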
*/ #define ENSURE_HVAL \ if (state->nelem % 2 == 0 && state->type == JSONSL_T_OBJECT) { \ INVOKE_ERROR(HKEY_EXPECTED); \ } #define VERIFY_SPECIAL(lit) \ if (CUR_CHAR != (lit)[jsn->pos - state->pos_begin]) { \ INVOKE_ERROR(SPECIAL_EXPECTED); \ } #define VERIFY_SPECIAL_CI(lit) \ if (tolower(CUR_CHAR) != (lit)[jsn->pos - state->pos_begin]) { \ INVOKE_ERROR(SPECIAL_EXPECTED); \ } #define STATE_SPECIAL_LENGTH \ (state)->nescapes #define IS_NORMAL_NUMBER \ ((state)->special_flags == JSONSL_SPECIALf_UNSIGNED || \ (state)->special_flags == JSONSL_SPECIALf_SIGNED) #define STATE_NUM_LAST jsn->tok_last #define CONTINUE_NEXT_CHAR() continue const jsonsl_uchar_t *c = (jsonsl_uchar_t*)bytes; size_t levels_max = jsn->levels_max; struct jsonsl_state_st *state = jsn->stack + jsn->level; jsn->base = bytes; for (; nbytes; nbytes--, jsn->pos++, c++) { unsigned state_type; INCR_METRIC(TOTAL); GT_AGAIN: state_type = state->type; /* Most common type is typically a string: */ if (state_type & JSONSL_Tf_STRINGY) { /* Special escape handling for some stuff */ if (jsn->in_escape) { jsn->in_escape = 0; if (!is_allowed_escape(CUR_CHAR)) { INVOKE_ERROR(ESCAPE_INVALID); } else if (CUR_CHAR == 'u') { DO_CALLBACK(UESCAPE, UESCAPE); if (jsn->return_UESCAPE) { return; } } CONTINUE_NEXT_CHAR(); } if (jsonsl__str_fastparse(jsn, &c, &nbytes) == FASTPARSE_EXHAUSTED) { /* No need to readjust variables as we've exhausted the iterator */ return; } else { if (CUR_CHAR == '"') { goto GT_QUOTE; } else if (CUR_CHAR == '\\') { goto GT_ESCAPE; } else { INVOKE_ERROR(WEIRD_WHITESPACE); } } INCR_METRIC(STRINGY_SLOWPATH); } else if (state_type == JSONSL_T_SPECIAL) { /* Fast track for signed/unsigned */ if (IS_NORMAL_NUMBER) { if (jsonsl__num_fastparse(jsn, &c, &nbytes, state) == FASTPARSE_EXHAUSTED) { return; } else { goto GT_SPECIAL_NUMERIC; } } else if (state->special_flags == JSONSL_SPECIALf_DASH) { #ifdef JSONSL_PARSE_NAN if (CUR_CHAR == 'I' || CUR_CHAR == 'i') { /* parsing -Infinity? */ state->special_flags = JSONSL_SPECIALf_NEG_INF; CONTINUE_NEXT_CHAR(); } #endif if (!isdigit(CUR_CHAR)) { INVOKE_ERROR(INVALID_NUMBER); } if (CUR_CHAR == '0') { state->special_flags = JSONSL_SPECIALf_ZERO|JSONSL_SPECIALf_SIGNED; } else if (isdigit(CUR_CHAR)) { state->special_flags = JSONSL_SPECIALf_SIGNED; state->nelem = CUR_CHAR - 0x30; } else { INVOKE_ERROR(INVALID_NUMBER); } CONTINUE_NEXT_CHAR(); } else if (state->special_flags == JSONSL_SPECIALf_ZERO) { if (isdigit(CUR_CHAR)) { /* Following a zero! 
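(JSON forbids leading zeros, so another digit immediately after a zero makes the number invalid.)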
*/ INVOKE_ERROR(INVALID_NUMBER); } /* Unset the 'zero' flag: */ if (state->special_flags & JSONSL_SPECIALf_SIGNED) { state->special_flags = JSONSL_SPECIALf_SIGNED; } else { state->special_flags = JSONSL_SPECIALf_UNSIGNED; } goto GT_SPECIAL_NUMERIC; } if ((state->special_flags & JSONSL_SPECIALf_NUMERIC) && !(state->special_flags & JSONSL_SPECIALf_INF)) { GT_SPECIAL_NUMERIC: switch (CUR_CHAR) { CASE_DIGITS STATE_NUM_LAST = '1'; CONTINUE_NEXT_CHAR(); case '.': if (state->special_flags & JSONSL_SPECIALf_FLOAT) { INVOKE_ERROR(INVALID_NUMBER); } state->special_flags |= JSONSL_SPECIALf_FLOAT; STATE_NUM_LAST = '.'; CONTINUE_NEXT_CHAR(); case 'e': case 'E': if (state->special_flags & JSONSL_SPECIALf_EXPONENT) { INVOKE_ERROR(INVALID_NUMBER); } state->special_flags |= JSONSL_SPECIALf_EXPONENT; STATE_NUM_LAST = 'e'; CONTINUE_NEXT_CHAR(); case '-': case '+': if (STATE_NUM_LAST != 'e') { INVOKE_ERROR(INVALID_NUMBER); } STATE_NUM_LAST = '-'; CONTINUE_NEXT_CHAR(); default: if (is_special_end(CUR_CHAR)) { goto GT_SPECIAL_POP; } INVOKE_ERROR(INVALID_NUMBER); break; } } /* else if (!NUMERIC) */ if (!is_special_end(CUR_CHAR)) { STATE_SPECIAL_LENGTH++; /* Verify TRUE, FALSE, NULL */ if (state->special_flags == JSONSL_SPECIALf_TRUE) { VERIFY_SPECIAL("true"); } else if (state->special_flags == JSONSL_SPECIALf_FALSE) { VERIFY_SPECIAL("false"); } else if (state->special_flags == JSONSL_SPECIALf_NULL) { VERIFY_SPECIAL("null"); #ifdef JSONSL_PARSE_NAN } else if (state->special_flags == JSONSL_SPECIALf_POS_INF) { VERIFY_SPECIAL_CI("infinity"); } else if (state->special_flags == JSONSL_SPECIALf_NEG_INF) { VERIFY_SPECIAL_CI("-infinity"); } else if (state->special_flags == JSONSL_SPECIALf_NAN) { VERIFY_SPECIAL_CI("nan"); } else if (state->special_flags & JSONSL_SPECIALf_NULL || state->special_flags & JSONSL_SPECIALf_NAN) { /* previous char was "n", are we parsing null or nan? */ if (CUR_CHAR != 'u') { state->special_flags &= ~JSONSL_SPECIALf_NULL; } if (tolower(CUR_CHAR) != 'a') { state->special_flags &= ~JSONSL_SPECIALf_NAN; } #endif } INCR_METRIC(SPECIAL_FASTPATH); CONTINUE_NEXT_CHAR(); } GT_SPECIAL_POP: jsn->can_insert = 0; if (IS_NORMAL_NUMBER) { /* Nothing */ } else if (state->special_flags == JSONSL_SPECIALf_ZERO || state->special_flags == (JSONSL_SPECIALf_ZERO|JSONSL_SPECIALf_SIGNED)) { /* 0 is unsigned! */ state->special_flags = JSONSL_SPECIALf_UNSIGNED; } else if (state->special_flags == JSONSL_SPECIALf_DASH) { /* Still in dash! */ INVOKE_ERROR(INVALID_NUMBER); } else if (state->special_flags & JSONSL_SPECIALf_INF) { if (STATE_SPECIAL_LENGTH != 8) { INVOKE_ERROR(SPECIAL_INCOMPLETE); } state->nelem = 1; } else if (state->special_flags & JSONSL_SPECIALf_NUMERIC) { /* Check that we're not at the end of a token */ if (STATE_NUM_LAST != '1') { INVOKE_ERROR(INVALID_NUMBER); } } else if (state->special_flags == JSONSL_SPECIALf_TRUE) { if (STATE_SPECIAL_LENGTH != 4) { INVOKE_ERROR(SPECIAL_INCOMPLETE); } state->nelem = 1; } else if (state->special_flags == JSONSL_SPECIALf_FALSE) { if (STATE_SPECIAL_LENGTH != 5) { INVOKE_ERROR(SPECIAL_INCOMPLETE); } } else if (state->special_flags == JSONSL_SPECIALf_NULL) { if (STATE_SPECIAL_LENGTH != 4) { INVOKE_ERROR(SPECIAL_INCOMPLETE); } } SPECIAL_POP; jsn->expecting = ','; if (is_allowed_whitespace(CUR_CHAR)) { CONTINUE_NEXT_CHAR(); } /** * This works because we have a non-whitespace token * which is not a special token. If this is a structural * character then it will be gracefully handled by the * switch statement. 
Otherwise it will default to the 'special' * state again, */ goto GT_STRUCTURAL_TOKEN; } else if (is_allowed_whitespace(CUR_CHAR)) { INCR_METRIC(ALLOWED_WHITESPACE); /* So we're not special. Harmless insignificant whitespace * passthrough */ CONTINUE_NEXT_CHAR(); } else if (extract_special(CUR_CHAR)) { /* not a string, whitespace, or structural token. must be special */ goto GT_SPECIAL_BEGIN; } INCR_GENERIC(CUR_CHAR); if (CUR_CHAR == '"') { GT_QUOTE: jsn->can_insert = 0; switch (state_type) { /* the end of a string or hash key */ case JSONSL_T_STRING: CALLBACK_AND_POP(STRING); CONTINUE_NEXT_CHAR(); case JSONSL_T_HKEY: CALLBACK_AND_POP(HKEY); CONTINUE_NEXT_CHAR(); case JSONSL_T_OBJECT: state->nelem++; if ( (state->nelem-1) % 2 ) { /* Odd, this must be a hash value */ if (jsn->tok_last != ':') { INVOKE_ERROR(MISSING_TOKEN); } jsn->expecting = ','; /* Can't figure out what to expect next */ jsn->tok_last = 0; STACK_PUSH; state->type = JSONSL_T_STRING; DO_CALLBACK(STRING, PUSH); } else { /* hash key */ if (jsn->expecting != '"') { INVOKE_ERROR(STRAY_TOKEN); } jsn->tok_last = 0; jsn->expecting = ':'; STACK_PUSH; state->type = JSONSL_T_HKEY; DO_CALLBACK(HKEY, PUSH); } CONTINUE_NEXT_CHAR(); case JSONSL_T_LIST: state->nelem++; STACK_PUSH; state->type = JSONSL_T_STRING; jsn->expecting = ','; jsn->tok_last = 0; DO_CALLBACK(STRING, PUSH); CONTINUE_NEXT_CHAR(); case JSONSL_T_SPECIAL: INVOKE_ERROR(STRAY_TOKEN); break; default: INVOKE_ERROR(STRING_OUTSIDE_CONTAINER); break; } /* switch(state->type) */ } else if (CUR_CHAR == '\\') { GT_ESCAPE: INCR_METRIC(ESCAPES); /* Escape */ if ( (state->type & JSONSL_Tf_STRINGY) == 0 ) { INVOKE_ERROR(ESCAPE_OUTSIDE_STRING); } state->nescapes++; jsn->in_escape = 1; CONTINUE_NEXT_CHAR(); } /* " or \ */ GT_STRUCTURAL_TOKEN: switch (CUR_CHAR) { case ':': INCR_METRIC(STRUCTURAL_TOKEN); if (jsn->expecting != CUR_CHAR) { INVOKE_ERROR(STRAY_TOKEN); } jsn->tok_last = ':'; jsn->can_insert = 1; jsn->expecting = '"'; CONTINUE_NEXT_CHAR(); case ',': INCR_METRIC(STRUCTURAL_TOKEN); /** * The comma is one of the more generic tokens. * In the context of an OBJECT, the can_insert flag * should never be set, and no other action is * necessary. */ if (jsn->expecting != CUR_CHAR) { /* make this branch execute only when we haven't manually * just placed the ',' in the expecting register. 
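 * For example, in [1,,2] the second ',' arrives while '"' is still in the
 * expecting register, so it is reported as STRAY_TOKEN rather than being
 * silently skipped.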
*/ INVOKE_ERROR(STRAY_TOKEN); } if (state->type == JSONSL_T_OBJECT) { /* end of hash value, expect a string as a hash key */ jsn->expecting = '"'; } else { jsn->can_insert = 1; } jsn->tok_last = ','; jsn->expecting = '"'; CONTINUE_NEXT_CHAR(); /* new list or object */ /* hashes are more common */ case '{': case '[': INCR_METRIC(STRUCTURAL_TOKEN); if (!jsn->can_insert) { INVOKE_ERROR(CANT_INSERT); } ENSURE_HVAL; state->nelem++; STACK_PUSH; /* because the constants match the opening delimiters, we can do this: */ state->type = CUR_CHAR; state->nelem = 0; jsn->can_insert = 1; if (CUR_CHAR == '{') { /* If we're a hash, we expect a key first, which is quouted */ jsn->expecting = '"'; } if (CUR_CHAR == JSONSL_T_OBJECT) { DO_CALLBACK(OBJECT, PUSH); } else { DO_CALLBACK(LIST, PUSH); } jsn->tok_last = 0; CONTINUE_NEXT_CHAR(); /* closing of list or object */ case '}': case ']': INCR_METRIC(STRUCTURAL_TOKEN); if (jsn->tok_last == ',' && jsn->options.allow_trailing_comma == 0) { INVOKE_ERROR(TRAILING_COMMA); } jsn->can_insert = 0; jsn->level--; jsn->expecting = ','; jsn->tok_last = 0; if (CUR_CHAR == ']') { if (state->type != '[') { INVOKE_ERROR(BRACKET_MISMATCH); } DO_CALLBACK(LIST, POP); } else { if (state->type != '{') { INVOKE_ERROR(BRACKET_MISMATCH); } else if (state->nelem && state->nelem % 2 != 0) { INVOKE_ERROR(VALUE_EXPECTED); } DO_CALLBACK(OBJECT, POP); } state = jsn->stack + jsn->level; state->pos_cur = jsn->pos; CONTINUE_NEXT_CHAR(); default: GT_SPECIAL_BEGIN: /** * Not a string, not a structural token, and not benign whitespace. * Technically we should iterate over the character always, but since * we are not doing full numerical/value decoding anyway (but only hinting), * we only check upon entry. */ if (state->type != JSONSL_T_SPECIAL) { int special_flags = extract_special(CUR_CHAR); if (!special_flags) { /** * Try to do some heuristics here anyway to figure out what kind of * error this is. The 'special' case is a fallback scenario anyway. 
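 * For example, a raw NUL byte maps to FOUND_NULL_BYTE, an unescaped control
 * character maps to WEIRD_WHITESPACE, and any other unrecognized byte (say, a
 * stray 'x' where a value was expected) maps to SPECIAL_EXPECTED.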
*/ if (CUR_CHAR == '\0') { INVOKE_ERROR(FOUND_NULL_BYTE); } else if (CUR_CHAR < 0x20) { INVOKE_ERROR(WEIRD_WHITESPACE); } else { INVOKE_ERROR(SPECIAL_EXPECTED); } } ENSURE_HVAL; state->nelem++; if (!jsn->can_insert) { INVOKE_ERROR(CANT_INSERT); } STACK_PUSH; state->type = JSONSL_T_SPECIAL; state->special_flags = special_flags; STATE_SPECIAL_LENGTH = 1; if (special_flags == JSONSL_SPECIALf_UNSIGNED) { state->nelem = CUR_CHAR - 0x30; STATE_NUM_LAST = '1'; } else { STATE_NUM_LAST = '-'; state->nelem = 0; } DO_CALLBACK(SPECIAL, PUSH); } CONTINUE_NEXT_CHAR(); } } } JSONSL_API const char* jsonsl_strerror(jsonsl_error_t err) { if (err == JSONSL_ERROR_SUCCESS) { return "SUCCESS"; } #define X(t) \ if (err == JSONSL_ERROR_##t) \ return #t; JSONSL_XERR; #undef X return ""; } JSONSL_API const char *jsonsl_strtype(jsonsl_type_t type) { #define X(o,c) \ if (type == JSONSL_T_##o) \ return #o; JSONSL_XTYPE #undef X return "UNKNOWN TYPE"; } /* * * JPR/JSONPointer functions * * */ #ifndef JSONSL_NO_JPR static jsonsl_jpr_type_t populate_component(char *in, struct jsonsl_jpr_component_st *component, char **next, jsonsl_error_t *errp) { unsigned long pctval; char *c = NULL, *outp = NULL, *end = NULL; size_t input_len; jsonsl_jpr_type_t ret = JSONSL_PATH_NONE; if (*next == NULL || *(*next) == '\0') { return JSONSL_PATH_NONE; } /* Replace the next / with a NULL */ *next = strstr(in, "/"); if (*next != NULL) { *(*next) = '\0'; /* drop the forward slash */ input_len = *next - in; end = *next; *next += 1; /* next character after the '/' */ } else { input_len = strlen(in); end = in + input_len + 1; } component->pstr = in; /* Check for special components of interest */ if (*in == JSONSL_PATH_WILDCARD_CHAR && input_len == 1) { /* Lone wildcard */ ret = JSONSL_PATH_WILDCARD; goto GT_RET; } else if (isdigit(*in)) { /* ASCII Numeric */ char *endptr; component->idx = strtoul(in, &endptr, 10); if (endptr && *endptr == '\0') { ret = JSONSL_PATH_NUMERIC; goto GT_RET; } } /* Default, it's a string */ ret = JSONSL_PATH_STRING; for (c = outp = in; c < end; c++, outp++) { char origc; if (*c != '%') { goto GT_ASSIGN; } /* * c = { [+0] = '%', [+1] = 'b', [+2] = 'e', [+3] = '\0' } */ /* Need %XX */ if (c+2 >= end) { *errp = JSONSL_ERROR_PERCENT_BADHEX; return JSONSL_PATH_INVALID; } if (! 
(isxdigit(*(c+1)) && isxdigit(*(c+2))) ) { *errp = JSONSL_ERROR_PERCENT_BADHEX; return JSONSL_PATH_INVALID; } /* Temporarily null-terminate the characters */ origc = *(c+3); *(c+3) = '\0'; pctval = strtoul(c+1, NULL, 16); *(c+3) = origc; *outp = (char) pctval; c += 2; continue; GT_ASSIGN: *outp = *c; } /* Null-terminate the string */ for (; outp < c; outp++) { *outp = '\0'; } GT_RET: component->ptype = ret; if (ret != JSONSL_PATH_WILDCARD) { component->len = strlen(component->pstr); } return ret; } JSONSL_API jsonsl_jpr_t jsonsl_jpr_new(const char *path, jsonsl_error_t *errp) { char *my_copy = NULL; int count, curidx; struct jsonsl_jpr_st *ret = NULL; struct jsonsl_jpr_component_st *components = NULL; size_t origlen; jsonsl_error_t errstacked; #define JPR_BAIL(err) *errp = err; goto GT_ERROR; if (errp == NULL) { errp = &errstacked; } if (path == NULL || *path != '/') { JPR_BAIL(JSONSL_ERROR_JPR_NOROOT); } count = 1; path++; { const char *c = path; for (; *c; c++) { if (*c == '/') { count++; if (*(c+1) == '/') { JPR_BAIL(JSONSL_ERROR_JPR_DUPSLASH); } } } } if(*path) { count++; } components = (struct jsonsl_jpr_component_st *) malloc(sizeof(*components) * count); if (!components) { JPR_BAIL(JSONSL_ERROR_ENOMEM); } my_copy = (char *)malloc(strlen(path) + 1); if (!my_copy) { JPR_BAIL(JSONSL_ERROR_ENOMEM); } strcpy(my_copy, path); components[0].ptype = JSONSL_PATH_ROOT; if (*my_copy) { char *cur = my_copy; int pathret = JSONSL_PATH_STRING; curidx = 1; while (curidx < count) { pathret = populate_component(cur, components + curidx, &cur, errp); if (pathret > 0) { curidx++; } else { break; } } if (pathret == JSONSL_PATH_INVALID) { JPR_BAIL(JSONSL_ERROR_JPR_BADPATH); } } else { curidx = 1; } path--; /*revert path to leading '/' */ origlen = strlen(path) + 1; ret = (struct jsonsl_jpr_st *)malloc(sizeof(*ret)); if (!ret) { JPR_BAIL(JSONSL_ERROR_ENOMEM); } ret->orig = (char *)malloc(origlen); if (!ret->orig) { JPR_BAIL(JSONSL_ERROR_ENOMEM); } ret->components = components; ret->ncomponents = curidx; ret->basestr = my_copy; ret->norig = origlen-1; strcpy(ret->orig, path); return ret; GT_ERROR: free(my_copy); free(components); if (ret) { free(ret->orig); } free(ret); return NULL; #undef JPR_BAIL } void jsonsl_jpr_destroy(jsonsl_jpr_t jpr) { free(jpr->components); free(jpr->basestr); free(jpr->orig); free(jpr); } /** * Call when there is a possibility of a match, either as a final match or * as a path within a match * @param jpr The JPR path * @param component Component corresponding to the current element * @param prlevel The level of the *parent* * @param chtype The type of the child * @return Match status */ static jsonsl_jpr_match_t jsonsl__match_continue(jsonsl_jpr_t jpr, const struct jsonsl_jpr_component_st *component, unsigned prlevel, unsigned chtype) { const struct jsonsl_jpr_component_st *next_comp = component + 1; if (prlevel == jpr->ncomponents - 1) { /* This is the match. 
Check the expected type of the match against * the child */ if (jpr->match_type == 0 || jpr->match_type == chtype) { return JSONSL_MATCH_COMPLETE; } else { return JSONSL_MATCH_TYPE_MISMATCH; } } if (chtype == JSONSL_T_LIST) { if (next_comp->ptype == JSONSL_PATH_NUMERIC) { return JSONSL_MATCH_POSSIBLE; } else { return JSONSL_MATCH_TYPE_MISMATCH; } } else if (chtype == JSONSL_T_OBJECT) { if (next_comp->ptype == JSONSL_PATH_NUMERIC) { return JSONSL_MATCH_TYPE_MISMATCH; } else { return JSONSL_MATCH_POSSIBLE; } } else { return JSONSL_MATCH_TYPE_MISMATCH; } } JSONSL_API jsonsl_jpr_match_t jsonsl_path_match(jsonsl_jpr_t jpr, const struct jsonsl_state_st *parent, const struct jsonsl_state_st *child, const char *key, size_t nkey) { const struct jsonsl_jpr_component_st *comp; if (!parent) { /* No parent. Return immediately since it's always a match */ return jsonsl__match_continue(jpr, jpr->components, 0, child->type); } comp = jpr->components + parent->level; /* note that we don't need to verify the type of the match, this is * always done through the previous call to jsonsl__match_continue. * If we are in a POSSIBLE tree then we can be certain the types (at * least at this level) are correct */ if (parent->type == JSONSL_T_OBJECT) { if (comp->len != nkey || strncmp(key, comp->pstr, nkey) != 0) { return JSONSL_MATCH_NOMATCH; } } else { if (comp->idx != parent->nelem - 1) { return JSONSL_MATCH_NOMATCH; } } return jsonsl__match_continue(jpr, comp, parent->level, child->type); } JSONSL_API jsonsl_jpr_match_t jsonsl_jpr_match(jsonsl_jpr_t jpr, unsigned int parent_type, unsigned int parent_level, const char *key, size_t nkey) { /* find our current component. This is the child level */ int cmpret; struct jsonsl_jpr_component_st *p_component; p_component = jpr->components + parent_level; if (parent_level >= jpr->ncomponents) { return JSONSL_MATCH_NOMATCH; } /* Lone query for 'root' element. Always matches */ if (parent_level == 0) { if (jpr->ncomponents == 1) { return JSONSL_MATCH_COMPLETE; } else { return JSONSL_MATCH_POSSIBLE; } } /* Wildcard, always matches */ if (p_component->ptype == JSONSL_PATH_WILDCARD) { if (parent_level == jpr->ncomponents-1) { return JSONSL_MATCH_COMPLETE; } else { return JSONSL_MATCH_POSSIBLE; } } /* Check numeric array index. This gets its special block so we can avoid * string comparisons */ if (p_component->ptype == JSONSL_PATH_NUMERIC) { if (parent_type == JSONSL_T_LIST) { if (p_component->idx != nkey) { /* Wrong index */ return JSONSL_MATCH_NOMATCH; } else { if (parent_level == jpr->ncomponents-1) { /* This is the last element of the path */ return JSONSL_MATCH_COMPLETE; } else { /* Intermediate element */ return JSONSL_MATCH_POSSIBLE; } } } else if (p_component->is_arridx) { /* Numeric and an array index (set explicitly by user). 
But not * a list for a parent */ return JSONSL_MATCH_TYPE_MISMATCH; } } else if (parent_type == JSONSL_T_LIST) { return JSONSL_MATCH_TYPE_MISMATCH; } /* Check lengths */ if (p_component->len != nkey) { return JSONSL_MATCH_NOMATCH; } /* Check string comparison */ cmpret = strncmp(p_component->pstr, key, nkey); if (cmpret == 0) { if (parent_level == jpr->ncomponents-1) { return JSONSL_MATCH_COMPLETE; } else { return JSONSL_MATCH_POSSIBLE; } } return JSONSL_MATCH_NOMATCH; } JSONSL_API void jsonsl_jpr_match_state_init(jsonsl_t jsn, jsonsl_jpr_t *jprs, size_t njprs) { size_t ii, *firstjmp; if (njprs == 0) { return; } jsn->jprs = (jsonsl_jpr_t *)malloc(sizeof(jsonsl_jpr_t) * njprs); jsn->jpr_count = njprs; jsn->jpr_root = (size_t*)calloc(1, sizeof(size_t) * njprs * jsn->levels_max); memcpy(jsn->jprs, jprs, sizeof(jsonsl_jpr_t) * njprs); /* Set the initial jump table values */ firstjmp = jsn->jpr_root; for (ii = 0; ii < njprs; ii++) { firstjmp[ii] = ii+1; } } JSONSL_API void jsonsl_jpr_match_state_cleanup(jsonsl_t jsn) { if (jsn->jpr_count == 0) { return; } free(jsn->jpr_root); free(jsn->jprs); jsn->jprs = NULL; jsn->jpr_root = NULL; jsn->jpr_count = 0; } /** * This function should be called exactly once on each element... * This should also be called in recursive order, since we rely * on the parent having been initialized for a match. * * Since the parent is checked for a match as well, we maintain a 'serial' counter. * Whenever we traverse an element, we expect the serial to be the same as a global * integer. If they do not match, we re-initialize the context, and set the serial. * * This ensures a type of consistency without having a proactive reset by the * main lexer itself. * */ JSONSL_API jsonsl_jpr_t jsonsl_jpr_match_state(jsonsl_t jsn, struct jsonsl_state_st *state, const char *key, size_t nkey, jsonsl_jpr_match_t *out) { struct jsonsl_state_st *parent_state; jsonsl_jpr_t ret = NULL; /* Jump and JPR tables for our own state and the parent state */ size_t *jmptable, *pjmptable; size_t jmp_cur, ii, ourjmpidx; if (!jsn->jpr_root) { *out = JSONSL_MATCH_NOMATCH; return NULL; } pjmptable = jsn->jpr_root + (jsn->jpr_count * (state->level-1)); jmptable = pjmptable + jsn->jpr_count; /* If the parent cannot match, then invalidate it */ if (*pjmptable == 0) { *jmptable = 0; *out = JSONSL_MATCH_NOMATCH; return NULL; } parent_state = jsn->stack + state->level - 1; if (parent_state->type == JSONSL_T_LIST) { nkey = (size_t) parent_state->nelem; } *jmptable = 0; ourjmpidx = 0; memset(jmptable, 0, sizeof(int) * jsn->jpr_count); for (ii = 0; ii < jsn->jpr_count; ii++) { jmp_cur = pjmptable[ii]; if (jmp_cur) { jsonsl_jpr_t jpr = jsn->jprs[jmp_cur-1]; *out = jsonsl_jpr_match(jpr, parent_state->type, parent_state->level, key, nkey); if (*out == JSONSL_MATCH_COMPLETE) { ret = jpr; *jmptable = 0; return ret; } else if (*out == JSONSL_MATCH_POSSIBLE) { jmptable[ourjmpidx] = ii+1; ourjmpidx++; } } else { break; } } if (!*jmptable) { *out = JSONSL_MATCH_NOMATCH; } return NULL; } JSONSL_API const char *jsonsl_strmatchtype(jsonsl_jpr_match_t match) { #define X(T,v) \ if ( match == JSONSL_MATCH_##T ) \ return #T; JSONSL_XMATCH #undef X return ""; } #endif /* JSONSL_WITH_JPR */ static char * jsonsl__writeutf8(uint32_t pt, char *out) { #define ADD_OUTPUT(c) *out = (char)(c); out++; if (pt < 0x80) { ADD_OUTPUT(pt); } else if (pt < 0x800) { ADD_OUTPUT((pt >> 6) | 0xC0); ADD_OUTPUT((pt & 0x3F) | 0x80); } else if (pt < 0x10000) { ADD_OUTPUT((pt >> 12) | 0xE0); ADD_OUTPUT(((pt >> 6) & 0x3F) | 0x80); ADD_OUTPUT((pt & 0x3F) 
| 0x80); } else { ADD_OUTPUT((pt >> 18) | 0xF0); ADD_OUTPUT(((pt >> 12) & 0x3F) | 0x80); ADD_OUTPUT(((pt >> 6) & 0x3F) | 0x80); ADD_OUTPUT((pt & 0x3F) | 0x80); } return out; #undef ADD_OUTPUT } /* Thanks snej (https://github.com/mnunberg/jsonsl/issues/9) */ static int jsonsl__digit2int(char ch) { int d = ch - '0'; if ((unsigned) d < 10) { return d; } d = ch - 'a'; if ((unsigned) d < 6) { return d + 10; } d = ch - 'A'; if ((unsigned) d < 6) { return d + 10; } return -1; } /* Assume 's' is at least 4 bytes long */ static int jsonsl__get_uescape_16(const char *s) { int ret = 0; int cur; #define GET_DIGIT(off) \ cur = jsonsl__digit2int(s[off]); \ if (cur == -1) { return -1; } \ ret |= (cur << (12 - (off * 4))); GET_DIGIT(0); GET_DIGIT(1); GET_DIGIT(2); GET_DIGIT(3); #undef GET_DIGIT return ret; } /** * Utility function to convert escape sequences */ JSONSL_API size_t jsonsl_util_unescape_ex(const char *in, char *out, size_t len, const int toEscape[128], unsigned *oflags, jsonsl_error_t *err, const char **errat) { const unsigned char *c = (const unsigned char*)in; char *begin_p = out; unsigned oflags_s; uint16_t last_codepoint = 0; if (!oflags) { oflags = &oflags_s; } *oflags = 0; #define UNESCAPE_BAIL(e,offset) \ *err = JSONSL_ERROR_##e; \ if (errat) { \ *errat = (const char*)(c+ (ptrdiff_t)(offset)); \ } \ return 0; for (; len; len--, c++, out++) { int uescval; if (*c != '\\') { /* Not an escape, so we don't care about this */ goto GT_ASSIGN; } if (len < 2) { UNESCAPE_BAIL(ESCAPE_INVALID, 0); } if (!is_allowed_escape(c[1])) { UNESCAPE_BAIL(ESCAPE_INVALID, 1) } if ((toEscape && toEscape[(unsigned char)c[1] & 0x7f] == 0 && c[1] != '\\' && c[1] != '"')) { /* if we don't want to unescape this string, write the escape sequence to the output */ *out++ = *c++; --len; goto GT_ASSIGN; } if (c[1] != 'u') { /* simple skip-and-replace using pre-defined maps. * TODO: should the maps actually reflect the desired * replacement character in toEscape? */ char esctmp = get_escape_equiv(c[1]); if (esctmp) { /* Check if there is a corresponding replacement */ *out = esctmp; } else { /* Just gobble up the 'reverse-solidus' */ *out = c[1]; } len--; c++; /* do not assign, just continue */ continue; } /* next == 'u' */ if (len < 6) { /* Need at least six characters.. */ UNESCAPE_BAIL(UESCAPE_TOOSHORT, 2); } uescval = jsonsl__get_uescape_16((const char *)c + 2); if (uescval == -1) { UNESCAPE_BAIL(PERCENT_BADHEX, -1); } if (last_codepoint) { uint16_t w1 = last_codepoint, w2 = (uint16_t)uescval; uint32_t cp; if (uescval < 0xDC00 || uescval > 0xDFFF) { UNESCAPE_BAIL(INVALID_CODEPOINT, -1); } cp = (w1 & 0x3FF) << 10; cp |= (w2 & 0x3FF); cp += 0x10000; out = jsonsl__writeutf8(cp, out) - 1; last_codepoint = 0; } else if (uescval < 0xD800 || uescval > 0xDFFF) { *oflags |= JSONSL_SPECIALf_NONASCII; out = jsonsl__writeutf8(uescval, out) - 1; } else if (uescval < 0xDC00) { *oflags |= JSONSL_SPECIALf_NONASCII; last_codepoint = (uint16_t)uescval; out--; } else { UNESCAPE_BAIL(INVALID_CODEPOINT, 2); } /* Post uescape cleanup */ len -= 5; /* Gobble up 5 chars after 'u' */ c += 5; continue; /* Only reached by previous branches */ GT_ASSIGN: *out = *c; } if (last_codepoint) { *err = JSONSL_ERROR_INVALID_CODEPOINT; return 0; } *err = JSONSL_ERROR_SUCCESS; return out - begin_p; } /** * Character Table definitions. * These were all generated via srcutil/genchartables.pl */ /** * This table contains the beginnings of non-string * allowable (bareword) values. 
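 * For example, Special_Table['t'] is JSONSL_SPECIALf_TRUE, Special_Table['-']
 * is JSONSL_SPECIALf_DASH and Special_Table['5'] is JSONSL_SPECIALf_UNSIGNED;
 * extract_special() below does nothing more than index this table.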
*/ static unsigned short Special_Table[0x100] = { /* 0x00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x1f */ /* 0x20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x2c */ /* 0x2d */ JSONSL_SPECIALf_DASH /* <-> */, /* 0x2d */ /* 0x2e */ 0,0, /* 0x2f */ /* 0x30 */ JSONSL_SPECIALf_ZERO /* <0> */, /* 0x30 */ /* 0x31 */ JSONSL_SPECIALf_UNSIGNED /* <1> */, /* 0x31 */ /* 0x32 */ JSONSL_SPECIALf_UNSIGNED /* <2> */, /* 0x32 */ /* 0x33 */ JSONSL_SPECIALf_UNSIGNED /* <3> */, /* 0x33 */ /* 0x34 */ JSONSL_SPECIALf_UNSIGNED /* <4> */, /* 0x34 */ /* 0x35 */ JSONSL_SPECIALf_UNSIGNED /* <5> */, /* 0x35 */ /* 0x36 */ JSONSL_SPECIALf_UNSIGNED /* <6> */, /* 0x36 */ /* 0x37 */ JSONSL_SPECIALf_UNSIGNED /* <7> */, /* 0x37 */ /* 0x38 */ JSONSL_SPECIALf_UNSIGNED /* <8> */, /* 0x38 */ /* 0x39 */ JSONSL_SPECIALf_UNSIGNED /* <9> */, /* 0x39 */ /* 0x3a */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x48 */ /* 0x49 */ JSONSL__INF_PROXY /* */, /* 0x49 */ /* 0x4a */ 0,0,0,0, /* 0x4d */ /* 0x4e */ JSONSL__NAN_PROXY /* */, /* 0x4e */ /* 0x4f */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x65 */ /* 0x66 */ JSONSL_SPECIALf_FALSE /* */, /* 0x66 */ /* 0x67 */ 0,0, /* 0x68 */ /* 0x69 */ JSONSL__INF_PROXY /* */, /* 0x69 */ /* 0x6a */ 0,0,0,0, /* 0x6d */ /* 0x6e */ JSONSL_SPECIALf_NULL|JSONSL__NAN_PROXY /* */, /* 0x6e */ /* 0x6f */ 0,0,0,0,0, /* 0x73 */ /* 0x74 */ JSONSL_SPECIALf_TRUE /* */, /* 0x74 */ /* 0x75 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x94 */ /* 0x95 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xb4 */ /* 0xb5 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xd4 */ /* 0xd5 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xf4 */ /* 0xf5 */ 0,0,0,0,0,0,0,0,0,0, /* 0xfe */ }; /** * Contains characters which signal the termination of any of the 'special' bareword * values. 
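 * For example, in [true,false] the ',' and ']' bytes terminate the barewords;
 * whitespace, '"', ',', ':', '[', ']', '{', '}' and '\' make up the terminator
 * set consulted by is_special_end() below.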
*/ static int Special_Endings[0x100] = { /* 0x00 */ 0,0,0,0,0,0,0,0,0, /* 0x08 */ /* 0x09 */ 1 /* */, /* 0x09 */ /* 0x0a */ 1 /* */, /* 0x0a */ /* 0x0b */ 0,0, /* 0x0c */ /* 0x0d */ 1 /* */, /* 0x0d */ /* 0x0e */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x1f */ /* 0x20 */ 1 /* */, /* 0x20 */ /* 0x21 */ 0, /* 0x21 */ /* 0x22 */ 1 /* " */, /* 0x22 */ /* 0x23 */ 0,0,0,0,0,0,0,0,0, /* 0x2b */ /* 0x2c */ 1 /* , */, /* 0x2c */ /* 0x2d */ 0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x39 */ /* 0x3a */ 1 /* : */, /* 0x3a */ /* 0x3b */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x5a */ /* 0x5b */ 1 /* [ */, /* 0x5b */ /* 0x5c */ 1 /* \ */, /* 0x5c */ /* 0x5d */ 1 /* ] */, /* 0x5d */ /* 0x5e */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x7a */ /* 0x7b */ 1 /* { */, /* 0x7b */ /* 0x7c */ 0, /* 0x7c */ /* 0x7d */ 1 /* } */, /* 0x7d */ /* 0x7e */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x9d */ /* 0x9e */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xbd */ /* 0xbe */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xdd */ /* 0xde */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xfd */ /* 0xfe */ 0 /* 0xfe */ }; /** * This table contains entries for the allowed whitespace as per RFC 4627 */ static int Allowed_Whitespace[0x100] = { /* 0x00 */ 0,0,0,0,0,0,0,0,0, /* 0x08 */ /* 0x09 */ 1 /* */, /* 0x09 */ /* 0x0a */ 1 /* */, /* 0x0a */ /* 0x0b */ 0,0, /* 0x0c */ /* 0x0d */ 1 /* */, /* 0x0d */ /* 0x0e */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x1f */ /* 0x20 */ 1 /* */, /* 0x20 */ /* 0x21 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x40 */ /* 0x41 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x60 */ /* 0x61 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x80 */ /* 0x81 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xa0 */ /* 0xa1 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xc0 */ /* 0xc1 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xe0 */ /* 0xe1 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* 0xfe */ }; static const int String_No_Passthrough[0x100] = { /* 0x00 */ 1 /* */, /* 0x00 */ /* 0x01 */ 1 /* */, /* 0x01 */ /* 0x02 */ 1 /* */, /* 0x02 */ /* 0x03 */ 1 /* */, /* 0x03 */ /* 0x04 */ 1 /* */, /* 0x04 */ /* 0x05 */ 1 /* */, /* 0x05 */ /* 0x06 */ 1 /* */, /* 0x06 */ /* 0x07 */ 1 /* */, /* 0x07 */ /* 0x08 */ 1 /* */, /* 0x08 */ /* 0x09 */ 1 /* */, /* 0x09 */ /* 0x0a */ 1 /* */, /* 0x0a */ /* 0x0b */ 1 /* */, /* 0x0b */ /* 0x0c */ 1 /* */, /* 0x0c */ /* 0x0d */ 1 /* */, /* 0x0d */ /* 0x0e */ 1 /* */, /* 0x0e */ /* 0x0f */ 1 /* */, /* 0x0f */ /* 0x10 */ 1 /* */, /* 0x10 */ /* 0x11 */ 1 /* */, /* 0x11 */ /* 0x12 */ 1 /* */, /* 0x12 */ /* 0x13 */ 1 /* */, /* 0x13 */ /* 0x14 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x21 */ /* 0x22 */ 1 /* <"> */, /* 0x22 */ /* 0x23 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x42 */ /* 0x43 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x5b */ /* 0x5c */ 1 /* <\> */, /* 0x5c */ /* 0x5d */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x7c */ /* 0x7d */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x9c */ /* 0x9d */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xbc */ /* 0xbd */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xdc */ /* 0xdd */ 
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xfc */ /* 0xfd */ 0,0, /* 0xfe */ }; /** * Allowable two-character 'common' escapes: */ static int Allowed_Escapes[0x100] = { /* 0x00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x1f */ /* 0x20 */ 0,0, /* 0x21 */ /* 0x22 */ 1 /* <"> */, /* 0x22 */ /* 0x23 */ 0,0,0,0,0,0,0,0,0,0,0,0, /* 0x2e */ /* 0x2f */ 1 /* */, /* 0x2f */ /* 0x30 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x4f */ /* 0x50 */ 0,0,0,0,0,0,0,0,0,0,0,0, /* 0x5b */ /* 0x5c */ 1 /* <\> */, /* 0x5c */ /* 0x5d */ 0,0,0,0,0, /* 0x61 */ /* 0x62 */ 1 /* */, /* 0x62 */ /* 0x63 */ 0,0,0, /* 0x65 */ /* 0x66 */ 1 /* */, /* 0x66 */ /* 0x67 */ 0,0,0,0,0,0,0, /* 0x6d */ /* 0x6e */ 1 /* */, /* 0x6e */ /* 0x6f */ 0,0,0, /* 0x71 */ /* 0x72 */ 1 /* */, /* 0x72 */ /* 0x73 */ 0, /* 0x73 */ /* 0x74 */ 1 /* */, /* 0x74 */ /* 0x75 */ 1 /* */, /* 0x75 */ /* 0x76 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x95 */ /* 0x96 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xb5 */ /* 0xb6 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xd5 */ /* 0xd6 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xf5 */ /* 0xf6 */ 0,0,0,0,0,0,0,0,0, /* 0xfe */ }; /** * This table contains the _values_ for a given (single) escaped character. */ static unsigned char Escape_Equivs[0x100] = { /* 0x00 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x1f */ /* 0x20 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x3f */ /* 0x40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x5f */ /* 0x60 */ 0,0, /* 0x61 */ /* 0x62 */ 8 /* */, /* 0x62 */ /* 0x63 */ 0,0,0, /* 0x65 */ /* 0x66 */ 12 /* */, /* 0x66 */ /* 0x67 */ 0,0,0,0,0,0,0, /* 0x6d */ /* 0x6e */ 10 /* */, /* 0x6e */ /* 0x6f */ 0,0,0, /* 0x71 */ /* 0x72 */ 13 /* */, /* 0x72 */ /* 0x73 */ 0, /* 0x73 */ /* 0x74 */ 9 /* */, /* 0x74 */ /* 0x75 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0x94 */ /* 0x95 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xb4 */ /* 0xb5 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xd4 */ /* 0xd5 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 0xf4 */ /* 0xf5 */ 0,0,0,0,0,0,0,0,0,0 /* 0xfe */ }; /* Definitions of above-declared static functions */ static char get_escape_equiv(unsigned c) { return Escape_Equivs[c & 0xff]; } static unsigned extract_special(unsigned c) { return Special_Table[c & 0xff]; } static int is_special_end(unsigned c) { return Special_Endings[c & 0xff]; } static int is_allowed_whitespace(unsigned c) { return c == ' ' || Allowed_Whitespace[c & 0xff]; } static int is_allowed_escape(unsigned c) { return Allowed_Escapes[c & 0xff]; } static int is_simple_char(unsigned c) { return !String_No_Passthrough[c & 0xff]; } /* Clean up all our macros! 
*/ #undef INCR_METRIC #undef INCR_GENERIC #undef INCR_STRINGY_CATCH #undef CASE_DIGITS #undef INVOKE_ERROR #undef STACK_PUSH #undef STACK_POP_NOPOS #undef STACK_POP #undef CALLBACK_AND_POP_NOPOS #undef CALLBACK_AND_POP #undef SPECIAL_POP #undef CUR_CHAR #undef DO_CALLBACK #undef ENSURE_HVAL #undef VERIFY_SPECIAL #undef STATE_SPECIAL_LENGTH #undef IS_NORMAL_NUMBER #undef STATE_NUM_LAST #undef FASTPARSE_EXHAUSTED #undef FASTPARSE_BREAK mongodb-1.6.1/src/libmongoc/src/libbson/src/jsonsl/jsonsl.h0000644000076500000240000007445113572250757023233 0ustar alcaeusstaff/** * JSON Simple/Stacked/Stateful Lexer. * - Does not buffer data * - Maintains state * - Callback oriented * - Lightweight and fast. One source file and one header file * * Copyright (C) 2012-2015 Mark Nunberg * See included LICENSE file for license details. */ #include "../bson/bson-prelude.h" #ifndef JSONSL_H_ #define JSONSL_H_ #include #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ #ifdef JSONSL_USE_WCHAR typedef jsonsl_char_t wchar_t; typedef jsonsl_uchar_t unsigned wchar_t; #else typedef char jsonsl_char_t; typedef unsigned char jsonsl_uchar_t; #endif /* JSONSL_USE_WCHAR */ #ifdef JSONSL_PARSE_NAN #define JSONSL__NAN_PROXY JSONSL_SPECIALf_NAN #define JSONSL__INF_PROXY JSONSL_SPECIALf_INF #else #define JSONSL__NAN_PROXY 0 #define JSONSL__INF_PROXY 0 #endif /* Stolen from http-parser.h, and possibly others */ #if defined(_WIN32) && !defined(__MINGW32__) && (!defined(_MSC_VER) || _MSC_VER<1600) typedef __int8 int8_t; typedef unsigned __int8 uint8_t; typedef __int16 int16_t; typedef unsigned __int16 uint16_t; typedef __int32 int32_t; typedef unsigned __int32 uint32_t; typedef __int64 int64_t; typedef unsigned __int64 uint64_t; #if !defined(_MSC_VER) || _MSC_VER<1400 typedef unsigned int size_t; typedef int ssize_t; #endif #else #include #endif #if (!defined(JSONSL_STATE_GENERIC)) && (!defined(JSONSL_STATE_USER_FIELDS)) #define JSONSL_STATE_GENERIC #endif /* !defined JSONSL_STATE_GENERIC */ #ifdef JSONSL_STATE_GENERIC #define JSONSL_STATE_USER_FIELDS #endif /* JSONSL_STATE_GENERIC */ /* Additional fields for component object */ #ifndef JSONSL_JPR_COMPONENT_USER_FIELDS #define JSONSL_JPR_COMPONENT_USER_FIELDS #endif #ifndef JSONSL_API /** * We require a /DJSONSL_DLL so that users already using this as a static * or embedded library don't get confused */ #if defined(_WIN32) && defined(JSONSL_DLL) #define JSONSL_API __declspec(dllexport) #else #define JSONSL_API #endif /* _WIN32 */ #endif /* !JSONSL_API */ #ifndef JSONSL_INLINE #if defined(_MSC_VER) #define JSONSL_INLINE __inline #elif defined(__GNUC__) #define JSONSL_INLINE __inline__ #else #define JSONSL_INLINE inline #endif /* _MSC_VER or __GNUC__ */ #endif /* JSONSL_INLINE */ #define JSONSL_MAX_LEVELS 512 struct jsonsl_st; typedef struct jsonsl_st *jsonsl_t; typedef struct jsonsl_jpr_st* jsonsl_jpr_t; /** * This flag is true when AND'd against a type whose value * must be in "quoutes" i.e. T_HKEY and T_STRING */ #define JSONSL_Tf_STRINGY 0xffff00 /** * Constant representing the special JSON types. * The values are special and aid in speed (the OBJECT and LIST * values are the char literals of their openings). * * Their actual value is a character which attempts to resemble * some mnemonic reference to the actual type. 
* * If new types are added, they must fit into the ASCII printable * range (so they should be AND'd with 0x7f and yield something * meaningful) */ #define JSONSL_XTYPE \ X(STRING, '"'|JSONSL_Tf_STRINGY) \ X(HKEY, '#'|JSONSL_Tf_STRINGY) \ X(OBJECT, '{') \ X(LIST, '[') \ X(SPECIAL, '^') \ X(UESCAPE, 'u') typedef enum { #define X(o, c) \ JSONSL_T_##o = c, JSONSL_XTYPE JSONSL_T_UNKNOWN = '?', /* Abstract 'root' object */ JSONSL_T_ROOT = 0 #undef X } jsonsl_type_t; /** * Subtypes for T_SPECIAL. We define them as flags * because more than one type can be applied to a * given object. */ #define JSONSL_XSPECIAL \ X(NONE, 0) \ X(SIGNED, 1<<0) \ X(UNSIGNED, 1<<1) \ X(TRUE, 1<<2) \ X(FALSE, 1<<3) \ X(NULL, 1<<4) \ X(FLOAT, 1<<5) \ X(EXPONENT, 1<<6) \ X(NONASCII, 1<<7) \ X(NAN, 1<<8) \ X(INF, 1<<9) typedef enum { #define X(o,b) \ JSONSL_SPECIALf_##o = b, JSONSL_XSPECIAL #undef X /* Handy flags for checking */ JSONSL_SPECIALf_UNKNOWN = 1 << 10, /** @private Private */ JSONSL_SPECIALf_ZERO = 1 << 11 | JSONSL_SPECIALf_UNSIGNED, /** @private */ JSONSL_SPECIALf_DASH = 1 << 12, /** @private */ JSONSL_SPECIALf_POS_INF = (JSONSL_SPECIALf_INF), JSONSL_SPECIALf_NEG_INF = (JSONSL_SPECIALf_INF|JSONSL_SPECIALf_SIGNED), /** Type is numeric */ JSONSL_SPECIALf_NUMERIC = (JSONSL_SPECIALf_SIGNED| JSONSL_SPECIALf_UNSIGNED), /** Type is a boolean */ JSONSL_SPECIALf_BOOLEAN = (JSONSL_SPECIALf_TRUE|JSONSL_SPECIALf_FALSE), /** Type is an "extended", not integral type (but numeric) */ JSONSL_SPECIALf_NUMNOINT = (JSONSL_SPECIALf_FLOAT|JSONSL_SPECIALf_EXPONENT|JSONSL_SPECIALf_NAN |JSONSL_SPECIALf_INF) } jsonsl_special_t; /** * These are the various types of stack (or other) events * which will trigger a callback. * Like the type constants, this are also mnemonic */ #define JSONSL_XACTION \ X(PUSH, '+') \ X(POP, '-') \ X(UESCAPE, 'U') \ X(ERROR, '!') typedef enum { #define X(a,c) \ JSONSL_ACTION_##a = c, JSONSL_XACTION JSONSL_ACTION_UNKNOWN = '?' 
#undef X } jsonsl_action_t; /** * Various errors which may be thrown while parsing JSON */ #define JSONSL_XERR \ /* Trailing garbage characters */ \ X(GARBAGE_TRAILING) \ /* We were expecting a 'special' (numeric, true, false, null) */ \ X(SPECIAL_EXPECTED) \ /* The 'special' value was incomplete */ \ X(SPECIAL_INCOMPLETE) \ /* Found a stray token */ \ X(STRAY_TOKEN) \ /* We were expecting a token before this one */ \ X(MISSING_TOKEN) \ /* Cannot insert because the container is not ready */ \ X(CANT_INSERT) \ /* Found a '\' outside a string */ \ X(ESCAPE_OUTSIDE_STRING) \ /* Found a ':' outside of a hash */ \ X(KEY_OUTSIDE_OBJECT) \ /* found a string outside of a container */ \ X(STRING_OUTSIDE_CONTAINER) \ /* Found a null byte in middle of string */ \ X(FOUND_NULL_BYTE) \ /* Current level exceeds limit specified in constructor */ \ X(LEVELS_EXCEEDED) \ /* Got a } as a result of an opening [ or vice versa */ \ X(BRACKET_MISMATCH) \ /* We expected a key, but got something else instead */ \ X(HKEY_EXPECTED) \ /* We got an illegal control character (bad whitespace or something) */ \ X(WEIRD_WHITESPACE) \ /* Found a \u-escape, but there were less than 4 following hex digits */ \ X(UESCAPE_TOOSHORT) \ /* Invalid two-character escape */ \ X(ESCAPE_INVALID) \ /* Trailing comma */ \ X(TRAILING_COMMA) \ /* An invalid number was passed in a numeric field */ \ X(INVALID_NUMBER) \ /* Value is missing for object */ \ X(VALUE_EXPECTED) \ /* The following are for JPR Stuff */ \ \ /* Found a literal '%' but it was only followed by a single valid hex digit */ \ X(PERCENT_BADHEX) \ /* jsonpointer URI is malformed '/' */ \ X(JPR_BADPATH) \ /* Duplicate slash */ \ X(JPR_DUPSLASH) \ /* No leading root */ \ X(JPR_NOROOT) \ /* Allocation failure */ \ X(ENOMEM) \ /* Invalid unicode codepoint detected (in case of escapes) */ \ X(INVALID_CODEPOINT) typedef enum { JSONSL_ERROR_SUCCESS = 0, #define X(e) \ JSONSL_ERROR_##e, JSONSL_XERR #undef X JSONSL_ERROR_GENERIC } jsonsl_error_t; /** * A state is a single level of the stack. * Non-private data (i.e. the 'data' field, see the STATE_GENERIC section) * will remain in tact until the item is popped. * * As a result, it means a parent state object may be accessed from a child * object, (the parents fields will all be valid). This allows a user to create * an ad-hoc hierarchy on top of the JSON one. * */ struct jsonsl_state_st { /** * The JSON object type */ unsigned type; /** If this element is special, then its extended type is here */ unsigned special_flags; /** * The position (in terms of number of bytes since the first call to * jsonsl_feed()) at which the state was first pushed. This includes * opening tokens, if applicable. * * @note For strings (i.e. type & JSONSL_Tf_STRINGY is nonzero) this will * be the position of the first quote. * * @see jsonsl_st::pos which contains the _current_ position and can be * used during a POP callback to get the length of the element. */ size_t pos_begin; /**FIXME: This is redundant as the same information can be derived from * jsonsl_st::pos at pop-time */ size_t pos_cur; /** * Level of recursion into nesting. This is mainly a convenience * variable, as this can technically be deduced from the lexer's * level parameter (though the logic is not that simple) */ unsigned int level; /** * how many elements in the object/list. * For objects (hashes), an element is either * a key or a value. Thus for one complete pair, * nelem will be 2. * * For special types, this will hold the sum of the digits. 
* This only holds true for values which are simple signed/unsigned * numbers. Otherwise a special flag is set, and extra handling is not * performed. */ uint64_t nelem; /*TODO: merge this and special_flags into a union */ /** * Useful for an opening nest, this will prevent a callback from being * invoked on this item or any of its children */ int ignore_callback; /** * Counter which is incremented each time an escape ('\') is encountered. * This is used internally for non-string types and should only be * inspected by the user if the state actually represents a string * type. */ unsigned int nescapes; /** * Put anything you want here. if JSONSL_STATE_USER_FIELDS is here, then * the macro expansion happens here. * * You can use these fields to store hierarchical or 'tagging' information * for specific objects. * * See the documentation above for the lifetime of the state object (i.e. * if the private data points to allocated memory, it should be freed * when the object is popped, as the state object will be re-used) */ #ifndef JSONSL_STATE_GENERIC JSONSL_STATE_USER_FIELDS #else /** * Otherwise, this is a simple void * pointer for anything you want */ void *data; #endif /* JSONSL_STATE_USER_FIELDS */ }; /**Gets the number of elements in the list. * @param st The state. Must be of type JSONSL_T_LIST * @return number of elements in the list */ #define JSONSL_LIST_SIZE(st) ((st)->nelem) /**Gets the number of key-value pairs in an object * @param st The state. Must be of type JSONSL_T_OBJECT * @return the number of key-value pairs in the object */ #define JSONSL_OBJECT_SIZE(st) ((st)->nelem / 2) /**Gets the numeric value. * @param st The state. Must be of type JSONSL_T_SPECIAL and * special_flags must have the JSONSL_SPECIALf_NUMERIC flag * set. * @return the numeric value of the state. */ #define JSONSL_NUMERIC_VALUE(st) ((st)->nelem) /* * So now we need some special structure for keeping the * JPR info in sync. Preferably all in a single block * of memory (there's no need for separate allocations. * So we will define a 'table' with the following layout * * Level nPosbl JPR1_last JPR2_last JPR3_last * * 0 1 NOMATCH POSSIBLE POSSIBLE * 1 0 NOMATCH NOMATCH COMPLETE * [ table ends here because no further path is possible] * * Where the JPR..n corresponds to the number of JPRs * requested, and nPosble is a quick flag to determine * * the number of possibilities. In the future this might * be made into a proper 'jump' table, * * Since we always mark JPRs from the higher levels descending * into the lower ones, a prospective child match would first * look at the parent table to check the possibilities, and then * see which ones were possible.. * * Thus, the size of this blob would be (and these are all ints here) * nLevels * nJPR * 2. * * the 'Width' of the table would be nJPR*2, and the 'height' would be * nlevels */ /** * This is called when a stack change ocurs. * * @param jsn The lexer * @param action The type of action, this can be PUSH or POP * @param state A pointer to the stack currently affected by the action * @param at A pointer to the position of the input buffer which triggered * this action. */ typedef void (*jsonsl_stack_callback)( jsonsl_t jsn, jsonsl_action_t action, struct jsonsl_state_st* state, const jsonsl_char_t *at); /** * This is called when an error is encountered. * Sometimes it's possible to 'erase' characters (by replacing them * with whitespace). 
If you think you have corrected the error, you * can return a true value, in which case the parser will backtrack * and try again. * * @param jsn The lexer * @param error The error which was thrown * @param state the current state * @param a pointer to the position of the input buffer which triggered * the error. Note that this is not const, this is because you have the * possibility of modifying the character in an attempt to correct the * error * * @return zero to bail, nonzero to try again (this only makes sense if * the input buffer has been modified by this callback) */ typedef int (*jsonsl_error_callback)( jsonsl_t jsn, jsonsl_error_t error, struct jsonsl_state_st* state, jsonsl_char_t *at); struct jsonsl_st { /** Public, read-only */ /** This is the current level of the stack */ unsigned int level; /** Flag set to indicate we should stop processing */ unsigned int stopfl; /** * This is the current position, relative to the beginning * of the stream. */ size_t pos; /** This is the 'bytes' variable passed to feed() */ const jsonsl_char_t *base; /** Callback invoked for PUSH actions */ jsonsl_stack_callback action_callback_PUSH; /** Callback invoked for POP actions */ jsonsl_stack_callback action_callback_POP; /** Default callback for any action, if neither PUSH or POP callbacks are defined */ jsonsl_stack_callback action_callback; /** * Do not invoke callbacks for objects deeper than this level. * NOTE: This field establishes the lower bound for ignored callbacks, * and is thus misnamed. `min_ignore_level` would actually make more * sense, but we don't want to break API. */ unsigned int max_callback_level; /** The error callback. Invoked when an error happens. Should not be NULL */ jsonsl_error_callback error_callback; /* these are boolean flags you can modify. You will be called * about notification for each of these types if the corresponding * variable is true. */ /** * @name Callback Booleans. * These determine whether a callback is to be invoked for certain types of objects * @{*/ /** Boolean flag to enable or disable the invokcation for events on this type*/ int call_SPECIAL; int call_OBJECT; int call_LIST; int call_STRING; int call_HKEY; /*@}*/ /** * @name u-Escape handling * Special handling for the \\u-f00d type sequences. These are meant * to be translated back into the corresponding octet(s). * A special callback (if set) is invoked with *at=='u'. An application * may wish to temporarily suspend parsing and handle the 'u-' sequence * internally (or not). */ /*@{*/ /** Callback to be invoked for a u-escape */ jsonsl_stack_callback action_callback_UESCAPE; /** Boolean flag, whether to invoke the callback */ int call_UESCAPE; /** Boolean flag, whether we should return after encountering a u-escape: * the callback is invoked and then we return if this is true */ int return_UESCAPE; /*@}*/ struct { int allow_trailing_comma; } options; /** Put anything here */ void *data; /*@{*/ /** Private */ int in_escape; char expecting; char tok_last; int can_insert; unsigned int levels_max; #ifndef JSONSL_NO_JPR size_t jpr_count; jsonsl_jpr_t *jprs; /* Root pointer for JPR matching information */ size_t *jpr_root; #endif /* JSONSL_NO_JPR */ /*@}*/ /** * This is the stack. Its upper bound is levels_max, or the * nlevels argument passed to jsonsl_new. If you modify this structure, * make sure that this member is last. 
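 * As an editorial usage sketch (not upstream documentation; the helper names
 * on_push_pop, on_error and example_parse are hypothetical, and <stdio.h> and
 * <string.h> are assumed), a minimal consumer of a lexer built around this
 * structure looks roughly like this:
 *
 *   #include <stdio.h>
 *   #include <string.h>
 *
 *   static void on_push_pop(jsonsl_t jsn, jsonsl_action_t action,
 *                           struct jsonsl_state_st *state,
 *                           const jsonsl_char_t *at)
 *   {
 *       (void) jsn;
 *       (void) at;
 *       // '+' for PUSH, '-' for POP, followed by the element type and level
 *       printf("%c %s (level %u)\n", (char) action,
 *              jsonsl_strtype((jsonsl_type_t) state->type), state->level);
 *   }
 *
 *   static int on_error(jsonsl_t jsn, jsonsl_error_t err,
 *                       struct jsonsl_state_st *state, jsonsl_char_t *at)
 *   {
 *       (void) jsn;
 *       (void) state;
 *       (void) at;
 *       fprintf(stderr, "parse error: %s\n", jsonsl_strerror(err));
 *       return 0; // zero aborts parsing; nonzero retries after a correction
 *   }
 *
 *   void example_parse(void)
 *   {
 *       const char *doc = "{\"key\": [1, 2, 3]}";
 *       jsonsl_t jsn = jsonsl_new(64);            // up to 64 nesting levels
 *       jsonsl_enable_all_callbacks(jsn);
 *       jsn->action_callback = on_push_pop;
 *       jsn->error_callback = on_error;
 *       jsonsl_feed(jsn, doc, strlen(doc));
 *       jsonsl_destroy(jsn);
 *   }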
*/ struct jsonsl_state_st stack[1]; }; /** * Creates a new lexer object, with capacity for recursion up to nlevels * * @param nlevels maximum recursion depth */ JSONSL_API jsonsl_t jsonsl_new(int nlevels); /** * Feeds data into the lexer. * * @param jsn the lexer object * @param bytes new data to be fed * @param nbytes size of new data */ JSONSL_API void jsonsl_feed(jsonsl_t jsn, const jsonsl_char_t *bytes, size_t nbytes); /** * Resets the internal parser state. This does not free the parser * but does clean it internally, so that the next time feed() is called, * it will be treated as a new stream * * @param jsn the lexer */ JSONSL_API void jsonsl_reset(jsonsl_t jsn); /** * Frees the lexer, cleaning any allocated memory taken * * @param jsn the lexer */ JSONSL_API void jsonsl_destroy(jsonsl_t jsn); /** * Gets the 'parent' element, given the current one * * @param jsn the lexer * @param cur the current nest, which should be a struct jsonsl_nest_st */ static JSONSL_INLINE struct jsonsl_state_st *jsonsl_last_state(const jsonsl_t jsn, const struct jsonsl_state_st *state) { /* Don't complain about overriding array bounds */ if (state->level > 1) { return jsn->stack + state->level - 1; } else { return NULL; } } /** * Gets the state of the last fully consumed child of this parent. This is * only valid in the parent's POP callback. * * @param the lexer * @return A pointer to the child. */ static JSONSL_INLINE struct jsonsl_state_st *jsonsl_last_child(const jsonsl_t jsn, const struct jsonsl_state_st *parent) { return jsn->stack + (parent->level + 1); } /**Call to instruct the parser to stop parsing and return. This is valid * only from within a callback */ static JSONSL_INLINE void jsonsl_stop(jsonsl_t jsn) { jsn->stopfl = 1; } /** * This enables receiving callbacks on all events. Doesn't do * anything special but helps avoid some boilerplate. * This does not touch the UESCAPE callbacks or flags. */ static JSONSL_INLINE void jsonsl_enable_all_callbacks(jsonsl_t jsn) { jsn->call_HKEY = 1; jsn->call_STRING = 1; jsn->call_OBJECT = 1; jsn->call_SPECIAL = 1; jsn->call_LIST = 1; } /** * A macro which returns true if the current state object can * have children. This means a list type or an object type. */ #define JSONSL_STATE_IS_CONTAINER(state) \ (state->type == JSONSL_T_OBJECT || state->type == JSONSL_T_LIST) /** * These two functions, dump a string representation * of the error or type, respectively. They will never * return NULL */ JSONSL_API const char* jsonsl_strerror(jsonsl_error_t err); JSONSL_API const char* jsonsl_strtype(jsonsl_type_t jt); /** * Dumps global metrics to the screen. This is a noop unless * jsonsl was compiled with JSONSL_USE_METRICS */ JSONSL_API void jsonsl_dump_global_metrics(void); /* This macro just here for editors to do code folding */ #ifndef JSONSL_NO_JPR /** * @name JSON Pointer API * * JSONPointer API. This isn't really related to the lexer (at least not yet) * JSONPointer provides an extremely simple specification for providing * locations within JSON objects. We will extend it a bit and allow for * providing 'wildcard' characters by which to be able to 'query' the stream. * * See http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer-00 * * Currently I'm implementing the 'single query' API which can only use a single * query component. In the future I will integrate my yet-to-be-published * Boyer-Moore-esque prefix searching implementation, in order to allow * multiple paths to be merged into one for quick and efficient searching. 
* * * JPR (as we'll refer to it within the source) can be used by splitting * the components into multiple sections, and incrementally 'track' each * component. When JSONSL delivers a 'pop' callback for a string, or a 'push' * callback for an object, we will check to see whether the index matching * the component corresponding to the current level contains a match * for our path. * * In order to do this properly, a structure must be maintained within the * parent indicating whether its children are possible matches. This flag * will be 'inherited' by call children which may conform to the match * specification, and discarded by all which do not (thereby eliminating * their children from inheriting it). * * A successful match is a complete one. One can provide multiple paths with * multiple levels of matches e.g. * /foo/bar/baz/^/blah * * @{ */ /** The wildcard character */ #ifndef JSONSL_PATH_WILDCARD_CHAR #define JSONSL_PATH_WILDCARD_CHAR '^' #endif /* WILDCARD_CHAR */ #define JSONSL_XMATCH \ X(COMPLETE,1) \ X(POSSIBLE,0) \ X(NOMATCH,-1) \ X(TYPE_MISMATCH, -2) typedef enum { #define X(T,v) \ JSONSL_MATCH_##T = v, JSONSL_XMATCH #undef X JSONSL_MATCH_UNKNOWN } jsonsl_jpr_match_t; typedef enum { JSONSL_PATH_STRING = 1, JSONSL_PATH_WILDCARD, JSONSL_PATH_NUMERIC, JSONSL_PATH_ROOT, /* Special */ JSONSL_PATH_INVALID = -1, JSONSL_PATH_NONE = 0 } jsonsl_jpr_type_t; struct jsonsl_jpr_component_st { /** The string the component points to */ char *pstr; /** if this is a numeric type, the number is 'cached' here */ unsigned long idx; /** The length of the string */ size_t len; /** The type of component (NUMERIC or STRING) */ jsonsl_jpr_type_t ptype; /** Set this to true to enforce type checking between dict keys and array * indices. jsonsl_jpr_match() will return TYPE_MISMATCH if it detects * that an array index is actually a child of a dictionary. */ short is_arridx; /* Extra fields (for more advanced searches. Default is empty) */ JSONSL_JPR_COMPONENT_USER_FIELDS }; struct jsonsl_jpr_st { /** Path components */ struct jsonsl_jpr_component_st *components; size_t ncomponents; /**Type of the match to be expected. If nonzero, will be compared against * the actual type */ unsigned match_type; /** Base of allocated string for components */ char *basestr; /** The original match string. Useful for returning to the user */ char *orig; size_t norig; }; /** * Create a new JPR object. * * @param path the JSONPointer path specification. * @param errp a pointer to a jsonsl_error_t. If this function returns NULL, * then more details will be in this variable. * * @return a new jsonsl_jpr_t object, or NULL on error. */ JSONSL_API jsonsl_jpr_t jsonsl_jpr_new(const char *path, jsonsl_error_t *errp); /** * Destroy a JPR object */ JSONSL_API void jsonsl_jpr_destroy(jsonsl_jpr_t jpr); /** * Match a JSON object against a type and specific level * * @param jpr the JPR object * @param parent_type the type of the parent (should be T_LIST or T_OBJECT) * @param parent_level the level of the parent * @param key the 'key' of the child. If the parent is an array, this should be * empty. * @param nkey - the length of the key. If the parent is an array (T_LIST), then * this should be the current index. * * NOTE: The key of the child means any kind of associative data related to the * element. Thus: <<< { "foo" : [ >>, * the opening array's key is "foo". * * @return a status constant. This indicates whether a match was excluded, possible, * or successful. 
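 * As an illustrative sketch (hypothetical values, assuming the root object of
 * the document {"foo": {"bar": 1}} sits at stack level 1):
 *
 *   jsonsl_jpr_t jpr = jsonsl_jpr_new("/foo/bar", NULL);
 *   jsonsl_jpr_match(jpr, JSONSL_T_OBJECT, 1, "foo", 3);  // JSONSL_MATCH_POSSIBLE
 *   jsonsl_jpr_match(jpr, JSONSL_T_OBJECT, 2, "bar", 3);  // JSONSL_MATCH_COMPLETE
 *   jsonsl_jpr_destroy(jpr);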
*/ JSONSL_API jsonsl_jpr_match_t jsonsl_jpr_match(jsonsl_jpr_t jpr, unsigned int parent_type, unsigned int parent_level, const char *key, size_t nkey); /** * Alternate matching algorithm. This matching algorithm does not use * JSONPointer but relies on a more structured searching mechanism. It * assumes that there is a clear distinction between array indices and * object keys. In this case, the jsonsl_path_component_st::ptype should * be set to @ref JSONSL_PATH_NUMERIC for an array index (the * jsonsl_path_comonent_st::is_arridx field will be removed in a future * version). * * @param jpr The path * @param parent The parent structure. Can be NULL if this is the root object * @param child The child structure. Should not be NULL * @param key Object key, if an object * @param nkey Length of object key * @return Status constant if successful * * @note * For successful matching, both the key and the path itself should be normalized * to contain 'proper' utf8 sequences rather than utf16 '\uXXXX' escapes. This * should currently be done in the application. Another version of this function * may use a temporary buffer in such circumstances (allocated by the application). * * Since this function also checks the state of the child, it should only * be called on PUSH callbacks, and not POP callbacks */ JSONSL_API jsonsl_jpr_match_t jsonsl_path_match(jsonsl_jpr_t jpr, const struct jsonsl_state_st *parent, const struct jsonsl_state_st *child, const char *key, size_t nkey); /** * Associate a set of JPR objects with a lexer instance. * This should be called before the lexer has been fed any data (and * behavior is undefined if you don't adhere to this). * * After using this function, you may subsequently call match_state() on * given states (presumably from within the callbacks). * * Note that currently the first JPR is the quickest and comes * pre-allocated with the state structure. Further JPR objects * are chained. * * @param jsn The lexer * @param jprs An array of jsonsl_jpr_t objects * @param njprs How many elements in the jprs array. */ JSONSL_API void jsonsl_jpr_match_state_init(jsonsl_t jsn, jsonsl_jpr_t *jprs, size_t njprs); /** * This follows the same semantics as the normal match, * except we infer parent and type information from the relevant state objects. * The match status (for all possible JPR objects) is set in the *out parameter. * * If a match has succeeded, then its JPR object will be returned. In all other * instances, NULL is returned; * * @param jpr The jsonsl_jpr_t handle * @param state The jsonsl_state_st which is a candidate * @param key The hash key (if applicable, can be NULL if parent is list) * @param nkey Length of hash key (if applicable, can be zero if parent is list) * @param out A pointer to a jsonsl_jpr_match_t. This will be populated with * the match result * * @return If a match was completed in full, then the JPR object containing * the matching path will be returned. Otherwise, the return is NULL (note, this * does not mean matching has failed, it can still be part of the match: check * the out parameter). 
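 * A typical call site (sketch only; it assumes the application has tracked
 * the current hash key itself, e.g. from its HKEY callback):
 *
 *   jsonsl_jpr_match_t status;
 *   jsonsl_jpr_t hit = jsonsl_jpr_match_state(jsn, state, key, nkey, &status);
 *   if (hit != NULL) {
 *       printf("matched path %s\n", hit->orig);
 *   } else if (status == JSONSL_MATCH_POSSIBLE) {
 *       // a deeper element may still complete one of the registered paths
 *   }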
*/ JSONSL_API jsonsl_jpr_t jsonsl_jpr_match_state(jsonsl_t jsn, struct jsonsl_state_st *state, const char *key, size_t nkey, jsonsl_jpr_match_t *out); /** * Cleanup any memory allocated and any states set by * match_state_init() and match_state() * @param jsn The lexer */ JSONSL_API void jsonsl_jpr_match_state_cleanup(jsonsl_t jsn); /** * Return a string representation of the match result returned by match() */ JSONSL_API const char *jsonsl_strmatchtype(jsonsl_jpr_match_t match); /* @}*/ /** * Utility function to convert escape sequences into their original form. * * The decoders I've sampled do not seem to specify a standard behavior of what * to escape/unescape. * * RFC 4627 Mandates only that the quoute, backslash, and ASCII control * characters (0x00-0x1f) be escaped. It is often common for applications * to escape a '/' - however this may also be desired behavior. the JSON * spec is not clear on this, and therefore jsonsl leaves it up to you. * * Additionally, sometimes you may wish to _normalize_ JSON. This is specifically * true when dealing with 'u-escapes' which can be expressed perfectly fine * as utf8. One use case for normalization is JPR string comparison, in which * case two effectively equivalent strings may not match because one is using * u-escapes and the other proper utf8. To normalize u-escapes only, pass in * an empty `toEscape` table, enabling only the `u` index. * * @param in The input string. * @param out An allocated output (should be the same size as in) * @param len the size of the buffer * @param toEscape - A sparse array of characters to unescape. Characters * which are not present in this array, e.g. toEscape['c'] == 0 will be * ignored and passed to the output in their original form. * @param oflags If not null, and a \uXXXX escape expands to a non-ascii byte, * then this variable will have the SPECIALf_NONASCII flag on. * * @param err A pointer to an error variable. If an error ocurrs, it will be * set in this variable * @param errat If not null and an error occurs, this will be set to point * to the position within the string at which the offending character was * encountered. * * @return The effective size of the output buffer. * * @note * This function now encodes the UTF8 equivalents of utf16 escapes (i.e. * 'u-escapes'). Previously this would encode the escapes as utf16 literals, * which while still correct in some sense was confusing for many (especially * considering that the inputs were variations of char). * * @note * The output buffer will never be larger than the input buffer, since * standard escape sequences (i.e. '\t') occupy two bytes in the source * but only one byte (when unescaped) in the output. Likewise u-escapes * (i.e. \uXXXX) will occupy six bytes in the source, but at the most * two bytes when escaped. */ JSONSL_API size_t jsonsl_util_unescape_ex(const char *in, char *out, size_t len, const int toEscape[128], unsigned *oflags, jsonsl_error_t *err, const char **errat); /** * Convenience macro to avoid passing too many parameters */ #define jsonsl_util_unescape(in, out, len, toEscape, err) \ jsonsl_util_unescape_ex(in, out, len, toEscape, NULL, err, NULL) #endif /* JSONSL_NO_JPR */ #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* JSONSL_H_ */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-aggregate-private.h0000644000076500000240000000272713572250757027071 0ustar alcaeusstaff/* * Copyright 2019 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_AGGREGATE_PRIVATE_H #define MONGOC_AGGREGATE_PRIVATE_H #include #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-flags.h" #include "mongoc/mongoc-read-concern.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-write-concern.h" BSON_BEGIN_DECLS mongoc_cursor_t * _mongoc_aggregate (mongoc_client_t *client, const char *ns, mongoc_query_flags_t flags, const bson_t *pipeline, const bson_t *opts, const mongoc_read_prefs_t *user_rp, const mongoc_read_prefs_t *default_rp, const mongoc_read_concern_t *default_rc, const mongoc_write_concern_t *default_wc); bool _has_write_key (bson_iter_t *iter); BSON_END_DECLS #endif /* MONGOC_AGGREGATE_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-aggregate.c0000644000076500000240000002453513572250757025415 0ustar alcaeusstaff/* * Copyright 2019 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-aggregate-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-server-stream-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" /*-------------------------------------------------------------------------- * * _has_write_key -- * * Returns true if the aggregation pipeline's last stage is "$out" * or "$merge"; otherwise returns false. * * Side effects: * Advances @iter to the last element. * *-------------------------------------------------------------------------- */ bool _has_write_key (bson_iter_t *iter) { bson_iter_t stage; while (bson_iter_next (iter)) { if (BSON_ITER_HOLDS_DOCUMENT (iter)) { bson_iter_recurse (iter, &stage); if (bson_iter_find (&stage, "$out")) { return true; } bson_iter_recurse (iter, &stage); if (bson_iter_find (&stage, "$merge")) { return true; } } } return false; } /*-------------------------------------------------------------------------- * * _make_agg_cmd -- * * Constructs an aggregate command. If @ns does not include a collection * name, 1 will be used in its place for the value of "aggregate" in the * command document. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @command is always initialized. * @error is set if there is a failure. 
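 *
 * Example of the command shape this produces (for illustration; values are
 * placeholders): with ns "db.collection", pipeline [ { "$match" : { ... } } ]
 * and opts { "batchSize" : 10 }, @command becomes roughly
 *
 *   { "aggregate" : "collection",
 *     "pipeline"  : [ { "$match" : { ... } } ],
 *     "cursor"    : { "batchSize" : 10 } }
 *
 * A database-level ns (no ".") yields { "aggregate" : 1, ... } instead, and
 * batchSize is omitted from "cursor" when it is 0 and the pipeline contains a
 * $out or $merge stage.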
* *-------------------------------------------------------------------------- */ static bool _make_agg_cmd (const char *ns, const bson_t *pipeline, const bson_t *opts, bson_t *command, bson_error_t *err) { const char *dot; bson_iter_t iter; int32_t batch_size = 0; bson_t child; bool has_write_key; bson_iter_t has_write_key_iter; bson_init (command); dot = strstr (ns, "."); if (dot) { /* Note: we're not validating that the collection name's length is one or * more characters, as functions such as mongoc_client_get_collection also * do not validate. */ BSON_APPEND_UTF8 (command, "aggregate", dot + 1); } else { BSON_APPEND_INT32 (command, "aggregate", 1); } /* * The following will allow @pipeline to be either an array of * items for the pipeline, or {"pipeline": [...]}. */ if (bson_iter_init_find (&iter, pipeline, "pipeline") && BSON_ITER_HOLDS_ARRAY (&iter)) { bson_iter_recurse (&iter, &has_write_key_iter); if (!bson_append_iter (command, "pipeline", 8, &iter)) { bson_set_error (err, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Failed to append \"pipeline\" to create command."); return false; } } else { BSON_APPEND_ARRAY (command, "pipeline", pipeline); bson_iter_init (&has_write_key_iter, pipeline); } has_write_key = _has_write_key (&has_write_key_iter); bson_append_document_begin (command, "cursor", 6, &child); if (opts && bson_iter_init_find (&iter, opts, "batchSize") && BSON_ITER_HOLDS_NUMBER (&iter)) { batch_size = (int32_t) bson_iter_as_int64 (&iter); /* Ignore batchSize=0 for aggregates with $out or $merge */ if (!(has_write_key && batch_size == 0)) { BSON_APPEND_INT32 (&child, "batchSize", batch_size); } } bson_append_document_end (command, &child); return true; } /* *-------------------------------------------------------------------------- * * _mongoc_aggregate -- * * Constructs a mongoc_cursor_t for an "aggregate" command. * * This function will always return a new mongoc_cursor_t that should * be freed with mongoc_cursor_destroy(). * * The cursor may fail once iterated upon, so check * mongoc_cursor_error() if mongoc_cursor_next() returns false. * * See http://docs.mongodb.org/manual/aggregation/ for more * information on how to build aggregation pipelines. * * Parameters: * @ns: Namespace (or database name for database-level aggregation). * @flags: Bitwise or of mongoc_query_flags_t or 0. * @pipeline: A bson_t containing the pipeline request. @pipeline * will be sent as an array type in the request. * @opts: A bson_t containing aggregation options, such as * bypassDocumentValidation (used with $out and $merge), maxTimeMS * (declaring maximum server execution time) and explain (return * information on the processing of the pipeline). * @user_rp: Optional read preferences for the command. * @default_rp: Default read preferences from the collection or database. * @default_rc: Default read concern from the collection or database. * @default_wc: Default write concern from the collection or database. * * Returns: * A newly allocated mongoc_cursor_t that should be freed with * mongoc_cursor_destroy(). * * Side effects: * None. 
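 *
 * Caller sketch (illustrative only; the default read/write settings shown
 * here are assumed to come from a collection or database handle):
 *
 *   mongoc_cursor_t *cursor;
 *   const bson_t *doc;
 *   bson_error_t error;
 *
 *   cursor = _mongoc_aggregate (client, "db.coll", MONGOC_QUERY_NONE,
 *                               pipeline, opts, user_read_prefs,
 *                               default_read_prefs, default_read_concern,
 *                               default_write_concern);
 *   while (mongoc_cursor_next (cursor, &doc)) {
 *      // consume result documents
 *   }
 *   if (mongoc_cursor_error (cursor, &error)) {
 *      // handle the error; a cursor is returned even on failure
 *   }
 *   mongoc_cursor_destroy (cursor);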
* *-------------------------------------------------------------------------- */ mongoc_cursor_t * _mongoc_aggregate (mongoc_client_t *client, const char *ns, mongoc_query_flags_t flags, const bson_t *pipeline, const bson_t *opts, const mongoc_read_prefs_t *user_rp, const mongoc_read_prefs_t *default_rp, const mongoc_read_concern_t *default_rc, const mongoc_write_concern_t *default_wc) { mongoc_server_stream_t *server_stream = NULL; bool has_write_key; bool has_write_concern; bson_iter_t ar; mongoc_cursor_t *cursor; uint32_t server_id; bson_iter_t iter; bson_t command; bson_t cursor_opts; bool created_command; bson_error_t create_cmd_err = {0}; ENTRY; BSON_ASSERT (client); BSON_ASSERT (ns); BSON_ASSERT (pipeline); bson_init (&cursor_opts); _mongoc_cursor_flags_to_opts (flags, &cursor_opts, NULL); if (opts) { bson_concat (&cursor_opts /* destination */, opts /* source */); } created_command = _make_agg_cmd (ns, pipeline, opts, &command, &create_cmd_err); cursor = _mongoc_cursor_cmd_new (client, ns, created_command ? &command : NULL, &cursor_opts, user_rp, default_rp, default_rc); bson_destroy (&command); bson_destroy (&cursor_opts); if (!created_command) { /* copy error back to cursor. */ memcpy (&cursor->error, &create_cmd_err, sizeof (bson_error_t)); GOTO (done); } /* Get serverId from opts; if invalid set cursor err. _mongoc_cursor_cmd_new * has already done this, but we want a COMMAND error, not CURSOR, since that * has been the contract since serverId was first implemented. */ if (!_mongoc_get_server_id_from_opts (opts, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, &server_id, &cursor->error)) { GOTO (done); } if (mongoc_cursor_error (cursor, NULL)) { /* something else is wrong with opts */ GOTO (done); } if (!_mongoc_read_prefs_validate (cursor->read_prefs, &cursor->error)) { GOTO (done); } /* pipeline could be like {pipeline: [{$out: 'test'}]} or [{$out: 'test'}] */ if (bson_iter_init_find (&iter, pipeline, "pipeline") && BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &ar)) { has_write_key = _has_write_key (&ar); } else { if (!bson_iter_init (&iter, pipeline)) { bson_set_error (&cursor->error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Pipeline is invalid BSON"); GOTO (done); } has_write_key = _has_write_key (&iter); } if (has_write_key && cursor->read_prefs->mode != MONGOC_READ_PRIMARY) { mongoc_read_prefs_destroy (cursor->read_prefs); cursor->read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); MONGOC_WARNING ("$out or $merge stage specified. Overriding read " "preference to primary."); } /* server id isn't enough. 
ensure we're connected & know wire version */ server_stream = _mongoc_cursor_fetch_stream (cursor); if (!server_stream) { GOTO (done); } has_write_concern = bson_has_field (&cursor->opts, "writeConcern"); if (has_write_concern && has_write_key && server_stream->sd->max_wire_version < WIRE_VERSION_CMD_WRITE_CONCERN) { bson_set_error ( &cursor->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "\"aggregate\" with \"$out\" or \"$merge\" does not support " "writeConcern with wire version %d, wire version %d is " "required", server_stream->sd->max_wire_version, WIRE_VERSION_CMD_WRITE_CONCERN); GOTO (done); } /* Only inherit WriteConcern when aggregate has $out or $merge */ if (!bson_has_field (&cursor->opts, "writeConcern") && has_write_key) { mongoc_write_concern_destroy (cursor->write_concern); cursor->write_concern = mongoc_write_concern_copy (default_wc); } done: mongoc_server_stream_cleanup (server_stream); /* null ok */ /* we always return the cursor, even if it fails; users can detect the * failure on performing a cursor operation. see CDRIVER-880. */ RETURN (cursor); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-apm-private.h0000644000076500000240000001330413572250757025711 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_APM_PRIVATE_H #define MONGOC_APM_PRIVATE_H #include #include "mongoc/mongoc-apm.h" BSON_BEGIN_DECLS /* forward decl */ struct _mongoc_cmd_t; struct _mongoc_apm_callbacks_t { mongoc_apm_command_started_cb_t started; mongoc_apm_command_succeeded_cb_t succeeded; mongoc_apm_command_failed_cb_t failed; mongoc_apm_server_changed_cb_t server_changed; mongoc_apm_server_opening_cb_t server_opening; mongoc_apm_server_closed_cb_t server_closed; mongoc_apm_topology_changed_cb_t topology_changed; mongoc_apm_topology_opening_cb_t topology_opening; mongoc_apm_topology_closed_cb_t topology_closed; mongoc_apm_server_heartbeat_started_cb_t server_heartbeat_started; mongoc_apm_server_heartbeat_succeeded_cb_t server_heartbeat_succeeded; mongoc_apm_server_heartbeat_failed_cb_t server_heartbeat_failed; }; /* * command monitoring events */ struct _mongoc_apm_command_started_t { bson_t *command; bool command_owned; const char *database_name; const char *command_name; int64_t request_id; int64_t operation_id; const mongoc_host_list_t *host; uint32_t server_id; void *context; }; struct _mongoc_apm_command_succeeded_t { int64_t duration; const bson_t *reply; const char *command_name; int64_t request_id; int64_t operation_id; const mongoc_host_list_t *host; uint32_t server_id; void *context; }; struct _mongoc_apm_command_failed_t { int64_t duration; const char *command_name; const bson_error_t *error; const bson_t *reply; int64_t request_id; int64_t operation_id; const mongoc_host_list_t *host; uint32_t server_id; void *context; }; /* * SDAM monitoring events */ struct _mongoc_apm_server_changed_t { const mongoc_host_list_t *host; bson_oid_t topology_id; const mongoc_server_description_t *previous_description; const mongoc_server_description_t *new_description; void *context; }; struct _mongoc_apm_server_opening_t { const mongoc_host_list_t *host; bson_oid_t topology_id; void *context; }; struct _mongoc_apm_server_closed_t { const mongoc_host_list_t *host; bson_oid_t topology_id; void *context; }; struct _mongoc_apm_topology_changed_t { bson_oid_t topology_id; const mongoc_topology_description_t *previous_description; const mongoc_topology_description_t *new_description; void *context; }; struct _mongoc_apm_topology_opening_t { bson_oid_t topology_id; void *context; }; struct _mongoc_apm_topology_closed_t { bson_oid_t topology_id; void *context; }; struct _mongoc_apm_server_heartbeat_started_t { const mongoc_host_list_t *host; void *context; }; struct _mongoc_apm_server_heartbeat_succeeded_t { int64_t duration_usec; const bson_t *reply; const mongoc_host_list_t *host; void *context; }; struct _mongoc_apm_server_heartbeat_failed_t { int64_t duration_usec; const bson_error_t *error; const mongoc_host_list_t *host; void *context; }; void mongoc_apm_command_started_init (mongoc_apm_command_started_t *event, const bson_t *command, const char *database_name, const char *command_name, int64_t request_id, int64_t operation_id, const mongoc_host_list_t *host, uint32_t server_id, void *context); void mongoc_apm_command_started_init_with_cmd (mongoc_apm_command_started_t *event, struct _mongoc_cmd_t *cmd, int64_t request_id, void *context); void mongoc_apm_command_started_cleanup (mongoc_apm_command_started_t *event); void mongoc_apm_command_succeeded_init (mongoc_apm_command_succeeded_t *event, int64_t duration, const bson_t *reply, const char *command_name, int64_t request_id, int64_t operation_id, const mongoc_host_list_t *host, uint32_t server_id, void *context); void 
mongoc_apm_command_succeeded_cleanup (mongoc_apm_command_succeeded_t *event); void mongoc_apm_command_failed_init (mongoc_apm_command_failed_t *event, int64_t duration, const char *command_name, const bson_error_t *error, const bson_t *reply, int64_t request_id, int64_t operation_id, const mongoc_host_list_t *host, uint32_t server_id, void *context); void mongoc_apm_command_failed_cleanup (mongoc_apm_command_failed_t *event); BSON_END_DECLS #endif /* MONGOC_APM_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-apm.c0000644000076500000240000004276313572250757024247 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-apm-private.h" #include "mongoc/mongoc-cmd-private.h" /* * An Application Performance Management (APM) implementation, complying with * MongoDB's Command Monitoring Spec: * * https://github.com/mongodb/specifications/tree/master/source/command-monitoring */ static void append_documents_from_cmd (const mongoc_cmd_t *cmd, mongoc_apm_command_started_t *event) { int32_t doc_len; bson_t doc; const uint8_t *pos; const char *field_name; bson_t bson; char str[16]; const char *key; uint32_t i; if (!cmd->payload || !cmd->payload_size) { return; } if (!event->command_owned) { event->command = bson_copy (event->command); event->command_owned = true; } /* make array from outgoing OP_MSG payload type 1 on an "insert", * "update", or "delete" command. */ field_name = _mongoc_get_documents_field_name (cmd->command_name); BSON_ASSERT (field_name); BSON_ASSERT (BSON_APPEND_ARRAY_BEGIN (event->command, field_name, &bson)); pos = cmd->payload; i = 0; while (pos < cmd->payload + cmd->payload_size) { memcpy (&doc_len, pos, sizeof (doc_len)); doc_len = BSON_UINT32_FROM_LE (doc_len); BSON_ASSERT (bson_init_static (&doc, pos, (size_t) doc_len)); bson_uint32_to_string (i, &key, str, sizeof (str)); BSON_APPEND_DOCUMENT (&bson, key, &doc); pos += doc_len; i++; } bson_append_array_end (event->command, &bson); } /* * Private initializer / cleanup functions. */ void mongoc_apm_command_started_init (mongoc_apm_command_started_t *event, const bson_t *command, const char *database_name, const char *command_name, int64_t request_id, int64_t operation_id, const mongoc_host_list_t *host, uint32_t server_id, void *context) { bson_iter_t iter; uint32_t len; const uint8_t *data; /* Command Monitoring Spec: * * In cases where queries or commands are embedded in a $query parameter * when a read preference is provided, they MUST be unwrapped and the value * of the $query attribute becomes the filter or the command in the started * event. The read preference will subsequently be dropped as it is * considered metadata and metadata is not currently provided in the command * events. 
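    *
    * Concretely (illustrative values): a wrapped command such as
    *
    *   { "$query" : { "find" : "coll", "filter" : { } },
    *     "$readPreference" : { "mode" : "secondaryPreferred" } }
    *
    * is reported to the command-started callback as
    * { "find" : "coll", "filter" : { } }.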
*/ if (bson_has_field (command, "$readPreference")) { if (bson_iter_init_find (&iter, command, "$query") && BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_iter_document (&iter, &len, &data); event->command = bson_new_from_data (data, len); event->command_owned = true; } else { /* Got $readPreference without $query, probably OP_MSG */ event->command = (bson_t *) command; event->command_owned = false; } } else { /* discard "const", we promise not to modify "command" */ event->command = (bson_t *) command; event->command_owned = false; } event->database_name = database_name; event->command_name = command_name; event->request_id = request_id; event->operation_id = operation_id; event->host = host; event->server_id = server_id; event->context = context; } void mongoc_apm_command_started_init_with_cmd (mongoc_apm_command_started_t *event, mongoc_cmd_t *cmd, int64_t request_id, void *context) { mongoc_apm_command_started_init (event, cmd->command, cmd->db_name, cmd->command_name, request_id, cmd->operation_id, &cmd->server_stream->sd->host, cmd->server_stream->sd->id, context); /* OP_MSG document sequence for insert, update, or delete? */ append_documents_from_cmd (cmd, event); } void mongoc_apm_command_started_cleanup (mongoc_apm_command_started_t *event) { if (event->command_owned) { bson_destroy (event->command); } } void mongoc_apm_command_succeeded_init (mongoc_apm_command_succeeded_t *event, int64_t duration, const bson_t *reply, const char *command_name, int64_t request_id, int64_t operation_id, const mongoc_host_list_t *host, uint32_t server_id, void *context) { BSON_ASSERT (reply); event->duration = duration; event->reply = reply; event->command_name = command_name; event->request_id = request_id; event->operation_id = operation_id; event->host = host; event->server_id = server_id; event->context = context; } void mongoc_apm_command_succeeded_cleanup (mongoc_apm_command_succeeded_t *event) { /* no-op */ } void mongoc_apm_command_failed_init (mongoc_apm_command_failed_t *event, int64_t duration, const char *command_name, const bson_error_t *error, const bson_t *reply, int64_t request_id, int64_t operation_id, const mongoc_host_list_t *host, uint32_t server_id, void *context) { BSON_ASSERT (reply); event->duration = duration; event->command_name = command_name; event->error = error; event->reply = reply; event->request_id = request_id; event->operation_id = operation_id; event->host = host; event->server_id = server_id; event->context = context; } void mongoc_apm_command_failed_cleanup (mongoc_apm_command_failed_t *event) { /* no-op */ } /* * event field accessors */ /* command-started event fields */ const bson_t * mongoc_apm_command_started_get_command ( const mongoc_apm_command_started_t *event) { return event->command; } const char * mongoc_apm_command_started_get_database_name ( const mongoc_apm_command_started_t *event) { return event->database_name; } const char * mongoc_apm_command_started_get_command_name ( const mongoc_apm_command_started_t *event) { return event->command_name; } int64_t mongoc_apm_command_started_get_request_id ( const mongoc_apm_command_started_t *event) { return event->request_id; } int64_t mongoc_apm_command_started_get_operation_id ( const mongoc_apm_command_started_t *event) { return event->operation_id; } const mongoc_host_list_t * mongoc_apm_command_started_get_host (const mongoc_apm_command_started_t *event) { return event->host; } uint32_t mongoc_apm_command_started_get_server_id ( const mongoc_apm_command_started_t *event) { return event->server_id; } void * 
mongoc_apm_command_started_get_context ( const mongoc_apm_command_started_t *event) { return event->context; } /* command-succeeded event fields */ int64_t mongoc_apm_command_succeeded_get_duration ( const mongoc_apm_command_succeeded_t *event) { return event->duration; } const bson_t * mongoc_apm_command_succeeded_get_reply ( const mongoc_apm_command_succeeded_t *event) { return event->reply; } const char * mongoc_apm_command_succeeded_get_command_name ( const mongoc_apm_command_succeeded_t *event) { return event->command_name; } int64_t mongoc_apm_command_succeeded_get_request_id ( const mongoc_apm_command_succeeded_t *event) { return event->request_id; } int64_t mongoc_apm_command_succeeded_get_operation_id ( const mongoc_apm_command_succeeded_t *event) { return event->operation_id; } const mongoc_host_list_t * mongoc_apm_command_succeeded_get_host ( const mongoc_apm_command_succeeded_t *event) { return event->host; } uint32_t mongoc_apm_command_succeeded_get_server_id ( const mongoc_apm_command_succeeded_t *event) { return event->server_id; } void * mongoc_apm_command_succeeded_get_context ( const mongoc_apm_command_succeeded_t *event) { return event->context; } /* command-failed event fields */ int64_t mongoc_apm_command_failed_get_duration ( const mongoc_apm_command_failed_t *event) { return event->duration; } const char * mongoc_apm_command_failed_get_command_name ( const mongoc_apm_command_failed_t *event) { return event->command_name; } void mongoc_apm_command_failed_get_error (const mongoc_apm_command_failed_t *event, bson_error_t *error) { memcpy (error, event->error, sizeof *event->error); } const bson_t * mongoc_apm_command_failed_get_reply (const mongoc_apm_command_failed_t *event) { return event->reply; } int64_t mongoc_apm_command_failed_get_request_id ( const mongoc_apm_command_failed_t *event) { return event->request_id; } int64_t mongoc_apm_command_failed_get_operation_id ( const mongoc_apm_command_failed_t *event) { return event->operation_id; } const mongoc_host_list_t * mongoc_apm_command_failed_get_host (const mongoc_apm_command_failed_t *event) { return event->host; } uint32_t mongoc_apm_command_failed_get_server_id ( const mongoc_apm_command_failed_t *event) { return event->server_id; } void * mongoc_apm_command_failed_get_context (const mongoc_apm_command_failed_t *event) { return event->context; } /* server-changed event fields */ const mongoc_host_list_t * mongoc_apm_server_changed_get_host (const mongoc_apm_server_changed_t *event) { return event->host; } void mongoc_apm_server_changed_get_topology_id ( const mongoc_apm_server_changed_t *event, bson_oid_t *topology_id) { bson_oid_copy (&event->topology_id, topology_id); } const mongoc_server_description_t * mongoc_apm_server_changed_get_previous_description ( const mongoc_apm_server_changed_t *event) { return event->previous_description; } const mongoc_server_description_t * mongoc_apm_server_changed_get_new_description ( const mongoc_apm_server_changed_t *event) { return event->new_description; } void * mongoc_apm_server_changed_get_context (const mongoc_apm_server_changed_t *event) { return event->context; } /* server-opening event fields */ const mongoc_host_list_t * mongoc_apm_server_opening_get_host (const mongoc_apm_server_opening_t *event) { return event->host; } void mongoc_apm_server_opening_get_topology_id ( const mongoc_apm_server_opening_t *event, bson_oid_t *topology_id) { bson_oid_copy (&event->topology_id, topology_id); } void * mongoc_apm_server_opening_get_context (const 
mongoc_apm_server_opening_t *event) { return event->context; } /* server-closed event fields */ const mongoc_host_list_t * mongoc_apm_server_closed_get_host (const mongoc_apm_server_closed_t *event) { return event->host; } void mongoc_apm_server_closed_get_topology_id ( const mongoc_apm_server_closed_t *event, bson_oid_t *topology_id) { bson_oid_copy (&event->topology_id, topology_id); } void * mongoc_apm_server_closed_get_context (const mongoc_apm_server_closed_t *event) { return event->context; } /* topology-changed event fields */ void mongoc_apm_topology_changed_get_topology_id ( const mongoc_apm_topology_changed_t *event, bson_oid_t *topology_id) { bson_oid_copy (&event->topology_id, topology_id); } const mongoc_topology_description_t * mongoc_apm_topology_changed_get_previous_description ( const mongoc_apm_topology_changed_t *event) { return event->previous_description; } const mongoc_topology_description_t * mongoc_apm_topology_changed_get_new_description ( const mongoc_apm_topology_changed_t *event) { return event->new_description; } void * mongoc_apm_topology_changed_get_context ( const mongoc_apm_topology_changed_t *event) { return event->context; } /* topology-opening event field */ void mongoc_apm_topology_opening_get_topology_id ( const mongoc_apm_topology_opening_t *event, bson_oid_t *topology_id) { bson_oid_copy (&event->topology_id, topology_id); } void * mongoc_apm_topology_opening_get_context ( const mongoc_apm_topology_opening_t *event) { return event->context; } /* topology-closed event field */ void mongoc_apm_topology_closed_get_topology_id ( const mongoc_apm_topology_closed_t *event, bson_oid_t *topology_id) { bson_oid_copy (&event->topology_id, topology_id); } void * mongoc_apm_topology_closed_get_context ( const mongoc_apm_topology_closed_t *event) { return event->context; } /* heartbeat-started event field */ const mongoc_host_list_t * mongoc_apm_server_heartbeat_started_get_host ( const mongoc_apm_server_heartbeat_started_t *event) { return event->host; } void * mongoc_apm_server_heartbeat_started_get_context ( const mongoc_apm_server_heartbeat_started_t *event) { return event->context; } /* heartbeat-succeeded event fields */ int64_t mongoc_apm_server_heartbeat_succeeded_get_duration ( const mongoc_apm_server_heartbeat_succeeded_t *event) { return event->duration_usec; } const bson_t * mongoc_apm_server_heartbeat_succeeded_get_reply ( const mongoc_apm_server_heartbeat_succeeded_t *event) { return event->reply; } const mongoc_host_list_t * mongoc_apm_server_heartbeat_succeeded_get_host ( const mongoc_apm_server_heartbeat_succeeded_t *event) { return event->host; } void * mongoc_apm_server_heartbeat_succeeded_get_context ( const mongoc_apm_server_heartbeat_succeeded_t *event) { return event->context; } /* heartbeat-failed event fields */ int64_t mongoc_apm_server_heartbeat_failed_get_duration ( const mongoc_apm_server_heartbeat_failed_t *event) { return event->duration_usec; } void mongoc_apm_server_heartbeat_failed_get_error ( const mongoc_apm_server_heartbeat_failed_t *event, bson_error_t *error) { memcpy (error, event->error, sizeof *event->error); } const mongoc_host_list_t * mongoc_apm_server_heartbeat_failed_get_host ( const mongoc_apm_server_heartbeat_failed_t *event) { return event->host; } void * mongoc_apm_server_heartbeat_failed_get_context ( const mongoc_apm_server_heartbeat_failed_t *event) { return event->context; } /* * registering callbacks */ mongoc_apm_callbacks_t * mongoc_apm_callbacks_new (void) { size_t s = sizeof (mongoc_apm_callbacks_t); 
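   /* zero-filled allocation: any callback the application never sets stays
    * NULL, and the corresponding event is simply not delivered. */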
return (mongoc_apm_callbacks_t *) bson_malloc0 (s); } void mongoc_apm_callbacks_destroy (mongoc_apm_callbacks_t *callbacks) { bson_free (callbacks); } void mongoc_apm_set_command_started_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_command_started_cb_t cb) { callbacks->started = cb; } void mongoc_apm_set_command_succeeded_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_command_succeeded_cb_t cb) { callbacks->succeeded = cb; } void mongoc_apm_set_command_failed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_command_failed_cb_t cb) { callbacks->failed = cb; } void mongoc_apm_set_server_changed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_changed_cb_t cb) { callbacks->server_changed = cb; } void mongoc_apm_set_server_opening_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_opening_cb_t cb) { callbacks->server_opening = cb; } void mongoc_apm_set_server_closed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_closed_cb_t cb) { callbacks->server_closed = cb; } void mongoc_apm_set_topology_changed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_topology_changed_cb_t cb) { callbacks->topology_changed = cb; } void mongoc_apm_set_topology_opening_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_topology_opening_cb_t cb) { callbacks->topology_opening = cb; } void mongoc_apm_set_topology_closed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_topology_closed_cb_t cb) { callbacks->topology_closed = cb; } void mongoc_apm_set_server_heartbeat_started_cb ( mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_heartbeat_started_cb_t cb) { callbacks->server_heartbeat_started = cb; } void mongoc_apm_set_server_heartbeat_succeeded_cb ( mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_heartbeat_succeeded_cb_t cb) { callbacks->server_heartbeat_succeeded = cb; } void mongoc_apm_set_server_heartbeat_failed_cb ( mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_heartbeat_failed_cb_t cb) { callbacks->server_heartbeat_failed = cb; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-apm.h0000644000076500000240000003161113572250757024242 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_APM_H #define MONGOC_APM_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-server-description.h" #include "mongoc/mongoc-topology-description.h" BSON_BEGIN_DECLS /* * Application Performance Management (APM) interface, complies with two specs. 
* MongoDB's Command Monitoring Spec: * * https://github.com/mongodb/specifications/tree/master/source/command-monitoring * * MongoDB's Spec for Monitoring Server Discovery and Monitoring (SDAM) events: * * https://github.com/mongodb/specifications/tree/master/source/server-discovery-and-monitoring * */ /* * callbacks to receive APM events */ typedef struct _mongoc_apm_callbacks_t mongoc_apm_callbacks_t; /* * command monitoring events */ typedef struct _mongoc_apm_command_started_t mongoc_apm_command_started_t; typedef struct _mongoc_apm_command_succeeded_t mongoc_apm_command_succeeded_t; typedef struct _mongoc_apm_command_failed_t mongoc_apm_command_failed_t; /* * SDAM monitoring events */ typedef struct _mongoc_apm_server_changed_t mongoc_apm_server_changed_t; typedef struct _mongoc_apm_server_opening_t mongoc_apm_server_opening_t; typedef struct _mongoc_apm_server_closed_t mongoc_apm_server_closed_t; typedef struct _mongoc_apm_topology_changed_t mongoc_apm_topology_changed_t; typedef struct _mongoc_apm_topology_opening_t mongoc_apm_topology_opening_t; typedef struct _mongoc_apm_topology_closed_t mongoc_apm_topology_closed_t; typedef struct _mongoc_apm_server_heartbeat_started_t mongoc_apm_server_heartbeat_started_t; typedef struct _mongoc_apm_server_heartbeat_succeeded_t mongoc_apm_server_heartbeat_succeeded_t; typedef struct _mongoc_apm_server_heartbeat_failed_t mongoc_apm_server_heartbeat_failed_t; /* * event field accessors */ /* command-started event fields */ MONGOC_EXPORT (const bson_t *) mongoc_apm_command_started_get_command ( const mongoc_apm_command_started_t *event); MONGOC_EXPORT (const char *) mongoc_apm_command_started_get_database_name ( const mongoc_apm_command_started_t *event); MONGOC_EXPORT (const char *) mongoc_apm_command_started_get_command_name ( const mongoc_apm_command_started_t *event); MONGOC_EXPORT (int64_t) mongoc_apm_command_started_get_request_id ( const mongoc_apm_command_started_t *event); MONGOC_EXPORT (int64_t) mongoc_apm_command_started_get_operation_id ( const mongoc_apm_command_started_t *event); MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_command_started_get_host (const mongoc_apm_command_started_t *event); MONGOC_EXPORT (uint32_t) mongoc_apm_command_started_get_server_id ( const mongoc_apm_command_started_t *event); MONGOC_EXPORT (void *) mongoc_apm_command_started_get_context ( const mongoc_apm_command_started_t *event); /* command-succeeded event fields */ MONGOC_EXPORT (int64_t) mongoc_apm_command_succeeded_get_duration ( const mongoc_apm_command_succeeded_t *event); MONGOC_EXPORT (const bson_t *) mongoc_apm_command_succeeded_get_reply ( const mongoc_apm_command_succeeded_t *event); MONGOC_EXPORT (const char *) mongoc_apm_command_succeeded_get_command_name ( const mongoc_apm_command_succeeded_t *event); MONGOC_EXPORT (int64_t) mongoc_apm_command_succeeded_get_request_id ( const mongoc_apm_command_succeeded_t *event); MONGOC_EXPORT (int64_t) mongoc_apm_command_succeeded_get_operation_id ( const mongoc_apm_command_succeeded_t *event); MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_command_succeeded_get_host ( const mongoc_apm_command_succeeded_t *event); MONGOC_EXPORT (uint32_t) mongoc_apm_command_succeeded_get_server_id ( const mongoc_apm_command_succeeded_t *event); MONGOC_EXPORT (void *) mongoc_apm_command_succeeded_get_context ( const mongoc_apm_command_succeeded_t *event); /* command-failed event fields */ MONGOC_EXPORT (int64_t) mongoc_apm_command_failed_get_duration ( const mongoc_apm_command_failed_t *event); MONGOC_EXPORT 
(const char *) mongoc_apm_command_failed_get_command_name ( const mongoc_apm_command_failed_t *event); /* retrieve the error by filling out the passed-in "error" struct */ MONGOC_EXPORT (void) mongoc_apm_command_failed_get_error (const mongoc_apm_command_failed_t *event, bson_error_t *error); MONGOC_EXPORT (const bson_t *) mongoc_apm_command_failed_get_reply (const mongoc_apm_command_failed_t *event); MONGOC_EXPORT (int64_t) mongoc_apm_command_failed_get_request_id ( const mongoc_apm_command_failed_t *event); MONGOC_EXPORT (int64_t) mongoc_apm_command_failed_get_operation_id ( const mongoc_apm_command_failed_t *event); MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_command_failed_get_host (const mongoc_apm_command_failed_t *event); MONGOC_EXPORT (uint32_t) mongoc_apm_command_failed_get_server_id ( const mongoc_apm_command_failed_t *event); MONGOC_EXPORT (void *) mongoc_apm_command_failed_get_context ( const mongoc_apm_command_failed_t *event); /* server-changed event fields */ MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_server_changed_get_host (const mongoc_apm_server_changed_t *event); MONGOC_EXPORT (void) mongoc_apm_server_changed_get_topology_id ( const mongoc_apm_server_changed_t *event, bson_oid_t *topology_id); MONGOC_EXPORT (const mongoc_server_description_t *) mongoc_apm_server_changed_get_previous_description ( const mongoc_apm_server_changed_t *event); MONGOC_EXPORT (const mongoc_server_description_t *) mongoc_apm_server_changed_get_new_description ( const mongoc_apm_server_changed_t *event); MONGOC_EXPORT (void *) mongoc_apm_server_changed_get_context ( const mongoc_apm_server_changed_t *event); /* server-opening event fields */ MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_server_opening_get_host (const mongoc_apm_server_opening_t *event); MONGOC_EXPORT (void) mongoc_apm_server_opening_get_topology_id ( const mongoc_apm_server_opening_t *event, bson_oid_t *topology_id); MONGOC_EXPORT (void *) mongoc_apm_server_opening_get_context ( const mongoc_apm_server_opening_t *event); /* server-closed event fields */ MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_server_closed_get_host (const mongoc_apm_server_closed_t *event); MONGOC_EXPORT (void) mongoc_apm_server_closed_get_topology_id ( const mongoc_apm_server_closed_t *event, bson_oid_t *topology_id); MONGOC_EXPORT (void *) mongoc_apm_server_closed_get_context (const mongoc_apm_server_closed_t *event); /* topology-changed event fields */ MONGOC_EXPORT (void) mongoc_apm_topology_changed_get_topology_id ( const mongoc_apm_topology_changed_t *event, bson_oid_t *topology_id); MONGOC_EXPORT (const mongoc_topology_description_t *) mongoc_apm_topology_changed_get_previous_description ( const mongoc_apm_topology_changed_t *event); MONGOC_EXPORT (const mongoc_topology_description_t *) mongoc_apm_topology_changed_get_new_description ( const mongoc_apm_topology_changed_t *event); MONGOC_EXPORT (void *) mongoc_apm_topology_changed_get_context ( const mongoc_apm_topology_changed_t *event); /* topology-opening event field */ MONGOC_EXPORT (void) mongoc_apm_topology_opening_get_topology_id ( const mongoc_apm_topology_opening_t *event, bson_oid_t *topology_id); MONGOC_EXPORT (void *) mongoc_apm_topology_opening_get_context ( const mongoc_apm_topology_opening_t *event); /* topology-closed event field */ MONGOC_EXPORT (void) mongoc_apm_topology_closed_get_topology_id ( const mongoc_apm_topology_closed_t *event, bson_oid_t *topology_id); MONGOC_EXPORT (void *) mongoc_apm_topology_closed_get_context ( const 
mongoc_apm_topology_closed_t *event); /* heartbeat-started event field */ MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_server_heartbeat_started_get_host ( const mongoc_apm_server_heartbeat_started_t *event); MONGOC_EXPORT (void *) mongoc_apm_server_heartbeat_started_get_context ( const mongoc_apm_server_heartbeat_started_t *event); /* heartbeat-succeeded event fields */ MONGOC_EXPORT (int64_t) mongoc_apm_server_heartbeat_succeeded_get_duration ( const mongoc_apm_server_heartbeat_succeeded_t *event); MONGOC_EXPORT (const bson_t *) mongoc_apm_server_heartbeat_succeeded_get_reply ( const mongoc_apm_server_heartbeat_succeeded_t *event); MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_server_heartbeat_succeeded_get_host ( const mongoc_apm_server_heartbeat_succeeded_t *event); MONGOC_EXPORT (void *) mongoc_apm_server_heartbeat_succeeded_get_context ( const mongoc_apm_server_heartbeat_succeeded_t *event); /* heartbeat-failed event fields */ MONGOC_EXPORT (int64_t) mongoc_apm_server_heartbeat_failed_get_duration ( const mongoc_apm_server_heartbeat_failed_t *event); MONGOC_EXPORT (void) mongoc_apm_server_heartbeat_failed_get_error ( const mongoc_apm_server_heartbeat_failed_t *event, bson_error_t *error); MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_apm_server_heartbeat_failed_get_host ( const mongoc_apm_server_heartbeat_failed_t *event); MONGOC_EXPORT (void *) mongoc_apm_server_heartbeat_failed_get_context ( const mongoc_apm_server_heartbeat_failed_t *event); /* * callbacks */ typedef void (*mongoc_apm_command_started_cb_t) ( const mongoc_apm_command_started_t *event); typedef void (*mongoc_apm_command_succeeded_cb_t) ( const mongoc_apm_command_succeeded_t *event); typedef void (*mongoc_apm_command_failed_cb_t) ( const mongoc_apm_command_failed_t *event); typedef void (*mongoc_apm_server_changed_cb_t) ( const mongoc_apm_server_changed_t *event); typedef void (*mongoc_apm_server_opening_cb_t) ( const mongoc_apm_server_opening_t *event); typedef void (*mongoc_apm_server_closed_cb_t) ( const mongoc_apm_server_closed_t *event); typedef void (*mongoc_apm_topology_changed_cb_t) ( const mongoc_apm_topology_changed_t *event); typedef void (*mongoc_apm_topology_opening_cb_t) ( const mongoc_apm_topology_opening_t *event); typedef void (*mongoc_apm_topology_closed_cb_t) ( const mongoc_apm_topology_closed_t *event); typedef void (*mongoc_apm_server_heartbeat_started_cb_t) ( const mongoc_apm_server_heartbeat_started_t *event); typedef void (*mongoc_apm_server_heartbeat_succeeded_cb_t) ( const mongoc_apm_server_heartbeat_succeeded_t *event); typedef void (*mongoc_apm_server_heartbeat_failed_cb_t) ( const mongoc_apm_server_heartbeat_failed_t *event); /* * registering callbacks */ MONGOC_EXPORT (mongoc_apm_callbacks_t *) mongoc_apm_callbacks_new (void); MONGOC_EXPORT (void) mongoc_apm_callbacks_destroy (mongoc_apm_callbacks_t *callbacks); MONGOC_EXPORT (void) mongoc_apm_set_command_started_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_command_started_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_command_succeeded_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_command_succeeded_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_command_failed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_command_failed_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_server_changed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_changed_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_server_opening_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_opening_cb_t cb); MONGOC_EXPORT (void) 
mongoc_apm_set_server_closed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_closed_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_topology_changed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_topology_changed_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_topology_opening_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_topology_opening_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_topology_closed_cb (mongoc_apm_callbacks_t *callbacks, mongoc_apm_topology_closed_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_server_heartbeat_started_cb ( mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_heartbeat_started_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_server_heartbeat_succeeded_cb ( mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_heartbeat_succeeded_cb_t cb); MONGOC_EXPORT (void) mongoc_apm_set_server_heartbeat_failed_cb ( mongoc_apm_callbacks_t *callbacks, mongoc_apm_server_heartbeat_failed_cb_t cb); BSON_END_DECLS #endif /* MONGOC_APM_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-array-private.h0000644000076500000240000000270313572250757026253 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_ARRAY_PRIVATE_H #define MONGOC_ARRAY_PRIVATE_H #include BSON_BEGIN_DECLS typedef struct _mongoc_array_t mongoc_array_t; struct _mongoc_array_t { size_t len; size_t element_size; size_t allocated; void *data; }; #define _mongoc_array_append_val(a, v) _mongoc_array_append_vals (a, &v, 1) #define _mongoc_array_index(a, t, i) (((t *) (a)->data)[i]) #define _mongoc_array_clear(a) (a)->len = 0 void _mongoc_array_init (mongoc_array_t *array, size_t element_size); void _mongoc_array_copy (mongoc_array_t *dst, const mongoc_array_t *src); void _mongoc_array_append_vals (mongoc_array_t *array, const void *data, uint32_t n_elements); void _mongoc_array_destroy (mongoc_array_t *array); BSON_END_DECLS #endif /* MONGOC_ARRAY_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-array.c0000644000076500000240000000443213572250757024577 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-array-private.h" void _mongoc_array_init (mongoc_array_t *array, size_t element_size) { BSON_ASSERT (array); BSON_ASSERT (element_size); array->len = 0; array->element_size = element_size; array->allocated = 128; array->data = (void *) bson_malloc0 (array->allocated); } /* *-------------------------------------------------------------------------- * * _mongoc_array_copy -- * * Destroy dst and copy src into it. Both arrays must be initialized. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void _mongoc_array_copy (mongoc_array_t *dst, const mongoc_array_t *src) { _mongoc_array_destroy (dst); dst->len = src->len; dst->element_size = src->element_size; dst->allocated = src->allocated; dst->data = (void *) bson_malloc (dst->allocated); memcpy (dst->data, src->data, dst->allocated); } void _mongoc_array_destroy (mongoc_array_t *array) { if (array && array->data) { bson_free (array->data); } } void _mongoc_array_append_vals (mongoc_array_t *array, const void *data, uint32_t n_elements) { size_t len; size_t off; size_t next_size; BSON_ASSERT (array); BSON_ASSERT (data); off = array->element_size * array->len; len = (size_t) n_elements * array->element_size; if ((off + len) > array->allocated) { next_size = bson_next_power_of_two (off + len); array->data = (void *) bson_realloc (array->data, next_size); array->allocated = next_size; } memcpy ((uint8_t *) array->data + off, data, len); array->len += n_elements; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-async-cmd-private.h0000644000076500000240000000603013572250757027010 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_ASYNC_CMD_PRIVATE_H #define MONGOC_ASYNC_CMD_PRIVATE_H #include #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-async-private.h" #include "mongoc/mongoc-array-private.h" #include "mongoc/mongoc-buffer-private.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS typedef enum { MONGOC_ASYNC_CMD_INITIATE, MONGOC_ASYNC_CMD_SETUP, MONGOC_ASYNC_CMD_SEND, MONGOC_ASYNC_CMD_RECV_LEN, MONGOC_ASYNC_CMD_RECV_RPC, MONGOC_ASYNC_CMD_ERROR_STATE, MONGOC_ASYNC_CMD_CANCELED_STATE, } mongoc_async_cmd_state_t; typedef struct _mongoc_async_cmd { mongoc_stream_t *stream; mongoc_async_t *async; mongoc_async_cmd_state_t state; int events; mongoc_async_cmd_initiate_t initiator; mongoc_async_cmd_setup_t setup; void *setup_ctx; mongoc_async_cmd_cb_t cb; void *data; bson_error_t error; int64_t initiate_delay_ms; int64_t connect_started; int64_t cmd_started; int64_t timeout_msec; bson_t cmd; mongoc_buffer_t buffer; mongoc_array_t array; mongoc_iovec_t *iovec; size_t niovec; size_t bytes_written; size_t bytes_to_read; mongoc_rpc_t rpc; bson_t reply; bool reply_needs_cleanup; char ns[MONGOC_NAMESPACE_MAX]; struct addrinfo *dns_result; struct _mongoc_async_cmd *next; struct _mongoc_async_cmd *prev; } mongoc_async_cmd_t; mongoc_async_cmd_t * mongoc_async_cmd_new (mongoc_async_t *async, mongoc_stream_t *stream, bool is_setup_done, struct addrinfo *dns_result, mongoc_async_cmd_initiate_t initiator, int64_t initiate_delay_ms, mongoc_async_cmd_setup_t setup, void *setup_ctx, const char *dbname, const bson_t *cmd, mongoc_async_cmd_cb_t cb, void *cb_data, int64_t timeout_msec); void mongoc_async_cmd_destroy (mongoc_async_cmd_t *acmd); bool mongoc_async_cmd_run (mongoc_async_cmd_t *acmd); #ifdef MONGOC_ENABLE_SSL int mongoc_async_cmd_tls_setup (mongoc_stream_t *stream, int *events, void *ctx, int32_t timeout_msec, bson_error_t *error); #endif BSON_END_DECLS #endif /* MONGOC_ASYNC_CMD_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-async-cmd.c0000644000076500000240000003412413572250757025340 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-async-cmd-private.h" #include "mongoc/mongoc-async-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-opcode.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-server-description-private.h" #include "mongoc/mongoc-topology-scanner-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/utlist.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-stream-tls.h" #endif #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "async" typedef mongoc_async_cmd_result_t (*_mongoc_async_cmd_phase_t) ( mongoc_async_cmd_t *cmd); mongoc_async_cmd_result_t _mongoc_async_cmd_phase_initiate (mongoc_async_cmd_t *cmd); mongoc_async_cmd_result_t _mongoc_async_cmd_phase_setup (mongoc_async_cmd_t *cmd); mongoc_async_cmd_result_t _mongoc_async_cmd_phase_send (mongoc_async_cmd_t *cmd); mongoc_async_cmd_result_t _mongoc_async_cmd_phase_recv_len (mongoc_async_cmd_t *cmd); mongoc_async_cmd_result_t _mongoc_async_cmd_phase_recv_rpc (mongoc_async_cmd_t *cmd); static const _mongoc_async_cmd_phase_t gMongocCMDPhases[] = { _mongoc_async_cmd_phase_initiate, _mongoc_async_cmd_phase_setup, _mongoc_async_cmd_phase_send, _mongoc_async_cmd_phase_recv_len, _mongoc_async_cmd_phase_recv_rpc, NULL, /* no callback for MONGOC_ASYNC_CMD_ERROR_STATE */ NULL, /* no callback for MONGOC_ASYNC_CMD_CANCELED_STATE */ }; #ifdef MONGOC_ENABLE_SSL int mongoc_async_cmd_tls_setup (mongoc_stream_t *stream, int *events, void *ctx, int32_t timeout_msec, bson_error_t *error) { mongoc_stream_t *tls_stream; const char *host = (const char *) ctx; int retry_events = 0; for (tls_stream = stream; tls_stream->type != MONGOC_STREAM_TLS; tls_stream = mongoc_stream_get_base_stream (tls_stream)) { } #if defined(MONGOC_ENABLE_SSL_OPENSSL) || \ defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) /* pass 0 for the timeout to begin / continue non-blocking handshake */ timeout_msec = 0; #endif if (mongoc_stream_tls_handshake ( tls_stream, host, timeout_msec, &retry_events, error)) { return 1; } if (retry_events) { *events = retry_events; return 0; } return -1; } #endif bool mongoc_async_cmd_run (mongoc_async_cmd_t *acmd) { mongoc_async_cmd_result_t result; int64_t duration_usec; _mongoc_async_cmd_phase_t phase_callback; BSON_ASSERT (acmd); /* if we have successfully connected to the node, call the callback. 
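    * The state value then doubles as an index into gMongocCMDPhases to select
    * the handler for the current phase; a NULL entry (the error and canceled
    * states) maps to MONGOC_ASYNC_CMD_ERROR below.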
*/ if (acmd->state == MONGOC_ASYNC_CMD_SEND) { acmd->cb (acmd, MONGOC_ASYNC_CMD_CONNECTED, NULL, 0); } phase_callback = gMongocCMDPhases[acmd->state]; if (phase_callback) { result = phase_callback (acmd); } else { result = MONGOC_ASYNC_CMD_ERROR; } if (result == MONGOC_ASYNC_CMD_IN_PROGRESS) { return true; } duration_usec = bson_get_monotonic_time () - acmd->cmd_started; if (result == MONGOC_ASYNC_CMD_SUCCESS) { acmd->cb (acmd, result, &acmd->reply, duration_usec); } else { /* we're in ERROR, TIMEOUT, or CANCELED */ acmd->cb (acmd, result, NULL, duration_usec); } mongoc_async_cmd_destroy (acmd); return false; } void _mongoc_async_cmd_init_send (mongoc_async_cmd_t *acmd, const char *dbname) { bson_snprintf (acmd->ns, sizeof acmd->ns, "%s.$cmd", dbname); acmd->rpc.header.msg_len = 0; acmd->rpc.header.request_id = ++acmd->async->request_id; acmd->rpc.header.response_to = 0; acmd->rpc.header.opcode = MONGOC_OPCODE_QUERY; acmd->rpc.query.flags = MONGOC_QUERY_SLAVE_OK; acmd->rpc.query.collection = acmd->ns; acmd->rpc.query.skip = 0; acmd->rpc.query.n_return = -1; acmd->rpc.query.query = bson_get_data (&acmd->cmd); acmd->rpc.query.fields = NULL; /* This will always be isMaster, which are not allowed to be compressed */ _mongoc_rpc_gather (&acmd->rpc, &acmd->array); acmd->iovec = (mongoc_iovec_t *) acmd->array.data; acmd->niovec = acmd->array.len; _mongoc_rpc_swab_to_le (&acmd->rpc); acmd->bytes_written = 0; } void _mongoc_async_cmd_state_start (mongoc_async_cmd_t *acmd, bool is_setup_done) { if (!acmd->stream) { acmd->state = MONGOC_ASYNC_CMD_INITIATE; } else if (acmd->setup && !is_setup_done) { acmd->state = MONGOC_ASYNC_CMD_SETUP; } else { acmd->state = MONGOC_ASYNC_CMD_SEND; } acmd->events = POLLOUT; } mongoc_async_cmd_t * mongoc_async_cmd_new (mongoc_async_t *async, mongoc_stream_t *stream, bool is_setup_done, struct addrinfo *dns_result, mongoc_async_cmd_initiate_t initiator, int64_t initiate_delay_ms, mongoc_async_cmd_setup_t setup, void *setup_ctx, const char *dbname, const bson_t *cmd, mongoc_async_cmd_cb_t cb, void *cb_data, int64_t timeout_msec) { mongoc_async_cmd_t *acmd; BSON_ASSERT (cmd); BSON_ASSERT (dbname); acmd = (mongoc_async_cmd_t *) bson_malloc0 (sizeof (*acmd)); acmd->async = async; acmd->dns_result = dns_result; acmd->timeout_msec = timeout_msec; acmd->stream = stream; acmd->initiator = initiator; acmd->initiate_delay_ms = initiate_delay_ms; acmd->setup = setup; acmd->setup_ctx = setup_ctx; acmd->cb = cb; acmd->data = cb_data; acmd->connect_started = bson_get_monotonic_time (); bson_copy_to (cmd, &acmd->cmd); _mongoc_array_init (&acmd->array, sizeof (mongoc_iovec_t)); _mongoc_buffer_init (&acmd->buffer, NULL, 0, NULL, NULL); _mongoc_async_cmd_init_send (acmd, dbname); _mongoc_async_cmd_state_start (acmd, is_setup_done); async->ncmds++; DL_APPEND (async->cmds, acmd); return acmd; } void mongoc_async_cmd_destroy (mongoc_async_cmd_t *acmd) { BSON_ASSERT (acmd); DL_DELETE (acmd->async->cmds, acmd); acmd->async->ncmds--; bson_destroy (&acmd->cmd); if (acmd->reply_needs_cleanup) { bson_destroy (&acmd->reply); } _mongoc_array_destroy (&acmd->array); _mongoc_buffer_destroy (&acmd->buffer); bson_free (acmd); } mongoc_async_cmd_result_t _mongoc_async_cmd_phase_initiate (mongoc_async_cmd_t *acmd) { acmd->stream = acmd->initiator (acmd); if (!acmd->stream) { return MONGOC_ASYNC_CMD_ERROR; } /* reset the connect started time after connection starts. 
*/ acmd->connect_started = bson_get_monotonic_time (); if (acmd->setup) { acmd->state = MONGOC_ASYNC_CMD_SETUP; } else { acmd->state = MONGOC_ASYNC_CMD_SEND; } return MONGOC_ASYNC_CMD_IN_PROGRESS; } mongoc_async_cmd_result_t _mongoc_async_cmd_phase_setup (mongoc_async_cmd_t *acmd) { int retval; BSON_ASSERT (acmd->timeout_msec < INT32_MAX); retval = acmd->setup (acmd->stream, &acmd->events, acmd->setup_ctx, (int32_t) acmd->timeout_msec, &acmd->error); switch (retval) { case -1: return MONGOC_ASYNC_CMD_ERROR; case 0: break; case 1: acmd->state = MONGOC_ASYNC_CMD_SEND; acmd->events = POLLOUT; break; default: abort (); } return MONGOC_ASYNC_CMD_IN_PROGRESS; } mongoc_async_cmd_result_t _mongoc_async_cmd_phase_send (mongoc_async_cmd_t *acmd) { size_t total_bytes = 0; size_t offset; ssize_t bytes; int i; /* if a continued write, then iovec will be set to a temporary copy */ bool used_temp_iovec = false; mongoc_iovec_t *iovec = acmd->iovec; size_t niovec = acmd->niovec; for (i = 0; i < acmd->niovec; i++) { total_bytes += acmd->iovec[i].iov_len; } if (acmd->bytes_written > 0) { BSON_ASSERT (acmd->bytes_written < total_bytes); /* if bytes have been written before, compute the offset in the next * iovec entry to be written. */ offset = acmd->bytes_written; /* subtract the lengths of all iovec entries written so far. */ for (i = 0; i < acmd->niovec; i++) { if (offset < acmd->iovec[i].iov_len) { break; } offset -= acmd->iovec[i].iov_len; } BSON_ASSERT (i < acmd->niovec); /* create a new iovec with the remaining data to be written. */ niovec = acmd->niovec - i; iovec = bson_malloc (niovec * sizeof (mongoc_iovec_t)); memcpy (iovec, acmd->iovec + i, niovec * sizeof (mongoc_iovec_t)); iovec[0].iov_base = (char *) iovec[0].iov_base + offset; iovec[0].iov_len -= offset; used_temp_iovec = true; } bytes = mongoc_stream_writev (acmd->stream, iovec, niovec, 0); if (used_temp_iovec) { bson_free (iovec); } if (bytes <= 0 && mongoc_stream_should_retry (acmd->stream)) { return MONGOC_ASYNC_CMD_IN_PROGRESS; } if (bytes < 0) { bson_set_error (&acmd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed to write rpc bytes."); return MONGOC_ASYNC_CMD_ERROR; } acmd->bytes_written += bytes; if (acmd->bytes_written < total_bytes) { return MONGOC_ASYNC_CMD_IN_PROGRESS; } acmd->state = MONGOC_ASYNC_CMD_RECV_LEN; acmd->bytes_to_read = 4; acmd->events = POLLIN; acmd->cmd_started = bson_get_monotonic_time (); return MONGOC_ASYNC_CMD_IN_PROGRESS; } mongoc_async_cmd_result_t _mongoc_async_cmd_phase_recv_len (mongoc_async_cmd_t *acmd) { ssize_t bytes = _mongoc_buffer_try_append_from_stream ( &acmd->buffer, acmd->stream, acmd->bytes_to_read, 0); uint32_t msg_len; if (bytes <= 0 && mongoc_stream_should_retry (acmd->stream)) { return MONGOC_ASYNC_CMD_IN_PROGRESS; } if (bytes < 0) { bson_set_error (&acmd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed to receive length header from server."); return MONGOC_ASYNC_CMD_ERROR; } if (bytes == 0) { bson_set_error (&acmd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Server closed connection."); return MONGOC_ASYNC_CMD_ERROR; } acmd->bytes_to_read = (size_t) (acmd->bytes_to_read - bytes); if (!acmd->bytes_to_read) { memcpy (&msg_len, acmd->buffer.data, 4); msg_len = BSON_UINT32_FROM_LE (msg_len); if (msg_len < 16 || msg_len > MONGOC_DEFAULT_MAX_MSG_SIZE || msg_len < acmd->buffer.len) { bson_set_error (&acmd->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid reply from server."); return MONGOC_ASYNC_CMD_ERROR; } 
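      /* The length prefix is valid. msg_len counts the entire message,
       * including the four length bytes already buffered, so read the
       * remaining (msg_len - buffer.len) bytes and fall through directly to
       * the RPC phase. */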
acmd->bytes_to_read = msg_len - acmd->buffer.len; acmd->state = MONGOC_ASYNC_CMD_RECV_RPC; return _mongoc_async_cmd_phase_recv_rpc (acmd); } return MONGOC_ASYNC_CMD_IN_PROGRESS; } mongoc_async_cmd_result_t _mongoc_async_cmd_phase_recv_rpc (mongoc_async_cmd_t *acmd) { ssize_t bytes = _mongoc_buffer_try_append_from_stream ( &acmd->buffer, acmd->stream, acmd->bytes_to_read, 0); if (bytes <= 0 && mongoc_stream_should_retry (acmd->stream)) { return MONGOC_ASYNC_CMD_IN_PROGRESS; } if (bytes < 0) { bson_set_error (&acmd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed to receive rpc bytes from server."); return MONGOC_ASYNC_CMD_ERROR; } if (bytes == 0) { bson_set_error (&acmd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Server closed connection."); return MONGOC_ASYNC_CMD_ERROR; } acmd->bytes_to_read = (size_t) (acmd->bytes_to_read - bytes); if (!acmd->bytes_to_read) { if (!_mongoc_rpc_scatter ( &acmd->rpc, acmd->buffer.data, acmd->buffer.len)) { bson_set_error (&acmd->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid reply from server."); return MONGOC_ASYNC_CMD_ERROR; } if (BSON_UINT32_FROM_LE (acmd->rpc.header.opcode) == MONGOC_OPCODE_COMPRESSED) { uint8_t *buf = NULL; size_t len = BSON_UINT32_FROM_LE (acmd->rpc.compressed.uncompressed_size) + sizeof (mongoc_rpc_header_t); buf = bson_malloc0 (len); if (!_mongoc_rpc_decompress (&acmd->rpc, buf, len)) { bson_free (buf); bson_set_error (&acmd->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Could not decompress server reply"); return MONGOC_ASYNC_CMD_ERROR; } _mongoc_buffer_destroy (&acmd->buffer); _mongoc_buffer_init (&acmd->buffer, buf, len, NULL, NULL); } _mongoc_rpc_swab_from_le (&acmd->rpc); if (!_mongoc_rpc_get_first_document (&acmd->rpc, &acmd->reply)) { bson_set_error (&acmd->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid reply from server"); return MONGOC_ASYNC_CMD_ERROR; } acmd->reply_needs_cleanup = true; return MONGOC_ASYNC_CMD_SUCCESS; } return MONGOC_ASYNC_CMD_IN_PROGRESS; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-async-private.h0000644000076500000240000000365713572250757026263 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_ASYNC_PRIVATE_H #define MONGOC_ASYNC_PRIVATE_H #include #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS struct _mongoc_async_cmd; typedef struct _mongoc_async { struct _mongoc_async_cmd *cmds; size_t ncmds; uint32_t request_id; } mongoc_async_t; typedef enum { MONGOC_ASYNC_CMD_CONNECTED, MONGOC_ASYNC_CMD_IN_PROGRESS, MONGOC_ASYNC_CMD_SUCCESS, MONGOC_ASYNC_CMD_ERROR, MONGOC_ASYNC_CMD_TIMEOUT, } mongoc_async_cmd_result_t; typedef void (*mongoc_async_cmd_cb_t) (struct _mongoc_async_cmd *acmd, mongoc_async_cmd_result_t result, const bson_t *bson, int64_t duration_usec); typedef mongoc_stream_t *(*mongoc_async_cmd_initiate_t) ( struct _mongoc_async_cmd *); typedef int (*mongoc_async_cmd_setup_t) (mongoc_stream_t *stream, int *events, void *ctx, int32_t timeout_msec, bson_error_t *error); mongoc_async_t * mongoc_async_new (); void mongoc_async_destroy (mongoc_async_t *async); void mongoc_async_run (mongoc_async_t *async); BSON_END_DECLS #endif /* MONGOC_ASYNC_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-async.c0000644000076500000240000001463113572250757024600 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-async-private.h" #include "mongoc/mongoc-async-cmd-private.h" #include "mongoc/utlist.h" #include "mongoc/mongoc.h" #include "mongoc/mongoc-socket-private.h" #include "mongoc/mongoc-util-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "async" mongoc_async_t * mongoc_async_new () { mongoc_async_t *async = (mongoc_async_t *) bson_malloc0 (sizeof (*async)); return async; } void mongoc_async_destroy (mongoc_async_t *async) { mongoc_async_cmd_t *acmd, *tmp; DL_FOREACH_SAFE (async->cmds, acmd, tmp) { mongoc_async_cmd_destroy (acmd); } bson_free (async); } void mongoc_async_run (mongoc_async_t *async) { mongoc_async_cmd_t *acmd, *tmp; mongoc_async_cmd_t **acmds_polled = NULL; mongoc_stream_poll_t *poller = NULL; int nstreams, i; ssize_t nactive = 0; int64_t now; int64_t expire_at; int64_t poll_timeout_msec; size_t poll_size; now = bson_get_monotonic_time (); poll_size = 0; /* CDRIVER-1571 reset start times in case a stream initiator was slow */ DL_FOREACH (async->cmds, acmd) { acmd->connect_started = now; } while (async->ncmds) { /* ncmds grows if we discover a replica & start calling ismaster on it */ if (poll_size < async->ncmds) { poller = (mongoc_stream_poll_t *) bson_realloc ( poller, sizeof (*poller) * async->ncmds); acmds_polled = (mongoc_async_cmd_t **) bson_realloc ( acmds_polled, sizeof (*acmds_polled) * async->ncmds); poll_size = async->ncmds; } expire_at = INT64_MAX; nstreams = 0; /* check if any cmds are ready to be initiated. */ DL_FOREACH_SAFE (async->cmds, acmd, tmp) { if (acmd->state == MONGOC_ASYNC_CMD_INITIATE) { BSON_ASSERT (!acmd->stream); if (now >= acmd->initiate_delay_ms * 1000 + acmd->connect_started) { /* time to initiate. 
*/ if (mongoc_async_cmd_run (acmd)) { BSON_ASSERT (acmd->stream); } else { /* this command was removed. */ continue; } } else { /* don't poll longer than the earliest cmd ready to init. */ expire_at = BSON_MIN ( expire_at, acmd->connect_started + acmd->initiate_delay_ms); } } if (acmd->stream) { acmds_polled[nstreams] = acmd; poller[nstreams].stream = acmd->stream; poller[nstreams].events = acmd->events; poller[nstreams].revents = 0; expire_at = BSON_MIN ( expire_at, acmd->connect_started + acmd->timeout_msec * 1000); ++nstreams; } } if (async->ncmds == 0) { /* all cmds failed to initiate and removed themselves. */ break; } poll_timeout_msec = BSON_MAX (0, (expire_at - now) / 1000); BSON_ASSERT (poll_timeout_msec < INT32_MAX); if (nstreams > 0) { /* we need at least one stream to poll. */ nactive = mongoc_stream_poll (poller, nstreams, (int32_t) poll_timeout_msec); } else { /* currently this does not get hit. we always have at least one command * initialized with a stream. */ _mongoc_usleep (poll_timeout_msec * 1000); } if (nactive > 0) { for (i = 0; i < nstreams; i++) { mongoc_async_cmd_t *iter = acmds_polled[i]; if (poller[i].revents & (POLLERR | POLLHUP)) { int hup = poller[i].revents & POLLHUP; if (iter->state == MONGOC_ASYNC_CMD_SEND) { bson_set_error (&iter->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, hup ? "connection refused" : "unknown connection error"); } else { bson_set_error (&iter->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, hup ? "connection closed" : "unknown socket error"); } iter->state = MONGOC_ASYNC_CMD_ERROR_STATE; } if ((poller[i].revents & poller[i].events) || iter->state == MONGOC_ASYNC_CMD_ERROR_STATE) { (void) mongoc_async_cmd_run (iter); nactive--; } if (!nactive) { break; } } } DL_FOREACH_SAFE (async->cmds, acmd, tmp) { bool remove_cmd = false; mongoc_async_cmd_result_t result; /* check if an initiated cmd has passed the connection timeout. */ if (acmd->state != MONGOC_ASYNC_CMD_INITIATE && now > acmd->connect_started + acmd->timeout_msec * 1000) { bson_set_error (&acmd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, acmd->state == MONGOC_ASYNC_CMD_SEND ? "connection timeout" : "socket timeout"); remove_cmd = true; result = MONGOC_ASYNC_CMD_TIMEOUT; } else if (acmd->state == MONGOC_ASYNC_CMD_CANCELED_STATE) { remove_cmd = true; result = MONGOC_ASYNC_CMD_ERROR; } if (remove_cmd) { acmd->cb (acmd, result, NULL, (now - acmd->connect_started) / 1000); /* Remove acmd from the async->cmds doubly-linked list */ mongoc_async_cmd_destroy (acmd); } } now = bson_get_monotonic_time (); } bson_free (poller); bson_free (acmds_polled); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-buffer-private.h0000644000076500000240000000433313572250757026407 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_BUFFER_PRIVATE_H #define MONGOC_BUFFER_PRIVATE_H #include #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS typedef struct _mongoc_buffer_t mongoc_buffer_t; struct _mongoc_buffer_t { uint8_t *data; size_t datalen; size_t len; bson_realloc_func realloc_func; void *realloc_data; }; void _mongoc_buffer_init (mongoc_buffer_t *buffer, uint8_t *buf, size_t buflen, bson_realloc_func realloc_func, void *realloc_data); bool _mongoc_buffer_append (mongoc_buffer_t *buffer, const uint8_t *data, size_t data_size); bool _mongoc_buffer_append_from_stream (mongoc_buffer_t *buffer, mongoc_stream_t *stream, size_t size, int32_t timeout_msec, bson_error_t *error); ssize_t _mongoc_buffer_try_append_from_stream (mongoc_buffer_t *buffer, mongoc_stream_t *stream, size_t size, int32_t timeout_msec); ssize_t _mongoc_buffer_fill (mongoc_buffer_t *buffer, mongoc_stream_t *stream, size_t min_bytes, int32_t timeout_msec, bson_error_t *error); void _mongoc_buffer_destroy (mongoc_buffer_t *buffer); void _mongoc_buffer_clear (mongoc_buffer_t *buffer, bool zero); BSON_END_DECLS #endif /* MONGOC_BUFFER_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-buffer.c0000644000076500000240000002200713572250757024730 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-buffer-private.h" #include "mongoc/mongoc-trace-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "buffer" #ifndef MONGOC_BUFFER_DEFAULT_SIZE #define MONGOC_BUFFER_DEFAULT_SIZE 1024 #endif #define SPACE_FOR(_b, _sz) \ (((ssize_t) (_b)->datalen - (ssize_t) (_b)->len) >= (ssize_t) (_sz)) /** * _mongoc_buffer_init: * @buffer: A mongoc_buffer_t to initialize. * @buf: A data buffer to attach to @buffer. * @buflen: The size of @buflen. * @realloc_func: A function to resize @buf. * * Initializes @buffer for use. If additional space is needed by @buffer, then * @realloc_func will be called to resize @buf. * * @buffer takes ownership of @buf and will realloc it to zero bytes when * cleaning up the data structure. */ void _mongoc_buffer_init (mongoc_buffer_t *buffer, uint8_t *buf, size_t buflen, bson_realloc_func realloc_func, void *realloc_data) { BSON_ASSERT (buffer); BSON_ASSERT (buflen || !buf); if (!realloc_func) { realloc_func = bson_realloc_ctx; } if (!buflen) { buflen = MONGOC_BUFFER_DEFAULT_SIZE; } if (!buf) { buf = (uint8_t *) realloc_func (NULL, buflen, NULL); } memset (buffer, 0, sizeof *buffer); buffer->data = buf; buffer->datalen = buflen; buffer->len = 0; buffer->realloc_func = realloc_func; buffer->realloc_data = realloc_data; } /** * _mongoc_buffer_destroy: * @buffer: A mongoc_buffer_t. * * Cleanup after @buffer and release any allocated resources. 
*/ void _mongoc_buffer_destroy (mongoc_buffer_t *buffer) { BSON_ASSERT (buffer); if (buffer->data && buffer->realloc_func) { buffer->realloc_func (buffer->data, 0, buffer->realloc_data); } memset (buffer, 0, sizeof *buffer); } /** * _mongoc_buffer_clear: * @buffer: A mongoc_buffer_t. * @zero: If the memory should be zeroed. * * Clears a buffers contents and resets it to initial state. You can request * that the memory is zeroed, which might be useful if you know the contents * contain security related information. */ void _mongoc_buffer_clear (mongoc_buffer_t *buffer, bool zero) { BSON_ASSERT (buffer); if (zero) { memset (buffer->data, 0, buffer->datalen); } buffer->len = 0; } bool _mongoc_buffer_append (mongoc_buffer_t *buffer, const uint8_t *data, size_t data_size) { uint8_t *buf; ENTRY; BSON_ASSERT (buffer); BSON_ASSERT (data_size); BSON_ASSERT (buffer->datalen); BSON_ASSERT ((buffer->datalen + data_size) < INT_MAX); if (!SPACE_FOR (buffer, data_size)) { if (buffer->len) { memmove (&buffer->data[0], buffer->data, buffer->len); } if (!SPACE_FOR (buffer, data_size)) { buffer->datalen = bson_next_power_of_two (data_size + buffer->len); buffer->data = (uint8_t *) buffer->realloc_func ( buffer->data, buffer->datalen, NULL); } } buf = &buffer->data[buffer->len]; BSON_ASSERT ((buffer->len + data_size) <= buffer->datalen); memcpy (buf, data, data_size); buffer->len += data_size; RETURN (true); } /** * mongoc_buffer_append_from_stream: * @buffer; A mongoc_buffer_t. * @stream: The stream to read from. * @size: The number of bytes to read. * @timeout_msec: The number of milliseconds to wait or -1 for the default * @error: A location for a bson_error_t, or NULL. * * Reads from stream @size bytes and stores them in @buffer. This can be used * in conjunction with reading RPCs from a stream. You read from the stream * into this buffer and then scatter the buffer into the RPC. * * Returns: true if successful; otherwise false and @error is set. */ bool _mongoc_buffer_append_from_stream (mongoc_buffer_t *buffer, mongoc_stream_t *stream, size_t size, int32_t timeout_msec, bson_error_t *error) { uint8_t *buf; ssize_t ret; ENTRY; BSON_ASSERT (buffer); BSON_ASSERT (stream); BSON_ASSERT (size); BSON_ASSERT (buffer->datalen); BSON_ASSERT ((buffer->datalen + size) < INT_MAX); if (!SPACE_FOR (buffer, size)) { if (buffer->len) { memmove (&buffer->data[0], buffer->data, buffer->len); } if (!SPACE_FOR (buffer, size)) { buffer->datalen = bson_next_power_of_two (size + buffer->len); buffer->data = (uint8_t *) buffer->realloc_func ( buffer->data, buffer->datalen, NULL); } } buf = &buffer->data[buffer->len]; BSON_ASSERT ((buffer->len + size) <= buffer->datalen); ret = mongoc_stream_read (stream, buf, size, size, timeout_msec); if (ret != size) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed to read %" PRIu64 " bytes: socket error or timeout", (uint64_t) size); RETURN (false); } buffer->len += ret; RETURN (true); } /** * _mongoc_buffer_fill: * @buffer: A mongoc_buffer_t. * @stream: A stream to read from. * @min_bytes: The minimum number of bytes to read. * @error: A location for a bson_error_t or NULL. * * Attempts to fill the entire buffer, or at least @min_bytes. * * Returns: The number of buffered bytes, or -1 on failure. 
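 *
 * Illustrative sketch (assumes @stream, @timeout_msec and @error are in
 * scope): buffer the 4-byte little-endian length prefix of a MongoDB
 * message, then fill to the full message length before scattering it into
 * an RPC:
 *
 *    uint32_t msg_len;
 *
 *    if (_mongoc_buffer_fill (&buf, stream, 4, timeout_msec, &error) == -1) {
 *       return false;
 *    }
 *    memcpy (&msg_len, buf.data, 4);
 *    msg_len = BSON_UINT32_FROM_LE (msg_len);
 *    if (_mongoc_buffer_fill (
 *           &buf, stream, (size_t) msg_len, timeout_msec, &error) == -1) {
 *       return false;
 *    }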
*/ ssize_t _mongoc_buffer_fill (mongoc_buffer_t *buffer, mongoc_stream_t *stream, size_t min_bytes, int32_t timeout_msec, bson_error_t *error) { ssize_t ret; size_t avail_bytes; ENTRY; BSON_ASSERT (buffer); BSON_ASSERT (stream); BSON_ASSERT (buffer->data); BSON_ASSERT (buffer->datalen); if (min_bytes <= buffer->len) { RETURN (buffer->len); } min_bytes -= buffer->len; if (buffer->len) { memmove (&buffer->data[0], buffer->data, buffer->len); } if (!SPACE_FOR (buffer, min_bytes)) { buffer->datalen = bson_next_power_of_two (buffer->len + min_bytes); buffer->data = (uint8_t *) buffer->realloc_func ( buffer->data, buffer->datalen, buffer->realloc_data); } avail_bytes = buffer->datalen - buffer->len; ret = mongoc_stream_read ( stream, &buffer->data[buffer->len], avail_bytes, min_bytes, timeout_msec); if (ret == -1) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed to buffer %u bytes", (unsigned) min_bytes); RETURN (-1); } buffer->len += ret; if (buffer->len < min_bytes) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Could only buffer %u of %u bytes", (unsigned) buffer->len, (unsigned) min_bytes); RETURN (-1); } RETURN (buffer->len); } /** * mongoc_buffer_try_append_from_stream: * @buffer; A mongoc_buffer_t. * @stream: The stream to read from. * @size: The number of bytes to read. * @timeout_msec: The number of milliseconds to wait or -1 for the default * * Reads from stream @size bytes and stores them in @buffer. This can be used * in conjunction with reading RPCs from a stream. You read from the stream * into this buffer and then scatter the buffer into the RPC. * * Returns: bytes read if successful; otherwise 0 or -1. */ ssize_t _mongoc_buffer_try_append_from_stream (mongoc_buffer_t *buffer, mongoc_stream_t *stream, size_t size, int32_t timeout_msec) { uint8_t *buf; ssize_t ret; ENTRY; BSON_ASSERT (buffer); BSON_ASSERT (stream); BSON_ASSERT (size); BSON_ASSERT (buffer->datalen); BSON_ASSERT ((buffer->datalen + size) < INT_MAX); if (!SPACE_FOR (buffer, size)) { buffer->datalen = bson_next_power_of_two (size + buffer->len); buffer->data = (uint8_t *) buffer->realloc_func (buffer->data, buffer->datalen, NULL); } buf = &buffer->data[buffer->len]; BSON_ASSERT ((buffer->len + size) <= buffer->datalen); ret = mongoc_stream_read (stream, buf, size, 0, timeout_msec); if (ret > 0) { buffer->len += ret; } RETURN (ret); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-bulk-operation-private.h0000644000076500000240000000306213572250757030067 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_BULK_OPERATION_PRIVATE_H #define MONGOC_BULK_OPERATION_PRIVATE_H #include "mongoc/mongoc-array-private.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-write-command-private.h" BSON_BEGIN_DECLS struct _mongoc_bulk_operation_t { char *database; char *collection; mongoc_client_t *client; mongoc_client_session_t *session; mongoc_write_concern_t *write_concern; mongoc_bulk_write_flags_t flags; uint32_t server_id; mongoc_array_t commands; mongoc_write_result_t result; bool executed; int64_t operation_id; }; mongoc_bulk_operation_t * _mongoc_bulk_operation_new (mongoc_client_t *client, const char *database, const char *collection, mongoc_bulk_write_flags_t flags, const mongoc_write_concern_t *write_concern); BSON_END_DECLS #endif /* MONGOC_BULK_OPERATION_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-bulk-operation.c0000644000076500000240000006425613572250757026426 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-bulk-operation.h" #include "mongoc/mongoc-bulk-operation-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-opts-private.h" #include "mongoc/mongoc-write-command-private.h" /* * This is the implementation of both write commands and bulk write commands. * They are all implemented as one contiguous set since we'd like to cut down * on code duplication here. * * This implementation is currently naive. * * Some interesting optimizations might be: * * - If unordered mode, send operations as we get them instead of waiting * for execute() to be called. This could save us memcpy()'s too. * - If there is no acknowledgement desired, keep a count of how many * replies we need and ask the socket layer to skip that many bytes * when reading. * - Try to use iovec to send write commands with subdocuments rather than * copying them into the write command document. 
*/ mongoc_bulk_operation_t * mongoc_bulk_operation_new (bool ordered) { mongoc_bulk_operation_t *bulk; bulk = (mongoc_bulk_operation_t *) bson_malloc0 (sizeof *bulk); bulk->flags.bypass_document_validation = false; bulk->flags.ordered = ordered; bulk->server_id = 0; _mongoc_array_init (&bulk->commands, sizeof (mongoc_write_command_t)); _mongoc_write_result_init (&bulk->result); return bulk; } mongoc_bulk_operation_t * _mongoc_bulk_operation_new ( mongoc_client_t *client, /* IN */ const char *database, /* IN */ const char *collection, /* IN */ mongoc_bulk_write_flags_t flags, /* IN */ const mongoc_write_concern_t *write_concern) /* IN */ { mongoc_bulk_operation_t *bulk; BSON_ASSERT (client); BSON_ASSERT (collection); bulk = mongoc_bulk_operation_new (flags.ordered); bulk->client = client; bulk->database = bson_strdup (database); bulk->collection = bson_strdup (collection); bulk->write_concern = mongoc_write_concern_copy (write_concern); bulk->executed = false; bulk->flags = flags; bulk->operation_id = ++client->cluster.operation_id; return bulk; } void mongoc_bulk_operation_destroy (mongoc_bulk_operation_t *bulk) /* IN */ { mongoc_write_command_t *command; int i; if (bulk) { for (i = 0; i < bulk->commands.len; i++) { command = &_mongoc_array_index (&bulk->commands, mongoc_write_command_t, i); _mongoc_write_command_destroy (command); } bson_free (bulk->database); bson_free (bulk->collection); mongoc_write_concern_destroy (bulk->write_concern); _mongoc_array_destroy (&bulk->commands); _mongoc_write_result_destroy (&bulk->result); bson_free (bulk); } } /* already failed, e.g. a bad call to mongoc_bulk_operation_insert? */ #define BULK_EXIT_IF_PRIOR_ERROR \ do { \ if (bulk->result.error.domain) { \ EXIT; \ } \ } while (0) #define BULK_RETURN_IF_PRIOR_ERROR \ do { \ if (bulk->result.error.domain) { \ if (error != &bulk->result.error) { \ bson_set_error (error, \ MONGOC_ERROR_COMMAND, \ MONGOC_ERROR_COMMAND_INVALID_ARG, \ "Bulk operation is invalid from prior error: %s", \ bulk->result.error.message); \ }; \ return false; \ }; \ } while (0) bool _mongoc_bulk_operation_remove_with_opts ( mongoc_bulk_operation_t *bulk, const bson_t *selector, const mongoc_bulk_remove_opts_t *remove_opts, int32_t limit, bson_error_t *error) /* OUT */ { mongoc_write_command_t command = {0}; mongoc_write_command_t *last; bson_t opts; bool has_collation; bool ret = false; ENTRY; BSON_ASSERT (bulk); BSON_ASSERT (selector); bson_init (&opts); /* allow "limit" in opts, but it must be the correct limit */ if (remove_opts->limit != limit) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid \"limit\" in opts: %" PRId32 "." 
" The value must be %" PRId32 ", or omitted.", remove_opts->limit, limit); GOTO (done); } bson_append_int32 (&opts, "limit", 5, limit); has_collation = !bson_empty (&remove_opts->collation); if (has_collation) { bson_append_document (&opts, "collation", 9, &remove_opts->collation); } if (bulk->commands.len) { last = &_mongoc_array_index ( &bulk->commands, mongoc_write_command_t, bulk->commands.len - 1); if (last->type == MONGOC_WRITE_COMMAND_DELETE) { last->flags.has_collation |= has_collation; last->flags.has_multi_write |= (remove_opts->limit == 0); _mongoc_write_command_delete_append (last, selector, &opts); ret = true; GOTO (done); } } _mongoc_write_command_init_delete ( &command, selector, NULL, &opts, bulk->flags, bulk->operation_id); command.flags.has_collation = has_collation; command.flags.has_multi_write = (remove_opts->limit == 0); _mongoc_array_append_val (&bulk->commands, command); ret = true; done: bson_destroy (&opts); RETURN (ret); } bool mongoc_bulk_operation_remove_one_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *opts, bson_error_t *error) /* OUT */ { mongoc_bulk_remove_one_opts_t remove_opts; bool ret; ENTRY; BULK_RETURN_IF_PRIOR_ERROR; if (!_mongoc_bulk_remove_one_opts_parse ( bulk->client, opts, &remove_opts, error)) { _mongoc_bulk_remove_one_opts_cleanup (&remove_opts); RETURN (false); } ret = _mongoc_bulk_operation_remove_with_opts ( bulk, selector, &remove_opts.remove, 1, error); _mongoc_bulk_remove_one_opts_cleanup (&remove_opts); RETURN (ret); } bool mongoc_bulk_operation_remove_many_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *opts, bson_error_t *error) /* OUT */ { mongoc_bulk_remove_many_opts_t remove_opts; bool ret; ENTRY; BULK_RETURN_IF_PRIOR_ERROR; if (!_mongoc_bulk_remove_many_opts_parse ( bulk->client, opts, &remove_opts, error)) { _mongoc_bulk_remove_many_opts_cleanup (&remove_opts); RETURN (false); } ret = _mongoc_bulk_operation_remove_with_opts ( bulk, selector, &remove_opts.remove, 0, error); _mongoc_bulk_remove_many_opts_cleanup (&remove_opts); RETURN (ret); } void mongoc_bulk_operation_remove (mongoc_bulk_operation_t *bulk, /* IN */ const bson_t *selector) /* IN */ { bson_error_t *error = &bulk->result.error; ENTRY; BULK_EXIT_IF_PRIOR_ERROR; if (!mongoc_bulk_operation_remove_many_with_opts ( bulk, selector, NULL, error)) { MONGOC_WARNING ("%s", error->message); } if (error->domain) { MONGOC_WARNING ("%s", error->message); } EXIT; } void mongoc_bulk_operation_remove_one (mongoc_bulk_operation_t *bulk, /* IN */ const bson_t *selector) /* IN */ { bson_error_t *error = &bulk->result.error; ENTRY; BULK_EXIT_IF_PRIOR_ERROR; if (!mongoc_bulk_operation_remove_one_with_opts ( bulk, selector, NULL, error)) { MONGOC_WARNING ("%s", error->message); } if (error->domain) { MONGOC_WARNING ("%s", error->message); } EXIT; } void mongoc_bulk_operation_delete (mongoc_bulk_operation_t *bulk, const bson_t *selector) { ENTRY; mongoc_bulk_operation_remove (bulk, selector); EXIT; } void mongoc_bulk_operation_delete_one (mongoc_bulk_operation_t *bulk, const bson_t *selector) { ENTRY; mongoc_bulk_operation_remove_one (bulk, selector); EXIT; } void mongoc_bulk_operation_insert (mongoc_bulk_operation_t *bulk, const bson_t *document) { ENTRY; BSON_ASSERT (bulk); BSON_ASSERT (document); if (!mongoc_bulk_operation_insert_with_opts ( bulk, document, NULL /* opts */, &bulk->result.error)) { MONGOC_WARNING ("%s", bulk->result.error.message); } EXIT; } bool mongoc_bulk_operation_insert_with_opts 
(mongoc_bulk_operation_t *bulk, const bson_t *document, const bson_t *opts, bson_error_t *error) { mongoc_bulk_insert_opts_t insert_opts; mongoc_write_command_t command = {0}; mongoc_write_command_t *last; bool ret = false; ENTRY; BSON_ASSERT (bulk); BSON_ASSERT (document); BULK_RETURN_IF_PRIOR_ERROR; if (!_mongoc_bulk_insert_opts_parse ( bulk->client, opts, &insert_opts, error)) { GOTO (done); } if (!_mongoc_validate_new_document (document, insert_opts.validate, error)) { GOTO (done); } if (bulk->commands.len) { last = &_mongoc_array_index ( &bulk->commands, mongoc_write_command_t, bulk->commands.len - 1); if (last->type == MONGOC_WRITE_COMMAND_INSERT) { _mongoc_write_command_insert_append (last, document); ret = true; GOTO (done); } } _mongoc_write_command_init_insert ( &command, document, &insert_opts.extra, bulk->flags, bulk->operation_id, !mongoc_write_concern_is_acknowledged (bulk->write_concern)); _mongoc_array_append_val (&bulk->commands, command); ret = true; done: _mongoc_bulk_insert_opts_cleanup (&insert_opts); RETURN (ret); } static void _mongoc_bulk_operation_update_append ( mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const mongoc_bulk_update_opts_t *update_opts, const bson_t *array_filters, const bson_t *extra_opts) { mongoc_write_command_t command = {0}; mongoc_write_command_t *last; bson_t opts; bool has_collation; bool has_array_filters; bson_init (&opts); bson_append_bool (&opts, "upsert", 6, update_opts->upsert); bson_append_bool (&opts, "multi", 5, update_opts->multi); has_array_filters = !bson_empty0 (array_filters); if (has_array_filters) { bson_append_array (&opts, "arrayFilters", 12, array_filters); } has_collation = !bson_empty (&update_opts->collation); if (has_collation) { bson_append_document (&opts, "collation", 9, &update_opts->collation); } if (extra_opts) { bson_concat (&opts, extra_opts); } if (bulk->commands.len) { last = &_mongoc_array_index ( &bulk->commands, mongoc_write_command_t, bulk->commands.len - 1); if (last->type == MONGOC_WRITE_COMMAND_UPDATE) { last->flags.has_collation |= has_collation; last->flags.has_multi_write |= update_opts->multi; _mongoc_write_command_update_append (last, selector, document, &opts); bson_destroy (&opts); return; } } _mongoc_write_command_init_update ( &command, selector, document, &opts, bulk->flags, bulk->operation_id); command.flags.has_array_filters = has_array_filters; command.flags.has_collation = has_collation; command.flags.has_multi_write = update_opts->multi; _mongoc_array_append_val (&bulk->commands, command); bson_destroy (&opts); } static bool _mongoc_bulk_operation_update_with_opts ( mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const mongoc_bulk_update_opts_t *update_opts, const bson_t *array_filters, const bson_t *extra_opts, bool multi, bson_error_t *error) /* OUT */ { ENTRY; BSON_ASSERT (bulk); BSON_ASSERT (selector); BSON_ASSERT (document); if (!_mongoc_validate_update (document, update_opts->validate, error)) { RETURN (false); } /* allow "multi" in opts, but it must be the correct multi */ if (update_opts->multi != multi) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid \"multi\" in opts: %s." " The value must be %s, or omitted.", update_opts->multi ? "true" : "false", multi ? 
"true" : "false"); RETURN (false); } _mongoc_bulk_operation_update_append ( bulk, selector, document, update_opts, array_filters, extra_opts); RETURN (true); } bool mongoc_bulk_operation_update_one_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const bson_t *opts, bson_error_t *error) /* OUT */ { mongoc_bulk_update_one_opts_t update_opts; bool ret; ENTRY; BULK_RETURN_IF_PRIOR_ERROR; if (!_mongoc_bulk_update_one_opts_parse ( bulk->client, opts, &update_opts, error)) { _mongoc_bulk_update_one_opts_cleanup (&update_opts); RETURN (false); } ret = _mongoc_bulk_operation_update_with_opts (bulk, selector, document, &update_opts.update, &update_opts.arrayFilters, &update_opts.extra, false /* multi */, error); _mongoc_bulk_update_one_opts_cleanup (&update_opts); RETURN (ret); } bool mongoc_bulk_operation_update_many_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const bson_t *opts, bson_error_t *error) /* OUT */ { mongoc_bulk_update_many_opts_t update_opts; bool ret; ENTRY; BULK_RETURN_IF_PRIOR_ERROR; if (!_mongoc_bulk_update_many_opts_parse ( bulk->client, opts, &update_opts, error)) { _mongoc_bulk_update_many_opts_cleanup (&update_opts); RETURN (false); } ret = _mongoc_bulk_operation_update_with_opts (bulk, selector, document, &update_opts.update, &update_opts.arrayFilters, &update_opts.extra, true /* multi */, error); _mongoc_bulk_update_many_opts_cleanup (&update_opts); RETURN (ret); } void mongoc_bulk_operation_update (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, bool upsert) { bson_t opts; bson_error_t *error = &bulk->result.error; ENTRY; BULK_EXIT_IF_PRIOR_ERROR; bson_init (&opts); if (upsert) { BSON_APPEND_BOOL (&opts, "upsert", upsert); } if (!mongoc_bulk_operation_update_many_with_opts ( bulk, selector, document, &opts, error)) { MONGOC_WARNING ("%s", error->message); } bson_destroy (&opts); if (error->domain) { MONGOC_WARNING ("%s", error->message); } EXIT; } void mongoc_bulk_operation_update_one (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, bool upsert) { bson_t opts; bson_error_t *error = &bulk->result.error; ENTRY; BULK_EXIT_IF_PRIOR_ERROR; bson_init (&opts); BSON_APPEND_BOOL (&opts, "upsert", upsert); if (!mongoc_bulk_operation_update_one_with_opts ( bulk, selector, document, &opts, error)) { MONGOC_WARNING ("%s", error->message); } bson_destroy (&opts); if (error->domain) { MONGOC_WARNING ("%s", error->message); } EXIT; } bool mongoc_bulk_operation_replace_one_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const bson_t *opts, bson_error_t *error) /* OUT */ { mongoc_bulk_replace_one_opts_t repl_opts; mongoc_bulk_update_opts_t *update_opts = &repl_opts.update; bool ret = false; ENTRY; BSON_ASSERT (bulk); BSON_ASSERT (selector); BSON_ASSERT (document); BULK_RETURN_IF_PRIOR_ERROR; if (!_mongoc_bulk_replace_one_opts_parse ( bulk->client, opts, &repl_opts, error)) { GOTO (done); } if (!_mongoc_validate_replace (document, update_opts->validate, error)) { GOTO (done); } /* allow "multi" in opts, but it must be the correct multi */ if (update_opts->multi) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid \"multi\": true in opts for" " mongoc_bulk_operation_replace_one_with_opts." 
" The value must be true, or omitted."); GOTO (done); } _mongoc_bulk_operation_update_append ( bulk, selector, document, update_opts, NULL, &repl_opts.extra); ret = true; done: _mongoc_bulk_replace_one_opts_cleanup (&repl_opts); RETURN (ret); } void mongoc_bulk_operation_replace_one (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, bool upsert) { bson_t opts = BSON_INITIALIZER; bson_error_t *error = &bulk->result.error; ENTRY; BSON_APPEND_BOOL (&opts, "upsert", upsert); if (!mongoc_bulk_operation_replace_one_with_opts ( bulk, selector, document, &opts, error)) { MONGOC_WARNING ("%s", error->message); } bson_destroy (&opts); EXIT; } uint32_t mongoc_bulk_operation_execute (mongoc_bulk_operation_t *bulk, /* IN */ bson_t *reply, /* OUT */ bson_error_t *error) /* OUT */ { mongoc_cluster_t *cluster; mongoc_write_command_t *command; mongoc_server_stream_t *server_stream; bool ret; uint32_t offset = 0; int i; ENTRY; BSON_ASSERT (bulk); if (!bulk->client) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "mongoc_bulk_operation_execute() requires a client " "and one has not been set."); GOTO (err); } cluster = &bulk->client->cluster; if (bulk->executed) { _mongoc_write_result_destroy (&bulk->result); _mongoc_write_result_init (&bulk->result); } bulk->executed = true; if (!bulk->database) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "mongoc_bulk_operation_execute() requires a database " "and one has not been set."); GOTO (err); } else if (!bulk->collection) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "mongoc_bulk_operation_execute() requires a collection " "and one has not been set."); GOTO (err); } /* error stored by functions like mongoc_bulk_operation_insert that * can't report errors immediately */ if (bulk->result.error.domain) { if (error) { memcpy (error, &bulk->result.error, sizeof (bson_error_t)); } GOTO (err); } if (!bulk->commands.len) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot do an empty bulk write"); GOTO (err); } for (i = 0; i < bulk->commands.len; i++) { if (bulk->server_id) { server_stream = mongoc_cluster_stream_for_server (cluster, bulk->server_id, true /* reconnect_ok */, bulk->session, reply, error); } else { server_stream = mongoc_cluster_stream_for_writes ( cluster, bulk->session, reply, error); } if (!server_stream) { /* stream_for_server and stream_for_writes initialize reply on error */ RETURN (false); } command = &_mongoc_array_index (&bulk->commands, mongoc_write_command_t, i); _mongoc_write_command_execute (command, bulk->client, server_stream, bulk->database, bulk->collection, bulk->write_concern, offset, bulk->session, &bulk->result); bulk->server_id = server_stream->sd->id; if (bulk->result.failed && (bulk->flags.ordered || bulk->result.must_stop)) { mongoc_server_stream_cleanup (server_stream); GOTO (cleanup); } offset += command->n_documents; mongoc_server_stream_cleanup (server_stream); } cleanup: _mongoc_bson_init_if_set (reply); ret = MONGOC_WRITE_RESULT_COMPLETE (&bulk->result, bulk->client->error_api_version, bulk->write_concern, MONGOC_ERROR_COMMAND /* err domain */, reply, error); RETURN (ret ? 
bulk->server_id : 0); err: _mongoc_bson_init_if_set (reply); RETURN (false); } void mongoc_bulk_operation_set_write_concern ( mongoc_bulk_operation_t *bulk, const mongoc_write_concern_t *write_concern) { BSON_ASSERT (bulk); if (bulk->write_concern) { mongoc_write_concern_destroy (bulk->write_concern); } if (write_concern) { bulk->write_concern = mongoc_write_concern_copy (write_concern); } else { bulk->write_concern = mongoc_write_concern_new (); } } const mongoc_write_concern_t * mongoc_bulk_operation_get_write_concern (const mongoc_bulk_operation_t *bulk) { BSON_ASSERT (bulk); return bulk->write_concern; } void mongoc_bulk_operation_set_database (mongoc_bulk_operation_t *bulk, const char *database) { BSON_ASSERT (bulk); if (bulk->database) { bson_free (bulk->database); } bulk->database = bson_strdup (database); } void mongoc_bulk_operation_set_collection (mongoc_bulk_operation_t *bulk, const char *collection) { BSON_ASSERT (bulk); if (bulk->collection) { bson_free (bulk->collection); } bulk->collection = bson_strdup (collection); } void mongoc_bulk_operation_set_client (mongoc_bulk_operation_t *bulk, void *client) { BSON_ASSERT (bulk); BSON_ASSERT (client); if (bulk->session) { BSON_ASSERT (bulk->session->client == client); } bulk->client = (mongoc_client_t *) client; /* if you call set_client, bulk was likely made by mongoc_bulk_operation_new, * not mongoc_collection_create_bulk_operation_with_opts(), so operation_id * is 0. */ if (!bulk->operation_id) { bulk->operation_id = ++bulk->client->cluster.operation_id; } } void mongoc_bulk_operation_set_client_session ( mongoc_bulk_operation_t *bulk, struct _mongoc_client_session_t *client_session) { BSON_ASSERT (bulk); BSON_ASSERT (client_session); if (bulk->client) { BSON_ASSERT (bulk->client == client_session->client); } bulk->session = client_session; } uint32_t mongoc_bulk_operation_get_hint (const mongoc_bulk_operation_t *bulk) { BSON_ASSERT (bulk); return bulk->server_id; } void mongoc_bulk_operation_set_hint (mongoc_bulk_operation_t *bulk, uint32_t server_id) { BSON_ASSERT (bulk); bulk->server_id = server_id; } void mongoc_bulk_operation_set_bypass_document_validation ( mongoc_bulk_operation_t *bulk, bool bypass) { BSON_ASSERT (bulk); bulk->flags.bypass_document_validation = bypass; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-bulk-operation.h0000644000076500000240000001504413572250757026422 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_BULK_OPERATION_H #define MONGOC_BULK_OPERATION_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-write-concern.h" /* ordered, bypass_document_validation, has_collation, multi */ #define MONGOC_BULK_WRITE_FLAGS_INIT \ { \ true, false, 0 \ } BSON_BEGIN_DECLS /* forward decl */ struct _mongoc_client_session_t; typedef struct _mongoc_bulk_operation_t mongoc_bulk_operation_t; typedef struct _mongoc_bulk_write_flags_t mongoc_bulk_write_flags_t; MONGOC_EXPORT (void) mongoc_bulk_operation_destroy (mongoc_bulk_operation_t *bulk); MONGOC_EXPORT (uint32_t) mongoc_bulk_operation_execute (mongoc_bulk_operation_t *bulk, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (void) mongoc_bulk_operation_delete (mongoc_bulk_operation_t *bulk, const bson_t *selector) BSON_GNUC_DEPRECATED_FOR (mongoc_bulk_operation_remove); MONGOC_EXPORT (void) mongoc_bulk_operation_delete_one (mongoc_bulk_operation_t *bulk, const bson_t *selector) BSON_GNUC_DEPRECATED_FOR (mongoc_bulk_operation_remove_one); MONGOC_EXPORT (void) mongoc_bulk_operation_insert (mongoc_bulk_operation_t *bulk, const bson_t *document); MONGOC_EXPORT (bool) mongoc_bulk_operation_insert_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *document, const bson_t *opts, bson_error_t *error); /* OUT */ MONGOC_EXPORT (void) mongoc_bulk_operation_remove (mongoc_bulk_operation_t *bulk, const bson_t *selector); MONGOC_EXPORT (bool) mongoc_bulk_operation_remove_many_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *opts, bson_error_t *error); /* OUT */ MONGOC_EXPORT (void) mongoc_bulk_operation_remove_one (mongoc_bulk_operation_t *bulk, const bson_t *selector); MONGOC_EXPORT (bool) mongoc_bulk_operation_remove_one_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *opts, bson_error_t *error); /* OUT */ MONGOC_EXPORT (void) mongoc_bulk_operation_replace_one (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, bool upsert); MONGOC_EXPORT (bool) mongoc_bulk_operation_replace_one_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const bson_t *opts, bson_error_t *error); /* OUT */ MONGOC_EXPORT (void) mongoc_bulk_operation_update (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, bool upsert); MONGOC_EXPORT (bool) mongoc_bulk_operation_update_many_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const bson_t *opts, bson_error_t *error); /* OUT */ MONGOC_EXPORT (void) mongoc_bulk_operation_update_one (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, bool upsert); MONGOC_EXPORT (bool) mongoc_bulk_operation_update_one_with_opts (mongoc_bulk_operation_t *bulk, const bson_t *selector, const bson_t *document, const bson_t *opts, bson_error_t *error); /* OUT */ MONGOC_EXPORT (void) mongoc_bulk_operation_set_bypass_document_validation ( mongoc_bulk_operation_t *bulk, bool bypass); /* * The following functions are really only useful by language bindings and * those wanting to replay a bulk operation to a number of clients or * collections. 
*/ MONGOC_EXPORT (mongoc_bulk_operation_t *) mongoc_bulk_operation_new (bool ordered); MONGOC_EXPORT (void) mongoc_bulk_operation_set_write_concern ( mongoc_bulk_operation_t *bulk, const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (void) mongoc_bulk_operation_set_database (mongoc_bulk_operation_t *bulk, const char *database); MONGOC_EXPORT (void) mongoc_bulk_operation_set_collection (mongoc_bulk_operation_t *bulk, const char *collection); MONGOC_EXPORT (void) mongoc_bulk_operation_set_client (mongoc_bulk_operation_t *bulk, void *client); MONGOC_EXPORT (void) mongoc_bulk_operation_set_client_session ( mongoc_bulk_operation_t *bulk, struct _mongoc_client_session_t *client_session); /* These names include the term "hint" for backward compatibility, should be * mongoc_bulk_operation_get_server_id, mongoc_bulk_operation_set_server_id. */ MONGOC_EXPORT (void) mongoc_bulk_operation_set_hint (mongoc_bulk_operation_t *bulk, uint32_t server_id); MONGOC_EXPORT (uint32_t) mongoc_bulk_operation_get_hint (const mongoc_bulk_operation_t *bulk); MONGOC_EXPORT (const mongoc_write_concern_t *) mongoc_bulk_operation_get_write_concern (const mongoc_bulk_operation_t *bulk); BSON_END_DECLS #endif /* MONGOC_BULK_OPERATION_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-change-stream-private.h0000644000076500000240000000502613572250757027654 0ustar alcaeusstaff/* * Copyright 2017-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CHANGE_STREAM_PRIVATE_H #define MONGOC_CHANGE_STREAM_PRIVATE_H #include "mongoc/mongoc-change-stream.h" #include "mongoc/mongoc-client-session.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-opts-private.h" #include "mongoc/mongoc-opts-helpers-private.h" typedef enum { MONGOC_CHANGE_STREAM_COLLECTION, MONGOC_CHANGE_STREAM_DATABASE, MONGOC_CHANGE_STREAM_CLIENT } mongoc_change_stream_type_t; struct _mongoc_change_stream_t { mongoc_change_stream_opts_t opts; mongoc_timestamp_t operation_time; bson_t pipeline_to_append; bson_t resume_token; bson_t *full_document; bson_error_t err; bson_t err_doc; mongoc_cursor_t *cursor; mongoc_client_t *client; mongoc_read_prefs_t *read_prefs; mongoc_read_concern_t *read_concern; mongoc_change_stream_type_t change_stream_type; char db[140]; char coll[140]; int64_t max_await_time_ms; int32_t batch_size; bool has_returned_results; /* Track whether the change stream has resumed after an error, as this * determines how we construct an initial or resuming aggregate command. 
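 * On a resume, the cached resume_token is replayed as resumeAfter (or as
 * startAfter when the stream was started with startAfter and has not yet
 * returned a result); otherwise the saved operation_time is replayed as
 * startAtOperationTime on servers that support it. See _make_command in
 * mongoc-change-stream.c.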
*/ bool resumed; mongoc_client_session_t *implicit_session; }; mongoc_change_stream_t * _mongoc_change_stream_new_from_collection (const mongoc_collection_t *coll, const bson_t *pipeline, const bson_t *opts); mongoc_change_stream_t * _mongoc_change_stream_new_from_database (const mongoc_database_t *db, const bson_t *pipeline, const bson_t *opts); mongoc_change_stream_t * _mongoc_change_stream_new_from_client (mongoc_client_t *client, const bson_t *pipeline, const bson_t *opts); #endif /* MONGOC_CHANGE_STREAM_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-change-stream.c0000644000076500000240000005452313572250757026205 0ustar alcaeusstaff/* * Copyright 2017-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-change-stream-private.h" #include "mongoc/mongoc-collection-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-database-private.h" #include "mongoc/mongoc-error.h" #define CHANGE_STREAM_ERR(_str) \ bson_set_error (&stream->err, \ MONGOC_ERROR_CURSOR, \ MONGOC_ERROR_BSON, \ "Could not set " _str); /* the caller knows either a client or server error has occurred. * `reply` contains the server reply or an empty document. */ static bool _is_resumable_error (const bson_t *reply) { bson_error_t error = {0}; /* Change Streams Spec resumable criteria: "any error encountered which is * not a server error (e.g. a timeout error or network error)" */ if (bson_empty (reply)) { return true; } if (_mongoc_cmd_check_ok (reply, MONGOC_ERROR_API_VERSION_2, &error)) { return true; } if (mongoc_error_has_label (reply, "NonResumableChangeStreamError")) { return false; } /* Change Streams Spec resumable criteria: "a server error response with an * error message containing the substring 'not master' or 'node is * recovering' */ if (strstr (error.message, "not master") || strstr (error.message, "node is recovering")) { return true; } /* Change Streams Spec resumable criteria: "any server error response from a * getMore command excluding those containing the following error codes" */ switch (error.code) { case 11601: /* Interrupted */ case 136: /* CappedPositionLost */ case 237: /* CursorKilled */ case MONGOC_ERROR_QUERY_FAILURE: /* error code omitted */ return false; default: return true; } } static void _set_resume_token (mongoc_change_stream_t *stream, const bson_t *resume_token) { BSON_ASSERT (stream); BSON_ASSERT (resume_token); bson_destroy (&stream->resume_token); bson_copy_to (resume_token, &stream->resume_token); } /* construct the aggregate command in cmd. 
looks like one of the following: * for a collection change stream: * { aggregate: collname, pipeline: [], cursor: { batchSize: x } } * for a database change stream: * { aggregate: 1, pipeline: [], cursor: { batchSize: x } } * for a client change stream: * { aggregate: 1, pipeline: [{$changeStream: {allChangesForCluster: true}}], * cursor: { batchSize: x } } */ static void _make_command (mongoc_change_stream_t *stream, bson_t *command, int32_t max_wire_version) { bson_iter_t iter; bson_t change_stream_stage; /* { $changeStream: } */ bson_t change_stream_doc; bson_t pipeline; bson_t cursor_doc; if (stream->change_stream_type == MONGOC_CHANGE_STREAM_COLLECTION) { bson_append_utf8 ( command, "aggregate", 9, stream->coll, (int) strlen (stream->coll)); } else { bson_append_int32 (command, "aggregate", 9, 1); } bson_append_array_begin (command, "pipeline", 8, &pipeline); /* append the $changeStream stage. */ bson_append_document_begin (&pipeline, "0", 1, &change_stream_stage); bson_append_document_begin ( &change_stream_stage, "$changeStream", 13, &change_stream_doc); bson_concat (&change_stream_doc, stream->full_document); if (stream->resumed) { /* Change stream spec: Resume Process */ /* If there is a cached resumeToken: */ if (!bson_empty (&stream->resume_token)) { /* If the ChangeStream was started with startAfter and has yet to return a result document: */ if (!bson_empty (&stream->opts.startAfter) && !stream->has_returned_results) { /* The driver MUST set startAfter to the cached resumeToken */ BSON_APPEND_DOCUMENT ( &change_stream_doc, "startAfter", &stream->resume_token); } else { /* The driver MUST set resumeAfter to the cached resumeToken */ BSON_APPEND_DOCUMENT ( &change_stream_doc, "resumeAfter", &stream->resume_token); } } else if (!_mongoc_timestamp_empty (&stream->operation_time) && max_wire_version >= 7) { /* Else if there is no cached resumeToken and the ChangeStream has a saved operation time and the max wire version is >= 7, the driver MUST set startAtOperationTime */ _mongoc_timestamp_append (&stream->operation_time, &change_stream_doc, "startAtOperationTime"); } } else { /* Change streams spec: "startAtOperationTime, resumeAfter, and startAfter * are all mutually exclusive; if any two are set, the server will return * an error. Drivers MUST NOT throw a custom error, and MUST defer to the * server error." 
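 *
 * For example (illustrative), an initial collection stream started with a
 * user-supplied resumeAfter token produces a stage like:
 *
 *    { "$changeStream" : { "fullDocument" : "default",
 *                          "resumeAfter" : <resume token document> } }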
*/ if (!bson_empty (&stream->opts.resumeAfter)) { BSON_APPEND_DOCUMENT ( &change_stream_doc, "resumeAfter", &stream->opts.resumeAfter); /* Update the cached resume token */ _set_resume_token (stream, &stream->opts.resumeAfter); } if (!bson_empty (&stream->opts.startAfter)) { BSON_APPEND_DOCUMENT ( &change_stream_doc, "startAfter", &stream->opts.startAfter); /* Update the cached resume token (take precedence over resumeAfter) */ _set_resume_token (stream, &stream->opts.startAfter); } if (!_mongoc_timestamp_empty (&stream->operation_time)) { _mongoc_timestamp_append (&stream->operation_time, &change_stream_doc, "startAtOperationTime"); } } if (stream->change_stream_type == MONGOC_CHANGE_STREAM_CLIENT) { bson_append_bool (&change_stream_doc, "allChangesForCluster", 20, true); } bson_append_document_end (&change_stream_stage, &change_stream_doc); bson_append_document_end (&pipeline, &change_stream_stage); /* Append user pipeline if it exists */ if (bson_iter_init_find (&iter, &stream->pipeline_to_append, "pipeline") && BSON_ITER_HOLDS_ARRAY (&iter)) { bson_iter_t child_iter; uint32_t key_int = 1; char buf[16]; const char *key_str; BSON_ASSERT (bson_iter_recurse (&iter, &child_iter)); while (bson_iter_next (&child_iter)) { /* the user pipeline may consist of invalid stages or non-documents. * append anyway, and rely on the server error. */ size_t keyLen = bson_uint32_to_string (key_int, &key_str, buf, sizeof (buf)); bson_append_value ( &pipeline, key_str, (int) keyLen, bson_iter_value (&child_iter)); ++key_int; } } bson_append_array_end (command, &pipeline); /* Add batch size if needed */ bson_append_document_begin (command, "cursor", 6, &cursor_doc); if (stream->batch_size > 0) { bson_append_int32 (&cursor_doc, "batchSize", 9, stream->batch_size); } bson_append_document_end (command, &cursor_doc); } /*--------------------------------------------------------------------------- * * _make_cursor -- * * Construct and send the aggregate command and create the resulting * cursor. On error, stream->cursor remains NULL, otherwise it is * created and must be destroyed. * * Return: * False on error and sets stream->err. * *-------------------------------------------------------------------------- */ static bool _make_cursor (mongoc_change_stream_t *stream) { mongoc_client_session_t *cs = NULL; bson_t command_opts; bson_t command; /* { aggregate: "coll", pipeline: [], ... } */ bson_t reply; bson_t getmore_opts = BSON_INITIALIZER; bson_iter_t iter; mongoc_server_description_t *sd; uint32_t server_id; int32_t max_wire_version = -1; BSON_ASSERT (stream); BSON_ASSERT (!stream->cursor); bson_init (&command); bson_copy_to (&(stream->opts.extra), &command_opts); sd = mongoc_client_select_server ( stream->client, false /* for_writes */, stream->read_prefs, &stream->err); if (!sd) { goto cleanup; } server_id = mongoc_server_description_id (sd); bson_append_int32 (&command_opts, "serverId", 8, server_id); bson_append_int32 (&getmore_opts, "serverId", 8, server_id); max_wire_version = sd->max_wire_version; mongoc_server_description_destroy (sd); if (bson_iter_init_find (&iter, &command_opts, "sessionId")) { if (!_mongoc_client_session_from_iter ( stream->client, &iter, &cs, &stream->err)) { goto cleanup; } } else if (stream->implicit_session) { /* If an implicit session was created before, and this cursor is now * being recreated after resuming, then use the same session as before. 
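 * Reusing it keeps the lsid identical across the new aggregate command and
 * its subsequent getMores, which must share the same session.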
*/ cs = stream->implicit_session; if (!mongoc_client_session_append (cs, &command_opts, &stream->err)) { goto cleanup; } } else { /* Create an implicit session. This session lsid must be the same for the * agg command and the subsequent getMores. Thus, this implicit session is * passed as if it were an explicit session to * mongoc_client_read_command_with_opts and * _mongoc_cursor_change_stream_new, but it is still implicit and its * lifetime is owned by this change_stream_t. */ mongoc_session_opt_t *session_opts; session_opts = mongoc_session_opts_new (); mongoc_session_opts_set_causal_consistency (session_opts, false); /* returns NULL if sessions aren't supported. ignore errors. */ cs = mongoc_client_start_session (stream->client, session_opts, NULL); stream->implicit_session = cs; mongoc_session_opts_destroy (session_opts); if (cs && !mongoc_client_session_append (cs, &command_opts, &stream->err)) { goto cleanup; } } if (cs && !mongoc_client_session_append (cs, &getmore_opts, &stream->err)) { goto cleanup; } if (stream->read_concern && !bson_has_field (&command_opts, "readConcern")) { mongoc_read_concern_append (stream->read_concern, &command_opts); } _make_command (stream, &command, max_wire_version); /* even though serverId has already been set, still pass the read prefs. * they are necessary for OP_MSG if sending to a secondary. */ if (!mongoc_client_read_command_with_opts (stream->client, stream->db, &command, stream->read_prefs, &command_opts, &reply, &stream->err)) { bson_destroy (&stream->err_doc); bson_copy_to (&reply, &stream->err_doc); bson_destroy (&reply); goto cleanup; } bson_append_bool ( &getmore_opts, MONGOC_CURSOR_TAILABLE, MONGOC_CURSOR_TAILABLE_LEN, true); bson_append_bool (&getmore_opts, MONGOC_CURSOR_AWAIT_DATA, MONGOC_CURSOR_AWAIT_DATA_LEN, true); /* maxTimeMS is only appended to getMores if these are set in cursor opts. */ if (stream->max_await_time_ms > 0) { bson_append_int64 (&getmore_opts, MONGOC_CURSOR_MAX_AWAIT_TIME_MS, MONGOC_CURSOR_MAX_AWAIT_TIME_MS_LEN, stream->max_await_time_ms); } if (stream->batch_size > 0) { bson_append_int32 (&getmore_opts, MONGOC_CURSOR_BATCH_SIZE, MONGOC_CURSOR_BATCH_SIZE_LEN, stream->batch_size); } /* steals reply. */ stream->cursor = _mongoc_cursor_change_stream_new (stream->client, &reply, &getmore_opts); if (mongoc_cursor_error (stream->cursor, NULL)) { goto cleanup; } /* Change stream spec: "When aggregate or getMore returns: If an empty batch * was returned and a postBatchResumeToken was included, cache it." 
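 *
 * The cached token is what _make_command () sends as resumeAfter /
 * startAfter if this stream later resumes, and it is also what
 * mongoc_change_stream_get_resume_token () reports to the caller.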
*/ if (_mongoc_cursor_change_stream_end_of_batch (stream->cursor) && _mongoc_cursor_change_stream_has_post_batch_resume_token ( stream->cursor)) { _set_resume_token ( stream, _mongoc_cursor_change_stream_get_post_batch_resume_token ( stream->cursor)); } /* Change stream spec: startAtOperationTime */ if (bson_empty (&stream->opts.resumeAfter) && bson_empty (&stream->opts.startAfter) && _mongoc_timestamp_empty (&stream->operation_time) && max_wire_version >= 7 && bson_empty (&stream->resume_token) && bson_iter_init_find ( &iter, _mongoc_cursor_change_stream_get_reply (stream->cursor), "operationTime") && BSON_ITER_HOLDS_TIMESTAMP (&iter)) { _mongoc_timestamp_set_from_bson (&stream->operation_time, &iter); } cleanup: bson_destroy (&command); bson_destroy (&command_opts); bson_destroy (&getmore_opts); return stream->err.code == 0; } /*--------------------------------------------------------------------------- * * _change_stream_init -- * * Called after @stream has the collection name, database name, read * preferences, and read concern set. Creates the change streams * cursor. * *-------------------------------------------------------------------------- */ void _change_stream_init (mongoc_change_stream_t *stream, const bson_t *pipeline, const bson_t *opts) { BSON_ASSERT (pipeline); stream->max_await_time_ms = -1; stream->batch_size = -1; bson_init (&stream->pipeline_to_append); bson_init (&stream->resume_token); bson_init (&stream->err_doc); if (!_mongoc_change_stream_opts_parse ( stream->client, opts, &stream->opts, &stream->err)) { return; } stream->full_document = BCON_NEW ("fullDocument", stream->opts.fullDocument); _mongoc_timestamp_set (&stream->operation_time, &stream->opts.startAtOperationTime); stream->batch_size = stream->opts.batchSize; stream->max_await_time_ms = stream->opts.maxAwaitTimeMS; /* Accept two forms of user pipeline: * 1. A document like: { "pipeline": [...] } * 2. An array-like document: { "0": {}, "1": {}, ... } * If the passed pipeline is invalid, we pass it along and let the server * error instead. 
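 *
 * For example (illustrative only), both of these opts produce the same
 * user stage appended after $changeStream:
 *
 *    { "pipeline" : [ { "$match" : { "operationType" : "insert" } } ] }
 *    { "0" : { "$match" : { "operationType" : "insert" } } }
 *
 * regardless of whether the stream came from mongoc_collection_watch,
 * mongoc_database_watch, or mongoc_client_watch.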
*/ if (!bson_empty (pipeline)) { bson_iter_t iter; if (bson_iter_init_find (&iter, pipeline, "pipeline") && BSON_ITER_HOLDS_ARRAY (&iter)) { if (!BSON_APPEND_VALUE (&stream->pipeline_to_append, "pipeline", bson_iter_value (&iter))) { CHANGE_STREAM_ERR ("pipeline"); } } else { if (!BSON_APPEND_ARRAY ( &stream->pipeline_to_append, "pipeline", pipeline)) { CHANGE_STREAM_ERR ("pipeline"); } } } if (stream->err.code == 0) { (void) _make_cursor (stream); } } mongoc_change_stream_t * _mongoc_change_stream_new_from_collection (const mongoc_collection_t *coll, const bson_t *pipeline, const bson_t *opts) { mongoc_change_stream_t *stream; BSON_ASSERT (coll); stream = (mongoc_change_stream_t *) bson_malloc0 (sizeof (mongoc_change_stream_t)); bson_strncpy (stream->db, coll->db, sizeof (stream->db)); bson_strncpy (stream->coll, coll->collection, sizeof (stream->coll)); stream->read_prefs = mongoc_read_prefs_copy (coll->read_prefs); stream->read_concern = mongoc_read_concern_copy (coll->read_concern); stream->client = coll->client; stream->change_stream_type = MONGOC_CHANGE_STREAM_COLLECTION; _change_stream_init (stream, pipeline, opts); return stream; } mongoc_change_stream_t * _mongoc_change_stream_new_from_database (const mongoc_database_t *db, const bson_t *pipeline, const bson_t *opts) { mongoc_change_stream_t *stream; BSON_ASSERT (db); stream = (mongoc_change_stream_t *) bson_malloc0 (sizeof (mongoc_change_stream_t)); bson_strncpy (stream->db, db->name, sizeof (stream->db)); stream->coll[0] = '\0'; stream->read_prefs = mongoc_read_prefs_copy (db->read_prefs); stream->read_concern = mongoc_read_concern_copy (db->read_concern); stream->client = db->client; stream->change_stream_type = MONGOC_CHANGE_STREAM_DATABASE; _change_stream_init (stream, pipeline, opts); return stream; } mongoc_change_stream_t * _mongoc_change_stream_new_from_client (mongoc_client_t *client, const bson_t *pipeline, const bson_t *opts) { mongoc_change_stream_t *stream; BSON_ASSERT (client); stream = (mongoc_change_stream_t *) bson_malloc0 (sizeof (mongoc_change_stream_t)); bson_strncpy (stream->db, "admin", sizeof (stream->db)); stream->coll[0] = '\0'; stream->read_prefs = mongoc_read_prefs_copy (client->read_prefs); stream->read_concern = mongoc_read_concern_copy (client->read_concern); stream->client = client; stream->change_stream_type = MONGOC_CHANGE_STREAM_CLIENT; _change_stream_init (stream, pipeline, opts); return stream; } const bson_t * mongoc_change_stream_get_resume_token (mongoc_change_stream_t *stream) { if (!bson_empty (&stream->resume_token)) { return &stream->resume_token; } return NULL; } bool mongoc_change_stream_next (mongoc_change_stream_t *stream, const bson_t **bson) { bson_iter_t iter; bson_t doc_resume_token; uint32_t len; const uint8_t *data; bool ret = false; BSON_ASSERT (stream); BSON_ASSERT (bson); if (stream->err.code != 0) { goto end; } BSON_ASSERT (stream->cursor); if (!mongoc_cursor_next (stream->cursor, bson)) { const bson_t *err_doc; bson_error_t err; bool resumable = false; if (!mongoc_cursor_error_document (stream->cursor, &err, &err_doc)) { /* no error occurred, just no documents left. */ goto end; } resumable = _is_resumable_error (err_doc); while (resumable) { /* recreate the cursor. 
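 * Each iteration destroys the dead cursor, leaves stream->resumed set so
 * that _make_command picks up the cached resume token or operation time,
 * rebuilds the cursor with _make_cursor, and calls mongoc_cursor_next
 * again; it keeps looping only while each new attempt fails with another
 * resumable error.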
*/ mongoc_cursor_destroy (stream->cursor); stream->cursor = NULL; stream->resumed = true; if (!_make_cursor (stream)) { goto end; } if (mongoc_cursor_next (stream->cursor, bson)) { break; } if (!mongoc_cursor_error_document (stream->cursor, &err, &err_doc)) { goto end; } if (err_doc) { resumable = _is_resumable_error (err_doc); } else { resumable = false; } } if (!resumable) { stream->err = err; bson_destroy (&stream->err_doc); bson_copy_to (err_doc, &stream->err_doc); goto end; } } /* we have received documents, either from the first call to next or after a * resume. */ stream->has_returned_results = true; if (!bson_iter_init_find (&iter, *bson, "_id") || !BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_set_error (&stream->err, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CHANGE_STREAM_NO_RESUME_TOKEN, "Cannot provide resume functionality when the resume " "token is missing"); goto end; } /* copy the resume token. */ bson_iter_document (&iter, &len, &data); BSON_ASSERT (bson_init_static (&doc_resume_token, data, len)); _set_resume_token (stream, &doc_resume_token); /* clear out the operation time, since we no longer need it to resume. */ _mongoc_timestamp_clear (&stream->operation_time); ret = true; end: /* Change stream spec: Updating the Cached Resume Token */ if (stream->cursor && !mongoc_cursor_error (stream->cursor, NULL) && _mongoc_cursor_change_stream_end_of_batch (stream->cursor) && _mongoc_cursor_change_stream_has_post_batch_resume_token ( stream->cursor)) { _set_resume_token ( stream, _mongoc_cursor_change_stream_get_post_batch_resume_token ( stream->cursor)); } /* Driver Sessions Spec: "When an implicit session is associated with a * cursor for use with getMore operations, the session MUST be returned to * the pool immediately following a getMore operation that indicates that the * cursor has been exhausted." */ if (stream->implicit_session) { /* if creating the change stream cursor errored, it may be null. */ if (!stream->cursor || stream->cursor->cursor_id == 0) { mongoc_client_session_destroy (stream->implicit_session); stream->implicit_session = NULL; } } return ret; } bool mongoc_change_stream_error_document (const mongoc_change_stream_t *stream, bson_error_t *err, const bson_t **bson) { BSON_ASSERT (stream); if (stream->err.code != 0) { if (err) { *err = stream->err; } if (bson) { *bson = &stream->err_doc; } return true; } if (bson) { *bson = NULL; } return false; } void mongoc_change_stream_destroy (mongoc_change_stream_t *stream) { if (!stream) { return; } bson_destroy (&stream->pipeline_to_append); bson_destroy (&stream->resume_token); bson_destroy (stream->full_document); bson_destroy (&stream->err_doc); _mongoc_change_stream_opts_cleanup (&stream->opts); mongoc_cursor_destroy (stream->cursor); mongoc_client_session_destroy (stream->implicit_session); mongoc_read_prefs_destroy (stream->read_prefs); mongoc_read_concern_destroy (stream->read_concern); bson_free (stream); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-change-stream.h0000644000076500000240000000252513572250757026205 0ustar alcaeusstaff/* * Copyright 2017-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CHANGE_STREAM_H #define MONGOC_CHANGE_STREAM_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS typedef struct _mongoc_change_stream_t mongoc_change_stream_t; MONGOC_EXPORT (void) mongoc_change_stream_destroy (mongoc_change_stream_t *); MONGOC_EXPORT (const bson_t *) mongoc_change_stream_get_resume_token (mongoc_change_stream_t *); MONGOC_EXPORT (bool) mongoc_change_stream_next (mongoc_change_stream_t *, const bson_t **); MONGOC_EXPORT (bool) mongoc_change_stream_error_document (const mongoc_change_stream_t *, bson_error_t *, const bson_t **); BSON_END_DECLS #endif /* MONGOC_CHANGE_STREAM_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client-pool-private.h0000644000076500000240000000254313572250757027364 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLIENT_POOL_PRIVATE_H #define MONGOC_CLIENT_POOL_PRIVATE_H #include #include "mongoc/mongoc-client-pool.h" #include "mongoc/mongoc-topology-description.h" #include "mongoc/mongoc-topology-private.h" BSON_BEGIN_DECLS /* for tests */ void _mongoc_client_pool_set_stream_initiator (mongoc_client_pool_t *pool, mongoc_stream_initiator_t si, void *user_data); size_t mongoc_client_pool_get_size (mongoc_client_pool_t *pool); size_t mongoc_client_pool_num_pushed (mongoc_client_pool_t *pool); mongoc_topology_t * _mongoc_client_pool_get_topology (mongoc_client_pool_t *pool); BSON_END_DECLS #endif /* MONGOC_CLIENT_POOL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client-pool.c0000644000076500000240000002524713572250757025715 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
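 */

/* Typical pool usage (an illustrative sketch only; it assumes a `uri`
 * built elsewhere, e.g. with mongoc_uri_new_with_error):
 *
 *    mongoc_client_pool_t *pool = mongoc_client_pool_new (uri);
 *    mongoc_client_t *client = mongoc_client_pool_pop (pool);
 *    ... run operations with client ...
 *    mongoc_client_pool_push (pool, client);
 *    mongoc_client_pool_destroy (pool);
 *
 * mongoc_client_pool_pop blocks once max_pool_size clients are checked
 * out, and mongoc_client_pool_push signals one waiting thread (see the
 * cond/mutex pair in _mongoc_client_pool_t below).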
*/ #include "mongoc/mongoc.h" #include "mongoc/mongoc-apm-private.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-client-pool-private.h" #include "mongoc/mongoc-client-pool.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-queue-private.h" #include "mongoc/mongoc-thread-private.h" #include "mongoc/mongoc-topology-private.h" #include "mongoc/mongoc-trace-private.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-ssl-private.h" #endif struct _mongoc_client_pool_t { bson_mutex_t mutex; mongoc_cond_t cond; mongoc_queue_t queue; mongoc_topology_t *topology; mongoc_uri_t *uri; uint32_t min_pool_size; uint32_t max_pool_size; uint32_t size; #ifdef MONGOC_ENABLE_SSL bool ssl_opts_set; mongoc_ssl_opt_t ssl_opts; #endif bool apm_callbacks_set; mongoc_apm_callbacks_t apm_callbacks; void *apm_context; int32_t error_api_version; bool error_api_set; }; #ifdef MONGOC_ENABLE_SSL void mongoc_client_pool_set_ssl_opts (mongoc_client_pool_t *pool, const mongoc_ssl_opt_t *opts) { BSON_ASSERT (pool); bson_mutex_lock (&pool->mutex); _mongoc_ssl_opts_cleanup (&pool->ssl_opts); memset (&pool->ssl_opts, 0, sizeof pool->ssl_opts); pool->ssl_opts_set = false; if (opts) { _mongoc_ssl_opts_copy_to (opts, &pool->ssl_opts); pool->ssl_opts_set = true; } mongoc_topology_scanner_set_ssl_opts (pool->topology->scanner, &pool->ssl_opts); bson_mutex_unlock (&pool->mutex); } #endif mongoc_client_pool_t * mongoc_client_pool_new (const mongoc_uri_t *uri) { mongoc_topology_t *topology; mongoc_client_pool_t *pool; const bson_t *b; bson_iter_t iter; const char *appname; ENTRY; BSON_ASSERT (uri); #ifndef MONGOC_ENABLE_SSL if (mongoc_uri_get_tls (uri)) { MONGOC_ERROR ("Can't create SSL client pool," " SSL not enabled in this build."); return NULL; } #endif pool = (mongoc_client_pool_t *) bson_malloc0 (sizeof *pool); bson_mutex_init (&pool->mutex); _mongoc_queue_init (&pool->queue); pool->uri = mongoc_uri_copy (uri); pool->min_pool_size = 0; pool->max_pool_size = 100; pool->size = 0; topology = mongoc_topology_new (uri, false); pool->topology = topology; pool->error_api_version = MONGOC_ERROR_API_VERSION_LEGACY; b = mongoc_uri_get_options (pool->uri); if (bson_iter_init_find_case (&iter, b, MONGOC_URI_MINPOOLSIZE)) { MONGOC_WARNING ( MONGOC_URI_MINPOOLSIZE " is deprecated; its behavior does not match its name, and its actual" " behavior will likely hurt performance."); if (BSON_ITER_HOLDS_INT32 (&iter)) { pool->min_pool_size = BSON_MAX (0, bson_iter_int32 (&iter)); } } if (bson_iter_init_find_case (&iter, b, MONGOC_URI_MAXPOOLSIZE)) { if (BSON_ITER_HOLDS_INT32 (&iter)) { pool->max_pool_size = BSON_MAX (1, bson_iter_int32 (&iter)); } } appname = mongoc_uri_get_option_as_utf8 (pool->uri, MONGOC_URI_APPNAME, NULL); if (appname) { /* the appname should have already been validated */ BSON_ASSERT (mongoc_client_pool_set_appname (pool, appname)); } #ifdef MONGOC_ENABLE_SSL if (mongoc_uri_get_tls (pool->uri)) { mongoc_ssl_opt_t ssl_opt = {0}; _mongoc_ssl_opts_from_uri (&ssl_opt, pool->uri); /* sets use_ssl = true */ mongoc_client_pool_set_ssl_opts (pool, &ssl_opt); } #endif mongoc_counter_client_pools_active_inc (); RETURN (pool); } void mongoc_client_pool_destroy (mongoc_client_pool_t *pool) { mongoc_client_t *client; ENTRY; if (!pool) { EXIT; } if (pool->topology->session_pool) { client = mongoc_client_pool_pop (pool); _mongoc_client_end_sessions (client); mongoc_client_pool_push (pool, client); } while ( (client = (mongoc_client_t *) _mongoc_queue_pop_head (&pool->queue))) { mongoc_client_destroy 
(client); } mongoc_topology_destroy (pool->topology); mongoc_uri_destroy (pool->uri); bson_mutex_destroy (&pool->mutex); mongoc_cond_destroy (&pool->cond); #ifdef MONGOC_ENABLE_SSL _mongoc_ssl_opts_cleanup (&pool->ssl_opts); #endif bson_free (pool); mongoc_counter_client_pools_active_dec (); mongoc_counter_client_pools_disposed_inc (); EXIT; } /* * Start the background topology scanner. * * This function assumes the pool's mutex is locked */ static void _start_scanner_if_needed (mongoc_client_pool_t *pool) { if (!_mongoc_topology_start_background_scanner (pool->topology)) { MONGOC_ERROR ("Background scanner did not start!"); abort (); } } static void _initialize_new_client (mongoc_client_pool_t *pool, mongoc_client_t *client) { /* for tests */ mongoc_client_set_stream_initiator ( client, pool->topology->scanner->initiator, pool->topology->scanner->initiator_context); client->error_api_version = pool->error_api_version; _mongoc_client_set_apm_callbacks_private ( client, &pool->apm_callbacks, pool->apm_context); #ifdef MONGOC_ENABLE_SSL if (pool->ssl_opts_set) { mongoc_client_set_ssl_opts (client, &pool->ssl_opts); } #endif } mongoc_client_t * mongoc_client_pool_pop (mongoc_client_pool_t *pool) { mongoc_client_t *client; ENTRY; BSON_ASSERT (pool); bson_mutex_lock (&pool->mutex); again: if (!(client = (mongoc_client_t *) _mongoc_queue_pop_head (&pool->queue))) { if (pool->size < pool->max_pool_size) { client = _mongoc_client_new_from_uri (pool->topology); _initialize_new_client (pool, client); pool->size++; } else { mongoc_cond_wait (&pool->cond, &pool->mutex); GOTO (again); } } _start_scanner_if_needed (pool); bson_mutex_unlock (&pool->mutex); RETURN (client); } mongoc_client_t * mongoc_client_pool_try_pop (mongoc_client_pool_t *pool) { mongoc_client_t *client; ENTRY; BSON_ASSERT (pool); bson_mutex_lock (&pool->mutex); if (!(client = (mongoc_client_t *) _mongoc_queue_pop_head (&pool->queue))) { if (pool->size < pool->max_pool_size) { client = _mongoc_client_new_from_uri (pool->topology); _initialize_new_client (pool, client); pool->size++; } } if (client) { _start_scanner_if_needed (pool); } bson_mutex_unlock (&pool->mutex); RETURN (client); } void mongoc_client_pool_push (mongoc_client_pool_t *pool, mongoc_client_t *client) { ENTRY; BSON_ASSERT (pool); BSON_ASSERT (client); bson_mutex_lock (&pool->mutex); _mongoc_queue_push_head (&pool->queue, client); if (pool->min_pool_size && _mongoc_queue_get_length (&pool->queue) > pool->min_pool_size) { mongoc_client_t *old_client; old_client = (mongoc_client_t *) _mongoc_queue_pop_tail (&pool->queue); if (old_client) { mongoc_client_destroy (old_client); pool->size--; } } mongoc_cond_signal (&pool->cond); bson_mutex_unlock (&pool->mutex); EXIT; } /* for tests */ void _mongoc_client_pool_set_stream_initiator (mongoc_client_pool_t *pool, mongoc_stream_initiator_t si, void *context) { mongoc_topology_scanner_set_stream_initiator ( pool->topology->scanner, si, context); } /* for tests */ size_t mongoc_client_pool_get_size (mongoc_client_pool_t *pool) { size_t size = 0; ENTRY; bson_mutex_lock (&pool->mutex); size = pool->size; bson_mutex_unlock (&pool->mutex); RETURN (size); } size_t mongoc_client_pool_num_pushed (mongoc_client_pool_t *pool) { size_t num_pushed = 0; ENTRY; bson_mutex_lock (&pool->mutex); num_pushed = pool->queue.length; bson_mutex_unlock (&pool->mutex); RETURN (num_pushed); } mongoc_topology_t * _mongoc_client_pool_get_topology (mongoc_client_pool_t *pool) { return pool->topology; } void mongoc_client_pool_max_size (mongoc_client_pool_t 
*pool, uint32_t max_pool_size) { ENTRY; bson_mutex_lock (&pool->mutex); pool->max_pool_size = max_pool_size; bson_mutex_unlock (&pool->mutex); EXIT; } void mongoc_client_pool_min_size (mongoc_client_pool_t *pool, uint32_t min_pool_size) { ENTRY; MONGOC_WARNING ( "mongoc_client_pool_min_size is deprecated; its behavior does not match" " its name, and its actual behavior will likely hurt performance."); bson_mutex_lock (&pool->mutex); pool->min_pool_size = min_pool_size; bson_mutex_unlock (&pool->mutex); EXIT; } bool mongoc_client_pool_set_apm_callbacks (mongoc_client_pool_t *pool, mongoc_apm_callbacks_t *callbacks, void *context) { mongoc_topology_t *topology; topology = pool->topology; if (pool->apm_callbacks_set) { MONGOC_ERROR ("Can only set callbacks once"); return false; } bson_mutex_lock (&topology->mutex); if (callbacks) { memcpy (&topology->description.apm_callbacks, callbacks, sizeof (mongoc_apm_callbacks_t)); memcpy (&pool->apm_callbacks, callbacks, sizeof (mongoc_apm_callbacks_t)); } mongoc_topology_set_apm_callbacks (topology, callbacks, context); topology->description.apm_context = context; pool->apm_context = context; pool->apm_callbacks_set = true; bson_mutex_unlock (&topology->mutex); return true; } bool mongoc_client_pool_set_error_api (mongoc_client_pool_t *pool, int32_t version) { if (version != MONGOC_ERROR_API_VERSION_LEGACY && version != MONGOC_ERROR_API_VERSION_2) { MONGOC_ERROR ("Unsupported Error API Version: %" PRId32, version); return false; } if (pool->error_api_set) { MONGOC_ERROR ("Can only set Error API Version once"); return false; } pool->error_api_version = version; pool->error_api_set = true; return true; } bool mongoc_client_pool_set_appname (mongoc_client_pool_t *pool, const char *appname) { bool ret; bson_mutex_lock (&pool->mutex); ret = _mongoc_topology_set_appname (pool->topology, appname); bson_mutex_unlock (&pool->mutex); return ret; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client-pool.h0000644000076500000240000000460113572250757025711 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
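 */

/* Informational note: a mongoc_client_pool_t is intended to be shared
 * across threads, while each mongoc_client_t popped from it must be used
 * by only one thread at a time; check clients out with
 * mongoc_client_pool_pop () and return them with mongoc_client_pool_push ()
 * instead of sharing a single client.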
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLIENT_POOL_H #define MONGOC_CLIENT_POOL_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-apm.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-ssl.h" #endif #include "mongoc/mongoc-uri.h" BSON_BEGIN_DECLS typedef struct _mongoc_client_pool_t mongoc_client_pool_t; MONGOC_EXPORT (mongoc_client_pool_t *) mongoc_client_pool_new (const mongoc_uri_t *uri); MONGOC_EXPORT (void) mongoc_client_pool_destroy (mongoc_client_pool_t *pool); MONGOC_EXPORT (mongoc_client_t *) mongoc_client_pool_pop (mongoc_client_pool_t *pool); MONGOC_EXPORT (void) mongoc_client_pool_push (mongoc_client_pool_t *pool, mongoc_client_t *client); MONGOC_EXPORT (mongoc_client_t *) mongoc_client_pool_try_pop (mongoc_client_pool_t *pool); MONGOC_EXPORT (void) mongoc_client_pool_max_size (mongoc_client_pool_t *pool, uint32_t max_pool_size); MONGOC_EXPORT (void) mongoc_client_pool_min_size (mongoc_client_pool_t *pool, uint32_t min_pool_size) BSON_GNUC_DEPRECATED; #ifdef MONGOC_ENABLE_SSL MONGOC_EXPORT (void) mongoc_client_pool_set_ssl_opts (mongoc_client_pool_t *pool, const mongoc_ssl_opt_t *opts); #endif MONGOC_EXPORT (bool) mongoc_client_pool_set_apm_callbacks (mongoc_client_pool_t *pool, mongoc_apm_callbacks_t *callbacks, void *context); MONGOC_EXPORT (bool) mongoc_client_pool_set_error_api (mongoc_client_pool_t *pool, int32_t version); MONGOC_EXPORT (bool) mongoc_client_pool_set_appname (mongoc_client_pool_t *pool, const char *appname); BSON_END_DECLS #endif /* MONGOC_CLIENT_POOL_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client-private.h0000644000076500000240000001600013572250757026406 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
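 */

/* Informational sketch: the WIRE_VERSION_* macros below gate features on
 * the server's reported wire version. For example, the change stream code
 * in this driver checks max_wire_version >= 7 (i.e. WIRE_VERSION_4_0)
 * before sending startAtOperationTime, conceptually:
 *
 *    if (sd->max_wire_version >= WIRE_VERSION_4_0) {
 *       ... the 4.0+ server supports operation-time resume ...
 *    }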
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLIENT_PRIVATE_H #define MONGOC_CLIENT_PRIVATE_H #include #include "mongoc/mongoc-apm-private.h" #include "mongoc/mongoc-buffer-private.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-cluster-private.h" #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-opcode.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-ssl.h" #endif #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-topology-private.h" #include "mongoc/mongoc-write-concern.h" BSON_BEGIN_DECLS /* protocol versions this driver can speak */ #define WIRE_VERSION_MIN 3 #define WIRE_VERSION_MAX 8 /* first version that supported "find" and "getMore" commands */ #define WIRE_VERSION_FIND_CMD 4 /* first version with "killCursors" command */ #define WIRE_VERSION_KILLCURSORS_CMD 4 /* first version when findAndModify accepts writeConcern */ #define WIRE_VERSION_FAM_WRITE_CONCERN 4 /* first version to support readConcern */ #define WIRE_VERSION_READ_CONCERN 4 /* first version to support maxStalenessSeconds */ #define WIRE_VERSION_MAX_STALENESS 5 /* first version to support writeConcern */ #define WIRE_VERSION_CMD_WRITE_CONCERN 5 /* first version to support collation */ #define WIRE_VERSION_COLLATION 5 /* first version to support OP_MSG */ #define WIRE_VERSION_OP_MSG 6 /* first version to support array filters for "update" command */ #define WIRE_VERSION_ARRAY_FILTERS 6 /* first version to support retryable reads */ #define WIRE_VERSION_RETRY_READS 6 /* first version to support retryable writes */ #define WIRE_VERSION_RETRY_WRITES 6 /* version corresponding to server 4.0 release */ #define WIRE_VERSION_4_0 7 /* version corresponding to server 4.2 release */ #define WIRE_VERSION_4_2 8 struct _mongoc_client_t { mongoc_uri_t *uri; mongoc_cluster_t cluster; bool in_exhaust; mongoc_stream_initiator_t initiator; void *initiator_data; #ifdef MONGOC_ENABLE_SSL bool use_ssl; mongoc_ssl_opt_t ssl_opts; #endif mongoc_topology_t *topology; mongoc_read_prefs_t *read_prefs; mongoc_read_concern_t *read_concern; mongoc_write_concern_t *write_concern; mongoc_apm_callbacks_t apm_callbacks; void *apm_context; int32_t error_api_version; bool error_api_set; /* mongoc_client_session_t's in use, to look up lsids and clusterTimes */ mongoc_set_t *client_sessions; unsigned int csid_rand_seed; uint32_t generation; }; /* Defines whether _mongoc_client_command_with_opts() is acting as a read * command helper for a command like "distinct", or a write command helper for * a command like "createRole", or both, like "aggregate" with "$out". */ typedef enum { MONGOC_CMD_RAW = 0, MONGOC_CMD_READ = 1, MONGOC_CMD_WRITE = 2, MONGOC_CMD_RW = 3, } mongoc_command_mode_t; BSON_STATIC_ASSERT2 (mongoc_cmd_rw, MONGOC_CMD_RW == (MONGOC_CMD_READ | MONGOC_CMD_WRITE)); typedef enum { MONGOC_RR_SRV, MONGOC_RR_TXT } mongoc_rr_type_t; typedef struct _mongoc_rr_data_t { /* Number of records returned by DNS. */ uint32_t count; /* Set to lowest TTL found when polling SRV records. */ uint32_t min_ttl; /* Initialized with copy of uri->hosts prior to polling. * Any remaining records after DNS query are no longer active. 
*/ mongoc_host_list_t *hosts; } mongoc_rr_data_t; bool _mongoc_client_get_rr (const char *service, mongoc_rr_type_t rr_type, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error); mongoc_client_t * _mongoc_client_new_from_uri (mongoc_topology_t *topology); bool _mongoc_client_set_apm_callbacks_private (mongoc_client_t *client, mongoc_apm_callbacks_t *callbacks, void *context); mongoc_stream_t * mongoc_client_default_stream_initiator (const mongoc_uri_t *uri, const mongoc_host_list_t *host, void *user_data, bson_error_t *error); mongoc_stream_t * _mongoc_client_create_stream (mongoc_client_t *client, const mongoc_host_list_t *host, bson_error_t *error); bool _mongoc_client_recv (mongoc_client_t *client, mongoc_rpc_t *rpc, mongoc_buffer_t *buffer, mongoc_server_stream_t *server_stream, bson_error_t *error); void _mongoc_client_kill_cursor (mongoc_client_t *client, uint32_t server_id, int64_t cursor_id, int64_t operation_id, const char *db, const char *collection, mongoc_client_session_t *cs); bool _mongoc_client_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, mongoc_command_mode_t mode, const bson_t *opts, mongoc_query_flags_t flags, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, mongoc_read_concern_t *default_rc, mongoc_write_concern_t *default_wc, bson_t *reply, bson_error_t *error); mongoc_server_session_t * _mongoc_client_pop_server_session (mongoc_client_t *client, bson_error_t *error); bool _mongoc_client_lookup_session (const mongoc_client_t *client, uint32_t client_session_id, mongoc_client_session_t **cs, bson_error_t *error); void _mongoc_client_unregister_session (mongoc_client_t *client, mongoc_client_session_t *session); void _mongoc_client_push_server_session (mongoc_client_t *client, mongoc_server_session_t *server_session); void _mongoc_client_end_sessions (mongoc_client_t *client); BSON_END_DECLS #endif /* MONGOC_CLIENT_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client-session-private.h0000644000076500000240000001133413572250757030074 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
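 */

/* Informational sketch: the error-label strings below come from the
 * Transactions Spec. Callers typically test a reply for them with the
 * public helper mongoc_error_has_label (), e.g.:
 *
 *    if (mongoc_error_has_label (&reply, "TransientTransactionError")) {
 *       ... the whole transaction may be retried ...
 *    }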
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLIENT_SESSION_PRIVATE_H #define MONGOC_CLIENT_SESSION_PRIVATE_H #include #include "mongoc/mongoc-client-session.h" /* error labels: see Transactions Spec */ #define TRANSIENT_TXN_ERR "TransientTransactionError" #define UNKNOWN_COMMIT_RESULT "UnknownTransactionCommitResult" #define MAX_TIME_MS_EXPIRED "MaxTimeMSExpired" #define DEFAULT_MAX_COMMIT_TIME_MS 0 #define MONGOC_DEFAULT_WTIMEOUT_FOR_COMMIT_RETRY 10000 struct _mongoc_transaction_opt_t { mongoc_read_concern_t *read_concern; mongoc_write_concern_t *write_concern; mongoc_read_prefs_t *read_prefs; int64_t max_commit_time_ms; }; typedef enum { MONGOC_SESSION_NO_OPTS = 0, MONGOC_SESSION_CAUSAL_CONSISTENCY = (1 << 0), } mongoc_session_flag_t; struct _mongoc_session_opt_t { mongoc_session_flag_t flags; mongoc_transaction_opt_t default_txn_opts; }; typedef struct _mongoc_server_session_t { struct _mongoc_server_session_t *prev, *next; int64_t last_used_usec; bson_t lsid; /* logical session id */ int64_t txn_number; /* transaction number */ } mongoc_server_session_t; typedef enum { MONGOC_TRANSACTION_NONE, MONGOC_TRANSACTION_STARTING, MONGOC_TRANSACTION_IN_PROGRESS, MONGOC_TRANSACTION_ENDING, MONGOC_TRANSACTION_COMMITTED, MONGOC_TRANSACTION_COMMITTED_EMPTY, MONGOC_TRANSACTION_ABORTED, } mongoc_transaction_state_t; typedef struct _mongoc_transaction_t { mongoc_transaction_state_t state; mongoc_transaction_opt_t opts; } mongoc_transaction_t; struct _mongoc_client_session_t { mongoc_client_t *client; mongoc_session_opt_t opts; mongoc_server_session_t *server_session; mongoc_transaction_t txn; uint32_t client_session_id; bson_t cluster_time; uint32_t operation_timestamp; uint32_t operation_increment; uint32_t client_generation; uint32_t server_id; bson_t *recovery_token; /* For testing only */ int64_t with_txn_timeout_ms; const char *fail_commit_label; }; bool _mongoc_parse_cluster_time (const bson_t *cluster_time, uint32_t *timestamp, uint32_t *increment); bool _mongoc_cluster_time_greater (const bson_t *new, const bson_t *old); void _mongoc_client_session_handle_reply (mongoc_client_session_t *session, bool is_acknowledged, const bson_t *reply); mongoc_server_session_t * _mongoc_server_session_new (bson_error_t *error); bool _mongoc_server_session_timed_out (const mongoc_server_session_t *server_session, int64_t session_timeout_minutes); void _mongoc_server_session_destroy (mongoc_server_session_t *server_session); mongoc_client_session_t * _mongoc_client_session_new (mongoc_client_t *client, mongoc_server_session_t *server_session, const mongoc_session_opt_t *opts, uint32_t client_session_id); bool _mongoc_client_session_from_iter (mongoc_client_t *client, const bson_iter_t *iter, mongoc_client_session_t **cs, bson_error_t *error); bool _mongoc_client_session_in_txn (const mongoc_client_session_t *session); bool _mongoc_client_session_in_txn_or_ending ( const mongoc_client_session_t *session); bool _mongoc_client_session_txn_in_progress (const mongoc_client_session_t *session); bool _mongoc_client_session_append_txn (mongoc_client_session_t *session, bson_t *cmd, bson_error_t *error); void _mongoc_client_session_append_read_concern (const mongoc_client_session_t *cs, const bson_t *user_read_concern, bool is_read_command, bson_t *cmd); void _mongoc_client_session_unpin (mongoc_client_session_t *session); void _mongoc_client_session_pin (mongoc_client_session_t *session, uint32_t server_id); #endif /* MONGOC_CLIENT_SESSION_PRIVATE_H */ 
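/* Typical session / transaction usage (an illustrative sketch of the public
 * API implemented in mongoc-client-session.c; error handling omitted):
 *
 *    mongoc_client_session_t *session =
 *       mongoc_client_start_session (client, NULL, &error);
 *    bson_t opts = BSON_INITIALIZER;
 *    mongoc_client_session_append (session, &opts, &error);
 *    mongoc_client_session_start_transaction (session, NULL, &error);
 *    ... run operations, passing &opts so they share the session ...
 *    mongoc_client_session_commit_transaction (session, &reply, &error);
 *    mongoc_client_session_destroy (session);
 *    bson_destroy (&opts);
 */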
mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client-session.c0000644000076500000240000013267213572250757026430 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-rand-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-read-prefs-private.h" #define SESSION_NEVER_USED (-1) #define WITH_TXN_TIMEOUT_MS (120 * 1000) static void txn_opts_set (mongoc_transaction_opt_t *opts, const mongoc_read_concern_t *read_concern, const mongoc_write_concern_t *write_concern, const mongoc_read_prefs_t *read_prefs, int64_t max_commit_time_ms) { if (read_concern) { mongoc_transaction_opts_set_read_concern (opts, read_concern); } if (write_concern) { mongoc_transaction_opts_set_write_concern (opts, write_concern); } if (read_prefs) { mongoc_transaction_opts_set_read_prefs (opts, read_prefs); } if (max_commit_time_ms != DEFAULT_MAX_COMMIT_TIME_MS) { mongoc_transaction_opts_set_max_commit_time_ms (opts, max_commit_time_ms); } } static void txn_opts_cleanup (mongoc_transaction_opt_t *opts) { /* null inputs are ok */ mongoc_read_concern_destroy (opts->read_concern); mongoc_write_concern_destroy (opts->write_concern); mongoc_read_prefs_destroy (opts->read_prefs); /* prepare opts for reuse */ opts->read_concern = NULL; opts->write_concern = NULL; opts->read_prefs = NULL; opts->max_commit_time_ms = DEFAULT_MAX_COMMIT_TIME_MS; } static void txn_opts_copy (const mongoc_transaction_opt_t *src, mongoc_transaction_opt_t *dst) { txn_opts_cleanup (dst); /* null inputs are ok for these copy functions */ dst->read_concern = mongoc_read_concern_copy (src->read_concern); dst->write_concern = mongoc_write_concern_copy (src->write_concern); dst->read_prefs = mongoc_read_prefs_copy (src->read_prefs); dst->max_commit_time_ms = src->max_commit_time_ms; } static void copy_labels_plus_unknown_commit_result (const bson_t *src, bson_t *dst) { bson_iter_t iter; bson_iter_t src_label; bson_t dst_labels; char str[16]; uint32_t i = 0; const char *key; BSON_APPEND_ARRAY_BEGIN (dst, "errorLabels", &dst_labels); BSON_APPEND_UTF8 (&dst_labels, "0", UNKNOWN_COMMIT_RESULT); /* append any other errorLabels already in "src" */ if (bson_iter_init_find (&iter, src, "errorLabels") && bson_iter_recurse (&iter, &src_label)) { while (bson_iter_next (&src_label) && BSON_ITER_HOLDS_UTF8 (&src_label)) { if (strcmp (bson_iter_utf8 (&src_label, NULL), UNKNOWN_COMMIT_RESULT) != 0) { i++; bson_uint32_to_string (i, &key, str, sizeof str); BSON_APPEND_UTF8 ( &dst_labels, key, bson_iter_utf8 (&src_label, NULL)); } } } bson_append_array_end (dst, &dst_labels); } static bool txn_abort (mongoc_client_session_t *session, bson_t *reply, bson_error_t *error) { bson_t cmd = BSON_INITIALIZER; bson_t opts = BSON_INITIALIZER; bson_error_t err_local; bson_error_t *err_ptr = error ? 
error : &err_local; bson_t reply_local = BSON_INITIALIZER; mongoc_write_err_type_t error_type; bool r = false; _mongoc_bson_init_if_set (reply); if (!mongoc_client_session_append (session, &opts, err_ptr)) { GOTO (done); } if (session->txn.opts.write_concern) { if (!mongoc_write_concern_append (session->txn.opts.write_concern, &opts)) { bson_set_error (err_ptr, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Invalid transaction write concern"); GOTO (done); } } BSON_APPEND_INT32 (&cmd, "abortTransaction", 1); if (session->recovery_token) { BSON_APPEND_DOCUMENT (&cmd, "recoveryToken", session->recovery_token); } /* will be reinitialized by mongoc_client_write_command_with_opts */ bson_destroy (&reply_local); r = mongoc_client_write_command_with_opts ( session->client, "admin", &cmd, &opts, &reply_local, err_ptr); /* Transactions Spec: "Drivers MUST retry the commitTransaction command once * after it fails with a retryable error", same for abort */ error_type = _mongoc_write_error_get_type (r, err_ptr, &reply_local); if (error_type == MONGOC_WRITE_ERR_RETRY) { _mongoc_client_session_unpin (session); bson_destroy (&reply_local); r = mongoc_client_write_command_with_opts ( session->client, "admin", &cmd, &opts, &reply_local, err_ptr); } if (!r) { /* we won't return an error from abortTransaction, so warn */ MONGOC_WARNING ("Error in abortTransaction: %s", err_ptr->message); _mongoc_client_session_unpin (session); } done: bson_destroy (&reply_local); bson_destroy (&cmd); bson_destroy (&opts); return r; } static mongoc_write_concern_t * create_commit_retry_wc (const mongoc_write_concern_t *existing_wc) { mongoc_write_concern_t *wc; wc = existing_wc ? mongoc_write_concern_copy (existing_wc) : mongoc_write_concern_new (); /* Transactions spec: "If the modified write concern does not include a * wtimeout value, drivers MUST also apply wtimeout: 10000 to the write * concern in order to avoid waiting forever if the majority write concern * cannot be satisfied." */ if (mongoc_write_concern_get_wtimeout_int64 (wc) <= 0) { mongoc_write_concern_set_wtimeout_int64 ( wc, MONGOC_DEFAULT_WTIMEOUT_FOR_COMMIT_RETRY); } /* Transactions spec: "If the transaction is using a write concern that is * not the server default, any other write concern options MUST be left as-is * when applying w:majority. */ mongoc_write_concern_set_w (wc, MONGOC_WRITE_CONCERN_W_MAJORITY); return wc; } static bool txn_commit (mongoc_client_session_t *session, bool explicitly_retrying, bson_t *reply, bson_error_t *error) { bson_t cmd = BSON_INITIALIZER; bson_t opts = BSON_INITIALIZER; bson_error_t err_local = {0}; bson_error_t *err_ptr = error ? 
error : &err_local; bson_t reply_local = BSON_INITIALIZER; mongoc_write_err_type_t error_type; bool r = false; bool retrying_after_error = false; mongoc_write_concern_t *retry_wc = NULL; _mongoc_bson_init_if_set (reply); BSON_APPEND_INT32 (&cmd, "commitTransaction", 1); if (session->recovery_token) { BSON_APPEND_DOCUMENT (&cmd, "recoveryToken", session->recovery_token); } retry: if (!mongoc_client_session_append (session, &opts, err_ptr)) { GOTO (done); } if (session->txn.opts.max_commit_time_ms != DEFAULT_MAX_COMMIT_TIME_MS) { if (!bson_append_int64 ( &opts, "maxTimeMS", -1, session->txn.opts.max_commit_time_ms)) { bson_set_error (err_ptr, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "error appending maxCommitTimeMS"); GOTO (done); } } /* Transactions Spec: "When commitTransaction is retried, either by the * driver's internal retry-once logic or explicitly by the user calling * commitTransaction again, drivers MUST apply w:majority to the write * concern of the commitTransaction command." */ if (!retry_wc && (retrying_after_error || explicitly_retrying)) { retry_wc = create_commit_retry_wc (session->txn.opts.write_concern ? session->txn.opts.write_concern : session->client->write_concern); } if (retry_wc || session->txn.opts.write_concern) { if (!mongoc_write_concern_append ( retry_wc ? retry_wc : session->txn.opts.write_concern, &opts)) { bson_set_error (err_ptr, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Invalid transaction write concern"); GOTO (done); } } /* will be reinitialized by mongoc_client_write_command_with_opts */ bson_destroy (&reply_local); r = mongoc_client_write_command_with_opts ( session->client, "admin", &cmd, &opts, &reply_local, err_ptr); /* Transactions Spec: "Drivers MUST retry the commitTransaction command once * after it fails with a retryable error", same for abort */ error_type = _mongoc_write_error_get_type (r, err_ptr, &reply_local); if (!retrying_after_error && error_type == MONGOC_WRITE_ERR_RETRY) { retrying_after_error = true; /* retry after error only once */ _mongoc_client_session_unpin (session); bson_reinit (&opts); GOTO (retry); } /* Transactions Spec: "add the UnknownTransactionCommitResult error label * when commitTransaction fails with a network error, server selection * error, MaxTimeMSExpired error, or write concern failed / timeout." */ if (!r && (err_ptr->domain == MONGOC_ERROR_SERVER_SELECTION || error_type == MONGOC_WRITE_ERR_RETRY || error_type == MONGOC_WRITE_ERR_WRITE_CONCERN || err_ptr->code == MONGOC_ERROR_MAX_TIME_MS_EXPIRED)) { /* Drivers MUST unpin a ClientSession when any individual * commitTransaction command attempt fails with an * UnknownTransactionCommitResult error label. 
Do this even if we won't * actually apply the error label due to reply being NULL */ _mongoc_client_session_unpin (session); if (reply) { bson_copy_to_excluding_noinit ( &reply_local, reply, "errorLabels", NULL); copy_labels_plus_unknown_commit_result (&reply_local, reply); } } else if (reply) { /* maintain invariants: reply & reply_local are valid until the end */ bson_destroy (reply); bson_steal (reply, &reply_local); bson_init (&reply_local); } done: bson_destroy (&reply_local); bson_destroy (&cmd); bson_destroy (&opts); if (retry_wc) { mongoc_write_concern_destroy (retry_wc); } return r; } mongoc_transaction_opt_t * mongoc_transaction_opts_new (void) { mongoc_transaction_opt_t *opts; opts = (mongoc_transaction_opt_t *) bson_malloc0 ( sizeof (mongoc_transaction_opt_t)); opts->max_commit_time_ms = DEFAULT_MAX_COMMIT_TIME_MS; return opts; } mongoc_transaction_opt_t * mongoc_transaction_opts_clone (const mongoc_transaction_opt_t *opts) { mongoc_transaction_opt_t *cloned_opts; ENTRY; BSON_ASSERT (opts); cloned_opts = mongoc_transaction_opts_new (); txn_opts_copy (opts, cloned_opts); RETURN (cloned_opts); } void mongoc_transaction_opts_destroy (mongoc_transaction_opt_t *opts) { ENTRY; if (!opts) { EXIT; } txn_opts_cleanup (opts); bson_free (opts); EXIT; } void mongoc_transaction_opts_set_max_commit_time_ms (mongoc_transaction_opt_t *opts, int64_t max_commit_time_ms) { BSON_ASSERT (opts); opts->max_commit_time_ms = max_commit_time_ms; } int64_t mongoc_transaction_opts_get_max_commit_time_ms (mongoc_transaction_opt_t *opts) { BSON_ASSERT (opts); return opts->max_commit_time_ms; } void mongoc_transaction_opts_set_read_concern ( mongoc_transaction_opt_t *opts, const mongoc_read_concern_t *read_concern) { BSON_ASSERT (opts); mongoc_read_concern_destroy (opts->read_concern); opts->read_concern = mongoc_read_concern_copy (read_concern); } const mongoc_read_concern_t * mongoc_transaction_opts_get_read_concern (const mongoc_transaction_opt_t *opts) { BSON_ASSERT (opts); return opts->read_concern; } void mongoc_transaction_opts_set_write_concern ( mongoc_transaction_opt_t *opts, const mongoc_write_concern_t *write_concern) { BSON_ASSERT (opts); mongoc_write_concern_destroy (opts->write_concern); opts->write_concern = mongoc_write_concern_copy (write_concern); } const mongoc_write_concern_t * mongoc_transaction_opts_get_write_concern (const mongoc_transaction_opt_t *opts) { BSON_ASSERT (opts); return opts->write_concern; } void mongoc_transaction_opts_set_read_prefs (mongoc_transaction_opt_t *opts, const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (opts); mongoc_read_prefs_destroy (opts->read_prefs); opts->read_prefs = mongoc_read_prefs_copy (read_prefs); } const mongoc_read_prefs_t * mongoc_transaction_opts_get_read_prefs (const mongoc_transaction_opt_t *opts) { BSON_ASSERT (opts); return opts->read_prefs; } mongoc_session_opt_t * mongoc_session_opts_new (void) { mongoc_session_opt_t *opts = bson_malloc0 (sizeof (mongoc_session_opt_t)); /* Driver Sessions Spec: causal consistency is true by default */ mongoc_session_opts_set_causal_consistency (opts, true); return opts; } void mongoc_session_opts_set_causal_consistency (mongoc_session_opt_t *opts, bool causal_consistency) { ENTRY; BSON_ASSERT (opts); if (causal_consistency) { opts->flags |= MONGOC_SESSION_CAUSAL_CONSISTENCY; } else { opts->flags &= ~MONGOC_SESSION_CAUSAL_CONSISTENCY; } EXIT; } bool mongoc_session_opts_get_causal_consistency (const mongoc_session_opt_t *opts) { ENTRY; BSON_ASSERT (opts); RETURN (!!(opts->flags & 
MONGOC_SESSION_CAUSAL_CONSISTENCY)); } void mongoc_session_opts_set_default_transaction_opts ( mongoc_session_opt_t *opts, const mongoc_transaction_opt_t *txn_opts) { ENTRY; BSON_ASSERT (opts); BSON_ASSERT (txn_opts); txn_opts_set (&opts->default_txn_opts, txn_opts->read_concern, txn_opts->write_concern, txn_opts->read_prefs, txn_opts->max_commit_time_ms); EXIT; } const mongoc_transaction_opt_t * mongoc_session_opts_get_default_transaction_opts ( const mongoc_session_opt_t *opts) { ENTRY; BSON_ASSERT (opts); RETURN (&opts->default_txn_opts); } static void _mongoc_session_opts_copy (const mongoc_session_opt_t *src, mongoc_session_opt_t *dst) { dst->flags = src->flags; txn_opts_copy (&src->default_txn_opts, &dst->default_txn_opts); } mongoc_session_opt_t * mongoc_session_opts_clone (const mongoc_session_opt_t *opts) { mongoc_session_opt_t *cloned_opts; ENTRY; BSON_ASSERT (opts); cloned_opts = bson_malloc0 (sizeof (mongoc_session_opt_t)); _mongoc_session_opts_copy (opts, cloned_opts); RETURN (cloned_opts); } void mongoc_session_opts_destroy (mongoc_session_opt_t *opts) { ENTRY; if (!opts) { EXIT; } txn_opts_cleanup (&opts->default_txn_opts); bson_free (opts); EXIT; } static bool _mongoc_server_session_uuid (uint8_t *data /* OUT */, bson_error_t *error) { #ifdef MONGOC_ENABLE_CRYPTO /* https://tools.ietf.org/html/rfc4122#page-14 * o Set the two most significant bits (bits 6 and 7) of the * clock_seq_hi_and_reserved to zero and one, respectively. * * o Set the four most significant bits (bits 12 through 15) of the * time_hi_and_version field to the 4-bit version number from * Section 4.1.3. * * o Set all the other bits to randomly (or pseudo-randomly) chosen * values. */ if (!_mongoc_rand_bytes (data, 16)) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_SESSION_FAILURE, "Could not generate UUID for logical session id"); return false; } data[6] = (uint8_t) (0x40 | (data[6] & 0xf)); data[8] = (uint8_t) (0x80 | (data[8] & 0x3f)); return true; #else /* no _mongoc_rand_bytes without a crypto library */ bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_SESSION_FAILURE, "Could not generate UUID for logical session id, we need a" " cryptography library like libcrypto, Common Crypto, or" " CNG"); return false; #endif } bool _mongoc_parse_cluster_time (const bson_t *cluster_time, uint32_t *timestamp, uint32_t *increment) { bson_iter_t iter; char *s; if (!cluster_time || !bson_iter_init_find (&iter, cluster_time, "clusterTime") || !BSON_ITER_HOLDS_TIMESTAMP (&iter)) { s = bson_as_json (cluster_time, NULL); MONGOC_ERROR ("Cannot parse cluster time from %s\n", s); bson_free (s); return false; } bson_iter_timestamp (&iter, timestamp, increment); return true; } bool _mongoc_cluster_time_greater (const bson_t *new, const bson_t *old) { uint32_t new_t, new_i, old_t, old_i; if (!_mongoc_parse_cluster_time (new, &new_t, &new_i) || !_mongoc_parse_cluster_time (old, &old_t, &old_i)) { return false; } return (new_t > old_t) || (new_t == old_t && new_i > old_i); } void _mongoc_client_session_handle_reply (mongoc_client_session_t *session, bool is_acknowledged, const bson_t *reply) { bson_iter_t iter; uint32_t len; const uint8_t *data; bson_t cluster_time; uint32_t t; uint32_t i; BSON_ASSERT (session); if (!reply || !bson_iter_init (&iter, reply)) { return; } if (mongoc_error_has_label (reply, "TransientTransactionError")) { /* Transaction Spec: "Drivers MUST unpin a ClientSession when a command * within a transaction, including commitTransaction and abortTransaction, * fails with a 
TransientTransactionError". If the server reply included * a TransientTransactionError, we unpin here. If a network error caused * us to add a label client-side, we unpin in network_error_reply. */ session->server_id = 0; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "$clusterTime") && BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_iter_document (&iter, &len, &data); BSON_ASSERT (bson_init_static (&cluster_time, data, (size_t) len)); mongoc_client_session_advance_cluster_time (session, &cluster_time); } else if (!strcmp (bson_iter_key (&iter), "operationTime") && BSON_ITER_HOLDS_TIMESTAMP (&iter) && is_acknowledged) { bson_iter_timestamp (&iter, &t, &i); mongoc_client_session_advance_operation_time (session, t, i); } } } mongoc_server_session_t * _mongoc_server_session_new (bson_error_t *error) { uint8_t uuid_data[16]; mongoc_server_session_t *s; ENTRY; if (!_mongoc_server_session_uuid (uuid_data, error)) { RETURN (NULL); } s = bson_malloc0 (sizeof (mongoc_server_session_t)); s->last_used_usec = SESSION_NEVER_USED; s->prev = NULL; s->next = NULL; bson_init (&s->lsid); bson_append_binary ( &s->lsid, "id", 2, BSON_SUBTYPE_UUID, uuid_data, sizeof uuid_data); /* transaction number is a positive integer and will be incremented before * each use, so ensure it is initialized to zero. */ s->txn_number = 0; RETURN (s); } bool _mongoc_server_session_timed_out (const mongoc_server_session_t *server_session, int64_t session_timeout_minutes) { int64_t timeout_usec; const int64_t minute_to_usec = 60 * 1000 * 1000; ENTRY; if (session_timeout_minutes == MONGOC_NO_SESSIONS) { /* not connected right now; keep the session */ return false; } if (server_session->last_used_usec == SESSION_NEVER_USED) { return false; } /* Driver Sessions Spec: if a session has less than one minute left before * becoming stale, discard it */ timeout_usec = server_session->last_used_usec + session_timeout_minutes * minute_to_usec; RETURN (timeout_usec - bson_get_monotonic_time () < 1 * minute_to_usec); } void _mongoc_server_session_destroy (mongoc_server_session_t *server_session) { ENTRY; bson_destroy (&server_session->lsid); bson_free (server_session); EXIT; } mongoc_client_session_t * _mongoc_client_session_new (mongoc_client_t *client, mongoc_server_session_t *server_session, const mongoc_session_opt_t *opts, uint32_t client_session_id) { mongoc_client_session_t *session; ENTRY; BSON_ASSERT (client); session = bson_malloc0 (sizeof (mongoc_client_session_t)); session->client = client; session->client_generation = client->generation; session->server_session = server_session; session->client_session_id = client_session_id; bson_init (&session->cluster_time); txn_opts_set (&session->opts.default_txn_opts, client->read_concern, client->write_concern, client->read_prefs, DEFAULT_MAX_COMMIT_TIME_MS); if (opts) { session->opts.flags = opts->flags; txn_opts_set (&session->opts.default_txn_opts, opts->default_txn_opts.read_concern, opts->default_txn_opts.write_concern, opts->default_txn_opts.read_prefs, opts->default_txn_opts.max_commit_time_ms); } else { /* sessions are causally consistent by default */ session->opts.flags = MONGOC_SESSION_CAUSAL_CONSISTENCY; } /* these values are used for testing only. 
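 * with_txn_timeout_ms overrides the default WITH_TXN_TIMEOUT_MS used by
 * mongoc_client_session_with_transaction (), and fail_commit_label makes
 * mongoc_client_session_commit_transaction () return a mocked reply that
 * carries that error label.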
*/ session->with_txn_timeout_ms = 0; session->fail_commit_label = NULL; RETURN (session); } mongoc_client_t * mongoc_client_session_get_client (const mongoc_client_session_t *session) { BSON_ASSERT (session); return session->client; } const mongoc_session_opt_t * mongoc_client_session_get_opts (const mongoc_client_session_t *session) { BSON_ASSERT (session); return &session->opts; } const bson_t * mongoc_client_session_get_lsid (const mongoc_client_session_t *session) { BSON_ASSERT (session); return &session->server_session->lsid; } const bson_t * mongoc_client_session_get_cluster_time (const mongoc_client_session_t *session) { BSON_ASSERT (session); if (bson_empty (&session->cluster_time)) { return NULL; } return &session->cluster_time; } uint32_t mongoc_client_session_get_server_id (const mongoc_client_session_t *session) { BSON_ASSERT (session); return session->server_id; } void mongoc_client_session_advance_cluster_time (mongoc_client_session_t *session, const bson_t *cluster_time) { uint32_t t, i; ENTRY; if (bson_empty (&session->cluster_time) && _mongoc_parse_cluster_time (cluster_time, &t, &i)) { bson_destroy (&session->cluster_time); bson_copy_to (cluster_time, &session->cluster_time); EXIT; } if (_mongoc_cluster_time_greater (cluster_time, &session->cluster_time)) { bson_destroy (&session->cluster_time); bson_copy_to (cluster_time, &session->cluster_time); } EXIT; } void mongoc_client_session_get_operation_time ( const mongoc_client_session_t *session, uint32_t *timestamp, uint32_t *increment) { BSON_ASSERT (session); BSON_ASSERT (timestamp); BSON_ASSERT (increment); *timestamp = session->operation_timestamp; *increment = session->operation_increment; } void mongoc_client_session_advance_operation_time (mongoc_client_session_t *session, uint32_t timestamp, uint32_t increment) { ENTRY; BSON_ASSERT (session); if (timestamp > session->operation_timestamp || (timestamp == session->operation_timestamp && increment > session->operation_increment)) { session->operation_timestamp = timestamp; session->operation_increment = increment; } EXIT; } static bool timeout_exceeded (int64_t expire_at) { int64_t current_time = bson_get_monotonic_time (); return current_time >= expire_at; } static bool _max_time_ms_failure (bson_t *reply) { bson_iter_t iter; bson_iter_t descendant; if (!reply) { return false; } /* We can fail with a maxTimeMS error with the error code at the top level, or nested within a writeConcernError. */ if (bson_iter_init_find (&iter, reply, "codeName") && BSON_ITER_HOLDS_UTF8 (&iter) && 0 == strcmp (bson_iter_utf8 (&iter, NULL), MAX_TIME_MS_EXPIRED)) { return true; } bson_iter_init (&iter, reply); if (bson_iter_find_descendant ( &iter, "writeConcernError.codeName", &descendant) && BSON_ITER_HOLDS_UTF8 (&descendant) && 0 == strcmp (bson_iter_utf8 (&descendant, NULL), MAX_TIME_MS_EXPIRED)) { return true; } return false; } bool mongoc_client_session_with_transaction ( mongoc_client_session_t *session, mongoc_client_session_with_transaction_cb_t cb, const mongoc_transaction_opt_t *opts, void *ctx, bson_t *reply, bson_error_t *error) { mongoc_transaction_state_t state; int64_t timeout; int64_t expire_at; bson_t local_reply; bson_t *active_reply = NULL; bool res; ENTRY; timeout = session->with_txn_timeout_ms > 0 ? session->with_txn_timeout_ms : WITH_TXN_TIMEOUT_MS; expire_at = bson_get_monotonic_time () + ((int64_t) timeout * 1000); /* Attempt to wrap a user callback in start- and end- transaction semantics. 
If this fails for transient reasons, restart, either from the very beginning, or just retry committing the transaction. Will retry until the timeout WITH_TXN_TIMEOUT_MS is exhausted. At the top of this loop, active_reply should always be NULL, and local_reply should always be uninitialized. */ while (true) { res = mongoc_client_session_start_transaction (session, opts, error); if (!res) { GOTO (done); } res = cb (session, ctx, &active_reply, error); state = session->txn.state; /* If the user cb set a reply, use it. Otherwise, sub in local_reply since we must have an active reply object one way or another. */ if (!active_reply) { bson_init (&local_reply); active_reply = &local_reply; } if (!res) { if (state == MONGOC_TRANSACTION_STARTING || state == MONGOC_TRANSACTION_IN_PROGRESS) { BSON_ASSERT ( mongoc_client_session_abort_transaction (session, NULL)); } if (mongoc_error_has_label (active_reply, TRANSIENT_TXN_ERR) && !timeout_exceeded (expire_at)) { bson_destroy (active_reply); active_reply = NULL; continue; } /* Unknown error running callback, fail. */ GOTO (done); } if (state == MONGOC_TRANSACTION_ABORTED || state == MONGOC_TRANSACTION_NONE || state == MONGOC_TRANSACTION_COMMITTED || state == MONGOC_TRANSACTION_COMMITTED_EMPTY) { GOTO (done); } /* Whether or not we used local_reply above, use it now, but access it * through active_reply so cleanup in DONE is simpler. */ bson_destroy (active_reply); active_reply = &local_reply; /* Commit the transaction, retrying either from here or from the outer loop on error. At the top of this loop, active_reply should always be pointing to an uninitialized stack-allocated bson_t, so we can pass it into commit_transaction, which requires this like our other public functions that take a bson_t reply. */ while (true) { res = mongoc_client_session_commit_transaction ( session, active_reply, error); if (!res) { /* If we have a MaxTimeMsExpired error, fail and propogate the error to the caller. */ if (_max_time_ms_failure (active_reply)) { GOTO (done); } if (mongoc_error_has_label (active_reply, UNKNOWN_COMMIT_RESULT) && !timeout_exceeded (expire_at)) { /* Commit_transaction applies majority write concern on retry * attempts. * * Here, we don't want to set active_reply = NULL when we * destroy, because we want it to point to an uninitialized * bson_t at the top of this loop every time.*/ bson_destroy (active_reply); continue; } if (mongoc_error_has_label (active_reply, TRANSIENT_TXN_ERR) && !timeout_exceeded (expire_at)) { /* In the case of a transient txn error, go back to outside loop. We must set the reply to NULL so it may be used by the cb. */ bson_destroy (active_reply); active_reply = NULL; break; } /* Unknown error committing transaction, fail. */ GOTO (done); } /* Transaction successfully committed! */ GOTO (done); } } done: /* At this point, active_reply is either pointing to the user's reply object, or our local one on the stack, or is NULL. 
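 * If the caller asked for a reply it receives a copy of whatever reply is
 * active, or an empty document when there is none, before the active reply
 * is destroyed.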
*/ if (reply && active_reply) { bson_copy_to (active_reply, reply); } else if (reply) { bson_init (reply); } bson_destroy (active_reply); RETURN (res); } bool mongoc_client_session_start_transaction (mongoc_client_session_t *session, const mongoc_transaction_opt_t *opts, bson_error_t *error) { mongoc_server_description_t *sd; bool ret; ENTRY; BSON_ASSERT (session); ret = true; sd = mongoc_client_select_server ( session->client, true /* primary */, NULL, error); if (!sd) { ret = false; GOTO (done); } if (sd->max_wire_version < 7 || (sd->max_wire_version < 8 && sd->type == MONGOC_SERVER_MONGOS)) { bson_set_error (error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Multi-document transactions are not supported by this " "server version"); ret = false; GOTO (done); } /* use "switch" so that static checkers ensure we handle all states */ switch (session->txn.state) { case MONGOC_TRANSACTION_STARTING: case MONGOC_TRANSACTION_IN_PROGRESS: bson_set_error (error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Transaction already in progress"); ret = false; GOTO (done); case MONGOC_TRANSACTION_ENDING: MONGOC_ERROR ("starting txn in invalid state MONGOC_TRANSACTION_ENDING"); abort (); case MONGOC_TRANSACTION_COMMITTED: case MONGOC_TRANSACTION_COMMITTED_EMPTY: case MONGOC_TRANSACTION_ABORTED: case MONGOC_TRANSACTION_NONE: default: break; } session->server_session->txn_number++; txn_opts_set (&session->txn.opts, session->opts.default_txn_opts.read_concern, session->opts.default_txn_opts.write_concern, session->opts.default_txn_opts.read_prefs, session->opts.default_txn_opts.max_commit_time_ms); if (opts) { txn_opts_set (&session->txn.opts, opts->read_concern, opts->write_concern, opts->read_prefs, opts->max_commit_time_ms); } if (!mongoc_write_concern_is_acknowledged ( session->txn.opts.write_concern)) { bson_set_error ( error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Transactions do not support unacknowledged write concern"); ret = false; GOTO (done); } /* Transactions Spec: Starting a new transaction on a pinned ClientSession * MUST unpin the session. */ _mongoc_client_session_unpin (session); session->txn.state = MONGOC_TRANSACTION_STARTING; /* Transactions spec: "Drivers MUST clear a session's cached * 'recoveryToken' when transitioning to the 'no transaction' or * 'starting transaction' state." */ bson_destroy (session->recovery_token); session->recovery_token = NULL; done: mongoc_server_description_destroy (sd); return ret; } bool mongoc_client_session_in_transaction (const mongoc_client_session_t *session) { ENTRY; BSON_ASSERT (session); /* call the internal function, which would allow a NULL session */ RETURN (_mongoc_client_session_in_txn (session)); } bool mongoc_client_session_commit_transaction (mongoc_client_session_t *session, bson_t *reply, bson_error_t *error) { bool r = false; ENTRY; BSON_ASSERT (session); /* For testing only, mock out certain kinds of errors. */ if (session->fail_commit_label) { bson_t labels; BSON_ASSERT (reply); bson_init (reply); BSON_APPEND_ARRAY_BEGIN (reply, "errorLabels", &labels); BSON_APPEND_UTF8 (&labels, "0", session->fail_commit_label); /* Waste the test timeout, if there is one set. */ if (session->with_txn_timeout_ms) { _mongoc_usleep (session->with_txn_timeout_ms * 1000); } RETURN (r); } /* See Transactions Spec for state diagram. 
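 *
 * Illustrative sketch of the explicit commit-retry pattern this state
 * machine permits (editorial example, not part of this source; assumes bool
 * ok, bson_t reply and bson_error_t error locals, and that a real caller
 * would bound the number of retries):
 *
 *   ok = mongoc_client_session_commit_transaction (session, &reply, &error);
 *   while (!ok &&
 *          mongoc_error_has_label (&reply, "UnknownTransactionCommitResult")) {
 *      bson_destroy (&reply);
 *      ok = mongoc_client_session_commit_transaction (session, &reply, &error);
 *   }
 *   bson_destroy (&reply);
 *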
In COMMITTED state, user can call * commit again to retry after network error */ switch (session->txn.state) { case MONGOC_TRANSACTION_NONE: bson_set_error (error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "No transaction started"); _mongoc_bson_init_if_set (reply); break; case MONGOC_TRANSACTION_STARTING: case MONGOC_TRANSACTION_COMMITTED_EMPTY: /* we sent no commands, not actually started on server */ session->txn.state = MONGOC_TRANSACTION_COMMITTED_EMPTY; _mongoc_bson_init_if_set (reply); r = true; break; case MONGOC_TRANSACTION_COMMITTED: case MONGOC_TRANSACTION_IN_PROGRESS: { bool explicitly_retrying = (session->txn.state == MONGOC_TRANSACTION_COMMITTED); /* in MONGOC_TRANSACTION_ENDING we add txnNumber and autocommit: false * to the commitTransaction command, but if it fails with network error * we add UnknownTransactionCommitResult not TransientTransactionError */ session->txn.state = MONGOC_TRANSACTION_ENDING; r = txn_commit (session, explicitly_retrying, reply, error); session->txn.state = MONGOC_TRANSACTION_COMMITTED; break; } case MONGOC_TRANSACTION_ENDING: MONGOC_ERROR ("commit called in invalid state MONGOC_TRANSACTION_ENDING"); abort (); case MONGOC_TRANSACTION_ABORTED: default: bson_set_error ( error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Cannot call commitTransaction after calling abortTransaction"); _mongoc_bson_init_if_set (reply); break; } RETURN (r); } bool mongoc_client_session_abort_transaction (mongoc_client_session_t *session, bson_error_t *error) { ENTRY; BSON_ASSERT (session); switch (session->txn.state) { case MONGOC_TRANSACTION_STARTING: /* we sent no commands, not actually started on server */ session->txn.state = MONGOC_TRANSACTION_ABORTED; txn_opts_cleanup (&session->txn.opts); RETURN (true); case MONGOC_TRANSACTION_IN_PROGRESS: session->txn.state = MONGOC_TRANSACTION_ENDING; /* Transactions Spec: ignore errors from abortTransaction command */ txn_abort (session, NULL, NULL); session->txn.state = MONGOC_TRANSACTION_ABORTED; RETURN (true); case MONGOC_TRANSACTION_COMMITTED: case MONGOC_TRANSACTION_COMMITTED_EMPTY: bson_set_error ( error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Cannot call abortTransaction after calling commitTransaction"); RETURN (false); case MONGOC_TRANSACTION_ABORTED: bson_set_error (error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Cannot call abortTransaction twice"); RETURN (false); case MONGOC_TRANSACTION_ENDING: MONGOC_ERROR ("abort called in invalid state MONGOC_TRANSACTION_ENDING"); abort (); case MONGOC_TRANSACTION_NONE: default: bson_set_error (error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "No transaction started"); RETURN (false); } } bool _mongoc_client_session_from_iter (mongoc_client_t *client, const bson_iter_t *iter, mongoc_client_session_t **cs, bson_error_t *error) { ENTRY; /* must be int64 that fits in uint32 */ if (!BSON_ITER_HOLDS_INT64 (iter) || bson_iter_int64 (iter) > 0xffffffff) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid sessionId"); RETURN (false); } RETURN (_mongoc_client_lookup_session ( client, (uint32_t) bson_iter_int64 (iter), cs, error)); } /* Returns true if in the middle of a transaction. Note: this returns false if * the commit/abort is running. 
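 * Callers that need the commit/abort phase to count as part of the
 * transaction use _mongoc_client_session_in_txn_or_ending, defined just
 * below.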
*/ bool _mongoc_client_session_in_txn (const mongoc_client_session_t *session) { if (!session) { return false; } /* use "switch" so that static checkers ensure we handle all states */ switch (session->txn.state) { case MONGOC_TRANSACTION_STARTING: case MONGOC_TRANSACTION_IN_PROGRESS: return true; case MONGOC_TRANSACTION_NONE: case MONGOC_TRANSACTION_ENDING: case MONGOC_TRANSACTION_COMMITTED: case MONGOC_TRANSACTION_COMMITTED_EMPTY: case MONGOC_TRANSACTION_ABORTED: default: return false; } } /* Like _mongoc_client_session_in_txn, but also returns true if running the * commit/abort for this transaction. */ bool _mongoc_client_session_in_txn_or_ending (const mongoc_client_session_t *session) { if (!session) { return false; } /* use "switch" so that static checkers ensure we handle all states */ switch (session->txn.state) { case MONGOC_TRANSACTION_STARTING: case MONGOC_TRANSACTION_IN_PROGRESS: case MONGOC_TRANSACTION_ENDING: return true; case MONGOC_TRANSACTION_NONE: case MONGOC_TRANSACTION_COMMITTED: case MONGOC_TRANSACTION_COMMITTED_EMPTY: case MONGOC_TRANSACTION_ABORTED: default: return false; } } bool _mongoc_client_session_txn_in_progress (const mongoc_client_session_t *session) { if (!session) { return false; } return session->txn.state == MONGOC_TRANSACTION_IN_PROGRESS; } /* *-------------------------------------------------------------------------- * * _mongoc_client_session_append_txn -- * * Add transaction fields besides "readConcern" to @cmd. * * Returns: * Returns false and sets @error if @cmd is empty, otherwise returns * true. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool _mongoc_client_session_append_txn (mongoc_client_session_t *session, bson_t *cmd, bson_error_t *error) { mongoc_transaction_t *txn; ENTRY; if (!session) { RETURN (true); } if (bson_empty0 (cmd)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Empty command in transaction"); RETURN (false); } txn = &session->txn; /* See Transactions Spec for state transitions. In COMMITTED / ABORTED, the * next operation resets the session and moves to TRANSACTION_NONE */ switch (session->txn.state) { case MONGOC_TRANSACTION_STARTING: txn->state = MONGOC_TRANSACTION_IN_PROGRESS; bson_append_bool (cmd, "startTransaction", 16, true); /* FALL THROUGH */ case MONGOC_TRANSACTION_IN_PROGRESS: case MONGOC_TRANSACTION_ENDING: bson_append_int64 ( cmd, "txnNumber", 9, session->server_session->txn_number); bson_append_bool (cmd, "autocommit", 10, false); RETURN (true); case MONGOC_TRANSACTION_COMMITTED: if (!strcmp (_mongoc_get_command_name (cmd), "commitTransaction")) { /* send commitTransaction again */ bson_append_int64 ( cmd, "txnNumber", 9, session->server_session->txn_number); bson_append_bool (cmd, "autocommit", 10, false); RETURN (true); } /* FALL THROUGH */ case MONGOC_TRANSACTION_COMMITTED_EMPTY: case MONGOC_TRANSACTION_ABORTED: txn_opts_cleanup (&session->txn.opts); txn->state = MONGOC_TRANSACTION_NONE; /* Transactions spec: "Drivers MUST clear a session's cached * 'recoveryToken' when transitioning to the 'no transaction' or * 'starting transaction' state." 
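 *
 * For reference, an editorial sketch (not from this source) summarizing what
 * the cases above append: the first command in a transaction leaves this
 * function with
 *
 *   { ..., "startTransaction": true, "txnNumber": <n>, "autocommit": false }
 *
 * and each subsequent command in the same transaction with only
 *
 *   { ..., "txnNumber": <n>, "autocommit": false }
 *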
*/ bson_destroy (session->recovery_token); session->recovery_token = NULL; RETURN (true); case MONGOC_TRANSACTION_NONE: default: RETURN (true); } } /* *-------------------------------------------------------------------------- * * _mongoc_client_session_append_read_concern -- * * Add read concern if we're doing a read outside a transaction, or if * we're starting a transaction, or if the user explicitly passed a read * concern in some function's "opts". The contents of the read concern * are "level" and/or "afterClusterTime" - if both are empty, don't add * read concern. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void _mongoc_client_session_append_read_concern (const mongoc_client_session_t *cs, const bson_t *rc, bool is_read_command, bson_t *cmd) { const mongoc_read_concern_t *txn_rc; mongoc_transaction_state_t txn_state; bool user_rc_has_level; bool txn_has_level; bool has_timestamp; bool has_level; bson_t child; ENTRY; BSON_ASSERT (cs); txn_state = cs->txn.state; txn_rc = cs->txn.opts.read_concern; if (txn_state == MONGOC_TRANSACTION_IN_PROGRESS) { return; } has_timestamp = (txn_state == MONGOC_TRANSACTION_STARTING || is_read_command) && mongoc_session_opts_get_causal_consistency (&cs->opts) && cs->operation_timestamp; user_rc_has_level = rc && bson_has_field (rc, "level"); txn_has_level = txn_state == MONGOC_TRANSACTION_STARTING && !mongoc_read_concern_is_default (txn_rc); has_level = user_rc_has_level || txn_has_level; if (!has_timestamp && !has_level) { return; } bson_append_document_begin (cmd, "readConcern", 11, &child); if (rc) { bson_concat (&child, rc); } if (txn_state == MONGOC_TRANSACTION_STARTING) { /* add transaction's read concern level unless user overrides */ if (txn_has_level && !user_rc_has_level) { bson_append_utf8 (&child, "level", 5, txn_rc->level, -1); } } if (has_timestamp) { bson_append_timestamp (&child, "afterClusterTime", 16, cs->operation_timestamp, cs->operation_increment); } bson_append_document_end (cmd, &child); } bool mongoc_client_session_append (const mongoc_client_session_t *client_session, bson_t *opts, bson_error_t *error) { ENTRY; BSON_ASSERT (client_session); BSON_ASSERT (opts); if (!bson_append_int64 ( opts, "sessionId", 9, client_session->client_session_id)) { bson_set_error ( error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "invalid opts"); RETURN (false); } RETURN (true); } void mongoc_client_session_destroy (mongoc_client_session_t *session) { ENTRY; if (!session) { EXIT; } if (session->client_generation == session->client->generation) { if (mongoc_client_session_in_transaction (session)) { mongoc_client_session_abort_transaction (session, NULL); } _mongoc_client_unregister_session (session->client, session); _mongoc_client_push_server_session (session->client, session->server_session); } else { /* If the client has been reset, destroy the server session instead of pushing it back into the topology's pool. 
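 *
 * Illustrative aside (editorial, not part of this source; coll and doc are
 * assumed to exist): during normal use the session travels with each
 * operation by being appended to that operation's opts:
 *
 *   bson_t opts = BSON_INITIALIZER;
 *   if (mongoc_client_session_append (session, &opts, &error)) {
 *      mongoc_collection_insert_one (coll, doc, &opts, NULL, &error);
 *   }
 *   bson_destroy (&opts);
 *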
*/ _mongoc_server_session_destroy (session->server_session); } txn_opts_cleanup (&session->opts.default_txn_opts); txn_opts_cleanup (&session->txn.opts); bson_destroy (&session->cluster_time); bson_destroy (session->recovery_token); bson_free (session); EXIT; } void _mongoc_client_session_unpin (mongoc_client_session_t *session) { BSON_ASSERT (session); session->server_id = 0; } void _mongoc_client_session_pin (mongoc_client_session_t *session, uint32_t server_id) { BSON_ASSERT (session); session->server_id = server_id; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client-session.h0000644000076500000240000001402113572250757026420 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLIENT_SESSION_H #define MONGOC_CLIENT_SESSION_H #include #include "mongoc/mongoc-macros.h" /* mongoc_client_session_t, mongoc_transaction_opt_t, and mongoc_session_opt_t are typedef'ed here */ #include "mongoc/mongoc-client.h" BSON_BEGIN_DECLS typedef bool (*mongoc_client_session_with_transaction_cb_t) ( mongoc_client_session_t *session, void *ctx, bson_t **reply, bson_error_t *error); /* these options types are named "opt_t" but their functions are named with * "opts", for consistency with the older mongoc_ssl_opt_t */ MONGOC_EXPORT (mongoc_transaction_opt_t *) mongoc_transaction_opts_new (void) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (mongoc_transaction_opt_t *) mongoc_transaction_opts_clone (const mongoc_transaction_opt_t *opts); MONGOC_EXPORT (void) mongoc_transaction_opts_destroy (mongoc_transaction_opt_t *opts); MONGOC_EXPORT (void) mongoc_transaction_opts_set_max_commit_time_ms (mongoc_transaction_opt_t *opts, int64_t max_commit_time_ms); MONGOC_EXPORT (int64_t) mongoc_transaction_opts_get_max_commit_time_ms (mongoc_transaction_opt_t *opts); MONGOC_EXPORT (void) mongoc_transaction_opts_set_read_concern ( mongoc_transaction_opt_t *opts, const mongoc_read_concern_t *read_concern); MONGOC_EXPORT (const mongoc_read_concern_t *) mongoc_transaction_opts_get_read_concern (const mongoc_transaction_opt_t *opts); MONGOC_EXPORT (void) mongoc_transaction_opts_set_write_concern ( mongoc_transaction_opt_t *opts, const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (const mongoc_write_concern_t *) mongoc_transaction_opts_get_write_concern ( const mongoc_transaction_opt_t *opts); MONGOC_EXPORT (void) mongoc_transaction_opts_set_read_prefs (mongoc_transaction_opt_t *opts, const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (const mongoc_read_prefs_t *) mongoc_transaction_opts_get_read_prefs (const mongoc_transaction_opt_t *opts); MONGOC_EXPORT (mongoc_session_opt_t *) mongoc_session_opts_new (void) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (void) mongoc_session_opts_set_causal_consistency (mongoc_session_opt_t *opts, bool causal_consistency); MONGOC_EXPORT (bool) mongoc_session_opts_get_causal_consistency (const mongoc_session_opt_t *opts); MONGOC_EXPORT (void) 
mongoc_session_opts_set_default_transaction_opts ( mongoc_session_opt_t *opts, const mongoc_transaction_opt_t *txn_opts); MONGOC_EXPORT (const mongoc_transaction_opt_t *) mongoc_session_opts_get_default_transaction_opts ( const mongoc_session_opt_t *opts); MONGOC_EXPORT (mongoc_session_opt_t *) mongoc_session_opts_clone (const mongoc_session_opt_t *opts); MONGOC_EXPORT (void) mongoc_session_opts_destroy (mongoc_session_opt_t *opts); MONGOC_EXPORT (mongoc_client_t *) mongoc_client_session_get_client (const mongoc_client_session_t *session); MONGOC_EXPORT (const mongoc_session_opt_t *) mongoc_client_session_get_opts (const mongoc_client_session_t *session); MONGOC_EXPORT (const bson_t *) mongoc_client_session_get_lsid (const mongoc_client_session_t *session); MONGOC_EXPORT (const bson_t *) mongoc_client_session_get_cluster_time (const mongoc_client_session_t *session); MONGOC_EXPORT (void) mongoc_client_session_advance_cluster_time (mongoc_client_session_t *session, const bson_t *cluster_time); MONGOC_EXPORT (void) mongoc_client_session_get_operation_time ( const mongoc_client_session_t *session, uint32_t *timestamp, uint32_t *increment); MONGOC_EXPORT (uint32_t) mongoc_client_session_get_server_id (const mongoc_client_session_t *session); MONGOC_EXPORT (void) mongoc_client_session_advance_operation_time (mongoc_client_session_t *session, uint32_t timestamp, uint32_t increment); MONGOC_EXPORT (bool) mongoc_client_session_with_transaction ( mongoc_client_session_t *session, mongoc_client_session_with_transaction_cb_t cb, const mongoc_transaction_opt_t *opts, void *ctx, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_session_start_transaction (mongoc_client_session_t *session, const mongoc_transaction_opt_t *opts, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_session_in_transaction (const mongoc_client_session_t *session); MONGOC_EXPORT (bool) mongoc_client_session_commit_transaction (mongoc_client_session_t *session, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_session_abort_transaction (mongoc_client_session_t *session, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_session_append (const mongoc_client_session_t *client_session, bson_t *opts, bson_error_t *error); /* There is no mongoc_client_session_end, only mongoc_client_session_destroy. * Driver Sessions Spec: "In languages that have idiomatic ways of disposing of * resources, drivers SHOULD support that in addition to or instead of * endSession." */ MONGOC_EXPORT (void) mongoc_client_session_destroy (mongoc_client_session_t *session); BSON_END_DECLS #endif /* MONGOC_CLIENT_SESSION_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client.c0000644000076500000240000025062613572250757024747 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-config.h" #ifdef MONGOC_HAVE_DNSAPI /* for DnsQuery_UTF8 */ #include #include #include #else #if defined(MONGOC_HAVE_RES_NSEARCH) || defined(MONGOC_HAVE_RES_SEARCH) #include #include #include #include #define BSON_INSIDE #include #undef BSON_INSIDE #endif #endif #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-collection-private.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-database-private.h" #include "mongoc/mongoc-gridfs-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-error-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-queue-private.h" #include "mongoc/mongoc-socket.h" #include "mongoc/mongoc-stream-buffered.h" #include "mongoc/mongoc-stream-socket.h" #include "mongoc/mongoc-thread-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-uri-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-set-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-host-list-private.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-change-stream-private.h" #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-cursor-private.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-ssl-private.h" #include "mongoc/mongoc-cmd-private.h" #include "mongoc/mongoc-opts-private.h" #endif #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "client" static void _mongoc_client_op_killcursors (mongoc_cluster_t *cluster, mongoc_server_stream_t *server_stream, int64_t cursor_id, int64_t operation_id, const char *db, const char *collection); static void _mongoc_client_killcursors_command (mongoc_cluster_t *cluster, mongoc_server_stream_t *server_stream, int64_t cursor_id, const char *db, const char *collection, mongoc_client_session_t *cs); #define DNS_ERROR(_msg, ...) \ do { \ bson_set_error (error, \ MONGOC_ERROR_STREAM, \ MONGOC_ERROR_STREAM_NAME_RESOLUTION, \ _msg, \ __VA_ARGS__); \ GOTO (done); \ } while (0) #ifdef MONGOC_HAVE_DNSAPI typedef bool (*mongoc_rr_callback_t) (const char *service, PDNS_RECORD pdns, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error); static bool srv_callback (const char *service, PDNS_RECORD pdns, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error) { if (rr_data && rr_data->hosts) { _mongoc_host_list_remove_host ( &(rr_data->hosts), pdns->Data.SRV.pNameTarget, pdns->Data.SRV.wPort); } return mongoc_uri_upsert_host ( uri, pdns->Data.SRV.pNameTarget, pdns->Data.SRV.wPort, error); } /* rr_data is unused, but here to match srv_callback signature */ static bool txt_callback (const char *service, PDNS_RECORD pdns, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error) { DWORD i; bson_string_t *txt; bool r; txt = bson_string_new (NULL); for (i = 0; i < pdns->Data.TXT.dwStringCount; i++) { bson_string_append (txt, pdns->Data.TXT.pStringArray[i]); } r = mongoc_uri_parse_options (uri, txt->str, true /* from_dns */, error); bson_string_free (txt, true); return r; } /* *-------------------------------------------------------------------------- * * _mongoc_get_rr_dnsapi -- * * Fetch SRV or TXT resource records using the Windows DNS API and * update @uri. * * Returns: * Success or failure. * * For an SRV lookup, returns false if there is any error. 
* * For TXT lookup, ignores any error fetching the resource record, but * returns false if the resource record is found and there is an error * reading its contents as URI options. * * Side effects: * @error is set if there is a failure. * *-------------------------------------------------------------------------- */ static bool _mongoc_get_rr_dnsapi (const char *service, mongoc_rr_type_t rr_type, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error) { const char *rr_type_name; WORD nst; mongoc_rr_callback_t callback; PDNS_RECORD pdns = NULL; DNS_STATUS res; LPVOID lpMsgBuf = NULL; bool dns_success; bool callback_success = true; int i; ENTRY; if (rr_type == MONGOC_RR_SRV) { /* return true only if DNS succeeds */ dns_success = false; rr_type_name = "SRV"; nst = DNS_TYPE_SRV; callback = srv_callback; } else { /* return true whether or not DNS succeeds */ dns_success = true; rr_type_name = "TXT"; nst = DNS_TYPE_TEXT; callback = txt_callback; } res = DnsQuery_UTF8 (service, nst, DNS_QUERY_BYPASS_CACHE, NULL /* IP Address */, &pdns, 0 /* reserved */); if (res) { DWORD flags = FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS; if (FormatMessage (flags, 0, res, MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) &lpMsgBuf, 0, 0)) { DNS_ERROR ("Failed to look up %s record \"%s\": %s", rr_type_name, service, (char *) lpMsgBuf); } DNS_ERROR ("Failed to look up %s record \"%s\": Unknown error", rr_type_name, service); } if (!pdns) { DNS_ERROR ("No %s records for \"%s\"", rr_type_name, service); } dns_success = true; i = 0; do { /* DnsQuery can return additional records not of the requested type */ if ((rr_type == MONGOC_RR_TXT && pdns->wType == DNS_TYPE_TEXT) || (rr_type == MONGOC_RR_SRV && pdns->wType == DNS_TYPE_SRV)) { if (i > 0 && rr_type == MONGOC_RR_TXT) { /* Initial DNS Seedlist Discovery Spec: a client "MUST raise an error when multiple TXT records are encountered". */ callback_success = false; DNS_ERROR ("Multiple TXT records for \"%s\"", service); } if (rr_data) { if ((i == 0) || (pdns->dwTtl < rr_data->min_ttl)) { rr_data->min_ttl = pdns->dwTtl; } } if (!callback (service, pdns, uri, rr_data, error)) { callback_success = false; GOTO (done); } i++; } pdns = pdns->pNext; } while (pdns); if (rr_data) { rr_data->count = i; } done: if (pdns) { DnsRecordListFree (pdns, DnsFreeRecordList); } if (lpMsgBuf) { LocalFree (lpMsgBuf); } RETURN (dns_success && callback_success); } #elif (defined(MONGOC_HAVE_RES_NSEARCH) || defined(MONGOC_HAVE_RES_SEARCH)) typedef bool (*mongoc_rr_callback_t) (const char *service, ns_msg *ns_answer, ns_rr *rr, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error); static bool srv_callback (const char *service, ns_msg *ns_answer, ns_rr *rr, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error) { const uint8_t *data; char name[1024]; uint16_t port; int size; bool ret = false; data = ns_rr_rdata (*rr); /* memcpy the network endian port before converting to host endian. we cannot * cast (data + 4) directly as a uint16_t*, because it may not align on an * 2-byte boundary. 
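 * (SRV RDATA is laid out as priority, weight and port, two network-order
 * bytes each, followed by the compressed target name, which is why the port
 * is read from offset 4 and the name expanded from offset 6; see RFC 2782.)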
*/ memcpy (&port, data + 4, sizeof (port)); port = ntohs (port); size = dn_expand (ns_msg_base (*ns_answer), ns_msg_end (*ns_answer), data + 6, name, sizeof (name)); if (size < 1) { DNS_ERROR ("Invalid record in SRV answer for \"%s\": \"%s\"", service, strerror (h_errno)); } if (rr_data && rr_data->hosts) { _mongoc_host_list_remove_host (&(rr_data->hosts), name, port); } ret = mongoc_uri_upsert_host (uri, name, port, error); done: return ret; } /* rr_data is unused, but here to match srv_callback signature */ static bool txt_callback (const char *service, ns_msg *ns_answer, ns_rr *rr, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error) { char s[256]; const uint8_t *data; bson_string_t *txt; uint16_t pos, total; uint8_t len; bool r = false; total = (uint16_t) ns_rr_rdlen (*rr); if (total < 1 || total > 255) { DNS_ERROR ("Invalid TXT record size %hu for \"%s\"", total, service); } /* a TXT record has one or more strings, each up to 255 chars, each is * prefixed by its length as 1 byte. thus endianness doesn't matter. */ txt = bson_string_new (NULL); pos = 0; data = ns_rr_rdata (*rr); while (pos < total) { memcpy (&len, data + pos, sizeof (uint8_t)); pos++; bson_strncpy (s, (const char *) (data + pos), (size_t) len + 1); bson_string_append (txt, s); pos += len; } r = mongoc_uri_parse_options (uri, txt->str, true /* from_dns */, error); bson_string_free (txt, true); done: return r; } /* *-------------------------------------------------------------------------- * * _mongoc_get_rr_search -- * * Fetch SRV or TXT resource records using libresolv and update @uri. * * Returns: * Success or failure. * * For an SRV lookup, returns false if there is any error. * * For TXT lookup, ignores any error fetching the resource record, but * returns false if the resource record is found and there is an error * reading its contents as URI options. * * Side effects: * @error is set if there is a failure. 
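 *
 * (Editorial note: a TXT RDATA is a sequence of character-strings, each
 * prefixed by a one-byte length, which is why txt_callback above walks the
 * buffer length-byte by length-byte and concatenates the pieces before
 * handing them to mongoc_uri_parse_options.)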
* *-------------------------------------------------------------------------- */ static bool _mongoc_get_rr_search (const char *service, mongoc_rr_type_t rr_type, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error) { #ifdef MONGOC_HAVE_RES_NSEARCH struct __res_state state = {0}; #endif int size; unsigned char search_buf[1024]; ns_msg ns_answer; int n; int i; const char *rr_type_name; ns_type nst; mongoc_rr_callback_t callback; ns_rr resource_record; bool dns_success; bool callback_success = true; ENTRY; if (rr_type == MONGOC_RR_SRV) { /* return true only if DNS succeeds */ dns_success = false; rr_type_name = "SRV"; nst = ns_t_srv; callback = srv_callback; } else { /* return true whether or not DNS succeeds */ dns_success = true; rr_type_name = "TXT"; nst = ns_t_txt; callback = txt_callback; } #ifdef MONGOC_HAVE_RES_NSEARCH /* thread-safe */ res_ninit (&state); size = res_nsearch ( &state, service, ns_c_in, nst, search_buf, sizeof (search_buf)); #elif defined(MONGOC_HAVE_RES_SEARCH) size = res_search (service, ns_c_in, nst, search_buf, sizeof (search_buf)); #endif if (size < 0) { DNS_ERROR ("Failed to look up %s record \"%s\": %s", rr_type_name, service, strerror (h_errno)); } if (ns_initparse (search_buf, size, &ns_answer)) { DNS_ERROR ("Invalid %s answer for \"%s\"", rr_type_name, service); } n = ns_msg_count (ns_answer, ns_s_an); if (!n) { DNS_ERROR ("No %s records for \"%s\"", rr_type_name, service); } if (rr_data) { rr_data->count = n; } for (i = 0; i < n; i++) { if (i > 0 && rr_type == MONGOC_RR_TXT) { /* Initial DNS Seedlist Discovery Spec: a client "MUST raise an error * when multiple TXT records are encountered". */ callback_success = false; DNS_ERROR ("Multiple TXT records for \"%s\"", service); } if (ns_parserr (&ns_answer, ns_s_an, i, &resource_record)) { DNS_ERROR ("Invalid record %d of %s answer for \"%s\": \"%s\"", i, rr_type_name, service, strerror (h_errno)); } if (rr_data) { uint32_t ttl; ttl = ns_rr_ttl (resource_record); if ((i == 0) || (ttl < rr_data->min_ttl)) { rr_data->min_ttl = ttl; } } if (!callback ( service, &ns_answer, &resource_record, uri, rr_data, error)) { callback_success = false; GOTO (done); } } dns_success = true; done: #ifdef MONGOC_HAVE_RES_NDESTROY /* defined on BSD/Darwin, and only if MONGOC_HAVE_RES_NSEARCH is defined */ res_ndestroy (&state); #elif defined(MONGOC_HAVE_RES_NCLOSE) /* defined on Linux, and only if MONGOC_HAVE_RES_NSEARCH is defined */ res_nclose (&state); #endif RETURN (dns_success && callback_success); } #endif /* *-------------------------------------------------------------------------- * * _mongoc_client_get_rr -- * * Fetch an SRV or TXT resource record and update @uri. See RFCs 1464 * and 2782, and MongoDB's Initial DNS Seedlist Discovery Spec. * * Returns: * Success or failure. * * Side effects: * @error is set if there is a failure. 
* *-------------------------------------------------------------------------- */ bool _mongoc_client_get_rr (const char *service, mongoc_rr_type_t rr_type, mongoc_uri_t *uri, mongoc_rr_data_t *rr_data, bson_error_t *error) { #ifdef MONGOC_HAVE_DNSAPI return _mongoc_get_rr_dnsapi (service, rr_type, uri, rr_data, error); #elif (defined(MONGOC_HAVE_RES_NSEARCH) || defined(MONGOC_HAVE_RES_SEARCH)) return _mongoc_get_rr_search (service, rr_type, uri, rr_data, error); #else bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NAME_RESOLUTION, "libresolv unavailable, cannot use mongodb+srv URI"); return false; #endif } #undef DNS_ERROR /* *-------------------------------------------------------------------------- * * mongoc_client_connect_tcp -- * * Connect to a host using a TCP socket. * * This will be performed synchronously and return a mongoc_stream_t * that can be used to connect with the remote host. * * Returns: * A newly allocated mongoc_stream_t if successful; otherwise * NULL and @error is set. * * Side effects: * @error is set if return value is NULL. * *-------------------------------------------------------------------------- */ static mongoc_stream_t * mongoc_client_connect_tcp (const mongoc_uri_t *uri, const mongoc_host_list_t *host, bson_error_t *error) { mongoc_socket_t *sock = NULL; struct addrinfo hints; struct addrinfo *result, *rp; int32_t connecttimeoutms; int64_t expire_at; char portstr[8]; int s; ENTRY; BSON_ASSERT (uri); BSON_ASSERT (host); connecttimeoutms = mongoc_uri_get_option_as_int32 ( uri, MONGOC_URI_CONNECTTIMEOUTMS, MONGOC_DEFAULT_CONNECTTIMEOUTMS); BSON_ASSERT (connecttimeoutms); bson_snprintf (portstr, sizeof portstr, "%hu", host->port); memset (&hints, 0, sizeof hints); hints.ai_family = host->family; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = 0; hints.ai_protocol = 0; s = getaddrinfo (host->host, portstr, &hints, &result); if (s != 0) { mongoc_counter_dns_failure_inc (); bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NAME_RESOLUTION, "Failed to resolve %s", host->host); RETURN (NULL); } mongoc_counter_dns_success_inc (); for (rp = result; rp; rp = rp->ai_next) { /* * Create a new non-blocking socket. */ if (!(sock = mongoc_socket_new ( rp->ai_family, rp->ai_socktype, rp->ai_protocol))) { continue; } /* * Try to connect to the peer. */ expire_at = bson_get_monotonic_time () + (connecttimeoutms * 1000L); if (0 != mongoc_socket_connect ( sock, rp->ai_addr, (mongoc_socklen_t) rp->ai_addrlen, expire_at)) { mongoc_socket_destroy (sock); sock = NULL; continue; } break; } if (!sock) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, "Failed to connect to target host: %s", host->host_and_port); freeaddrinfo (result); RETURN (NULL); } freeaddrinfo (result); return mongoc_stream_socket_new (sock); } /* *-------------------------------------------------------------------------- * * mongoc_client_connect_unix -- * * Connect to a MongoDB server using a UNIX domain socket. * * Returns: * A newly allocated mongoc_stream_t if successful; otherwise * NULL and @error is set. * * Side effects: * @error is set if return value is NULL. 
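 *
 * Illustrative aside (editorial; the socket path is an example): this code
 * path is reached with a URI whose host part is a percent-encoded socket
 * path, e.g.
 *
 *   client = mongoc_client_new ("mongodb://%2Ftmp%2Fmongodb-27017.sock");
 *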
* *-------------------------------------------------------------------------- */ static mongoc_stream_t * mongoc_client_connect_unix (const mongoc_uri_t *uri, const mongoc_host_list_t *host, bson_error_t *error) { #ifdef _WIN32 ENTRY; bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, "UNIX domain sockets not supported on win32."); RETURN (NULL); #else struct sockaddr_un saddr; mongoc_socket_t *sock; mongoc_stream_t *ret = NULL; ENTRY; BSON_ASSERT (uri); BSON_ASSERT (host); memset (&saddr, 0, sizeof saddr); saddr.sun_family = AF_UNIX; bson_snprintf (saddr.sun_path, sizeof saddr.sun_path - 1, "%s", host->host); sock = mongoc_socket_new (AF_UNIX, SOCK_STREAM, 0); if (sock == NULL) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed to create socket."); RETURN (NULL); } if (-1 == mongoc_socket_connect ( sock, (struct sockaddr *) &saddr, sizeof saddr, -1)) { mongoc_socket_destroy (sock); bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, "Failed to connect to UNIX domain socket."); RETURN (NULL); } ret = mongoc_stream_socket_new (sock); RETURN (ret); #endif } /* *-------------------------------------------------------------------------- * * mongoc_client_default_stream_initiator -- * * A mongoc_stream_initiator_t that will handle the various type * of supported sockets by MongoDB including TCP and UNIX. * * Language binding authors may want to implement an alternate * version of this method to use their native stream format. * * Returns: * A mongoc_stream_t if successful; otherwise NULL and @error is set. * * Side effects: * @error is set if return value is NULL. * *-------------------------------------------------------------------------- */ mongoc_stream_t * mongoc_client_default_stream_initiator (const mongoc_uri_t *uri, const mongoc_host_list_t *host, void *user_data, bson_error_t *error) { mongoc_stream_t *base_stream = NULL; #ifdef MONGOC_ENABLE_SSL mongoc_client_t *client = (mongoc_client_t *) user_data; const char *mechanism; int32_t connecttimeoutms; #endif BSON_ASSERT (uri); BSON_ASSERT (host); #ifndef MONGOC_ENABLE_SSL if (mongoc_uri_get_tls (uri)) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_NO_ACCEPTABLE_PEER, "SSL is not enabled in this build of mongo-c-driver."); return NULL; } #endif switch (host->family) { case AF_UNSPEC: #if defined(AF_INET6) case AF_INET6: #endif case AF_INET: base_stream = mongoc_client_connect_tcp (uri, host, error); break; case AF_UNIX: base_stream = mongoc_client_connect_unix (uri, host, error); break; default: bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_INVALID_TYPE, "Invalid address family: 0x%02x", host->family); break; } #ifdef MONGOC_ENABLE_SSL if (base_stream) { mechanism = mongoc_uri_get_auth_mechanism (uri); if (client->use_ssl || (mechanism && (0 == strcmp (mechanism, "MONGODB-X509")))) { mongoc_stream_t *original = base_stream; base_stream = mongoc_stream_tls_new_with_hostname ( base_stream, host->host, &client->ssl_opts, true); if (!base_stream) { mongoc_stream_destroy (original); bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed initialize TLS state."); return NULL; } connecttimeoutms = mongoc_uri_get_option_as_int32 ( uri, MONGOC_URI_CONNECTTIMEOUTMS, MONGOC_DEFAULT_CONNECTTIMEOUTMS); if (!mongoc_stream_tls_handshake_block ( base_stream, host->host, connecttimeoutms, error)) { mongoc_stream_destroy (base_stream); return NULL; } } } #endif return base_stream ? 
mongoc_stream_buffered_new (base_stream, 1024) : NULL; } /* *-------------------------------------------------------------------------- * * _mongoc_client_create_stream -- * * INTERNAL API * * This function is used by the mongoc_cluster_t to initiate a * new stream. This is done because cluster is private API and * those using mongoc_client_t may need to override this process. * * This function calls the default initiator for new streams. * * Returns: * A newly allocated mongoc_stream_t if successful; otherwise * NULL and @error is set. * * Side effects: * @error is set if return value is NULL. * *-------------------------------------------------------------------------- */ mongoc_stream_t * _mongoc_client_create_stream (mongoc_client_t *client, const mongoc_host_list_t *host, bson_error_t *error) { BSON_ASSERT (client); BSON_ASSERT (host); return client->initiator (client->uri, host, client->initiator_data, error); } /* *-------------------------------------------------------------------------- * * _mongoc_client_recv -- * * Receives a RPC from a remote MongoDB cluster node. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @error is set if return value is false. * *-------------------------------------------------------------------------- */ bool _mongoc_client_recv (mongoc_client_t *client, mongoc_rpc_t *rpc, mongoc_buffer_t *buffer, mongoc_server_stream_t *server_stream, bson_error_t *error) { BSON_ASSERT (client); BSON_ASSERT (rpc); BSON_ASSERT (buffer); BSON_ASSERT (server_stream); if (!mongoc_cluster_try_recv ( &client->cluster, rpc, buffer, server_stream, error)) { mongoc_topology_invalidate_server ( client->topology, server_stream->sd->id, error); return false; } return true; } /* *-------------------------------------------------------------------------- * * mongoc_client_new -- * * Create a new mongoc_client_t using the URI provided. * * @uri should be a MongoDB URI string such as "mongodb://localhost/" * More information on the format can be found at * http://docs.mongodb.org/manual/reference/connection-string/ * * Returns: * A newly allocated mongoc_client_t or NULL if @uri_string is * invalid. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_client_t * mongoc_client_new (const char *uri_string) { mongoc_topology_t *topology; mongoc_client_t *client; mongoc_uri_t *uri; if (!uri_string) { uri_string = "mongodb://127.0.0.1/"; } if (!(uri = mongoc_uri_new (uri_string))) { return NULL; } topology = mongoc_topology_new (uri, true); client = _mongoc_client_new_from_uri (topology); if (!client) { mongoc_topology_destroy (topology); } mongoc_uri_destroy (uri); return client; } /* *-------------------------------------------------------------------------- * * mongoc_client_set_ssl_opts * * set ssl opts for a client * * Returns: * Nothing * * Side effects: * None. 
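 *
 * Illustrative aside (editorial; the file paths are assumptions):
 *
 *   mongoc_ssl_opt_t ssl_opts = { 0 };
 *   ssl_opts.ca_file = "/etc/ssl/mongodb-ca.pem";
 *   ssl_opts.pem_file = "/etc/ssl/client.pem";
 *   mongoc_client_set_ssl_opts (client, &ssl_opts);
 *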
* *-------------------------------------------------------------------------- */ #ifdef MONGOC_ENABLE_SSL void mongoc_client_set_ssl_opts (mongoc_client_t *client, const mongoc_ssl_opt_t *opts) { BSON_ASSERT (client); BSON_ASSERT (opts); _mongoc_ssl_opts_cleanup (&client->ssl_opts); client->use_ssl = true; _mongoc_ssl_opts_copy_to (opts, &client->ssl_opts); if (client->topology->single_threaded) { mongoc_topology_scanner_set_ssl_opts (client->topology->scanner, &client->ssl_opts); } } #endif /* *-------------------------------------------------------------------------- * * mongoc_client_new_from_uri -- * * Create a new mongoc_client_t for a mongoc_uri_t. * * Returns: * A newly allocated mongoc_client_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_client_t * mongoc_client_new_from_uri (const mongoc_uri_t *uri) { mongoc_topology_t *topology; topology = mongoc_topology_new (uri, true); /* topology->uri may be different from uri: if this is a mongodb+srv:// URI * then mongoc_topology_new has fetched SRV and TXT records and updated its * uri from them. */ return _mongoc_client_new_from_uri (topology); } /* *-------------------------------------------------------------------------- * * _mongoc_client_new_from_uri -- * * Create a new mongoc_client_t for a given topology object. * * Returns: * A newly allocated mongoc_client_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_client_t * _mongoc_client_new_from_uri (mongoc_topology_t *topology) { mongoc_client_t *client; const mongoc_read_prefs_t *read_prefs; const mongoc_read_concern_t *read_concern; const mongoc_write_concern_t *write_concern; const char *appname; BSON_ASSERT (topology); #ifndef MONGOC_ENABLE_SSL if (mongoc_uri_get_tls (topology->uri)) { MONGOC_ERROR ("Can't create SSL client, SSL not enabled in this build."); return NULL; } #endif client = (mongoc_client_t *) bson_malloc0 (sizeof *client); client->uri = mongoc_uri_copy (topology->uri); client->initiator = mongoc_client_default_stream_initiator; client->initiator_data = client; client->topology = topology; client->error_api_version = MONGOC_ERROR_API_VERSION_LEGACY; client->error_api_set = false; client->client_sessions = mongoc_set_new (8, NULL, NULL); client->csid_rand_seed = (unsigned int) bson_get_monotonic_time (); write_concern = mongoc_uri_get_write_concern (client->uri); client->write_concern = mongoc_write_concern_copy (write_concern); read_concern = mongoc_uri_get_read_concern (client->uri); client->read_concern = mongoc_read_concern_copy (read_concern); read_prefs = mongoc_uri_get_read_prefs_t (client->uri); client->read_prefs = mongoc_read_prefs_copy (read_prefs); appname = mongoc_uri_get_option_as_utf8 (client->uri, MONGOC_URI_APPNAME, NULL); if (appname && client->topology->single_threaded) { /* the appname should have already been validated */ BSON_ASSERT (mongoc_client_set_appname (client, appname)); } mongoc_cluster_init (&client->cluster, client->uri, client); #ifdef MONGOC_ENABLE_SSL client->use_ssl = false; if (mongoc_uri_get_tls (client->uri)) { mongoc_ssl_opt_t ssl_opt = {0}; _mongoc_ssl_opts_from_uri (&ssl_opt, client->uri); /* sets use_ssl = true */ mongoc_client_set_ssl_opts (client, &ssl_opt); } #endif mongoc_counter_clients_active_inc (); return client; } /* *-------------------------------------------------------------------------- * * mongoc_client_destroy -- * * Destroys a mongoc_client_t and cleans up all resources 
associated * with the client instance. * * Returns: * None. * * Side effects: * @client is destroyed. * *-------------------------------------------------------------------------- */ void mongoc_client_destroy (mongoc_client_t *client) { if (client) { if (client->topology->single_threaded) { _mongoc_client_end_sessions (client); mongoc_topology_destroy (client->topology); } mongoc_write_concern_destroy (client->write_concern); mongoc_read_concern_destroy (client->read_concern); mongoc_read_prefs_destroy (client->read_prefs); mongoc_cluster_destroy (&client->cluster); mongoc_uri_destroy (client->uri); mongoc_set_destroy (client->client_sessions); #ifdef MONGOC_ENABLE_SSL _mongoc_ssl_opts_cleanup (&client->ssl_opts); #endif bson_free (client); mongoc_counter_clients_active_dec (); mongoc_counter_clients_disposed_inc (); } } /* *-------------------------------------------------------------------------- * * mongoc_client_get_uri -- * * Fetch the URI used for @client. * * Returns: * A mongoc_uri_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_uri_t * mongoc_client_get_uri (const mongoc_client_t *client) { BSON_ASSERT (client); return client->uri; } /* *-------------------------------------------------------------------------- * * mongoc_client_start_session -- * * Creates a structure to communicate in a session over @client. * * This structure should be freed when the caller is done with it * using mongoc_client_session_destroy(). * * Returns: * A newly allocated mongoc_client_session_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_client_session_t * mongoc_client_start_session (mongoc_client_t *client, const mongoc_session_opt_t *opts, bson_error_t *error) { mongoc_server_session_t *ss; mongoc_client_session_t *cs; uint32_t csid; ENTRY; ss = _mongoc_client_pop_server_session (client, error); if (!ss) { RETURN (NULL); } /* get a random internal id for the session, retrying on collision */ do { csid = (uint32_t) _mongoc_rand_simple (&client->csid_rand_seed); } while (mongoc_set_get (client->client_sessions, csid)); cs = _mongoc_client_session_new (client, ss, opts, csid); /* remember session so if we see its client_session_id in a command, we can * find its lsid and clusterTime */ mongoc_set_add (client->client_sessions, csid, cs); RETURN (cs); } /* *-------------------------------------------------------------------------- * * mongoc_client_get_database -- * * Fetches a newly allocated database structure to communicate with * a database over @client. * * @database should be a db name such as "test". * * This structure should be freed when the caller is done with it * using mongoc_database_destroy(). * * Returns: * A newly allocated mongoc_database_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_database_t * mongoc_client_get_database (mongoc_client_t *client, const char *name) { BSON_ASSERT (client); BSON_ASSERT (name); return _mongoc_database_new (client, name, client->read_prefs, client->read_concern, client->write_concern); } /* *-------------------------------------------------------------------------- * * mongoc_client_get_default_database -- * * Get the database named in the MongoDB connection URI, or NULL * if none was specified in the URI. * * This structure should be freed when the caller is done with it * using mongoc_database_destroy(). 
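 *
 * Illustrative aside (editorial): with a client created from
 * "mongodb://localhost/mydb" this returns a handle to "mydb"; with no
 * database in the URI it returns NULL.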
* * Returns: * A newly allocated mongoc_database_t or NULL. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_database_t * mongoc_client_get_default_database (mongoc_client_t *client) { const char *db; BSON_ASSERT (client); db = mongoc_uri_get_database (client->uri); if (db) { return mongoc_client_get_database (client, db); } return NULL; } /* *-------------------------------------------------------------------------- * * mongoc_client_get_collection -- * * This function returns a newly allocated collection structure. * * @db should be the name of the database, such as "test". * @collection should be the name of the collection such as "test". * * The above would result in the namespace "test.test". * * You should free this structure when you are done with it using * mongoc_collection_destroy(). * * Returns: * A newly allocated mongoc_collection_t that should be freed with * mongoc_collection_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_collection_t * mongoc_client_get_collection (mongoc_client_t *client, const char *db, const char *collection) { BSON_ASSERT (client); BSON_ASSERT (db); BSON_ASSERT (collection); return _mongoc_collection_new (client, db, collection, client->read_prefs, client->read_concern, client->write_concern); } /* *-------------------------------------------------------------------------- * * mongoc_client_get_gridfs -- * * This function returns a newly allocated collection structure. * * @db should be the name of the database, such as "test". * * @prefix optional prefix for GridFS collection names, or NULL. Default * is "fs", thus the default collection names for GridFS are "fs.files" * and "fs.chunks". * * Returns: * A newly allocated mongoc_gridfs_t that should be freed with * mongoc_gridfs_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_gridfs_t * mongoc_client_get_gridfs (mongoc_client_t *client, const char *db, const char *prefix, bson_error_t *error) { BSON_ASSERT (client); BSON_ASSERT (db); if (!prefix) { prefix = "fs"; } return _mongoc_gridfs_new (client, db, prefix, error); } /* *-------------------------------------------------------------------------- * * mongoc_client_get_write_concern -- * * Fetches the default write concern for @client. * * Returns: * A mongoc_write_concern_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_write_concern_t * mongoc_client_get_write_concern (const mongoc_client_t *client) { BSON_ASSERT (client); return client->write_concern; } /* *-------------------------------------------------------------------------- * * mongoc_client_set_write_concern -- * * Sets the default write concern for @client. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_client_set_write_concern (mongoc_client_t *client, const mongoc_write_concern_t *write_concern) { BSON_ASSERT (client); if (write_concern != client->write_concern) { if (client->write_concern) { mongoc_write_concern_destroy (client->write_concern); } client->write_concern = write_concern ? 
mongoc_write_concern_copy (write_concern) : mongoc_write_concern_new (); } } /* *-------------------------------------------------------------------------- * * mongoc_client_get_read_concern -- * * Fetches the default read concern for @client. * * Returns: * A mongoc_read_concern_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_read_concern_t * mongoc_client_get_read_concern (const mongoc_client_t *client) { BSON_ASSERT (client); return client->read_concern; } /* *-------------------------------------------------------------------------- * * mongoc_client_set_read_concern -- * * Sets the default read concern for @client. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_client_set_read_concern (mongoc_client_t *client, const mongoc_read_concern_t *read_concern) { BSON_ASSERT (client); if (read_concern != client->read_concern) { if (client->read_concern) { mongoc_read_concern_destroy (client->read_concern); } client->read_concern = read_concern ? mongoc_read_concern_copy (read_concern) : mongoc_read_concern_new (); } } /* *-------------------------------------------------------------------------- * * mongoc_client_get_read_prefs -- * * Fetch the default read preferences for @client. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_read_prefs_t * mongoc_client_get_read_prefs (const mongoc_client_t *client) { BSON_ASSERT (client); return client->read_prefs; } /* *-------------------------------------------------------------------------- * * mongoc_client_set_read_prefs -- * * Set the default read preferences for @client. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_client_set_read_prefs (mongoc_client_t *client, const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (client); if (read_prefs != client->read_prefs) { if (client->read_prefs) { mongoc_read_prefs_destroy (client->read_prefs); } client->read_prefs = read_prefs ? 
mongoc_read_prefs_copy (read_prefs) : mongoc_read_prefs_new (MONGOC_READ_PRIMARY); } } mongoc_cursor_t * mongoc_client_command (mongoc_client_t *client, const char *db_name, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { char ns[MONGOC_NAMESPACE_MAX]; mongoc_cursor_t *cursor; BSON_ASSERT (client); BSON_ASSERT (db_name); BSON_ASSERT (query); /* * Allow a caller to provide a fully qualified namespace */ if (NULL == strstr (db_name, "$cmd")) { bson_snprintf (ns, sizeof ns, "%s.$cmd", db_name); db_name = ns; } cursor = _mongoc_cursor_cmd_deprecated_new (client, db_name, query, read_prefs); return cursor; } static bool _mongoc_client_retryable_write_command_with_stream ( mongoc_client_t *client, mongoc_cmd_parts_t *parts, mongoc_server_stream_t *server_stream, bson_t *reply, bson_error_t *error) { mongoc_server_stream_t *retry_server_stream = NULL; bson_iter_t txn_number_iter; bool is_retryable = true; bool ret; ENTRY; BSON_ASSERT (parts->is_retryable_write); /* increment the transaction number for the first attempt of each retryable * write command */ BSON_ASSERT (bson_iter_init_find ( &txn_number_iter, parts->assembled.command, "txnNumber")); bson_iter_overwrite_int64 ( &txn_number_iter, ++parts->assembled.session->server_session->txn_number); retry: ret = mongoc_cluster_run_command_monitored ( &client->cluster, &parts->assembled, reply, error); if (is_retryable) { _mongoc_write_error_update_if_unsupported_storage_engine ( ret, error, reply); } /* If a retryable error is encountered and the write is retryable, select * a new writable stream and retry. If server selection fails or the selected * server does not support retryable writes, fall through and allow the * original error to be reported. */ if (is_retryable && _mongoc_write_error_get_type (ret, error, reply) == MONGOC_WRITE_ERR_RETRY) { bson_error_t ignored_error; /* each write command may be retried at most once */ is_retryable = false; if (retry_server_stream) { mongoc_server_stream_cleanup (retry_server_stream); } retry_server_stream = mongoc_cluster_stream_for_writes ( &client->cluster, parts->assembled.session, NULL, &ignored_error); if (retry_server_stream && retry_server_stream->sd->max_wire_version >= WIRE_VERSION_RETRY_WRITES) { parts->assembled.server_stream = retry_server_stream; bson_destroy (reply); GOTO (retry); } } if (retry_server_stream) { mongoc_server_stream_cleanup (retry_server_stream); } RETURN (ret); } static bool _mongoc_client_retryable_read_command_with_stream ( mongoc_client_t *client, mongoc_cmd_parts_t *parts, mongoc_server_stream_t *server_stream, bson_t *reply, bson_error_t *error) { mongoc_server_stream_t *retry_server_stream = NULL; bool is_retryable = true; bool ret; bson_t reply_local; if (reply == NULL) { reply = &reply_local; } ENTRY; BSON_ASSERT (parts->is_retryable_read); retry: ret = mongoc_cluster_run_command_monitored ( &client->cluster, &parts->assembled, reply, error); /* If a retryable error is encountered and the read is retryable, select * a new readable stream and retry. If server selection fails or the selected * server does not support retryable reads, fall through and allow the * original error to be reported. 
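 * As in the retryable-write path above, at most one retry is attempted:
 * is_retryable is cleared before retrying, and the retry reuses the
 * already-assembled command on the newly selected stream.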
*/ if (is_retryable && _mongoc_read_error_get_type (ret, error, reply) == MONGOC_READ_ERR_RETRY) { bson_error_t ignored_error; /* each read command may be retried at most once */ is_retryable = false; if (retry_server_stream) { mongoc_server_stream_cleanup (retry_server_stream); } retry_server_stream = mongoc_cluster_stream_for_reads (&client->cluster, parts->read_prefs, parts->assembled.session, NULL, &ignored_error); if (retry_server_stream && retry_server_stream->sd->max_wire_version >= WIRE_VERSION_RETRY_READS) { parts->assembled.server_stream = retry_server_stream; bson_destroy (reply); GOTO (retry); } } if (retry_server_stream) { mongoc_server_stream_cleanup (retry_server_stream); } RETURN (ret); } static bool _mongoc_client_command_with_stream (mongoc_client_t *client, mongoc_cmd_parts_t *parts, const mongoc_read_prefs_t *read_prefs, mongoc_server_stream_t *server_stream, bson_t *reply, bson_error_t *error) { ENTRY; parts->assembled.operation_id = ++client->cluster.operation_id; if (!mongoc_cmd_parts_assemble (parts, server_stream, error)) { _mongoc_bson_init_if_set (reply); return false; }; if (parts->is_retryable_write) { RETURN (_mongoc_client_retryable_write_command_with_stream ( client, parts, server_stream, reply, error)); } if (parts->is_retryable_read) { RETURN (_mongoc_client_retryable_read_command_with_stream ( client, parts, server_stream, reply, error)); } RETURN (mongoc_cluster_run_command_monitored ( &client->cluster, &parts->assembled, reply, error)); } bool mongoc_client_command_simple (mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error) { mongoc_cluster_t *cluster; mongoc_server_stream_t *server_stream = NULL; mongoc_cmd_parts_t parts; bool ret; ENTRY; BSON_ASSERT (client); BSON_ASSERT (db_name); BSON_ASSERT (command); if (!_mongoc_read_prefs_validate (read_prefs, error)) { RETURN (false); } cluster = &client->cluster; mongoc_cmd_parts_init (&parts, client, db_name, MONGOC_QUERY_NONE, command); parts.read_prefs = read_prefs; /* Server Selection Spec: "The generic command method has a default read * preference of mode 'primary'. The generic command method MUST ignore any * default read preference from client, database or collection * configuration. The generic command method SHOULD allow an optional read * preference argument." */ server_stream = mongoc_cluster_stream_for_reads (cluster, read_prefs, NULL, reply, error); if (server_stream) { ret = _mongoc_client_command_with_stream ( client, &parts, read_prefs, server_stream, reply, error); } else { /* reply initialized by mongoc_cluster_stream_for_reads */ ret = false; } mongoc_cmd_parts_cleanup (&parts); mongoc_server_stream_cleanup (server_stream); RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_client_command_with_opts -- * * Execute a command on the server. If mode is MONGOC_CMD_READ or * MONGOC_CMD_RW, then read concern is applied from @opts, or else from * @default_rc, and read preferences are applied from @user_prefs, or else * from @default_prefs. If mode is MONGOC_CMD_WRITE or MONGOC_CMD_RW, then * write concern is applied from @opts if present, or else @default_wc. * * If mode is MONGOC_CMD_RAW, then read concern and write concern are * applied from @opts only. Read preferences are applied from * @user_prefs. * * The mongoc_client_t's read preference, read concern, and write concern * are *NOT* applied. * * Returns: * Success or failure. 
* A write concern timeout or write concern error is considered a failure. * * Side effects: * @reply is always initialized. * @error is filled out if the command fails. * *-------------------------------------------------------------------------- */ bool _mongoc_client_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, mongoc_command_mode_t mode, const bson_t *opts, mongoc_query_flags_t flags, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, mongoc_read_concern_t *default_rc, mongoc_write_concern_t *default_wc, bson_t *reply, bson_error_t *error) { mongoc_read_write_opts_t read_write_opts; mongoc_cmd_parts_t parts; const char *command_name; const mongoc_read_prefs_t *prefs = COALESCE (user_prefs, default_prefs); mongoc_server_stream_t *server_stream = NULL; mongoc_cluster_t *cluster; mongoc_client_session_t *cs; bson_t reply_local; bson_t *reply_ptr; int32_t wire_version; int32_t wc_wire_version; bool reply_initialized = false; bool ret = false; ENTRY; BSON_ASSERT (client); BSON_ASSERT (db_name); BSON_ASSERT (command); command_name = _mongoc_get_command_name (command); cluster = &client->cluster; reply_ptr = reply ? reply : &reply_local; mongoc_cmd_parts_init (&parts, client, db_name, flags, command); parts.is_read_command = (mode & MONGOC_CMD_READ); parts.is_write_command = (mode & MONGOC_CMD_WRITE); if (!_mongoc_read_write_opts_parse (client, opts, &read_write_opts, error)) { GOTO (done); } cs = read_write_opts.client_session; if (!command_name) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Empty command document"); GOTO (done); } if (_mongoc_client_session_in_txn (read_write_opts.client_session)) { if ((mode == MONGOC_CMD_READ || mode == MONGOC_CMD_RAW) && !IS_PREF_PRIMARY (user_prefs)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Read preference in a transaction must be primary"); GOTO (done); } if (!bson_empty (&read_write_opts.readConcern)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set read concern after starting transaction"); GOTO (done); } if (read_write_opts.writeConcern && strcmp (command_name, "commitTransaction") != 0 && strcmp (command_name, "abortTransaction") != 0) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set write concern after starting transaction"); GOTO (done); } } if (mode == MONGOC_CMD_READ || mode == MONGOC_CMD_RAW) { /* NULL read pref is ok */ if (!_mongoc_read_prefs_validate (prefs, error)) { GOTO (done); } parts.read_prefs = prefs; } else { /* this is a command that writes */ prefs = NULL; } if (read_write_opts.serverId) { /* "serverId" passed in opts */ server_stream = mongoc_cluster_stream_for_server (cluster, read_write_opts.serverId, true /* reconnect ok */, cs, reply_ptr, error); if (server_stream && server_stream->sd->type != MONGOC_SERVER_MONGOS) { parts.user_query_flags |= MONGOC_QUERY_SLAVE_OK; } } else if (parts.is_write_command) { server_stream = mongoc_cluster_stream_for_writes (cluster, cs, reply_ptr, error); } else { server_stream = mongoc_cluster_stream_for_reads (cluster, prefs, cs, reply_ptr, error); } if (!server_stream) { /* stream_for_reads/writes/server has initialized reply */ reply_initialized = true; GOTO (done); } wire_version = server_stream->sd->max_wire_version; if (!mongoc_cmd_parts_append_read_write ( &parts, &read_write_opts, wire_version, error)) { GOTO (done); } if (mode & MONGOC_CMD_WRITE) { 
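/* The checks below gate an explicit writeConcern from @opts on the selected
 * server's wire version (findAndModify has its own minimum), and otherwise
 * fall back to @default_wc.  Caller-side sketch of supplying a write concern
 * through @opts via the public wrapper -- illustrative only; "client", "cmd",
 * "reply" and "error" are assumed to exist:
 *
 *    bson_t opts = BSON_INITIALIZER;
 *    mongoc_write_concern_t *wc = mongoc_write_concern_new ();
 *    mongoc_write_concern_set_w (wc, MONGOC_WRITE_CONCERN_W_MAJORITY);
 *    mongoc_write_concern_append (wc, &opts);
 *    mongoc_client_write_command_with_opts (
 *       client, "db", cmd, &opts, &reply, &error);
 *    mongoc_write_concern_destroy (wc);
 *    bson_destroy (&opts);
 */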
wc_wire_version = !strcasecmp (command_name, "findandmodify") ? WIRE_VERSION_FAM_WRITE_CONCERN : WIRE_VERSION_CMD_WRITE_CONCERN; if (read_write_opts.write_concern_owned && wire_version < wc_wire_version) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "\"%s\" command does not support writeConcern with " "wire version %d, wire version %d is required", command_name, wire_version, wc_wire_version); GOTO (done); } /* use default write concern unless it's in opts */ if (!mongoc_write_concern_is_default (default_wc) && !read_write_opts.write_concern_owned && wire_version >= wc_wire_version) { if (!mongoc_cmd_parts_set_write_concern ( &parts, default_wc, wire_version, error)) { GOTO (done); } } } /* use default read concern for read command, unless it's in opts */ if ((mode & MONGOC_CMD_READ) && bson_empty (&read_write_opts.readConcern)) { if (!mongoc_cmd_parts_set_read_concern ( &parts, default_rc, wire_version, error)) { GOTO (done); } } ret = _mongoc_client_command_with_stream ( client, &parts, user_prefs, server_stream, reply_ptr, error); reply_initialized = true; if (ret && (mode & MONGOC_CMD_WRITE)) { ret = !_mongoc_parse_wc_err (reply_ptr, error); } done: if (reply_ptr == &reply_local) { if (reply_initialized) { bson_destroy (reply_ptr); } } else if (!reply_initialized) { _mongoc_bson_init_if_set (reply); } if (server_stream) { mongoc_server_stream_cleanup (server_stream); } mongoc_cmd_parts_cleanup (&parts); _mongoc_read_write_opts_cleanup (&read_write_opts); RETURN (ret); } bool mongoc_client_read_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (client, db_name, command, MONGOC_CMD_READ, opts, MONGOC_QUERY_NONE, read_prefs, client->read_prefs, client->read_concern, client->write_concern, reply, error); } bool mongoc_client_write_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (client, db_name, command, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, client->read_prefs, client->read_concern, client->write_concern, reply, error); } bool mongoc_client_read_write_command_with_opts ( mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs /* IGNORED */, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (client, db_name, command, MONGOC_CMD_RW, opts, MONGOC_QUERY_NONE, read_prefs, client->read_prefs, client->read_concern, client->write_concern, reply, error); } bool mongoc_client_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (client, db_name, command, MONGOC_CMD_RAW, opts, MONGOC_QUERY_NONE, read_prefs, NULL, client->read_concern, client->write_concern, reply, error); } bool mongoc_client_command_simple_with_server_id ( mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, uint32_t server_id, bson_t *reply, bson_error_t *error) { mongoc_server_stream_t *server_stream; mongoc_cmd_parts_t parts; bool ret; ENTRY; BSON_ASSERT (client); BSON_ASSERT (db_name); BSON_ASSERT (command); if (!_mongoc_read_prefs_validate 
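/* Illustrative sketch for the "unless it's in opts" case handled earlier in
 * this file: a caller can override the default read concern per operation by
 * appending one to @opts before calling the public wrapper -- "client",
 * "cmd", "reply" and "error" are assumed to exist:
 *
 *    bson_t opts = BSON_INITIALIZER;
 *    mongoc_read_concern_t *rc = mongoc_read_concern_new ();
 *    mongoc_read_concern_set_level (rc, MONGOC_READ_CONCERN_LEVEL_LOCAL);
 *    mongoc_read_concern_append (rc, &opts);
 *    mongoc_client_read_command_with_opts (
 *       client, "db", cmd, NULL, &opts, &reply, &error);
 *    mongoc_read_concern_destroy (rc);
 *    bson_destroy (&opts);
 */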
(read_prefs, error)) { RETURN (false); } server_stream = mongoc_cluster_stream_for_server ( &client->cluster, server_id, true /* reconnect ok */, NULL, reply, error); if (server_stream) { mongoc_cmd_parts_init ( &parts, client, db_name, MONGOC_QUERY_NONE, command); parts.read_prefs = read_prefs; ret = _mongoc_client_command_with_stream ( client, &parts, read_prefs, server_stream, reply, error); mongoc_cmd_parts_cleanup (&parts); mongoc_server_stream_cleanup (server_stream); RETURN (ret); } else { /* stream_for_server initialized reply */ RETURN (false); } } static void _mongoc_client_prepare_killcursors_command (int64_t cursor_id, const char *collection, bson_t *command) { bson_t child; bson_append_utf8 (command, "killCursors", 11, collection, -1); bson_append_array_begin (command, "cursors", 7, &child); bson_append_int64 (&child, "0", 1, cursor_id); bson_append_array_end (command, &child); } void _mongoc_client_kill_cursor (mongoc_client_t *client, uint32_t server_id, int64_t cursor_id, int64_t operation_id, const char *db, const char *collection, mongoc_client_session_t *cs) { mongoc_server_stream_t *server_stream; ENTRY; BSON_ASSERT (client); BSON_ASSERT (cursor_id); /* don't attempt reconnect if server unavailable, and ignore errors */ server_stream = mongoc_cluster_stream_for_server ( &client->cluster, server_id, false /* reconnect_ok */, NULL, NULL, NULL); if (!server_stream) { return; } if (db && collection && server_stream->sd->max_wire_version >= WIRE_VERSION_KILLCURSORS_CMD) { _mongoc_client_killcursors_command ( &client->cluster, server_stream, cursor_id, db, collection, cs); } else { _mongoc_client_op_killcursors (&client->cluster, server_stream, cursor_id, operation_id, db, collection); } mongoc_server_stream_cleanup (server_stream); EXIT; } static void _mongoc_client_monitor_op_killcursors (mongoc_cluster_t *cluster, mongoc_server_stream_t *server_stream, int64_t cursor_id, int64_t operation_id, const char *db, const char *collection) { bson_t doc; mongoc_client_t *client; mongoc_apm_command_started_t event; ENTRY; client = cluster->client; if (!client->apm_callbacks.started) { return; } bson_init (&doc); _mongoc_client_prepare_killcursors_command (cursor_id, collection, &doc); mongoc_apm_command_started_init (&event, &doc, db, "killCursors", cluster->request_id, operation_id, &server_stream->sd->host, server_stream->sd->id, client->apm_context); client->apm_callbacks.started (&event); mongoc_apm_command_started_cleanup (&event); bson_destroy (&doc); EXIT; } static void _mongoc_client_monitor_op_killcursors_succeeded ( mongoc_cluster_t *cluster, int64_t duration, mongoc_server_stream_t *server_stream, int64_t cursor_id, int64_t operation_id) { mongoc_client_t *client; bson_t doc; bson_t cursors_unknown; mongoc_apm_command_succeeded_t event; ENTRY; client = cluster->client; if (!client->apm_callbacks.succeeded) { EXIT; } /* fake server reply to killCursors command: {ok: 1, cursorsUnknown: [42]} */ bson_init (&doc); bson_append_int32 (&doc, "ok", 2, 1); bson_append_array_begin (&doc, "cursorsUnknown", 14, &cursors_unknown); bson_append_int64 (&cursors_unknown, "0", 1, cursor_id); bson_append_array_end (&doc, &cursors_unknown); mongoc_apm_command_succeeded_init (&event, duration, &doc, "killCursors", cluster->request_id, operation_id, &server_stream->sd->host, server_stream->sd->id, client->apm_context); client->apm_callbacks.succeeded (&event); mongoc_apm_command_succeeded_cleanup (&event); bson_destroy (&doc); } static void _mongoc_client_monitor_op_killcursors_failed ( 
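/* The monitor helpers in this file surface the legacy OP_KILL_CURSORS round
 * trip to application performance monitoring (APM) callbacks.  Sketch of
 * registering such callbacks on a single-threaded client -- illustrative
 * only; "on_cmd_started" is a hypothetical callback name:
 *
 *    static void
 *    on_cmd_started (const mongoc_apm_command_started_t *event)
 *    {
 *       printf ("started: %s\n",
 *               mongoc_apm_command_started_get_command_name (event));
 *    }
 *
 *    mongoc_apm_callbacks_t *cbs = mongoc_apm_callbacks_new ();
 *    mongoc_apm_set_command_started_cb (cbs, on_cmd_started);
 *    mongoc_client_set_apm_callbacks (client, cbs, NULL);
 *    mongoc_apm_callbacks_destroy (cbs);
 */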
mongoc_cluster_t *cluster, int64_t duration, mongoc_server_stream_t *server_stream, const bson_error_t *error, int64_t operation_id) { mongoc_client_t *client; bson_t doc; mongoc_apm_command_failed_t event; ENTRY; client = cluster->client; if (!client->apm_callbacks.failed) { EXIT; } /* fake server reply to killCursors command: {ok: 0} */ bson_init (&doc); bson_append_int32 (&doc, "ok", 2, 0); mongoc_apm_command_failed_init (&event, duration, "killCursors", error, &doc, cluster->request_id, operation_id, &server_stream->sd->host, server_stream->sd->id, client->apm_context); client->apm_callbacks.failed (&event); mongoc_apm_command_failed_cleanup (&event); bson_destroy (&doc); } static void _mongoc_client_op_killcursors (mongoc_cluster_t *cluster, mongoc_server_stream_t *server_stream, int64_t cursor_id, int64_t operation_id, const char *db, const char *collection) { int64_t started; mongoc_rpc_t rpc = {{0}}; bson_error_t error; bool has_ns; bool r; /* called by old mongoc_client_kill_cursor without db/collection? */ has_ns = (db && collection); started = bson_get_monotonic_time (); ++cluster->request_id; rpc.header.msg_len = 0; rpc.header.request_id = cluster->request_id; rpc.header.response_to = 0; rpc.header.opcode = MONGOC_OPCODE_KILL_CURSORS; rpc.kill_cursors.zero = 0; rpc.kill_cursors.cursors = &cursor_id; rpc.kill_cursors.n_cursors = 1; if (has_ns) { _mongoc_client_monitor_op_killcursors ( cluster, server_stream, cursor_id, operation_id, db, collection); } r = mongoc_cluster_legacy_rpc_sendv_to_server ( cluster, &rpc, server_stream, &error); if (has_ns) { if (r) { _mongoc_client_monitor_op_killcursors_succeeded ( cluster, bson_get_monotonic_time () - started, server_stream, cursor_id, operation_id); } else { _mongoc_client_monitor_op_killcursors_failed ( cluster, bson_get_monotonic_time () - started, server_stream, &error, operation_id); } } } static void _mongoc_client_killcursors_command (mongoc_cluster_t *cluster, mongoc_server_stream_t *server_stream, int64_t cursor_id, const char *db, const char *collection, mongoc_client_session_t *cs) { bson_t command = BSON_INITIALIZER; mongoc_cmd_parts_t parts; ENTRY; _mongoc_client_prepare_killcursors_command (cursor_id, collection, &command); mongoc_cmd_parts_init ( &parts, cluster->client, db, MONGOC_QUERY_SLAVE_OK, &command); parts.assembled.operation_id = ++cluster->operation_id; mongoc_cmd_parts_set_session (&parts, cs); if (mongoc_cmd_parts_assemble (&parts, server_stream, NULL)) { /* Find, getMore And killCursors Commands Spec: "The result from the * killCursors command MAY be safely ignored." */ (void) mongoc_cluster_run_command_monitored ( cluster, &parts.assembled, NULL, NULL); } mongoc_cmd_parts_cleanup (&parts); bson_destroy (&command); EXIT; } /* *-------------------------------------------------------------------------- * * mongoc_client_kill_cursor -- * * Destroy a cursor on the server. * * NOTE: this is only reliable when connected to a single mongod or * mongos. If connected to a replica set, the driver attempts to * kill the cursor on the primary. If connected to multiple mongoses * the kill-cursors message is sent to a *random* mongos. * * If no primary, mongos, or standalone server is known, return * without attempting to reconnect. * * Returns: * None. * * Side effects: * None. 
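 *
 * This entry point is deprecated in mongoc-client.h; cursors are normally
 * cleaned up by mongoc_cursor_destroy (), which targets the server that
 * owns the cursor.  Illustrative call, with cursor_id a previously
 * obtained 64-bit cursor id:
 *
 *    mongoc_client_kill_cursor (client, cursor_id);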
* *-------------------------------------------------------------------------- */ void mongoc_client_kill_cursor (mongoc_client_t *client, int64_t cursor_id) { mongoc_topology_t *topology; mongoc_server_description_t *selected_server; mongoc_read_prefs_t *read_prefs; bson_error_t error; uint32_t server_id = 0; topology = client->topology; read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); bson_mutex_lock (&topology->mutex); if (!mongoc_topology_compatible (&topology->description, NULL, &error)) { MONGOC_ERROR ("Could not kill cursor: %s", error.message); bson_mutex_unlock (&topology->mutex); mongoc_read_prefs_destroy (read_prefs); return; } /* see if there's a known writable server - do no I/O or retries */ selected_server = mongoc_topology_description_select (&topology->description, MONGOC_SS_WRITE, read_prefs, topology->local_threshold_msec); if (selected_server) { server_id = selected_server->id; } bson_mutex_unlock (&topology->mutex); if (server_id) { _mongoc_client_kill_cursor (client, server_id, cursor_id, 0 /* operation_id */, NULL /* db */, NULL /* collection */, NULL /* session */); } else { MONGOC_INFO ("No server available for mongoc_client_kill_cursor"); } mongoc_read_prefs_destroy (read_prefs); } char ** mongoc_client_get_database_names (mongoc_client_t *client, bson_error_t *error) { return mongoc_client_get_database_names_with_opts (client, NULL, error); } char ** mongoc_client_get_database_names_with_opts (mongoc_client_t *client, const bson_t *opts, bson_error_t *error) { bson_iter_t iter; const char *name; char **ret = NULL; int i = 0; mongoc_cursor_t *cursor; const bson_t *doc; bson_t cmd = BSON_INITIALIZER; BSON_ASSERT (client); BSON_APPEND_INT32 (&cmd, "listDatabases", 1); BSON_APPEND_BOOL (&cmd, "nameOnly", true); /* ignore client read prefs */ cursor = _mongoc_cursor_array_new (client, "admin", &cmd, opts, "databases"); bson_destroy (&cmd); while (mongoc_cursor_next (cursor, &doc)) { if (bson_iter_init (&iter, doc) && bson_iter_find (&iter, "name") && BSON_ITER_HOLDS_UTF8 (&iter) && (name = bson_iter_utf8 (&iter, NULL))) { ret = (char **) bson_realloc (ret, sizeof (char *) * (i + 2)); ret[i] = bson_strdup (name); ret[++i] = NULL; } } if (!ret && !mongoc_cursor_error (cursor, error)) { ret = (char **) bson_malloc0 (sizeof (void *)); } mongoc_cursor_destroy (cursor); return ret; } mongoc_cursor_t * mongoc_client_find_databases (mongoc_client_t *client, bson_error_t *error) { /* existing bug in this deprecated API: error pointer is unused */ return mongoc_client_find_databases_with_opts (client, NULL); } mongoc_cursor_t * mongoc_client_find_databases_with_opts (mongoc_client_t *client, const bson_t *opts) { bson_t cmd = BSON_INITIALIZER; mongoc_cursor_t *cursor; BSON_ASSERT (client); BSON_APPEND_INT32 (&cmd, "listDatabases", 1); cursor = _mongoc_cursor_array_new (client, "admin", &cmd, opts, "databases"); bson_destroy (&cmd); return cursor; } int32_t mongoc_client_get_max_message_size (mongoc_client_t *client) /* IN */ { BSON_ASSERT (client); return mongoc_cluster_get_max_msg_size (&client->cluster); } int32_t mongoc_client_get_max_bson_size (mongoc_client_t *client) /* IN */ { BSON_ASSERT (client); return mongoc_cluster_get_max_bson_obj_size (&client->cluster); } bool mongoc_client_get_server_status (mongoc_client_t *client, /* IN */ mongoc_read_prefs_t *read_prefs, /* IN */ bson_t *reply, /* OUT */ bson_error_t *error) /* OUT */ { bson_t cmd = BSON_INITIALIZER; bool ret = false; BSON_ASSERT (client); BSON_APPEND_INT32 (&cmd, "serverStatus", 1); ret = 
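/* Sketch of the database-name helpers defined above -- illustrative only;
 * "client" is assumed to exist and the returned vector is freed with
 * bson_strfreev:
 *
 *    bson_error_t err;
 *    char **names =
 *       mongoc_client_get_database_names_with_opts (client, NULL, &err);
 *    if (names) {
 *       for (char **p = names; *p; p++) {
 *          printf ("%s\n", *p);
 *       }
 *       bson_strfreev (names);
 *    } else {
 *       fprintf (stderr, "listDatabases failed: %s\n", err.message);
 *    }
 */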
mongoc_client_command_simple ( client, "admin", &cmd, read_prefs, reply, error); bson_destroy (&cmd); return ret; } void mongoc_client_set_stream_initiator (mongoc_client_t *client, mongoc_stream_initiator_t initiator, void *user_data) { BSON_ASSERT (client); if (!initiator) { initiator = mongoc_client_default_stream_initiator; user_data = client; } else { MONGOC_DEBUG ("Using custom stream initiator."); } client->initiator = initiator; client->initiator_data = user_data; if (client->topology->single_threaded) { mongoc_topology_scanner_set_stream_initiator ( client->topology->scanner, initiator, user_data); } } bool _mongoc_client_set_apm_callbacks_private (mongoc_client_t *client, mongoc_apm_callbacks_t *callbacks, void *context) { if (callbacks) { memcpy ( &client->apm_callbacks, callbacks, sizeof (mongoc_apm_callbacks_t)); } else { memset (&client->apm_callbacks, 0, sizeof (mongoc_apm_callbacks_t)); } client->apm_context = context; mongoc_topology_set_apm_callbacks (client->topology, callbacks, context); return true; } bool mongoc_client_set_apm_callbacks (mongoc_client_t *client, mongoc_apm_callbacks_t *callbacks, void *context) { if (!client->topology->single_threaded) { MONGOC_ERROR ("Cannot set callbacks on a pooled client, use " "mongoc_client_pool_set_apm_callbacks"); return false; } return _mongoc_client_set_apm_callbacks_private (client, callbacks, context); } mongoc_server_description_t * mongoc_client_get_server_description (mongoc_client_t *client, uint32_t server_id) { /* the error info isn't useful */ return mongoc_topology_server_by_id (client->topology, server_id, NULL); } mongoc_server_description_t ** mongoc_client_get_server_descriptions (const mongoc_client_t *client, size_t *n /* OUT */) { mongoc_topology_t *topology; mongoc_server_description_t **sds; BSON_ASSERT (client); BSON_ASSERT (n); topology = client->topology; /* in case the client is pooled */ bson_mutex_lock (&topology->mutex); sds = mongoc_topology_description_get_servers (&topology->description, n); bson_mutex_unlock (&topology->mutex); return sds; } void mongoc_server_descriptions_destroy_all (mongoc_server_description_t **sds, size_t n) { size_t i; for (i = 0; i < n; ++i) { mongoc_server_description_destroy (sds[i]); } bson_free (sds); } mongoc_server_description_t * mongoc_client_select_server (mongoc_client_t *client, bool for_writes, const mongoc_read_prefs_t *prefs, bson_error_t *error) { mongoc_ss_optype_t optype = for_writes ? 
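/* Sketch of inspecting the topology via the accessors defined above --
 * illustrative only; "client" is assumed to be an existing mongoc_client_t:
 *
 *    size_t n;
 *    mongoc_server_description_t **sds =
 *       mongoc_client_get_server_descriptions (client, &n);
 *    for (size_t i = 0; i < n; i++) {
 *       printf ("%s: %s\n",
 *               mongoc_server_description_host (sds[i])->host_and_port,
 *               mongoc_server_description_type (sds[i]));
 *    }
 *    mongoc_server_descriptions_destroy_all (sds, n);
 */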
MONGOC_SS_WRITE : MONGOC_SS_READ; mongoc_server_description_t *sd; if (for_writes && prefs) { bson_set_error (error, MONGOC_ERROR_SERVER_SELECTION, MONGOC_ERROR_SERVER_SELECTION_FAILURE, "Cannot use read preferences with for_writes = true"); return NULL; } if (!_mongoc_read_prefs_validate (prefs, error)) { return NULL; } sd = mongoc_topology_select (client->topology, optype, prefs, error); if (!sd) { return NULL; } if (mongoc_cluster_check_interval (&client->cluster, sd->id)) { /* check not required, or it succeeded */ return sd; } /* check failed, retry once */ mongoc_server_description_destroy (sd); sd = mongoc_topology_select (client->topology, optype, prefs, error); if (sd) { return sd; } return NULL; } bool mongoc_client_set_error_api (mongoc_client_t *client, int32_t version) { if (!client->topology->single_threaded) { MONGOC_ERROR ("Cannot set Error API Version on a pooled client, use " "mongoc_client_pool_set_error_api"); return false; } if (version != MONGOC_ERROR_API_VERSION_LEGACY && version != MONGOC_ERROR_API_VERSION_2) { MONGOC_ERROR ("Unsupported Error API Version: %" PRId32, version); return false; } if (client->error_api_set) { MONGOC_ERROR ("Can only set Error API Version once"); return false; } client->error_api_version = version; client->error_api_set = true; return true; } bool mongoc_client_set_appname (mongoc_client_t *client, const char *appname) { if (!client->topology->single_threaded) { MONGOC_ERROR ("Cannot call set_appname on a client from a pool"); return false; } return _mongoc_topology_set_appname (client->topology, appname); } mongoc_server_session_t * _mongoc_client_pop_server_session (mongoc_client_t *client, bson_error_t *error) { return _mongoc_topology_pop_server_session (client->topology, error); } /* *-------------------------------------------------------------------------- * * _mongoc_client_lookup_session -- * * Retrieve a mongoc_client_session_t associated with @client_session_id. * Use this to find the "lsid" and "$clusterTime" to send in the server * command. * * Returns: * True on success, false on error and @error is set. Will return false * if the session is from an outdated client generation, a holdover * from before a call to mongoc_client_reset. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool _mongoc_client_lookup_session (const mongoc_client_t *client, uint32_t client_session_id, mongoc_client_session_t **cs /* OUT */, bson_error_t *error /* OUT */) { ENTRY; *cs = mongoc_set_get (client->client_sessions, client_session_id); if (*cs) { RETURN (true); } bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid sessionId"); RETURN (false); } void _mongoc_client_unregister_session (mongoc_client_t *client, mongoc_client_session_t *session) { mongoc_set_rm (client->client_sessions, session->client_session_id); } void _mongoc_client_push_server_session (mongoc_client_t *client, mongoc_server_session_t *server_session) { _mongoc_topology_push_server_session (client->topology, server_session); } /* *-------------------------------------------------------------------------- * * mongoc_client_end_sessions -- * * End all server sessions in the topology's server session pool. * Don't block long: if server selection or connecting fails, quit. * * The server session pool becomes invalid, but may not be empty. * Destroy the topology after this without using any sessions. 
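 *
 * The command sent below has, per the driver sessions specification, the
 * illustrative shape
 *
 *    { "endSessions" : [ { "id" : <UUID> }, ... ] }
 *
 * and is sent best-effort: failures are logged as warnings and otherwise
 * ignored.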
* *-------------------------------------------------------------------------- */ void _mongoc_client_end_sessions (mongoc_client_t *client) { mongoc_topology_t *t = client->topology; mongoc_read_prefs_t *prefs; bson_error_t error; uint32_t server_id; bson_t cmd; mongoc_server_stream_t *stream; mongoc_cmd_parts_t parts; mongoc_cluster_t *cluster = &client->cluster; bool r; if (t->session_pool) { prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY_PREFERRED); server_id = mongoc_topology_select_server_id (t, MONGOC_SS_READ, prefs, &error); mongoc_read_prefs_destroy (prefs); if (!server_id) { MONGOC_WARNING ("Couldn't send \"endSessions\": %s", error.message); return; } stream = mongoc_cluster_stream_for_server ( cluster, server_id, false /* reconnect_ok */, NULL, NULL, &error); if (!stream) { MONGOC_WARNING ("Couldn't send \"endSessions\": %s", error.message); return; } /* end sessions in chunks */ while (_mongoc_topology_end_sessions_cmd (t, &cmd)) { mongoc_cmd_parts_init ( &parts, client, "admin", MONGOC_QUERY_SLAVE_OK, &cmd); parts.assembled.operation_id = ++cluster->operation_id; parts.prohibit_lsid = true; r = mongoc_cmd_parts_assemble (&parts, stream, &error); if (!r) { MONGOC_WARNING ("Couldn't construct \"endSessions\" command: %s", error.message); } else { r = mongoc_cluster_run_command_monitored ( cluster, &parts.assembled, NULL, &error); if (!r) { MONGOC_WARNING ("Couldn't send \"endSessions\": %s", error.message); } } bson_destroy (&cmd); mongoc_cmd_parts_cleanup (&parts); } bson_destroy (&cmd); mongoc_server_stream_cleanup (stream); } } void mongoc_client_reset (mongoc_client_t *client) { BSON_ASSERT (client); client->generation++; /* Client sessions are owned and destroyed by the user, but we keep local pointers to them for reference. On reset, clear our local set without destroying the sessions or calling endSessions. client_sessions has no dtor, so it won't destroy its items. Destroying the local cache of client sessions here ensures they cannot be used by future operations--lookup for them will fail. */ mongoc_set_destroy (client->client_sessions); client->client_sessions = mongoc_set_new (8, NULL, NULL); /* Server sessions are owned by us, so we clear the pool on reset. */ _mongoc_topology_clear_session_pool (client->topology); mongoc_cluster_disconnect (&(client->cluster)); } mongoc_change_stream_t * mongoc_client_watch (mongoc_client_t *client, const bson_t *pipeline, const bson_t *opts) { return _mongoc_change_stream_new_from_client (client, pipeline, opts); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-client.h0000644000076500000240000002467513572250757024757 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLIENT_H #define MONGOC_CLIENT_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-apm.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-database.h" #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-index.h" #include "mongoc/mongoc-read-prefs.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-ssl.h" #endif #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-uri.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-read-concern.h" #include "mongoc/mongoc-server-description.h" BSON_BEGIN_DECLS #define MONGOC_NAMESPACE_MAX 128 #ifndef MONGOC_DEFAULT_CONNECTTIMEOUTMS #define MONGOC_DEFAULT_CONNECTTIMEOUTMS (10 * 1000L) #endif #ifndef MONGOC_DEFAULT_SOCKETTIMEOUTMS /* * NOTE: The default socket timeout for connections is 5 minutes. This * means that if your MongoDB server dies or becomes unavailable * it will take 5 minutes to detect this. * * You can change this by providing sockettimeoutms= in your * connection URI. */ #define MONGOC_DEFAULT_SOCKETTIMEOUTMS (1000L * 60L * 5L) #endif /** * mongoc_client_t: * * The mongoc_client_t structure maintains information about a connection to * a MongoDB server. */ typedef struct _mongoc_client_t mongoc_client_t; typedef struct _mongoc_client_session_t mongoc_client_session_t; typedef struct _mongoc_session_opt_t mongoc_session_opt_t; typedef struct _mongoc_transaction_opt_t mongoc_transaction_opt_t; /** * mongoc_stream_initiator_t: * @uri: The uri and options for the stream. * @host: The host and port (or UNIX domain socket path) to connect to. * @user_data: The pointer passed to mongoc_client_set_stream_initiator. * @error: A location for an error. * * Creates a new mongoc_stream_t for the host and port. Begin a * non-blocking connect and return immediately. * * This can be used by language bindings to create network transports other * than those built into libmongoc. An example of such would be the streams * API provided by PHP. * * Returns: A newly allocated mongoc_stream_t or NULL on failure. 
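 *
 * Illustrative skeleton of a custom initiator and its registration (a
 * sketch only -- "my_initiator" is a hypothetical name and the body merely
 * reports failure rather than opening a real transport):
 *
 *    static mongoc_stream_t *
 *    my_initiator (const mongoc_uri_t *uri,
 *                  const mongoc_host_list_t *host,
 *                  void *user_data,
 *                  bson_error_t *error)
 *    {
 *       bson_set_error (error,
 *                       MONGOC_ERROR_STREAM,
 *                       MONGOC_ERROR_STREAM_CONNECT,
 *                       "custom transport for %s not implemented",
 *                       host->host_and_port);
 *       return NULL;
 *    }
 *
 *    mongoc_client_set_stream_initiator (client, my_initiator, NULL);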
*/ typedef mongoc_stream_t *(*mongoc_stream_initiator_t) ( const mongoc_uri_t *uri, const mongoc_host_list_t *host, void *user_data, bson_error_t *error); MONGOC_EXPORT (mongoc_client_t *) mongoc_client_new (const char *uri_string); MONGOC_EXPORT (mongoc_client_t *) mongoc_client_new_from_uri (const mongoc_uri_t *uri); MONGOC_EXPORT (const mongoc_uri_t *) mongoc_client_get_uri (const mongoc_client_t *client); MONGOC_EXPORT (void) mongoc_client_set_stream_initiator (mongoc_client_t *client, mongoc_stream_initiator_t initiator, void *user_data); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_client_command (mongoc_client_t *client, const char *db_name, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (void) mongoc_client_kill_cursor (mongoc_client_t *client, int64_t cursor_id) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (bool) mongoc_client_command_simple (mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_read_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_write_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_read_write_command_with_opts ( mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs /* IGNORED */, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_command_with_opts (mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_command_simple_with_server_id ( mongoc_client_t *client, const char *db_name, const bson_t *command, const mongoc_read_prefs_t *read_prefs, uint32_t server_id, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (void) mongoc_client_destroy (mongoc_client_t *client); MONGOC_EXPORT (mongoc_client_session_t *) mongoc_client_start_session (mongoc_client_t *client, const mongoc_session_opt_t *opts, bson_error_t *error) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (mongoc_database_t *) mongoc_client_get_database (mongoc_client_t *client, const char *name); MONGOC_EXPORT (mongoc_database_t *) mongoc_client_get_default_database (mongoc_client_t *client); MONGOC_EXPORT (mongoc_gridfs_t *) mongoc_client_get_gridfs (mongoc_client_t *client, const char *db, const char *prefix, bson_error_t *error); MONGOC_EXPORT (mongoc_collection_t *) mongoc_client_get_collection (mongoc_client_t *client, const char *db, const char *collection); MONGOC_EXPORT (char **) mongoc_client_get_database_names (mongoc_client_t *client, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_client_get_database_names_with_opts); MONGOC_EXPORT (char **) mongoc_client_get_database_names_with_opts (mongoc_client_t *client, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_client_find_databases (mongoc_client_t *client, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_client_find_databases_with_opts); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_client_find_databases_with_opts (mongoc_client_t *client, 
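/* Minimal end-to-end sketch exercising the declarations above (illustrative
 * only; assumes mongoc_init has been called and error handling is trimmed):
 *
 *    mongoc_client_t *client = mongoc_client_new ("mongodb://localhost:27017");
 *    bson_t *ping = BCON_NEW ("ping", BCON_INT32 (1));
 *    bson_t reply;
 *    bson_error_t error;
 *
 *    if (!mongoc_client_command_simple (
 *           client, "admin", ping, NULL, &reply, &error)) {
 *       fprintf (stderr, "ping failed: %s\n", error.message);
 *    }
 *
 *    bson_destroy (&reply);
 *    bson_destroy (ping);
 *    mongoc_client_destroy (client);
 */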
const bson_t *opts); MONGOC_EXPORT (bool) mongoc_client_get_server_status (mongoc_client_t *client, mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (int32_t) mongoc_client_get_max_message_size (mongoc_client_t *client) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (int32_t) mongoc_client_get_max_bson_size (mongoc_client_t *client) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (const mongoc_write_concern_t *) mongoc_client_get_write_concern (const mongoc_client_t *client); MONGOC_EXPORT (void) mongoc_client_set_write_concern (mongoc_client_t *client, const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (const mongoc_read_concern_t *) mongoc_client_get_read_concern (const mongoc_client_t *client); MONGOC_EXPORT (void) mongoc_client_set_read_concern (mongoc_client_t *client, const mongoc_read_concern_t *read_concern); MONGOC_EXPORT (const mongoc_read_prefs_t *) mongoc_client_get_read_prefs (const mongoc_client_t *client); MONGOC_EXPORT (void) mongoc_client_set_read_prefs (mongoc_client_t *client, const mongoc_read_prefs_t *read_prefs); #ifdef MONGOC_ENABLE_SSL MONGOC_EXPORT (void) mongoc_client_set_ssl_opts (mongoc_client_t *client, const mongoc_ssl_opt_t *opts); #endif MONGOC_EXPORT (bool) mongoc_client_set_apm_callbacks (mongoc_client_t *client, mongoc_apm_callbacks_t *callbacks, void *context); MONGOC_EXPORT (mongoc_server_description_t *) mongoc_client_get_server_description (mongoc_client_t *client, uint32_t server_id); MONGOC_EXPORT (mongoc_server_description_t **) mongoc_client_get_server_descriptions (const mongoc_client_t *client, size_t *n); MONGOC_EXPORT (void) mongoc_server_descriptions_destroy_all (mongoc_server_description_t **sds, size_t n); MONGOC_EXPORT (mongoc_server_description_t *) mongoc_client_select_server (mongoc_client_t *client, bool for_writes, const mongoc_read_prefs_t *prefs, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_client_set_error_api (mongoc_client_t *client, int32_t version); MONGOC_EXPORT (bool) mongoc_client_set_appname (mongoc_client_t *client, const char *appname); MONGOC_EXPORT (mongoc_change_stream_t *) mongoc_client_watch (mongoc_client_t *client, const bson_t *pipeline, const bson_t *opts); MONGOC_EXPORT (void) mongoc_client_reset (mongoc_client_t *client); BSON_END_DECLS #endif /* MONGOC_CLIENT_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster-cyrus-private.h0000644000076500000240000000211413572250757027755 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLUSTER_CYRUS_PRIVATE_H #define MONGOC_CLUSTER_CYRUS_PRIVATE_H #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-cluster-private.h" #include bool _mongoc_cluster_auth_node_cyrus (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error); #endif /* MONGOC_CLUSTER_CYRUS_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster-cyrus.c0000644000076500000240000001000213572250757026273 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SASL_CYRUS #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-cyrus-private.h" #include "mongoc/mongoc-cluster-cyrus-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-trace-private.h" bool _mongoc_cluster_auth_node_cyrus (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { mongoc_cmd_parts_t parts; uint32_t buflen = 0; mongoc_cyrus_t sasl; bson_iter_t iter; bool ret = false; const char *tmpstr; uint8_t buf[4096] = {0}; bson_t cmd; bson_t reply; int conv_id = 0; mongoc_server_stream_t *server_stream; BSON_ASSERT (cluster); BSON_ASSERT (stream); if (!_mongoc_cyrus_new_from_cluster ( &sasl, cluster, stream, sd->host.host, error)) { return false; } for (;;) { mongoc_cmd_parts_init ( &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd); if (!_mongoc_cyrus_step ( &sasl, buf, buflen, buf, sizeof buf, &buflen, error)) { goto failure; } bson_init (&cmd); if (sasl.step == 1) { _mongoc_cluster_build_sasl_start ( &cmd, sasl.credentials.mechanism, (const char *) buf, buflen); } else { _mongoc_cluster_build_sasl_continue ( &cmd, conv_id, (const char *) buf, buflen); } TRACE ("SASL: authenticating (step %d)", sasl.step); server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, sd->id, stream, error); if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) { mongoc_server_stream_cleanup (server_stream); bson_destroy (&cmd); goto failure; } if (!mongoc_cluster_run_command_private ( cluster, &parts.assembled, &reply, error)) { mongoc_server_stream_cleanup (server_stream); bson_destroy (&cmd); bson_destroy (&reply); goto failure; } mongoc_server_stream_cleanup (server_stream); bson_destroy (&cmd); if (bson_iter_init_find (&iter, &reply, "done") && bson_iter_as_bool (&iter)) { bson_destroy (&reply); break; } conv_id = _mongoc_cluster_get_conversation_id (&reply); if (!bson_iter_init_find (&iter, &reply, "payload") || !BSON_ITER_HOLDS_UTF8 (&iter)) { MONGOC_DEBUG ("SASL: authentication failed"); bson_destroy (&reply); bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "Received invalid SASL reply from MongoDB server."); goto failure; } tmpstr = bson_iter_utf8 (&iter, &buflen); if (buflen > sizeof buf) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "SASL reply from MongoDB 
is too large."); bson_destroy (&reply); goto failure; } memcpy (buf, tmpstr, buflen); bson_destroy (&reply); mongoc_cmd_parts_cleanup (&parts); } TRACE ("%s", "SASL: authenticated"); ret = true; failure: _mongoc_cyrus_destroy (&sasl); mongoc_cmd_parts_cleanup (&parts); return ret; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster-private.h0000644000076500000240000001334013572250757026615 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLUSTER_PRIVATE_H #define MONGOC_CLUSTER_PRIVATE_H #include #include "mongoc/mongoc-array-private.h" #include "mongoc/mongoc-buffer-private.h" #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-list-private.h" #include "mongoc/mongoc-opcode.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-server-stream-private.h" #include "mongoc/mongoc-set-private.h" #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-topology-private.h" #include "mongoc/mongoc-topology-description-private.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-scram-private.h" #include "mongoc/mongoc-cmd-private.h" BSON_BEGIN_DECLS typedef struct _mongoc_cluster_node_t { mongoc_stream_t *stream; char *connection_address; int32_t max_wire_version; int32_t min_wire_version; int32_t max_write_batch_size; int32_t max_bson_obj_size; int32_t max_msg_size; int64_t timestamp; } mongoc_cluster_node_t; typedef struct _mongoc_cluster_t { int64_t operation_id; uint32_t request_id; uint32_t sockettimeoutms; uint32_t socketcheckintervalms; mongoc_uri_t *uri; unsigned requires_auth : 1; mongoc_client_t *client; mongoc_set_t *nodes; mongoc_array_t iov; mongoc_scram_cache_t *scram_cache; } mongoc_cluster_t; void mongoc_cluster_init (mongoc_cluster_t *cluster, const mongoc_uri_t *uri, void *client); void mongoc_cluster_destroy (mongoc_cluster_t *cluster); void mongoc_cluster_disconnect (mongoc_cluster_t *cluster); void mongoc_cluster_disconnect_node (mongoc_cluster_t *cluster, uint32_t id, bool invalidate, const bson_error_t *why); int32_t mongoc_cluster_get_max_bson_obj_size (mongoc_cluster_t *cluster); int32_t mongoc_cluster_get_max_msg_size (mongoc_cluster_t *cluster); size_t _mongoc_cluster_buffer_iovec (mongoc_iovec_t *iov, size_t iovcnt, int skip, char *buffer); bool mongoc_cluster_check_interval (mongoc_cluster_t *cluster, uint32_t server_id); bool mongoc_cluster_legacy_rpc_sendv_to_server ( mongoc_cluster_t *cluster, mongoc_rpc_t *rpcs, mongoc_server_stream_t *server_stream, bson_error_t *error); bool mongoc_cluster_try_recv (mongoc_cluster_t *cluster, mongoc_rpc_t *rpc, mongoc_buffer_t *buffer, mongoc_server_stream_t *server_stream, bson_error_t *error); mongoc_server_stream_t * mongoc_cluster_stream_for_reads (mongoc_cluster_t *cluster, const mongoc_read_prefs_t *read_prefs, mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error); mongoc_server_stream_t * mongoc_cluster_stream_for_writes 
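/* stream_for_reads / stream_for_writes perform server selection and hand
 * back a checked-out server stream; callers (e.g. in mongoc-client.c)
 * pair them with mongoc_cluster_run_command_monitored and release the
 * stream with mongoc_server_stream_cleanup. */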
(mongoc_cluster_t *cluster, mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error); mongoc_server_stream_t * mongoc_cluster_stream_for_server (mongoc_cluster_t *cluster, uint32_t server_id, bool reconnect_ok, mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error); bool mongoc_cluster_run_command_monitored (mongoc_cluster_t *cluster, mongoc_cmd_t *cmd, bson_t *reply, bson_error_t *error); bool mongoc_cluster_run_command_parts (mongoc_cluster_t *cluster, mongoc_server_stream_t *server_stream, mongoc_cmd_parts_t *parts, bson_t *reply, bson_error_t *error); bool mongoc_cluster_run_command_private (mongoc_cluster_t *cluster, mongoc_cmd_t *cmd, bson_t *reply, bson_error_t *error); void _mongoc_cluster_build_sasl_start (bson_t *cmd, const char *mechanism, const char *buf, uint32_t buflen); void _mongoc_cluster_build_sasl_continue (bson_t *cmd, int conv_id, const char *buf, uint32_t buflen); int _mongoc_cluster_get_conversation_id (const bson_t *reply); mongoc_server_stream_t * _mongoc_cluster_create_server_stream (mongoc_topology_t *topology, uint32_t server_id, mongoc_stream_t *stream, bson_error_t *error /* OUT */); BSON_END_DECLS #endif /* MONGOC_CLUSTER_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster-sasl-private.h0000644000076500000240000000210513572250757027552 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLUSTER_SASL_PRIVATE_H #define MONGOC_CLUSTER_SASL_PRIVATE_H #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-cluster-private.h" #include bool _mongoc_cluster_auth_node_sasl (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error); #endif /* MONGOC_CLUSTER_SASL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster-sasl.c0000644000076500000240000000650613572250757026106 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* for size_t */ #include #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-cluster-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-socket.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-util-private.h" #ifdef MONGOC_ENABLE_SASL #ifdef MONGOC_ENABLE_SASL_CYRUS #include "mongoc/mongoc-cluster-cyrus-private.h" #endif #ifdef MONGOC_ENABLE_SASL_SSPI #include "mongoc/mongoc-cluster-sspi-private.h" #endif void _mongoc_cluster_build_sasl_start (bson_t *cmd, const char *mechanism, const char *buf, uint32_t buflen) { BSON_APPEND_INT32 (cmd, "saslStart", 1); BSON_APPEND_UTF8 (cmd, "mechanism", "GSSAPI"); bson_append_utf8 (cmd, "payload", 7, buf, buflen); BSON_APPEND_INT32 (cmd, "autoAuthorize", 1); } void _mongoc_cluster_build_sasl_continue (bson_t *cmd, int conv_id, const char *buf, uint32_t buflen) { BSON_APPEND_INT32 (cmd, "saslContinue", 1); BSON_APPEND_INT32 (cmd, "conversationId", conv_id); bson_append_utf8 (cmd, "payload", 7, buf, buflen); } int _mongoc_cluster_get_conversation_id (const bson_t *reply) { bson_iter_t iter; if (bson_iter_init_find (&iter, reply, "conversationId") && BSON_ITER_HOLDS_INT32 (&iter)) { return bson_iter_int32 (&iter); } return 0; } #endif /* *-------------------------------------------------------------------------- * * _mongoc_cluster_auth_node_sasl -- * * Perform authentication for a cluster node using SASL. This is * only supported for GSSAPI at the moment. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * error may be set. * *-------------------------------------------------------------------------- */ bool _mongoc_cluster_auth_node_sasl (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { #ifndef MONGOC_ENABLE_SASL bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "The GSSAPI authentication mechanism requires libmongoc " "built with ENABLE_SASL"); return false; #elif defined(MONGOC_ENABLE_SASL_CYRUS) return _mongoc_cluster_auth_node_cyrus (cluster, stream, sd, error); #elif defined(MONGOC_ENABLE_SASL_SSPI) return _mongoc_cluster_auth_node_sspi (cluster, stream, sd, error); #endif } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster-sspi-private.h0000644000076500000240000000210513572250757027566 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CLUSTER_SSPI_PRIVATE_H #define MONGOC_CLUSTER_SSPI_PRIVATE_H #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-cluster-private.h" #include bool _mongoc_cluster_auth_node_sspi (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error); #endif /* MONGOC_CLUSTER_SSPI_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster-sspi.c0000644000076500000240000002246313572250757026122 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SASL_SSPI #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-cluster-sspi-private.h" #include "mongoc/mongoc-cluster-sasl-private.h" #include "mongoc/mongoc-sasl-private.h" #include "mongoc/mongoc-sspi-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-util-private.h" static mongoc_sspi_client_state_t * _mongoc_cluster_sspi_new (mongoc_uri_t *uri, mongoc_stream_t *stream, const char *hostname) { WCHAR *service; /* L"serviceName@hostname@REALM" */ const char *service_name = "mongodb"; ULONG flags = ISC_REQ_MUTUAL_AUTH; const char *service_realm = NULL; char *service_ascii = NULL; mongoc_sspi_client_state_t *state; size_t service_ascii_len; size_t tmp_creds_len; bson_t properties; bson_iter_t iter; char real_name[BSON_HOST_NAME_MAX + 1]; int service_len; WCHAR *pass = NULL; WCHAR *user = NULL; size_t user_len = 0; size_t pass_len = 0; int res; state = (mongoc_sspi_client_state_t *) bson_malloc0 (sizeof *state); _mongoc_sasl_set_properties (&state->sasl, uri); if (state->sasl.canonicalize_host_name && _mongoc_sasl_get_canonicalized_name ( stream, real_name, sizeof real_name)) { hostname = real_name; } /* service realm is an SSPI-specific feature */ if (mongoc_uri_get_mechanism_properties (uri, &properties) && bson_iter_init_find_case (&iter, &properties, "SERVICE_REALM") && BSON_ITER_HOLDS_UTF8 (&iter)) { service_realm = bson_iter_utf8 (&iter, NULL); service_ascii = bson_strdup_printf ("%s@%s@%s", service_name, hostname, service_realm); } else { service_ascii = bson_strdup_printf ("%s@%s", service_name, hostname); } service_ascii_len = strlen (service_ascii); /* this is donated to the sspi */ service = calloc (service_ascii_len + 1, sizeof (WCHAR)); service_len = MultiByteToWideChar (CP_UTF8, 0, service_ascii, (int) service_ascii_len, service, (int) service_ascii_len); service[service_len] = L'\0'; bson_free (service_ascii); if (state->sasl.pass) { tmp_creds_len = strlen (state->sasl.pass); /* this is donated to the sspi */ pass = calloc (tmp_creds_len + 1, sizeof (WCHAR)); pass_len = MultiByteToWideChar (CP_UTF8, 0, state->sasl.pass, (int) tmp_creds_len, pass, (int) tmp_creds_len); pass[pass_len] = L'\0'; } if (state->sasl.user) { tmp_creds_len = strlen (state->sasl.user); /* this is donated to the sspi */ user = calloc (tmp_creds_len + 1, sizeof (WCHAR)); user_len = MultiByteToWideChar (CP_UTF8, 0, state->sasl.user, 
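/* (Illustrative, outside the scope of this helper: the SERVICE_REALM and
 * CANONICALIZE_HOST_NAME properties consumed above are supplied through the
 * connection string, e.g.
 *
 *    mongodb://user%40EXAMPLE.COM@db.example.com/?authMechanism=GSSAPI
 *       &authMechanismProperties=SERVICE_REALM:EXAMPLE.COM,CANONICALIZE_HOST_NAME:true
 *
 * with the Kerberos principal percent-encoded in the userinfo part.) */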
(int) tmp_creds_len, user, (int) tmp_creds_len); user[user_len] = L'\0'; } res = _mongoc_sspi_auth_sspi_client_init (service, flags, user, (ULONG) user_len, NULL, 0, pass, (ULONG) pass_len, state); if (res != MONGOC_SSPI_AUTH_GSS_ERROR) { return state; } bson_free (state); return NULL; } /* *-------------------------------------------------------------------------- * * _mongoc_cluster_auth_node_sspi -- * * Perform authentication for a cluster node using SSPI * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * error may be set. * *-------------------------------------------------------------------------- */ bool _mongoc_cluster_auth_node_sspi (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { mongoc_cmd_parts_t parts; mongoc_sspi_client_state_t *state; SEC_CHAR buf[4096] = {0}; bson_iter_t iter; uint32_t buflen; bson_t reply; const char *tmpstr; int conv_id; bson_t cmd; int res = MONGOC_SSPI_AUTH_GSS_CONTINUE; int step; mongoc_server_stream_t *server_stream; state = _mongoc_cluster_sspi_new (cluster->uri, stream, sd->host.host); if (!state) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "Couldn't initialize SSPI service."); goto failure; } for (step = 0;; step++) { mongoc_cmd_parts_init ( &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd); bson_init (&cmd); if (res == MONGOC_SSPI_AUTH_GSS_CONTINUE) { res = _mongoc_sspi_auth_sspi_client_step (state, buf); } else if (res == MONGOC_SSPI_AUTH_GSS_COMPLETE) { char *response; size_t tmp_creds_len = strlen (state->sasl.user); res = _mongoc_sspi_auth_sspi_client_unwrap (state, buf); response = bson_strdup (state->response); _mongoc_sspi_auth_sspi_client_wrap (state, response, (SEC_CHAR *) state->sasl.user, (ULONG) tmp_creds_len, 0); bson_free (response); } if (res == MONGOC_SSPI_AUTH_GSS_ERROR) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "Received invalid SSPI data."); mongoc_cmd_parts_cleanup (&parts); bson_destroy (&cmd); break; } if (step == 0) { _mongoc_cluster_build_sasl_start (&cmd, "GSSAPI", state->response, (uint32_t) strlen (state->response)); } else { if (state->response) { _mongoc_cluster_build_sasl_continue ( &cmd, conv_id, state->response, (uint32_t) strlen (state->response)); } else { _mongoc_cluster_build_sasl_continue (&cmd, conv_id, "", 0); } } server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, sd->id, stream, error); if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) { mongoc_server_stream_cleanup (server_stream); mongoc_cmd_parts_cleanup (&parts); bson_destroy (&cmd); break; } if (!mongoc_cluster_run_command_private ( cluster, &parts.assembled, &reply, error)) { mongoc_server_stream_cleanup (server_stream); mongoc_cmd_parts_cleanup (&parts); bson_destroy (&cmd); bson_destroy (&reply); break; } mongoc_server_stream_cleanup (server_stream); mongoc_cmd_parts_cleanup (&parts); bson_destroy (&cmd); if (bson_iter_init_find (&iter, &reply, "done") && bson_iter_as_bool (&iter)) { bson_destroy (&reply); break; } conv_id = _mongoc_cluster_get_conversation_id (&reply); if (!bson_iter_init_find (&iter, &reply, "payload") || !BSON_ITER_HOLDS_UTF8 (&iter)) { bson_destroy (&reply); bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "Received invalid SASL reply from MongoDB server."); break; } tmpstr = bson_iter_utf8 (&iter, &buflen); if (buflen > sizeof buf) { bson_set_error (error, 
MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "SASL reply from MongoDB is too large."); bson_destroy (&reply); break; } memcpy (buf, tmpstr, buflen); bson_destroy (&reply); } bson_free (state); failure: if (error->domain) { return false; } return true; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cluster.c0000644000076500000240000027410713572250757025152 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #include #include "mongoc/mongoc-cluster-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-host-list-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-cluster-sasl-private.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-ssl-private.h" #include "mongoc/mongoc-stream-tls.h" #endif #include "common-b64-private.h" #include "mongoc/mongoc-scram-private.h" #include "mongoc/mongoc-set-private.h" #include "mongoc/mongoc-socket.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-socket.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-thread-private.h" #include "mongoc/mongoc-topology-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-uri-private.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-compression-private.h" #include "mongoc/mongoc-cmd-private.h" #include "mongoc/utlist.h" #include "mongoc/mongoc-handshake-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "cluster" #define CHECK_CLOSED_DURATION_MSEC 1000 #define DB_AND_CMD_FROM_COLLECTION(outstr, name) \ do { \ const char *dot = strchr (name, '.'); \ if (!dot || ((dot - name) > (sizeof outstr - 6))) { \ bson_snprintf (outstr, sizeof outstr, "admin.$cmd"); \ } else { \ memcpy (outstr, name, dot - name); \ memcpy (outstr + (dot - name), ".$cmd", 6); \ } \ } while (0) #define IS_NOT_COMMAND(_name) (!!strcasecmp (cmd->command_name, _name)) /** * mongoc_op_msg_flags_t: * @MONGOC_MSG_CHECKSUM_PRESENT: The message ends with 4 bytes containing a * CRC-32C checksum. * @MONGOC_MSG_MORE_TO_COME: If set to 0, wait for a server response. If set to * 1, do not expect a server response. * @MONGOC_MSG_EXHAUST_ALLOWED: If set, allows multiple replies to this request * using the moreToCome bit. 
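 *
 * Illustrative flag usage (a sketch, not code taken from this file): an
 * unacknowledged, fire-and-forget write does not wait for a reply, so only
 * the moreToCome bit is raised; the checksum bit would additionally be OR'd
 * in when a CRC-32C trailer is appended.
 *
 *    mongoc_op_msg_flags_t flags = MONGOC_MSG_NONE;
 *    flags |= MONGOC_MSG_MORE_TO_COME;        (no server response expected)
 *    flags |= MONGOC_MSG_CHECKSUM_PRESENT;    (only if a checksum trails)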
*/ typedef enum { MONGOC_MSG_NONE = 0, MONGOC_MSG_CHECKSUM_PRESENT = 1 << 0, MONGOC_MSG_MORE_TO_COME = 1 << 1, MONGOC_EXHAUST_ALLOWED = 1 << 16, } mongoc_op_msg_flags_t; static mongoc_server_stream_t * mongoc_cluster_fetch_stream_single (mongoc_cluster_t *cluster, uint32_t server_id, bool reconnect_ok, bson_error_t *error); static mongoc_server_stream_t * mongoc_cluster_fetch_stream_pooled (mongoc_cluster_t *cluster, uint32_t server_id, bool reconnect_ok, bson_error_t *error); static bool mongoc_cluster_run_opmsg (mongoc_cluster_t *cluster, mongoc_cmd_t *cmd, bson_t *reply, bson_error_t *error); static void _bson_error_message_printf (bson_error_t *error, const char *format, ...) BSON_GNUC_PRINTF (2, 3); size_t _mongoc_cluster_buffer_iovec (mongoc_iovec_t *iov, size_t iovcnt, int skip, char *buffer) { int n; size_t buffer_offset = 0; int total_iov_len = 0; int difference = 0; for (n = 0; n < iovcnt; n++) { total_iov_len += iov[n].iov_len; if (total_iov_len <= skip) { continue; } /* If this iovec starts before the skip, and takes the total count * beyond the skip, we need to figure out the portion of the iovec * we should skip passed */ if (total_iov_len - iov[n].iov_len < skip) { difference = skip - (total_iov_len - iov[n].iov_len); } else { difference = 0; } memcpy (buffer + buffer_offset, ((char *) iov[n].iov_base) + difference, iov[n].iov_len - difference); buffer_offset += iov[n].iov_len - difference; } return buffer_offset; } /* Allows caller to safely overwrite error->message with a formatted string, * even if the formatted string includes original error->message. */ static void _bson_error_message_printf (bson_error_t *error, const char *format, ...) { va_list args; char error_message[sizeof error->message]; if (error) { va_start (args, format); bson_vsnprintf (error_message, sizeof error->message, format, args); va_end (args); bson_strncpy (error->message, error_message, sizeof error->message); } } #define RUN_CMD_ERR_DECORATE \ do { \ _bson_error_message_printf ( \ error, \ "Failed to send \"%s\" command with database \"%s\": %s", \ cmd->command_name, \ cmd->db_name, \ error->message); \ } while (0) #define RUN_CMD_ERR(_domain, _code, ...) \ do { \ bson_set_error (error, _domain, _code, __VA_ARGS__); \ RUN_CMD_ERR_DECORATE; \ } while (0) /* *-------------------------------------------------------------------------- * * mongoc_cluster_run_command_opquery -- * * Internal function to run a command on a given stream. @error and * @reply are optional out-pointers. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @reply is set and should ALWAYS be released with bson_destroy(). * On failure, @error is filled out. If this was a network error * and server_id is nonzero, the cluster disconnects from the server. 
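 *
 * For illustration, assuming a command issued against a database named
 * "test" (the database name is a placeholder), the body below formats the
 * OP_QUERY namespace as the "$cmd" pseudo-collection of that database:
 *
 *    char cmd_ns[MONGOC_NAMESPACE_MAX];
 *    bson_snprintf (cmd_ns, sizeof cmd_ns, "%s.$cmd", "test");
 *    (cmd_ns now holds "test.$cmd")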
* *-------------------------------------------------------------------------- */ static bool mongoc_cluster_run_command_opquery (mongoc_cluster_t *cluster, mongoc_cmd_t *cmd, mongoc_stream_t *stream, int32_t compressor_id, bson_t *reply, bson_error_t *error) { const size_t reply_header_size = sizeof (mongoc_rpc_reply_header_t); uint8_t reply_header_buf[sizeof (mongoc_rpc_reply_header_t)]; uint8_t *reply_buf; /* reply body */ mongoc_rpc_t rpc; /* sent to server */ bson_t reply_local; bson_t *reply_ptr; char cmd_ns[MONGOC_NAMESPACE_MAX]; uint32_t request_id; int32_t msg_len; size_t doc_len; bool ret = false; char *output = NULL; uint32_t server_id; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (cmd); BSON_ASSERT (stream); /* * setup */ reply_ptr = reply ? reply : &reply_local; bson_init (reply_ptr); error->code = 0; /* * prepare the request */ _mongoc_array_clear (&cluster->iov); bson_snprintf (cmd_ns, sizeof cmd_ns, "%s.$cmd", cmd->db_name); request_id = ++cluster->request_id; _mongoc_rpc_prep_command (&rpc, cmd_ns, cmd); rpc.header.request_id = request_id; server_id = cmd->server_stream->sd->id; _mongoc_rpc_gather (&rpc, &cluster->iov); _mongoc_rpc_swab_to_le (&rpc); if (compressor_id != -1 && IS_NOT_COMMAND ("ismaster") && IS_NOT_COMMAND ("saslstart") && IS_NOT_COMMAND ("saslcontinue") && IS_NOT_COMMAND ("getnonce") && IS_NOT_COMMAND ("authenticate") && IS_NOT_COMMAND ("createuser") && IS_NOT_COMMAND ("updateuser")) { output = _mongoc_rpc_compress (cluster, compressor_id, &rpc, error); if (output == NULL) { GOTO (done); } } if (cluster->client->in_exhaust) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_IN_EXHAUST, "A cursor derived from this client is in exhaust."); GOTO (done); } /* * send and receive */ if (!_mongoc_stream_writev_full (stream, cluster->iov.data, cluster->iov.len, cluster->sockettimeoutms, error)) { mongoc_cluster_disconnect_node (cluster, server_id, true, error); /* add info about the command to writev_full's error message */ RUN_CMD_ERR_DECORATE; GOTO (done); } if (reply_header_size != mongoc_stream_read (stream, &reply_header_buf, reply_header_size, reply_header_size, cluster->sockettimeoutms)) { RUN_CMD_ERR (MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "socket error or timeout"); mongoc_cluster_disconnect_node ( cluster, server_id, !mongoc_stream_timed_out (stream), error); GOTO (done); } memcpy (&msg_len, reply_header_buf, 4); msg_len = BSON_UINT32_FROM_LE (msg_len); if ((msg_len < reply_header_size) || (msg_len > MONGOC_DEFAULT_MAX_MSG_SIZE)) { mongoc_cluster_disconnect_node (cluster, server_id, true, error); GOTO (done); } if (!_mongoc_rpc_scatter_reply_header_only ( &rpc, reply_header_buf, reply_header_size)) { mongoc_cluster_disconnect_node (cluster, server_id, true, error); GOTO (done); } doc_len = (size_t) msg_len - reply_header_size; if (BSON_UINT32_FROM_LE (rpc.header.opcode) == MONGOC_OPCODE_COMPRESSED) { bson_t tmp = BSON_INITIALIZER; uint8_t *buf = NULL; size_t len = BSON_UINT32_FROM_LE (rpc.compressed.uncompressed_size) + sizeof (mongoc_rpc_header_t); reply_buf = bson_malloc0 (msg_len); memcpy (reply_buf, reply_header_buf, reply_header_size); if (doc_len != mongoc_stream_read (stream, reply_buf + reply_header_size, doc_len, doc_len, cluster->sockettimeoutms)) { RUN_CMD_ERR (MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "socket error or timeout"); mongoc_cluster_disconnect_node (cluster, server_id, true, error); GOTO (done); } if (!_mongoc_rpc_scatter (&rpc, reply_buf, msg_len)) { GOTO (done); } buf = bson_malloc0 (len); if 
(!_mongoc_rpc_decompress (&rpc, buf, len)) { RUN_CMD_ERR (MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Could not decompress server reply"); bson_free (reply_buf); bson_free (buf); GOTO (done); } _mongoc_rpc_swab_from_le (&rpc); if (!_mongoc_rpc_get_first_document (&rpc, &tmp)) { RUN_CMD_ERR (MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Corrupt compressed OP_QUERY reply from server"); bson_free (reply_buf); bson_free (buf); GOTO (done); } bson_copy_to (&tmp, reply_ptr); bson_free (reply_buf); bson_free (buf); } else if (BSON_UINT32_FROM_LE (rpc.header.opcode) == MONGOC_OPCODE_REPLY && BSON_UINT32_FROM_LE (rpc.reply_header.n_returned) == 1) { reply_buf = bson_reserve_buffer (reply_ptr, (uint32_t) doc_len); BSON_ASSERT (reply_buf); if (doc_len != mongoc_stream_read (stream, (void *) reply_buf, doc_len, doc_len, cluster->sockettimeoutms)) { RUN_CMD_ERR (MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "socket error or timeout"); mongoc_cluster_disconnect_node (cluster, server_id, true, error); GOTO (done); } _mongoc_rpc_swab_from_le (&rpc); } else { GOTO (done); } if (!_mongoc_cmd_check_ok ( reply_ptr, cluster->client->error_api_version, error)) { GOTO (done); } ret = true; done: if (!ret && error->code == 0) { /* generic error */ RUN_CMD_ERR (MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid reply from server."); } if (reply_ptr == &reply_local) { bson_destroy (reply_ptr); } bson_free (output); RETURN (ret); } typedef enum { MONGOC_REPLY_ERR_TYPE_NONE, MONGOC_REPLY_ERR_TYPE_NOT_MASTER, MONGOC_REPLY_ERR_TYPE_SHUTDOWN, MONGOC_REPLY_ERR_TYPE_NODE_IS_RECOVERING } reply_error_type_t; /*--------------------------------------------------------------------------- * * _check_not_master_or_recovering_error -- * * Checks @reply for a "not master" or "node is recovering" error and * sets @error. * * Return: * A reply_error_type_t indicating if @reply contained a "not master" * or "node is recovering" error. * *-------------------------------------------------------------------------- */ static reply_error_type_t _check_not_master_or_recovering_error (const mongoc_client_t *client, const bson_t *reply, bson_error_t *error) { if (_mongoc_cmd_check_ok_no_wce (reply, client->error_api_version, error)) { return MONGOC_REPLY_ERR_TYPE_NONE; } switch (error->code) { case 11600: /* InterruptedAtShutdown */ case 91: /* ShutdownInProgress */ return MONGOC_REPLY_ERR_TYPE_SHUTDOWN; case 11602: /* InterruptedDueToReplStateChange */ case 13436: /* NotMasterOrSecondary */ case 189: /* PrimarySteppedDown */ return MONGOC_REPLY_ERR_TYPE_NODE_IS_RECOVERING; case 10107: /* NotMaster */ case 13435: /* NotMasterNoSlaveOk */ return MONGOC_REPLY_ERR_TYPE_NOT_MASTER; default: if (strstr (error->message, "not master")) { return MONGOC_REPLY_ERR_TYPE_NOT_MASTER; } else if (strstr (error->message, "node is recovering")) { return MONGOC_REPLY_ERR_TYPE_NODE_IS_RECOVERING; } return MONGOC_REPLY_ERR_TYPE_NONE; } } static void handle_not_master_error (mongoc_cluster_t *cluster, uint32_t server_id, const bson_t *reply) { mongoc_topology_t *topology = cluster->client->topology; mongoc_server_description_t *sd; bson_error_t error; reply_error_type_t error_type = _check_not_master_or_recovering_error (cluster->client, reply, &error); if (error_type != MONGOC_REPLY_ERR_TYPE_NONE) { /* Server Discovery and Monitoring Spec: "When the client sees a 'not * master' or 'node is recovering' error it MUST replace the server's * description with a default ServerDescription of type Unknown." 
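 *
 * As an illustration (reply contents are hypothetical), a reply such as
 * { ok: 0, code: 10107, errmsg: "not master" } is classified above as
 * MONGOC_REPLY_ERR_TYPE_NOT_MASTER, while { ok: 0, code: 11600 }
 * (InterruptedAtShutdown) is classified as MONGOC_REPLY_ERR_TYPE_SHUTDOWN.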
* * The client MUST clear its connection pool for the server * if the server is 4.0 or earlier, and MUST NOT clear its connection * pool for the server if the server is 4.2 or later. */ sd = mongoc_topology_server_by_id (topology, server_id, &error); if (sd->max_wire_version <= WIRE_VERSION_4_0 || error_type == MONGOC_REPLY_ERR_TYPE_SHUTDOWN) { mongoc_cluster_disconnect_node (cluster, server_id, false, NULL); } mongoc_server_description_destroy (sd); mongoc_topology_invalidate_server (topology, server_id, &error); if (topology->single_threaded) { /* SDAM Spec: "For single-threaded clients, in the case of a 'not * master' error, the client MUST check the server immediately... For a * 'node is recovering' error, single-threaded clients MUST NOT check * the server, as an immediate server check is unlikely to find a * usable server." * Instead of an immediate check, mark the topology as stale so the * next command scans all servers (to find the new primary). */ if (error_type == MONGOC_REPLY_ERR_TYPE_NOT_MASTER) { cluster->client->topology->stale = true; } } else { /* SDAM Spec: "Multi-threaded and asynchronous clients MUST request an * immediate check of the server." * Instead of requesting a check of the one server, request a scan * to all servers (to find the new primary). */ _mongoc_topology_request_scan (topology); } } } bool _in_sharded_txn (const mongoc_client_session_t *session) { return session && _mongoc_client_session_in_txn_or_ending (session) && _mongoc_topology_get_type (session->client->topology) == MONGOC_TOPOLOGY_SHARDED; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_run_command_monitored -- * * Internal function to run a command on a given stream. * @error and @reply are optional out-pointers. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * If the client's APM callbacks are set, they are executed. * @reply is set and should ALWAYS be released with bson_destroy(). * *-------------------------------------------------------------------------- */ bool mongoc_cluster_run_command_monitored (mongoc_cluster_t *cluster, mongoc_cmd_t *cmd, bson_t *reply, bson_error_t *error) { bool retval; uint32_t request_id = ++cluster->request_id; uint32_t server_id; mongoc_apm_callbacks_t *callbacks; mongoc_apm_command_started_t started_event; mongoc_apm_command_succeeded_t succeeded_event; mongoc_apm_command_failed_t failed_event; int64_t started = bson_get_monotonic_time (); const mongoc_server_stream_t *server_stream; bson_t reply_local; bson_error_t error_local; int32_t compressor_id; bson_iter_t iter; server_stream = cmd->server_stream; server_id = server_stream->sd->id; compressor_id = mongoc_server_description_compressor_id (server_stream->sd); callbacks = &cluster->client->apm_callbacks; if (!reply) { reply = &reply_local; } if (!error) { error = &error_local; } if (callbacks->started) { mongoc_apm_command_started_init_with_cmd ( &started_event, cmd, request_id, cluster->client->apm_context); callbacks->started (&started_event); mongoc_apm_command_started_cleanup (&started_event); } if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) { retval = mongoc_cluster_run_opmsg (cluster, cmd, reply, error); } else { retval = mongoc_cluster_run_command_opquery ( cluster, cmd, server_stream->stream, compressor_id, reply, error); } if (retval && callbacks->succeeded) { bson_t fake_reply = BSON_INITIALIZER; /* * Unacknowledged writes must provide a CommandSucceededEvent with an * {ok: 1} reply. 
* https://github.com/mongodb/specifications/blob/master/source/command-monitoring/command-monitoring.rst#unacknowledged-acknowledged-writes */ if (!cmd->is_acknowledged) { bson_append_int32 (&fake_reply, "ok", 2, 1); } mongoc_apm_command_succeeded_init (&succeeded_event, bson_get_monotonic_time () - started, cmd->is_acknowledged ? reply : &fake_reply, cmd->command_name, request_id, cmd->operation_id, &server_stream->sd->host, server_id, cluster->client->apm_context); callbacks->succeeded (&succeeded_event); mongoc_apm_command_succeeded_cleanup (&succeeded_event); bson_destroy (&fake_reply); } if (!retval && callbacks->failed) { mongoc_apm_command_failed_init (&failed_event, bson_get_monotonic_time () - started, cmd->command_name, error, reply, request_id, cmd->operation_id, &server_stream->sd->host, server_id, cluster->client->apm_context); callbacks->failed (&failed_event); mongoc_apm_command_failed_cleanup (&failed_event); } handle_not_master_error (cluster, server_id, reply); if (retval && _in_sharded_txn (cmd->session) && bson_iter_init_find (&iter, reply, "recoveryToken")) { bson_destroy (cmd->session->recovery_token); if (BSON_ITER_HOLDS_DOCUMENT (&iter)) { cmd->session->recovery_token = bson_new_from_data (bson_iter_value (&iter)->value.v_doc.data, bson_iter_value (&iter)->value.v_doc.data_len); } else { MONGOC_ERROR ("Malformed recovery token from server"); cmd->session->recovery_token = NULL; } } if (reply == &reply_local) { bson_destroy (&reply_local); } _mongoc_topology_update_last_used (cluster->client->topology, server_id); return retval; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_run_command_private -- * * Internal function to run a command on a given stream. * @error and @reply are optional out-pointers. * The client's APM callbacks are not executed. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @reply is set and should ALWAYS be released with bson_destroy(). * *-------------------------------------------------------------------------- */ bool mongoc_cluster_run_command_private (mongoc_cluster_t *cluster, mongoc_cmd_t *cmd, bson_t *reply, bson_error_t *error) { bool retval; const mongoc_server_stream_t *server_stream; bson_t reply_local; bson_error_t error_local; if (!error) { error = &error_local; } if (!reply) { reply = &reply_local; } server_stream = cmd->server_stream; if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) { retval = mongoc_cluster_run_opmsg (cluster, cmd, reply, error); } else { retval = mongoc_cluster_run_command_opquery ( cluster, cmd, cmd->server_stream->stream, -1, reply, error); } handle_not_master_error (cluster, server_stream->sd->id, reply); if (reply == &reply_local) { bson_destroy (&reply_local); } _mongoc_topology_update_last_used (cluster->client->topology, server_stream->sd->id); return retval; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_run_command_parts -- * * Internal function to assemble command parts and run a command * on a given stream. @error and @reply are optional out-pointers. * The client's APM callbacks are not executed. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @reply is set and should ALWAYS be released with bson_destroy(). * mongoc_cmd_parts_cleanup will be always be called on parts. The * caller should *not* call cleanup on the parts. 
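 *
 * Sketch of the caller pattern used elsewhere in this file (the database
 * name and command document are placeholders; error handling elided):
 *
 *    mongoc_cmd_parts_init (&parts, cluster->client, "admin",
 *                           MONGOC_QUERY_SLAVE_OK, &cmd);
 *    parts.prohibit_lsid = true;
 *    ret = mongoc_cluster_run_command_parts (cluster, server_stream,
 *                                            &parts, &reply, error);
 *    bson_destroy (&cmd);
 *    bson_destroy (&reply);    (always, whether or not ret is true)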
* *-------------------------------------------------------------------------- */ bool mongoc_cluster_run_command_parts (mongoc_cluster_t *cluster, mongoc_server_stream_t *server_stream, mongoc_cmd_parts_t *parts, bson_t *reply, bson_error_t *error) { bool ret; if (!mongoc_cmd_parts_assemble (parts, server_stream, error)) { _mongoc_bson_init_if_set (reply); mongoc_cmd_parts_cleanup (parts); return false; } ret = mongoc_cluster_run_command_private ( cluster, &parts->assembled, reply, error); mongoc_cmd_parts_cleanup (parts); return ret; } /* *-------------------------------------------------------------------------- * * _mongoc_stream_run_ismaster -- * * Run an ismaster command on the given stream. If * @negotiate_sasl_supported_mechs is true, then saslSupportedMechs is * added to the ismaster command. * * Returns: * A mongoc_server_description_t you must destroy or NULL. If the call * failed its error is set and its type is MONGOC_SERVER_UNKNOWN. * *-------------------------------------------------------------------------- */ static mongoc_server_description_t * _mongoc_stream_run_ismaster (mongoc_cluster_t *cluster, mongoc_stream_t *stream, const char *address, uint32_t server_id, bool negotiate_sasl_supported_mechs, bson_error_t *error) { const bson_t *command; mongoc_cmd_parts_t parts; bson_t reply; int64_t start; int64_t rtt_msec; mongoc_server_description_t *sd; mongoc_server_stream_t *server_stream; bson_t *copied_command = NULL; bool r; bson_iter_t iter; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (stream); command = _mongoc_topology_get_ismaster (cluster->client->topology); if (negotiate_sasl_supported_mechs) { copied_command = bson_copy (command); _mongoc_handshake_append_sasl_supported_mechs (cluster->uri, copied_command); command = copied_command; } start = bson_get_monotonic_time (); server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, server_id, stream, error); if (!server_stream) { bson_destroy (copied_command); RETURN (NULL); } mongoc_cmd_parts_init ( &parts, cluster->client, "admin", MONGOC_QUERY_SLAVE_OK, command); parts.prohibit_lsid = true; if (!mongoc_cluster_run_command_parts ( cluster, server_stream, &parts, &reply, error)) { if (negotiate_sasl_supported_mechs) { if (bson_iter_init_find (&iter, &reply, "ok") && !bson_iter_as_bool (&iter)) { /* ismaster response returned ok: 0. According to auth spec: "If the * isMaster of the MongoDB Handshake fails with an error, drivers * MUST treat this an authentication error." 
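 *
 * For example (values are illustrative), a handshake reply of
 * { ok: 0, errmsg: "Authentication failed.", code: 18 } is therefore
 * reported with domain MONGOC_ERROR_CLIENT and code
 * MONGOC_ERROR_CLIENT_AUTHENTICATE instead of as a generic command error.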
*/ error->domain = MONGOC_ERROR_CLIENT; error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE; } } bson_destroy (copied_command); bson_destroy (&reply); mongoc_server_stream_cleanup (server_stream); RETURN (NULL); } rtt_msec = (bson_get_monotonic_time () - start) / 1000; sd = (mongoc_server_description_t *) bson_malloc0 ( sizeof (mongoc_server_description_t)); mongoc_server_description_init (sd, address, server_id); /* send the error from run_command IN to handle_ismaster */ mongoc_server_description_handle_ismaster (sd, &reply, rtt_msec, error); bson_destroy (&reply); r = _mongoc_topology_update_from_handshake (cluster->client->topology, sd); if (!r) { mongoc_server_description_reset (sd); bson_set_error (&sd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NOT_ESTABLISHED, "\"%s\" removed from topology", address); } mongoc_server_stream_cleanup (server_stream); if (copied_command) { bson_destroy (copied_command); } RETURN (sd); } /* *-------------------------------------------------------------------------- * * _mongoc_cluster_run_ismaster -- * * Run an initial ismaster command for the given node and handle result. * * Returns: * mongoc_server_description_t on success, NULL otherwise. * the mongoc_server_description_t MUST BE DESTROYED BY THE CALLER. * * Side effects: * Makes a blocking I/O call, updates cluster->topology->description * with ismaster result. * *-------------------------------------------------------------------------- */ static mongoc_server_description_t * _mongoc_cluster_run_ismaster (mongoc_cluster_t *cluster, mongoc_cluster_node_t *node, uint32_t server_id, bson_error_t *error /* OUT */) { mongoc_server_description_t *sd; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (node); BSON_ASSERT (node->stream); sd = _mongoc_stream_run_ismaster ( cluster, node->stream, node->connection_address, server_id, _mongoc_uri_requires_auth_negotiation (cluster->uri), error); if (!sd) { return NULL; } if (sd->type == MONGOC_SERVER_UNKNOWN) { memcpy (error, &sd->error, sizeof (bson_error_t)); mongoc_server_description_destroy (sd); return NULL; } else { node->max_write_batch_size = sd->max_write_batch_size; node->min_wire_version = sd->min_wire_version; node->max_wire_version = sd->max_wire_version; node->max_bson_obj_size = sd->max_bson_obj_size; node->max_msg_size = sd->max_msg_size; } return sd; } /* *-------------------------------------------------------------------------- * * _mongoc_cluster_build_basic_auth_digest -- * * Computes the Basic Authentication digest using the credentials * configured for @cluster and the @nonce provided. * * The result should be freed by the caller using bson_free() when * they are finished with it. * * Returns: * A newly allocated string containing the digest. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static char * _mongoc_cluster_build_basic_auth_digest (mongoc_cluster_t *cluster, const char *nonce) { const char *username; const char *password; char *password_digest; char *password_md5; char *digest_in; char *ret; ENTRY; /* * The following generates the digest to be used for basic authentication * with a MongoDB server. 
More information on the format can be found * at the following location: * * http://docs.mongodb.org/meta-driver/latest/legacy/ * implement-authentication-in-driver/ */ BSON_ASSERT (cluster); BSON_ASSERT (cluster->uri); username = mongoc_uri_get_username (cluster->uri); password = mongoc_uri_get_password (cluster->uri); password_digest = bson_strdup_printf ("%s:mongo:%s", username, password); password_md5 = _mongoc_hex_md5 (password_digest); digest_in = bson_strdup_printf ("%s%s%s", nonce, username, password_md5); ret = _mongoc_hex_md5 (digest_in); bson_free (digest_in); bson_free (password_md5); bson_free (password_digest); RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_cluster_auth_node_cr -- * * Performs authentication of @node using the credentials provided * when configuring the @cluster instance. * * This is the Challenge-Response mode of authentication. * * Returns: * true if authentication was successful; otherwise false and * @error is set. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_cluster_auth_node_cr (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { mongoc_cmd_parts_t parts; bson_iter_t iter; const char *auth_source; bson_t command; bson_t reply; char *digest; char *nonce; bool ret; mongoc_server_stream_t *server_stream; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (stream); if (!(auth_source = mongoc_uri_get_auth_source (cluster->uri)) || (*auth_source == '\0')) { auth_source = "admin"; } /* * To authenticate a node using basic authentication, we need to first * get the nonce from the server. We use that to hash our password which * is sent as a reply to the server. If everything went good we get a * success notification back from the server. */ /* * Execute the getnonce command to fetch the nonce used for generating * md5 digest of our password information. */ bson_init (&command); bson_append_int32 (&command, "getnonce", 8, 1); mongoc_cmd_parts_init ( &parts, cluster->client, auth_source, MONGOC_QUERY_SLAVE_OK, &command); parts.prohibit_lsid = true; server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, sd->id, stream, error); if (!mongoc_cluster_run_command_parts ( cluster, server_stream, &parts, &reply, error)) { mongoc_server_stream_cleanup (server_stream); bson_destroy (&command); bson_destroy (&reply); RETURN (false); } bson_destroy (&command); if (!bson_iter_init_find_case (&iter, &reply, "nonce")) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_GETNONCE, "Invalid reply from getnonce"); bson_destroy (&reply); RETURN (false); } /* * Build our command to perform the authentication. */ nonce = bson_iter_dup_utf8 (&iter, NULL); digest = _mongoc_cluster_build_basic_auth_digest (cluster, nonce); bson_init (&command); bson_append_int32 (&command, "authenticate", 12, 1); bson_append_utf8 ( &command, "user", 4, mongoc_uri_get_username (cluster->uri), -1); bson_append_utf8 (&command, "nonce", 5, nonce, -1); bson_append_utf8 (&command, "key", 3, digest, -1); bson_destroy (&reply); bson_free (nonce); bson_free (digest); /* * Execute the authenticate command. mongoc_cluster_run_command_private * checks for {ok: 1} in the response. 
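 *
 * The command built above has this shape (all values are placeholders):
 *
 *    {
 *       authenticate: 1,
 *       user:  "<username>",
 *       nonce: "<nonce returned by getnonce>",
 *       key:   MD5(nonce + username + MD5(username + ":mongo:" + password))
 *    }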
*/ mongoc_cmd_parts_init ( &parts, cluster->client, auth_source, MONGOC_QUERY_SLAVE_OK, &command); parts.prohibit_lsid = true; ret = mongoc_cluster_run_command_parts ( cluster, server_stream, &parts, &reply, error); if (!ret) { /* error->message is already set */ error->domain = MONGOC_ERROR_CLIENT; error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE; } mongoc_server_stream_cleanup (server_stream); bson_destroy (&command); bson_destroy (&reply); RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_cluster_auth_node_plain -- * * Perform SASL PLAIN authentication for @node. We do this manually * instead of using the SASL module because its rather simplistic. * * Returns: * true if successful; otherwise false and error is set. * * Side effects: * error may be set. * *-------------------------------------------------------------------------- */ static bool _mongoc_cluster_auth_node_plain (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { mongoc_cmd_parts_t parts; char buf[4096]; int buflen = 0; const char *username; const char *password; bson_t b = BSON_INITIALIZER; bson_t reply; size_t len; char *str; bool ret; mongoc_server_stream_t *server_stream; BSON_ASSERT (cluster); BSON_ASSERT (stream); username = mongoc_uri_get_username (cluster->uri); if (!username) { username = ""; } password = mongoc_uri_get_password (cluster->uri); if (!password) { password = ""; } str = bson_strdup_printf ("%c%s%c%s", '\0', username, '\0', password); len = strlen (username) + strlen (password) + 2; buflen = bson_b64_ntop ((const uint8_t *) str, len, buf, sizeof buf); bson_free (str); if (buflen == -1) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "failed base64 encoding message"); return false; } BSON_APPEND_INT32 (&b, "saslStart", 1); BSON_APPEND_UTF8 (&b, "mechanism", "PLAIN"); bson_append_utf8 (&b, "payload", 7, (const char *) buf, buflen); BSON_APPEND_INT32 (&b, "autoAuthorize", 1); mongoc_cmd_parts_init ( &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &b); parts.prohibit_lsid = true; server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, sd->id, stream, error); ret = mongoc_cluster_run_command_parts ( cluster, server_stream, &parts, &reply, error); mongoc_server_stream_cleanup (server_stream); if (!ret) { /* error->message is already set */ error->domain = MONGOC_ERROR_CLIENT; error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE; } bson_destroy (&b); bson_destroy (&reply); return ret; } static bool _mongoc_cluster_auth_node_x509 (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { #ifndef MONGOC_ENABLE_SSL bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "The MONGODB-X509 authentication mechanism requires " "libmongoc built with ENABLE_SSL"); return false; #else mongoc_cmd_parts_t parts; const char *username_from_uri = NULL; char *username_from_subject = NULL; bson_t cmd; bson_t reply; bool ret; mongoc_server_stream_t *server_stream; BSON_ASSERT (cluster); BSON_ASSERT (stream); username_from_uri = mongoc_uri_get_username (cluster->uri); if (username_from_uri) { TRACE ("%s", "X509: got username from URI"); } else { if (!cluster->client->ssl_opts.pem_file) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "cannot determine username for " "X-509 authentication."); return false; } username_from_subject = 
mongoc_ssl_extract_subject ( cluster->client->ssl_opts.pem_file, cluster->client->ssl_opts.pem_pwd); if (!username_from_subject) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "No username provided for X509 authentication."); return false; } TRACE ("%s", "X509: got username from certificate"); } bson_init (&cmd); BSON_APPEND_INT32 (&cmd, "authenticate", 1); BSON_APPEND_UTF8 (&cmd, "mechanism", "MONGODB-X509"); BSON_APPEND_UTF8 (&cmd, "user", username_from_uri ? username_from_uri : username_from_subject); mongoc_cmd_parts_init ( &parts, cluster->client, "$external", MONGOC_QUERY_SLAVE_OK, &cmd); parts.prohibit_lsid = true; server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, sd->id, stream, error); ret = mongoc_cluster_run_command_parts ( cluster, server_stream, &parts, &reply, error); mongoc_server_stream_cleanup (server_stream); if (!ret) { /* error->message is already set */ error->domain = MONGOC_ERROR_CLIENT; error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE; } if (username_from_subject) { bson_free (username_from_subject); } bson_destroy (&cmd); bson_destroy (&reply); return ret; #endif } #ifdef MONGOC_ENABLE_CRYPTO static bool _mongoc_cluster_auth_node_scram (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, mongoc_crypto_hash_algorithm_t algo, bson_error_t *error) { mongoc_cmd_parts_t parts; uint32_t buflen = 0; mongoc_scram_t scram; bson_iter_t iter; bool ret = false; const char *tmpstr; const char *auth_source; uint8_t buf[4096] = {0}; bson_t cmd; bson_t reply; int conv_id = 0; bson_subtype_t btype; mongoc_server_stream_t *server_stream; BSON_ASSERT (cluster); BSON_ASSERT (stream); if (!(auth_source = mongoc_uri_get_auth_source (cluster->uri)) || (*auth_source == '\0')) { auth_source = "admin"; } _mongoc_scram_init (&scram, algo); _mongoc_scram_set_pass (&scram, mongoc_uri_get_password (cluster->uri)); _mongoc_scram_set_user (&scram, mongoc_uri_get_username (cluster->uri)); /* Apply previously cached SCRAM secrets if available */ if (cluster->scram_cache) { _mongoc_scram_set_cache (&scram, cluster->scram_cache); } for (;;) { if (!_mongoc_scram_step ( &scram, buf, buflen, buf, sizeof buf, &buflen, error)) { goto failure; } bson_init (&cmd); if (scram.step == 1) { BSON_APPEND_INT32 (&cmd, "saslStart", 1); if (algo == MONGOC_CRYPTO_ALGORITHM_SHA_1) { BSON_APPEND_UTF8 (&cmd, "mechanism", "SCRAM-SHA-1"); } else if (algo == MONGOC_CRYPTO_ALGORITHM_SHA_256) { BSON_APPEND_UTF8 (&cmd, "mechanism", "SCRAM-SHA-256"); } else { BSON_ASSERT (false); } bson_append_binary ( &cmd, "payload", 7, BSON_SUBTYPE_BINARY, buf, buflen); BSON_APPEND_INT32 (&cmd, "autoAuthorize", 1); } else { BSON_APPEND_INT32 (&cmd, "saslContinue", 1); BSON_APPEND_INT32 (&cmd, "conversationId", conv_id); bson_append_binary ( &cmd, "payload", 7, BSON_SUBTYPE_BINARY, buf, buflen); } TRACE ("SCRAM: authenticating (step %d)", scram.step); mongoc_cmd_parts_init ( &parts, cluster->client, auth_source, MONGOC_QUERY_SLAVE_OK, &cmd); parts.prohibit_lsid = true; server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, sd->id, stream, error); if (!mongoc_cluster_run_command_parts ( cluster, server_stream, &parts, &reply, error)) { mongoc_server_stream_cleanup (server_stream); bson_destroy (&cmd); bson_destroy (&reply); /* error->message is already set */ error->domain = MONGOC_ERROR_CLIENT; error->code = MONGOC_ERROR_CLIENT_AUTHENTICATE; goto failure; } mongoc_server_stream_cleanup (server_stream); bson_destroy 
(&cmd); if (bson_iter_init_find (&iter, &reply, "done") && bson_iter_as_bool (&iter)) { bson_destroy (&reply); break; } if (!bson_iter_init_find (&iter, &reply, "conversationId") || !BSON_ITER_HOLDS_INT32 (&iter) || !(conv_id = bson_iter_int32 (&iter)) || !bson_iter_init_find (&iter, &reply, "payload") || !BSON_ITER_HOLDS_BINARY (&iter)) { const char *errmsg = "Received invalid SCRAM reply from MongoDB server."; MONGOC_DEBUG ("SCRAM: authentication failed"); if (bson_iter_init_find (&iter, &reply, "errmsg") && BSON_ITER_HOLDS_UTF8 (&iter)) { errmsg = bson_iter_utf8 (&iter, NULL); } bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "%s", errmsg); bson_destroy (&reply); goto failure; } bson_iter_binary (&iter, &btype, &buflen, (const uint8_t **) &tmpstr); if (buflen > sizeof buf) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "SCRAM reply from MongoDB is too large."); bson_destroy (&reply); goto failure; } memcpy (buf, tmpstr, buflen); bson_destroy (&reply); } TRACE ("%s", "SCRAM: authenticated"); ret = true; /* Save cached SCRAM secrets for future use */ if (cluster->scram_cache) { _mongoc_scram_cache_destroy (cluster->scram_cache); } cluster->scram_cache = _mongoc_scram_get_cache (&scram); failure: _mongoc_scram_destroy (&scram); return ret; } #endif static bool _mongoc_cluster_auth_node_scram_sha_1 (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { #ifndef MONGOC_ENABLE_CRYPTO bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "The SCRAM_SHA_1 authentication mechanism requires " "libmongoc built with ENABLE_SSL"); return false; #else return _mongoc_cluster_auth_node_scram ( cluster, stream, sd, MONGOC_CRYPTO_ALGORITHM_SHA_1, error); #endif } static bool _mongoc_cluster_auth_node_scram_sha_256 (mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, bson_error_t *error) { #ifndef MONGOC_ENABLE_CRYPTO bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "The SCRAM_SHA_256 authentication mechanism requires " "libmongoc built with ENABLE_SSL"); return false; #else return _mongoc_cluster_auth_node_scram ( cluster, stream, sd, MONGOC_CRYPTO_ALGORITHM_SHA_256, error); #endif } /* *-------------------------------------------------------------------------- * * _mongoc_cluster_auth_node -- * * Authenticate a cluster node depending on the required mechanism. * * Returns: * true if authenticated. false on failure and @error is set. * * Side effects: * @error is set on failure. * *-------------------------------------------------------------------------- */ static bool _mongoc_cluster_auth_node ( mongoc_cluster_t *cluster, mongoc_stream_t *stream, mongoc_server_description_t *sd, const mongoc_handshake_sasl_supported_mechs_t *sasl_supported_mechs, bson_error_t *error) { bool ret = false; const char *mechanism; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (stream); mechanism = mongoc_uri_get_auth_mechanism (cluster->uri); if (!mechanism) { if (sasl_supported_mechs->scram_sha_256) { /* Auth spec: "If SCRAM-SHA-256 is present in the list of mechanisms, * then it MUST be used as the default; otherwise, SCRAM-SHA-1 MUST be * used as the default, regardless of whether SCRAM-SHA-1 is in the * list. Drivers MUST NOT attempt to use any other mechanism (e.g. * PLAIN) as the default." [...] 
"If saslSupportedMechs is not present * in the isMaster results for mechanism negotiation, then SCRAM-SHA-1 * MUST be used when talking to servers >= 3.0." */ mechanism = "SCRAM-SHA-256"; } else { mechanism = "SCRAM-SHA-1"; } } if (0 == strcasecmp (mechanism, "MONGODB-CR")) { ret = _mongoc_cluster_auth_node_cr (cluster, stream, sd, error); } else if (0 == strcasecmp (mechanism, "MONGODB-X509")) { ret = _mongoc_cluster_auth_node_x509 (cluster, stream, sd, error); } else if (0 == strcasecmp (mechanism, "SCRAM-SHA-1")) { ret = _mongoc_cluster_auth_node_scram_sha_1 (cluster, stream, sd, error); } else if (0 == strcasecmp (mechanism, "SCRAM-SHA-256")) { ret = _mongoc_cluster_auth_node_scram_sha_256 (cluster, stream, sd, error); } else if (0 == strcasecmp (mechanism, "GSSAPI")) { ret = _mongoc_cluster_auth_node_sasl (cluster, stream, sd, error); } else if (0 == strcasecmp (mechanism, "PLAIN")) { ret = _mongoc_cluster_auth_node_plain (cluster, stream, sd, error); } else { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_AUTHENTICATE, "Unknown authentication mechanism \"%s\".", mechanism); } if (!ret) { mongoc_counter_auth_failure_inc (); MONGOC_DEBUG ("Authentication failed: %s", error->message); } else { mongoc_counter_auth_success_inc (); TRACE ("%s", "Authentication succeeded"); } RETURN (ret); } static bool _mongoc_cluster_disconnect_node_in_set (uint32_t id, void *item, void *ctx) { mongoc_cluster_t *cluster = (mongoc_cluster_t *) ctx; mongoc_cluster_disconnect_node (cluster, id, false, NULL); return true; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_disconnect -- * * Disconnects all nodes in this cluster. * * Returns: * None. * * Side effects: * Clears the cluster's set of nodes and frees them if pooled. * *-------------------------------------------------------------------------- */ void mongoc_cluster_disconnect (mongoc_cluster_t *cluster) { mongoc_topology_t *topology; BSON_ASSERT (cluster); topology = cluster->client->topology; /* in the single-threaded use case we share topology's streams */ if (topology->single_threaded) { mongoc_topology_scanner_disconnect (topology->scanner); } else { mongoc_set_for_each_with_id ( cluster->nodes, _mongoc_cluster_disconnect_node_in_set, cluster); } } /* *-------------------------------------------------------------------------- * * mongoc_cluster_disconnect_node -- * * Remove a node from the set of nodes. This should be done if * a stream in the set is found to be invalid. If @invalidate is * true, also mark the server Unknown in the topology description, * passing the error information from @why as the reason. * * WARNING: pointers to a disconnected mongoc_cluster_node_t or * its stream are now invalid, be careful of dangling pointers. * * Returns: * None. * * Side effects: * Removes node from cluster's set of nodes, and frees the * mongoc_cluster_node_t if pooled. 
* *-------------------------------------------------------------------------- */ void mongoc_cluster_disconnect_node (mongoc_cluster_t *cluster, uint32_t server_id, bool invalidate, const bson_error_t *why /* IN */) { mongoc_topology_t *topology = cluster->client->topology; ENTRY; if (topology->single_threaded) { mongoc_topology_scanner_node_t *scanner_node; scanner_node = mongoc_topology_scanner_get_node (topology->scanner, server_id); /* might never actually have connected */ if (scanner_node && scanner_node->stream) { mongoc_topology_scanner_node_disconnect (scanner_node, true); } } else { mongoc_set_rm (cluster->nodes, server_id); } if (invalidate) { mongoc_topology_invalidate_server (topology, server_id, why); } EXIT; } static void _mongoc_cluster_node_destroy (mongoc_cluster_node_t *node) { /* Failure, or Replica Set reconfigure without this node */ mongoc_stream_failed (node->stream); bson_free (node->connection_address); bson_free (node); } static void _mongoc_cluster_node_dtor (void *data_, void *ctx_) { mongoc_cluster_node_t *node = (mongoc_cluster_node_t *) data_; _mongoc_cluster_node_destroy (node); } static mongoc_cluster_node_t * _mongoc_cluster_node_new (mongoc_stream_t *stream, const char *connection_address) { mongoc_cluster_node_t *node; if (!stream) { return NULL; } node = (mongoc_cluster_node_t *) bson_malloc0 (sizeof *node); node->stream = stream; node->connection_address = bson_strdup (connection_address); node->timestamp = bson_get_monotonic_time (); node->max_wire_version = MONGOC_DEFAULT_WIRE_VERSION; node->min_wire_version = MONGOC_DEFAULT_WIRE_VERSION; node->max_write_batch_size = MONGOC_DEFAULT_WRITE_BATCH_SIZE; node->max_bson_obj_size = MONGOC_DEFAULT_BSON_OBJ_SIZE; node->max_msg_size = MONGOC_DEFAULT_MAX_MSG_SIZE; return node; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_add_node -- * * Add a new node to this cluster for the given server description. * * NOTE: does NOT check if this server is already in the cluster. * * Returns: * A stream connected to the server, or NULL on failure. * * Side effects: * Adds a cluster node, or sets error on failure. 
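 *
 * Condensed sketch of the calls made in the body below (error handling and
 * cleanup omitted):
 *
 *    host   = _mongoc_topology_host_by_id (cluster->client->topology,
 *                                          server_id, error);
 *    stream = _mongoc_client_create_stream (cluster->client, host, error);
 *    node   = _mongoc_cluster_node_new (stream, host->host_and_port);
 *    sd     = _mongoc_cluster_run_ismaster (cluster, node, server_id, error);
 *    _mongoc_cluster_auth_node (...)          (only if requires_auth)
 *    mongoc_set_add (cluster->nodes, server_id, node);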
* *-------------------------------------------------------------------------- */ static mongoc_stream_t * _mongoc_cluster_add_node (mongoc_cluster_t *cluster, uint32_t server_id, bson_error_t *error /* OUT */) { mongoc_host_list_t *host = NULL; mongoc_cluster_node_t *cluster_node = NULL; mongoc_stream_t *stream; mongoc_server_description_t *sd; mongoc_handshake_sasl_supported_mechs_t sasl_supported_mechs; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (!cluster->client->topology->single_threaded); host = _mongoc_topology_host_by_id (cluster->client->topology, server_id, error); if (!host) { GOTO (error); } TRACE ("Adding new server to cluster: %s", host->host_and_port); stream = _mongoc_client_create_stream (cluster->client, host, error); if (!stream) { MONGOC_WARNING ( "Failed connection to %s (%s)", host->host_and_port, error->message); GOTO (error); } /* take critical fields from a fresh ismaster */ cluster_node = _mongoc_cluster_node_new (stream, host->host_and_port); sd = _mongoc_cluster_run_ismaster (cluster, cluster_node, server_id, error); if (!sd) { GOTO (error); } _mongoc_handshake_parse_sasl_supported_mechs (&sd->last_is_master, &sasl_supported_mechs); if (cluster->requires_auth) { if (!_mongoc_cluster_auth_node ( cluster, cluster_node->stream, sd, &sasl_supported_mechs, error)) { MONGOC_WARNING ("Failed authentication to %s (%s)", host->host_and_port, error->message); mongoc_server_description_destroy (sd); GOTO (error); } } mongoc_server_description_destroy (sd); mongoc_set_add (cluster->nodes, server_id, cluster_node); _mongoc_host_list_destroy_all (host); RETURN (stream); error: _mongoc_host_list_destroy_all (host); /* null ok */ if (cluster_node) { _mongoc_cluster_node_destroy (cluster_node); /* also destroys stream */ } RETURN (NULL); } static void node_not_found (mongoc_topology_t *topology, uint32_t server_id, bson_error_t *error /* OUT */) { mongoc_server_description_t *sd; if (!error) { return; } sd = mongoc_topology_server_by_id (topology, server_id, error); if (!sd) { return; } if (sd->error.code) { memcpy (error, &sd->error, sizeof *error); } else { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NOT_ESTABLISHED, "Could not find node %s", sd->host.host_and_port); } mongoc_server_description_destroy (sd); } static void stream_not_found (mongoc_topology_t *topology, uint32_t server_id, const char *connection_address, bson_error_t *error /* OUT */) { mongoc_server_description_t *sd; sd = mongoc_topology_server_by_id (topology, server_id, error); if (error) { if (sd && sd->error.code) { memcpy (error, &sd->error, sizeof *error); } else { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NOT_ESTABLISHED, "Could not find stream for node %s", connection_address); } } if (sd) { mongoc_server_description_destroy (sd); } } mongoc_server_stream_t * _mongoc_cluster_stream_for_server (mongoc_cluster_t *cluster, uint32_t server_id, bool reconnect_ok, const mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error /* OUT */) { mongoc_topology_t *topology; mongoc_server_stream_t *server_stream; bson_error_t err_local; /* if fetch_stream fails we need a place to receive error details and pass * them to mongoc_topology_invalidate_server. */ bson_error_t *err_ptr = error ? 
error : &err_local; ENTRY; topology = cluster->client->topology; /* in the single-threaded use case we share topology's streams */ if (topology->single_threaded) { server_stream = mongoc_cluster_fetch_stream_single ( cluster, server_id, reconnect_ok, err_ptr); } else { server_stream = mongoc_cluster_fetch_stream_pooled ( cluster, server_id, reconnect_ok, err_ptr); } if (!server_stream) { /* Server Discovery And Monitoring Spec: "When an application operation * fails because of any network error besides a socket timeout, the * client MUST replace the server's description with a default * ServerDescription of type Unknown, and fill the ServerDescription's * error field with useful information." * * error was filled by fetch_stream_single/pooled, pass it to disconnect() */ mongoc_cluster_disconnect_node (cluster, server_id, true, err_ptr); _mongoc_bson_init_with_transient_txn_error (cs, reply); } RETURN (server_stream); } /* *-------------------------------------------------------------------------- * * mongoc_cluster_stream_for_server -- * * Fetch the stream for @server_id. If @reconnect_ok and there is no * valid stream, attempts to reconnect; if not @reconnect_ok then only * an existing stream can be returned, or NULL. * * Returns: * A mongoc_server_stream_t, or NULL * * Side effects: * May add a node or reconnect one, if @reconnect_ok. * Authenticates the stream if needed. * Sets @error and initializes @reply on error. * *-------------------------------------------------------------------------- */ mongoc_server_stream_t * mongoc_cluster_stream_for_server (mongoc_cluster_t *cluster, uint32_t server_id, bool reconnect_ok, mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error) { mongoc_server_stream_t *server_stream = NULL; bson_error_t err_local = {0}; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (server_id); if (cs && cs->server_id && cs->server_id != server_id) { _mongoc_bson_init_if_set (reply); bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_SERVER_SELECTION_INVALID_ID, "Requested server id does not matched pinned server id"); RETURN (NULL); } if (!error) { error = &err_local; } server_stream = _mongoc_cluster_stream_for_server ( cluster, server_id, reconnect_ok, cs, reply, error); if (!server_stream) { /* failed */ mongoc_cluster_disconnect_node (cluster, server_id, true, error); } if (_in_sharded_txn (cs)) { _mongoc_client_session_pin (cs, server_id); } else { /* Transactions Spec: Additionally, any non-transaction operation using * a pinned ClientSession MUST unpin the session and the operation MUST * perform normal server selection. */ if (cs && !_mongoc_client_session_in_txn_or_ending (cs)) { _mongoc_client_session_unpin (cs); } } RETURN (server_stream); } static mongoc_server_stream_t * mongoc_cluster_fetch_stream_single (mongoc_cluster_t *cluster, uint32_t server_id, bool reconnect_ok, bson_error_t *error /* OUT */) { mongoc_topology_t *topology; mongoc_server_description_t *sd; mongoc_topology_scanner_node_t *scanner_node; char *address; topology = cluster->client->topology; scanner_node = mongoc_topology_scanner_get_node (topology->scanner, server_id); BSON_ASSERT (scanner_node && !scanner_node->retired); if (scanner_node->stream) { sd = mongoc_topology_server_by_id (topology, server_id, error); if (!sd) { return NULL; } } else { if (!reconnect_ok) { stream_not_found ( topology, server_id, scanner_node->host.host_and_port, error); return NULL; } /* save the scanner node address in case it is removed during the scan. 
*/ address = bson_strdup (scanner_node->host.host_and_port); _mongoc_topology_do_blocking_scan (topology, error); if (error->code) { bson_free (address); return NULL; } scanner_node = mongoc_topology_scanner_get_node (topology->scanner, server_id); if (!scanner_node || !scanner_node->stream) { stream_not_found (topology, server_id, address, error); bson_free (address); return NULL; } bson_free (address); sd = mongoc_topology_server_by_id (topology, server_id, error); if (!sd) { return NULL; } } if (sd->type == MONGOC_SERVER_UNKNOWN) { memcpy (error, &sd->error, sizeof *error); mongoc_server_description_destroy (sd); return NULL; } /* stream open but not auth'ed: first use since connect or reconnect */ if (cluster->requires_auth && !scanner_node->has_auth) { if (!_mongoc_cluster_auth_node (cluster, scanner_node->stream, sd, &scanner_node->sasl_supported_mechs, &sd->error)) { memcpy (error, &sd->error, sizeof *error); mongoc_server_description_destroy (sd); return NULL; } scanner_node->has_auth = true; } return mongoc_server_stream_new ( &topology->description, sd, scanner_node->stream); } mongoc_server_stream_t * _mongoc_cluster_create_server_stream (mongoc_topology_t *topology, uint32_t server_id, mongoc_stream_t *stream, bson_error_t *error /* OUT */) { mongoc_server_description_t *sd; mongoc_server_stream_t *server_stream = NULL; /* can't just use mongoc_topology_server_by_id(), since we must hold the * lock while copying topology->description.logical_time below */ bson_mutex_lock (&topology->mutex); sd = mongoc_server_description_new_copy ( mongoc_topology_description_server_by_id ( &topology->description, server_id, error)); if (sd) { server_stream = mongoc_server_stream_new (&topology->description, sd, stream); } bson_mutex_unlock (&topology->mutex); return server_stream; } static mongoc_server_stream_t * mongoc_cluster_fetch_stream_pooled (mongoc_cluster_t *cluster, uint32_t server_id, bool reconnect_ok, bson_error_t *error /* OUT */) { mongoc_topology_t *topology; mongoc_stream_t *stream; mongoc_cluster_node_t *cluster_node; int64_t timestamp; cluster_node = (mongoc_cluster_node_t *) mongoc_set_get (cluster->nodes, server_id); topology = cluster->client->topology; if (cluster_node) { BSON_ASSERT (cluster_node->stream); timestamp = mongoc_topology_server_timestamp (topology, server_id); if (timestamp == -1 || cluster_node->timestamp < timestamp) { /* topology change or net error during background scan made us remove * or replace server description since node's birth. destroy node. */ mongoc_cluster_disconnect_node ( cluster, server_id, false /* invalidate */, NULL); } else { return _mongoc_cluster_create_server_stream ( topology, server_id, cluster_node->stream, error); } } /* no node, or out of date */ if (!reconnect_ok) { node_not_found (topology, server_id, error); return NULL; } stream = _mongoc_cluster_add_node (cluster, server_id, error); if (stream) { return _mongoc_cluster_create_server_stream ( topology, server_id, stream, error); } else { return NULL; } } /* *-------------------------------------------------------------------------- * * mongoc_cluster_init -- * * Initializes @cluster using the @uri and @client provided. The * @uri is used to determine the "mode" of the cluster. Based on the * uri we can determine if we are connected to a single host, a * replicaSet, or a shardedCluster. * * Returns: * None. * * Side effects: * @cluster is initialized. 
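 *
 * Illustrative connection strings (hostnames are placeholders) and the
 * mode they imply:
 *
 *    mongodb://db1.example.com:27017                            single host
 *    mongodb://db1.example.com,db2.example.com/?replicaSet=rs0  replica set
 *    mongodb://mongos1.example.com,mongos2.example.com          sharded cluster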
* *-------------------------------------------------------------------------- */ void mongoc_cluster_init (mongoc_cluster_t *cluster, const mongoc_uri_t *uri, void *client) { ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (uri); memset (cluster, 0, sizeof *cluster); cluster->uri = mongoc_uri_copy (uri); cluster->client = (mongoc_client_t *) client; cluster->requires_auth = (mongoc_uri_get_username (uri) || mongoc_uri_get_auth_mechanism (uri)); cluster->sockettimeoutms = mongoc_uri_get_option_as_int32 ( uri, MONGOC_URI_SOCKETTIMEOUTMS, MONGOC_DEFAULT_SOCKETTIMEOUTMS); cluster->socketcheckintervalms = mongoc_uri_get_option_as_int32 (uri, MONGOC_URI_SOCKETCHECKINTERVALMS, MONGOC_TOPOLOGY_SOCKET_CHECK_INTERVAL_MS); /* TODO for single-threaded case we don't need this */ cluster->nodes = mongoc_set_new (8, _mongoc_cluster_node_dtor, NULL); _mongoc_array_init (&cluster->iov, sizeof (mongoc_iovec_t)); cluster->operation_id = rand (); EXIT; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_destroy -- * * Clean up after @cluster and destroy all active connections. * All resources for @cluster are released. * * Returns: * None. * * Side effects: * Everything. * *-------------------------------------------------------------------------- */ void mongoc_cluster_destroy (mongoc_cluster_t *cluster) /* INOUT */ { ENTRY; BSON_ASSERT (cluster); mongoc_uri_destroy (cluster->uri); mongoc_set_destroy (cluster->nodes); _mongoc_array_destroy (&cluster->iov); #ifdef MONGOC_ENABLE_CRYPTO if (cluster->scram_cache) { _mongoc_scram_cache_destroy (cluster->scram_cache); } #endif EXIT; } static uint32_t _mongoc_cluster_select_server_id (mongoc_client_session_t *cs, mongoc_topology_t *topology, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_prefs, bson_error_t *error) { uint32_t server_id; if (_in_sharded_txn (cs)) { server_id = cs->server_id; if (!server_id) { server_id = mongoc_topology_select_server_id ( topology, optype, read_prefs, error); if (server_id) { _mongoc_client_session_pin (cs, server_id); } } } else { server_id = mongoc_topology_select_server_id (topology, optype, read_prefs, error); /* Transactions Spec: Additionally, any non-transaction operation using a * pinned ClientSession MUST unpin the session and the operation MUST * perform normal server selection. */ if (cs && !_mongoc_client_session_in_txn_or_ending (cs)) { _mongoc_client_session_unpin (cs); } } return server_id; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_stream_for_optype -- * * Internal server selection. * * Returns: * A mongoc_server_stream_t on which you must call * mongoc_server_stream_cleanup, or NULL on failure (sets @error) * * Side effects: * May add or disconnect nodes in @cluster->nodes. * Sets @error and initializes @reply on error. 
* *-------------------------------------------------------------------------- */ static mongoc_server_stream_t * _mongoc_cluster_stream_for_optype (mongoc_cluster_t *cluster, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_prefs, mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error) { mongoc_server_stream_t *server_stream; uint32_t server_id; mongoc_topology_t *topology = cluster->client->topology; ENTRY; BSON_ASSERT (cluster); server_id = _mongoc_cluster_select_server_id ( cs, topology, optype, read_prefs, error); if (!server_id) { _mongoc_bson_init_with_transient_txn_error (cs, reply); RETURN (NULL); } if (!mongoc_cluster_check_interval (cluster, server_id)) { /* Server Selection Spec: try once more */ server_id = _mongoc_cluster_select_server_id ( cs, topology, optype, read_prefs, error); if (!server_id) { _mongoc_bson_init_with_transient_txn_error (cs, reply); RETURN (NULL); } } /* connect or reconnect to server if necessary */ server_stream = _mongoc_cluster_stream_for_server ( cluster, server_id, true /* reconnect_ok */, cs, reply, error); RETURN (server_stream); } /* *-------------------------------------------------------------------------- * * mongoc_cluster_stream_for_reads -- * * Internal server selection. * * Returns: * A mongoc_server_stream_t on which you must call * mongoc_server_stream_cleanup, or NULL on failure (sets @error) * * Side effects: * Sets @error and initializes @reply on error. * May add new nodes to @cluster->nodes. * *-------------------------------------------------------------------------- */ mongoc_server_stream_t * mongoc_cluster_stream_for_reads (mongoc_cluster_t *cluster, const mongoc_read_prefs_t *read_prefs, mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error) { const mongoc_read_prefs_t *prefs_override = read_prefs; if (_mongoc_client_session_in_txn (cs)) { prefs_override = cs->txn.opts.read_prefs; } return _mongoc_cluster_stream_for_optype ( cluster, MONGOC_SS_READ, prefs_override, cs, reply, error); } /* *-------------------------------------------------------------------------- * * mongoc_cluster_stream_for_writes -- * * Get a stream for write operations. * * Returns: * A mongoc_server_stream_t on which you must call * mongoc_server_stream_cleanup, or NULL on failure (sets @error) * * Side effects: * Sets @error and initializes @reply on error. * May add new nodes to @cluster->nodes. 
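 *
 *       Example (an illustrative sketch, not part of the original source;
 *       @cs may be NULL when no client session is in use):
 *
 *          server_stream = mongoc_cluster_stream_for_writes (
 *             &client->cluster, cs, &reply, &error);
 *          if (!server_stream) {
 *             ... @error is set; inspect @reply, then bson_destroy (&reply) ...
 *          } else {
 *             ... execute the write on server_stream ...
 *             mongoc_server_stream_cleanup (server_stream);
 *          }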
* *-------------------------------------------------------------------------- */ mongoc_server_stream_t * mongoc_cluster_stream_for_writes (mongoc_cluster_t *cluster, mongoc_client_session_t *cs, bson_t *reply, bson_error_t *error) { return _mongoc_cluster_stream_for_optype ( cluster, MONGOC_SS_WRITE, NULL, cs, reply, error); } static bool _mongoc_cluster_min_of_max_obj_size_sds (void *item, void *ctx) { mongoc_server_description_t *sd = (mongoc_server_description_t *) item; int32_t *current_min = (int32_t *) ctx; if (sd->max_bson_obj_size < *current_min) { *current_min = sd->max_bson_obj_size; } return true; } static bool _mongoc_cluster_min_of_max_obj_size_nodes (void *item, void *ctx) { mongoc_cluster_node_t *node = (mongoc_cluster_node_t *) item; int32_t *current_min = (int32_t *) ctx; if (node->max_bson_obj_size < *current_min) { *current_min = node->max_bson_obj_size; } return true; } static bool _mongoc_cluster_min_of_max_msg_size_sds (void *item, void *ctx) { mongoc_server_description_t *sd = (mongoc_server_description_t *) item; int32_t *current_min = (int32_t *) ctx; if (sd->max_msg_size < *current_min) { *current_min = sd->max_msg_size; } return true; } static bool _mongoc_cluster_min_of_max_msg_size_nodes (void *item, void *ctx) { mongoc_cluster_node_t *node = (mongoc_cluster_node_t *) item; int32_t *current_min = (int32_t *) ctx; if (node->max_msg_size < *current_min) { *current_min = node->max_msg_size; } return true; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_get_max_bson_obj_size -- * * Return the minimum max_bson_obj_size across all servers in cluster. * * NOTE: this method uses the topology's mutex. * * Returns: * The minimum max_bson_obj_size. * * Side effects: * None * *-------------------------------------------------------------------------- */ int32_t mongoc_cluster_get_max_bson_obj_size (mongoc_cluster_t *cluster) { int32_t max_bson_obj_size = -1; max_bson_obj_size = MONGOC_DEFAULT_BSON_OBJ_SIZE; if (!cluster->client->topology->single_threaded) { mongoc_set_for_each (cluster->nodes, _mongoc_cluster_min_of_max_obj_size_nodes, &max_bson_obj_size); } else { mongoc_set_for_each (cluster->client->topology->description.servers, _mongoc_cluster_min_of_max_obj_size_sds, &max_bson_obj_size); } return max_bson_obj_size; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_get_max_msg_size -- * * Return the minimum max msg size across all servers in cluster. * * NOTE: this method uses the topology's mutex. * * Returns: * The minimum max_msg_size * * Side effects: * None * *-------------------------------------------------------------------------- */ int32_t mongoc_cluster_get_max_msg_size (mongoc_cluster_t *cluster) { int32_t max_msg_size = MONGOC_DEFAULT_MAX_MSG_SIZE; if (!cluster->client->topology->single_threaded) { mongoc_set_for_each (cluster->nodes, _mongoc_cluster_min_of_max_msg_size_nodes, &max_msg_size); } else { mongoc_set_for_each (cluster->client->topology->description.servers, _mongoc_cluster_min_of_max_msg_size_sds, &max_msg_size); } return max_msg_size; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_check_interval -- * * Server Selection Spec: * * Only for single-threaded drivers. * * If a server is selected that has an existing connection that has been * idle for socketCheckIntervalMS, the driver MUST check the connection * with the "ping" command. If the ping succeeds, use the selected * connection. 
If not, set the server's type to Unknown and update the * Topology Description according to the Server Discovery and Monitoring * Spec, and attempt once more to select a server. * * Returns: * True if the check succeeded or no check was required, false if the * check failed. * * Side effects: * If a check fails, closes stream and may set server type Unknown. * *-------------------------------------------------------------------------- */ bool mongoc_cluster_check_interval (mongoc_cluster_t *cluster, uint32_t server_id) { mongoc_cmd_parts_t parts; mongoc_topology_t *topology; mongoc_topology_scanner_node_t *scanner_node; mongoc_stream_t *stream; int64_t now; bson_t command; bson_error_t error; bool r = true; mongoc_server_stream_t *server_stream; topology = cluster->client->topology; if (!topology->single_threaded) { return true; } scanner_node = mongoc_topology_scanner_get_node (topology->scanner, server_id); if (!scanner_node) { return false; } BSON_ASSERT (!scanner_node->retired); stream = scanner_node->stream; if (!stream) { return false; } now = bson_get_monotonic_time (); if (scanner_node->last_used + (1000 * CHECK_CLOSED_DURATION_MSEC) < now) { if (mongoc_stream_check_closed (stream)) { bson_set_error (&error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "connection closed"); mongoc_cluster_disconnect_node (cluster, server_id, true, &error); return false; } } if (scanner_node->last_used + (1000 * cluster->socketcheckintervalms) < now) { bson_init (&command); BSON_APPEND_INT32 (&command, "ping", 1); mongoc_cmd_parts_init ( &parts, cluster->client, "admin", MONGOC_QUERY_SLAVE_OK, &command); parts.prohibit_lsid = true; server_stream = _mongoc_cluster_create_server_stream ( cluster->client->topology, server_id, stream, &error); r = mongoc_cluster_run_command_parts ( cluster, server_stream, &parts, NULL, &error); mongoc_server_stream_cleanup (server_stream); bson_destroy (&command); if (!r) { mongoc_cluster_disconnect_node (cluster, server_id, true, &error); } } return r; } /* *-------------------------------------------------------------------------- * * mongoc_cluster_legacy_rpc_sendv_to_server -- * * Sends the given RPCs to the given server. Used for OP_QUERY cursors, * OP_KILLCURSORS, and legacy writes with OP_INSERT, OP_UPDATE, and * OP_DELETE. This function is *not* in the OP_QUERY command path. * * Returns: * True if successful. * * Side effects: * @rpc may be mutated and should be considered invalid after calling * this method. * * @error may be set. 
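 *
 *       Example (an illustrative sketch, not part of the original source): a
 *       legacy round trip pairs this send with mongoc_cluster_try_recv on the
 *       same server stream:
 *
 *          if (mongoc_cluster_legacy_rpc_sendv_to_server (
 *                 cluster, &rpc, server_stream, error) &&
 *              mongoc_cluster_try_recv (
 *                 cluster, &reply_rpc, &buffer, server_stream, error)) {
 *             ... reply_rpc remains valid only while buffer holds the data ...
 *          }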
* *-------------------------------------------------------------------------- */ bool mongoc_cluster_legacy_rpc_sendv_to_server ( mongoc_cluster_t *cluster, mongoc_rpc_t *rpc, mongoc_server_stream_t *server_stream, bson_error_t *error) { uint32_t server_id; int32_t max_msg_size; bool ret = false; int32_t compressor_id = 0; char *output = NULL; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (rpc); BSON_ASSERT (server_stream); server_id = server_stream->sd->id; if (cluster->client->in_exhaust) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_IN_EXHAUST, "A cursor derived from this client is in exhaust."); GOTO (done); } _mongoc_array_clear (&cluster->iov); compressor_id = mongoc_server_description_compressor_id (server_stream->sd); _mongoc_rpc_gather (rpc, &cluster->iov); _mongoc_rpc_swab_to_le (rpc); if (compressor_id != -1) { output = _mongoc_rpc_compress (cluster, compressor_id, rpc, error); if (output == NULL) { GOTO (done); } } max_msg_size = mongoc_server_stream_max_msg_size (server_stream); if (BSON_UINT32_FROM_LE (rpc->header.msg_len) > max_msg_size) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_TOO_BIG, "Attempted to send an RPC larger than the " "max allowed message size. Was %u, allowed %u.", BSON_UINT32_FROM_LE (rpc->header.msg_len), max_msg_size); GOTO (done); } if (!_mongoc_stream_writev_full (server_stream->stream, cluster->iov.data, cluster->iov.len, cluster->sockettimeoutms, error)) { GOTO (done); } _mongoc_topology_update_last_used (cluster->client->topology, server_id); ret = true; done: if (compressor_id) { bson_free (output); } RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_cluster_try_recv -- * * Tries to receive the next event from the MongoDB server. * The contents are loaded into @buffer and then * scattered into the @rpc structure. @rpc is valid as long as * @buffer contains the contents read into it. * * Callers that can optimize a reuse of @buffer should do so. It * can save many memory allocations. * * Returns: * True if successful. * * Side effects: * @rpc is set on success, @error on failure. * @buffer will be filled with the input data. * *-------------------------------------------------------------------------- */ bool mongoc_cluster_try_recv (mongoc_cluster_t *cluster, mongoc_rpc_t *rpc, mongoc_buffer_t *buffer, mongoc_server_stream_t *server_stream, bson_error_t *error) { uint32_t server_id; bson_error_t err_local; int32_t msg_len; int32_t max_msg_size; off_t pos; ENTRY; BSON_ASSERT (cluster); BSON_ASSERT (rpc); BSON_ASSERT (buffer); BSON_ASSERT (server_stream); server_id = server_stream->sd->id; TRACE ("Waiting for reply from server_id \"%u\"", server_id); if (!error) { error = &err_local; } /* * Buffer the message length to determine how much more to read. */ pos = buffer->len; if (!_mongoc_buffer_append_from_stream ( buffer, server_stream->stream, 4, cluster->sockettimeoutms, error)) { MONGOC_DEBUG ( "Could not read 4 bytes, stream probably closed or timed out"); mongoc_counter_protocol_ingress_error_inc (); mongoc_cluster_disconnect_node ( cluster, server_id, !mongoc_stream_timed_out (server_stream->stream), error); RETURN (false); } /* * Read the msg length from the buffer. 
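    * (The first four bytes of any MongoDB wire-protocol message are the
    * little-endian int32 messageLength, so they tell us how much more of the
    * message remains to be buffered.)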
*/ memcpy (&msg_len, &buffer->data[pos], 4); msg_len = BSON_UINT32_FROM_LE (msg_len); max_msg_size = mongoc_server_stream_max_msg_size (server_stream); if ((msg_len < 16) || (msg_len > max_msg_size)) { bson_set_error (error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Corrupt or malicious reply received."); mongoc_cluster_disconnect_node (cluster, server_id, true, error); mongoc_counter_protocol_ingress_error_inc (); RETURN (false); } /* * Read the rest of the message from the stream. */ if (!_mongoc_buffer_append_from_stream (buffer, server_stream->stream, msg_len - 4, cluster->sockettimeoutms, error)) { mongoc_cluster_disconnect_node ( cluster, server_id, !mongoc_stream_timed_out (server_stream->stream), error); mongoc_counter_protocol_ingress_error_inc (); RETURN (false); } /* * Scatter the buffer into the rpc structure. */ if (!_mongoc_rpc_scatter (rpc, &buffer->data[pos], msg_len)) { bson_set_error (error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Failed to decode reply from server."); mongoc_cluster_disconnect_node (cluster, server_id, true, error); mongoc_counter_protocol_ingress_error_inc (); RETURN (false); } if (BSON_UINT32_FROM_LE (rpc->header.opcode) == MONGOC_OPCODE_COMPRESSED) { uint8_t *buf = NULL; size_t len = BSON_UINT32_FROM_LE (rpc->compressed.uncompressed_size) + sizeof (mongoc_rpc_header_t); buf = bson_malloc0 (len); if (!_mongoc_rpc_decompress (rpc, buf, len)) { bson_free (buf); bson_set_error (error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Could not decompress server reply"); RETURN (false); } _mongoc_buffer_destroy (buffer); _mongoc_buffer_init (buffer, buf, len, NULL, NULL); } _mongoc_rpc_swab_from_le (rpc); RETURN (true); } static void network_error_reply (bson_t *reply, mongoc_cmd_t *cmd) { bson_t labels; if (reply) { bson_init (reply); } /* Transactions Spec defines TransientTransactionError: "Any * network error or server selection error encountered running any * command besides commitTransaction in a transaction. In the case * of command errors, the server adds the label; in the case of * network errors or server selection errors where the client * receives no server reply, the client adds the label." */ if (_mongoc_client_session_in_txn (cmd->session) && !cmd->is_txn_finish) { /* Transaction Spec: "Drivers MUST unpin a ClientSession when a command * within a transaction, including commitTransaction and abortTransaction, * fails with a TransientTransactionError". If we're about to add * a TransientTransactionError label due to a client side error then we * unpin. If commitTransaction/abortTransation includes a label in the * server reply, we unpin in _mongoc_client_session_handle_reply. 
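    * The reply built below then carries, e.g.:
    *    { "errorLabels" : [ "TransientTransactionError" ] }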
*/ cmd->session->server_id = 0; if (!reply) { return; } BSON_APPEND_ARRAY_BEGIN (reply, "errorLabels", &labels); BSON_APPEND_UTF8 (&labels, "0", TRANSIENT_TXN_ERR); bson_append_array_end (reply, &labels); } } static bool mongoc_cluster_run_opmsg (mongoc_cluster_t *cluster, mongoc_cmd_t *cmd, bson_t *reply, bson_error_t *error) { mongoc_rpc_section_t section[2]; mongoc_buffer_t buffer; bson_t reply_local; /* only statically initialized */ char *output = NULL; mongoc_rpc_t rpc; int32_t msg_len; bool ok; const mongoc_server_stream_t *server_stream; server_stream = cmd->server_stream; if (!cmd->command_name) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Empty command document"); _mongoc_bson_init_if_set (reply); return false; } if (cluster->client->in_exhaust) { bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_IN_EXHAUST, "A cursor derived from this client is in exhaust."); _mongoc_bson_init_if_set (reply); return false; } _mongoc_array_clear (&cluster->iov); _mongoc_buffer_init (&buffer, NULL, 0, NULL, NULL); rpc.header.msg_len = 0; rpc.header.request_id = ++cluster->request_id; rpc.header.response_to = 0; rpc.header.opcode = MONGOC_OPCODE_MSG; if (cmd->is_acknowledged) { rpc.msg.flags = 0; } else { rpc.msg.flags = MONGOC_MSG_MORE_TO_COME; } rpc.msg.n_sections = 1; section[0].payload_type = 0; section[0].payload.bson_document = bson_get_data (cmd->command); rpc.msg.sections[0] = section[0]; if (cmd->payload) { section[1].payload_type = 1; section[1].payload.sequence.size = cmd->payload_size + strlen (cmd->payload_identifier) + 1 + sizeof (int32_t); section[1].payload.sequence.identifier = cmd->payload_identifier; section[1].payload.sequence.bson_documents = cmd->payload; rpc.msg.sections[1] = section[1]; rpc.msg.n_sections++; } _mongoc_rpc_gather (&rpc, &cluster->iov); _mongoc_rpc_swab_to_le (&rpc); if (mongoc_cmd_is_compressible (cmd)) { int32_t compressor_id = mongoc_server_description_compressor_id (server_stream->sd); TRACE ( "Function '%s' is compressible: %d", cmd->command_name, compressor_id); if (compressor_id != -1) { output = _mongoc_rpc_compress (cluster, compressor_id, &rpc, error); if (output == NULL) { _mongoc_bson_init_if_set (reply); _mongoc_buffer_destroy (&buffer); return false; } } } ok = _mongoc_stream_writev_full (server_stream->stream, (mongoc_iovec_t *) cluster->iov.data, cluster->iov.len, cluster->sockettimeoutms, error); if (!ok) { /* add info about the command to writev_full's error message */ RUN_CMD_ERR_DECORATE; mongoc_cluster_disconnect_node ( cluster, server_stream->sd->id, true, error); bson_free (output); network_error_reply (reply, cmd); _mongoc_buffer_destroy (&buffer); return false; } /* If acknowledged, wait for a server response. 
Otherwise, exit early */ if (cmd->is_acknowledged) { ok = _mongoc_buffer_append_from_stream ( &buffer, server_stream->stream, 4, cluster->sockettimeoutms, error); if (!ok) { RUN_CMD_ERR_DECORATE; mongoc_cluster_disconnect_node ( cluster, server_stream->sd->id, true, error); bson_free (output); network_error_reply (reply, cmd); _mongoc_buffer_destroy (&buffer); return false; } BSON_ASSERT (buffer.len == 4); memcpy (&msg_len, buffer.data, 4); msg_len = BSON_UINT32_FROM_LE (msg_len); if ((msg_len < 16) || (msg_len > server_stream->sd->max_msg_size)) { RUN_CMD_ERR ( MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Message size %d is not within expected range 16-%d bytes", msg_len, server_stream->sd->max_msg_size); mongoc_cluster_disconnect_node ( cluster, server_stream->sd->id, true, error); bson_free (output); network_error_reply (reply, cmd); _mongoc_buffer_destroy (&buffer); return false; } ok = _mongoc_buffer_append_from_stream (&buffer, server_stream->stream, (size_t) msg_len - 4, cluster->sockettimeoutms, error); if (!ok) { RUN_CMD_ERR_DECORATE; mongoc_cluster_disconnect_node ( cluster, server_stream->sd->id, true, error); bson_free (output); network_error_reply (reply, cmd); _mongoc_buffer_destroy (&buffer); return false; } ok = _mongoc_rpc_scatter (&rpc, buffer.data, buffer.len); if (!ok) { RUN_CMD_ERR (MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Malformed message from server"); bson_free (output); network_error_reply (reply, cmd); _mongoc_buffer_destroy (&buffer); return false; } if (BSON_UINT32_FROM_LE (rpc.header.opcode) == MONGOC_OPCODE_COMPRESSED) { size_t len = BSON_UINT32_FROM_LE (rpc.compressed.uncompressed_size) + sizeof (mongoc_rpc_header_t); output = bson_realloc (output, len); if (!_mongoc_rpc_decompress (&rpc, (uint8_t *) output, len)) { RUN_CMD_ERR (MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Could not decompress message from server"); mongoc_cluster_disconnect_node ( cluster, server_stream->sd->id, true, error); bson_free (output); network_error_reply (reply, cmd); _mongoc_buffer_destroy (&buffer); return false; } } _mongoc_rpc_swab_from_le (&rpc); memcpy (&msg_len, rpc.msg.sections[0].payload.bson_document, 4); msg_len = BSON_UINT32_FROM_LE (msg_len); bson_init_static ( &reply_local, rpc.msg.sections[0].payload.bson_document, msg_len); _mongoc_topology_update_cluster_time (cluster->client->topology, &reply_local); ok = _mongoc_cmd_check_ok ( &reply_local, cluster->client->error_api_version, error); if (cmd->session) { _mongoc_client_session_handle_reply ( cmd->session, cmd->is_acknowledged, &reply_local); } if (reply) { bson_copy_to (&reply_local, reply); } } else { _mongoc_bson_init_if_set (reply); } _mongoc_buffer_destroy (&buffer); bson_free (output); return ok; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cmd-private.h0000644000076500000240000001042113572250757025674 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" /* * Internal struct to represent a command we will send to the server - command * parameters are collected in a mongoc_cmd_parts_t until we know the server's * wire version and whether it is mongos, then we collect the parts into a * mongoc_cmd_t, and gather that into a mongoc_rpc_t. */ #ifndef MONGOC_CMD_PRIVATE_H #define MONGOC_CMD_PRIVATE_H #include #include "mongoc/mongoc-server-stream-private.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc.h" #include "mongoc/mongoc-opts-private.h" BSON_BEGIN_DECLS #define MONGOC_DEFAULT_RETRYREADS true /* retryWrites requires sessions, which require crypto */ #ifdef MONGOC_ENABLE_CRYPTO #define MONGOC_DEFAULT_RETRYWRITES true #else #define MONGOC_DEFAULT_RETRYWRITES false #endif typedef enum { MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN, MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES, MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO } mongoc_cmd_parts_allow_txn_number_t; typedef struct _mongoc_cmd_t { const char *db_name; mongoc_query_flags_t query_flags; const bson_t *command; const char *command_name; const uint8_t *payload; int32_t payload_size; const char *payload_identifier; const mongoc_server_stream_t *server_stream; int64_t operation_id; mongoc_client_session_t *session; bool is_acknowledged; bool is_txn_finish; } mongoc_cmd_t; typedef struct _mongoc_cmd_parts_t { mongoc_cmd_t assembled; mongoc_query_flags_t user_query_flags; const bson_t *body; bson_t read_concern_document; bson_t write_concern_document; bson_t extra; const mongoc_read_prefs_t *read_prefs; bson_t assembled_body; bool is_read_command; bool is_write_command; bool prohibit_lsid; mongoc_cmd_parts_allow_txn_number_t allow_txn_number; bool is_retryable_read; bool is_retryable_write; bool has_temp_session; mongoc_client_t *client; } mongoc_cmd_parts_t; void mongoc_cmd_parts_init (mongoc_cmd_parts_t *op, mongoc_client_t *client, const char *db_name, mongoc_query_flags_t user_query_flags, const bson_t *command_body); void mongoc_cmd_parts_set_session (mongoc_cmd_parts_t *parts, mongoc_client_session_t *cs); bool mongoc_cmd_parts_append_opts (mongoc_cmd_parts_t *parts, bson_iter_t *iter, int max_wire_version, bson_error_t *error); bool mongoc_cmd_parts_set_read_concern (mongoc_cmd_parts_t *parts, const mongoc_read_concern_t *rc, int max_wire_version, bson_error_t *error); bool mongoc_cmd_parts_set_write_concern (mongoc_cmd_parts_t *parts, const mongoc_write_concern_t *wc, int max_wire_version, bson_error_t *error); bool mongoc_cmd_parts_append_read_write (mongoc_cmd_parts_t *parts, mongoc_read_write_opts_t *rw_opts, int max_wire_version, bson_error_t *error); bool mongoc_cmd_parts_assemble (mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream, bson_error_t *error); bool mongoc_cmd_is_compressible (mongoc_cmd_t *cmd); void mongoc_cmd_parts_cleanup (mongoc_cmd_parts_t *op); bool _is_retryable_read (const mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream); BSON_END_DECLS #endif /* MONGOC_CMD_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cmd.c0000644000076500000240000010055413572250757024226 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-cmd-private.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-write-concern-private.h" /* For strcasecmp on Windows */ #include "mongoc/mongoc-util-private.h" void mongoc_cmd_parts_init (mongoc_cmd_parts_t *parts, mongoc_client_t *client, const char *db_name, mongoc_query_flags_t user_query_flags, const bson_t *command_body) { parts->body = command_body; parts->user_query_flags = user_query_flags; parts->read_prefs = NULL; parts->is_read_command = false; parts->is_write_command = false; parts->prohibit_lsid = false; parts->allow_txn_number = MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN; parts->is_retryable_read = false; parts->is_retryable_write = false; parts->has_temp_session = false; parts->client = client; bson_init (&parts->read_concern_document); bson_init (&parts->write_concern_document); bson_init (&parts->extra); bson_init (&parts->assembled_body); parts->assembled.db_name = db_name; parts->assembled.command = NULL; parts->assembled.query_flags = MONGOC_QUERY_NONE; parts->assembled.payload_identifier = NULL; parts->assembled.payload = NULL; parts->assembled.session = NULL; parts->assembled.is_acknowledged = true; parts->assembled.is_txn_finish = false; } /* *-------------------------------------------------------------------------- * * mongoc_cmd_parts_set_session -- * * Set the client session field. * * Side effects: * Aborts if the command is assembled or if mongoc_cmd_parts_append_opts * was called before. * *-------------------------------------------------------------------------- */ void mongoc_cmd_parts_set_session (mongoc_cmd_parts_t *parts, mongoc_client_session_t *cs) { BSON_ASSERT (parts); BSON_ASSERT (!parts->assembled.command); BSON_ASSERT (!parts->assembled.session); parts->assembled.session = cs; } /* *-------------------------------------------------------------------------- * * mongoc_cmd_parts_append_opts -- * * Take an iterator over user-supplied options document and append the * options to @parts->command_extra, taking the selected server's max * wire version into account. * * Return: * True if the options were successfully applied. If any options are * invalid, returns false and fills out @error. In that case @parts is * invalid and must not be used. * * Side effects: * May partly apply options before returning an error. 
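 *
 *       Example (an illustrative sketch, not part of the original source): a
 *       caller typically walks a user-supplied opts document right after
 *       initializing the parts, then hands everything to
 *       mongoc_cluster_run_command_parts:
 *
 *          bson_iter_t iter;
 *
 *          mongoc_cmd_parts_init (&parts, client, db, MONGOC_QUERY_NONE, &command);
 *          if (bson_iter_init (&iter, opts) &&
 *              !mongoc_cmd_parts_append_opts (
 *                 &parts, &iter, server_stream->sd->max_wire_version, &error)) {
 *             ... @parts is invalid and must not be used ...
 *          }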
* *-------------------------------------------------------------------------- */ bool mongoc_cmd_parts_append_opts (mongoc_cmd_parts_t *parts, bson_iter_t *iter, int max_wire_version, bson_error_t *error) { mongoc_client_session_t *cs = NULL; mongoc_write_concern_t *wc; uint32_t len; const uint8_t *data; bson_t read_concern; ENTRY; /* not yet assembled */ BSON_ASSERT (!parts->assembled.command); while (bson_iter_next (iter)) { if (BSON_ITER_IS_KEY (iter, "collation")) { if (max_wire_version < WIRE_VERSION_COLLATION) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support collation"); RETURN (false); } } else if (BSON_ITER_IS_KEY (iter, "writeConcern")) { wc = _mongoc_write_concern_new_from_iter (iter, error); if (!wc) { RETURN (false); } if (!mongoc_cmd_parts_set_write_concern ( parts, wc, max_wire_version, error)) { mongoc_write_concern_destroy (wc); RETURN (false); } mongoc_write_concern_destroy (wc); continue; } else if (BSON_ITER_IS_KEY (iter, "readConcern")) { if (max_wire_version < WIRE_VERSION_READ_CONCERN) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support readConcern"); RETURN (false); } if (!BSON_ITER_HOLDS_DOCUMENT (iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "Invalid readConcern"); RETURN (false); } /* add readConcern later, once we know about causal consistency */ bson_iter_document (iter, &len, &data); BSON_ASSERT (bson_init_static (&read_concern, data, (size_t) len)); bson_destroy (&parts->read_concern_document); bson_copy_to (&read_concern, &parts->read_concern_document); continue; } else if (BSON_ITER_IS_KEY (iter, "sessionId")) { BSON_ASSERT (!parts->assembled.session); if (!_mongoc_client_session_from_iter ( parts->client, iter, &cs, error)) { RETURN (false); } parts->assembled.session = cs; continue; } else if (BSON_ITER_IS_KEY (iter, "serverId") || BSON_ITER_IS_KEY (iter, "maxAwaitTimeMS")) { continue; } if (!bson_append_iter (&parts->extra, bson_iter_key (iter), -1, iter)) { RETURN (false); } } RETURN (true); } #define OPTS_ERR(_code, ...) \ do { \ bson_set_error ( \ error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_##_code, __VA_ARGS__); \ RETURN (false); \ } while (0) /* set readConcern if allowed, otherwise error */ bool mongoc_cmd_parts_set_read_concern (mongoc_cmd_parts_t *parts, const mongoc_read_concern_t *rc, int max_wire_version, bson_error_t *error) { const char *command_name; ENTRY; /* In a txn, set read concern in mongoc_cmd_parts_assemble, not here. * * Transactions Spec: "The readConcern MUST NOT be inherited from the * collection, database, or client associated with the driver method that * invokes the first command." 
*/ if (_mongoc_client_session_in_txn (parts->assembled.session)) { RETURN (true); } command_name = _mongoc_get_command_name (parts->body); if (!command_name) { OPTS_ERR (COMMAND_INVALID_ARG, "Empty command document"); } if (mongoc_read_concern_is_default (rc)) { RETURN (true); } if (max_wire_version < WIRE_VERSION_READ_CONCERN) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "\"%s\" command does not support readConcern with " "wire version %d, wire version %d is required", command_name, max_wire_version, WIRE_VERSION_READ_CONCERN); RETURN (false); } bson_destroy (&parts->read_concern_document); bson_copy_to (_mongoc_read_concern_get_bson ((mongoc_read_concern_t *) rc), &parts->read_concern_document); RETURN (true); } /* set writeConcern if allowed, otherwise ignore - unlike set_read_concern, it's * the caller's responsibility to check if writeConcern is supported */ bool mongoc_cmd_parts_set_write_concern (mongoc_cmd_parts_t *parts, const mongoc_write_concern_t *wc, int max_wire_version, bson_error_t *error) { const char *command_name; bool is_fam; bool wc_allowed; ENTRY; if (!wc) { RETURN (true); } command_name = _mongoc_get_command_name (parts->body); if (!command_name) { OPTS_ERR (COMMAND_INVALID_ARG, "Empty command document"); } is_fam = !strcasecmp (command_name, "findandmodify"); wc_allowed = parts->is_write_command || (is_fam && max_wire_version >= WIRE_VERSION_FAM_WRITE_CONCERN) || (!is_fam && max_wire_version >= WIRE_VERSION_CMD_WRITE_CONCERN); if (wc_allowed) { parts->assembled.is_acknowledged = mongoc_write_concern_is_acknowledged (wc); bson_destroy (&parts->write_concern_document); bson_copy_to ( _mongoc_write_concern_get_bson ((mongoc_write_concern_t *) wc), &parts->write_concern_document); } RETURN (true); } /* *-------------------------------------------------------------------------- * * mongoc_cmd_parts_append_read_write -- * * Append user-supplied options to @parts->command_extra, taking the * selected server's max wire version into account. * * Return: * True if the options were successfully applied. If any options are * invalid, returns false and fills out @error. In that case @parts is * invalid and must not be used. * * Side effects: * May partly apply options before returning an error. 
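 *
 *       Example (an illustrative sketch, not part of the original source; it
 *       assumes @read_write_opts was already populated from the user's opts
 *       document):
 *
 *          if (!mongoc_cmd_parts_append_read_write (
 *                 &parts, &read_write_opts,
 *                 server_stream->sd->max_wire_version, &error)) {
 *             ... @parts is invalid and must not be used ...
 *          }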
* *-------------------------------------------------------------------------- */ bool mongoc_cmd_parts_append_read_write (mongoc_cmd_parts_t *parts, mongoc_read_write_opts_t *rw_opts, int max_wire_version, bson_error_t *error) { ENTRY; /* not yet assembled */ BSON_ASSERT (!parts->assembled.command); if (!bson_empty (&rw_opts->collation)) { if (max_wire_version < WIRE_VERSION_COLLATION) { OPTS_ERR (PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support collation"); } if (!bson_append_document ( &parts->extra, "collation", 9, &rw_opts->collation)) { OPTS_ERR (COMMAND_INVALID_ARG, "'opts' with 'collation' is too large"); } } if (!mongoc_cmd_parts_set_write_concern ( parts, rw_opts->writeConcern, max_wire_version, error)) { RETURN (false); } /* process explicit read concern */ if (!bson_empty (&rw_opts->readConcern)) { if (max_wire_version < WIRE_VERSION_READ_CONCERN) { OPTS_ERR (PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support readConcern"); } /* save readConcern for later, once we know about causal consistency */ bson_destroy (&parts->read_concern_document); bson_copy_to (&rw_opts->readConcern, &parts->read_concern_document); } if (rw_opts->client_session) { BSON_ASSERT (!parts->assembled.session); parts->assembled.session = rw_opts->client_session; } if (!bson_concat (&parts->extra, &rw_opts->extra)) { OPTS_ERR (COMMAND_INVALID_ARG, "'opts' with extra fields is too large"); } RETURN (true); } #undef OPTS_ERR static void _mongoc_cmd_parts_ensure_copied (mongoc_cmd_parts_t *parts) { if (parts->assembled.command == parts->body) { bson_concat (&parts->assembled_body, parts->body); bson_concat (&parts->assembled_body, &parts->extra); parts->assembled.command = &parts->assembled_body; } } static void _mongoc_cmd_parts_add_write_concern (mongoc_cmd_parts_t *parts) { if (!bson_empty (&parts->write_concern_document)) { _mongoc_cmd_parts_ensure_copied (parts); bson_append_document (&parts->assembled_body, "writeConcern", 12, &parts->write_concern_document); } } /* The server type must be mongos, or message must be OP_MSG. */ static void _mongoc_cmd_parts_add_read_prefs (bson_t *query, const mongoc_read_prefs_t *prefs) { bson_t child; const char *mode_str; const bson_t *tags; int64_t stale; mode_str = _mongoc_read_mode_as_str (mongoc_read_prefs_get_mode (prefs)); tags = mongoc_read_prefs_get_tags (prefs); stale = mongoc_read_prefs_get_max_staleness_seconds (prefs); bson_append_document_begin (query, "$readPreference", 15, &child); bson_append_utf8 (&child, "mode", 4, mode_str, -1); if (!bson_empty0 (tags)) { bson_append_array (&child, "tags", 4, tags); } if (stale != MONGOC_NO_MAX_STALENESS) { bson_append_int64 (&child, "maxStalenessSeconds", 19, stale); } bson_append_document_end (query, &child); } static void _iter_concat (bson_t *dst, bson_iter_t *iter) { uint32_t len; const uint8_t *data; bson_t src; bson_iter_document (iter, &len, &data); BSON_ASSERT (bson_init_static (&src, data, len)); BSON_ASSERT (bson_concat (dst, &src)); } /* Update result with the read prefs. Server must be mongos. 
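 * For example, a secondaryPreferred preference with a tag set produces a
 * command of roughly this shape (illustrative values):
 *    { "$query" : { ...user command..., "readConcern" : { ... } },
 *      "$readPreference" : { "mode" : "secondaryPreferred",
 *                            "tags" : [ { "dc" : "ny" } ] } }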
*/ static void _mongoc_cmd_parts_assemble_mongos (mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream) { mongoc_read_mode_t mode; const bson_t *tags = NULL; bool add_read_prefs = false; bson_t query; bson_iter_t dollar_query; bool has_dollar_query = false; bool requires_read_concern; bool requires_write_concern; ENTRY; mode = mongoc_read_prefs_get_mode (parts->read_prefs); if (parts->read_prefs) { tags = mongoc_read_prefs_get_tags (parts->read_prefs); } /* Server Selection Spec says: * * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag * and MUST NOT use $readPreference * * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and * MUST also use $readPreference * * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol * flag and MUST also use $readPreference * * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol * flag. If the read preference contains a non-empty tag_sets parameter, * drivers MUST use $readPreference; otherwise, drivers MUST NOT use * $readPreference * * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and * MUST also use $readPreference */ switch (mode) { case MONGOC_READ_PRIMARY: break; case MONGOC_READ_SECONDARY_PREFERRED: if (!bson_empty0 (tags)) { add_read_prefs = true; } parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK; break; case MONGOC_READ_PRIMARY_PREFERRED: case MONGOC_READ_SECONDARY: case MONGOC_READ_NEAREST: default: parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK; add_read_prefs = true; } requires_read_concern = !bson_empty (&parts->read_concern_document) && strcmp (parts->assembled.command_name, "getMore") != 0; requires_write_concern = !bson_empty (&parts->write_concern_document); if (add_read_prefs) { /* produce {$query: {user query, readConcern}, $readPreference: ... 
} */ bson_append_document_begin (&parts->assembled_body, "$query", 6, &query); if (bson_iter_init_find (&dollar_query, parts->body, "$query")) { /* user provided something like {$query: {key: "x"}} */ has_dollar_query = true; _iter_concat (&query, &dollar_query); } else { bson_concat (&query, parts->body); } bson_concat (&query, &parts->extra); if (requires_read_concern) { bson_append_document ( &query, "readConcern", 11, &parts->read_concern_document); } if (requires_write_concern) { bson_append_document ( &query, "writeConcern", 12, &parts->write_concern_document); } bson_append_document_end (&parts->assembled_body, &query); _mongoc_cmd_parts_add_read_prefs (&parts->assembled_body, parts->read_prefs); if (has_dollar_query) { /* copy anything that isn't in user's $query */ bson_copy_to_excluding_noinit ( parts->body, &parts->assembled_body, "$query", NULL); } parts->assembled.command = &parts->assembled_body; } else if (bson_iter_init_find (&dollar_query, parts->body, "$query")) { /* user provided $query, we have no read prefs */ bson_append_document_begin (&parts->assembled_body, "$query", 6, &query); _iter_concat (&query, &dollar_query); bson_concat (&query, &parts->extra); if (requires_read_concern) { bson_append_document ( &query, "readConcern", 11, &parts->read_concern_document); } if (requires_write_concern) { bson_append_document ( &query, "writeConcern", 12, &parts->write_concern_document); } bson_append_document_end (&parts->assembled_body, &query); /* copy anything that isn't in user's $query */ bson_copy_to_excluding_noinit ( parts->body, &parts->assembled_body, "$query", NULL); parts->assembled.command = &parts->assembled_body; } else { if (requires_read_concern) { _mongoc_cmd_parts_ensure_copied (parts); bson_append_document (&parts->assembled_body, "readConcern", 11, &parts->read_concern_document); } _mongoc_cmd_parts_add_write_concern (parts); } if (!bson_empty (&parts->extra)) { /* if none of the above logic has merged "extra", do it now */ _mongoc_cmd_parts_ensure_copied (parts); } EXIT; } static void _mongoc_cmd_parts_assemble_mongod (mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream) { ENTRY; if (!parts->is_write_command) { switch (server_stream->topology_type) { case MONGOC_TOPOLOGY_SINGLE: /* Server Selection Spec: for topology type single and server types * besides mongos, "clients MUST always set the slaveOK wire * protocol flag on reads to ensure that any server type can handle * the request." */ parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK; break; case MONGOC_TOPOLOGY_RS_NO_PRIMARY: case MONGOC_TOPOLOGY_RS_WITH_PRIMARY: /* Server Selection Spec: for RS topology types, "For all read * preferences modes except primary, clients MUST set the slaveOK wire * protocol flag to ensure that any suitable server can handle the * request. Clients MUST NOT set the slaveOK wire protocol flag if the * read preference mode is primary. 
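       * Hence the guard just below: the slaveOK flag is set only when a read
       * preference other than primary is present.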
*/ if (parts->read_prefs && parts->read_prefs->mode != MONGOC_READ_PRIMARY) { parts->assembled.query_flags |= MONGOC_QUERY_SLAVE_OK; } break; case MONGOC_TOPOLOGY_SHARDED: case MONGOC_TOPOLOGY_UNKNOWN: case MONGOC_TOPOLOGY_DESCRIPTION_TYPES: default: /* must not call this function w/ sharded or unknown topology type */ BSON_ASSERT (false); } } /* if (!parts->is_write_command) */ if (!bson_empty (&parts->extra)) { _mongoc_cmd_parts_ensure_copied (parts); } if (!bson_empty (&parts->read_concern_document) && strcmp (parts->assembled.command_name, "getMore") != 0) { _mongoc_cmd_parts_ensure_copied (parts); bson_append_document (&parts->assembled_body, "readConcern", 11, &parts->read_concern_document); } _mongoc_cmd_parts_add_write_concern (parts); EXIT; } static const bson_t * _largest_cluster_time (const bson_t *a, const bson_t *b) { if (!a) { return b; } if (!b) { return a; } if (_mongoc_cluster_time_greater (a, b)) { return a; } return b; } /* Check if the command should allow a transaction number if that has not * already been determined. * * This should only return true for write commands that are always retryable for * the server stream's wire version. * * The basic write commands (i.e. insert, update, delete) are intentionally * excluded here. While insert is always retryable, update and delete are only * retryable if they include no multi-document writes. Since it would be costly * to inspect the command document here, the bulk operation API explicitly sets * allow_txn_number for us. This means that insert, update, and delete are not * retryable if executed via mongoc_client_write_command_with_opts(); however, * documentation already instructs users not to use that for basic writes. */ static bool _allow_txn_number (const mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream) { /* There is no reason to call this function if allow_txn_number is set */ BSON_ASSERT (parts->allow_txn_number == MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN); if (!parts->is_write_command) { return false; } if (server_stream->sd->max_wire_version < WIRE_VERSION_RETRY_WRITES) { return false; } if (!parts->assembled.is_acknowledged) { return false; } if (!strcasecmp (parts->assembled.command_name, "findandmodify")) { return true; } return false; } /* Check if the write command should support retryable behavior. */ static bool _is_retryable_write (const mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream) { if (!parts->assembled.session) { return false; } if (!parts->is_write_command) { return false; } if (parts->allow_txn_number != MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES) { return false; } if (server_stream->sd->max_wire_version < WIRE_VERSION_RETRY_WRITES) { return false; } if (server_stream->sd->type == MONGOC_SERVER_STANDALONE) { return false; } if (_mongoc_client_session_in_txn (parts->assembled.session)) { return false; } if (!mongoc_uri_get_option_as_bool (parts->client->uri, MONGOC_URI_RETRYWRITES, MONGOC_DEFAULT_RETRYWRITES)) { return false; } return true; } /* Check if the read command should support retryable behavior. */ bool _is_retryable_read (const mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream) { if (!parts->is_read_command) { return false; } /* Commands that go through read_write_command helpers are also write * commands. Prohibit from read retry. 
*/ if (parts->is_write_command) { return false; } if (server_stream->sd->max_wire_version < WIRE_VERSION_RETRY_READS) { return false; } if (_mongoc_client_session_in_txn (parts->assembled.session)) { return false; } if (!mongoc_uri_get_option_as_bool (parts->client->uri, MONGOC_URI_RETRYREADS, MONGOC_DEFAULT_RETRYREADS)) { return false; } return true; } /* *-------------------------------------------------------------------------- * * mongoc_cmd_parts_assemble -- * * Assemble the command body, options, and read preference into one * command. * * Return: * True if the options were successfully applied. If any options are * invalid, returns false and fills out @error. In that case @parts is * invalid and must not be used. * * Side effects: * May partly assemble before returning an error. * mongoc_cmd_parts_cleanup should be called in all cases. * *-------------------------------------------------------------------------- */ bool mongoc_cmd_parts_assemble (mongoc_cmd_parts_t *parts, const mongoc_server_stream_t *server_stream, bson_error_t *error) { mongoc_server_description_type_t server_type; mongoc_client_session_t *cs; const bson_t *cluster_time = NULL; mongoc_read_prefs_t *prefs = NULL; const char *cmd_name; bool is_get_more; const mongoc_read_prefs_t *prefs_ptr; bool ret = false; ENTRY; BSON_ASSERT (parts); BSON_ASSERT (server_stream); server_type = server_stream->sd->type; cs = parts->prohibit_lsid ? NULL : parts->assembled.session; /* must not be assembled already */ BSON_ASSERT (!parts->assembled.command); BSON_ASSERT (bson_empty (&parts->assembled_body)); /* begin with raw flags/cmd as assembled flags/cmd, might change below */ parts->assembled.command = parts->body; /* unused in OP_MSG: */ parts->assembled.query_flags = parts->user_query_flags; parts->assembled.server_stream = server_stream; cmd_name = parts->assembled.command_name = _mongoc_get_command_name (parts->assembled.command); if (!parts->assembled.command_name) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Empty command document"); GOTO (done); } TRACE ("Preparing '%s'", cmd_name); is_get_more = !strcmp (cmd_name, "getMore"); parts->assembled.is_txn_finish = !strcmp (cmd_name, "commitTransaction") || !strcmp (cmd_name, "abortTransaction"); if (!parts->is_write_command && IS_PREF_PRIMARY (parts->read_prefs) && server_stream->topology_type == MONGOC_TOPOLOGY_SINGLE && server_type != MONGOC_SERVER_MONGOS) { prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY_PREFERRED); prefs_ptr = prefs; } else { prefs_ptr = parts->read_prefs; } if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) { if (!bson_has_field (parts->body, "$db")) { BSON_APPEND_UTF8 (&parts->extra, "$db", parts->assembled.db_name); } if (_mongoc_client_session_in_txn (cs)) { if (!IS_PREF_PRIMARY (cs->txn.opts.read_prefs) && !parts->is_write_command) { bson_set_error (error, MONGOC_ERROR_TRANSACTION, MONGOC_ERROR_TRANSACTION_INVALID_STATE, "Read preference in a transaction must be primary"); GOTO (done); } } else if (!IS_PREF_PRIMARY (prefs_ptr) && server_type != MONGOC_SERVER_STANDALONE) { /* "Type Standalone: clients MUST NOT send the read preference to the * server" */ _mongoc_cmd_parts_add_read_prefs (&parts->extra, prefs_ptr); } if (!bson_empty (&parts->extra)) { _mongoc_cmd_parts_ensure_copied (parts); } /* If an explicit session was not provided and lsid is not prohibited, * attempt to create an implicit session (ignoring any errors). 
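       * Any implicit session created here is flagged has_temp_session so that
       * mongoc_cmd_parts_cleanup destroys it and returns its server session
       * to the session pool.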
*/ if (!cs && !parts->prohibit_lsid && parts->assembled.is_acknowledged) { cs = mongoc_client_start_session (parts->client, NULL, NULL); if (cs) { parts->assembled.session = cs; parts->has_temp_session = true; } } /* Driver Sessions Spec: "For unacknowledged writes with an explicit * session, drivers SHOULD raise an error.... Without an explicit * session, drivers SHOULD NOT use an implicit session." We intentionally * do not restrict this logic to parts->is_write_command, since * mongoc_client_command_with_opts() does not identify as a write * command but may still include a write concern. */ if (cs) { if (!parts->assembled.is_acknowledged) { bson_set_error ( error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot use client session with unacknowledged command"); GOTO (done); } _mongoc_cmd_parts_ensure_copied (parts); bson_append_document (&parts->assembled_body, "lsid", 4, mongoc_client_session_get_lsid (cs)); cs->server_session->last_used_usec = bson_get_monotonic_time (); cluster_time = mongoc_client_session_get_cluster_time (cs); } /* Ensure we know if the write command allows a transaction number */ if (!_mongoc_client_session_txn_in_progress (cs) && parts->is_write_command && parts->allow_txn_number == MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_UNKNOWN) { parts->allow_txn_number = _allow_txn_number (parts, server_stream) ? MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO; } /* Determine if the command is retryable. If so, append txnNumber now * for future use and mark the command as such. */ if (_is_retryable_write (parts, server_stream)) { _mongoc_cmd_parts_ensure_copied (parts); bson_append_int64 (&parts->assembled_body, "txnNumber", 9, 0); parts->is_retryable_write = true; } /* Conversely, check if the command is retryable if it is a read. */ if (_is_retryable_read (parts, server_stream) && !is_get_more) { parts->is_retryable_read = true; } if (!bson_empty (&server_stream->cluster_time)) { cluster_time = _largest_cluster_time (&server_stream->cluster_time, cluster_time); } if (cluster_time && server_type != MONGOC_SERVER_STANDALONE) { _mongoc_cmd_parts_ensure_copied (parts); bson_append_document ( &parts->assembled_body, "$clusterTime", 12, cluster_time); } if (!is_get_more) { if (cs) { _mongoc_client_session_append_read_concern ( cs, &parts->read_concern_document, parts->is_read_command, &parts->assembled_body); } else if (!bson_empty (&parts->read_concern_document)) { bson_append_document (&parts->assembled_body, "readConcern", 11, &parts->read_concern_document); } } if (parts->assembled.is_txn_finish || !_mongoc_client_session_in_txn (cs)) { _mongoc_cmd_parts_add_write_concern (parts); } if (!_mongoc_client_session_append_txn ( cs, &parts->assembled_body, error)) { GOTO (done); } ret = true; } else if (server_type == MONGOC_SERVER_MONGOS) { _mongoc_cmd_parts_assemble_mongos (parts, server_stream); ret = true; } else { _mongoc_cmd_parts_assemble_mongod (parts, server_stream); ret = true; } done: mongoc_read_prefs_destroy (prefs); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_cmd_parts_cleanup -- * * Free memory associated with a stack-allocated mongoc_cmd_parts_t. * * Side effects: * None. 
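 *
 *       Example (an illustrative sketch, not part of the original source):
 *       cleanup pairs with every init, even when assembly fails:
 *
 *          mongoc_cmd_parts_init (&parts, client, db, MONGOC_QUERY_NONE, &body);
 *          if (mongoc_cmd_parts_assemble (&parts, server_stream, &error)) {
 *             ... send parts.assembled ...
 *          }
 *          mongoc_cmd_parts_cleanup (&parts);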
* *-------------------------------------------------------------------------- */ void mongoc_cmd_parts_cleanup (mongoc_cmd_parts_t *parts) { bson_destroy (&parts->read_concern_document); bson_destroy (&parts->write_concern_document); bson_destroy (&parts->extra); bson_destroy (&parts->assembled_body); if (parts->has_temp_session) { /* client session returns its server session to server session pool */ mongoc_client_session_destroy (parts->assembled.session); } } bool mongoc_cmd_is_compressible (mongoc_cmd_t *cmd) { BSON_ASSERT (cmd); BSON_ASSERT (cmd->command_name); return !!strcasecmp (cmd->command_name, "ismaster") && !!strcasecmp (cmd->command_name, "authenticate") && !!strcasecmp (cmd->command_name, "getnonce") && !!strcasecmp (cmd->command_name, "saslstart") && !!strcasecmp (cmd->command_name, "saslcontinue") && !!strcasecmp (cmd->command_name, "createuser") && !!strcasecmp (cmd->command_name, "updateuser"); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-collection-private.h0000644000076500000240000000274413572250757027275 0ustar alcaeusstaff/* * Copyright 2013-2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_COLLECTION_PRIVATE_H #define MONGOC_COLLECTION_PRIVATE_H #include #include "mongoc/mongoc-client.h" BSON_BEGIN_DECLS struct _mongoc_collection_t { mongoc_client_t *client; char ns[128]; uint32_t nslen; char db[128]; char collection[128]; uint32_t collectionlen; mongoc_read_prefs_t *read_prefs; mongoc_read_concern_t *read_concern; mongoc_write_concern_t *write_concern; bson_t *gle; }; mongoc_collection_t * _mongoc_collection_new (mongoc_client_t *client, const char *db, const char *collection, const mongoc_read_prefs_t *read_prefs, const mongoc_read_concern_t *read_concern, const mongoc_write_concern_t *write_concern); BSON_END_DECLS #endif /* MONGOC_COLLECTION_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-collection.c0000644000076500000240000031537613572250757025630 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-aggregate-private.h" #include "mongoc/mongoc-bulk-operation.h" #include "mongoc/mongoc-bulk-operation-private.h" #include "mongoc/mongoc-change-stream-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-find-and-modify-private.h" #include "mongoc/mongoc-find-and-modify.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-collection-private.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-index.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-write-command-private.h" #include "mongoc/mongoc-opts-private.h" #include "mongoc-write-command-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "collection" static void _mongoc_collection_write_command_execute ( mongoc_write_command_t *command, const mongoc_collection_t *collection, const mongoc_write_concern_t *write_concern, mongoc_client_session_t *cs, mongoc_write_result_t *result) { mongoc_server_stream_t *server_stream; ENTRY; server_stream = mongoc_cluster_stream_for_writes ( &collection->client->cluster, cs, NULL, &result->error); if (!server_stream) { /* result->error has been filled out */ EXIT; } _mongoc_write_command_execute (command, collection->client, server_stream, collection->db, collection->collection, write_concern, 0 /* offset */, cs, result); mongoc_server_stream_cleanup (server_stream); EXIT; } static void _mongoc_collection_write_command_execute_idl ( mongoc_write_command_t *command, const mongoc_collection_t *collection, mongoc_crud_opts_t *crud, mongoc_write_result_t *result) { mongoc_server_stream_t *server_stream; bson_t reply; ENTRY; server_stream = mongoc_cluster_stream_for_writes (&collection->client->cluster, crud->client_session, &reply, &result->error); if (!server_stream) { /* result->error and reply have been filled out */ _mongoc_bson_array_copy_labels_to (&reply, &result->errorLabels); bson_destroy (&reply); EXIT; } if (_mongoc_client_session_in_txn (crud->client_session) && crud->writeConcern) { bson_set_error (&result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set write concern after starting transaction"); mongoc_server_stream_cleanup (server_stream); EXIT; } if (!crud->writeConcern && !_mongoc_client_session_in_txn (crud->client_session)) { crud->writeConcern = collection->write_concern; crud->write_concern_owned = false; } _mongoc_write_command_execute_idl (command, collection->client, server_stream, collection->db, collection->collection, 0 /* offset */, crud, result); mongoc_server_stream_cleanup (server_stream); EXIT; } /* *-------------------------------------------------------------------------- * * _mongoc_collection_new -- * * INTERNAL API * * Create a new mongoc_collection_t structure for the given client. * * @client must remain valid during the lifetime of this structure. * @db is the db name of the collection. * @collection is the name of the collection. * @read_prefs is the default read preferences to apply or NULL. * @read_concern is the default read concern to apply or NULL. * @write_concern is the default write concern to apply or NULL. * * Returns: * A newly allocated mongoc_collection_t that should be freed with * mongoc_collection_destroy(). * * Side effects: * None. 
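 *
 *       Example (an illustrative sketch, not part of the original source):
 *       applications normally reach this constructor through the public
 *       accessor rather than calling it directly:
 *
 *          mongoc_collection_t *coll =
 *             mongoc_client_get_collection (client, "db", "coll");
 *          ... use coll ...
 *          mongoc_collection_destroy (coll);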
* *-------------------------------------------------------------------------- */ mongoc_collection_t * _mongoc_collection_new (mongoc_client_t *client, const char *db, const char *collection, const mongoc_read_prefs_t *read_prefs, const mongoc_read_concern_t *read_concern, const mongoc_write_concern_t *write_concern) { mongoc_collection_t *col; ENTRY; BSON_ASSERT (client); BSON_ASSERT (db); BSON_ASSERT (collection); col = (mongoc_collection_t *) bson_malloc0 (sizeof *col); col->client = client; col->write_concern = write_concern ? mongoc_write_concern_copy (write_concern) : mongoc_write_concern_new (); col->read_concern = read_concern ? mongoc_read_concern_copy (read_concern) : mongoc_read_concern_new (); col->read_prefs = read_prefs ? mongoc_read_prefs_copy (read_prefs) : mongoc_read_prefs_new (MONGOC_READ_PRIMARY); bson_snprintf (col->ns, sizeof col->ns, "%s.%s", db, collection); bson_snprintf (col->db, sizeof col->db, "%s", db); bson_snprintf (col->collection, sizeof col->collection, "%s", collection); col->collectionlen = (uint32_t) strlen (col->collection); col->nslen = (uint32_t) strlen (col->ns); col->gle = NULL; RETURN (col); } /* *-------------------------------------------------------------------------- * * mongoc_collection_destroy -- * * Release resources associated with @collection and frees the * structure. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_collection_destroy (mongoc_collection_t *collection) /* IN */ { ENTRY; if (!collection) { EXIT; } bson_clear (&collection->gle); if (collection->read_prefs) { mongoc_read_prefs_destroy (collection->read_prefs); collection->read_prefs = NULL; } if (collection->read_concern) { mongoc_read_concern_destroy (collection->read_concern); collection->read_concern = NULL; } if (collection->write_concern) { mongoc_write_concern_destroy (collection->write_concern); collection->write_concern = NULL; } bson_free (collection); EXIT; } /* *-------------------------------------------------------------------------- * * mongoc_collection_copy -- * * Returns a copy of @collection that needs to be freed by calling * mongoc_collection_destroy. * * Returns: * A copy of this collection. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_collection_t * mongoc_collection_copy (mongoc_collection_t *collection) /* IN */ { ENTRY; BSON_ASSERT (collection); RETURN (_mongoc_collection_new (collection->client, collection->db, collection->collection, collection->read_prefs, collection->read_concern, collection->write_concern)); } mongoc_cursor_t * mongoc_collection_aggregate (mongoc_collection_t *collection, /* IN */ mongoc_query_flags_t flags, /* IN */ const bson_t *pipeline, /* IN */ const bson_t *opts, /* IN */ const mongoc_read_prefs_t *read_prefs) /* IN */ { return _mongoc_aggregate (collection->client, collection->ns, flags, pipeline, opts, read_prefs, collection->read_prefs, collection->read_concern, collection->write_concern); } /* *-------------------------------------------------------------------------- * * mongoc_collection_find -- * * DEPRECATED: use mongoc_collection_find_with_opts. * * Performs a query against the configured MongoDB server. If @read_prefs * is provided, it will be used to locate a MongoDB node in the cluster * to deliver the query to. * * @flags may be bitwise-or'd flags or MONGOC_QUERY_NONE. 
* * @skip may contain the number of documents to skip before returning the * matching document. * * @limit may contain the maximum number of documents that may be * returned. * * This function will always return a cursor, with the exception of * invalid API use. * * Parameters: * @collection: A mongoc_collection_t. * @flags: A bitwise or of mongoc_query_flags_t. * @skip: The number of documents to skip. * @limit: The maximum number of items. * @batch_size: The batch size * @query: The query to locate matching documents. * @fields: The fields to return, or NULL for all fields. * @read_prefs: Read preferences to choose cluster node. * * Returns: * A newly allocated mongoc_cursor_t that should be freed with * mongoc_cursor_destroy(). * * The client used by mongoc_collection_t must be valid for the * lifetime of the resulting mongoc_cursor_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_cursor_t * mongoc_collection_find (mongoc_collection_t *collection, /* IN */ mongoc_query_flags_t flags, /* IN */ uint32_t skip, /* IN */ uint32_t limit, /* IN */ uint32_t batch_size, /* IN */ const bson_t *query, /* IN */ const bson_t *fields, /* IN */ const mongoc_read_prefs_t *read_prefs) /* IN */ { bool has_unwrapped; bson_t unwrapped; bson_error_t error = {0}; bson_t opts; bool slave_ok; mongoc_cursor_t *cursor; BSON_ASSERT (collection); BSON_ASSERT (query); bson_clear (&collection->gle); bson_init (&opts); _mongoc_cursor_flags_to_opts (flags, &opts, &slave_ok); /* check if the query is wrapped in $query */ has_unwrapped = _mongoc_cursor_translate_dollar_query_opts ( query, &opts, &unwrapped, &error); if (!bson_empty0 (fields)) { bson_append_document ( &opts, MONGOC_CURSOR_PROJECTION, MONGOC_CURSOR_PROJECTION_LEN, fields); } cursor = _mongoc_cursor_find_new (collection->client, collection->ns, has_unwrapped ? &unwrapped : query, &opts, read_prefs, collection->read_prefs, collection->read_concern); if (skip) { _mongoc_cursor_set_opt_int64 (cursor, MONGOC_CURSOR_SKIP, skip); } if (limit) { /* limit must be cast to int32_t. Although the argument is a uint32_t, * callers can specify a negative limit by casting to a signed int32_t * value to uint32_t. E.g. to set a limit of -4, the caller passes * UINT32_MAX - 3 */ (void) mongoc_cursor_set_limit (cursor, (int32_t) limit); } if (batch_size) { mongoc_cursor_set_batch_size (cursor, batch_size); } bson_destroy (&unwrapped); bson_destroy (&opts); if (error.domain) { memcpy (&cursor->error, &error, sizeof (error)); } return cursor; } /* *-------------------------------------------------------------------------- * * mongoc_collection_find_with_opts -- * * Create a cursor with a query filter. All other options are * specified in a free-form BSON document. * * Parameters: * @collection: A mongoc_collection_t. * @filter: The query to locate matching documents. * @opts: Other options. * @read_prefs: Optional read preferences to choose cluster node. * * Returns: * A newly allocated mongoc_cursor_t that should be freed with * mongoc_cursor_destroy(). * * The client used by mongoc_collection_t must be valid for the * lifetime of the resulting mongoc_cursor_t. * * Side effects: * None. 
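*
* A minimal usage sketch (illustrative; assumes an initialized
* mongoc_collection_t named "collection" and abbreviates error handling):
*
*   const bson_t *doc;
*   bson_error_t error;
*   bson_t *filter = BCON_NEW ("x", BCON_INT32 (1));
*   bson_t *opts = BCON_NEW ("limit", BCON_INT64 (10));
*   mongoc_cursor_t *cursor =
*      mongoc_collection_find_with_opts (collection, filter, opts, NULL);
*
*   while (mongoc_cursor_next (cursor, &doc)) {
*      char *str = bson_as_canonical_extended_json (doc, NULL);
*      ... use str ...
*      bson_free (str);
*   }
*
*   if (mongoc_cursor_error (cursor, &error)) {
*      ... report error.message ...
*   }
*
*   mongoc_cursor_destroy (cursor);
*   bson_destroy (filter);
*   bson_destroy (opts);
*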
* *-------------------------------------------------------------------------- */ mongoc_cursor_t * mongoc_collection_find_with_opts (mongoc_collection_t *collection, const bson_t *filter, const bson_t *opts, const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (collection); BSON_ASSERT (filter); bson_clear (&collection->gle); return _mongoc_cursor_find_new (collection->client, collection->ns, filter, opts, read_prefs, collection->read_prefs, collection->read_concern); } /* *-------------------------------------------------------------------------- * * mongoc_collection_command -- * * Executes a command on a cluster node matching @read_prefs. If * @read_prefs is not provided, it will be run on the primary node. * * This function will always return a mongoc_cursor_t. * * Parameters: * @collection: A mongoc_collection_t. * @flags: Bitwise-or'd flags for command. * @skip: Number of documents to skip, typically 0. * @limit : Number of documents to return * @batch_size : Batch size * @query: The command to execute. * @fields: The fields to return, or NULL. * @read_prefs: Command read preferences or NULL. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_cursor_t * mongoc_collection_command (mongoc_collection_t *collection, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { char ns[MONGOC_NAMESPACE_MAX]; mongoc_cursor_t *cursor; BSON_ASSERT (collection); BSON_ASSERT (query); if (!read_prefs) { read_prefs = collection->read_prefs; } bson_clear (&collection->gle); if (NULL == strstr (collection->collection, "$cmd")) { bson_snprintf (ns, sizeof ns, "%s.$cmd", collection->db); } else { bson_snprintf (ns, sizeof ns, "%s", collection->db); } /* Server Selection Spec: "The generic command method has a default read * preference of mode 'primary'. The generic command method MUST ignore any * default read preference from client, database or collection * configuration. The generic command method SHOULD allow an optional read * preference argument." 
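*
* For example (illustrative sketch; "collection" is an initialized handle,
* the NULL argument is the opts document, and error handling is
* abbreviated), a caller wanting a non-primary read preference passes it
* explicitly to the generic command helpers defined below:
*
*   bson_t reply;
*   bson_error_t error;
*   bson_t *cmd = BCON_NEW ("ping", BCON_INT32 (1));
*   mongoc_read_prefs_t *prefs =
*      mongoc_read_prefs_new (MONGOC_READ_SECONDARY_PREFERRED);
*
*   mongoc_collection_command_with_opts (
*      collection, cmd, prefs, NULL, &reply, &error);
*
*   bson_destroy (&reply);
*   bson_destroy (cmd);
*   mongoc_read_prefs_destroy (prefs);
*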
*/ /* flags, skip, limit, batch_size, fields are unused */ cursor = _mongoc_cursor_cmd_deprecated_new ( collection->client, ns, query, read_prefs); return cursor; } bool mongoc_collection_read_command_with_opts (mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error) { BSON_ASSERT (collection); return _mongoc_client_command_with_opts (collection->client, collection->db, command, MONGOC_CMD_READ, opts, MONGOC_QUERY_NONE, read_prefs, collection->read_prefs, collection->read_concern, collection->write_concern, reply, error); } bool mongoc_collection_write_command_with_opts (mongoc_collection_t *collection, const bson_t *command, const bson_t *opts, bson_t *reply, bson_error_t *error) { BSON_ASSERT (collection); return _mongoc_client_command_with_opts (collection->client, collection->db, command, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, collection->read_prefs, collection->read_concern, collection->write_concern, reply, error); } bool mongoc_collection_read_write_command_with_opts ( mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs /* IGNORED */, const bson_t *opts, bson_t *reply, bson_error_t *error) { BSON_ASSERT (collection); return _mongoc_client_command_with_opts (collection->client, collection->db, command, MONGOC_CMD_RW, opts, MONGOC_QUERY_NONE, read_prefs, collection->read_prefs, collection->read_concern, collection->write_concern, reply, error); } bool mongoc_collection_command_with_opts (mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error) { BSON_ASSERT (collection); /* Server Selection Spec: "The generic command method has a default read * preference of mode 'primary'. The generic command method MUST ignore any * default read preference from client, database or collection * configuration. The generic command method SHOULD allow an optional read * preference argument." */ return _mongoc_client_command_with_opts (collection->client, collection->db, command, MONGOC_CMD_RAW, opts, MONGOC_QUERY_NONE, read_prefs, NULL /* default prefs */, collection->read_concern, collection->write_concern, reply, error); } bool mongoc_collection_command_simple (mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error) { BSON_ASSERT (collection); BSON_ASSERT (command); bson_clear (&collection->gle); /* Server Selection Spec: "The generic command method has a default read * preference of mode 'primary'. The generic command method MUST ignore any * default read preference from client, database or collection * configuration. The generic command method SHOULD allow an optional read * preference argument." */ return _mongoc_client_command_with_opts (collection->client, collection->db, command, MONGOC_CMD_RAW, NULL /* opts */, MONGOC_QUERY_NONE, read_prefs, NULL /* default prefs */, NULL /* read concern */, NULL /* write concern */, reply, error); } /* *-------------------------------------------------------------------------- * * mongoc_collection_count -- * * Count the number of documents matching @query. * * Parameters: * @flags: A mongoc_query_flags_t describing the query flags or 0. * @query: The query to perform or NULL for {}. * @skip: The $skip to perform within the query or 0. * @limit: The $limit to perform within the query or 0. * @read_prefs: desired read preferences or NULL. 
* @error: A location for an error or NULL. * * Returns: * -1 on failure; otherwise the number of matching documents. * * Side effects: * @error is set upon failure if non-NULL. * *-------------------------------------------------------------------------- */ int64_t mongoc_collection_count (mongoc_collection_t *collection, /* IN */ mongoc_query_flags_t flags, /* IN */ const bson_t *query, /* IN */ int64_t skip, /* IN */ int64_t limit, /* IN */ const mongoc_read_prefs_t *read_prefs, /* IN */ bson_error_t *error) /* OUT */ { int64_t ret; bson_t opts = BSON_INITIALIZER; /* Complex types must be parts of `opts`, otherwise we can't * follow various specs that require validation etc */ if (collection->read_concern->level != NULL) { const bson_t *read_concern_bson; read_concern_bson = _mongoc_read_concern_get_bson (collection->read_concern); BSON_APPEND_DOCUMENT (&opts, "readConcern", read_concern_bson); } /* Server Selection Spec: "may-use-secondary" commands SHOULD take a read * preference argument and otherwise MUST use the default read preference * from client, database or collection configuration. */ BEGIN_IGNORE_DEPRECATIONS ret = mongoc_collection_count_with_opts ( collection, flags, query, skip, limit, &opts, read_prefs, error); END_IGNORE_DEPRECATIONS bson_destroy (&opts); return ret; } int64_t mongoc_collection_count_with_opts ( mongoc_collection_t *collection, /* IN */ mongoc_query_flags_t flags, /* IN */ const bson_t *query, /* IN */ int64_t skip, /* IN */ int64_t limit, /* IN */ const bson_t *opts, /* IN */ const mongoc_read_prefs_t *read_prefs, /* IN */ bson_error_t *error) /* OUT */ { bson_iter_t iter; int64_t ret = -1; bool success; bson_t reply; bson_t cmd = BSON_INITIALIZER; bson_t q; ENTRY; BSON_ASSERT (collection); bson_append_utf8 ( &cmd, "count", 5, collection->collection, collection->collectionlen); if (query) { bson_append_document (&cmd, "query", 5, query); } else { bson_init (&q); bson_append_document (&cmd, "query", 5, &q); bson_destroy (&q); } if (limit) { bson_append_int64 (&cmd, "limit", 5, limit); } if (skip) { bson_append_int64 (&cmd, "skip", 4, skip); } success = _mongoc_client_command_with_opts (collection->client, collection->db, &cmd, MONGOC_CMD_READ, opts, flags, read_prefs, collection->read_prefs, collection->read_concern, collection->write_concern, &reply, error); if (success) { if (bson_iter_init_find (&iter, &reply, "n")) { ret = bson_iter_as_int64 (&iter); } } bson_destroy (&reply); bson_destroy (&cmd); RETURN (ret); } int64_t mongoc_collection_estimated_document_count ( mongoc_collection_t *coll, const bson_t *opts, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error) { bson_iter_t iter; int64_t count = -1; bool ret; bson_t reply_local; bson_t *reply_ptr; bson_t cmd = BSON_INITIALIZER; ENTRY; BSON_ASSERT (coll); reply_ptr = reply ? 
reply : &reply_local; bson_append_utf8 (&cmd, "count", 5, coll->collection, coll->collectionlen); ret = _mongoc_client_command_with_opts (coll->client, coll->db, &cmd, MONGOC_CMD_READ, opts, MONGOC_QUERY_NONE, read_prefs, coll->read_prefs, coll->read_concern, coll->write_concern, reply_ptr, error); if (ret) { if (bson_iter_init_find (&iter, reply_ptr, "n")) { count = bson_iter_as_int64 (&iter); } } if (!reply) { bson_destroy (&reply_local); } bson_destroy (&cmd); RETURN (count); } /* -------------------------------------------------------------------------- * * _make_aggregate_for_count -- * * Construct an aggregate pipeline with the following form: * { pipeline: [ * { $match: {...} }, * { $group: { _id: 1, n: { sum: 1 } } }, * { $skip: ... }, * { $limit: ... } * ] * } * *-------------------------------------------------------------------------- */ static void _make_aggregate_for_count (const mongoc_collection_t *coll, const bson_t *filter, const bson_t *opts, bson_t *out) { bson_iter_t iter; bson_t pipeline; bson_t match_stage; bson_t group_stage; bson_t group_stage_doc; bson_t sum; bson_t empty; const char *keys[] = {"0", "1", "2", "3"}; int key = 0; bson_init (out); bson_append_utf8 ( out, "aggregate", 9, coll->collection, coll->collectionlen); bson_append_document_begin (out, "cursor", 6, &empty); bson_append_document_end (out, &empty); bson_append_array_begin (out, "pipeline", 8, &pipeline); bson_append_document_begin (&pipeline, keys[key++], 1, &match_stage); bson_append_document (&match_stage, "$match", 6, filter); bson_append_document_end (&pipeline, &match_stage); /* if @opts includes "skip", or "count", append $skip and $count stages to * the aggregate pipeline. */ if (opts && bson_iter_init_find (&iter, opts, "skip")) { bson_t skip_stage; bson_append_document_begin (&pipeline, keys[key++], 1, &skip_stage); bson_append_value (&skip_stage, "$skip", 5, bson_iter_value (&iter)); bson_append_document_end (&pipeline, &skip_stage); } if (opts && bson_iter_init_find (&iter, opts, "limit")) { bson_t limit_stage; bson_append_document_begin (&pipeline, keys[key++], 1, &limit_stage); bson_append_value (&limit_stage, "$limit", 6, bson_iter_value (&iter)); bson_append_document_end (&pipeline, &limit_stage); } bson_append_document_begin (&pipeline, keys[key], 1, &group_stage); bson_append_document_begin (&group_stage, "$group", 6, &group_stage_doc); bson_append_int32 (&group_stage_doc, "_id", 3, 1); bson_append_document_begin (&group_stage_doc, "n", 1, &sum); bson_append_int32 (&sum, "$sum", 4, 1); bson_append_document_end (&group_stage_doc, &sum); bson_append_document_end (&group_stage, &group_stage_doc); bson_append_document_end (&pipeline, &group_stage); bson_append_array_end (out, &pipeline); } int64_t mongoc_collection_count_documents (mongoc_collection_t *coll, const bson_t *filter, const bson_t *opts, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error) { bson_t aggregate_cmd; bson_t aggregate_opts; bool ret; const bson_t *result; mongoc_cursor_t *cursor = NULL; int64_t count = -1; bson_t cmd_reply; bson_iter_t iter; ENTRY; BSON_ASSERT (coll); BSON_ASSERT (filter); _make_aggregate_for_count (coll, filter, opts, &aggregate_cmd); bson_init (&aggregate_opts); if (opts) { bson_copy_to_excluding_noinit ( opts, &aggregate_opts, "skip", "limit", NULL); } ret = mongoc_collection_read_command_with_opts ( coll, &aggregate_cmd, read_prefs, &aggregate_opts, &cmd_reply, error); bson_destroy (&aggregate_cmd); bson_destroy (&aggregate_opts); if (reply) { bson_copy_to (&cmd_reply, 
reply); } if (!ret) { bson_destroy (&cmd_reply); GOTO (done); } /* steals reply */ cursor = mongoc_cursor_new_from_command_reply_with_opts ( coll->client, &cmd_reply, NULL); BSON_ASSERT (mongoc_cursor_get_id (cursor) == 0); ret = mongoc_cursor_next (cursor, &result); if (!ret) { if (mongoc_cursor_error (cursor, error)) { GOTO (done); } else { count = 0; GOTO (done); } } if (bson_iter_init_find (&iter, result, "n") && BSON_ITER_HOLDS_INT (&iter)) { count = bson_iter_as_int64 (&iter); } done: if (cursor) { mongoc_cursor_destroy (cursor); } RETURN (count); } /* *-------------------------------------------------------------------------- * * mongoc_collection_drop -- * * Request the MongoDB server drop the collection. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @error is set upon failure. * *-------------------------------------------------------------------------- */ bool mongoc_collection_drop (mongoc_collection_t *collection, /* IN */ bson_error_t *error) /* OUT */ { return mongoc_collection_drop_with_opts (collection, NULL, error); } bool mongoc_collection_drop_with_opts (mongoc_collection_t *collection, const bson_t *opts, bson_error_t *error) { bool ret; bson_t cmd; BSON_ASSERT (collection); bson_init (&cmd); bson_append_utf8 ( &cmd, "drop", 4, collection->collection, collection->collectionlen); ret = _mongoc_client_command_with_opts (collection->client, collection->db, &cmd, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, /* user prefs */ collection->read_prefs, collection->read_concern, collection->write_concern, NULL, /* reply */ error); bson_destroy (&cmd); return ret; } /* *-------------------------------------------------------------------------- * * mongoc_collection_drop_index -- * * Request the MongoDB server drop the named index. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @error is setup upon failure if non-NULL. * *-------------------------------------------------------------------------- */ bool mongoc_collection_drop_index (mongoc_collection_t *collection, /* IN */ const char *index_name, /* IN */ bson_error_t *error) /* OUT */ { return mongoc_collection_drop_index_with_opts ( collection, index_name, NULL, error); } bool mongoc_collection_drop_index_with_opts (mongoc_collection_t *collection, const char *index_name, const bson_t *opts, bson_error_t *error) { bool ret; bson_t cmd; BSON_ASSERT (collection); BSON_ASSERT (index_name); bson_init (&cmd); bson_append_utf8 (&cmd, "dropIndexes", -1, collection->collection, collection->collectionlen); bson_append_utf8 (&cmd, "index", -1, index_name, -1); ret = _mongoc_client_command_with_opts (collection->client, collection->db, &cmd, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, /* user prefs */ collection->read_prefs, collection->read_concern, collection->write_concern, NULL, /* reply */ error); bson_destroy (&cmd); return ret; } char * mongoc_collection_keys_to_index_string (const bson_t *keys) { bson_string_t *s; bson_iter_t iter; bson_type_t type; int i = 0; BSON_ASSERT (keys); if (!bson_iter_init (&iter, keys)) { return NULL; } s = bson_string_new (NULL); while (bson_iter_next (&iter)) { /* Index type can be specified as a string ("2d") or as an integer * representing direction */ type = bson_iter_type (&iter); if (type == BSON_TYPE_UTF8) { bson_string_append_printf (s, (i++ ? "_%s_%s" : "%s_%s"), bson_iter_key (&iter), bson_iter_utf8 (&iter, NULL)); } else if (type == BSON_TYPE_INT32) { bson_string_append_printf (s, (i++ ? 
"_%s_%d" : "%s_%d"), bson_iter_key (&iter), bson_iter_int32 (&iter)); } else if (type == BSON_TYPE_INT64) { bson_string_append_printf (s, (i++ ? "_%s_%" PRId64 : "%s_%" PRId64), bson_iter_key (&iter), bson_iter_int64 (&iter)); } else { bson_string_free (s, true); return NULL; } } return bson_string_free (s, false); } bool mongoc_collection_create_index (mongoc_collection_t *collection, const bson_t *keys, const mongoc_index_opt_t *opt, bson_error_t *error) { bson_t reply; bool ret; BEGIN_IGNORE_DEPRECATIONS ret = mongoc_collection_create_index_with_opts ( collection, keys, opt, NULL, &reply, error); END_IGNORE_DEPRECATIONS bson_destroy (&reply); return ret; } bool mongoc_collection_create_index_with_opts (mongoc_collection_t *collection, const bson_t *keys, const mongoc_index_opt_t *opt, const bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_create_index_opts_t parsed; mongoc_cmd_parts_t parts; const mongoc_index_opt_t *def_opt; const mongoc_index_opt_geo_t *def_geo; const char *name; bson_t cmd = BSON_INITIALIZER; bson_t ar; bson_t doc; bson_t storage_doc; bson_t wt_doc; const mongoc_index_opt_geo_t *geo_opt; const mongoc_index_opt_storage_t *storage_opt; const mongoc_index_opt_wt_t *wt_opt; char *alloc_name = NULL; bool ret = false; bool reply_initialized = false; bool has_collation = false; mongoc_server_stream_t *server_stream = NULL; mongoc_cluster_t *cluster; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (keys); def_opt = mongoc_index_opt_get_default (); opt = opt ? opt : def_opt; mongoc_cmd_parts_init ( &parts, collection->client, collection->db, MONGOC_QUERY_NONE, &cmd); parts.is_write_command = true; if (!_mongoc_create_index_opts_parse ( collection->client, opts, &parsed, error)) { GOTO (done); } if (!parsed.writeConcern) { parsed.writeConcern = collection->write_concern; parsed.write_concern_owned = false; } /* * Generate the key name if it was not provided. */ name = (opt->name != def_opt->name) ? opt->name : NULL; if (!name) { alloc_name = mongoc_collection_keys_to_index_string (keys); if (alloc_name) { name = alloc_name; } else { bson_set_error ( error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Cannot generate index name from invalid `keys` argument"); GOTO (done); } } /* * Build our createIndexes command to send to the server. 
*/ BSON_ASSERT ( BSON_APPEND_UTF8 (&cmd, "createIndexes", collection->collection)); bson_append_array_begin (&cmd, "indexes", 7, &ar); bson_append_document_begin (&ar, "0", 1, &doc); BSON_ASSERT (BSON_APPEND_DOCUMENT (&doc, "key", keys)); BSON_ASSERT (BSON_APPEND_UTF8 (&doc, "name", name)); if (opt->background) { BSON_ASSERT (BSON_APPEND_BOOL (&doc, "background", true)); } if (opt->unique) { BSON_ASSERT (BSON_APPEND_BOOL (&doc, "unique", true)); } if (opt->drop_dups) { BSON_ASSERT (BSON_APPEND_BOOL (&doc, "dropDups", true)); } if (opt->sparse) { BSON_ASSERT (BSON_APPEND_BOOL (&doc, "sparse", true)); } if (opt->expire_after_seconds != def_opt->expire_after_seconds) { BSON_ASSERT (BSON_APPEND_INT32 ( &doc, "expireAfterSeconds", opt->expire_after_seconds)); } if (opt->v != def_opt->v) { BSON_ASSERT (BSON_APPEND_INT32 (&doc, "v", opt->v)); } if (opt->weights && (opt->weights != def_opt->weights)) { BSON_ASSERT (BSON_APPEND_DOCUMENT (&doc, "weights", opt->weights)); } if (opt->default_language != def_opt->default_language) { BSON_ASSERT ( BSON_APPEND_UTF8 (&doc, "default_language", opt->default_language)); } if (opt->language_override != def_opt->language_override) { BSON_ASSERT ( BSON_APPEND_UTF8 (&doc, "language_override", opt->language_override)); } if (opt->partial_filter_expression) { BSON_ASSERT (BSON_APPEND_DOCUMENT ( &doc, "partialFilterExpression", opt->partial_filter_expression)); } if (opt->collation) { BSON_ASSERT (BSON_APPEND_DOCUMENT (&doc, "collation", opt->collation)); has_collation = true; } if (opt->geo_options) { geo_opt = opt->geo_options; def_geo = mongoc_index_opt_geo_get_default (); if (geo_opt->twod_sphere_version != def_geo->twod_sphere_version) { BSON_ASSERT (BSON_APPEND_INT32 ( &doc, "2dsphereIndexVersion", geo_opt->twod_sphere_version)); } if (geo_opt->twod_bits_precision != def_geo->twod_bits_precision) { BSON_ASSERT ( BSON_APPEND_INT32 (&doc, "bits", geo_opt->twod_bits_precision)); } if (geo_opt->twod_location_min != def_geo->twod_location_min) { BSON_ASSERT ( BSON_APPEND_DOUBLE (&doc, "min", geo_opt->twod_location_min)); } if (geo_opt->twod_location_max != def_geo->twod_location_max) { BSON_ASSERT ( BSON_APPEND_DOUBLE (&doc, "max", geo_opt->twod_location_max)); } if (geo_opt->haystack_bucket_size != def_geo->haystack_bucket_size) { BSON_ASSERT (BSON_APPEND_DOUBLE ( &doc, "bucketSize", geo_opt->haystack_bucket_size)); } } if (opt->storage_options) { storage_opt = opt->storage_options; switch (storage_opt->type) { case MONGOC_INDEX_STORAGE_OPT_WIREDTIGER: wt_opt = (mongoc_index_opt_wt_t *) storage_opt; BSON_APPEND_DOCUMENT_BEGIN (&doc, "storageEngine", &storage_doc); BSON_APPEND_DOCUMENT_BEGIN (&storage_doc, "wiredTiger", &wt_doc); BSON_ASSERT ( BSON_APPEND_UTF8 (&wt_doc, "configString", wt_opt->config_str)); bson_append_document_end (&storage_doc, &wt_doc); bson_append_document_end (&doc, &storage_doc); break; default: break; } } bson_append_document_end (&ar, &doc); bson_append_array_end (&cmd, &ar); server_stream = mongoc_cluster_stream_for_writes ( &collection->client->cluster, parsed.client_session, reply, error); if (!server_stream) { reply_initialized = true; GOTO (done); } if (!mongoc_cmd_parts_set_write_concern (&parts, parsed.writeConcern, server_stream->sd->max_wire_version, error)) { GOTO (done); } if (has_collation && server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support collation"); GOTO (done); } parts.assembled.session 
= parsed.client_session; if (!bson_concat (&parts.extra, &parsed.extra)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "'opts' is too large"); GOTO (done); } cluster = &collection->client->cluster; if (mongoc_cmd_parts_assemble (&parts, server_stream, error)) { ret = mongoc_cluster_run_command_monitored ( cluster, &parts.assembled, reply, error); } else { _mongoc_bson_init_if_set (reply); } reply_initialized = true; if (ret) { if (reply) { ret = !_mongoc_parse_wc_err (reply, error); } } done: bson_destroy (&cmd); bson_free (alloc_name); _mongoc_create_index_opts_cleanup (&parsed); mongoc_server_stream_cleanup (server_stream); mongoc_cmd_parts_cleanup (&parts); if (!reply_initialized && reply) { bson_init (reply); } RETURN (ret); } bool mongoc_collection_ensure_index (mongoc_collection_t *collection, const bson_t *keys, const mongoc_index_opt_t *opt, bson_error_t *error) { BEGIN_IGNORE_DEPRECATIONS return mongoc_collection_create_index (collection, keys, opt, error); END_IGNORE_DEPRECATIONS } mongoc_cursor_t * mongoc_collection_find_indexes (mongoc_collection_t *collection, bson_error_t *error) { mongoc_cursor_t *cursor; cursor = mongoc_collection_find_indexes_with_opts (collection, NULL); (void) mongoc_cursor_error (cursor, error); return cursor; } mongoc_cursor_t * mongoc_collection_find_indexes_with_opts (mongoc_collection_t *collection, const bson_t *opts) { mongoc_cursor_t *cursor; bson_t cmd = BSON_INITIALIZER; bson_t child; bson_error_t error; BSON_ASSERT (collection); bson_append_utf8 (&cmd, "listIndexes", -1, collection->collection, collection->collectionlen); BSON_APPEND_DOCUMENT_BEGIN (&cmd, "cursor", &child); bson_append_document_end (&cmd, &child); /* No read preference. Index Enumeration Spec: "run listIndexes on the * primary node in replicaSet mode". */ cursor = _mongoc_cursor_cmd_new ( collection->client, collection->ns, &cmd, opts, NULL, NULL, NULL); if (!mongoc_cursor_error (cursor, &error)) { _mongoc_cursor_prime (cursor); } if (mongoc_cursor_error (cursor, &error) && error.code == MONGOC_ERROR_COLLECTION_DOES_NOT_EXIST) { /* collection does not exist. from spec: return no documents but no err: * https://github.com/mongodb/specifications/blob/master/source/enumerate-indexes.rst#enumeration-getting-index-information */ _mongoc_cursor_set_empty (cursor); } bson_destroy (&cmd); return cursor; } /* *-------------------------------------------------------------------------- * * mongoc_collection_insert_bulk -- * * Bulk insert documents into a MongoDB collection. * * Parameters: * @collection: A mongoc_collection_t. * @flags: flags for the insert or 0. * @documents: The documents to insert. * @n_documents: The number of documents to insert. * @write_concern: A write concern or NULL. * @error: a location for an error or NULL. * * Returns: * true if successful; otherwise false and @error is set. * * If the write concern does not dictate checking the result of the * insert, then true may be returned even though the document was * not actually inserted on the MongoDB server or cluster. * * Side effects: * @collection->gle is setup, depending on write_concern->w value. * @error may be set upon failure if non-NULL. 
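*
* A minimal sketch of a call (illustrative; assumes an initialized
* collection handle and two already-built bson_t documents):
*
*   bson_error_t error;
*   const bson_t *docs[2] = { doc0, doc1 };
*
*   if (!mongoc_collection_insert_bulk (
*          collection, MONGOC_INSERT_NONE, docs, 2, NULL, &error)) {
*      ... inspect error.message ...
*   }
*
* New code is generally better served by mongoc_collection_insert_many()
* below, which takes an options document and reports an "insertedCount"
* in its reply.
*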
* *-------------------------------------------------------------------------- */ bool mongoc_collection_insert_bulk (mongoc_collection_t *collection, mongoc_insert_flags_t flags, const bson_t **documents, uint32_t n_documents, const mongoc_write_concern_t *write_concern, bson_error_t *error) { mongoc_write_command_t command; mongoc_write_result_t result; mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT; uint32_t i; bool ret; BSON_ASSERT (collection); BSON_ASSERT (documents); if (!write_concern) { write_concern = collection->write_concern; } if (!(flags & MONGOC_INSERT_NO_VALIDATE)) { for (i = 0; i < n_documents; i++) { if (!_mongoc_validate_new_document ( documents[i], _mongoc_default_insert_vflags, error)) { RETURN (false); } } } bson_clear (&collection->gle); _mongoc_write_result_init (&result); write_flags.ordered = !(flags & MONGOC_INSERT_CONTINUE_ON_ERROR); _mongoc_write_command_init_insert ( &command, NULL, NULL, write_flags, ++collection->client->cluster.operation_id, true); for (i = 0; i < n_documents; i++) { _mongoc_write_command_insert_append (&command, documents[i]); } _mongoc_collection_write_command_execute ( &command, collection, write_concern, NULL, &result); collection->gle = bson_new (); ret = MONGOC_WRITE_RESULT_COMPLETE (&result, collection->client->error_api_version, write_concern, /* no error domain override */ (mongoc_error_domain_t) 0, collection->gle, error); _mongoc_write_result_destroy (&result); _mongoc_write_command_destroy (&command); return ret; } bool mongoc_collection_insert (mongoc_collection_t *collection, mongoc_insert_flags_t flags, const bson_t *document, const mongoc_write_concern_t *write_concern, bson_error_t *error) { bson_t opts = BSON_INITIALIZER; bson_t reply; bool r; bson_clear (&collection->gle); if (flags & MONGOC_INSERT_NO_VALIDATE) { bson_append_bool (&opts, "validate", 8, false); } if (write_concern) { mongoc_write_concern_append ((mongoc_write_concern_t *) write_concern, &opts); } r = mongoc_collection_insert_one (collection, document, &opts, &reply, error); collection->gle = bson_copy (&reply); bson_destroy (&reply); bson_destroy (&opts); return r; } /* *-------------------------------------------------------------------------- * * mongoc_collection_insert_one -- * * Insert a document into a MongoDB collection. * * Parameters: * @collection: A mongoc_collection_t. * @document: The document to insert. * @opts: Standard command options. * @reply: Optional. Uninitialized doc to receive the update result. * @error: A location for an error or NULL. * * Returns: * true if successful; otherwise false and @error is set. * * If the write concern does not dictate checking the result of the * insert, then true may be returned even though the document was * not actually inserted on the MongoDB server or cluster. 
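*
* A minimal usage sketch (illustrative; error handling abbreviated):
*
*   bson_t reply;
*   bson_error_t error;
*   bson_t *doc = BCON_NEW ("hello", BCON_UTF8 ("world"));
*
*   if (!mongoc_collection_insert_one (
*          collection, doc, NULL, &reply, &error)) {
*      ... inspect error.message ...
*   }
*
*   bson_destroy (&reply);
*   bson_destroy (doc);
*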
* *-------------------------------------------------------------------------- */ bool mongoc_collection_insert_one (mongoc_collection_t *collection, const bson_t *document, const bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_insert_one_opts_t insert_one_opts; mongoc_write_command_t command; mongoc_write_result_t result; bool ret = false; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (document); _mongoc_bson_init_if_set (reply); if (!_mongoc_insert_one_opts_parse ( collection->client, opts, &insert_one_opts, error)) { GOTO (done); } if (!_mongoc_validate_new_document ( document, insert_one_opts.crud.validate, error)) { GOTO (done); } _mongoc_write_result_init (&result); _mongoc_write_command_init_insert_idl ( &command, document, &insert_one_opts.extra, ++collection->client->cluster.operation_id, false); command.flags.bypass_document_validation = insert_one_opts.bypass; _mongoc_collection_write_command_execute_idl ( &command, collection, &insert_one_opts.crud, &result); ret = MONGOC_WRITE_RESULT_COMPLETE (&result, collection->client->error_api_version, insert_one_opts.crud.writeConcern, /* no error domain override */ (mongoc_error_domain_t) 0, reply, error, "insertedCount"); _mongoc_write_result_destroy (&result); _mongoc_write_command_destroy (&command); done: _mongoc_insert_one_opts_cleanup (&insert_one_opts); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_collection_insert_many -- * * Insert documents into a MongoDB collection. Replaces * mongoc_collection_insert_bulk. * * Parameters: * @collection: A mongoc_collection_t. * @documents: The documents to insert. * @n_documents: Length of @documents array. * @opts: Standard command options. * @reply: Optional. Uninitialized doc to receive the update result. * @error: A location for an error or NULL. * * Returns: * true if successful; otherwise false and @error is set. * * If the write concern does not dictate checking the result of the * insert, then true may be returned even though the document was * not actually inserted on the MongoDB server or cluster. 
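*
* A minimal usage sketch (illustrative; documents built with BCON for
* brevity, error handling abbreviated):
*
*   bson_t reply;
*   bson_error_t error;
*   bson_t *d0 = BCON_NEW ("i", BCON_INT32 (0));
*   bson_t *d1 = BCON_NEW ("i", BCON_INT32 (1));
*   const bson_t *docs[2] = { d0, d1 };
*
*   if (!mongoc_collection_insert_many (
*          collection, docs, 2, NULL, &reply, &error)) {
*      ... inspect error.message ...
*   }
*
*   bson_destroy (&reply);
*   bson_destroy (d0);
*   bson_destroy (d1);
*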
* *-------------------------------------------------------------------------- */ bool mongoc_collection_insert_many (mongoc_collection_t *collection, const bson_t **documents, size_t n_documents, const bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_insert_many_opts_t insert_many_opts; mongoc_write_command_t command; mongoc_write_result_t result; size_t i; bool ret; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (documents); _mongoc_bson_init_if_set (reply); if (!_mongoc_insert_many_opts_parse ( collection->client, opts, &insert_many_opts, error)) { _mongoc_insert_many_opts_cleanup (&insert_many_opts); return false; } _mongoc_write_result_init (&result); _mongoc_write_command_init_insert_idl ( &command, NULL, &insert_many_opts.extra, ++collection->client->cluster.operation_id, false); command.flags.ordered = insert_many_opts.ordered; command.flags.bypass_document_validation = insert_many_opts.bypass; for (i = 0; i < n_documents; i++) { if (!_mongoc_validate_new_document ( documents[i], insert_many_opts.crud.validate, error)) { ret = false; GOTO (done); } _mongoc_write_command_insert_append (&command, documents[i]); } _mongoc_collection_write_command_execute_idl ( &command, collection, &insert_many_opts.crud, &result); ret = MONGOC_WRITE_RESULT_COMPLETE (&result, collection->client->error_api_version, insert_many_opts.crud.writeConcern, /* no error domain override */ (mongoc_error_domain_t) 0, reply, error, "insertedCount"); done: _mongoc_write_result_destroy (&result); _mongoc_write_command_destroy (&command); _mongoc_insert_many_opts_cleanup (&insert_many_opts); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_collection_update -- * * Updates one or more documents matching @selector with @update. * * Parameters: * @collection: A mongoc_collection_t. * @flags: The flags for the update. * @selector: A bson_t containing your selector. * @update: A bson_t containing your update document. * @write_concern: The write concern or NULL. * @error: A location for an error or NULL. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @collection->gle is setup, depending on write_concern->w value. * @error is setup upon failure. 
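*
* A minimal sketch (illustrative; error handling abbreviated):
*
*   bson_error_t error;
*   bson_t *selector = BCON_NEW ("_id", BCON_INT32 (1));
*   bson_t *update = BCON_NEW ("$set", "{", "x", BCON_INT32 (2), "}");
*
*   mongoc_collection_update (
*      collection, MONGOC_UPDATE_NONE, selector, update, NULL, &error);
*
*   bson_destroy (selector);
*   bson_destroy (update);
*
* The newer mongoc_collection_update_one(), mongoc_collection_update_many()
* and mongoc_collection_replace_one() defined later in this file take an
* options document and fill out a CRUD-style reply instead of flags.
*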
* *-------------------------------------------------------------------------- */ bool mongoc_collection_update (mongoc_collection_t *collection, mongoc_update_flags_t uflags, const bson_t *selector, const bson_t *update, const mongoc_write_concern_t *write_concern, bson_error_t *error) { mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT; mongoc_write_command_t command; mongoc_write_result_t result; bson_iter_t iter; bool ret; int flags = uflags; bson_t opts; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (selector); BSON_ASSERT (update); bson_clear (&collection->gle); if (!write_concern) { write_concern = collection->write_concern; } if (!((uint32_t) flags & MONGOC_UPDATE_NO_VALIDATE) && bson_iter_init (&iter, update) && bson_iter_next (&iter)) { if (bson_iter_key (&iter)[0] == '$') { /* update document, all keys must be $-operators */ if (!_mongoc_validate_update ( update, _mongoc_default_update_vflags, error)) { return false; } } else { if (!_mongoc_validate_replace ( update, _mongoc_default_replace_vflags, error)) { return false; } } } bson_init (&opts); BSON_APPEND_BOOL (&opts, "upsert", !!(flags & MONGOC_UPDATE_UPSERT)); BSON_APPEND_BOOL (&opts, "multi", !!(flags & MONGOC_UPDATE_MULTI_UPDATE)); _mongoc_write_result_init (&result); _mongoc_write_command_init_update ( &command, selector, update, &opts, write_flags, ++collection->client->cluster.operation_id); bson_destroy (&opts); command.flags.has_multi_write = !!(flags & MONGOC_UPDATE_MULTI_UPDATE); _mongoc_collection_write_command_execute ( &command, collection, write_concern, NULL, &result); collection->gle = bson_new (); ret = MONGOC_WRITE_RESULT_COMPLETE (&result, collection->client->error_api_version, write_concern, /* no error domain override */ (mongoc_error_domain_t) 0, collection->gle, error); _mongoc_write_result_destroy (&result); _mongoc_write_command_destroy (&command); RETURN (ret); } static bool _mongoc_collection_update_or_replace (mongoc_collection_t *collection, const bson_t *selector, const bson_t *update, mongoc_update_opts_t *update_opts, bool multi, bool bypass, const bson_t *array_filters, bson_t *extra, bson_t *reply, bson_error_t *error) { mongoc_write_command_t command; mongoc_write_result_t result; mongoc_server_stream_t *server_stream = NULL; bool reply_initialized = false; bool ret = false; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (selector); BSON_ASSERT (update); if (update_opts->upsert) { bson_append_bool (extra, "upsert", 6, true); } if (!bson_empty (&update_opts->collation)) { bson_append_document (extra, "collation", 9, &update_opts->collation); } if (!bson_empty0 (array_filters)) { bson_append_array (extra, "arrayFilters", 12, array_filters); } if (multi) { bson_append_bool (extra, "multi", 5, true); } _mongoc_write_result_init (&result); _mongoc_write_command_init_update_idl ( &command, selector, update, extra, ++collection->client->cluster.operation_id); command.flags.has_multi_write = multi; command.flags.bypass_document_validation = bypass; if (!bson_empty (&update_opts->collation)) { command.flags.has_collation = true; } server_stream = mongoc_cluster_stream_for_writes (&collection->client->cluster, update_opts->crud.client_session, reply, error); if (!server_stream) { /* mongoc_cluster_stream_for_writes inits reply on error */ reply_initialized = true; GOTO (done); } if (!bson_empty0 (array_filters)) { if (server_stream->sd->max_wire_version < WIRE_VERSION_ARRAY_FILTERS) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected 
server does not support array filters"); GOTO (done); } if (!mongoc_write_concern_is_acknowledged ( update_opts->crud.writeConcern)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "Cannot use array filters with unacknowledged writes"); GOTO (done); } } if (_mongoc_client_session_in_txn (update_opts->crud.client_session) && update_opts->crud.writeConcern) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set write concern after starting transaction"); GOTO (done); } if (!update_opts->crud.writeConcern && !_mongoc_client_session_in_txn (update_opts->crud.client_session)) { update_opts->crud.writeConcern = collection->write_concern; update_opts->crud.write_concern_owned = false; } _mongoc_write_command_execute_idl (&command, collection->client, server_stream, collection->db, collection->collection, 0 /* offset */, &update_opts->crud, &result); _mongoc_bson_init_if_set (reply); reply_initialized = true; /* set fields described in CRUD spec for the UpdateResult */ ret = MONGOC_WRITE_RESULT_COMPLETE (&result, collection->client->error_api_version, update_opts->crud.writeConcern, /* no error domain override */ (mongoc_error_domain_t) 0, reply, error, "modifiedCount", "matchedCount", "upsertedCount", "upsertedId"); done: _mongoc_write_result_destroy (&result); mongoc_server_stream_cleanup (server_stream); _mongoc_write_command_destroy (&command); if (!reply_initialized) { _mongoc_bson_init_if_set (reply); } RETURN (ret); } bool mongoc_collection_update_one (mongoc_collection_t *collection, const bson_t *selector, const bson_t *update, const bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_update_one_opts_t update_one_opts; bool ret; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (update); if (!_mongoc_update_one_opts_parse ( collection->client, opts, &update_one_opts, error)) { _mongoc_update_one_opts_cleanup (&update_one_opts); _mongoc_bson_init_if_set (reply); return false; } if (!_mongoc_validate_update ( update, update_one_opts.update.crud.validate, error)) { _mongoc_update_one_opts_cleanup (&update_one_opts); _mongoc_bson_init_if_set (reply); return false; } ret = _mongoc_collection_update_or_replace (collection, selector, update, &update_one_opts.update, false /* multi */, update_one_opts.update.bypass, &update_one_opts.arrayFilters, &update_one_opts.extra, reply, error); _mongoc_update_one_opts_cleanup (&update_one_opts); RETURN (ret); } bool mongoc_collection_update_many (mongoc_collection_t *collection, const bson_t *selector, const bson_t *update, const bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_update_many_opts_t update_many_opts; bool ret; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (update); if (!_mongoc_update_many_opts_parse ( collection->client, opts, &update_many_opts, error)) { _mongoc_update_many_opts_cleanup (&update_many_opts); _mongoc_bson_init_if_set (reply); return false; } if (!_mongoc_validate_update ( update, update_many_opts.update.crud.validate, error)) { _mongoc_update_many_opts_cleanup (&update_many_opts); _mongoc_bson_init_if_set (reply); return false; } ret = _mongoc_collection_update_or_replace (collection, selector, update, &update_many_opts.update, true /* multi */, update_many_opts.update.bypass, &update_many_opts.arrayFilters, &update_many_opts.extra, reply, error); _mongoc_update_many_opts_cleanup (&update_many_opts); RETURN (ret); } bool mongoc_collection_replace_one (mongoc_collection_t *collection, const bson_t *selector, const bson_t *replacement, const 
bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_replace_one_opts_t replace_one_opts; bool ret; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (replacement); if (!_mongoc_replace_one_opts_parse ( collection->client, opts, &replace_one_opts, error)) { _mongoc_replace_one_opts_cleanup (&replace_one_opts); _mongoc_bson_init_if_set (reply); return false; } if (!_mongoc_validate_replace ( replacement, replace_one_opts.update.crud.validate, error)) { _mongoc_replace_one_opts_cleanup (&replace_one_opts); _mongoc_bson_init_if_set (reply); return false; } ret = _mongoc_collection_update_or_replace (collection, selector, replacement, &replace_one_opts.update, false /* multi */, replace_one_opts.update.bypass, NULL, &replace_one_opts.extra, reply, error); _mongoc_replace_one_opts_cleanup (&replace_one_opts); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_collection_save -- * * Save @document to @collection. * * If the document has an _id field, it will be updated. Otherwise, * the document will be inserted into the collection. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @error is set upon failure if non-NULL. * *-------------------------------------------------------------------------- */ bool mongoc_collection_save (mongoc_collection_t *collection, const bson_t *document, const mongoc_write_concern_t *write_concern, bson_error_t *error) { bson_iter_t iter; bool ret; bson_t selector; BSON_ASSERT (collection); BSON_ASSERT (document); BEGIN_IGNORE_DEPRECATIONS if (!bson_iter_init_find (&iter, document, "_id")) { return mongoc_collection_insert ( collection, MONGOC_INSERT_NONE, document, write_concern, error); } bson_init (&selector); if (!bson_append_iter (&selector, NULL, 0, &iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Failed to append bson to create update."); bson_destroy (&selector); return false; } /* this document will be inserted, validate same as for inserts */ if (!_mongoc_validate_new_document ( document, _mongoc_default_insert_vflags, error)) { return false; } ret = mongoc_collection_update (collection, MONGOC_UPDATE_UPSERT | MONGOC_UPDATE_NO_VALIDATE, &selector, document, write_concern, error); END_IGNORE_DEPRECATIONS bson_destroy (&selector); return ret; } /* *-------------------------------------------------------------------------- * * mongoc_collection_remove -- * * Delete one or more items from a collection. If you want to * limit to a single delete, provided MONGOC_REMOVE_SINGLE_REMOVE * for @flags. * * Superseded by mongoc_collection_delete_one/many. * * Parameters: * @collection: A mongoc_collection_t. * @flags: the delete flags or 0. * @selector: A selector of documents to delete. * @write_concern: A write concern or NULL. If NULL, the default * write concern for the collection will be used. * @error: A location for an error or NULL. * * Returns: * true if successful; otherwise false and error is set. * * If the write concern does not dictate checking the result, this * function may return true even if it failed. * * Side effects: * @collection->gle is setup, depending on write_concern->w value. * @error is setup upon failure. 
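*
* A sketch of the superseding API (illustrative; error handling
* abbreviated):
*
*   bson_t reply;
*   bson_error_t error;
*   bson_t *selector = BCON_NEW ("expired", BCON_BOOL (true));
*
*   mongoc_collection_delete_many (
*      collection, selector, NULL, &reply, &error);
*
*   bson_destroy (&reply);
*   bson_destroy (selector);
*
* On acknowledged writes the reply carries a "deletedCount" field, as set
* up by _mongoc_delete_one_or_many() below.
*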
* *-------------------------------------------------------------------------- */ bool mongoc_collection_remove (mongoc_collection_t *collection, mongoc_remove_flags_t flags, const bson_t *selector, const mongoc_write_concern_t *write_concern, bson_error_t *error) { mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT; mongoc_write_command_t command; mongoc_write_result_t result; bson_t opts; bool ret; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (selector); bson_clear (&collection->gle); if (!write_concern) { write_concern = collection->write_concern; } bson_init (&opts); BSON_APPEND_INT32 ( &opts, "limit", flags & MONGOC_REMOVE_SINGLE_REMOVE ? 1 : 0); _mongoc_write_result_init (&result); ++collection->client->cluster.operation_id; _mongoc_write_command_init_delete (&command, selector, NULL, &opts, write_flags, collection->client->cluster.operation_id); bson_destroy (&opts); command.flags.has_multi_write = !(flags & MONGOC_REMOVE_SINGLE_REMOVE); _mongoc_collection_write_command_execute ( &command, collection, write_concern, NULL, &result); collection->gle = bson_new (); ret = MONGOC_WRITE_RESULT_COMPLETE (&result, collection->client->error_api_version, write_concern, 0 /* no error domain override */, collection->gle, error); _mongoc_write_result_destroy (&result); _mongoc_write_command_destroy (&command); RETURN (ret); } bool mongoc_collection_delete (mongoc_collection_t *collection, mongoc_delete_flags_t flags, const bson_t *selector, const mongoc_write_concern_t *write_concern, bson_error_t *error) { return mongoc_collection_remove (collection, (mongoc_remove_flags_t) flags, selector, write_concern, error); } static bool _mongoc_delete_one_or_many (mongoc_collection_t *collection, bool multi, const bson_t *selector, mongoc_crud_opts_t *crud, const bson_t *cmd_opts, const bson_t *collation, bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_write_command_t command; mongoc_write_result_t result; bool ret; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (selector); BSON_ASSERT (bson_empty0 (reply)); _mongoc_write_result_init (&result); bson_append_int32 (opts, "limit", 5, multi ? 
0 : 1); if (!bson_empty (collation)) { bson_append_document (opts, "collation", 9, collation); } _mongoc_write_command_init_delete_idl ( &command, selector, cmd_opts, opts, ++collection->client->cluster.operation_id); command.flags.has_multi_write = multi; if (!bson_empty (collation)) { command.flags.has_collation = true; } _mongoc_collection_write_command_execute_idl ( &command, collection, crud, &result); /* set field described in CRUD spec for the DeleteResult */ ret = MONGOC_WRITE_RESULT_COMPLETE (&result, collection->client->error_api_version, crud->writeConcern, /* no error domain override */ (mongoc_error_domain_t) 0, reply, error, "deletedCount"); _mongoc_write_result_destroy (&result); _mongoc_write_command_destroy (&command); RETURN (ret); } bool mongoc_collection_delete_one (mongoc_collection_t *collection, const bson_t *selector, const bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_delete_one_opts_t delete_one_opts; bson_t limit = BSON_INITIALIZER; bool ret = false; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (selector); _mongoc_bson_init_if_set (reply); if (!_mongoc_delete_one_opts_parse ( collection->client, opts, &delete_one_opts, error)) { GOTO (done); } ret = _mongoc_delete_one_or_many (collection, false /* multi */, selector, &delete_one_opts.crud, &delete_one_opts.extra, &delete_one_opts.collation, &limit, reply, error); done: _mongoc_delete_one_opts_cleanup (&delete_one_opts); bson_destroy (&limit); RETURN (ret); } bool mongoc_collection_delete_many (mongoc_collection_t *collection, const bson_t *selector, const bson_t *opts, bson_t *reply, bson_error_t *error) { mongoc_delete_many_opts_t delete_many_opts; bson_t limit = BSON_INITIALIZER; bool ret = false; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (selector); _mongoc_bson_init_if_set (reply); if (!_mongoc_delete_many_opts_parse ( collection->client, opts, &delete_many_opts, error)) { GOTO (done); } ret = _mongoc_delete_one_or_many (collection, true /* multi */, selector, &delete_many_opts.crud, &delete_many_opts.extra, &delete_many_opts.collation, &limit, reply, error); done: _mongoc_delete_many_opts_cleanup (&delete_many_opts); bson_destroy (&limit); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_collection_get_read_prefs -- * * Fetch the default read preferences for the collection. * * Returns: * A mongoc_read_prefs_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_read_prefs_t * mongoc_collection_get_read_prefs (const mongoc_collection_t *collection) { BSON_ASSERT (collection); return collection->read_prefs; } /* *-------------------------------------------------------------------------- * * mongoc_collection_set_read_prefs -- * * Sets the default read preferences for the collection instance. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_collection_set_read_prefs (mongoc_collection_t *collection, const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (collection); if (collection->read_prefs) { mongoc_read_prefs_destroy (collection->read_prefs); collection->read_prefs = NULL; } if (read_prefs) { collection->read_prefs = mongoc_read_prefs_copy (read_prefs); } } /* *-------------------------------------------------------------------------- * * mongoc_collection_get_read_concern -- * * Fetches the default read concern for the collection instance. 
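*
* Paired with mongoc_collection_set_read_concern() below; a short sketch
* (illustrative):
*
*   mongoc_read_concern_t *rc = mongoc_read_concern_new ();
*
*   mongoc_read_concern_set_level (rc, MONGOC_READ_CONCERN_LEVEL_MAJORITY);
*   mongoc_collection_set_read_concern (collection, rc);
*   mongoc_read_concern_destroy (rc);
*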
* * Returns: * A mongoc_read_concern_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_read_concern_t * mongoc_collection_get_read_concern (const mongoc_collection_t *collection) { BSON_ASSERT (collection); return collection->read_concern; } /* *-------------------------------------------------------------------------- * * mongoc_collection_set_read_concern -- * * Sets the default read concern for the collection instance. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_collection_set_read_concern (mongoc_collection_t *collection, const mongoc_read_concern_t *read_concern) { BSON_ASSERT (collection); if (collection->read_concern) { mongoc_read_concern_destroy (collection->read_concern); collection->read_concern = NULL; } if (read_concern) { collection->read_concern = mongoc_read_concern_copy (read_concern); } } /* *-------------------------------------------------------------------------- * * mongoc_collection_get_write_concern -- * * Fetches the default write concern for the collection instance. * * Returns: * A mongoc_write_concern_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_write_concern_t * mongoc_collection_get_write_concern (const mongoc_collection_t *collection) { BSON_ASSERT (collection); return collection->write_concern; } /* *-------------------------------------------------------------------------- * * mongoc_collection_set_write_concern -- * * Sets the default write concern for the collection instance. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_collection_set_write_concern ( mongoc_collection_t *collection, const mongoc_write_concern_t *write_concern) { BSON_ASSERT (collection); if (collection->write_concern) { mongoc_write_concern_destroy (collection->write_concern); collection->write_concern = NULL; } if (write_concern) { collection->write_concern = mongoc_write_concern_copy (write_concern); } } /* *-------------------------------------------------------------------------- * * mongoc_collection_get_name -- * * Returns the name of the collection, excluding the database name. * * Returns: * A string which should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const char * mongoc_collection_get_name (mongoc_collection_t *collection) { BSON_ASSERT (collection); return collection->collection; } /* *-------------------------------------------------------------------------- * * mongoc_collection_get_last_error -- * * Returns a bulk result. * * Returns: * NULL or a bson_t that should not be modified or freed. This value * is not guaranteed to be persistent between calls into the * mongoc_collection_t instance, and therefore must be copied if * you would like to keep it around. * * Side effects: * None. 
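*
* A short sketch of keeping the result around (illustrative):
*
*   const bson_t *gle = mongoc_collection_get_last_error (collection);
*   bson_t *saved = gle ? bson_copy (gle) : NULL;
*
*   ... "saved", if non-NULL, must later be freed with bson_destroy () ...
*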
* *-------------------------------------------------------------------------- */ const bson_t * mongoc_collection_get_last_error ( const mongoc_collection_t *collection) /* IN */ { BSON_ASSERT (collection); return collection->gle; } /* *-------------------------------------------------------------------------- * * mongoc_collection_validate -- * * Helper to call the validate command on the MongoDB server to * validate the collection. * * Options may be additional options, or NULL. * Currently supported options are: * * "full": Boolean * * If full is true, then perform a more resource intensive * validation. * * The result is stored in reply. * * Returns: * true if successful; otherwise false and @error is set. * * Side effects: * @reply is set if successful. * @error may be set. * *-------------------------------------------------------------------------- */ bool mongoc_collection_validate (mongoc_collection_t *collection, /* IN */ const bson_t *options, /* IN */ bson_t *reply, /* OUT */ bson_error_t *error) /* IN */ { bson_iter_t iter; bson_t cmd = BSON_INITIALIZER; bool ret = false; bool reply_initialized = false; BSON_ASSERT (collection); if (options && bson_iter_init_find (&iter, options, "full") && !BSON_ITER_HOLDS_BOOL (&iter)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "'full' must be a boolean value."); goto cleanup; } bson_append_utf8 ( &cmd, "validate", 8, collection->collection, collection->collectionlen); if (options) { bson_concat (&cmd, options); } ret = mongoc_collection_command_simple (collection, &cmd, NULL, reply, error); reply_initialized = true; cleanup: bson_destroy (&cmd); if (reply && !reply_initialized) { bson_init (reply); } return ret; } /* *-------------------------------------------------------------------------- * * mongoc_collection_rename -- * * Rename the collection to @new_name. * * If @new_db is NULL, the same db will be used. * * If @drop_target_before_rename is true, then a collection named * @new_name will be dropped before renaming @collection to * @new_name. * * Returns: * true on success; false on failure and @error is set. * * Side effects: * @error is set on failure. * *-------------------------------------------------------------------------- */ bool mongoc_collection_rename (mongoc_collection_t *collection, const char *new_db, const char *new_name, bool drop_target_before_rename, bson_error_t *error) { return mongoc_collection_rename_with_opts ( collection, new_db, new_name, drop_target_before_rename, NULL, error); } bool mongoc_collection_rename_with_opts (mongoc_collection_t *collection, const char *new_db, const char *new_name, bool drop_target_before_rename, const bson_t *opts, bson_error_t *error) { bson_t cmd = BSON_INITIALIZER; char newns[MONGOC_NAMESPACE_MAX + 1]; bool ret; BSON_ASSERT (collection); BSON_ASSERT (new_name); if (strchr (new_name, '$')) { bson_set_error (error, MONGOC_ERROR_NAMESPACE, MONGOC_ERROR_NAMESPACE_INVALID, "\"%s\" is an invalid collection name.", new_name); return false; } bson_snprintf ( newns, sizeof newns, "%s.%s", new_db ? 
new_db : collection->db, new_name); BSON_APPEND_UTF8 (&cmd, "renameCollection", collection->ns); BSON_APPEND_UTF8 (&cmd, "to", newns); if (drop_target_before_rename) { BSON_APPEND_BOOL (&cmd, "dropTarget", true); } ret = _mongoc_client_command_with_opts (collection->client, "admin", &cmd, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, /* user prefs */ collection->read_prefs, collection->read_concern, collection->write_concern, NULL, /* reply */ error); if (ret) { if (new_db) { bson_snprintf (collection->db, sizeof collection->db, "%s", new_db); } bson_snprintf ( collection->collection, sizeof collection->collection, "%s", new_name); collection->collectionlen = (int) strlen (collection->collection); bson_snprintf (collection->ns, sizeof collection->ns, "%s.%s", collection->db, new_name); collection->nslen = (int) strlen (collection->ns); } bson_destroy (&cmd); return ret; } /* *-------------------------------------------------------------------------- * * mongoc_collection_stats -- * * Fetches statistics about the collection. * * The result is stored in @stats, which should NOT be an initialized * bson_t or a leak will occur. * * @stats, @options, and @error are optional. * * Returns: * true on success and @stats is set. * false on failure and @error is set. * * Side effects: * @stats and @error. * *-------------------------------------------------------------------------- */ bool mongoc_collection_stats (mongoc_collection_t *collection, const bson_t *options, bson_t *stats, bson_error_t *error) { bson_iter_t iter; bson_t cmd = BSON_INITIALIZER; bool ret; BSON_ASSERT (collection); if (options && bson_iter_init_find (&iter, options, "scale") && !BSON_ITER_HOLDS_INT32 (&iter)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "'scale' must be an int32 value."); return false; } BSON_APPEND_UTF8 (&cmd, "collStats", collection->collection); if (options) { bson_concat (&cmd, options); } /* Server Selection Spec: "may-use-secondary" commands SHOULD take a read * preference argument and otherwise MUST use the default read preference * from client, database or collection configuration. 
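 *
 * Example use of mongoc_collection_stats (illustrative sketch; note that the
 * stats document below is deliberately left uninitialized, as documented
 * above):
 *
 *    bson_t *opts = BCON_NEW ("scale", BCON_INT32 (1024));
 *    bson_t stats;
 *    bson_error_t error;
 *
 *    if (mongoc_collection_stats (collection, opts, &stats, &error)) {
 *       // stats now holds the collStats reply
 *    }
 *    bson_destroy (&stats);
 *    bson_destroy (opts);
 *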
*/ ret = mongoc_collection_command_simple ( collection, &cmd, collection->read_prefs, stats, error); bson_destroy (&cmd); return ret; } mongoc_bulk_operation_t * mongoc_collection_create_bulk_operation ( mongoc_collection_t *collection, bool ordered, const mongoc_write_concern_t *write_concern) { bson_t opts = BSON_INITIALIZER; mongoc_bulk_operation_t *bulk; bool wc_ok = true; bson_append_bool (&opts, "ordered", 7, ordered); if (write_concern) { wc_ok = mongoc_write_concern_append ( (mongoc_write_concern_t *) write_concern, &opts); } bulk = mongoc_collection_create_bulk_operation_with_opts (collection, &opts); bson_destroy (&opts); if (!wc_ok) { bson_set_error (&bulk->result.error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "invalid writeConcern"); } return bulk; } mongoc_bulk_operation_t * mongoc_collection_create_bulk_operation_with_opts ( mongoc_collection_t *collection, const bson_t *opts) { mongoc_bulk_opts_t bulk_opts; mongoc_bulk_write_flags_t write_flags = MONGOC_BULK_WRITE_FLAGS_INIT; mongoc_write_concern_t *wc = NULL; mongoc_bulk_operation_t *bulk; bson_error_t err = {0}; BSON_ASSERT (collection); (void) _mongoc_bulk_opts_parse (collection->client, opts, &bulk_opts, &err); if (!_mongoc_client_session_in_txn (bulk_opts.client_session)) { wc = COALESCE (bulk_opts.writeConcern, collection->write_concern); } write_flags.ordered = bulk_opts.ordered; bulk = _mongoc_bulk_operation_new (collection->client, collection->db, collection->collection, write_flags, wc); bulk->session = bulk_opts.client_session; if (err.domain) { /* _mongoc_bulk_opts_parse failed, above */ memcpy (&bulk->result.error, &err, sizeof (bson_error_t)); } else if (_mongoc_client_session_in_txn (bulk_opts.client_session) && !mongoc_write_concern_is_default (bulk_opts.writeConcern)) { bson_set_error (&bulk->result.error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set write concern after starting transaction"); } _mongoc_bulk_opts_cleanup (&bulk_opts); return bulk; } /* *-------------------------------------------------------------------------- * * mongoc_collection_find_and_modify_with_opts -- * * Find a document in @collection matching @query, applying @opts. * * If @reply is not NULL, then the result document will be placed * in reply and should be released with bson_destroy(). * * See http://docs.mongodb.org/manual/reference/command/findAndModify/ * for more information. * * Returns: * true on success; false on failure. * * Side effects: * reply is initialized. * error is set if false is returned. * *-------------------------------------------------------------------------- */ bool mongoc_collection_find_and_modify_with_opts ( mongoc_collection_t *collection, const bson_t *query, const mongoc_find_and_modify_opts_t *opts, bson_t *reply, bson_error_t *error) { mongoc_cluster_t *cluster; mongoc_client_session_t *cs = NULL; mongoc_cmd_parts_t parts; bool is_retryable; bson_iter_t iter; bson_iter_t inner; const char *name; bson_t reply_local; bson_t *reply_ptr; bool ret = false; bson_t command = BSON_INITIALIZER; mongoc_server_stream_t *server_stream = NULL; mongoc_server_stream_t *retry_server_stream = NULL; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (query); BSON_ASSERT (opts); reply_ptr = reply ? 
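/* Example use of mongoc_collection_create_bulk_operation_with_opts, defined
 * above (illustrative sketch; "doc" is an assumed pre-built bson_t *):
 *
 *    bson_t *opts = BCON_NEW ("ordered", BCON_BOOL (false));
 *    mongoc_bulk_operation_t *bulk =
 *       mongoc_collection_create_bulk_operation_with_opts (collection, opts);
 *    bson_t reply;
 *    bson_error_t error;
 *
 *    mongoc_bulk_operation_insert (bulk, doc);
 *
 *    if (!mongoc_bulk_operation_execute (bulk, &reply, &error)) {
 *       MONGOC_WARNING ("bulk write failed: %s", error.message);
 *    }
 *
 *    bson_destroy (&reply);
 *    mongoc_bulk_operation_destroy (bulk);
 *    bson_destroy (opts);
 */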
reply : &reply_local; cluster = &collection->client->cluster; mongoc_cmd_parts_init ( &parts, collection->client, collection->db, MONGOC_QUERY_NONE, &command); parts.is_read_command = true; parts.is_write_command = true; /* we need a session to fetch a stream to call cmd_parts_append_opts * below, which parses the session from opts->extra *again* */ if (bson_iter_init_find (&iter, &opts->extra, "sessionId")) { if (!_mongoc_client_session_from_iter ( collection->client, &iter, &cs, error)) { bson_init (reply_ptr); GOTO (done); } } server_stream = mongoc_cluster_stream_for_writes (cluster, cs, reply_ptr, error); if (!server_stream) { GOTO (done); } bson_init (reply_ptr); name = mongoc_collection_get_name (collection); BSON_APPEND_UTF8 (&command, "findAndModify", name); BSON_APPEND_DOCUMENT (&command, "query", query); if (opts->sort) { BSON_APPEND_DOCUMENT (&command, "sort", opts->sort); } if (opts->update) { if (_mongoc_document_is_pipeline (opts->update)) { BSON_APPEND_ARRAY (&command, "update", opts->update); } else { BSON_APPEND_DOCUMENT (&command, "update", opts->update); } } if (opts->fields) { BSON_APPEND_DOCUMENT (&command, "fields", opts->fields); } if (opts->flags & MONGOC_FIND_AND_MODIFY_REMOVE) { BSON_APPEND_BOOL (&command, "remove", true); } if (opts->flags & MONGOC_FIND_AND_MODIFY_UPSERT) { BSON_APPEND_BOOL (&command, "upsert", true); } if (opts->flags & MONGOC_FIND_AND_MODIFY_RETURN_NEW) { BSON_APPEND_BOOL (&command, "new", true); } if (opts->bypass_document_validation) { BSON_APPEND_BOOL (&command, "bypassDocumentValidation", opts->bypass_document_validation); } if (opts->max_time_ms > 0) { BSON_APPEND_INT32 (&command, "maxTimeMS", opts->max_time_ms); } if (bson_iter_init (&iter, &opts->extra)) { bool ok = mongoc_cmd_parts_append_opts ( &parts, &iter, server_stream->sd->max_wire_version, error); if (!ok) { GOTO (done); } } if (_mongoc_client_session_in_txn (parts.assembled.session) && !bson_empty (&parts.write_concern_document)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set write concern after starting transaction"); GOTO (done); } if (!bson_has_field (&opts->extra, "writeConcern")) { if (server_stream->sd->max_wire_version >= WIRE_VERSION_FAM_WRITE_CONCERN) { if (!mongoc_write_concern_is_valid (collection->write_concern)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The write concern is invalid."); GOTO (done); } if (mongoc_write_concern_is_acknowledged (collection->write_concern)) { if (!mongoc_cmd_parts_set_write_concern ( &parts, collection->write_concern, server_stream->sd->max_wire_version, error)) { GOTO (done); } } } } parts.assembled.operation_id = ++cluster->operation_id; if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) { GOTO (done); } is_retryable = parts.is_retryable_write; /* increment the transaction number for the first attempt of each retryable * write command */ if (is_retryable) { bson_iter_t txn_number_iter; BSON_ASSERT (bson_iter_init_find ( &txn_number_iter, parts.assembled.command, "txnNumber")); bson_iter_overwrite_int64 ( &txn_number_iter, ++parts.assembled.session->server_session->txn_number); } retry: bson_destroy (reply_ptr); ret = mongoc_cluster_run_command_monitored ( cluster, &parts.assembled, reply_ptr, error); if (is_retryable) { _mongoc_write_error_update_if_unsupported_storage_engine ( ret, error, reply_ptr); } /* If a retryable error is encountered and the write is retryable, select * a new writable stream and retry. 
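 * The already-assembled command, including the txnNumber set above, is sent
 * unchanged on the retry, so the server can recognize the second attempt as
 * a retry of the same write rather than a new operation.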
If server selection fails or the selected * server does not support retryable writes, fall through and allow the * original error to be reported. */ if (is_retryable && _mongoc_write_error_get_type (ret, error, reply_ptr) == MONGOC_WRITE_ERR_RETRY) { bson_error_t ignored_error; /* each write command may be retried at most once */ is_retryable = false; retry_server_stream = mongoc_cluster_stream_for_writes ( cluster, parts.assembled.session, NULL /* reply */, &ignored_error); if (retry_server_stream && retry_server_stream->sd->max_wire_version >= WIRE_VERSION_RETRY_WRITES) { parts.assembled.server_stream = retry_server_stream; GOTO (retry); } } if (bson_iter_init_find (&iter, reply_ptr, "writeConcernError") && BSON_ITER_HOLDS_DOCUMENT (&iter)) { const char *errmsg = NULL; int32_t code = 0; BSON_ASSERT (bson_iter_recurse (&iter, &inner)); while (bson_iter_next (&inner)) { if (BSON_ITER_IS_KEY (&inner, "code")) { code = bson_iter_int32 (&inner); } else if (BSON_ITER_IS_KEY (&inner, "errmsg")) { errmsg = bson_iter_utf8 (&inner, NULL); } } bson_set_error (error, MONGOC_ERROR_WRITE_CONCERN, code, "Write Concern error: %s", errmsg); ret = false; } done: mongoc_server_stream_cleanup (server_stream); mongoc_server_stream_cleanup (retry_server_stream); mongoc_cmd_parts_cleanup (&parts); bson_destroy (&command); if (&reply_local == reply_ptr) { bson_destroy (&reply_local); } RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_collection_find_and_modify -- * * Find a document in @collection matching @query and update it with * the update document @update. * * If @reply is not NULL, then the result document will be placed * in reply and should be released with bson_destroy(). * * If @remove is true, then the matching documents will be removed. * * If @fields is not NULL, it will be used to select the desired * resulting fields. * * If @_new is true, then the new version of the document is returned * instead of the old document. * * See http://docs.mongodb.org/manual/reference/command/findAndModify/ * for more information. * * Returns: * true on success; false on failure. * * Side effects: * reply is initialized. * error is set if false is returned. 
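 * Example (illustrative sketch; increments a counter field and returns the
 * updated document):
 *
 *    bson_t *query = BCON_NEW ("_id", BCON_INT32 (1));
 *    bson_t *update = BCON_NEW ("$inc", "{", "seq", BCON_INT32 (1), "}");
 *    bson_t reply;
 *    bson_error_t error;
 *
 *    if (mongoc_collection_find_and_modify (collection, query, NULL, update,
 *                                           NULL, false, false, true,
 *                                           &reply, &error)) {
 *       // the reply's "value" field holds the updated document
 *    }
 *
 *    bson_destroy (&reply);
 *    bson_destroy (update);
 *    bson_destroy (query);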
* *-------------------------------------------------------------------------- */ bool mongoc_collection_find_and_modify (mongoc_collection_t *collection, const bson_t *query, const bson_t *sort, const bson_t *update, const bson_t *fields, bool _remove, bool upsert, bool _new, bson_t *reply, bson_error_t *error) { mongoc_find_and_modify_opts_t *opts; int flags = 0; bool ret; ENTRY; BSON_ASSERT (collection); BSON_ASSERT (query); BSON_ASSERT (update || _remove); if (_remove) { flags |= MONGOC_FIND_AND_MODIFY_REMOVE; } if (upsert) { flags |= MONGOC_FIND_AND_MODIFY_UPSERT; } if (_new) { flags |= MONGOC_FIND_AND_MODIFY_RETURN_NEW; } opts = mongoc_find_and_modify_opts_new (); mongoc_find_and_modify_opts_set_sort (opts, sort); mongoc_find_and_modify_opts_set_update (opts, update); mongoc_find_and_modify_opts_set_fields (opts, fields); mongoc_find_and_modify_opts_set_flags (opts, flags); ret = mongoc_collection_find_and_modify_with_opts ( collection, query, opts, reply, error); mongoc_find_and_modify_opts_destroy (opts); return ret; } mongoc_change_stream_t * mongoc_collection_watch (const mongoc_collection_t *coll, const bson_t *pipeline, const bson_t *opts) { return _mongoc_change_stream_new_from_collection (coll, pipeline, opts); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-collection.h0000644000076500000240000004075213572250757025626 0ustar alcaeusstaff/* * Copyright 2013-2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_COLLECTION_H #define MONGOC_COLLECTION_H #include #include "mongoc/mongoc-change-stream.h" #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-bulk-operation.h" #include "mongoc/mongoc-flags.h" #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-index.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-read-concern.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-find-and-modify.h" BSON_BEGIN_DECLS typedef struct _mongoc_collection_t mongoc_collection_t; MONGOC_EXPORT (mongoc_cursor_t *) mongoc_collection_aggregate (mongoc_collection_t *collection, mongoc_query_flags_t flags, const bson_t *pipeline, const bson_t *opts, const mongoc_read_prefs_t *read_prefs) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (void) mongoc_collection_destroy (mongoc_collection_t *collection); MONGOC_EXPORT (mongoc_collection_t *) mongoc_collection_copy (mongoc_collection_t *collection); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_collection_command (mongoc_collection_t *collection, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, const bson_t *command, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (bool) mongoc_collection_read_command_with_opts (mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_write_command_with_opts (mongoc_collection_t *collection, const bson_t *command, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_read_write_command_with_opts ( mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs /* IGNORED */, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_command_with_opts (mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_command_simple (mongoc_collection_t *collection, const bson_t *command, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (int64_t) mongoc_collection_count (mongoc_collection_t *collection, mongoc_query_flags_t flags, const bson_t *query, int64_t skip, int64_t limit, const mongoc_read_prefs_t *read_prefs, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_collection_count_documents or mongoc_collection_estimated_document_count); MONGOC_EXPORT (int64_t) mongoc_collection_count_with_opts (mongoc_collection_t *collection, mongoc_query_flags_t flags, const bson_t *query, int64_t skip, int64_t limit, const bson_t *opts, const mongoc_read_prefs_t *read_prefs, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_collection_count_documents or mongoc_collection_estimated_document_count); MONGOC_EXPORT (bool) mongoc_collection_drop (mongoc_collection_t *collection, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_drop_with_opts (mongoc_collection_t *collection, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_drop_index (mongoc_collection_t *collection, const char *index_name, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_drop_index_with_opts (mongoc_collection_t *collection, const char *index_name, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_create_index 
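/* Example use of mongoc_collection_aggregate, declared above (illustrative
 * sketch; the pipeline matches documents where "x" equals 1):
 *
 *    bson_t *pipeline = BCON_NEW ("pipeline", "[",
 *                                 "{", "$match", "{", "x", BCON_INT32 (1), "}", "}",
 *                                 "]");
 *    mongoc_cursor_t *cursor = mongoc_collection_aggregate (
 *       collection, MONGOC_QUERY_NONE, pipeline, NULL, NULL);
 *    const bson_t *doc;
 *
 *    while (mongoc_cursor_next (cursor, &doc)) {
 *       // process doc
 *    }
 *
 *    mongoc_cursor_destroy (cursor);
 *    bson_destroy (pipeline);
 */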
(mongoc_collection_t *collection, const bson_t *keys, const mongoc_index_opt_t *opt, bson_error_t *error) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (bool) mongoc_collection_create_index_with_opts (mongoc_collection_t *collection, const bson_t *keys, const mongoc_index_opt_t *opt, const bson_t *opts, bson_t *reply, bson_error_t *error) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (bool) mongoc_collection_ensure_index (mongoc_collection_t *collection, const bson_t *keys, const mongoc_index_opt_t *opt, bson_error_t *error) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (mongoc_cursor_t *) mongoc_collection_find_indexes (mongoc_collection_t *collection, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_collection_find_indexes_with_opts); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_collection_find_indexes_with_opts (mongoc_collection_t *collection, const bson_t *opts); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_collection_find (mongoc_collection_t *collection, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, const bson_t *query, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) BSON_GNUC_DEPRECATED_FOR (mongoc_collection_find_with_opts) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (mongoc_cursor_t *) mongoc_collection_find_with_opts (mongoc_collection_t *collection, const bson_t *filter, const bson_t *opts, const mongoc_read_prefs_t *read_prefs) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (bool) mongoc_collection_insert (mongoc_collection_t *collection, mongoc_insert_flags_t flags, const bson_t *document, const mongoc_write_concern_t *write_concern, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_insert_one (mongoc_collection_t *collection, const bson_t *document, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_insert_many (mongoc_collection_t *collection, const bson_t **documents, size_t n_documents, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_insert_bulk (mongoc_collection_t *collection, mongoc_insert_flags_t flags, const bson_t **documents, uint32_t n_documents, const mongoc_write_concern_t *write_concern, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_collection_insert_many); MONGOC_EXPORT (bool) mongoc_collection_update (mongoc_collection_t *collection, mongoc_update_flags_t flags, const bson_t *selector, const bson_t *update, const mongoc_write_concern_t *write_concern, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_update_one (mongoc_collection_t *collection, const bson_t *selector, const bson_t *update, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_update_many (mongoc_collection_t *collection, const bson_t *selector, const bson_t *update, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_replace_one (mongoc_collection_t *collection, const bson_t *selector, const bson_t *replacement, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_delete (mongoc_collection_t *collection, mongoc_delete_flags_t flags, const bson_t *selector, const mongoc_write_concern_t *write_concern, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_collection_delete_one or mongoc_collection_delete_many); MONGOC_EXPORT (bool) mongoc_collection_save (mongoc_collection_t *collection, const bson_t *document, const mongoc_write_concern_t *write_concern, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_collection_insert_one or 
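/* Example use of the replacement API, mongoc_collection_insert_one, declared
 * above (illustrative sketch):
 *
 *    bson_t *doc = BCON_NEW ("x", BCON_INT32 (1));
 *    bson_error_t error;
 *
 *    if (!mongoc_collection_insert_one (collection, doc, NULL, NULL, &error)) {
 *       MONGOC_WARNING ("insert failed: %s", error.message);
 *    }
 *
 *    bson_destroy (doc);
 */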
mongoc_collection_replace_one); MONGOC_EXPORT (bool) mongoc_collection_remove (mongoc_collection_t *collection, mongoc_remove_flags_t flags, const bson_t *selector, const mongoc_write_concern_t *write_concern, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_delete_one (mongoc_collection_t *collection, const bson_t *selector, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_delete_many (mongoc_collection_t *collection, const bson_t *selector, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_rename (mongoc_collection_t *collection, const char *new_db, const char *new_name, bool drop_target_before_rename, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_rename_with_opts (mongoc_collection_t *collection, const char *new_db, const char *new_name, bool drop_target_before_rename, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_find_and_modify_with_opts ( mongoc_collection_t *collection, const bson_t *query, const mongoc_find_and_modify_opts_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_find_and_modify (mongoc_collection_t *collection, const bson_t *query, const bson_t *sort, const bson_t *update, const bson_t *fields, bool _remove, bool upsert, bool _new, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_collection_stats (mongoc_collection_t *collection, const bson_t *options, bson_t *reply, bson_error_t *error) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (mongoc_bulk_operation_t *) mongoc_collection_create_bulk_operation ( mongoc_collection_t *collection, bool ordered, const mongoc_write_concern_t *write_concern) BSON_GNUC_WARN_UNUSED_RESULT BSON_GNUC_DEPRECATED_FOR (mongoc_collection_create_bulk_operation_with_opts); MONGOC_EXPORT (mongoc_bulk_operation_t *) mongoc_collection_create_bulk_operation_with_opts ( mongoc_collection_t *collection, const bson_t *opts) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (const mongoc_read_prefs_t *) mongoc_collection_get_read_prefs (const mongoc_collection_t *collection); MONGOC_EXPORT (void) mongoc_collection_set_read_prefs (mongoc_collection_t *collection, const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (const mongoc_read_concern_t *) mongoc_collection_get_read_concern (const mongoc_collection_t *collection); MONGOC_EXPORT (void) mongoc_collection_set_read_concern (mongoc_collection_t *collection, const mongoc_read_concern_t *read_concern); MONGOC_EXPORT (const mongoc_write_concern_t *) mongoc_collection_get_write_concern (const mongoc_collection_t *collection); MONGOC_EXPORT (void) mongoc_collection_set_write_concern ( mongoc_collection_t *collection, const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (const char *) mongoc_collection_get_name (mongoc_collection_t *collection); MONGOC_EXPORT (const bson_t *) mongoc_collection_get_last_error (const mongoc_collection_t *collection) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (char *) mongoc_collection_keys_to_index_string (const bson_t *keys); MONGOC_EXPORT (bool) mongoc_collection_validate (mongoc_collection_t *collection, const bson_t *options, bson_t *reply, bson_error_t *error) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (mongoc_change_stream_t *) mongoc_collection_watch (const mongoc_collection_t *coll, const bson_t *pipeline, const bson_t *opts); MONGOC_EXPORT (int64_t) mongoc_collection_count_documents (mongoc_collection_t *coll, const bson_t *filter, const bson_t *opts, const mongoc_read_prefs_t *read_prefs, 
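/* Example use of mongoc_collection_count_documents (illustrative sketch;
 * the function returns -1 and fills the error on failure):
 *
 *    bson_t *filter = BCON_NEW ("status", BCON_UTF8 ("active"));
 *    bson_error_t error;
 *    int64_t n = mongoc_collection_count_documents (
 *       coll, filter, NULL, NULL, NULL, &error);
 *
 *    if (n < 0) {
 *       MONGOC_WARNING ("count failed: %s", error.message);
 *    }
 *
 *    bson_destroy (filter);
 */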
bson_t *reply, bson_error_t *error); MONGOC_EXPORT (int64_t) mongoc_collection_estimated_document_count ( mongoc_collection_t *coll, const bson_t *opts, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_COLLECTION_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-compression-private.h0000644000076500000240000000350613572250757027500 0ustar alcaeusstaff/* * Copyright 2017 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_COMPRESSION_PRIVATE_H #define MONGOC_COMPRESSION_PRIVATE_H #include "bson/bson.h" /* Compressor IDs */ #define MONGOC_COMPRESSOR_NOOP_ID 0 #define MONGOC_COMPRESSOR_NOOP_STR "noop" #define MONGOC_COMPRESSOR_SNAPPY_ID 1 #define MONGOC_COMPRESSOR_SNAPPY_STR "snappy" #define MONGOC_COMPRESSOR_ZLIB_ID 2 #define MONGOC_COMPRESSOR_ZLIB_STR "zlib" #define MONGOC_COMPRESSOR_ZSTD_ID 3 #define MONGOC_COMPRESSOR_ZSTD_STR "zstd" BSON_BEGIN_DECLS size_t mongoc_compressor_max_compressed_length (int32_t compressor_id, size_t size); bool mongoc_compressor_supported (const char *compressor); const char * mongoc_compressor_id_to_name (int32_t compressor_id); int mongoc_compressor_name_to_id (const char *compressor); bool mongoc_uncompress (int32_t compressor_id, const uint8_t *compressed, size_t compressed_len, uint8_t *uncompressed, size_t *uncompressed_size); bool mongoc_compress (int32_t compressor_id, int32_t compression_level, char *uncompressed, size_t uncompressed_len, char *compressed, size_t *compressed_len); BSON_END_DECLS #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-compression.c0000644000076500000240000001713213572250757026023 0ustar alcaeusstaff/* * Copyright 2017 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-compression-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" #ifdef MONGOC_ENABLE_COMPRESSION #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB #include #endif #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY #include #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZSTD #include #endif #endif size_t mongoc_compressor_max_compressed_length (int32_t compressor_id, size_t len) { TRACE ("Getting compression length for '%s' (%d)", mongoc_compressor_id_to_name (compressor_id), compressor_id); switch (compressor_id) { #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY case MONGOC_COMPRESSOR_SNAPPY_ID: return snappy_max_compressed_length (len); break; #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB case MONGOC_COMPRESSOR_ZLIB_ID: return compressBound (len); break; #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZSTD case MONGOC_COMPRESSOR_ZSTD_ID: return ZSTD_compressBound (len); break; #endif case MONGOC_COMPRESSOR_NOOP_ID: return len; break; default: return 0; } } bool mongoc_compressor_supported (const char *compressor) { #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY if (!strcasecmp (compressor, MONGOC_COMPRESSOR_SNAPPY_STR)) { return true; } #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB if (!strcasecmp (compressor, MONGOC_COMPRESSOR_ZLIB_STR)) { return true; } #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZSTD if (!strcasecmp (compressor, MONGOC_COMPRESSOR_ZSTD_STR)) { return true; } #endif if (!strcasecmp (compressor, MONGOC_COMPRESSOR_NOOP_STR)) { return true; } return false; } const char * mongoc_compressor_id_to_name (int32_t compressor_id) { switch (compressor_id) { case MONGOC_COMPRESSOR_SNAPPY_ID: return MONGOC_COMPRESSOR_SNAPPY_STR; case MONGOC_COMPRESSOR_ZLIB_ID: return MONGOC_COMPRESSOR_ZLIB_STR; case MONGOC_COMPRESSOR_ZSTD_ID: return MONGOC_COMPRESSOR_ZSTD_STR; case MONGOC_COMPRESSOR_NOOP_ID: return MONGOC_COMPRESSOR_NOOP_STR; default: return "unknown"; } } int mongoc_compressor_name_to_id (const char *compressor) { #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY if (strcasecmp (MONGOC_COMPRESSOR_SNAPPY_STR, compressor) == 0) { return MONGOC_COMPRESSOR_SNAPPY_ID; } #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB if (strcasecmp (MONGOC_COMPRESSOR_ZLIB_STR, compressor) == 0) { return MONGOC_COMPRESSOR_ZLIB_ID; } #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZSTD if (strcasecmp (MONGOC_COMPRESSOR_ZSTD_STR, compressor) == 0) { return MONGOC_COMPRESSOR_ZSTD_ID; } #endif if (strcasecmp (MONGOC_COMPRESSOR_NOOP_STR, compressor) == 0) { return MONGOC_COMPRESSOR_NOOP_ID; } return -1; } bool mongoc_uncompress (int32_t compressor_id, const uint8_t *compressed, size_t compressed_len, uint8_t *uncompressed, size_t *uncompressed_len) { TRACE ("Uncompressing with '%s' (%d)", mongoc_compressor_id_to_name (compressor_id), compressor_id); switch (compressor_id) { case MONGOC_COMPRESSOR_SNAPPY_ID: { #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY snappy_status status; status = snappy_uncompress ((const char *) compressed, compressed_len, (char *) uncompressed, uncompressed_len); return status == SNAPPY_OK; #else MONGOC_WARNING ("Received snappy compressed opcode, but snappy " "compression is not compiled in"); return false; #endif break; } case MONGOC_COMPRESSOR_ZLIB_ID: { #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB int ok; ok = uncompress (uncompressed, (unsigned long *) uncompressed_len, compressed, compressed_len); return ok == Z_OK; #else MONGOC_WARNING ("Received zlib compressed opcode, but zlib " "compression is not compiled in"); return false; #endif break; } case 
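/* Example round trip through the helpers above (illustrative sketch; assumes
 * zlib support was compiled in, i.e. MONGOC_ENABLE_COMPRESSION_ZLIB is set,
 * and uses -1 as the zlib default compression level):
 *
 *    char data[] = "hello world";
 *    size_t clen = mongoc_compressor_max_compressed_length (
 *       MONGOC_COMPRESSOR_ZLIB_ID, sizeof data);
 *    char *cbuf = bson_malloc (clen);
 *    uint8_t ubuf[sizeof data];
 *    size_t ulen = sizeof data;
 *
 *    mongoc_compress (MONGOC_COMPRESSOR_ZLIB_ID, -1, data, sizeof data,
 *                     cbuf, &clen);
 *    mongoc_uncompress (MONGOC_COMPRESSOR_ZLIB_ID, (const uint8_t *) cbuf,
 *                       clen, ubuf, &ulen);
 *
 *    bson_free (cbuf);
 */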
MONGOC_COMPRESSOR_ZSTD_ID: { #ifdef MONGOC_ENABLE_COMPRESSION_ZSTD int ok; ok = ZSTD_decompress ((void *) uncompressed, *uncompressed_len, (const void *) compressed, compressed_len); if (!ZSTD_isError (ok)) { *uncompressed_len = ok; } return !ZSTD_isError (ok); #else MONGOC_WARNING ("Received zstd compressed opcode, but zstd " "compression is not compiled in"); return false; #endif break; } case MONGOC_COMPRESSOR_NOOP_ID: memcpy (uncompressed, compressed, compressed_len); *uncompressed_len = compressed_len; return true; default: MONGOC_WARNING ("Unknown compressor ID %d", compressor_id); } return false; } bool mongoc_compress (int32_t compressor_id, int32_t compression_level, char *uncompressed, size_t uncompressed_len, char *compressed, size_t *compressed_len) { TRACE ("Compressing with '%s' (%d)", mongoc_compressor_id_to_name (compressor_id), compressor_id); switch (compressor_id) { case MONGOC_COMPRESSOR_SNAPPY_ID: #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY /* No compression_level option for snappy */ return snappy_compress ( uncompressed, uncompressed_len, compressed, compressed_len) == SNAPPY_OK; #else MONGOC_ERROR ("Client attempting to use compress with snappy, but snappy " "compression is not compiled in"); return false; #endif case MONGOC_COMPRESSOR_ZLIB_ID: #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB return compress2 ((unsigned char *) compressed, (unsigned long *) compressed_len, (unsigned char *) uncompressed, uncompressed_len, compression_level) == Z_OK; #else MONGOC_ERROR ("Client attempting to use compress with zlib, but zlib " "compression is not compiled in"); return false; #endif case MONGOC_COMPRESSOR_ZSTD_ID: { #ifdef MONGOC_ENABLE_COMPRESSION_ZSTD int ok; ok = ZSTD_compress ((void *) compressed, *compressed_len, (const void *) uncompressed, uncompressed_len, 0); if (!ZSTD_isError (ok)) { *compressed_len = ok; } return !ZSTD_isError (ok); #else MONGOC_ERROR ("Client attempting to use compress with zstd, but zstd " "compression is not compiled in"); return false; #endif } case MONGOC_COMPRESSOR_NOOP_ID: memcpy (compressed, uncompressed, uncompressed_len); *compressed_len = uncompressed_len; return true; default: return false; } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-config.h0000644000076500000240000002023713572250757024734 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION) #error "Only can be included directly." 
#endif #ifndef MONGOC_CONFIG_H #define MONGOC_CONFIG_H /* MONGOC_USER_SET_CFLAGS is set from config based on what compiler flags were * used to compile mongoc */ #define MONGOC_USER_SET_CFLAGS "" #define MONGOC_USER_SET_LDFLAGS "" /* MONGOC_CC is used to determine what C compiler was used to compile mongoc */ #define MONGOC_CC "cc" /* * MONGOC_ENABLE_SSL_SECURE_CHANNEL is set from configure to determine if we are * compiled with Native SSL support on Windows */ #define MONGOC_ENABLE_SSL_SECURE_CHANNEL 0 #if MONGOC_ENABLE_SSL_SECURE_CHANNEL != 1 # undef MONGOC_ENABLE_SSL_SECURE_CHANNEL #endif /* * MONGOC_ENABLE_CRYPTO_CNG is set from configure to determine if we are * compiled with Native Crypto support on Windows */ #define MONGOC_ENABLE_CRYPTO_CNG 0 #if MONGOC_ENABLE_CRYPTO_CNG != 1 # undef MONGOC_ENABLE_CRYPTO_CNG #endif /* * MONGOC_ENABLE_SSL_SECURE_TRANSPORT is set from configure to determine if we are * compiled with Native SSL support on Darwin */ #define MONGOC_ENABLE_SSL_SECURE_TRANSPORT 1 #if MONGOC_ENABLE_SSL_SECURE_TRANSPORT != 1 # undef MONGOC_ENABLE_SSL_SECURE_TRANSPORT #endif /* * MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO is set from configure to determine if we are * compiled with Native Crypto support on Darwin */ #define MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO 1 #if MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO != 1 # undef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO #endif /* * MONGOC_ENABLE_SSL_LIBRESSL is set from configure to determine if we are * compiled with LibreSSL support. */ #define MONGOC_ENABLE_SSL_LIBRESSL 0 #if MONGOC_ENABLE_SSL_LIBRESSL != 1 # undef MONGOC_ENABLE_SSL_LIBRESSL #endif /* * MONGOC_ENABLE_SSL_OPENSSL is set from configure to determine if we are * compiled with OpenSSL support. */ #define MONGOC_ENABLE_SSL_OPENSSL 0 #if MONGOC_ENABLE_SSL_OPENSSL != 1 # undef MONGOC_ENABLE_SSL_OPENSSL #endif /* * MONGOC_ENABLE_CRYPTO_LIBCRYPTO is set from configure to determine if we are * compiled with OpenSSL support. */ #define MONGOC_ENABLE_CRYPTO_LIBCRYPTO 0 #if MONGOC_ENABLE_CRYPTO_LIBCRYPTO != 1 # undef MONGOC_ENABLE_CRYPTO_LIBCRYPTO #endif /* * MONGOC_ENABLE_SSL is set from configure to determine if we are * compiled with any SSL support. */ #define MONGOC_ENABLE_SSL 1 #if MONGOC_ENABLE_SSL != 1 # undef MONGOC_ENABLE_SSL #endif /* * MONGOC_ENABLE_CRYPTO is set from configure to determine if we are * compiled with any crypto support. */ #define MONGOC_ENABLE_CRYPTO 1 #if MONGOC_ENABLE_CRYPTO != 1 # undef MONGOC_ENABLE_CRYPTO #endif /* * Use system crypto profile */ #define MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE 0 #if MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE != 1 # undef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE #endif /* * Use ASN1_STRING_get0_data () rather than the deprecated ASN1_STRING_data */ #define MONGOC_HAVE_ASN1_STRING_GET0_DATA 0 #if MONGOC_HAVE_ASN1_STRING_GET0_DATA != 1 # undef MONGOC_HAVE_ASN1_STRING_GET0_DATA #endif /* * MONGOC_ENABLE_SASL is set from configure to determine if we are * compiled with SASL support. */ #define MONGOC_ENABLE_SASL 1 #if MONGOC_ENABLE_SASL != 1 # undef MONGOC_ENABLE_SASL #endif /* * MONGOC_ENABLE_SASL_CYRUS is set from configure to determine if we are * compiled with Cyrus SASL support. */ #define MONGOC_ENABLE_SASL_CYRUS 1 #if MONGOC_ENABLE_SASL_CYRUS != 1 # undef MONGOC_ENABLE_SASL_CYRUS #endif /* * MONGOC_ENABLE_SASL_SSPI is set from configure to determine if we are * compiled with SSPI support. 
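 *
 * Like every feature flag in this header, the macro is defined to 0 or 1 by
 * the build system and then #undef'd below when it is not 1, so consuming
 * code can use a plain #ifdef check, for example:
 *
 *    #ifdef MONGOC_ENABLE_SASL_SSPI
 *    // SSPI-specific code
 *    #endif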
*/ #define MONGOC_ENABLE_SASL_SSPI 0 #if MONGOC_ENABLE_SASL_SSPI != 1 # undef MONGOC_ENABLE_SASL_SSPI #endif /* * MONGOC_HAVE_SASL_CLIENT_DONE is set from configure to determine if we * have SASL and its version is new enough to use sasl_client_done (), * which supersedes sasl_done (). */ #define MONGOC_HAVE_SASL_CLIENT_DONE 1 #if MONGOC_HAVE_SASL_CLIENT_DONE != 1 # undef MONGOC_HAVE_SASL_CLIENT_DONE #endif /* * Disable automatic calls to mongoc_init() and mongoc_cleanup() * before main() is called, and after exit() (respectively). */ #define MONGOC_NO_AUTOMATIC_GLOBALS 1 #if MONGOC_NO_AUTOMATIC_GLOBALS != 1 # undef MONGOC_NO_AUTOMATIC_GLOBALS #endif /* * MONGOC_HAVE_SOCKLEN is set from configure to determine if we * need to emulate the type. */ #define MONGOC_HAVE_SOCKLEN 1 #if MONGOC_HAVE_SOCKLEN != 1 # undef MONGOC_HAVE_SOCKLEN #endif /* * MONGOC_HAVE_DNSAPI is set from configure to determine if we should use the * Windows dnsapi for SRV record lookups. */ #define MONGOC_HAVE_DNSAPI 0 #if MONGOC_HAVE_DNSAPI != 1 # undef MONGOC_HAVE_DNSAPI #endif /* * MONGOC_HAVE_RES_NSEARCH is set from configure to determine if we * have thread-safe res_nsearch(). */ #define MONGOC_HAVE_RES_NSEARCH 1 #if MONGOC_HAVE_RES_NSEARCH != 1 # undef MONGOC_HAVE_RES_NSEARCH #endif /* * MONGOC_HAVE_RES_NDESTROY is set from configure to determine if we * have BSD / Darwin's res_ndestroy(). */ #define MONGOC_HAVE_RES_NDESTROY 1 #if MONGOC_HAVE_RES_NDESTROY != 1 # undef MONGOC_HAVE_RES_NDESTROY #endif /* * MONGOC_HAVE_RES_NCLOSE is set from configure to determine if we * have Linux's res_nclose(). */ #define MONGOC_HAVE_RES_NCLOSE 0 #if MONGOC_HAVE_RES_NCLOSE != 1 # undef MONGOC_HAVE_RES_NCLOSE #endif /* * MONGOC_HAVE_RES_SEARCH is set from configure to determine if we * have thread-unsafe res_search(). It's unset if we have the preferred * res_nsearch(). */ #define MONGOC_HAVE_RES_SEARCH 0 #if MONGOC_HAVE_RES_SEARCH != 1 # undef MONGOC_HAVE_RES_SEARCH #endif /* * Set from configure, see * https://curl.haxx.se/mail/lib-2009-04/0287.html */ #define MONGOC_SOCKET_ARG2 struct sockaddr #define MONGOC_SOCKET_ARG3 socklen_t /* * Enable wire protocol compression negotiation * */ #define MONGOC_ENABLE_COMPRESSION 1 #if MONGOC_ENABLE_COMPRESSION != 1 # undef MONGOC_ENABLE_COMPRESSION #endif /* * Set if we have snappy compression support * */ #define MONGOC_ENABLE_COMPRESSION_SNAPPY 0 #if MONGOC_ENABLE_COMPRESSION_SNAPPY != 1 # undef MONGOC_ENABLE_COMPRESSION_SNAPPY #endif /* * Set if we have zlib compression support * */ #define MONGOC_ENABLE_COMPRESSION_ZLIB 1 #if MONGOC_ENABLE_COMPRESSION_ZLIB != 1 # undef MONGOC_ENABLE_COMPRESSION_ZLIB #endif /* * Set if we have zstd compression support * */ #define MONGOC_ENABLE_COMPRESSION_ZSTD 0 #if MONGOC_ENABLE_COMPRESSION_ZSTD != 1 # undef MONGOC_ENABLE_COMPRESSION_ZSTD #endif /* * Set if performance counters are available and not disabled. * */ #define MONGOC_ENABLE_SHM_COUNTERS 0 #if MONGOC_ENABLE_SHM_COUNTERS != 1 # undef MONGOC_ENABLE_SHM_COUNTERS #endif /* * Set if we have enabled fast counters on Intel using the RDTSCP instruction * */ #define MONGOC_ENABLE_RDTSCP 0 #if MONGOC_ENABLE_RDTSCP != 1 # undef MONGOC_ENABLE_RDTSCP #endif /* * Set if we have the sched_getcpu() function for use with counters * */ #define MONGOC_HAVE_SCHED_GETCPU 0 #if MONGOC_HAVE_SCHED_GETCPU != 1 # undef MONGOC_HAVE_SCHED_GETCPU #endif /* * Set if tracing is enabled. Logs things like network communication and * entry/exit of certain functions. 
* */ #define MONGOC_TRACE 1 #if MONGOC_TRACE != 1 # undef MONGOC_TRACE #endif /* * Set if we have ICU support. */ #define MONGOC_ENABLE_ICU 0 #if MONGOC_ENABLE_ICU != 1 # undef MONGOC_ENABLE_ICU #endif /* * NOTICE: * If you're about to update this file and add a config flag, make sure to * update: * o The bitfield in mongoc-handshake-private.h * o _get_config_bitfield() in mongoc-handshake.c * o examples/parse_handshake_cfg.py * o test_handshake_config_string in test-mongoc-handshake.c */ #endif /* MONGOC_CONFIG_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-config.h.in0000644000076500000240000002201613572250757025336 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION) #error "Only can be included directly." #endif #ifndef MONGOC_CONFIG_H #define MONGOC_CONFIG_H /* MONGOC_USER_SET_CFLAGS is set from config based on what compiler flags were * used to compile mongoc */ #define MONGOC_USER_SET_CFLAGS "@MONGOC_USER_SET_CFLAGS@" #define MONGOC_USER_SET_LDFLAGS "@MONGOC_USER_SET_LDFLAGS@" /* MONGOC_CC is used to determine what C compiler was used to compile mongoc */ #define MONGOC_CC "@MONGOC_CC@" /* * MONGOC_ENABLE_SSL_SECURE_CHANNEL is set from configure to determine if we are * compiled with Native SSL support on Windows */ #define MONGOC_ENABLE_SSL_SECURE_CHANNEL @MONGOC_ENABLE_SSL_SECURE_CHANNEL@ #if MONGOC_ENABLE_SSL_SECURE_CHANNEL != 1 # undef MONGOC_ENABLE_SSL_SECURE_CHANNEL #endif /* * MONGOC_ENABLE_CRYPTO_CNG is set from configure to determine if we are * compiled with Native Crypto support on Windows */ #define MONGOC_ENABLE_CRYPTO_CNG @MONGOC_ENABLE_CRYPTO_CNG@ #if MONGOC_ENABLE_CRYPTO_CNG != 1 # undef MONGOC_ENABLE_CRYPTO_CNG #endif /* * MONGOC_ENABLE_SSL_SECURE_TRANSPORT is set from configure to determine if we are * compiled with Native SSL support on Darwin */ #define MONGOC_ENABLE_SSL_SECURE_TRANSPORT @MONGOC_ENABLE_SSL_SECURE_TRANSPORT@ #if MONGOC_ENABLE_SSL_SECURE_TRANSPORT != 1 # undef MONGOC_ENABLE_SSL_SECURE_TRANSPORT #endif /* * MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO is set from configure to determine if we are * compiled with Native Crypto support on Darwin */ #define MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO @MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO@ #if MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO != 1 # undef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO #endif /* * MONGOC_ENABLE_SSL_LIBRESSL is set from configure to determine if we are * compiled with LibreSSL support. */ #define MONGOC_ENABLE_SSL_LIBRESSL @MONGOC_ENABLE_SSL_LIBRESSL@ #if MONGOC_ENABLE_SSL_LIBRESSL != 1 # undef MONGOC_ENABLE_SSL_LIBRESSL #endif /* * MONGOC_ENABLE_SSL_OPENSSL is set from configure to determine if we are * compiled with OpenSSL support. */ #define MONGOC_ENABLE_SSL_OPENSSL @MONGOC_ENABLE_SSL_OPENSSL@ #if MONGOC_ENABLE_SSL_OPENSSL != 1 # undef MONGOC_ENABLE_SSL_OPENSSL #endif /* * MONGOC_ENABLE_CRYPTO_LIBCRYPTO is set from configure to determine if we are * compiled with OpenSSL support. 
*/ #define MONGOC_ENABLE_CRYPTO_LIBCRYPTO @MONGOC_ENABLE_CRYPTO_LIBCRYPTO@ #if MONGOC_ENABLE_CRYPTO_LIBCRYPTO != 1 # undef MONGOC_ENABLE_CRYPTO_LIBCRYPTO #endif /* * MONGOC_ENABLE_SSL is set from configure to determine if we are * compiled with any SSL support. */ #define MONGOC_ENABLE_SSL @MONGOC_ENABLE_SSL@ #if MONGOC_ENABLE_SSL != 1 # undef MONGOC_ENABLE_SSL #endif /* * MONGOC_ENABLE_CRYPTO is set from configure to determine if we are * compiled with any crypto support. */ #define MONGOC_ENABLE_CRYPTO @MONGOC_ENABLE_CRYPTO@ #if MONGOC_ENABLE_CRYPTO != 1 # undef MONGOC_ENABLE_CRYPTO #endif /* * Use system crypto profile */ #define MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE @MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE@ #if MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE != 1 # undef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE #endif /* * Use ASN1_STRING_get0_data () rather than the deprecated ASN1_STRING_data */ #define MONGOC_HAVE_ASN1_STRING_GET0_DATA @MONGOC_HAVE_ASN1_STRING_GET0_DATA@ #if MONGOC_HAVE_ASN1_STRING_GET0_DATA != 1 # undef MONGOC_HAVE_ASN1_STRING_GET0_DATA #endif /* * MONGOC_ENABLE_SASL is set from configure to determine if we are * compiled with SASL support. */ #define MONGOC_ENABLE_SASL @MONGOC_ENABLE_SASL@ #if MONGOC_ENABLE_SASL != 1 # undef MONGOC_ENABLE_SASL #endif /* * MONGOC_ENABLE_SASL_CYRUS is set from configure to determine if we are * compiled with Cyrus SASL support. */ #define MONGOC_ENABLE_SASL_CYRUS @MONGOC_ENABLE_SASL_CYRUS@ #if MONGOC_ENABLE_SASL_CYRUS != 1 # undef MONGOC_ENABLE_SASL_CYRUS #endif /* * MONGOC_ENABLE_SASL_SSPI is set from configure to determine if we are * compiled with SSPI support. */ #define MONGOC_ENABLE_SASL_SSPI @MONGOC_ENABLE_SASL_SSPI@ #if MONGOC_ENABLE_SASL_SSPI != 1 # undef MONGOC_ENABLE_SASL_SSPI #endif /* * MONGOC_HAVE_SASL_CLIENT_DONE is set from configure to determine if we * have SASL and its version is new enough to use sasl_client_done (), * which supersedes sasl_done (). */ #define MONGOC_HAVE_SASL_CLIENT_DONE @MONGOC_HAVE_SASL_CLIENT_DONE@ #if MONGOC_HAVE_SASL_CLIENT_DONE != 1 # undef MONGOC_HAVE_SASL_CLIENT_DONE #endif /* * Disable automatic calls to mongoc_init() and mongoc_cleanup() * before main() is called, and after exit() (respectively). */ #define MONGOC_NO_AUTOMATIC_GLOBALS @MONGOC_NO_AUTOMATIC_GLOBALS@ #if MONGOC_NO_AUTOMATIC_GLOBALS != 1 # undef MONGOC_NO_AUTOMATIC_GLOBALS #endif /* * MONGOC_HAVE_SOCKLEN is set from configure to determine if we * need to emulate the type. */ #define MONGOC_HAVE_SOCKLEN @MONGOC_HAVE_SOCKLEN@ #if MONGOC_HAVE_SOCKLEN != 1 # undef MONGOC_HAVE_SOCKLEN #endif /* * MONGOC_HAVE_DNSAPI is set from configure to determine if we should use the * Windows dnsapi for SRV record lookups. */ #define MONGOC_HAVE_DNSAPI @MONGOC_HAVE_DNSAPI@ #if MONGOC_HAVE_DNSAPI != 1 # undef MONGOC_HAVE_DNSAPI #endif /* * MONGOC_HAVE_RES_NSEARCH is set from configure to determine if we * have thread-safe res_nsearch(). */ #define MONGOC_HAVE_RES_NSEARCH @MONGOC_HAVE_RES_NSEARCH@ #if MONGOC_HAVE_RES_NSEARCH != 1 # undef MONGOC_HAVE_RES_NSEARCH #endif /* * MONGOC_HAVE_RES_NDESTROY is set from configure to determine if we * have BSD / Darwin's res_ndestroy(). */ #define MONGOC_HAVE_RES_NDESTROY @MONGOC_HAVE_RES_NDESTROY@ #if MONGOC_HAVE_RES_NDESTROY != 1 # undef MONGOC_HAVE_RES_NDESTROY #endif /* * MONGOC_HAVE_RES_NCLOSE is set from configure to determine if we * have Linux's res_nclose(). 
*/ #define MONGOC_HAVE_RES_NCLOSE @MONGOC_HAVE_RES_NCLOSE@ #if MONGOC_HAVE_RES_NCLOSE != 1 # undef MONGOC_HAVE_RES_NCLOSE #endif /* * MONGOC_HAVE_RES_SEARCH is set from configure to determine if we * have thread-unsafe res_search(). It's unset if we have the preferred * res_nsearch(). */ #define MONGOC_HAVE_RES_SEARCH @MONGOC_HAVE_RES_SEARCH@ #if MONGOC_HAVE_RES_SEARCH != 1 # undef MONGOC_HAVE_RES_SEARCH #endif /* * Set from configure, see * https://curl.haxx.se/mail/lib-2009-04/0287.html */ #define MONGOC_SOCKET_ARG2 @MONGOC_SOCKET_ARG2@ #define MONGOC_SOCKET_ARG3 @MONGOC_SOCKET_ARG3@ /* * Enable wire protocol compression negotiation * */ #define MONGOC_ENABLE_COMPRESSION @MONGOC_ENABLE_COMPRESSION@ #if MONGOC_ENABLE_COMPRESSION != 1 # undef MONGOC_ENABLE_COMPRESSION #endif /* * Set if we have snappy compression support * */ #define MONGOC_ENABLE_COMPRESSION_SNAPPY @MONGOC_ENABLE_COMPRESSION_SNAPPY@ #if MONGOC_ENABLE_COMPRESSION_SNAPPY != 1 # undef MONGOC_ENABLE_COMPRESSION_SNAPPY #endif /* * Set if we have zlib compression support * */ #define MONGOC_ENABLE_COMPRESSION_ZLIB @MONGOC_ENABLE_COMPRESSION_ZLIB@ #if MONGOC_ENABLE_COMPRESSION_ZLIB != 1 # undef MONGOC_ENABLE_COMPRESSION_ZLIB #endif /* * Set if we have zstd compression support * */ #define MONGOC_ENABLE_COMPRESSION_ZSTD @MONGOC_ENABLE_COMPRESSION_ZSTD@ #if MONGOC_ENABLE_COMPRESSION_ZSTD != 1 # undef MONGOC_ENABLE_COMPRESSION_ZSTD #endif /* * Set if performance counters are available and not disabled. * */ #define MONGOC_ENABLE_SHM_COUNTERS @MONGOC_ENABLE_SHM_COUNTERS@ #if MONGOC_ENABLE_SHM_COUNTERS != 1 # undef MONGOC_ENABLE_SHM_COUNTERS #endif /* * Set if we have enabled fast counters on Intel using the RDTSCP instruction * */ #define MONGOC_ENABLE_RDTSCP @MONGOC_ENABLE_RDTSCP@ #if MONGOC_ENABLE_RDTSCP != 1 # undef MONGOC_ENABLE_RDTSCP #endif /* * Set if we have the sched_getcpu() function for use with counters * */ #define MONGOC_HAVE_SCHED_GETCPU @MONGOC_HAVE_SCHED_GETCPU@ #if MONGOC_HAVE_SCHED_GETCPU != 1 # undef MONGOC_HAVE_SCHED_GETCPU #endif /* * Set if tracing is enabled. Logs things like network communication and * entry/exit of certain functions. * */ #define MONGOC_TRACE @MONGOC_TRACE@ #if MONGOC_TRACE != 1 # undef MONGOC_TRACE #endif /* * Set if we have ICU support. */ #define MONGOC_ENABLE_ICU @MONGOC_ENABLE_ICU@ #if MONGOC_ENABLE_ICU != 1 # undef MONGOC_ENABLE_ICU #endif /* * NOTICE: * If you're about to update this file and add a config flag, make sure to * update: * o The bitfield in mongoc-handshake-private.h * o _get_config_bitfield() in mongoc-handshake.c * o examples/parse_handshake_cfg.py * o test_handshake_config_string in test-mongoc-handshake.c */ #endif /* MONGOC_CONFIG_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-counters-private.h0000644000076500000240000001362413572250757027003 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_COUNTERS_PRIVATE_H #define MONGOC_COUNTERS_PRIVATE_H #include #include #ifdef __linux__ #include #include #elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \ defined(__OpenBSD__) #include #include #include #elif defined(__hpux__) #include #endif BSON_BEGIN_DECLS void _mongoc_counters_init (void); void _mongoc_counters_cleanup (void); static BSON_INLINE unsigned _mongoc_get_cpu_count (void) { #if defined(__linux__) return get_nprocs (); #elif defined(__hpux__) struct pst_dynamic psd; if (pstat_getdynamic (&psd, sizeof (psd), (size_t) 1, 0) != -1) { return psd.psd_max_proc_cnt; } return 1; #elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \ defined(__OpenBSD__) int mib[2]; int maxproc; size_t len; mib[0] = CTL_HW; mib[1] = HW_NCPU; len = sizeof (maxproc); if (-1 == sysctl (mib, 2, &maxproc, &len, NULL, 0)) { return 1; } return len; #elif defined(__APPLE__) || defined(__sun) || defined(_AIX) int ncpu; ncpu = (int) sysconf (_SC_NPROCESSORS_ONLN); return (ncpu > 0) ? ncpu : 1; #elif defined(_MSC_VER) || defined(_WIN32) SYSTEM_INFO si; GetSystemInfo (&si); return si.dwNumberOfProcessors; #else #warning "_mongoc_get_cpu_count() not supported, defaulting to 1." return 1; #endif } #define _mongoc_counter_add(v, count) bson_atomic_int64_add (&(v), (count)) #if defined(MONGOC_ENABLE_RDTSCP) static BSON_INLINE unsigned _mongoc_sched_getcpu (void) { volatile uint32_t rax, rdx, rcx; __asm__ volatile("rdtscp\n" : "=a"(rax), "=d"(rdx), "=c"(rcx) : :); unsigned node_id, core_id; // node_id = (rcx & 0xFFF000)>>12; // node_id is unused core_id = rcx & 0xFFF; return core_id; } #elif defined(MONGOC_HAVE_SCHED_GETCPU) #define _mongoc_sched_getcpu sched_getcpu #else #define _mongoc_sched_getcpu() (0) #endif #ifndef SLOTS_PER_CACHELINE #define SLOTS_PER_CACHELINE 8 #endif typedef struct { int64_t slots[SLOTS_PER_CACHELINE]; } mongoc_counter_slots_t; typedef struct { mongoc_counter_slots_t *cpus; } mongoc_counter_t; #define COUNTER(ident, Category, Name, Description) \ extern mongoc_counter_t __mongoc_counter_##ident; #include "mongoc-counters.defs" #undef COUNTER enum { #define COUNTER(ident, Category, Name, Description) COUNTER_##ident, #include "mongoc-counters.defs" #undef COUNTER LAST_COUNTER }; #ifdef MONGOC_ENABLE_SHM_COUNTERS #define COUNTER(ident, Category, Name, Description) \ static BSON_INLINE void mongoc_counter_##ident##_add (int64_t val) \ { \ (void) _mongoc_counter_add ( \ __mongoc_counter_##ident.cpus[_mongoc_sched_getcpu ()] \ .slots[COUNTER_##ident % SLOTS_PER_CACHELINE], \ val); \ } \ static BSON_INLINE void mongoc_counter_##ident##_inc (void) \ { \ mongoc_counter_##ident##_add (1); \ } \ static BSON_INLINE void mongoc_counter_##ident##_dec (void) \ { \ mongoc_counter_##ident##_add (-1); \ } \ static BSON_INLINE void mongoc_counter_##ident##_reset (void) \ { \ uint32_t i; \ for (i = 0; i < _mongoc_get_cpu_count (); i++) { \ __mongoc_counter_##ident.cpus[i] \ .slots[COUNTER_##ident % SLOTS_PER_CACHELINE] = 0; \ } \ bson_memory_barrier (); \ } #include "mongoc-counters.defs" #undef COUNTER #else /* when counters are disabled, these functions are no-ops */ #define COUNTER(ident, Category, Name, Description) \ static BSON_INLINE void mongoc_counter_##ident##_add (int64_t val) \ { \ } \ static BSON_INLINE void mongoc_counter_##ident##_inc (void) \ { \ } \ static BSON_INLINE void mongoc_counter_##ident##_dec (void) \ { \ } \ static BSON_INLINE void mongoc_counter_##ident##_reset (void) \ { \ 
} #include "mongoc-counters.defs" #undef COUNTER #endif BSON_END_DECLS #endif /* MONGOC_COUNTERS_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-counters.c0000644000076500000240000001775013572250757025332 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #ifdef BSON_OS_UNIX #include #include #endif #ifdef _MSC_VER #include #endif #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-log.h" #pragma pack(1) typedef struct { uint32_t offset; uint32_t slot; char category[24]; char name[32]; char description[64]; } mongoc_counter_info_t; #pragma pack() BSON_STATIC_ASSERT2 (counter_info_t, sizeof (mongoc_counter_info_t) == 128); #pragma pack(1) typedef struct { uint32_t size; uint32_t n_cpu; uint32_t n_counters; uint32_t infos_offset; uint32_t values_offset; uint8_t padding[44]; } mongoc_counters_t; #pragma pack() BSON_STATIC_ASSERT2 (counters_t, sizeof (mongoc_counters_t) == 64); #ifdef MONGOC_ENABLE_SHM_COUNTERS /* When counters are enabled at compile time but fail to initiate a shared * memory segment, then fall back to a malloc'd segment. This malloc'd segment * isn't useful to anyone. But by using this fallback, the counter increment * functions can behave the same. I.e. they do not need to have a runtime check * for whether or not initiating the shared memory segment succeeded. */ static void *gCounterFallback = NULL; #define COUNTER(ident, Category, Name, Description) \ mongoc_counter_t __mongoc_counter_##ident; #include "mongoc-counters.defs" #undef COUNTER /** * mongoc_counters_use_shm: * * Checks to see if counters should be exported over a shared memory segment. * * Returns: true if SHM is to be used. */ static bool mongoc_counters_use_shm (void) { return !getenv ("MONGOC_DISABLE_SHM"); } /** * mongoc_counters_calc_size: * * Returns the number of bytes required for the shared memory segment of * the process. This segment contains the various statistical counters for * the process. * * Returns: The number of bytes required. */ static size_t mongoc_counters_calc_size (void) { size_t n_cpu; size_t n_groups; size_t size; n_cpu = _mongoc_get_cpu_count (); n_groups = (LAST_COUNTER / SLOTS_PER_CACHELINE) + 1; size = (sizeof (mongoc_counters_t) + (LAST_COUNTER * sizeof (mongoc_counter_info_t)) + (n_cpu * n_groups * sizeof (mongoc_counter_slots_t))); #ifdef BSON_OS_UNIX return BSON_MAX (getpagesize (), size); #else return size; #endif } #endif /** * mongoc_counters_destroy: * * Removes the shared memory segment for the current processes counters. 
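 * The segment is named "/mongoc-<pid>" (see mongoc_counters_alloc below). If
 * shared memory was unavailable or disabled via the MONGOC_DISABLE_SHM
 * environment variable, only the malloc'd fallback buffer is freed instead.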
*/ void _mongoc_counters_cleanup (void) { #ifdef MONGOC_ENABLE_SHM_COUNTERS if (gCounterFallback) { bson_free (gCounterFallback); gCounterFallback = NULL; #if defined(BSON_OS_UNIX) && defined(MONGOC_ENABLE_SHM_COUNTERS) } else { char name[32]; int pid; pid = getpid (); bson_snprintf (name, sizeof name, "/mongoc-%u", pid); shm_unlink (name); #endif } #endif } #ifdef MONGOC_ENABLE_SHM_COUNTERS /** * mongoc_counters_alloc: * @size: The size of the shared memory segment. * * This function allocates the shared memory segment for use by counters * within the process. * * Returns: A shared memory segment, or malloc'd memory on failure. */ static void * mongoc_counters_alloc (size_t size) { #if defined(BSON_OS_UNIX) && defined(MONGOC_ENABLE_SHM_COUNTERS) void *mem; char name[32]; int pid; int fd; if (!mongoc_counters_use_shm ()) { goto skip_shm; } pid = getpid (); bson_snprintf (name, sizeof name, "/mongoc-%u", pid); #ifndef O_NOFOLLOW #define O_NOFOLLOW 0 #endif if (-1 == (fd = shm_open (name, O_CREAT | O_EXCL | O_RDWR, S_IRUSR | S_IWUSR | O_NOFOLLOW))) { goto fail_noclean; } /* * NOTE: * * ftruncate() will cause reads to be zero. Therefore, we don't need to * do write() of zeroes to initialize the shared memory area. */ if (-1 == ftruncate (fd, size)) { goto fail_cleanup; } mem = mmap (NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (mem == MAP_FAILED) { goto fail_cleanup; } close (fd); memset (mem, 0, size); return mem; fail_cleanup: shm_unlink (name); close (fd); fail_noclean: MONGOC_WARNING ("Falling back to malloc for counters."); skip_shm: #endif gCounterFallback = (void *) bson_malloc0 (size); return gCounterFallback; } /** * mongoc_counters_register: * @counters: A mongoc_counter_t. * @num: The counter number. * @category: The counter category. * @name: THe counter name. * @description The counter description. * * Registers a new counter in the memory segment for counters. If the counters * are exported over shared memory, it will be made available. * * Returns: The offset to the data for the counters values. */ static size_t mongoc_counters_register (mongoc_counters_t *counters, uint32_t num, const char *category, const char *name, const char *description) { mongoc_counter_info_t *infos; char *segment; int n_cpu; BSON_ASSERT (counters); BSON_ASSERT (category); BSON_ASSERT (name); BSON_ASSERT (description); /* * Implementation Note: * * The memory barrier is required so that all of the above has been * completed. Then increment the n_counters so that a reading application * only knows about the counter after we have initialized it. */ n_cpu = _mongoc_get_cpu_count (); segment = (char *) counters; infos = (mongoc_counter_info_t *) (segment + counters->infos_offset); infos = &infos[counters->n_counters]; infos->slot = num % SLOTS_PER_CACHELINE; infos->offset = (counters->values_offset + ((num / SLOTS_PER_CACHELINE) * n_cpu * sizeof (mongoc_counter_slots_t))); bson_strncpy (infos->category, category, sizeof infos->category); bson_strncpy (infos->name, name, sizeof infos->name); bson_strncpy (infos->description, description, sizeof infos->description); bson_memory_barrier (); counters->n_counters++; return infos->offset; } #endif /** * mongoc_counters_init: * * Initializes the mongoc counters system. This should be run on library * initialization using the GCC constructor attribute. 
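 *
 * The segment obtained from mongoc_counters_alloc () is laid out as:
 *
 *   [mongoc_counters_t header, 64 bytes]
 *   [LAST_COUNTER * mongoc_counter_info_t, 128 bytes each]
 *   [n_cpu * n_groups * mongoc_counter_slots_t value slots]
 *
 * (at least one page on Unix). counters->size is written last, after a
 * memory barrier, so a reader attaching to the segment never observes a
 * partially initialized layout.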
*/ void _mongoc_counters_init (void) { #ifdef MONGOC_ENABLE_SHM_COUNTERS mongoc_counter_info_t *info; mongoc_counters_t *counters; size_t infos_size; size_t off; size_t size; char *segment; size = mongoc_counters_calc_size (); segment = (char *) mongoc_counters_alloc (size); infos_size = LAST_COUNTER * sizeof *info; counters = (mongoc_counters_t *) segment; counters->n_cpu = _mongoc_get_cpu_count (); counters->n_counters = 0; counters->infos_offset = sizeof *counters; counters->values_offset = (uint32_t) (counters->infos_offset + infos_size); BSON_ASSERT ((counters->values_offset % 64) == 0); #define COUNTER(ident, Category, Name, Desc) \ off = mongoc_counters_register ( \ counters, COUNTER_##ident, Category, Name, Desc); \ __mongoc_counter_##ident.cpus = (mongoc_counter_slots_t *) (segment + off); #include "mongoc-counters.defs" #undef COUNTER /* * NOTE: * * Only update the size of the shared memory area for the client after * we have initialized the rest of the counters. Don't forget our memory * barrier to prevent compiler reordering. */ bson_memory_barrier (); counters->size = (uint32_t) size; #endif } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-counters.defs0000644000076500000240000000736313572250757026030 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
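 *
 * This file is an X-macro list: it carries no code of its own and is
 * included several times with different definitions of COUNTER(). In
 * mongoc-counters-private.h it expands to extern declarations, enum
 * constants, and static inline _add/_inc/_dec/_reset helpers; in
 * mongoc-counters.c it expands to the counter storage and to the
 * registration calls made from _mongoc_counters_init ().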
*/ COUNTER(op_egress_total, "Operations", "Egress Total", "The number of sent operations.") COUNTER(op_ingress_total, "Operations", "Ingress Total", "The number of received operations.") COUNTER(op_egress_msg, "Operations", "Egress Messages", "The number of sent messages operations.") COUNTER(op_ingress_msg, "Operations", "Ingress Messages", "The number of received messages operations.") COUNTER(op_egress_compressed, "Operations", "Egress Compressed", "The number of sent compressed operations.") COUNTER(op_ingress_compressed, "Operations", "Ingress Compressed", "The number of received compressed operations.") COUNTER(op_egress_query, "Operations", "Egress Queries", "The number of sent Query operations.") COUNTER(op_ingress_reply, "Operations", "Ingress Reply", "The number of received Reply operations.") COUNTER(op_egress_getmore, "Operations", "Egress GetMore", "The number of sent GetMore operations.") COUNTER(op_egress_insert, "Operations", "Egress Insert", "The number of sent Insert operations.") COUNTER(op_egress_delete, "Operations", "Egress Delete", "The number of sent Delete operations.") COUNTER(op_egress_update, "Operations", "Egress Update", "The number of sent Update operations.") COUNTER(op_egress_killcursors, "Operations", "Egress KillCursors", "The number of sent KillCursors operations.") COUNTER(cursors_active, "Cursors", "Active", "The number of active cursors.") COUNTER(cursors_disposed, "Cursors", "Disposed", "The number of disposed cursors.") COUNTER(clients_active, "Clients", "Active", "The number of active clients.") COUNTER(clients_disposed, "Clients", "Disposed", "The number of disposed clients.") COUNTER(streams_active, "Streams", "Active", "The number of active streams.") COUNTER(streams_disposed, "Streams", "Disposed", "The number of disposed streams.") COUNTER(streams_egress, "Streams", "Egress Bytes", "The number of bytes sent.") COUNTER(streams_ingress, "Streams", "Ingress Bytes", "The number of bytes received.") COUNTER(streams_timeout, "Streams", "N Socket Timeouts", "The number of socket timeouts.") COUNTER(client_pools_active, "Client Pools", "Active", "The number of active client pools.") COUNTER(client_pools_disposed, "Client Pools", "Disposed", "The number of disposed client pools.") COUNTER(protocol_ingress_error, "Protocol", "Ingress Errors", "The number of protocol errors on ingress.") COUNTER(auth_failure, "Auth", "Failures", "The number of failed authentication requests.") COUNTER(auth_success, "Auth", "Success", "The number of successful authentication requests.") COUNTER(dns_failure, "DNS", "Failure", "The number of failed DNS requests.") COUNTER(dns_success, "DNS", "Success", "The number of successful DNS requests.") mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto-cng-private.h0000644000076500000240000000372013572250757027222 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifdef MONGOC_ENABLE_CRYPTO_CNG #ifndef MONGOC_CRYPTO_CNG_PRIVATE_H #define MONGOC_CRYPTO_CNG_PRIVATE_H #include "mongoc/mongoc-config.h" BSON_BEGIN_DECLS void mongoc_crypto_cng_init (void); void mongoc_crypto_cng_cleanup (void); void mongoc_crypto_cng_hmac_sha1 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool mongoc_crypto_cng_sha1 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); void mongoc_crypto_cng_hmac_sha256 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool mongoc_crypto_cng_sha256 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); BSON_END_DECLS #endif /* MONGOC_CRYPTO_CNG_PRIVATE_H */ #endif /* MONGOC_ENABLE_CRYPTO_CNG */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto-cng.c0000644000076500000240000001563713572250757025557 0ustar alcaeusstaff/* Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_CRYPTO_CNG #include "mongoc/mongoc-crypto-private.h" #include "mongoc/mongoc-crypto-cng-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-thread-private.h" #include #include #include #define NT_SUCCESS(Status) (((NTSTATUS) (Status)) >= 0) #define STATUS_UNSUCCESSFUL ((NTSTATUS) 0xC0000001L) static BCRYPT_ALG_HANDLE _sha1_hash_algo; static BCRYPT_ALG_HANDLE _sha1_hmac_algo; static BCRYPT_ALG_HANDLE _sha256_hash_algo; static BCRYPT_ALG_HANDLE _sha256_hmac_algo; void mongoc_crypto_cng_init (void) { NTSTATUS status = STATUS_UNSUCCESSFUL; _sha1_hash_algo = 0; status = BCryptOpenAlgorithmProvider ( &_sha1_hash_algo, BCRYPT_SHA1_ALGORITHM, NULL, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptOpenAlgorithmProvider(SHA1): %x", status); } _sha1_hmac_algo = 0; status = BCryptOpenAlgorithmProvider (&_sha1_hmac_algo, BCRYPT_SHA1_ALGORITHM, NULL, BCRYPT_ALG_HANDLE_HMAC_FLAG); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptOpenAlgorithmProvider(SHA1 HMAC): %x", status); } _sha256_hash_algo = 0; status = BCryptOpenAlgorithmProvider ( &_sha256_hash_algo, BCRYPT_SHA256_ALGORITHM, NULL, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptOpenAlgorithmProvider(SHA256): %x", status); } _sha256_hmac_algo = 0; status = BCryptOpenAlgorithmProvider (&_sha256_hmac_algo, BCRYPT_SHA256_ALGORITHM, NULL, BCRYPT_ALG_HANDLE_HMAC_FLAG); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptOpenAlgorithmProvider(SHA256 HMAC): %x", status); } } void mongoc_crypto_cng_cleanup (void) { if (_sha1_hash_algo) { BCryptCloseAlgorithmProvider (&_sha1_hash_algo, 0); } if (_sha1_hmac_algo) { BCryptCloseAlgorithmProvider (&_sha1_hmac_algo, 0); } if (_sha256_hash_algo) { BCryptCloseAlgorithmProvider (&_sha256_hash_algo, 0); } if (_sha256_hash_algo) { BCryptCloseAlgorithmProvider (&_sha256_hash_algo, 0); } } bool 
_mongoc_crypto_cng_hmac_or_hash (BCRYPT_ALG_HANDLE algorithm, void *key, size_t key_length, void *data, size_t data_length, void *output) { char *hash_object_buffer = 0; ULONG hash_object_length = 0; BCRYPT_HASH_HANDLE hash = 0; ULONG mac_length = 0; NTSTATUS status = STATUS_UNSUCCESSFUL; bool retval = false; ULONG noop = 0; status = BCryptGetProperty (algorithm, BCRYPT_OBJECT_LENGTH, (char *) &hash_object_length, sizeof hash_object_length, &noop, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptGetProperty(): OBJECT_LENGTH %x", status); return false; } status = BCryptGetProperty (algorithm, BCRYPT_HASH_LENGTH, (char *) &mac_length, sizeof mac_length, &noop, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptGetProperty(): HASH_LENGTH %x", status); return false; } hash_object_buffer = bson_malloc (hash_object_length); status = BCryptCreateHash (algorithm, &hash, hash_object_buffer, hash_object_length, key, (ULONG) key_length, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptCreateHash(): %x", status); goto cleanup; } status = BCryptHashData (hash, data, (ULONG) data_length, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptHashData(): %x", status); goto cleanup; } status = BCryptFinishHash (hash, output, mac_length, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptFinishHash(): %x", status); goto cleanup; } retval = true; cleanup: if (hash) { (void) BCryptDestroyHash (hash); } bson_free (hash_object_buffer); return retval; } void mongoc_crypto_cng_hmac_sha1 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out) { NTSTATUS status = STATUS_UNSUCCESSFUL; if (!_sha1_hmac_algo) { return; } _mongoc_crypto_cng_hmac_or_hash ( _sha1_hmac_algo, key, key_len, data, data_len, hmac_out); } bool mongoc_crypto_cng_sha1 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out) { NTSTATUS status = STATUS_UNSUCCESSFUL; bool res; if (!_sha1_hash_algo) { return false; } res = _mongoc_crypto_cng_hmac_or_hash ( _sha1_hash_algo, NULL, 0, input, input_len, hash_out); return res; } void mongoc_crypto_cng_hmac_sha256 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out) { NTSTATUS status = STATUS_UNSUCCESSFUL; if (!_sha256_hmac_algo) { return; } _mongoc_crypto_cng_hmac_or_hash ( _sha256_hmac_algo, key, key_len, data, data_len, hmac_out); } bool mongoc_crypto_cng_sha256 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out) { NTSTATUS status = STATUS_UNSUCCESSFUL; bool res; if (!_sha256_hash_algo) { return false; } res = _mongoc_crypto_cng_hmac_or_hash ( _sha256_hash_algo, NULL, 0, input, input_len, hash_out); return res; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto-private.h0000644000076500000240000000417413572250757031265 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifdef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO #ifndef MONGOC_CRYPTO_COMMON_CRYPTO_PRIVATE_H #define MONGOC_CRYPTO_COMMON_CRYPTO_PRIVATE_H #include "mongoc/mongoc-config.h" BSON_BEGIN_DECLS void mongoc_crypto_common_crypto_hmac_sha1 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool mongoc_crypto_common_crypto_sha1 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); void mongoc_crypto_common_crypto_hmac_sha256 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool mongoc_crypto_common_crypto_sha256 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); BSON_END_DECLS #endif /* MONGOC_CRYPTO_COMMON_CRYPTO_PRIVATE_H */ #endif /* MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto-common-crypto.c0000644000076500000240000000501413572250757027602 0ustar alcaeusstaff/* Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-crypto-private.h" #ifdef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO #include "mongoc/mongoc-crypto-common-crypto-private.h" #include #include void mongoc_crypto_common_crypto_hmac_sha1 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out) { /* U1 = HMAC(input, salt + 0001) */ CCHmac ( kCCHmacAlgSHA1, key, (size_t) key_len, data, (size_t) data_len, hmac_out); } bool mongoc_crypto_common_crypto_sha1 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out) { if (CC_SHA1 (input, (CC_LONG) input_len, hash_out)) { return true; } return false; } void mongoc_crypto_common_crypto_hmac_sha256 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out) { CCHmac (kCCHmacAlgSHA256, key, (size_t) key_len, data, (size_t) data_len, hmac_out); } bool mongoc_crypto_common_crypto_sha256 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out) { if (CC_SHA256 (input, (CC_LONG) input_len, hash_out)) { return true; } return false; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto-openssl-private.h0000644000076500000240000000405113572250757030134 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #include "mongoc/mongoc-config.h" #include #ifdef MONGOC_ENABLE_CRYPTO_LIBCRYPTO #ifndef MONGOC_CRYPTO_OPENSSL_PRIVATE_H #define MONGOC_CRYPTO_OPENSSL_PRIVATE_H #include "mongoc/mongoc-crypto-private.h" BSON_BEGIN_DECLS void mongoc_crypto_openssl_hmac_sha1 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool mongoc_crypto_openssl_sha1 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); void mongoc_crypto_openssl_hmac_sha256 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool mongoc_crypto_openssl_sha256 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); BSON_END_DECLS #endif /* MONGOC_CRYPTO_OPENSSL_PRIVATE_H */ #endif /* MONGOC_ENABLE_CRYPTO_LIBCRYPTO */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto-openssl.c0000644000076500000240000000632413572250757026464 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #include #ifdef MONGOC_ENABLE_CRYPTO_LIBCRYPTO #include "mongoc/mongoc-crypto-openssl-private.h" #include "mongoc/mongoc-crypto-private.h" #include #include #include void mongoc_crypto_openssl_hmac_sha1 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out) { /* U1 = HMAC(input, salt + 0001) */ HMAC (EVP_sha1 (), key, key_len, data, data_len, hmac_out, NULL); } #if OPENSSL_VERSION_NUMBER < 0x10100000L || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) EVP_MD_CTX * EVP_MD_CTX_new (void) { return bson_malloc0 (sizeof (EVP_MD_CTX)); } void EVP_MD_CTX_free (EVP_MD_CTX *ctx) { EVP_MD_CTX_cleanup (ctx); bson_free (ctx); } #endif bool mongoc_crypto_openssl_sha1 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out) { EVP_MD_CTX *digest_ctxp = EVP_MD_CTX_new (); bool rval = false; if (1 != EVP_DigestInit_ex (digest_ctxp, EVP_sha1 (), NULL)) { goto cleanup; } if (1 != EVP_DigestUpdate (digest_ctxp, input, input_len)) { goto cleanup; } rval = (1 == EVP_DigestFinal_ex (digest_ctxp, hash_out, NULL)); cleanup: EVP_MD_CTX_free (digest_ctxp); return rval; } void mongoc_crypto_openssl_hmac_sha256 (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out) { /* U1 = HMAC(input, salt + 0001) */ HMAC (EVP_sha256 (), key, key_len, data, data_len, hmac_out, NULL); } bool mongoc_crypto_openssl_sha256 (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out) { EVP_MD_CTX *digest_ctxp = EVP_MD_CTX_new (); bool rval = false; if (1 != EVP_DigestInit_ex (digest_ctxp, EVP_sha256 (), NULL)) { goto cleanup; } if (1 != EVP_DigestUpdate (digest_ctxp, input, input_len)) { goto cleanup; } rval = (1 == EVP_DigestFinal_ex (digest_ctxp, hash_out, NULL)); cleanup: EVP_MD_CTX_free (digest_ctxp); return rval; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto-private.h0000644000076500000240000000403113572250757026451 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #include "mongoc/mongoc-config.h" #include #ifdef MONGOC_ENABLE_CRYPTO #ifndef MONGOC_CRYPTO_PRIVATE_H #define MONGOC_CRYPTO_PRIVATE_H BSON_BEGIN_DECLS typedef struct _mongoc_crypto_t mongoc_crypto_t; typedef enum { MONGOC_CRYPTO_ALGORITHM_SHA_1, MONGOC_CRYPTO_ALGORITHM_SHA_256 } mongoc_crypto_hash_algorithm_t; struct _mongoc_crypto_t { void (*hmac) (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool (*hash) (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); mongoc_crypto_hash_algorithm_t algorithm; }; void mongoc_crypto_init (mongoc_crypto_t *crypto, mongoc_crypto_hash_algorithm_t algo); void mongoc_crypto_hmac (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out); bool mongoc_crypto_hash (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *hash_out); BSON_END_DECLS #endif /* MONGOC_CRYPTO_PRIVATE_H */ #endif /* MONGOC_ENABLE_CRYPTO */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-crypto.c0000644000076500000240000000551013572250757024777 0ustar alcaeusstaff/* Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_CRYPTO #include #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-crypto-private.h" #if defined(MONGOC_ENABLE_CRYPTO_LIBCRYPTO) #include "mongoc/mongoc-crypto-openssl-private.h" #elif defined(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO) #include "mongoc/mongoc-crypto-common-crypto-private.h" #elif defined(MONGOC_ENABLE_CRYPTO_CNG) #include "mongoc/mongoc-crypto-cng-private.h" #endif void mongoc_crypto_init (mongoc_crypto_t *crypto, mongoc_crypto_hash_algorithm_t algo) { crypto->hmac = NULL; crypto->hash = NULL; if (algo == MONGOC_CRYPTO_ALGORITHM_SHA_1) { #ifdef MONGOC_ENABLE_CRYPTO_LIBCRYPTO crypto->hmac = mongoc_crypto_openssl_hmac_sha1; crypto->hash = mongoc_crypto_openssl_sha1; #elif defined(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO) crypto->hmac = mongoc_crypto_common_crypto_hmac_sha1; crypto->hash = mongoc_crypto_common_crypto_sha1; #elif defined(MONGOC_ENABLE_CRYPTO_CNG) crypto->hmac = mongoc_crypto_cng_hmac_sha1; crypto->hash = mongoc_crypto_cng_sha1; #endif } else if (algo == MONGOC_CRYPTO_ALGORITHM_SHA_256) { #ifdef MONGOC_ENABLE_CRYPTO_LIBCRYPTO crypto->hmac = mongoc_crypto_openssl_hmac_sha256; crypto->hash = mongoc_crypto_openssl_sha256; #elif defined(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO) crypto->hmac = mongoc_crypto_common_crypto_hmac_sha256; crypto->hash = mongoc_crypto_common_crypto_sha256; #elif defined(MONGOC_ENABLE_CRYPTO_CNG) crypto->hmac = mongoc_crypto_cng_hmac_sha256; crypto->hash = mongoc_crypto_cng_sha256; #endif } BSON_ASSERT (crypto->hmac); BSON_ASSERT (crypto->hash); crypto->algorithm = algo; } void mongoc_crypto_hmac (mongoc_crypto_t *crypto, const void *key, int key_len, const unsigned char *data, int data_len, unsigned char *hmac_out) { crypto->hmac (crypto, key, key_len, data, data_len, hmac_out); } bool mongoc_crypto_hash (mongoc_crypto_t *crypto, const unsigned char *input, const size_t input_len, unsigned char *output) { return crypto->hash (crypto, input, input_len, output); } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-array.c0000644000076500000240000000635513572250757026120 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" typedef struct _data_array_t { bson_t cmd; bson_t array; bson_iter_t iter; bson_t bson; /* current document */ char *field_name; } data_array_t; static mongoc_cursor_state_t _prime (mongoc_cursor_t *cursor) { bson_iter_t iter; data_array_t *data = (data_array_t *) cursor->impl.data; bson_destroy (&data->array); /* this cursor is only used with the listDatabases command. it iterates * over the array in the response's "databases" field. 
*/ if (_mongoc_cursor_run_command ( cursor, &data->cmd, &cursor->opts, &data->array, false) && bson_iter_init_find (&iter, &data->array, data->field_name) && BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &data->iter)) { return IN_BATCH; } return DONE; } static mongoc_cursor_state_t _pop_from_batch (mongoc_cursor_t *cursor) { uint32_t document_len; const uint8_t *document; data_array_t *data = (data_array_t *) cursor->impl.data; if (bson_iter_next (&data->iter)) { bson_iter_document (&data->iter, &document_len, &document); BSON_ASSERT (bson_init_static (&data->bson, document, document_len)); cursor->current = &data->bson; return IN_BATCH; } return DONE; } static void _clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src) { data_array_t *data_dst = bson_malloc0 (sizeof (data_array_t)); data_array_t *data_src = (data_array_t *) src->data; bson_init (&data_dst->array); bson_copy_to (&data_src->cmd, &data_dst->cmd); data_dst->field_name = bson_strdup (data_src->field_name); dst->data = data_dst; } static void _destroy (mongoc_cursor_impl_t *impl) { data_array_t *data = (data_array_t *) impl->data; bson_destroy (&data->array); bson_destroy (&data->cmd); bson_free (data->field_name); bson_free (data); } mongoc_cursor_t * _mongoc_cursor_array_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *cmd, const bson_t *opts, const char *field_name) { mongoc_cursor_t *cursor = _mongoc_cursor_new_with_opts ( client, db_and_coll, opts, NULL, NULL, NULL); data_array_t *data = bson_malloc0 (sizeof (*data)); bson_copy_to (cmd, &data->cmd); bson_init (&data->array); data->field_name = bson_strdup (field_name); cursor->impl.prime = _prime; cursor->impl.pop_from_batch = _pop_from_batch; cursor->impl.destroy = _destroy; cursor->impl.clone = _clone; cursor->impl.data = (void *) data; return cursor; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-change-stream.c0000644000076500000240000001242213572250757027510 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
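 *
 * This cursor is constructed from an aggregate reply the change stream code
 * has already received (see _mongoc_cursor_change_stream_new ()). It cannot
 * be primed or cloned, and it keeps a copy of the server's
 * "cursor.postBatchResumeToken" from each reply for resume purposes.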
*/ #include "mongoc/mongoc.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" typedef struct _data_change_stream_t { mongoc_cursor_response_t response; bson_t post_batch_resume_token; } _data_change_stream_t; static void _update_post_batch_resume_token (mongoc_cursor_t *cursor) { _data_change_stream_t *data = (_data_change_stream_t *) cursor->impl.data; bson_iter_t iter, child; if (mongoc_cursor_error (cursor, NULL)) { return; } if (bson_iter_init (&iter, &data->response.reply) && bson_iter_find_descendant ( &iter, "cursor.postBatchResumeToken", &child) && BSON_ITER_HOLDS_DOCUMENT (&child)) { uint32_t len; const uint8_t *buf; bson_t post_batch_resume_token; bson_iter_document (&child, &len, &buf); BSON_ASSERT (bson_init_static (&post_batch_resume_token, buf, len)); bson_destroy (&data->post_batch_resume_token); bson_copy_to (&post_batch_resume_token, &data->post_batch_resume_token); } } static mongoc_cursor_state_t _prime (mongoc_cursor_t *cursor) { fprintf (stderr, "Prime unsupported on change stream cursor."); BSON_ASSERT (false); return IN_BATCH; } static mongoc_cursor_state_t _pop_from_batch (mongoc_cursor_t *cursor) { _data_change_stream_t *data = (_data_change_stream_t *) cursor->impl.data; _mongoc_cursor_response_read (cursor, &data->response, &cursor->current); if (cursor->current) { return IN_BATCH; } else { return cursor->cursor_id ? END_OF_BATCH : DONE; } } mongoc_cursor_state_t _get_next_batch (mongoc_cursor_t *cursor) { _data_change_stream_t *data = (_data_change_stream_t *) cursor->impl.data; bson_t getmore_cmd; _mongoc_cursor_prepare_getmore_command (cursor, &getmore_cmd); _mongoc_cursor_response_refresh ( cursor, &getmore_cmd, NULL /* opts */, &data->response); bson_destroy (&getmore_cmd); _update_post_batch_resume_token (cursor); return IN_BATCH; } static void _destroy (mongoc_cursor_impl_t *impl) { _data_change_stream_t *data = (_data_change_stream_t *) impl->data; bson_destroy (&data->response.reply); bson_destroy (&data->post_batch_resume_token); bson_free (data); } static void _clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src) { fprintf (stderr, "Clone unsupported on change stream cursor."); BSON_ASSERT (false); } mongoc_cursor_t * _mongoc_cursor_change_stream_new (mongoc_client_t *client, bson_t *reply, const bson_t *getmore_opts) { mongoc_cursor_t *cursor; _data_change_stream_t *data; BSON_ASSERT (client); BSON_ASSERT (reply); data = bson_malloc0 (sizeof (*data)); /* _mongoc_cursor_response_t.reply is already uninitialized and we can trust * that reply comes from mongoc_client_read_command_with_opts() */ BSON_ASSERT (bson_steal (&data->response.reply, reply)); bson_init (&data->post_batch_resume_token); cursor = _mongoc_cursor_new_with_opts ( client, NULL, getmore_opts, NULL, NULL, NULL); cursor->impl.prime = _prime; cursor->impl.pop_from_batch = _pop_from_batch; cursor->impl.get_next_batch = _get_next_batch; cursor->impl.destroy = _destroy; cursor->impl.clone = _clone; cursor->impl.data = (void *) data; cursor->state = IN_BATCH; if (!_mongoc_cursor_start_reading_response (cursor, &data->response)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Couldn't parse cursor document"); } _update_post_batch_resume_token (cursor); return cursor; } static bool _bson_iter_has_next (bson_iter_t *iter) { bson_iter_t iter_copy = {0}; memcpy (&iter_copy, iter, sizeof (bson_iter_t)); return bson_iter_next (&iter_copy); } bool _mongoc_cursor_change_stream_end_of_batch (mongoc_cursor_t 
*cursor) { _data_change_stream_t *data = (_data_change_stream_t *) cursor->impl.data; return !_bson_iter_has_next (&data->response.batch_iter); } const bson_t * _mongoc_cursor_change_stream_get_post_batch_resume_token ( mongoc_cursor_t *cursor) { _data_change_stream_t *data = (_data_change_stream_t *) cursor->impl.data; return &data->post_batch_resume_token; } bool _mongoc_cursor_change_stream_has_post_batch_resume_token ( mongoc_cursor_t *cursor) { _data_change_stream_t *data = (_data_change_stream_t *) cursor->impl.data; return !bson_empty (&data->post_batch_resume_token); } const bson_t * _mongoc_cursor_change_stream_get_reply (mongoc_cursor_t *cursor) { _data_change_stream_t *data = (_data_change_stream_t *) cursor->impl.data; return &data->response.reply; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-cmd-deprecated.c0000644000076500000240000000655613572250757027646 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" /* This cursor is returned by the deprecated functions mongoc_client_command, * mongoc_database_command, and mongoc_collection_command. It runs the command * on the first call to mongoc_cursor_next and returns the only result. */ typedef struct _data_cmd_deprecated_t { bson_t cmd; bson_t reply; } data_cmd_deprecated_t; static mongoc_cursor_state_t _prime (mongoc_cursor_t *cursor) { data_cmd_deprecated_t *data = (data_cmd_deprecated_t *) cursor->impl.data; bson_destroy (&data->reply); if (_mongoc_cursor_run_command ( cursor, &data->cmd, &cursor->opts, &data->reply, true)) { return IN_BATCH; } else { return DONE; } } static mongoc_cursor_state_t _pop_from_batch (mongoc_cursor_t *cursor) { data_cmd_deprecated_t *data = (data_cmd_deprecated_t *) cursor->impl.data; cursor->current = &data->reply; /* don't return DONE here. a cursor is marked DONE when it returns NULL. */ return END_OF_BATCH; } static mongoc_cursor_state_t _get_next_batch (mongoc_cursor_t *cursor) { /* there's no next batch to get, return DONE immediately. 
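    * a deprecated command cursor only ever yields the single reply document
    * handed out by _pop_from_batch () above, so there is nothing more to
    * fetch.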
*/ return DONE; } static void _clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src) { data_cmd_deprecated_t *data_src = (data_cmd_deprecated_t *) src->data; data_cmd_deprecated_t *data_dst = bson_malloc0 (sizeof (data_cmd_deprecated_t)); bson_init (&data_dst->reply); bson_copy_to (&data_src->cmd, &data_dst->cmd); dst->data = data_dst; } static void _destroy (mongoc_cursor_impl_t *impl) { data_cmd_deprecated_t *data = (data_cmd_deprecated_t *) impl->data; bson_destroy (&data->reply); bson_destroy (&data->cmd); bson_free (data); } mongoc_cursor_t * _mongoc_cursor_cmd_deprecated_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *cmd, const mongoc_read_prefs_t *read_prefs) { mongoc_cursor_t *cursor = _mongoc_cursor_new_with_opts (client, db_and_coll, NULL, read_prefs /* user prefs */, NULL /* default prefs */, NULL); data_cmd_deprecated_t *data = bson_malloc0 (sizeof (data_cmd_deprecated_t)); _mongoc_cursor_check_and_copy_to (cursor, "command", cmd, &data->cmd); bson_init (&data->reply); cursor->impl.prime = _prime; cursor->impl.pop_from_batch = _pop_from_batch; cursor->impl.get_next_batch = _get_next_batch; cursor->impl.data = data; cursor->impl.clone = _clone; cursor->impl.destroy = _destroy; return cursor; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-cmd.c0000644000076500000240000001520213572250757025534 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" typedef enum { NONE, CMD_RESPONSE, OP_GETMORE_RESPONSE } reading_from_t; typedef enum { UNKNOWN, GETMORE_CMD, OP_GETMORE } getmore_type_t; typedef struct _data_cmd_t { /* Two paths: * - Mongo 3.2+, sent "getMore" cmd, we're reading reply's "nextBatch" array * - Mongo 2.6 to 3, after "aggregate" or similar command we sent OP_GETMORE, * we're reading the raw reply from a stream */ mongoc_cursor_response_t response; mongoc_cursor_response_legacy_t response_legacy; reading_from_t reading_from; getmore_type_t getmore_type; /* cache after first getmore. */ bson_t cmd; } data_cmd_t; static getmore_type_t _getmore_type (mongoc_cursor_t *cursor) { mongoc_server_stream_t *server_stream; bool use_cmd; data_cmd_t *data = (data_cmd_t *) cursor->impl.data; if (data->getmore_type != UNKNOWN) { return data->getmore_type; } server_stream = _mongoc_cursor_fetch_stream (cursor); if (!server_stream) { return UNKNOWN; } use_cmd = server_stream->sd->max_wire_version >= WIRE_VERSION_FIND_CMD && !_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST); data->getmore_type = use_cmd ? 
GETMORE_CMD : OP_GETMORE; mongoc_server_stream_cleanup (server_stream); return data->getmore_type; } static mongoc_cursor_state_t _prime (mongoc_cursor_t *cursor) { data_cmd_t *data = (data_cmd_t *) cursor->impl.data; bson_t copied_opts; bson_init (&copied_opts); cursor->operation_id = ++cursor->client->cluster.operation_id; /* commands like agg have a cursor field, so copy opts without "batchSize" */ bson_copy_to_excluding_noinit ( &cursor->opts, &copied_opts, "batchSize", NULL); /* server replies to aggregate/listIndexes/listCollections with: * {cursor: {id: N, firstBatch: []}} */ _mongoc_cursor_response_refresh ( cursor, &data->cmd, &copied_opts, &data->response); data->reading_from = CMD_RESPONSE; bson_destroy (&copied_opts); return IN_BATCH; } static mongoc_cursor_state_t _pop_from_batch (mongoc_cursor_t *cursor) { data_cmd_t *data = (data_cmd_t *) cursor->impl.data; switch (data->reading_from) { case CMD_RESPONSE: _mongoc_cursor_response_read (cursor, &data->response, &cursor->current); break; case OP_GETMORE_RESPONSE: cursor->current = bson_reader_read (data->response_legacy.reader, NULL); break; case NONE: default: fprintf (stderr, "trying to pop from an uninitialized cursor reader.\n"); BSON_ASSERT (false); } if (cursor->current) { return IN_BATCH; } else { return cursor->cursor_id ? END_OF_BATCH : DONE; } } static mongoc_cursor_state_t _get_next_batch (mongoc_cursor_t *cursor) { data_cmd_t *data = (data_cmd_t *) cursor->impl.data; bson_t getmore_cmd; getmore_type_t getmore_type = _getmore_type (cursor); switch (getmore_type) { case GETMORE_CMD: _mongoc_cursor_prepare_getmore_command (cursor, &getmore_cmd); _mongoc_cursor_response_refresh ( cursor, &getmore_cmd, NULL /* opts */, &data->response); bson_destroy (&getmore_cmd); data->reading_from = CMD_RESPONSE; return IN_BATCH; case OP_GETMORE: _mongoc_cursor_op_getmore (cursor, &data->response_legacy); data->reading_from = OP_GETMORE_RESPONSE; return IN_BATCH; case UNKNOWN: default: return DONE; } } static void _destroy (mongoc_cursor_impl_t *impl) { data_cmd_t *data = (data_cmd_t *) impl->data; bson_destroy (&data->response.reply); bson_destroy (&data->cmd); _mongoc_cursor_response_legacy_destroy (&data->response_legacy); bson_free (data); } static void _clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src) { data_cmd_t *data_src = (data_cmd_t *) src->data; data_cmd_t *data_dst = bson_malloc0 (sizeof (data_cmd_t)); bson_init (&data_dst->response.reply); _mongoc_cursor_response_legacy_init (&data_dst->response_legacy); bson_copy_to (&data_src->cmd, &data_dst->cmd); dst->data = data_dst; } mongoc_cursor_t * _mongoc_cursor_cmd_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *cmd, const bson_t *opts, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, const mongoc_read_concern_t *read_concern) { mongoc_cursor_t *cursor; data_cmd_t *data = bson_malloc0 (sizeof (*data)); cursor = _mongoc_cursor_new_with_opts ( client, db_and_coll, opts, user_prefs, default_prefs, read_concern); _mongoc_cursor_response_legacy_init (&data->response_legacy); _mongoc_cursor_check_and_copy_to (cursor, "command", cmd, &data->cmd); bson_init (&data->response.reply); cursor->impl.prime = _prime; cursor->impl.pop_from_batch = _pop_from_batch; cursor->impl.get_next_batch = _get_next_batch; cursor->impl.destroy = _destroy; cursor->impl.clone = _clone; cursor->impl.data = (void *) data; return cursor; } mongoc_cursor_t * _mongoc_cursor_cmd_new_from_reply (mongoc_client_t *client, const bson_t *cmd, const 
bson_t *opts, bson_t *reply) { mongoc_cursor_t *cursor = _mongoc_cursor_cmd_new (client, NULL, cmd, opts, NULL, NULL, NULL); data_cmd_t *data = (data_cmd_t *) cursor->impl.data; data->reading_from = CMD_RESPONSE; cursor->state = IN_BATCH; bson_destroy (&data->response.reply); if (!bson_steal (&data->response.reply, reply)) { bson_destroy (&data->response.reply); BSON_ASSERT (bson_steal (&data->response.reply, bson_copy (reply))); } if (!_mongoc_cursor_start_reading_response (cursor, &data->response)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Couldn't parse cursor document"); } return cursor; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-find-cmd.c0000644000076500000240000000621713572250757026460 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" typedef struct _data_find_cmd_t { mongoc_cursor_response_t response; bson_t filter; } data_find_cmd_t; static mongoc_cursor_state_t _prime (mongoc_cursor_t *cursor) { data_find_cmd_t *data = (data_find_cmd_t *) cursor->impl.data; bson_t find_cmd; bson_init (&find_cmd); cursor->operation_id = ++cursor->client->cluster.operation_id; /* construct { find: "", filter: {} } */ _mongoc_cursor_prepare_find_command (cursor, &data->filter, &find_cmd); _mongoc_cursor_response_refresh ( cursor, &find_cmd, &cursor->opts, &data->response); bson_destroy (&find_cmd); return IN_BATCH; } static mongoc_cursor_state_t _pop_from_batch (mongoc_cursor_t *cursor) { data_find_cmd_t *data = (data_find_cmd_t *) cursor->impl.data; _mongoc_cursor_response_read (cursor, &data->response, &cursor->current); if (cursor->current) { return IN_BATCH; } else { return cursor->cursor_id ? END_OF_BATCH : DONE; } } static mongoc_cursor_state_t _get_next_batch (mongoc_cursor_t *cursor) { data_find_cmd_t *data = (data_find_cmd_t *) cursor->impl.data; bson_t getmore_cmd; if (!cursor->cursor_id) { return DONE; } _mongoc_cursor_prepare_getmore_command (cursor, &getmore_cmd); _mongoc_cursor_response_refresh ( cursor, &getmore_cmd, NULL /* opts */, &data->response); bson_destroy (&getmore_cmd); return IN_BATCH; } static void _destroy (mongoc_cursor_impl_t *impl) { data_find_cmd_t *data = (data_find_cmd_t *) impl->data; bson_destroy (&data->filter); bson_destroy (&data->response.reply); bson_free (data); } static void _clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src) { data_find_cmd_t *data_src = (data_find_cmd_t *) src->data; data_find_cmd_t *data_dst = bson_malloc0 (sizeof (data_find_cmd_t)); bson_init (&data_dst->response.reply); bson_copy_to (&data_src->filter, &data_dst->filter); dst->data = data_dst; } /* transition a find cursor to use the find command. 
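 * called from _prime () in mongoc-cursor-find.c once a server stream has been
 * selected and the wire version is known to support the find command; note
 * that the filter is stolen from the caller, not copied.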
*/ void _mongoc_cursor_impl_find_cmd_init (mongoc_cursor_t *cursor, bson_t *filter) { data_find_cmd_t *data = bson_malloc0 (sizeof (*data)); BSON_ASSERT (bson_steal (&data->filter, filter)); bson_init (&data->response.reply); cursor->impl.prime = _prime; cursor->impl.pop_from_batch = _pop_from_batch; cursor->impl.get_next_batch = _get_next_batch; cursor->impl.destroy = _destroy; cursor->impl.clone = _clone; cursor->impl.data = (void *) data; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-find-opquery.c0000644000076500000240000000630013572250757027412 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-cursor-private.h" typedef struct _data_find_opquery_t { mongoc_cursor_response_legacy_t response_legacy; bson_t filter; } data_find_opquery_t; static bool _hit_limit (mongoc_cursor_t *cursor) { int64_t limit, limit_abs; limit = mongoc_cursor_get_limit (cursor); /* don't use llabs, that is a C99 function. */ limit_abs = limit > 0 ? limit : -limit; /* mark as done if we've hit the limit. */ if (limit && cursor->count >= limit_abs) { return true; } return false; } static mongoc_cursor_state_t _prime (mongoc_cursor_t *cursor) { data_find_opquery_t *data = (data_find_opquery_t *) cursor->impl.data; if (_hit_limit (cursor)) { return DONE; } _mongoc_cursor_op_query_find (cursor, &data->filter, &data->response_legacy); return IN_BATCH; } static mongoc_cursor_state_t _pop_from_batch (mongoc_cursor_t *cursor) { data_find_opquery_t *data = (data_find_opquery_t *) cursor->impl.data; if (_hit_limit (cursor)) { return DONE; } cursor->current = bson_reader_read (data->response_legacy.reader, NULL); if (cursor->current) { return IN_BATCH; } else { return cursor->cursor_id ? 
END_OF_BATCH : DONE; } } static mongoc_cursor_state_t _get_next_batch (mongoc_cursor_t *cursor) { data_find_opquery_t *data = (data_find_opquery_t *) cursor->impl.data; _mongoc_cursor_op_getmore (cursor, &data->response_legacy); return IN_BATCH; } static void _destroy (mongoc_cursor_impl_t *impl) { data_find_opquery_t *data = (data_find_opquery_t *) impl->data; _mongoc_cursor_response_legacy_destroy (&data->response_legacy); bson_destroy (&data->filter); bson_free (data); } static void _clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src) { data_find_opquery_t *data_dst = bson_malloc0 (sizeof (data_find_opquery_t)); data_find_opquery_t *data_src = (data_find_opquery_t *) src->data; _mongoc_cursor_response_legacy_init (&data_dst->response_legacy); bson_copy_to (&data_src->filter, &data_dst->filter); dst->data = data_dst; } void _mongoc_cursor_impl_find_opquery_init (mongoc_cursor_t *cursor, bson_t *filter) { data_find_opquery_t *data = bson_malloc0 (sizeof (*data)); _mongoc_cursor_response_legacy_init (&data->response_legacy); BSON_ASSERT (bson_steal (&data->filter, filter)); cursor->impl.prime = _prime; cursor->impl.pop_from_batch = _pop_from_batch; cursor->impl.get_next_batch = _get_next_batch; cursor->impl.destroy = _destroy; cursor->impl.clone = _clone; cursor->impl.data = data; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-find.c0000644000076500000240000000654013572250757025716 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" typedef struct _data_find_t { bson_t filter; } data_find_t; extern void _mongoc_cursor_impl_find_cmd_init (mongoc_cursor_t *cursor, bson_t *filter); extern void _mongoc_cursor_impl_find_opquery_init (mongoc_cursor_t *cursor, bson_t *filter); static mongoc_cursor_state_t _prime (mongoc_cursor_t *cursor) { bool use_find_command; mongoc_server_stream_t *server_stream; data_find_t *data = (data_find_t *) cursor->impl.data; /* determine if this should be a command or op_query cursor. */ server_stream = _mongoc_cursor_fetch_stream (cursor); if (!server_stream) { return DONE; } /* find_getmore_killcursors spec: * "The find command does not support the exhaust flag from OP_QUERY." */ use_find_command = server_stream->sd->max_wire_version >= WIRE_VERSION_FIND_CMD && !_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST); mongoc_server_stream_cleanup (server_stream); /* set all mongoc_impl_t function pointers. */ if (use_find_command) { _mongoc_cursor_impl_find_cmd_init (cursor, &data->filter /* stolen */); } else { _mongoc_cursor_impl_find_opquery_init (cursor, &data->filter /* stolen */); } /* destroy this impl data since impl functions have been replaced. */ bson_free (data); /* prime with the new implementation. 
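    * cursor->impl.prime now points at either the find-command or the
    * OP_QUERY implementation chosen above, so delegating to it issues the
    * initial query against the selected server.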
*/ return cursor->impl.prime (cursor); } static void _clone (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src) { data_find_t *data_dst = bson_malloc0 (sizeof (data_find_t)); data_find_t *data_src = (data_find_t *) src->data; bson_copy_to (&data_src->filter, &data_dst->filter); dst->data = data_dst; } static void _destroy (mongoc_cursor_impl_t *impl) { data_find_t *data = (data_find_t *) impl->data; bson_destroy (&data->filter); bson_free (data); } mongoc_cursor_t * _mongoc_cursor_find_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *filter, const bson_t *opts, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, const mongoc_read_concern_t *read_concern) { mongoc_cursor_t *cursor; data_find_t *data = bson_malloc0 (sizeof (data_find_t)); cursor = _mongoc_cursor_new_with_opts ( client, db_and_coll, opts, user_prefs, default_prefs, read_concern); _mongoc_cursor_check_and_copy_to (cursor, "filter", filter, &data->filter); cursor->impl.prime = _prime; cursor->impl.clone = _clone; cursor->impl.destroy = _destroy; cursor->impl.data = data; return cursor; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-legacy.c0000644000076500000240000005315513572250757026246 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ /* cursor functions for pre-3.2 MongoDB, including: * - OP_QUERY find (superseded by the find command) * - OP_GETMORE (superseded by the getMore command) * - receiving OP_REPLY documents in a stream (instead of batch) */ #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-rpc-private.h" static bool _mongoc_cursor_monitor_legacy_get_more (mongoc_cursor_t *cursor, mongoc_server_stream_t *server_stream) { bson_t doc; char db[MONGOC_NAMESPACE_MAX]; mongoc_client_t *client; mongoc_apm_command_started_t event; ENTRY; client = cursor->client; if (!client->apm_callbacks.started) { /* successful */ RETURN (true); } _mongoc_cursor_prepare_getmore_command (cursor, &doc); bson_strncpy (db, cursor->ns, cursor->dblen + 1); mongoc_apm_command_started_init (&event, &doc, db, "getMore", client->cluster.request_id, cursor->operation_id, &server_stream->sd->host, server_stream->sd->id, client->apm_context); client->apm_callbacks.started (&event); mongoc_apm_command_started_cleanup (&event); bson_destroy (&doc); RETURN (true); } static bool _mongoc_cursor_monitor_legacy_query (mongoc_cursor_t *cursor, const bson_t *filter, mongoc_server_stream_t *server_stream) { bson_t doc; mongoc_client_t *client; char db[MONGOC_NAMESPACE_MAX]; bool r; ENTRY; client = cursor->client; if (!client->apm_callbacks.started) { /* successful */ RETURN (true); } bson_init (&doc); bson_strncpy (db, cursor->ns, cursor->dblen + 1); /* simulate a MongoDB 3.2+ "find" command */ _mongoc_cursor_prepare_find_command (cursor, filter, &doc); bson_copy_to_excluding_noinit ( &cursor->opts, &doc, "serverId", "maxAwaitTimeMS", "sessionId", NULL); r = _mongoc_cursor_monitor_command (cursor, server_stream, &doc, "find"); bson_destroy (&doc); RETURN (r); } void _mongoc_cursor_op_getmore (mongoc_cursor_t *cursor, mongoc_cursor_response_legacy_t *response) { int64_t started; mongoc_rpc_t rpc; uint32_t request_id; mongoc_cluster_t *cluster; mongoc_query_flags_t flags; mongoc_server_stream_t *server_stream; ENTRY; started = bson_get_monotonic_time (); cluster = &cursor->client->cluster; server_stream = _mongoc_cursor_fetch_stream (cursor); if (!server_stream) { return; } if (!_mongoc_cursor_opts_to_flags (cursor, server_stream, &flags)) { GOTO (fail); } if (cursor->in_exhaust) { request_id = (uint32_t) response->rpc.header.request_id; } else { request_id = ++cluster->request_id; rpc.get_more.cursor_id = cursor->cursor_id; rpc.header.msg_len = 0; rpc.header.request_id = request_id; rpc.header.response_to = 0; rpc.header.opcode = MONGOC_OPCODE_GET_MORE; rpc.get_more.zero = 0; rpc.get_more.collection = cursor->ns; if (flags & MONGOC_QUERY_TAILABLE_CURSOR) { rpc.get_more.n_return = 0; } else { rpc.get_more.n_return = _mongoc_n_return (cursor); } if (!_mongoc_cursor_monitor_legacy_get_more (cursor, server_stream)) { GOTO (fail); } if (!mongoc_cluster_legacy_rpc_sendv_to_server ( cluster, &rpc, server_stream, &cursor->error)) { GOTO (fail); } } _mongoc_buffer_clear (&response->buffer, false); /* reset the last known cursor id. 
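    * it is re-populated from the OP_REPLY below once the response has been
    * received and validated; if receiving the reply fails, the id stays 0.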
*/ cursor->cursor_id = 0; if (!_mongoc_client_recv (cursor->client, &response->rpc, &response->buffer, server_stream, &cursor->error)) { GOTO (fail); } if (response->rpc.header.opcode != MONGOC_OPCODE_REPLY) { bson_set_error (&cursor->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid opcode. Expected %d, got %d.", MONGOC_OPCODE_REPLY, response->rpc.header.opcode); GOTO (fail); } if (response->rpc.header.response_to != request_id) { bson_set_error (&cursor->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid response_to for getmore. Expected %d, got %d.", request_id, response->rpc.header.response_to); GOTO (fail); } if (!_mongoc_rpc_check_ok (&response->rpc, cursor->client->error_api_version, &cursor->error, &cursor->error_doc)) { GOTO (fail); } if (response->reader) { bson_reader_destroy (response->reader); } cursor->cursor_id = response->rpc.reply.cursor_id; response->reader = bson_reader_new_from_data (response->rpc.reply.documents, (size_t) response->rpc.reply.documents_len); _mongoc_cursor_monitor_succeeded (cursor, response, bson_get_monotonic_time () - started, false, /* not first batch */ server_stream, "getMore"); GOTO (done); fail: _mongoc_cursor_monitor_failed ( cursor, bson_get_monotonic_time () - started, server_stream, "getMore"); done: mongoc_server_stream_cleanup (server_stream); } #define OPT_CHECK(_type) \ do { \ if (!BSON_ITER_HOLDS_##_type (&iter)) { \ bson_set_error (&cursor->error, \ MONGOC_ERROR_COMMAND, \ MONGOC_ERROR_COMMAND_INVALID_ARG, \ "invalid option %s, should be type %s", \ key, \ #_type); \ return NULL; \ } \ } while (false) #define OPT_CHECK_INT() \ do { \ if (!BSON_ITER_HOLDS_INT (&iter)) { \ bson_set_error (&cursor->error, \ MONGOC_ERROR_COMMAND, \ MONGOC_ERROR_COMMAND_INVALID_ARG, \ "invalid option %s, should be integer", \ key); \ return NULL; \ } \ } while (false) #define OPT_ERR(_msg) \ do { \ bson_set_error (&cursor->error, \ MONGOC_ERROR_COMMAND, \ MONGOC_ERROR_COMMAND_INVALID_ARG, \ _msg); \ return NULL; \ } while (false) #define OPT_BSON_ERR(_msg) \ do { \ bson_set_error ( \ &cursor->error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, _msg); \ return NULL; \ } while (false) #define OPT_FLAG(_flag) \ do { \ OPT_CHECK (BOOL); \ if (bson_iter_as_bool (&iter)) { \ *flags |= _flag; \ } \ } while (false) #define PUSH_DOLLAR_QUERY() \ do { \ if (!pushed_dollar_query) { \ pushed_dollar_query = true; \ bson_append_document (query, "$query", 6, filter); \ } \ } while (false) #define OPT_SUBDOCUMENT(_opt_name, _legacy_name) \ do { \ OPT_CHECK (DOCUMENT); \ bson_iter_document (&iter, &len, &data); \ if (!bson_init_static (&subdocument, data, (size_t) len)) { \ OPT_BSON_ERR ("Invalid '" #_opt_name "' subdocument in 'opts'."); \ } \ BSON_APPEND_DOCUMENT (query, "$" #_legacy_name, &subdocument); \ } while (false) static bson_t * _mongoc_cursor_parse_opts_for_op_query (mongoc_cursor_t *cursor, mongoc_server_stream_t *stream, bson_t *filter, bson_t *query /* OUT */, bson_t *fields /* OUT */, mongoc_query_flags_t *flags /* OUT */, int32_t *skip /* OUT */) { bool pushed_dollar_query; bson_iter_t iter; uint32_t len; const uint8_t *data; bson_t subdocument; const char *key; char *dollar_modifier; *flags = MONGOC_QUERY_NONE; *skip = 0; /* assume we'll send filter straight to server, like "{a: 1}". if we find an * opt we must add, like "sort", we push the query like "$query: {a: 1}", * then add a query modifier for the option, in this example "$orderby". 
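* e.g. (illustrative) filter {a: 1} with opts {sort: {b: -1}, comment: "x"} is sent as {$query: {a: 1}, $orderby: {b: -1}, $comment: "x"}.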
*/ pushed_dollar_query = false; if (!bson_iter_init (&iter, &cursor->opts)) { OPT_BSON_ERR ("Invalid 'opts' parameter."); } while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); /* most common options first */ if (!strcmp (key, MONGOC_CURSOR_PROJECTION)) { OPT_CHECK (DOCUMENT); bson_iter_document (&iter, &len, &data); if (!bson_init_static (&subdocument, data, (size_t) len)) { OPT_BSON_ERR ("Invalid 'projection' subdocument in 'opts'."); } bson_destroy (fields); bson_copy_to (&subdocument, fields); } else if (!strcmp (key, MONGOC_CURSOR_SORT)) { PUSH_DOLLAR_QUERY (); OPT_SUBDOCUMENT (sort, orderby); } else if (!strcmp (key, MONGOC_CURSOR_SKIP)) { OPT_CHECK_INT (); *skip = (int32_t) bson_iter_as_int64 (&iter); } /* the rest of the options, alphabetically */ else if (!strcmp (key, MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS)) { OPT_FLAG (MONGOC_QUERY_PARTIAL); } else if (!strcmp (key, MONGOC_CURSOR_AWAIT_DATA)) { OPT_FLAG (MONGOC_QUERY_AWAIT_DATA); } else if (!strcmp (key, MONGOC_CURSOR_COMMENT)) { OPT_CHECK (UTF8); PUSH_DOLLAR_QUERY (); BSON_APPEND_UTF8 (query, "$comment", bson_iter_utf8 (&iter, NULL)); } else if (!strcmp (key, MONGOC_CURSOR_HINT)) { if (BSON_ITER_HOLDS_UTF8 (&iter)) { PUSH_DOLLAR_QUERY (); BSON_APPEND_UTF8 (query, "$hint", bson_iter_utf8 (&iter, NULL)); } else if (BSON_ITER_HOLDS_DOCUMENT (&iter)) { PUSH_DOLLAR_QUERY (); OPT_SUBDOCUMENT (hint, hint); } else { OPT_ERR ("Wrong type for 'hint' field in 'opts'."); } } else if (!strcmp (key, MONGOC_CURSOR_MAX)) { PUSH_DOLLAR_QUERY (); OPT_SUBDOCUMENT (max, max); } else if (!strcmp (key, MONGOC_CURSOR_MAX_SCAN)) { OPT_CHECK_INT (); PUSH_DOLLAR_QUERY (); BSON_APPEND_INT64 (query, "$maxScan", bson_iter_as_int64 (&iter)); } else if (!strcmp (key, MONGOC_CURSOR_MAX_TIME_MS)) { OPT_CHECK_INT (); PUSH_DOLLAR_QUERY (); BSON_APPEND_INT64 (query, "$maxTimeMS", bson_iter_as_int64 (&iter)); } else if (!strcmp (key, MONGOC_CURSOR_MIN)) { PUSH_DOLLAR_QUERY (); OPT_SUBDOCUMENT (min, min); } else if (!strcmp (key, MONGOC_CURSOR_READ_CONCERN)) { OPT_ERR ("Set readConcern on client, database, or collection," " not in a query."); } else if (!strcmp (key, MONGOC_CURSOR_RETURN_KEY)) { OPT_CHECK (BOOL); PUSH_DOLLAR_QUERY (); BSON_APPEND_BOOL (query, "$returnKey", bson_iter_as_bool (&iter)); } else if (!strcmp (key, MONGOC_CURSOR_SHOW_RECORD_ID)) { OPT_CHECK (BOOL); PUSH_DOLLAR_QUERY (); BSON_APPEND_BOOL (query, "$showDiskLoc", bson_iter_as_bool (&iter)); } else if (!strcmp (key, MONGOC_CURSOR_SNAPSHOT)) { OPT_CHECK (BOOL); PUSH_DOLLAR_QUERY (); BSON_APPEND_BOOL (query, "$snapshot", bson_iter_as_bool (&iter)); } else if (!strcmp (key, MONGOC_CURSOR_COLLATION)) { bson_set_error (&cursor->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support collation"); return NULL; } /* singleBatch limit and batchSize are handled in _mongoc_n_return, * exhaust noCursorTimeout oplogReplay tailable in _mongoc_cursor_flags * maxAwaitTimeMS is handled in _mongoc_cursor_prepare_getmore_command * sessionId is used to retrieve the mongoc_client_session_t */ else if (strcmp (key, MONGOC_CURSOR_SINGLE_BATCH) && strcmp (key, MONGOC_CURSOR_LIMIT) && strcmp (key, MONGOC_CURSOR_BATCH_SIZE) && strcmp (key, MONGOC_CURSOR_EXHAUST) && strcmp (key, MONGOC_CURSOR_NO_CURSOR_TIMEOUT) && strcmp (key, MONGOC_CURSOR_OPLOG_REPLAY) && strcmp (key, MONGOC_CURSOR_TAILABLE) && strcmp (key, MONGOC_CURSOR_MAX_AWAIT_TIME_MS)) { /* pass unrecognized options to server, prefixed with $ */ PUSH_DOLLAR_QUERY (); dollar_modifier = bson_strdup_printf 
("$%s", key); if (!bson_append_iter (query, dollar_modifier, -1, &iter)) { bson_set_error (&cursor->error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Error adding \"%s\" to query", dollar_modifier); bson_free (dollar_modifier); return NULL; } bson_free (dollar_modifier); } } if (!_mongoc_cursor_opts_to_flags (cursor, stream, flags)) { /* cursor->error is set */ return NULL; } return pushed_dollar_query ? query : filter; } #undef OPT_CHECK #undef OPT_ERR #undef OPT_BSON_ERR #undef OPT_FLAG #undef OPT_SUBDOCUMENT bool _mongoc_cursor_op_query_find (mongoc_cursor_t *cursor, bson_t *filter, mongoc_cursor_response_legacy_t *response) { int64_t started; uint32_t request_id; mongoc_rpc_t rpc; const bson_t *query_ptr; bson_t query = BSON_INITIALIZER; bson_t fields = BSON_INITIALIZER; mongoc_query_flags_t flags; mongoc_assemble_query_result_t result = ASSEMBLE_QUERY_RESULT_INIT; bool succeeded = false; mongoc_server_stream_t *server_stream; ENTRY; server_stream = _mongoc_cursor_fetch_stream (cursor); if (!server_stream) { return false; } started = bson_get_monotonic_time (); /* When the user explicitly provides a readConcern -- but the server * doesn't support readConcern, we must error: * https://github.com/mongodb/specifications/blob/master/source/read-write-concern/read-write-concern.rst#errors-1 */ if (cursor->read_concern->level != NULL && server_stream->sd->max_wire_version < WIRE_VERSION_READ_CONCERN) { bson_set_error (&cursor->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support readConcern"); GOTO (done); } cursor->operation_id = ++cursor->client->cluster.operation_id; request_id = ++cursor->client->cluster.request_id; rpc.header.msg_len = 0; rpc.header.request_id = request_id; rpc.header.response_to = 0; rpc.header.opcode = MONGOC_OPCODE_QUERY; rpc.query.flags = MONGOC_QUERY_NONE; rpc.query.collection = cursor->ns; rpc.query.skip = 0; rpc.query.n_return = 0; rpc.query.fields = NULL; query_ptr = _mongoc_cursor_parse_opts_for_op_query ( cursor, server_stream, filter, &query, &fields, &flags, &rpc.query.skip); if (!query_ptr) { /* invalid opts. cursor->error is set */ GOTO (done); } assemble_query ( cursor->read_prefs, server_stream, query_ptr, flags, &result); rpc.query.query = bson_get_data (result.assembled_query); rpc.query.flags = result.flags; rpc.query.n_return = _mongoc_n_return (cursor); if (!bson_empty (&fields)) { rpc.query.fields = bson_get_data (&fields); } /* cursor from mongoc_collection_find[_with_opts] is about to send its * initial OP_QUERY to pre-3.2 MongoDB */ if (!_mongoc_cursor_monitor_legacy_query (cursor, filter, server_stream)) { GOTO (done); } if (!mongoc_cluster_legacy_rpc_sendv_to_server ( &cursor->client->cluster, &rpc, server_stream, &cursor->error)) { GOTO (done); } _mongoc_buffer_clear (&response->buffer, false); if (!_mongoc_client_recv (cursor->client, &response->rpc, &response->buffer, server_stream, &cursor->error)) { GOTO (done); } if (response->rpc.header.opcode != MONGOC_OPCODE_REPLY) { bson_set_error (&cursor->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid opcode. Expected %d, got %d.", MONGOC_OPCODE_REPLY, response->rpc.header.opcode); GOTO (done); } if (response->rpc.header.response_to != request_id) { bson_set_error (&cursor->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid response_to for query. 
Expected %d, got %d.", request_id, response->rpc.header.response_to); GOTO (done); } if (!_mongoc_rpc_check_ok (&response->rpc, cursor->client->error_api_version, &cursor->error, &cursor->error_doc)) { GOTO (done); } if (response->reader) { bson_reader_destroy (response->reader); } cursor->cursor_id = response->rpc.reply.cursor_id; response->reader = bson_reader_new_from_data (response->rpc.reply.documents, (size_t) response->rpc.reply.documents_len); if (_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST)) { cursor->in_exhaust = true; cursor->client->in_exhaust = true; } _mongoc_cursor_monitor_succeeded (cursor, response, bson_get_monotonic_time () - started, true, /* first_batch */ server_stream, "find"); succeeded = true; done: if (!succeeded) { _mongoc_cursor_monitor_failed ( cursor, bson_get_monotonic_time () - started, server_stream, "find"); } mongoc_server_stream_cleanup (server_stream); assemble_query_result_cleanup (&result); bson_destroy (&query); bson_destroy (&fields); return succeeded; } void _mongoc_cursor_response_legacy_init (mongoc_cursor_response_legacy_t *response) { _mongoc_buffer_init (&response->buffer, NULL, 0, NULL, NULL); } void _mongoc_cursor_response_legacy_destroy ( mongoc_cursor_response_legacy_t *response) { if (response->reader) { bson_reader_destroy (response->reader); response->reader = NULL; } _mongoc_buffer_destroy (&response->buffer); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor-private.h0000644000076500000240000002761513572250757026463 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CURSOR_PRIVATE_H #define MONGOC_CURSOR_PRIVATE_H #include #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-buffer-private.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-server-stream-private.h" BSON_BEGIN_DECLS #define MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS "allowPartialResults" #define MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS_LEN 19 #define MONGOC_CURSOR_AWAIT_DATA "awaitData" #define MONGOC_CURSOR_AWAIT_DATA_LEN 9 #define MONGOC_CURSOR_BATCH_SIZE "batchSize" #define MONGOC_CURSOR_BATCH_SIZE_LEN 9 #define MONGOC_CURSOR_COLLATION "collation" #define MONGOC_CURSOR_COLLATION_LEN 9 #define MONGOC_CURSOR_COMMENT "comment" #define MONGOC_CURSOR_COMMENT_LEN 7 #define MONGOC_CURSOR_EXHAUST "exhaust" #define MONGOC_CURSOR_EXHAUST_LEN 7 #define MONGOC_CURSOR_FILTER "filter" #define MONGOC_CURSOR_FILTER_LEN 6 #define MONGOC_CURSOR_FIND "find" #define MONGOC_CURSOR_FIND_LEN 4 #define MONGOC_CURSOR_HINT "hint" #define MONGOC_CURSOR_HINT_LEN 4 #define MONGOC_CURSOR_LIMIT "limit" #define MONGOC_CURSOR_LIMIT_LEN 5 #define MONGOC_CURSOR_MAX "max" #define MONGOC_CURSOR_MAX_LEN 3 #define MONGOC_CURSOR_MAX_AWAIT_TIME_MS "maxAwaitTimeMS" #define MONGOC_CURSOR_MAX_AWAIT_TIME_MS_LEN 14 #define MONGOC_CURSOR_MAX_SCAN "maxScan" #define MONGOC_CURSOR_MAX_SCAN_LEN 7 #define MONGOC_CURSOR_MAX_TIME_MS "maxTimeMS" #define MONGOC_CURSOR_MAX_TIME_MS_LEN 9 #define MONGOC_CURSOR_MIN "min" #define MONGOC_CURSOR_MIN_LEN 3 #define MONGOC_CURSOR_NO_CURSOR_TIMEOUT "noCursorTimeout" #define MONGOC_CURSOR_NO_CURSOR_TIMEOUT_LEN 15 #define MONGOC_CURSOR_OPLOG_REPLAY "oplogReplay" #define MONGOC_CURSOR_OPLOG_REPLAY_LEN 11 #define MONGOC_CURSOR_ORDERBY "orderby" #define MONGOC_CURSOR_ORDERBY_LEN 7 #define MONGOC_CURSOR_PROJECTION "projection" #define MONGOC_CURSOR_PROJECTION_LEN 10 #define MONGOC_CURSOR_QUERY "query" #define MONGOC_CURSOR_QUERY_LEN 5 #define MONGOC_CURSOR_READ_CONCERN "readConcern" #define MONGOC_CURSOR_READ_CONCERN_LEN 11 #define MONGOC_CURSOR_RETURN_KEY "returnKey" #define MONGOC_CURSOR_RETURN_KEY_LEN 9 #define MONGOC_CURSOR_SHOW_DISK_LOC "showDiskLoc" #define MONGOC_CURSOR_SHOW_DISK_LOC_LEN 11 #define MONGOC_CURSOR_SHOW_RECORD_ID "showRecordId" #define MONGOC_CURSOR_SHOW_RECORD_ID_LEN 12 #define MONGOC_CURSOR_SINGLE_BATCH "singleBatch" #define MONGOC_CURSOR_SINGLE_BATCH_LEN 11 #define MONGOC_CURSOR_SKIP "skip" #define MONGOC_CURSOR_SKIP_LEN 4 #define MONGOC_CURSOR_SNAPSHOT "snapshot" #define MONGOC_CURSOR_SNAPSHOT_LEN 8 #define MONGOC_CURSOR_SORT "sort" #define MONGOC_CURSOR_SORT_LEN 4 #define MONGOC_CURSOR_TAILABLE "tailable" #define MONGOC_CURSOR_TAILABLE_LEN 8 typedef struct _mongoc_cursor_impl_t mongoc_cursor_impl_t; typedef enum { UNPRIMED, IN_BATCH, END_OF_BATCH, DONE } mongoc_cursor_state_t; typedef mongoc_cursor_state_t (*_mongoc_cursor_impl_transition_t) ( mongoc_cursor_t *cursor); struct _mongoc_cursor_impl_t { void (*clone) (mongoc_cursor_impl_t *dst, const mongoc_cursor_impl_t *src); void (*destroy) (mongoc_cursor_impl_t *ctx); _mongoc_cursor_impl_transition_t prime; _mongoc_cursor_impl_transition_t pop_from_batch; _mongoc_cursor_impl_transition_t get_next_batch; void *data; }; /* pre-3.2 and exhaust cursor responses -- read documents from stream. 
*/ typedef struct _mongoc_cursor_response_legacy { mongoc_rpc_t rpc; mongoc_buffer_t buffer; bson_reader_t *reader; } mongoc_cursor_response_legacy_t; /* 3.2+ responses -- read batch docs like {cursor:{id: 123, firstBatch: []}} */ typedef struct _mongoc_cursor_response_t { bson_t reply; /* the entire command reply */ bson_iter_t batch_iter; /* iterates over the batch array */ bson_t current_doc; /* the current doc inside the batch array */ } mongoc_cursor_response_t; struct _mongoc_cursor_t { mongoc_client_t *client; uint32_t client_generation; uint32_t server_id; bool slave_ok; mongoc_cursor_state_t state; bool in_exhaust; bson_t opts; mongoc_read_concern_t *read_concern; mongoc_read_prefs_t *read_prefs; mongoc_write_concern_t *write_concern; bool explicit_session; mongoc_client_session_t *client_session; uint32_t count; char ns[140]; uint32_t nslen; uint32_t dblen; bson_error_t error; bson_t error_doc; /* always initialized, and set with server errors. */ const bson_t *current; mongoc_cursor_impl_t impl; int64_t operation_id; int64_t cursor_id; }; int32_t _mongoc_n_return (mongoc_cursor_t *cursor); void _mongoc_set_cursor_ns (mongoc_cursor_t *cursor, const char *ns, uint32_t nslen); bool _mongoc_cursor_get_opt_bool (const mongoc_cursor_t *cursor, const char *option); void _mongoc_cursor_flags_to_opts (mongoc_query_flags_t qflags, bson_t *opts, bool *slave_ok); bool _mongoc_cursor_translate_dollar_query_opts (const bson_t *query, bson_t *opts, bson_t *unwrapped, bson_error_t *error); mongoc_server_stream_t * _mongoc_cursor_fetch_stream (mongoc_cursor_t *cursor); void _mongoc_cursor_collection (const mongoc_cursor_t *cursor, const char **collection, int *collection_len); bool _mongoc_cursor_run_command (mongoc_cursor_t *cursor, const bson_t *command, const bson_t *opts, bson_t *reply, bool retry_prohibited); bool _mongoc_cursor_more (mongoc_cursor_t *cursor); bool _mongoc_cursor_set_opt_int64 (mongoc_cursor_t *cursor, const char *option, int64_t value); void _mongoc_cursor_monitor_failed (mongoc_cursor_t *cursor, int64_t duration, mongoc_server_stream_t *stream, const char *cmd_name); bool _mongoc_cursor_monitor_command (mongoc_cursor_t *cursor, mongoc_server_stream_t *server_stream, const bson_t *cmd, const char *cmd_name); void _mongoc_cursor_prepare_find_command (mongoc_cursor_t *cursor, const bson_t *filter, bson_t *command); const bson_t * _mongoc_cursor_initial_query (mongoc_cursor_t *cursor); const bson_t * _mongoc_cursor_get_more (mongoc_cursor_t *cursor); bool _mongoc_cursor_opts_to_flags (mongoc_cursor_t *cursor, mongoc_server_stream_t *stream, mongoc_query_flags_t *flags /* OUT */); void _mongoc_cursor_monitor_succeeded (mongoc_cursor_t *cursor, mongoc_cursor_response_legacy_t *response, int64_t duration, bool first_batch, mongoc_server_stream_t *stream, const char *cmd_name); /* start iterating a reply like * {cursor: {id: 1234, ns: "db.collection", firstBatch: [...]}} or * {cursor: {id: 1234, ns: "db.collection", nextBatch: [...]}} */ void _mongoc_cursor_response_refresh (mongoc_cursor_t *cursor, const bson_t *command, const bson_t *opts, mongoc_cursor_response_t *response); bool _mongoc_cursor_start_reading_response (mongoc_cursor_t *cursor, mongoc_cursor_response_t *response); void _mongoc_cursor_response_read (mongoc_cursor_t *cursor, mongoc_cursor_response_t *response, const bson_t **bson); void _mongoc_cursor_prepare_getmore_command (mongoc_cursor_t *cursor, bson_t *command); void _mongoc_cursor_set_empty (mongoc_cursor_t *cursor); bool _mongoc_cursor_check_and_copy_to 
(mongoc_cursor_t *cursor, const char *err_prefix, const bson_t *src, bson_t *dst); void _mongoc_cursor_prime (mongoc_cursor_t *cursor); /* legacy functions defined in mongoc-cursor-legacy.c */ bool _mongoc_cursor_next (mongoc_cursor_t *cursor, const bson_t **bson); bool _mongoc_cursor_op_query_find (mongoc_cursor_t *cursor, bson_t *filter, mongoc_cursor_response_legacy_t *response); void _mongoc_cursor_op_getmore (mongoc_cursor_t *cursor, mongoc_cursor_response_legacy_t *response); mongoc_cursor_t * _mongoc_cursor_new_with_opts (mongoc_client_t *client, const char *db_and_collection, const bson_t *opts, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, const mongoc_read_concern_t *read_concern); void _mongoc_cursor_response_legacy_init (mongoc_cursor_response_legacy_t *response); void _mongoc_cursor_response_legacy_destroy ( mongoc_cursor_response_legacy_t *response); /* cursor constructors. */ mongoc_cursor_t * _mongoc_cursor_find_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *filter, const bson_t *opts, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, const mongoc_read_concern_t *read_concern); mongoc_cursor_t * _mongoc_cursor_cmd_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *cmd, const bson_t *opts, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, const mongoc_read_concern_t *read_concern); mongoc_cursor_t * _mongoc_cursor_cmd_new_from_reply (mongoc_client_t *client, const bson_t *cmd, const bson_t *opts, bson_t *reply); mongoc_cursor_t * _mongoc_cursor_cmd_deprecated_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *cmd, const mongoc_read_prefs_t *read_prefs); mongoc_cursor_t * _mongoc_cursor_array_new (mongoc_client_t *client, const char *db_and_coll, const bson_t *cmd, const bson_t *opts, const char *field_name); mongoc_cursor_t * _mongoc_cursor_change_stream_new (mongoc_client_t *client, bson_t *reply, const bson_t *opts); bool _mongoc_cursor_change_stream_end_of_batch (mongoc_cursor_t *cursor); const bson_t * _mongoc_cursor_change_stream_get_post_batch_resume_token ( mongoc_cursor_t *cursor); bool _mongoc_cursor_change_stream_has_post_batch_resume_token ( mongoc_cursor_t *cursor); const bson_t * _mongoc_cursor_change_stream_get_reply (mongoc_cursor_t *cursor); BSON_END_DECLS #endif /* MONGOC_CURSOR_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor.c0000644000076500000240000014674613572250757025015 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-error-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-aggregate-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "cursor" #define CURSOR_FAILED(cursor_) ((cursor_)->error.domain != 0) static bool _translate_query_opt (const char *query_field, const char **cmd_field, int *len); bool _mongoc_cursor_set_opt_int64 (mongoc_cursor_t *cursor, const char *option, int64_t value) { bson_iter_t iter; if (bson_iter_init_find (&iter, &cursor->opts, option)) { if (!BSON_ITER_HOLDS_INT64 (&iter)) { return false; } bson_iter_overwrite_int64 (&iter, value); return true; } return BSON_APPEND_INT64 (&cursor->opts, option, value); } static int64_t _mongoc_cursor_get_opt_int64 (const mongoc_cursor_t *cursor, const char *option, int64_t default_value) { bson_iter_t iter; if (bson_iter_init_find (&iter, &cursor->opts, option)) { return bson_iter_as_int64 (&iter); } return default_value; } static bool _mongoc_cursor_set_opt_bool (mongoc_cursor_t *cursor, const char *option, bool value) { bson_iter_t iter; if (bson_iter_init_find (&iter, &cursor->opts, option)) { if (!BSON_ITER_HOLDS_BOOL (&iter)) { return false; } bson_iter_overwrite_bool (&iter, value); return true; } return BSON_APPEND_BOOL (&cursor->opts, option, value); } bool _mongoc_cursor_get_opt_bool (const mongoc_cursor_t *cursor, const char *option) { bson_iter_t iter; if (bson_iter_init_find (&iter, &cursor->opts, option)) { return bson_iter_as_bool (&iter); } return false; } int32_t _mongoc_n_return (mongoc_cursor_t *cursor) { int64_t limit; int64_t batch_size; int64_t n_return; /* calculate numberToReturn according to: * https://github.com/mongodb/specifications/blob/master/source/crud/crud.rst#combining-limit-and-batch-size-for-the-wire-protocol */ limit = mongoc_cursor_get_limit (cursor); batch_size = mongoc_cursor_get_batch_size (cursor); if (limit < 0) { n_return = limit; } else if (limit == 0) { n_return = batch_size; } else if (batch_size == 0) { n_return = limit; } else if (limit < batch_size) { n_return = limit; } else { n_return = batch_size; } /* if a specified limit exists, account for documents already returned. */ if (limit > 0 && cursor->count) { int64_t remaining = limit - cursor->count; /* remaining can be 0 if we have retrieved "limit" documents, but still * have a cursor id: SERVER-21086. use nonzero batchSize to fetch final * empty batch and trigger server to close cursor. 
*/ if (remaining <= 0) { return 1; } n_return = BSON_MIN (n_return, remaining); } /* check boundary conditions */ if (n_return < INT32_MIN) { return INT32_MIN; } else if (n_return > INT32_MAX) { return INT32_MAX; } else { return (int32_t) n_return; } } void _mongoc_set_cursor_ns (mongoc_cursor_t *cursor, const char *ns, uint32_t nslen) { const char *dot; bson_strncpy (cursor->ns, ns, sizeof cursor->ns); cursor->nslen = BSON_MIN (nslen, sizeof cursor->ns); dot = strstr (cursor->ns, "."); if (dot) { cursor->dblen = (uint32_t) (dot - cursor->ns); } else { /* a database name with no collection name */ cursor->dblen = cursor->nslen; } } /* return first key beginning with $, or NULL. precondition: bson is valid. */ static const char * _first_dollar_field (const bson_t *bson) { bson_iter_t iter; const char *key; BSON_ASSERT (bson_iter_init (&iter, bson)); while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); if (key[0] == '$') { return key; } } return NULL; } /* if src is non-NULL, it is validated and copied to dst. returns false and * sets the cursor error if validation fails. */ bool _mongoc_cursor_check_and_copy_to (mongoc_cursor_t *cursor, const char *err_prefix, const bson_t *src, bson_t *dst) { bson_error_t validate_err; bson_init (dst); if (src) { if (!bson_validate_with_error ( src, BSON_VALIDATE_EMPTY_KEYS, &validate_err)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Invalid %s: %s", err_prefix, validate_err.message); return false; } bson_destroy (dst); bson_copy_to (src, dst); } return true; } mongoc_cursor_t * _mongoc_cursor_new_with_opts (mongoc_client_t *client, const char *db_and_collection, const bson_t *opts, const mongoc_read_prefs_t *user_prefs, const mongoc_read_prefs_t *default_prefs, const mongoc_read_concern_t *read_concern) { mongoc_cursor_t *cursor; mongoc_topology_description_type_t td_type; uint32_t server_id; mongoc_read_concern_t *read_concern_local = NULL; bson_error_t validate_err; const char *dollar_field; bson_iter_t iter; ENTRY; BSON_ASSERT (client); cursor = (mongoc_cursor_t *) bson_malloc0 (sizeof *cursor); cursor->client = client; cursor->state = UNPRIMED; cursor->client_generation = client->generation; bson_init (&cursor->opts); bson_init (&cursor->error_doc); if (opts) { if (!bson_validate_with_error ( opts, BSON_VALIDATE_EMPTY_KEYS, &validate_err)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Invalid opts: %s", validate_err.message); GOTO (finish); } dollar_field = _first_dollar_field (opts); if (dollar_field) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot use $-modifiers in opts: \"%s\"", dollar_field); GOTO (finish); } if (bson_iter_init_find (&iter, opts, "sessionId")) { if (!_mongoc_client_session_from_iter ( client, &iter, &cursor->client_session, &cursor->error)) { GOTO (finish); } cursor->explicit_session = true; } if (bson_iter_init_find (&iter, opts, "readConcern")) { read_concern_local = _mongoc_read_concern_new_from_iter (&iter, &cursor->error); if (!read_concern_local) { /* invalid read concern */ GOTO (finish); } read_concern = read_concern_local; } /* true if there's a valid serverId or no serverId, false on err */ if (!_mongoc_get_server_id_from_opts (opts, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, &server_id, &cursor->error)) { GOTO (finish); } if (server_id) { (void) mongoc_cursor_set_hint (cursor, server_id); } bson_copy_to_excluding_noinit (opts, &cursor->opts, "serverId", 
"sessionId", "bypassDocumentValidation", NULL); /* only include bypassDocumentValidation if it's true */ if (bson_iter_init_find (&iter, opts, "bypassDocumentValidation") && bson_iter_as_bool (&iter)) { BSON_APPEND_BOOL (&cursor->opts, "bypassDocumentValidation", true); } } if (_mongoc_client_session_in_txn (cursor->client_session)) { if (!IS_PREF_PRIMARY (user_prefs)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Read preference in a transaction must be primary"); GOTO (finish); } cursor->read_prefs = mongoc_read_prefs_copy (cursor->client_session->txn.opts.read_prefs); if (bson_has_field (opts, "readConcern")) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot set read concern after starting transaction"); GOTO (finish); } } else if (user_prefs) { cursor->read_prefs = mongoc_read_prefs_copy (user_prefs); } else if (default_prefs) { cursor->read_prefs = mongoc_read_prefs_copy (default_prefs); } else { cursor->read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); } cursor->read_concern = read_concern ? mongoc_read_concern_copy (read_concern) : mongoc_read_concern_new (); if (db_and_collection) { _mongoc_set_cursor_ns ( cursor, db_and_collection, (uint32_t) strlen (db_and_collection)); } if (_mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_EXHAUST)) { if (_mongoc_cursor_get_opt_int64 (cursor, MONGOC_CURSOR_LIMIT, 0)) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot specify both 'exhaust' and 'limit'."); GOTO (finish); } td_type = _mongoc_topology_get_type (client->topology); if (td_type == MONGOC_TOPOLOGY_SHARDED) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot use exhaust cursor with sharded cluster."); GOTO (finish); } } (void) _mongoc_read_prefs_validate (cursor->read_prefs, &cursor->error); finish: mongoc_read_concern_destroy (read_concern_local); mongoc_counter_cursors_active_inc (); RETURN (cursor); } static bool _translate_query_opt (const char *query_field, const char **cmd_field, int *len) { if (query_field[0] != '$') { *cmd_field = query_field; *len = -1; return true; } /* strip the leading '$' */ query_field++; if (!strcmp (MONGOC_CURSOR_ORDERBY, query_field)) { *cmd_field = MONGOC_CURSOR_SORT; *len = MONGOC_CURSOR_SORT_LEN; } else if (!strcmp (MONGOC_CURSOR_SHOW_DISK_LOC, query_field)) { /* <= MongoDb 3.0 */ *cmd_field = MONGOC_CURSOR_SHOW_RECORD_ID; *len = MONGOC_CURSOR_SHOW_RECORD_ID_LEN; } else if (!strcmp (MONGOC_CURSOR_HINT, query_field)) { *cmd_field = MONGOC_CURSOR_HINT; *len = MONGOC_CURSOR_HINT_LEN; } else if (!strcmp (MONGOC_CURSOR_COMMENT, query_field)) { *cmd_field = MONGOC_CURSOR_COMMENT; *len = MONGOC_CURSOR_COMMENT_LEN; } else if (!strcmp (MONGOC_CURSOR_MAX_SCAN, query_field)) { *cmd_field = MONGOC_CURSOR_MAX_SCAN; *len = MONGOC_CURSOR_MAX_SCAN_LEN; } else if (!strcmp (MONGOC_CURSOR_MAX_TIME_MS, query_field)) { *cmd_field = MONGOC_CURSOR_MAX_TIME_MS; *len = MONGOC_CURSOR_MAX_TIME_MS_LEN; } else if (!strcmp (MONGOC_CURSOR_MAX, query_field)) { *cmd_field = MONGOC_CURSOR_MAX; *len = MONGOC_CURSOR_MAX_LEN; } else if (!strcmp (MONGOC_CURSOR_MIN, query_field)) { *cmd_field = MONGOC_CURSOR_MIN; *len = MONGOC_CURSOR_MIN_LEN; } else if (!strcmp (MONGOC_CURSOR_RETURN_KEY, query_field)) { *cmd_field = MONGOC_CURSOR_RETURN_KEY; *len = MONGOC_CURSOR_RETURN_KEY_LEN; } else if (!strcmp (MONGOC_CURSOR_SNAPSHOT, query_field)) { *cmd_field = MONGOC_CURSOR_SNAPSHOT; *len = 
MONGOC_CURSOR_SNAPSHOT_LEN; } else { /* not a special command field, must be a query operator like $or */ return false; } return true; } /* set up a new opt bson from older ways of specifying options. * slave_ok may be NULL. * error may be NULL. */ void _mongoc_cursor_flags_to_opts (mongoc_query_flags_t qflags, bson_t *opts, /* IN/OUT */ bool *slave_ok /* OUT */) { ENTRY; BSON_ASSERT (opts); if (slave_ok) { *slave_ok = !!(qflags & MONGOC_QUERY_SLAVE_OK); } if (qflags & MONGOC_QUERY_TAILABLE_CURSOR) { bson_append_bool ( opts, MONGOC_CURSOR_TAILABLE, MONGOC_CURSOR_TAILABLE_LEN, true); } if (qflags & MONGOC_QUERY_OPLOG_REPLAY) { bson_append_bool (opts, MONGOC_CURSOR_OPLOG_REPLAY, MONGOC_CURSOR_OPLOG_REPLAY_LEN, true); } if (qflags & MONGOC_QUERY_NO_CURSOR_TIMEOUT) { bson_append_bool (opts, MONGOC_CURSOR_NO_CURSOR_TIMEOUT, MONGOC_CURSOR_NO_CURSOR_TIMEOUT_LEN, true); } if (qflags & MONGOC_QUERY_AWAIT_DATA) { bson_append_bool ( opts, MONGOC_CURSOR_AWAIT_DATA, MONGOC_CURSOR_AWAIT_DATA_LEN, true); } if (qflags & MONGOC_QUERY_EXHAUST) { bson_append_bool ( opts, MONGOC_CURSOR_EXHAUST, MONGOC_CURSOR_EXHAUST_LEN, true); } if (qflags & MONGOC_QUERY_PARTIAL) { bson_append_bool (opts, MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS, MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS_LEN, true); } } /* Checks if the passed query was wrapped in a $query, and if so, parses the * query modifiers: * https://docs.mongodb.com/manual/reference/operator/query-modifier/ * and translates them to find command options: * https://docs.mongodb.com/manual/reference/command/find/ * opts must be initialized, and may already have options set. * unwrapped must be uninitialized, and will be initialized at return. * Returns true if query was unwrapped. */ bool _mongoc_cursor_translate_dollar_query_opts (const bson_t *query, bson_t *opts, bson_t *unwrapped, bson_error_t *error) { bool has_filter = false; const char *key; bson_iter_t iter; const char *opt_key; int len; uint32_t data_len; const uint8_t *data; bson_error_t error_local = {0}; ENTRY; BSON_ASSERT (query); BSON_ASSERT (opts); /* If the query is explicitly specified wrapped in $query, unwrap it and * translate the options to new options. 
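* e.g. (illustrative) {$query: {a: 1}, $orderby: {b: 1}, $maxTimeMS: 100} yields a filter of {a: 1} and opts {sort: {b: 1}, maxTimeMS: 100}.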
*/ if (bson_has_field (query, "$query")) { /* like "{$query: {a: 1}, $orderby: {b: 1}, $otherModifier: true}" */ if (!bson_iter_init (&iter, query)) { bson_set_error (&error_local, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid BSON in query document"); GOTO (done); } while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); if (key[0] != '$') { bson_set_error (&error_local, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot mix $query with non-dollar field '%s'", key); GOTO (done); } if (!strcmp (key, "$query")) { /* set "filter" to the incoming document's "$query" */ bson_iter_document (&iter, &data_len, &data); if (!bson_init_static (unwrapped, data, (size_t) data_len)) { bson_set_error (&error_local, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid BSON in $query subdocument"); GOTO (done); } has_filter = true; } else if (_translate_query_opt (key, &opt_key, &len)) { /* "$orderby" becomes "sort", etc., "$unknown" -> "unknown" */ if (!bson_append_iter (opts, opt_key, len, &iter)) { bson_set_error (&error_local, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Error adding \"%s\" to query", opt_key); } } else { /* strip leading "$" */ if (!bson_append_iter (opts, key + 1, -1, &iter)) { bson_set_error (&error_local, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Error adding \"%s\" to query", key); } } } } done: if (error) { memcpy (error, &error_local, sizeof (bson_error_t)); } if (!has_filter) { bson_init (unwrapped); } RETURN (has_filter); } void mongoc_cursor_destroy (mongoc_cursor_t *cursor) { char db[MONGOC_NAMESPACE_MAX]; ENTRY; if (!cursor) { EXIT; } if (cursor->impl.destroy) { cursor->impl.destroy (&cursor->impl); } if (cursor->client_generation == cursor->client->generation) { if (cursor->in_exhaust) { cursor->client->in_exhaust = false; if (cursor->state != DONE) { /* The only way to stop an exhaust cursor is to kill the connection */ mongoc_cluster_disconnect_node ( &cursor->client->cluster, cursor->server_id, false, NULL); } } else if (cursor->cursor_id) { bson_strncpy (db, cursor->ns, cursor->dblen + 1); _mongoc_client_kill_cursor (cursor->client, cursor->server_id, cursor->cursor_id, cursor->operation_id, db, cursor->ns + cursor->dblen + 1, cursor->client_session); } } if (cursor->client_session && !cursor->explicit_session) { mongoc_client_session_destroy (cursor->client_session); } mongoc_read_prefs_destroy (cursor->read_prefs); mongoc_read_concern_destroy (cursor->read_concern); mongoc_write_concern_destroy (cursor->write_concern); bson_destroy (&cursor->opts); bson_destroy (&cursor->error_doc); bson_free (cursor); mongoc_counter_cursors_active_dec (); mongoc_counter_cursors_disposed_inc (); EXIT; } mongoc_server_stream_t * _mongoc_cursor_fetch_stream (mongoc_cursor_t *cursor) { mongoc_server_stream_t *server_stream; bson_t reply; ENTRY; if (cursor->server_id) { server_stream = mongoc_cluster_stream_for_server (&cursor->client->cluster, cursor->server_id, true /* reconnect_ok */, cursor->client_session, &reply, &cursor->error); } else { server_stream = mongoc_cluster_stream_for_reads (&cursor->client->cluster, cursor->read_prefs, cursor->client_session, &reply, &cursor->error); if (server_stream) { cursor->server_id = server_stream->sd->id; } } if (!server_stream) { bson_destroy (&cursor->error_doc); bson_copy_to (&reply, &cursor->error_doc); bson_destroy (&reply); } RETURN (server_stream); } bool _mongoc_cursor_monitor_command (mongoc_cursor_t *cursor, mongoc_server_stream_t *server_stream, const bson_t *cmd, const char *cmd_name) { 
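/* notify the application's command-started APM callback, if one is registered, about the command this cursor is sending; returns true in either case. */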
mongoc_client_t *client; mongoc_apm_command_started_t event; char db[MONGOC_NAMESPACE_MAX]; ENTRY; client = cursor->client; if (!client->apm_callbacks.started) { /* successful */ RETURN (true); } bson_strncpy (db, cursor->ns, cursor->dblen + 1); mongoc_apm_command_started_init (&event, cmd, db, cmd_name, client->cluster.request_id, cursor->operation_id, &server_stream->sd->host, server_stream->sd->id, client->apm_context); client->apm_callbacks.started (&event); mongoc_apm_command_started_cleanup (&event); RETURN (true); } /* append array of docs from current cursor batch */ static void _mongoc_cursor_append_docs_array (mongoc_cursor_t *cursor, bson_t *docs, mongoc_cursor_response_legacy_t *response) { bool eof = false; char str[16]; const char *key; uint32_t i = 0; size_t keylen; const bson_t *doc; while ((doc = bson_reader_read (response->reader, &eof))) { keylen = bson_uint32_to_string (i, &key, str, sizeof str); bson_append_document (docs, key, (int) keylen, doc); } bson_reader_reset (response->reader); } void _mongoc_cursor_monitor_succeeded (mongoc_cursor_t *cursor, mongoc_cursor_response_legacy_t *response, int64_t duration, bool first_batch, mongoc_server_stream_t *stream, const char *cmd_name) { bson_t docs_array; mongoc_apm_command_succeeded_t event; mongoc_client_t *client; bson_t reply; bson_t reply_cursor; ENTRY; client = cursor->client; if (!client->apm_callbacks.succeeded) { EXIT; } /* we sent OP_QUERY/OP_GETMORE, fake a reply to find/getMore command: * {ok: 1, cursor: {id: 17, ns: "...", first/nextBatch: [ ... docs ... ]}} */ bson_init (&docs_array); _mongoc_cursor_append_docs_array (cursor, &docs_array, response); bson_init (&reply); bson_append_int32 (&reply, "ok", 2, 1); bson_append_document_begin (&reply, "cursor", 6, &reply_cursor); bson_append_int64 (&reply_cursor, "id", 2, mongoc_cursor_get_id (cursor)); bson_append_utf8 (&reply_cursor, "ns", 2, cursor->ns, cursor->nslen); bson_append_array (&reply_cursor, first_batch ? "firstBatch" : "nextBatch", first_batch ? 
10 : 9, &docs_array); bson_append_document_end (&reply, &reply_cursor); bson_destroy (&docs_array); mongoc_apm_command_succeeded_init (&event, duration, &reply, cmd_name, client->cluster.request_id, cursor->operation_id, &stream->sd->host, stream->sd->id, client->apm_context); client->apm_callbacks.succeeded (&event); mongoc_apm_command_succeeded_cleanup (&event); bson_destroy (&reply); EXIT; } void _mongoc_cursor_monitor_failed (mongoc_cursor_t *cursor, int64_t duration, mongoc_server_stream_t *stream, const char *cmd_name) { mongoc_apm_command_failed_t event; mongoc_client_t *client; bson_t reply; ENTRY; client = cursor->client; if (!client->apm_callbacks.failed) { EXIT; } /* we sent OP_QUERY/OP_GETMORE, fake a reply to find/getMore command: * {ok: 0} */ bson_init (&reply); bson_append_int32 (&reply, "ok", 2, 0); mongoc_apm_command_failed_init (&event, duration, cmd_name, &cursor->error, &reply, client->cluster.request_id, cursor->operation_id, &stream->sd->host, stream->sd->id, client->apm_context); client->apm_callbacks.failed (&event); mongoc_apm_command_failed_cleanup (&event); bson_destroy (&reply); EXIT; } #define ADD_FLAG(_flags, _value) \ do { \ if (!BSON_ITER_HOLDS_BOOL (&iter)) { \ bson_set_error (&cursor->error, \ MONGOC_ERROR_COMMAND, \ MONGOC_ERROR_COMMAND_INVALID_ARG, \ "invalid option %s, should be type bool", \ key); \ return false; \ } \ if (bson_iter_as_bool (&iter)) { \ *_flags |= _value; \ } \ } while (false); bool _mongoc_cursor_opts_to_flags (mongoc_cursor_t *cursor, mongoc_server_stream_t *stream, mongoc_query_flags_t *flags /* OUT */) { bson_iter_t iter; const char *key; *flags = MONGOC_QUERY_NONE; if (!bson_iter_init (&iter, &cursor->opts)) { bson_set_error (&cursor->error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); if (!strcmp (key, MONGOC_CURSOR_ALLOW_PARTIAL_RESULTS)) { ADD_FLAG (flags, MONGOC_QUERY_PARTIAL); } else if (!strcmp (key, MONGOC_CURSOR_AWAIT_DATA)) { ADD_FLAG (flags, MONGOC_QUERY_AWAIT_DATA); } else if (!strcmp (key, MONGOC_CURSOR_EXHAUST)) { ADD_FLAG (flags, MONGOC_QUERY_EXHAUST); } else if (!strcmp (key, MONGOC_CURSOR_NO_CURSOR_TIMEOUT)) { ADD_FLAG (flags, MONGOC_QUERY_NO_CURSOR_TIMEOUT); } else if (!strcmp (key, MONGOC_CURSOR_OPLOG_REPLAY)) { ADD_FLAG (flags, MONGOC_QUERY_OPLOG_REPLAY); } else if (!strcmp (key, MONGOC_CURSOR_TAILABLE)) { ADD_FLAG (flags, MONGOC_QUERY_TAILABLE_CURSOR); } } if (cursor->slave_ok) { *flags |= MONGOC_QUERY_SLAVE_OK; } else if (cursor->server_id && (stream->topology_type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY || stream->topology_type == MONGOC_TOPOLOGY_RS_NO_PRIMARY) && stream->sd->type != MONGOC_SERVER_RS_PRIMARY) { *flags |= MONGOC_QUERY_SLAVE_OK; } return true; } bool _mongoc_cursor_run_command (mongoc_cursor_t *cursor, const bson_t *command, const bson_t *opts, bson_t *reply, bool retry_prohibited) { mongoc_server_stream_t *server_stream; bson_iter_t iter; mongoc_cmd_parts_t parts; const char *cmd_name; bool is_primary; mongoc_read_prefs_t *prefs = NULL; char db[MONGOC_NAMESPACE_MAX]; mongoc_session_opt_t *session_opts; bool ret = false; bool is_retryable = true; ENTRY; mongoc_cmd_parts_init ( &parts, cursor->client, db, MONGOC_QUERY_NONE, command); parts.is_read_command = true; parts.read_prefs = cursor->read_prefs; parts.assembled.operation_id = cursor->operation_id; server_stream = _mongoc_cursor_fetch_stream (cursor); if (!server_stream) { _mongoc_bson_init_if_set (reply); GOTO (done); } if (opts) { if 
(!bson_iter_init (&iter, opts)) { _mongoc_bson_init_if_set (reply); bson_set_error (&cursor->error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid BSON in opts document"); GOTO (done); } if (!mongoc_cmd_parts_append_opts (&parts, &iter, server_stream->sd->max_wire_version, &cursor->error)) { _mongoc_bson_init_if_set (reply); GOTO (done); } } if (parts.assembled.session) { /* initial query/aggregate/etc, and opts contains "sessionId" */ BSON_ASSERT (!cursor->client_session); BSON_ASSERT (!cursor->explicit_session); cursor->client_session = parts.assembled.session; cursor->explicit_session = true; } else if (cursor->client_session) { /* a getMore with implicit or explicit session already acquired */ mongoc_cmd_parts_set_session (&parts, cursor->client_session); } else { /* try to create an implicit session. not causally consistent. we keep * the session but leave cursor->explicit_session as 0, so we use the * same lsid for getMores but destroy the session when the cursor dies. */ session_opts = mongoc_session_opts_new (); mongoc_session_opts_set_causal_consistency (session_opts, false); /* returns NULL if sessions aren't supported. ignore errors. */ cursor->client_session = mongoc_client_start_session (cursor->client, session_opts, NULL); mongoc_cmd_parts_set_session (&parts, cursor->client_session); mongoc_session_opts_destroy (session_opts); } if (!mongoc_cmd_parts_set_read_concern (&parts, cursor->read_concern, server_stream->sd->max_wire_version, &cursor->error)) { _mongoc_bson_init_if_set (reply); GOTO (done); } bson_strncpy (db, cursor->ns, cursor->dblen + 1); parts.assembled.db_name = db; if (!_mongoc_cursor_opts_to_flags ( cursor, server_stream, &parts.user_query_flags)) { _mongoc_bson_init_if_set (reply); GOTO (done); } /* we might use mongoc_cursor_set_hint to target a secondary but have no * read preference, so the secondary rejects the read. same if we have a * direct connection to a secondary (topology type "single"). with * OP_QUERY we handle this by setting slaveOk. here we use $readPreference. 
*/ cmd_name = _mongoc_get_command_name (command); is_primary = !cursor->read_prefs || cursor->read_prefs->mode == MONGOC_READ_PRIMARY; if (strcmp (cmd_name, "getMore") != 0 && server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG && is_primary && parts.user_query_flags & MONGOC_QUERY_SLAVE_OK) { parts.read_prefs = prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY_PREFERRED); } else { parts.read_prefs = cursor->read_prefs; } is_retryable = _is_retryable_read (&parts, server_stream); if (!strcmp (cmd_name, "getMore")) { is_retryable = false; } if (!strcmp (cmd_name, "aggregate")) { bson_iter_t pipeline_iter; if (bson_iter_init_find (&pipeline_iter, command, "pipeline") && BSON_ITER_HOLDS_ARRAY (&pipeline_iter) && bson_iter_recurse (&pipeline_iter, &pipeline_iter)) { if (_has_write_key (&pipeline_iter)) { is_retryable = false; } } } if (is_retryable && retry_prohibited) { is_retryable = false; } if (cursor->write_concern && !mongoc_write_concern_is_default (cursor->write_concern) && server_stream->sd->max_wire_version >= WIRE_VERSION_CMD_WRITE_CONCERN) { parts.assembled.is_acknowledged = mongoc_write_concern_is_acknowledged (cursor->write_concern); mongoc_write_concern_append (cursor->write_concern, &parts.extra); } if (!mongoc_cmd_parts_assemble (&parts, server_stream, &cursor->error)) { _mongoc_bson_init_if_set (reply); GOTO (done); } retry: ret = mongoc_cluster_run_command_monitored ( &cursor->client->cluster, &parts.assembled, reply, &cursor->error); if (ret) { memset (&cursor->error, 0, sizeof (bson_error_t)); } if (is_retryable && _mongoc_read_error_get_type (ret, &cursor->error, reply) == MONGOC_READ_ERR_RETRY) { is_retryable = false; mongoc_server_stream_cleanup (server_stream); server_stream = mongoc_cluster_stream_for_reads (&cursor->client->cluster, cursor->read_prefs, cursor->client_session, reply, &cursor->error); if (server_stream && server_stream->sd->max_wire_version >= WIRE_VERSION_RETRY_READS) { cursor->server_id = server_stream->sd->id; parts.assembled.server_stream = server_stream; bson_destroy (reply); GOTO (retry); } } if (cursor->error.domain) { bson_destroy (&cursor->error_doc); bson_copy_to (reply, &cursor->error_doc); } /* Read and Write Concern Spec: "Drivers SHOULD parse server replies for a * "writeConcernError" field and report the error only in command-specific * helper methods that take a separate write concern parameter or an options * parameter that may contain a write concern option. * * Only command helpers with names like "_with_write_concern" can create * cursors with a non-NULL write_concern field. */ if (ret && cursor->write_concern) { ret = !_mongoc_parse_wc_err (reply, &cursor->error); } done: mongoc_server_stream_cleanup (server_stream); mongoc_cmd_parts_cleanup (&parts); mongoc_read_prefs_destroy (prefs); return ret; } void _mongoc_cursor_collection (const mongoc_cursor_t *cursor, const char **collection, int *collection_len) { /* ns is like "db.collection". Collection name is located past the ".". */ *collection = cursor->ns + (cursor->dblen + 1); /* Collection name's length is ns length, minus length of db name and ".". 
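* e.g. ns "db.collection" has nslen 13 and dblen 2, so *collection points at "collection" and *collection_len is 10.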
*/ *collection_len = cursor->nslen - cursor->dblen - 1; BSON_ASSERT (*collection_len > 0); } void _mongoc_cursor_prepare_find_command (mongoc_cursor_t *cursor, const bson_t *filter, bson_t *command) { const char *collection; int collection_len; _mongoc_cursor_collection (cursor, &collection, &collection_len); bson_append_utf8 (command, MONGOC_CURSOR_FIND, MONGOC_CURSOR_FIND_LEN, collection, collection_len); bson_append_document ( command, MONGOC_CURSOR_FILTER, MONGOC_CURSOR_FILTER_LEN, filter); } bool mongoc_cursor_error (mongoc_cursor_t *cursor, bson_error_t *error) { ENTRY; RETURN (mongoc_cursor_error_document (cursor, error, NULL)); } bool mongoc_cursor_error_document (mongoc_cursor_t *cursor, bson_error_t *error, const bson_t **doc) { ENTRY; BSON_ASSERT (cursor); if (BSON_UNLIKELY (CURSOR_FAILED (cursor))) { bson_set_error (error, cursor->error.domain, cursor->error.code, "%s", cursor->error.message); if (doc) { *doc = &cursor->error_doc; } RETURN (true); } if (doc) { *doc = NULL; } RETURN (false); } static mongoc_cursor_state_t _call_transition (mongoc_cursor_t *cursor) { mongoc_cursor_state_t state = cursor->state; _mongoc_cursor_impl_transition_t fn = NULL; switch (state) { case UNPRIMED: fn = cursor->impl.prime; break; case IN_BATCH: fn = cursor->impl.pop_from_batch; break; case END_OF_BATCH: fn = cursor->impl.get_next_batch; break; case DONE: default: fn = NULL; break; } if (!fn) { return DONE; } state = fn (cursor); if (cursor->error.domain) { state = DONE; } return state; } bool mongoc_cursor_next (mongoc_cursor_t *cursor, const bson_t **bson) { bool ret = false; bool attempted_refresh = false; ENTRY; BSON_ASSERT (cursor); BSON_ASSERT (bson); TRACE ("cursor_id(%" PRId64 ")", cursor->cursor_id); if (cursor->client_generation != cursor->client->generation) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot advance cursor after client reset"); RETURN (false); } if (bson) { *bson = NULL; } if (CURSOR_FAILED (cursor)) { RETURN (false); } if (cursor->state == DONE) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot advance a completed or failed cursor."); RETURN (false); } /* * We cannot proceed if another cursor is receiving results in exhaust mode. */ if (cursor->client->in_exhaust && !cursor->in_exhaust) { bson_set_error (&cursor->error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_IN_EXHAUST, "Another cursor derived from this client is in exhaust."); RETURN (false); } cursor->current = NULL; /* if an error was set on this cursor before calling next, transition to DONE * immediately. */ if (cursor->error.domain) { cursor->state = DONE; GOTO (done); } while (cursor->state != DONE) { /* even when there is no data to return, some cursors remain open and * continue sending empty batches (e.g. a tailable or change stream * cursor). in that case, do not attempt to get another batch. */ if (cursor->state == END_OF_BATCH) { if (attempted_refresh) { RETURN (false); } attempted_refresh = true; } cursor->state = _call_transition (cursor); /* check if we received a document. 
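* each pass through _call_transition advances the UNPRIMED / IN_BATCH / END_OF_BATCH / DONE state machine; the impl callbacks set cursor->current when a document is ready.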
*/ if (cursor->current) { *bson = cursor->current; ret = true; GOTO (done); } if (cursor->state == DONE) { GOTO (done); } } done: cursor->count++; RETURN (ret); } bool mongoc_cursor_more (mongoc_cursor_t *cursor) { ENTRY; BSON_ASSERT (cursor); if (CURSOR_FAILED (cursor)) { RETURN (false); } RETURN (cursor->state != DONE); } void mongoc_cursor_get_host (mongoc_cursor_t *cursor, mongoc_host_list_t *host) { mongoc_server_description_t *description; BSON_ASSERT (cursor); BSON_ASSERT (host); memset (host, 0, sizeof *host); if (!cursor->server_id) { MONGOC_WARNING ("%s(): Must send query before fetching peer.", BSON_FUNC); return; } description = mongoc_topology_server_by_id ( cursor->client->topology, cursor->server_id, &cursor->error); if (!description) { return; } *host = description->host; mongoc_server_description_destroy (description); EXIT; } mongoc_cursor_t * mongoc_cursor_clone (const mongoc_cursor_t *cursor) { mongoc_cursor_t *_clone; BSON_ASSERT (cursor); _clone = (mongoc_cursor_t *) bson_malloc0 (sizeof *_clone); _clone->client = cursor->client; _clone->nslen = cursor->nslen; _clone->dblen = cursor->dblen; _clone->explicit_session = cursor->explicit_session; if (cursor->read_prefs) { _clone->read_prefs = mongoc_read_prefs_copy (cursor->read_prefs); } if (cursor->read_concern) { _clone->read_concern = mongoc_read_concern_copy (cursor->read_concern); } if (cursor->write_concern) { _clone->write_concern = mongoc_write_concern_copy (cursor->write_concern); } if (cursor->explicit_session) { _clone->client_session = cursor->client_session; } bson_copy_to (&cursor->opts, &_clone->opts); bson_init (&_clone->error_doc); bson_strncpy (_clone->ns, cursor->ns, sizeof _clone->ns); /* copy the context functions by default. */ memcpy (&_clone->impl, &cursor->impl, sizeof (cursor->impl)); if (cursor->impl.clone) { cursor->impl.clone (&_clone->impl, &cursor->impl); } mongoc_counter_cursors_active_inc (); RETURN (_clone); } /* *-------------------------------------------------------------------------- * * mongoc_cursor_is_alive -- * * Deprecated for mongoc_cursor_more. 
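*       This function simply forwards to mongoc_cursor_more.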
* *-------------------------------------------------------------------------- */ bool mongoc_cursor_is_alive (const mongoc_cursor_t *cursor) /* IN */ { return mongoc_cursor_more ((mongoc_cursor_t *) cursor); } const bson_t * mongoc_cursor_current (const mongoc_cursor_t *cursor) /* IN */ { BSON_ASSERT (cursor); return cursor->current; } void mongoc_cursor_set_batch_size (mongoc_cursor_t *cursor, uint32_t batch_size) { BSON_ASSERT (cursor); _mongoc_cursor_set_opt_int64 ( cursor, MONGOC_CURSOR_BATCH_SIZE, (int64_t) batch_size); } uint32_t mongoc_cursor_get_batch_size (const mongoc_cursor_t *cursor) { BSON_ASSERT (cursor); return (uint32_t) _mongoc_cursor_get_opt_int64 ( cursor, MONGOC_CURSOR_BATCH_SIZE, 0); } bool mongoc_cursor_set_limit (mongoc_cursor_t *cursor, int64_t limit) { BSON_ASSERT (cursor); if (cursor->state == UNPRIMED) { if (limit < 0) { return _mongoc_cursor_set_opt_int64 ( cursor, MONGOC_CURSOR_LIMIT, -limit) && _mongoc_cursor_set_opt_bool ( cursor, MONGOC_CURSOR_SINGLE_BATCH, true); } else { return _mongoc_cursor_set_opt_int64 ( cursor, MONGOC_CURSOR_LIMIT, limit); } } else { return false; } } int64_t mongoc_cursor_get_limit (const mongoc_cursor_t *cursor) { int64_t limit; bool single_batch; BSON_ASSERT (cursor); limit = _mongoc_cursor_get_opt_int64 (cursor, MONGOC_CURSOR_LIMIT, 0); single_batch = _mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_SINGLE_BATCH); if (limit > 0 && single_batch) { limit = -limit; } return limit; } bool mongoc_cursor_set_hint (mongoc_cursor_t *cursor, uint32_t server_id) { BSON_ASSERT (cursor); if (cursor->server_id) { MONGOC_ERROR ("mongoc_cursor_set_hint: server_id already set"); return false; } if (!server_id) { MONGOC_ERROR ("mongoc_cursor_set_hint: cannot set server_id to 0"); return false; } cursor->server_id = server_id; return true; } uint32_t mongoc_cursor_get_hint (const mongoc_cursor_t *cursor) { BSON_ASSERT (cursor); return cursor->server_id; } int64_t mongoc_cursor_get_id (const mongoc_cursor_t *cursor) { BSON_ASSERT (cursor); return cursor->cursor_id; } void mongoc_cursor_set_max_await_time_ms (mongoc_cursor_t *cursor, uint32_t max_await_time_ms) { BSON_ASSERT (cursor); if (cursor->state == UNPRIMED) { _mongoc_cursor_set_opt_int64 ( cursor, MONGOC_CURSOR_MAX_AWAIT_TIME_MS, (int64_t) max_await_time_ms); } } uint32_t mongoc_cursor_get_max_await_time_ms (const mongoc_cursor_t *cursor) { bson_iter_t iter; BSON_ASSERT (cursor); if (bson_iter_init_find ( &iter, &cursor->opts, MONGOC_CURSOR_MAX_AWAIT_TIME_MS)) { return (uint32_t) bson_iter_as_int64 (&iter); } return 0; } /* deprecated for mongoc_cursor_new_from_command_reply_with_opts */ mongoc_cursor_t * mongoc_cursor_new_from_command_reply (mongoc_client_t *client, bson_t *reply, uint32_t server_id) { mongoc_cursor_t *cursor; bson_t cmd = BSON_INITIALIZER; bson_t opts = BSON_INITIALIZER; BSON_ASSERT (client); BSON_ASSERT (reply); /* options are passed through by adding them to reply. 
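* i.e. any extra fields in the reply document other than "cursor", "ok", "operationTime", "$clusterTime" and "$gleStats" are copied into the new cursor's opts.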
*/ bson_copy_to_excluding_noinit (reply, &opts, "cursor", "ok", "operationTime", "$clusterTime", "$gleStats", NULL); if (server_id) { bson_append_int64 (&opts, "serverId", 8, server_id); } cursor = _mongoc_cursor_cmd_new_from_reply (client, &cmd, &opts, reply); bson_destroy (&cmd); bson_destroy (&opts); return cursor; } mongoc_cursor_t * mongoc_cursor_new_from_command_reply_with_opts (mongoc_client_t *client, bson_t *reply, const bson_t *opts) { mongoc_cursor_t *cursor; bson_t cmd = BSON_INITIALIZER; BSON_ASSERT (client); BSON_ASSERT (reply); cursor = _mongoc_cursor_cmd_new_from_reply (client, &cmd, opts, reply); bson_destroy (&cmd); return cursor; } bool _mongoc_cursor_start_reading_response (mongoc_cursor_t *cursor, mongoc_cursor_response_t *response) { bson_iter_t iter; bson_iter_t child; const char *ns; uint32_t nslen; bool in_batch = false; if (bson_iter_init_find (&iter, &response->reply, "cursor") && BSON_ITER_HOLDS_DOCUMENT (&iter) && bson_iter_recurse (&iter, &child)) { while (bson_iter_next (&child)) { if (BSON_ITER_IS_KEY (&child, "id")) { cursor->cursor_id = bson_iter_as_int64 (&child); } else if (BSON_ITER_IS_KEY (&child, "ns")) { ns = bson_iter_utf8 (&child, &nslen); _mongoc_set_cursor_ns (cursor, ns, nslen); } else if (BSON_ITER_IS_KEY (&child, "firstBatch") || BSON_ITER_IS_KEY (&child, "nextBatch")) { if (BSON_ITER_HOLDS_ARRAY (&child) && bson_iter_recurse (&child, &response->batch_iter)) { in_batch = true; } } } } /* Driver Sessions Spec: "When an implicit session is associated with a * cursor for use with getMore operations, the session MUST be returned to * the pool immediately following a getMore operation that indicates that the * cursor has been exhausted." */ if (cursor->cursor_id == 0 && cursor->client_session && !cursor->explicit_session) { mongoc_client_session_destroy (cursor->client_session); cursor->client_session = NULL; } return in_batch; } void _mongoc_cursor_response_read (mongoc_cursor_t *cursor, mongoc_cursor_response_t *response, const bson_t **bson) { const uint8_t *data = NULL; uint32_t data_len = 0; ENTRY; if (bson_iter_next (&response->batch_iter) && BSON_ITER_HOLDS_DOCUMENT (&response->batch_iter)) { bson_iter_document (&response->batch_iter, &data_len, &data); /* bson_iter_next guarantees valid BSON, so this must succeed */ BSON_ASSERT (bson_init_static (&response->current_doc, data, data_len)); *bson = &response->current_doc; } } /* sets cursor error if could not get the next batch. */ void _mongoc_cursor_response_refresh (mongoc_cursor_t *cursor, const bson_t *command, const bson_t *opts, mongoc_cursor_response_t *response) { ENTRY; bson_destroy (&response->reply); /* server replies to find / aggregate with {cursor: {id: N, firstBatch: []}}, * to getMore command with {cursor: {id: N, nextBatch: []}}. 
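 *
 * A hedged caller-side sketch of feeding such a reply into the public
 * constructor defined above (assumes a connected "client"; the failure
 * path and final cleanup are elided):
 *
 *    bson_t reply;
 *    bson_error_t error;
 *    bson_t *cmd = bson_new ();
 *
 *    BSON_APPEND_INT32 (cmd, "listCollections", 1);
 *    if (mongoc_client_read_command_with_opts (
 *           client, "db", cmd, NULL, NULL, &reply, &error)) {
 *       mongoc_cursor_t *cursor =
 *          mongoc_cursor_new_from_command_reply_with_opts (
 *             client, &reply, NULL);
 *       (iterate with mongoc_cursor_next, then mongoc_cursor_destroy)
 *    }
 *    bson_destroy (cmd);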
*/ if (_mongoc_cursor_run_command ( cursor, command, opts, &response->reply, false) && _mongoc_cursor_start_reading_response (cursor, response)) { return; } if (!cursor->error.domain) { bson_set_error (&cursor->error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Invalid reply to %s command.", _mongoc_get_command_name (command)); } } void _mongoc_cursor_prepare_getmore_command (mongoc_cursor_t *cursor, bson_t *command) { const char *collection; int collection_len; int64_t batch_size; bool await_data; int64_t max_await_time_ms; ENTRY; _mongoc_cursor_collection (cursor, &collection, &collection_len); bson_init (command); bson_append_int64 (command, "getMore", 7, mongoc_cursor_get_id (cursor)); bson_append_utf8 (command, "collection", 10, collection, collection_len); batch_size = mongoc_cursor_get_batch_size (cursor); /* See find, getMore, and killCursors Spec for batchSize rules */ if (batch_size) { bson_append_int64 (command, MONGOC_CURSOR_BATCH_SIZE, MONGOC_CURSOR_BATCH_SIZE_LEN, abs (_mongoc_n_return (cursor))); } /* Find, getMore And killCursors Commands Spec: "In the case of a tailable cursor with awaitData == true the driver MUST provide a Cursor level option named maxAwaitTimeMS (See CRUD specification for details). The maxTimeMS option on the getMore command MUST be set to the value of the option maxAwaitTimeMS. If no maxAwaitTimeMS is specified, the driver SHOULD not set maxTimeMS on the getMore command." */ await_data = _mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_TAILABLE) && _mongoc_cursor_get_opt_bool (cursor, MONGOC_CURSOR_AWAIT_DATA); if (await_data) { max_await_time_ms = _mongoc_cursor_get_opt_int64 ( cursor, MONGOC_CURSOR_MAX_AWAIT_TIME_MS, 0); if (max_await_time_ms) { bson_append_int64 (command, MONGOC_CURSOR_MAX_TIME_MS, MONGOC_CURSOR_MAX_TIME_MS_LEN, max_await_time_ms); } } } /* sets the cursor to be empty so it returns NULL on the first call to * cursor_next but does not return an error. */ void _mongoc_cursor_set_empty (mongoc_cursor_t *cursor) { memset (&cursor->error, 0, sizeof (bson_error_t)); bson_reinit (&cursor->error_doc); cursor->state = IN_BATCH; } void _mongoc_cursor_prime (mongoc_cursor_t *cursor) { cursor->state = cursor->impl.prime (cursor); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cursor.h0000644000076500000240000000657313572250757025013 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CURSOR_H #define MONGOC_CURSOR_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-host-list.h" BSON_BEGIN_DECLS typedef struct _mongoc_cursor_t mongoc_cursor_t; /* forward decl */ struct _mongoc_client_t; MONGOC_EXPORT (mongoc_cursor_t *) mongoc_cursor_clone (const mongoc_cursor_t *cursor) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (void) mongoc_cursor_destroy (mongoc_cursor_t *cursor); MONGOC_EXPORT (bool) mongoc_cursor_more (mongoc_cursor_t *cursor); MONGOC_EXPORT (bool) mongoc_cursor_next (mongoc_cursor_t *cursor, const bson_t **bson); MONGOC_EXPORT (bool) mongoc_cursor_error (mongoc_cursor_t *cursor, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_cursor_error_document (mongoc_cursor_t *cursor, bson_error_t *error, const bson_t **doc); MONGOC_EXPORT (void) mongoc_cursor_get_host (mongoc_cursor_t *cursor, mongoc_host_list_t *host); MONGOC_EXPORT (bool) mongoc_cursor_is_alive (const mongoc_cursor_t *cursor) BSON_GNUC_DEPRECATED_FOR (mongoc_cursor_more); MONGOC_EXPORT (const bson_t *) mongoc_cursor_current (const mongoc_cursor_t *cursor); MONGOC_EXPORT (void) mongoc_cursor_set_batch_size (mongoc_cursor_t *cursor, uint32_t batch_size); MONGOC_EXPORT (uint32_t) mongoc_cursor_get_batch_size (const mongoc_cursor_t *cursor); MONGOC_EXPORT (bool) mongoc_cursor_set_limit (mongoc_cursor_t *cursor, int64_t limit); MONGOC_EXPORT (int64_t) mongoc_cursor_get_limit (const mongoc_cursor_t *cursor); /* These names include the term "hint" for backward compatibility, should be * mongoc_cursor_get_server_id, mongoc_cursor_set_server_id. */ MONGOC_EXPORT (bool) mongoc_cursor_set_hint (mongoc_cursor_t *cursor, uint32_t server_id); MONGOC_EXPORT (uint32_t) mongoc_cursor_get_hint (const mongoc_cursor_t *cursor); MONGOC_EXPORT (int64_t) mongoc_cursor_get_id (const mongoc_cursor_t *cursor); MONGOC_EXPORT (void) mongoc_cursor_set_max_await_time_ms (mongoc_cursor_t *cursor, uint32_t max_await_time_ms); MONGOC_EXPORT (uint32_t) mongoc_cursor_get_max_await_time_ms (const mongoc_cursor_t *cursor); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_cursor_new_from_command_reply (struct _mongoc_client_t *client, bson_t *reply, uint32_t server_id) BSON_GNUC_WARN_UNUSED_RESULT BSON_GNUC_DEPRECATED_FOR (mongoc_cursor_new_from_command_reply_with_opts); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_cursor_new_from_command_reply_with_opts (struct _mongoc_client_t *client, bson_t *reply, const bson_t *opts) BSON_GNUC_WARN_UNUSED_RESULT; BSON_END_DECLS #endif /* MONGOC_CURSOR_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cyrus-private.h0000644000076500000240000000373513572250757026310 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_CYRUS_PRIVATE_H #define MONGOC_CYRUS_PRIVATE_H #include "mongoc/mongoc-uri.h" #include "mongoc/mongoc-cluster-private.h" #include "mongoc/mongoc-sasl-private.h" #include #include #include BSON_BEGIN_DECLS typedef struct _mongoc_cyrus_t mongoc_cyrus_t; struct _mongoc_cyrus_t { mongoc_sasl_t credentials; sasl_callback_t callbacks[6]; sasl_conn_t *conn; bool done; int step; sasl_interact_t *interact; }; #ifndef SASL_CALLBACK_FN #define SASL_CALLBACK_FN(_f) ((int (*) (void)) (_f)) #endif void _mongoc_cyrus_init (mongoc_cyrus_t *sasl); bool _mongoc_cyrus_new_from_cluster (mongoc_cyrus_t *sasl, mongoc_cluster_t *cluster, mongoc_stream_t *stream, const char *hostname, bson_error_t *error); int _mongoc_cyrus_log (mongoc_cyrus_t *sasl, int level, const char *message); void _mongoc_cyrus_destroy (mongoc_cyrus_t *sasl); bool _mongoc_cyrus_step (mongoc_cyrus_t *sasl, const uint8_t *inbuf, uint32_t inbuflen, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_CYRUS_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-cyrus.c0000644000076500000240000002755213572250757024636 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SASL_CYRUS #include #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-cyrus-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-trace-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "CYRUS-SASL" bool _mongoc_cyrus_set_mechanism (mongoc_cyrus_t *sasl, const char *mechanism, bson_error_t *error) { bson_string_t *str = bson_string_new (""); const char **mechs = sasl_global_listmech (); int i = 0; bool ok = false; BSON_ASSERT (sasl); for (i = 0; mechs[i]; i++) { if (!strcmp (mechs[i], mechanism)) { ok = true; break; } bson_string_append (str, mechs[i]); if (mechs[i + 1]) { bson_string_append (str, ","); } } if (ok) { bson_free (sasl->credentials.mechanism); sasl->credentials.mechanism = mechanism ? bson_strdup (mechanism) : NULL; } else { bson_set_error (error, MONGOC_ERROR_SASL, SASL_NOMECH, "SASL Failure: Unsupported mechanism by client: %s. " "Available mechanisms: %s", mechanism, str->str); } bson_string_free (str, true); return ok; } static int _mongoc_cyrus_get_pass (mongoc_cyrus_t *sasl, int param_id, const char **result, unsigned *result_len) { BSON_ASSERT (sasl); BSON_ASSERT (param_id == SASL_CB_PASS); if (result) { *result = sasl->credentials.pass; } if (result_len) { *result_len = sasl->credentials.pass ? (unsigned) strlen (sasl->credentials.pass) : 0; } return (sasl->credentials.pass != NULL) ? 
SASL_OK : SASL_FAIL; } static int _mongoc_cyrus_canon_user (sasl_conn_t *conn, mongoc_cyrus_t *sasl, const char *in, unsigned inlen, unsigned flags, const char *user_realm, char *out, unsigned out_max, unsigned *out_len) { TRACE ("Canonicalizing %s (%" PRIu32 ")\n", in, inlen); strcpy (out, in); *out_len = inlen; return SASL_OK; } static int _mongoc_cyrus_get_user (mongoc_cyrus_t *sasl, int param_id, const char **result, unsigned *result_len) { BSON_ASSERT (sasl); BSON_ASSERT ((param_id == SASL_CB_USER) || (param_id == SASL_CB_AUTHNAME)); if (result) { *result = sasl->credentials.user; } if (result_len) { *result_len = sasl->credentials.user ? (unsigned) strlen (sasl->credentials.user) : 0; } return (sasl->credentials.user != NULL) ? SASL_OK : SASL_FAIL; } void _mongoc_cyrus_init (mongoc_cyrus_t *sasl) { sasl_callback_t callbacks[] = { {SASL_CB_AUTHNAME, SASL_CALLBACK_FN (_mongoc_cyrus_get_user), sasl}, {SASL_CB_USER, SASL_CALLBACK_FN (_mongoc_cyrus_get_user), sasl}, {SASL_CB_PASS, SASL_CALLBACK_FN (_mongoc_cyrus_get_pass), sasl}, {SASL_CB_CANON_USER, SASL_CALLBACK_FN (_mongoc_cyrus_canon_user), sasl}, {SASL_CB_LIST_END}}; BSON_ASSERT (sasl); memset (sasl, 0, sizeof *sasl); memcpy (&sasl->callbacks, callbacks, sizeof callbacks); sasl->done = false; sasl->step = 0; sasl->conn = NULL; sasl->interact = NULL; sasl->credentials.mechanism = NULL; sasl->credentials.user = NULL; sasl->credentials.pass = NULL; sasl->credentials.service_name = NULL; sasl->credentials.service_host = NULL; } bool _mongoc_cyrus_new_from_cluster (mongoc_cyrus_t *sasl, mongoc_cluster_t *cluster, mongoc_stream_t *stream, const char *hostname, bson_error_t *error) { const char *mechanism; char real_name[BSON_HOST_NAME_MAX + 1]; _mongoc_cyrus_init (sasl); mechanism = mongoc_uri_get_auth_mechanism (cluster->uri); if (!mechanism) { mechanism = "GSSAPI"; } if (!_mongoc_cyrus_set_mechanism (sasl, mechanism, error)) { _mongoc_cyrus_destroy (sasl); return false; } _mongoc_sasl_set_pass ((mongoc_sasl_t *) sasl, mongoc_uri_get_password (cluster->uri)); _mongoc_sasl_set_user ((mongoc_sasl_t *) sasl, mongoc_uri_get_username (cluster->uri)); _mongoc_sasl_set_properties ((mongoc_sasl_t *) sasl, cluster->uri); /* * If the URI requested canonicalizeHostname, we need to resolve the real * hostname for the IP Address and pass that to the SASL layer. Some * underlying GSSAPI layers will do this for us, but can be disabled in * their config (krb.conf). * * This allows the consumer to specify canonicalizeHostname=true in the URI * and have us do that for them. * * See CDRIVER-323 for more information. 
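 *
 * For example (placeholder principal and host, not a working deployment), a
 * GSSAPI connection string that requests this behaviour might look like:
 *
 *    mongodb://user%40EXAMPLE.COM@mongo.example.com/?authMechanism=GSSAPI&canonicalizeHostname=true
 *
 * in which case the resolved canonical name, rather than the literal
 * "mongo.example.com", is handed to the SASL layer below.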
*/ if (sasl->credentials.canonicalize_host_name && _mongoc_sasl_get_canonicalized_name ( stream, real_name, sizeof real_name)) { _mongoc_sasl_set_service_host ((mongoc_sasl_t *) sasl, real_name); } else { _mongoc_sasl_set_service_host ((mongoc_sasl_t *) sasl, hostname); } return true; } void _mongoc_cyrus_destroy (mongoc_cyrus_t *sasl) { BSON_ASSERT (sasl); if (sasl->conn) { sasl_dispose (&sasl->conn); } bson_free (sasl->credentials.user); bson_free (sasl->credentials.pass); bson_free (sasl->credentials.mechanism); bson_free (sasl->credentials.service_name); bson_free (sasl->credentials.service_host); } static bool _mongoc_cyrus_is_failure (int status, bson_error_t *error) { bool ret = (status < 0); TRACE ("Got status: %d ok is %d, continue=%d interact=%d\n", status, SASL_OK, SASL_CONTINUE, SASL_INTERACT); if (ret) { switch (status) { case SASL_NOMEM: bson_set_error (error, MONGOC_ERROR_SASL, status, "SASL Failure: insufficient memory."); break; case SASL_NOMECH: { bson_string_t *str = bson_string_new ("available mechanisms: "); const char **mechs = sasl_global_listmech (); int i = 0; for (i = 0; mechs[i]; i++) { bson_string_append (str, mechs[i]); if (mechs[i + 1]) { bson_string_append (str, ","); } } bson_set_error (error, MONGOC_ERROR_SASL, status, "SASL Failure: failure to negotiate mechanism (%s)", str->str); bson_string_free (str, 0); } break; case SASL_BADPARAM: bson_set_error (error, MONGOC_ERROR_SASL, status, "Bad parameter supplied. Please file a bug " "with mongo-c-driver."); break; default: bson_set_error (error, MONGOC_ERROR_SASL, status, "SASL Failure: (%d): %s", status, sasl_errstring (status, NULL, NULL)); break; } } return ret; } static bool _mongoc_cyrus_start (mongoc_cyrus_t *sasl, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error) { const char *service_name = "mongodb"; const char *service_host = ""; const char *mechanism = NULL; const char *raw = NULL; unsigned raw_len = 0; int status; BSON_ASSERT (sasl); BSON_ASSERT (outbuf); BSON_ASSERT (outbufmax); BSON_ASSERT (outbuflen); if (sasl->credentials.service_name) { service_name = sasl->credentials.service_name; } if (sasl->credentials.service_host) { service_host = sasl->credentials.service_host; } status = sasl_client_new ( service_name, service_host, NULL, NULL, sasl->callbacks, 0, &sasl->conn); TRACE ("Created new sasl client %s", status == SASL_OK ? "successfully" : "UNSUCCESSFULLY"); if (_mongoc_cyrus_is_failure (status, error)) { return false; } status = sasl_client_start (sasl->conn, sasl->credentials.mechanism, &sasl->interact, &raw, &raw_len, &mechanism); TRACE ("Started the sasl client %s", status == SASL_CONTINUE ? 
"successfully" : "UNSUCCESSFULLY"); if (_mongoc_cyrus_is_failure (status, error)) { return false; } if ((0 != strcasecmp (mechanism, "GSSAPI")) && (0 != strcasecmp (mechanism, "PLAIN"))) { bson_set_error (error, MONGOC_ERROR_SASL, SASL_NOMECH, "SASL Failure: invalid mechanism \"%s\"", mechanism); return false; } status = sasl_encode64 (raw, raw_len, (char *) outbuf, outbufmax, outbuflen); if (_mongoc_cyrus_is_failure (status, error)) { return false; } return true; } bool _mongoc_cyrus_step (mongoc_cyrus_t *sasl, const uint8_t *inbuf, uint32_t inbuflen, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error) { const char *raw = NULL; unsigned rawlen = 0; int status; BSON_ASSERT (sasl); BSON_ASSERT (inbuf); BSON_ASSERT (outbuf); BSON_ASSERT (outbuflen); TRACE ("Running %d, inbuflen: %" PRIu32, sasl->step, inbuflen); sasl->step++; if (sasl->step == 1) { return _mongoc_cyrus_start (sasl, outbuf, outbufmax, outbuflen, error); } else if (sasl->step >= 10) { bson_set_error (error, MONGOC_ERROR_SASL, SASL_NOTDONE, "SASL Failure: maximum steps detected"); return false; } TRACE ("Running %d, inbuflen: %" PRIu32, sasl->step, inbuflen); if (!inbuflen) { bson_set_error (error, MONGOC_ERROR_SASL, MONGOC_ERROR_CLIENT_AUTHENTICATE, "SASL Failure: no payload provided from server: %s", sasl_errdetail (sasl->conn)); return false; } status = sasl_decode64 ( (char *) inbuf, inbuflen, (char *) outbuf, outbufmax, outbuflen); if (_mongoc_cyrus_is_failure (status, error)) { return false; } TRACE ("%s", "Running client_step"); status = sasl_client_step ( sasl->conn, (char *) outbuf, *outbuflen, &sasl->interact, &raw, &rawlen); TRACE ("%s sent a client step", status == SASL_OK ? "Successfully" : "UNSUCCESSFULLY"); if (_mongoc_cyrus_is_failure (status, error)) { return false; } status = sasl_encode64 (raw, rawlen, (char *) outbuf, outbufmax, outbuflen); if (_mongoc_cyrus_is_failure (status, error)) { return false; } return true; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-database-private.h0000644000076500000240000000265613572250757026710 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_DATABASE_PRIVATE_H #define MONGOC_DATABASE_PRIVATE_H #include #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-read-concern.h" #include "mongoc/mongoc-write-concern.h" BSON_BEGIN_DECLS struct _mongoc_database_t { mongoc_client_t *client; char name[128]; mongoc_read_prefs_t *read_prefs; mongoc_read_concern_t *read_concern; mongoc_write_concern_t *write_concern; }; mongoc_database_t * _mongoc_database_new (mongoc_client_t *client, const char *name, const mongoc_read_prefs_t *read_prefs, const mongoc_read_concern_t *read_concern, const mongoc_write_concern_t *write_concern); BSON_END_DECLS #endif /* MONGOC_DATABASE_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-database.c0000644000076500000240000007413213572250757025231 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-aggregate-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-collection-private.h" #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-database.h" #include "mongoc/mongoc-database-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-change-stream-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "database" /* *-------------------------------------------------------------------------- * * _mongoc_database_new -- * * Create a new instance of mongoc_database_t for @client. * * @client must stay valid for the life of the resulting * database structure. * * Returns: * A newly allocated mongoc_database_t that should be freed with * mongoc_database_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_database_t * _mongoc_database_new (mongoc_client_t *client, const char *name, const mongoc_read_prefs_t *read_prefs, const mongoc_read_concern_t *read_concern, const mongoc_write_concern_t *write_concern) { mongoc_database_t *db; ENTRY; BSON_ASSERT (client); BSON_ASSERT (name); db = (mongoc_database_t *) bson_malloc0 (sizeof *db); db->client = client; db->write_concern = write_concern ? mongoc_write_concern_copy (write_concern) : mongoc_write_concern_new (); db->read_concern = read_concern ? mongoc_read_concern_copy (read_concern) : mongoc_read_concern_new (); db->read_prefs = read_prefs ? mongoc_read_prefs_copy (read_prefs) : mongoc_read_prefs_new (MONGOC_READ_PRIMARY); bson_strncpy (db->name, name, sizeof db->name); RETURN (db); } /* *-------------------------------------------------------------------------- * * mongoc_database_destroy -- * * Releases resources associated with @database. * * Returns: * None. * * Side effects: * Everything. 
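 *
 *       A typical lifecycle, sketched with a hypothetical database name and
 *       an already-constructed client:
 *
 *          mongoc_database_t *db =
 *             mongoc_client_get_database (client, "db");
 *          (run commands, fetch collections, ...)
 *          mongoc_database_destroy (db);
 *
 *       Collections obtained via mongoc_database_get_collection() reference
 *       the client directly, so they remain usable after the database
 *       handle itself is destroyed.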
* *-------------------------------------------------------------------------- */ void mongoc_database_destroy (mongoc_database_t *database) { ENTRY; if (!database) { EXIT; } if (database->read_prefs) { mongoc_read_prefs_destroy (database->read_prefs); database->read_prefs = NULL; } if (database->read_concern) { mongoc_read_concern_destroy (database->read_concern); database->read_concern = NULL; } if (database->write_concern) { mongoc_write_concern_destroy (database->write_concern); database->write_concern = NULL; } bson_free (database); EXIT; } mongoc_cursor_t * mongoc_database_aggregate (mongoc_database_t *db, /* IN */ const bson_t *pipeline, /* IN */ const bson_t *opts, /* IN */ const mongoc_read_prefs_t *read_prefs) /* IN */ { return _mongoc_aggregate (db->client, db->name, MONGOC_QUERY_NONE, pipeline, opts, read_prefs, db->read_prefs, db->read_concern, db->write_concern); } /* *-------------------------------------------------------------------------- * * mongoc_database_copy -- * * Returns a copy of @database that needs to be freed by calling * mongoc_database_destroy. * * Returns: * A copy of this database. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_database_t * mongoc_database_copy (mongoc_database_t *database) { ENTRY; BSON_ASSERT (database); RETURN (_mongoc_database_new (database->client, database->name, database->read_prefs, database->read_concern, database->write_concern)); } mongoc_cursor_t * mongoc_database_command (mongoc_database_t *database, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, const bson_t *command, const bson_t *fields, const mongoc_read_prefs_t *read_prefs) { char ns[MONGOC_NAMESPACE_MAX]; BSON_ASSERT (database); BSON_ASSERT (command); bson_snprintf (ns, sizeof ns, "%s.$cmd", database->name); /* Server Selection Spec: "The generic command method has a default read * preference of mode 'primary'. The generic command method MUST ignore any * default read preference from client, database or collection * configuration. The generic command method SHOULD allow an optional read * preference argument." */ /* flags, skip, limit, batch_size, fields are unused */ return _mongoc_cursor_cmd_deprecated_new ( database->client, ns, command, read_prefs); } bool mongoc_database_command_simple (mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error) { BSON_ASSERT (database); BSON_ASSERT (command); /* Server Selection Spec: "The generic command method has a default read * preference of mode 'primary'. The generic command method MUST ignore any * default read preference from client, database or collection * configuration. The generic command method SHOULD allow an optional read * preference argument." 
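 *
 * A usage sketch with an explicit read preference (hypothetical handle
 * "db"; error handling reduced to a log message):
 *
 *    bson_t reply;
 *    bson_error_t error;
 *    bson_t *ping = bson_new ();
 *    mongoc_read_prefs_t *prefs =
 *       mongoc_read_prefs_new (MONGOC_READ_SECONDARY_PREFERRED);
 *
 *    BSON_APPEND_INT32 (ping, "ping", 1);
 *    if (!mongoc_database_command_simple (db, ping, prefs, &reply, &error)) {
 *       MONGOC_ERROR ("ping failed: %s", error.message);
 *    }
 *
 *    bson_destroy (&reply);
 *    bson_destroy (ping);
 *    mongoc_read_prefs_destroy (prefs);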
*/ return _mongoc_client_command_with_opts (database->client, database->name, command, MONGOC_CMD_RAW, NULL /* opts */, MONGOC_QUERY_NONE, read_prefs, NULL, /* user prefs */ NULL /* read concern */, NULL /* write concern */, reply, error); } bool mongoc_database_read_command_with_opts (mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (database->client, database->name, command, MONGOC_CMD_READ, opts, MONGOC_QUERY_NONE, read_prefs, database->read_prefs, database->read_concern, database->write_concern, reply, error); } bool mongoc_database_write_command_with_opts (mongoc_database_t *database, const bson_t *command, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (database->client, database->name, command, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, /* user prefs */ database->read_prefs, database->read_concern, database->write_concern, reply, error); } bool mongoc_database_read_write_command_with_opts ( mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs /* IGNORED */, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (database->client, database->name, command, MONGOC_CMD_RW, opts, MONGOC_QUERY_NONE, read_prefs, database->read_prefs, database->read_concern, database->write_concern, reply, error); } bool mongoc_database_command_with_opts (mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error) { return _mongoc_client_command_with_opts (database->client, database->name, command, MONGOC_CMD_RAW, opts, MONGOC_QUERY_NONE, read_prefs, NULL, /* default prefs */ database->read_concern, database->write_concern, reply, error); } /* *-------------------------------------------------------------------------- * * mongoc_database_drop -- * * Requests that the MongoDB server drops @database, including all * collections and indexes associated with @database. * * Make sure this is really what you want! * * Returns: * true if @database was dropped. * * Side effects: * @error may be set. 
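 *
 *       Sketch (assumes an existing handle "db"; the _with_opts variant
 *       below additionally accepts options such as "writeConcern"):
 *
 *          bson_error_t error;
 *
 *          if (!mongoc_database_drop (db, &error)) {
 *             MONGOC_ERROR ("drop failed: %s", error.message);
 *          }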
* *-------------------------------------------------------------------------- */ bool mongoc_database_drop (mongoc_database_t *database, bson_error_t *error) { return mongoc_database_drop_with_opts (database, NULL, error); } bool mongoc_database_drop_with_opts (mongoc_database_t *database, const bson_t *opts, bson_error_t *error) { bool ret; bson_t cmd; BSON_ASSERT (database); bson_init (&cmd); bson_append_int32 (&cmd, "dropDatabase", 12, 1); ret = _mongoc_client_command_with_opts (database->client, database->name, &cmd, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, /* user prefs */ database->read_prefs, database->read_concern, database->write_concern, NULL, /* reply */ error); bson_destroy (&cmd); return ret; } bool mongoc_database_remove_user (mongoc_database_t *database, const char *username, bson_error_t *error) { bson_t cmd; bool ret; ENTRY; BSON_ASSERT (database); BSON_ASSERT (username); bson_init (&cmd); BSON_APPEND_UTF8 (&cmd, "dropUser", username); ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error); bson_destroy (&cmd); RETURN (ret); } bool mongoc_database_remove_all_users (mongoc_database_t *database, bson_error_t *error) { bson_t cmd; bool ret; ENTRY; BSON_ASSERT (database); bson_init (&cmd); BSON_APPEND_INT32 (&cmd, "dropAllUsersFromDatabase", 1); ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error); bson_destroy (&cmd); RETURN (ret); } /** * mongoc_database_add_user: * @database: A #mongoc_database_t. * @username: A string containing the username. * @password: (allow-none): A string containing password, or NULL. * @roles: (allow-none): An optional bson_t of roles. * @custom_data: (allow-none): An optional bson_t of data to store. * @error: (out) (allow-none): A location for a bson_error_t or %NULL. * * Creates a new user with access to @database. * * Returns: None. * Side effects: None. */ bool mongoc_database_add_user (mongoc_database_t *database, const char *username, const char *password, const bson_t *roles, const bson_t *custom_data, bson_error_t *error) { bson_t cmd; bson_t ar; bool ret = false; ENTRY; BSON_ASSERT (database); BSON_ASSERT (username); bson_init (&cmd); BSON_APPEND_UTF8 (&cmd, "createUser", username); BSON_APPEND_UTF8 (&cmd, "pwd", password); if (custom_data) { BSON_APPEND_DOCUMENT (&cmd, "customData", custom_data); } if (roles) { BSON_APPEND_ARRAY (&cmd, "roles", roles); } else { bson_append_array_begin (&cmd, "roles", 5, &ar); bson_append_array_end (&cmd, &ar); } ret = mongoc_database_command_simple (database, &cmd, NULL, NULL, error); bson_destroy (&cmd); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_database_get_read_prefs -- * * Fetch the read preferences for @database. * * Returns: * A mongoc_read_prefs_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_read_prefs_t * mongoc_database_get_read_prefs (const mongoc_database_t *database) /* IN */ { BSON_ASSERT (database); return database->read_prefs; } /* *-------------------------------------------------------------------------- * * mongoc_database_set_read_prefs -- * * Sets the default read preferences for @database. * * Returns: * None. * * Side effects: * None. 
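 *
 *       Sketch (hypothetical handle "db"):
 *
 *          mongoc_read_prefs_t *prefs =
 *             mongoc_read_prefs_new (MONGOC_READ_SECONDARY_PREFERRED);
 *
 *          mongoc_database_set_read_prefs (db, prefs);
 *          mongoc_read_prefs_destroy (prefs);
 *
 *       The database stores its own copy (mongoc_read_prefs_copy below), so
 *       the caller may destroy its prefs object immediately.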
* *-------------------------------------------------------------------------- */ void mongoc_database_set_read_prefs (mongoc_database_t *database, const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (database); if (database->read_prefs) { mongoc_read_prefs_destroy (database->read_prefs); database->read_prefs = NULL; } if (read_prefs) { database->read_prefs = mongoc_read_prefs_copy (read_prefs); } } /* *-------------------------------------------------------------------------- * * mongoc_database_get_read_concern -- * * Fetches the read concern for @database. * * Returns: * A mongoc_read_concern_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_read_concern_t * mongoc_database_get_read_concern (const mongoc_database_t *database) { BSON_ASSERT (database); return database->read_concern; } /* *-------------------------------------------------------------------------- * * mongoc_database_set_read_concern -- * * Set the default read concern for @database. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_database_set_read_concern (mongoc_database_t *database, const mongoc_read_concern_t *read_concern) { BSON_ASSERT (database); if (database->read_concern) { mongoc_read_concern_destroy (database->read_concern); database->read_concern = NULL; } if (read_concern) { database->read_concern = mongoc_read_concern_copy (read_concern); } } /* *-------------------------------------------------------------------------- * * mongoc_database_get_write_concern -- * * Fetches the write concern for @database. * * Returns: * A mongoc_write_concern_t that should not be modified or freed. * * Side effects: * None. * *-------------------------------------------------------------------------- */ const mongoc_write_concern_t * mongoc_database_get_write_concern (const mongoc_database_t *database) { BSON_ASSERT (database); return database->write_concern; } /* *-------------------------------------------------------------------------- * * mongoc_database_set_write_concern -- * * Set the default write concern for @database. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_database_set_write_concern (mongoc_database_t *database, const mongoc_write_concern_t *write_concern) { BSON_ASSERT (database); if (database->write_concern) { mongoc_write_concern_destroy (database->write_concern); database->write_concern = NULL; } if (write_concern) { database->write_concern = mongoc_write_concern_copy (write_concern); } } /** * mongoc_database_has_collection: * @database: (in): A #mongoc_database_t. * @name: (in): The name of the collection to check for. * @error: (out) (allow-none): A location for a #bson_error_t, or %NULL. * * Checks to see if a collection exists within the database on the MongoDB * server. * * This will return %false if their was an error communicating with the * server, or if the collection does not exist. * * If @error is provided, it will first be zeroed. Upon error, error.domain * will be set. * * Returns: %true if @name exists, otherwise %false. @error may be set. 
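 *
 * Sketch (hypothetical names; assumes "db" is a valid handle). Because
 * @error is zeroed first, a false return with error.domain == 0 means the
 * collection simply does not exist:
 *
 *    bson_error_t error;
 *
 *    if (mongoc_database_has_collection (db, "events", &error)) {
 *       (collection exists)
 *    } else if (error.domain) {
 *       MONGOC_ERROR ("listCollections failed: %s", error.message);
 *    } else {
 *       (collection does not exist)
 *    }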
*/ bool mongoc_database_has_collection (mongoc_database_t *database, const char *name, bson_error_t *error) { bson_iter_t col_iter; bool ret = false; const char *cur_name; bson_t opts = BSON_INITIALIZER; bson_t filter; mongoc_cursor_t *cursor; const bson_t *doc; ENTRY; BSON_ASSERT (database); BSON_ASSERT (name); if (error) { memset (error, 0, sizeof *error); } BSON_APPEND_DOCUMENT_BEGIN (&opts, "filter", &filter); BSON_APPEND_UTF8 (&filter, "name", name); bson_append_document_end (&opts, &filter); cursor = mongoc_database_find_collections_with_opts (database, &opts); while (mongoc_cursor_next (cursor, &doc)) { if (bson_iter_init (&col_iter, doc) && bson_iter_find (&col_iter, "name") && BSON_ITER_HOLDS_UTF8 (&col_iter) && (cur_name = bson_iter_utf8 (&col_iter, NULL))) { if (!strcmp (cur_name, name)) { ret = true; GOTO (cleanup); } } } (void) mongoc_cursor_error (cursor, error); cleanup: mongoc_cursor_destroy (cursor); bson_destroy (&opts); RETURN (ret); } mongoc_cursor_t * mongoc_database_find_collections (mongoc_database_t *database, const bson_t *filter, bson_error_t *error) { bson_t opts = BSON_INITIALIZER; mongoc_cursor_t *cursor; BSON_ASSERT (database); if (filter) { if (!BSON_APPEND_DOCUMENT (&opts, "filter", filter)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'filter' parameter."); bson_destroy (&opts); return NULL; } } cursor = mongoc_database_find_collections_with_opts (database, &opts); bson_destroy (&opts); /* this deprecated API returns NULL on error */ if (mongoc_cursor_error (cursor, error)) { mongoc_cursor_destroy (cursor); return NULL; } return cursor; } mongoc_cursor_t * mongoc_database_find_collections_with_opts (mongoc_database_t *database, const bson_t *opts) { mongoc_cursor_t *cursor; bson_t cmd = BSON_INITIALIZER; BSON_ASSERT (database); BSON_APPEND_INT32 (&cmd, "listCollections", 1); /* Enumerate Collections Spec: "run listCollections on the primary node in * replicaset mode" */ cursor = _mongoc_cursor_cmd_new ( database->client, database->name, &cmd, opts, NULL, NULL, NULL); if (cursor->error.domain == 0) { _mongoc_cursor_prime (cursor); } bson_destroy (&cmd); return cursor; } char ** mongoc_database_get_collection_names (mongoc_database_t *database, bson_error_t *error) { return mongoc_database_get_collection_names_with_opts ( database, NULL, error); } char ** mongoc_database_get_collection_names_with_opts (mongoc_database_t *database, const bson_t *opts, bson_error_t *error) { bson_t opts_copy; bson_iter_t col; const char *name; char *namecopy; mongoc_array_t strv_buf; mongoc_cursor_t *cursor; const bson_t *doc; char **ret; BSON_ASSERT (database); if (opts) { bson_copy_to (opts, &opts_copy); } else { bson_init (&opts_copy); } /* nameOnly option is faster in MongoDB 4+, ignored by older versions, * see Enumerating Collections Spec */ if (!bson_has_field (&opts_copy, "nameOnly")) { bson_append_bool (&opts_copy, "nameOnly", 8, true); } cursor = mongoc_database_find_collections_with_opts (database, &opts_copy); _mongoc_array_init (&strv_buf, sizeof (char *)); while (mongoc_cursor_next (cursor, &doc)) { if (bson_iter_init (&col, doc) && bson_iter_find (&col, "name") && BSON_ITER_HOLDS_UTF8 (&col) && (name = bson_iter_utf8 (&col, NULL))) { namecopy = bson_strdup (name); _mongoc_array_append_val (&strv_buf, namecopy); } } /* append a null pointer for the last value. also handles the case * of no values. 
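 *
 * That trailing NULL is what callers iterate to; a hedged caller-side
 * sketch (hypothetical handle "db"; NULL opts keeps the nameOnly default
 * added above):
 *
 *    bson_error_t error;
 *    char **names =
 *       mongoc_database_get_collection_names_with_opts (db, NULL, &error);
 *
 *    if (names) {
 *       for (char **p = names; *p; p++) {
 *          printf ("%s\n", *p);
 *       }
 *       bson_strfreev (names);
 *    }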
*/ namecopy = NULL; _mongoc_array_append_val (&strv_buf, namecopy); if (mongoc_cursor_error (cursor, error)) { _mongoc_array_destroy (&strv_buf); ret = NULL; } else { ret = (char **) strv_buf.data; } mongoc_cursor_destroy (cursor); bson_destroy (&opts_copy); return ret; } mongoc_collection_t * mongoc_database_create_collection (mongoc_database_t *database, const char *name, const bson_t *opts, bson_error_t *error) { mongoc_collection_t *collection = NULL; bson_iter_t iter; bson_t cmd; bool capped = false; BSON_ASSERT (database); BSON_ASSERT (name); if (strchr (name, '$')) { bson_set_error (error, MONGOC_ERROR_NAMESPACE, MONGOC_ERROR_NAMESPACE_INVALID, "The namespace \"%s\" is invalid.", name); return NULL; } if (opts) { if (bson_iter_init_find (&iter, opts, "capped")) { if (!BSON_ITER_HOLDS_BOOL (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"capped\" must be a boolean."); return NULL; } capped = bson_iter_bool (&iter); } if (bson_iter_init_find (&iter, opts, "size")) { if (!BSON_ITER_HOLDS_INT (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"size\" must be an integer."); return NULL; } if (!capped) { bson_set_error ( error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"size\" parameter requires {\"capped\": true}"); return NULL; } } if (bson_iter_init_find (&iter, opts, "max")) { if (!BSON_ITER_HOLDS_INT (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The argument \"max\" must be an integer."); return NULL; } if (!capped) { bson_set_error ( error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"max\" parameter requires {\"capped\": true}"); return NULL; } } if (bson_iter_init_find (&iter, opts, "storageEngine")) { if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_set_error ( error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"storageEngine\" parameter must be a document"); return NULL; } if (bson_iter_find (&iter, "wiredTiger")) { if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"wiredTiger\" option must take a document " "argument with a \"configString\" field"); return NULL; } if (bson_iter_find (&iter, "configString")) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) { bson_set_error ( error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"configString\" parameter must be a string"); return NULL; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The \"wiredTiger\" option must take a document " "argument with a \"configString\" field"); return NULL; } } } } bson_init (&cmd); BSON_APPEND_UTF8 (&cmd, "create", name); if (_mongoc_client_command_with_opts (database->client, database->name, &cmd, MONGOC_CMD_WRITE, opts, MONGOC_QUERY_NONE, NULL, /* user prefs */ database->read_prefs, database->read_concern, database->write_concern, NULL, /* reply */ error)) { collection = _mongoc_collection_new (database->client, database->name, name, database->read_prefs, database->read_concern, database->write_concern); } bson_destroy (&cmd); return collection; } mongoc_collection_t * mongoc_database_get_collection (mongoc_database_t *database, const char *collection) { BSON_ASSERT (database); BSON_ASSERT (collection); return _mongoc_collection_new (database->client, database->name, collection, database->read_prefs, database->read_concern, database->write_concern); } const char * 
mongoc_database_get_name (mongoc_database_t *database) { BSON_ASSERT (database); return database->name; } mongoc_change_stream_t * mongoc_database_watch (const mongoc_database_t *db, const bson_t *pipeline, const bson_t *opts) { return _mongoc_change_stream_new_from_database (db, pipeline, opts); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-database.h0000644000076500000240000001566113572250757025240 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_DATABASE_H #define MONGOC_DATABASE_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-flags.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-read-concern.h" #include "mongoc/mongoc-write-concern.h" BSON_BEGIN_DECLS typedef struct _mongoc_database_t mongoc_database_t; MONGOC_EXPORT (const char *) mongoc_database_get_name (mongoc_database_t *database); MONGOC_EXPORT (bool) mongoc_database_remove_user (mongoc_database_t *database, const char *username, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_remove_all_users (mongoc_database_t *database, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_add_user (mongoc_database_t *database, const char *username, const char *password, const bson_t *roles, const bson_t *custom_data, bson_error_t *error); MONGOC_EXPORT (void) mongoc_database_destroy (mongoc_database_t *database); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_database_aggregate (mongoc_database_t *db, const bson_t *pipeline, const bson_t *opts, const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (mongoc_database_t *) mongoc_database_copy (mongoc_database_t *database); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_database_command (mongoc_database_t *database, mongoc_query_flags_t flags, uint32_t skip, uint32_t limit, uint32_t batch_size, const bson_t *command, const bson_t *fields, const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (bool) mongoc_database_read_command_with_opts (mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_write_command_with_opts (mongoc_database_t *database, const bson_t *command, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_read_write_command_with_opts ( mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs /* IGNORED */, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_command_with_opts (mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs, const bson_t *opts, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_command_simple (mongoc_database_t *database, const bson_t *command, const mongoc_read_prefs_t *read_prefs, bson_t *reply, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_drop (mongoc_database_t 
*database, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_drop_with_opts (mongoc_database_t *database, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_database_has_collection (mongoc_database_t *database, const char *name, bson_error_t *error); MONGOC_EXPORT (mongoc_collection_t *) mongoc_database_create_collection (mongoc_database_t *database, const char *name, const bson_t *options, bson_error_t *error); MONGOC_EXPORT (const mongoc_read_prefs_t *) mongoc_database_get_read_prefs (const mongoc_database_t *database); MONGOC_EXPORT (void) mongoc_database_set_read_prefs (mongoc_database_t *database, const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (const mongoc_write_concern_t *) mongoc_database_get_write_concern (const mongoc_database_t *database); MONGOC_EXPORT (void) mongoc_database_set_write_concern (mongoc_database_t *database, const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (const mongoc_read_concern_t *) mongoc_database_get_read_concern (const mongoc_database_t *database); MONGOC_EXPORT (void) mongoc_database_set_read_concern (mongoc_database_t *database, const mongoc_read_concern_t *read_concern); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_database_find_collections (mongoc_database_t *database, const bson_t *filter, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_database_find_collections_with_opts); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_database_find_collections_with_opts (mongoc_database_t *database, const bson_t *opts); MONGOC_EXPORT (char **) mongoc_database_get_collection_names (mongoc_database_t *database, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_database_get_collection_names_with_opts); MONGOC_EXPORT (char **) mongoc_database_get_collection_names_with_opts (mongoc_database_t *database, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (mongoc_collection_t *) mongoc_database_get_collection (mongoc_database_t *database, const char *name); MONGOC_EXPORT (mongoc_change_stream_t *) mongoc_database_watch (const mongoc_database_t *db, const bson_t *pipeline, const bson_t *opts); BSON_END_DECLS #endif /* MONGOC_DATABASE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-errno-private.h0000644000076500000240000000317413572250757026265 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_ERRNO_PRIVATE_H #define MONGOC_ERRNO_PRIVATE_H #include #include #ifdef _WIN32 #include #include #endif BSON_BEGIN_DECLS #if defined(_WIN32) #define MONGOC_ERRNO_IS_AGAIN(errno) \ ((errno == EAGAIN) || (errno == WSAEWOULDBLOCK) || (errno == WSAEINPROGRESS)) #define MONGOC_ERRNO_IS_TIMEDOUT(errno) (errno == WSAETIMEDOUT) #elif defined(__sun) /* for some reason, accept() returns -1 and errno of 0 */ #define MONGOC_ERRNO_IS_AGAIN(errno) \ ((errno == EINTR) || (errno == EAGAIN) || (errno == EWOULDBLOCK) || \ (errno == EINPROGRESS) || (errno == 0)) #define MONGOC_ERRNO_IS_TIMEDOUT(errno) (errno == ETIMEDOUT) #else #define MONGOC_ERRNO_IS_AGAIN(errno) \ ((errno == EINTR) || (errno == EAGAIN) || (errno == EWOULDBLOCK) || \ (errno == EINPROGRESS)) #define MONGOC_ERRNO_IS_TIMEDOUT(errno) (errno == ETIMEDOUT) #endif BSON_END_DECLS #endif /* MONGOC_ERRNO_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-error-private.h0000644000076500000240000000175313572250757026272 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #include #include BSON_BEGIN_DECLS typedef enum { MONGOC_READ_ERR_NONE, MONGOC_READ_ERR_OTHER, MONGOC_READ_ERR_RETRY, } mongoc_read_err_type_t; mongoc_read_err_type_t _mongoc_read_error_get_type (bool cmd_ret, const bson_error_t *cmd_err, const bson_t *reply); BSON_END_DECLSmongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-error.c0000644000076500000240000000622513572250757024614 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-error-private.h" #include "mongoc/mongoc-rpc-private.h" bool mongoc_error_has_label (const bson_t *reply, const char *label) { bson_iter_t iter; bson_iter_t error_labels; BSON_ASSERT (reply); BSON_ASSERT (label); if (bson_iter_init_find (&iter, reply, "errorLabels") && bson_iter_recurse (&iter, &error_labels)) { while (bson_iter_next (&error_labels)) { if (BSON_ITER_HOLDS_UTF8 (&error_labels) && !strcmp (bson_iter_utf8 (&error_labels, NULL), label)) { return true; } } } return false; } /*-------------------------------------------------------------------------- * * _mongoc_read_error_get_type -- * * Checks if the error or reply from a read command is considered * retryable according to the retryable reads spec. Checks both * for a client error (a network exception) and a server error in * the reply. 
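 *
 *       A retrying caller might branch on the result, for example (sketch
 *       only; a real retry also reselects a server and runs the command at
 *       most one more time):
 *
 *          if (_mongoc_read_error_get_type (ret, &error, &reply) ==
 *              MONGOC_READ_ERR_RETRY) {
 *             (retry the read once on a newly selected server)
 *          }
 *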
@cmd_ret and @cmd_err come from the result of a * read_command function. * * * Return: * A mongoc_read_error_type_t indicating the type of error (if any). * *-------------------------------------------------------------------------- */ mongoc_read_err_type_t _mongoc_read_error_get_type (bool cmd_ret, const bson_error_t *cmd_err, const bson_t *reply) { bson_error_t error; /* check for a client error. */ if (!cmd_ret && cmd_err && cmd_err->domain == MONGOC_ERROR_STREAM) { /* Retryable reads spec: "considered retryable if [...] any network * exception (e.g. socket timeout or error) */ return MONGOC_READ_ERR_RETRY; } /* check for a server error. */ if (_mongoc_cmd_check_ok_no_wce ( reply, MONGOC_ERROR_API_VERSION_2, &error)) { return MONGOC_READ_ERR_NONE; } switch (error.code) { case 11600: /* InterruptedAtShutdown */ case 11602: /* InterruptedDueToReplStateChange */ case 10107: /* NotMaster */ case 13435: /* NotMasterNoSlaveOk */ case 13436: /* NotMasterOrSecondary */ case 189: /* PrimarySteppedDown */ case 91: /* ShutdownInProgress */ case 7: /* HostNotFound */ case 6: /* HostUnreachable */ case 89: /* NetworkTimeout */ case 9001: /* SocketException */ return MONGOC_READ_ERR_RETRY; default: if (strstr (error.message, "not master") || strstr (error.message, "node is recovering")) { return MONGOC_READ_ERR_RETRY; } return MONGOC_READ_ERR_OTHER; } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-error.h0000644000076500000240000000650713572250757024624 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_ERRORS_H #define MONGOC_ERRORS_H #include #include "mongoc/mongoc-macros.h" #define MONGOC_ERROR_API_VERSION_LEGACY 1 #define MONGOC_ERROR_API_VERSION_2 2 BSON_BEGIN_DECLS typedef enum { MONGOC_ERROR_CLIENT = 1, MONGOC_ERROR_STREAM, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_CURSOR, MONGOC_ERROR_QUERY, MONGOC_ERROR_INSERT, MONGOC_ERROR_SASL, MONGOC_ERROR_BSON, MONGOC_ERROR_MATCHER, MONGOC_ERROR_NAMESPACE, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COLLECTION, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SERVER_SELECTION, MONGOC_ERROR_WRITE_CONCERN, MONGOC_ERROR_SERVER, /* Error API Version 2 only */ MONGOC_ERROR_TRANSACTION, } mongoc_error_domain_t; typedef enum { MONGOC_ERROR_STREAM_INVALID_TYPE = 1, MONGOC_ERROR_STREAM_INVALID_STATE, MONGOC_ERROR_STREAM_NAME_RESOLUTION, MONGOC_ERROR_STREAM_SOCKET, MONGOC_ERROR_STREAM_CONNECT, MONGOC_ERROR_STREAM_NOT_ESTABLISHED, MONGOC_ERROR_CLIENT_NOT_READY, MONGOC_ERROR_CLIENT_TOO_BIG, MONGOC_ERROR_CLIENT_TOO_SMALL, MONGOC_ERROR_CLIENT_GETNONCE, MONGOC_ERROR_CLIENT_AUTHENTICATE, MONGOC_ERROR_CLIENT_NO_ACCEPTABLE_PEER, MONGOC_ERROR_CLIENT_IN_EXHAUST, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, MONGOC_ERROR_CURSOR_INVALID_CURSOR, MONGOC_ERROR_QUERY_FAILURE, MONGOC_ERROR_BSON_INVALID, MONGOC_ERROR_MATCHER_INVALID, MONGOC_ERROR_NAMESPACE_INVALID, MONGOC_ERROR_NAMESPACE_INVALID_FILTER_TYPE, MONGOC_ERROR_COMMAND_INVALID_ARG, MONGOC_ERROR_COLLECTION_INSERT_FAILED, MONGOC_ERROR_COLLECTION_UPDATE_FAILED, MONGOC_ERROR_COLLECTION_DELETE_FAILED, MONGOC_ERROR_COLLECTION_DOES_NOT_EXIST = 26, MONGOC_ERROR_GRIDFS_INVALID_FILENAME, MONGOC_ERROR_SCRAM_NOT_DONE, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND = 59, MONGOC_ERROR_QUERY_NOT_TAILABLE = 13051, MONGOC_ERROR_SERVER_SELECTION_BAD_WIRE_VERSION, MONGOC_ERROR_SERVER_SELECTION_FAILURE, MONGOC_ERROR_SERVER_SELECTION_INVALID_ID, MONGOC_ERROR_GRIDFS_CHUNK_MISSING, MONGOC_ERROR_GRIDFS_PROTOCOL_ERROR, /* Dup with query failure. */ MONGOC_ERROR_PROTOCOL_ERROR = 17, MONGOC_ERROR_WRITE_CONCERN_ERROR = 64, MONGOC_ERROR_DUPLICATE_KEY = 11000, MONGOC_ERROR_MAX_TIME_MS_EXPIRED = 50, MONGOC_ERROR_CHANGE_STREAM_NO_RESUME_TOKEN, MONGOC_ERROR_CLIENT_SESSION_FAILURE, MONGOC_ERROR_TRANSACTION_INVALID_STATE, MONGOC_ERROR_GRIDFS_CORRUPT, MONGOC_ERROR_GRIDFS_BUCKET_FILE_NOT_FOUND, MONGOC_ERROR_GRIDFS_BUCKET_STREAM } mongoc_error_code_t; MONGOC_EXPORT (bool) mongoc_error_has_label (const bson_t *reply, const char *label); BSON_END_DECLS #endif /* MONGOC_ERRORS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-find-and-modify-private.h0000644000076500000240000000210013572250757030071 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_FIND_AND_MODIFY_PRIVATE_H #define MONGOC_FIND_AND_MODIFY_PRIVATE_H #include #include "mongoc/mongoc-write-command-private.h" BSON_BEGIN_DECLS struct _mongoc_find_and_modify_opts_t { bson_t *sort; bson_t *update; bson_t *fields; mongoc_find_and_modify_flags_t flags; bool bypass_document_validation; uint32_t max_time_ms; bson_t extra; }; BSON_END_DECLS #endif /* MONGOC_FIND_AND_MODIFY_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-find-and-modify.c0000644000076500000240000001166213572250757026431 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-find-and-modify.h" #include "mongoc/mongoc-find-and-modify-private.h" #include "mongoc/mongoc-util-private.h" /** * mongoc_find_and_modify_new: * * Create a new mongoc_find_and_modify_t. * * Returns: A newly allocated mongoc_find_and_modify_t. This should be freed * with mongoc_find_and_modify_destroy(). */ mongoc_find_and_modify_opts_t * mongoc_find_and_modify_opts_new (void) { mongoc_find_and_modify_opts_t *opts = NULL; opts = (mongoc_find_and_modify_opts_t *) bson_malloc0 (sizeof *opts); bson_init (&opts->extra); opts->bypass_document_validation = false; return opts; } bool mongoc_find_and_modify_opts_set_sort (mongoc_find_and_modify_opts_t *opts, const bson_t *sort) { BSON_ASSERT (opts); if (sort) { bson_destroy (opts->sort); opts->sort = bson_copy (sort); return true; } return false; } void mongoc_find_and_modify_opts_get_sort (const mongoc_find_and_modify_opts_t *opts, bson_t *sort) { BSON_ASSERT (opts); BSON_ASSERT (sort); if (opts->sort) { bson_copy_to (opts->sort, sort); } else { bson_init (sort); } } bool mongoc_find_and_modify_opts_set_update (mongoc_find_and_modify_opts_t *opts, const bson_t *update) { BSON_ASSERT (opts); if (update) { bson_destroy (opts->update); opts->update = bson_copy (update); return true; } return false; } void mongoc_find_and_modify_opts_get_update ( const mongoc_find_and_modify_opts_t *opts, bson_t *update) { BSON_ASSERT (opts); BSON_ASSERT (update); if (opts->update) { bson_copy_to (opts->update, update); } else { bson_init (update); } } bool mongoc_find_and_modify_opts_set_fields (mongoc_find_and_modify_opts_t *opts, const bson_t *fields) { BSON_ASSERT (opts); if (fields) { bson_destroy (opts->fields); opts->fields = bson_copy (fields); return true; } return false; } void mongoc_find_and_modify_opts_get_fields ( const mongoc_find_and_modify_opts_t *opts, bson_t *fields) { BSON_ASSERT (opts); BSON_ASSERT (fields); if (opts->fields) { bson_copy_to (opts->fields, fields); } else { bson_init (fields); } } bool mongoc_find_and_modify_opts_set_flags ( mongoc_find_and_modify_opts_t *opts, const mongoc_find_and_modify_flags_t flags) { BSON_ASSERT (opts); opts->flags = flags; return true; } mongoc_find_and_modify_flags_t mongoc_find_and_modify_opts_get_flags ( const 
mongoc_find_and_modify_opts_t *opts) { BSON_ASSERT (opts); return opts->flags; } bool mongoc_find_and_modify_opts_set_bypass_document_validation ( mongoc_find_and_modify_opts_t *opts, bool bypass) { BSON_ASSERT (opts); opts->bypass_document_validation = bypass; return true; } bool mongoc_find_and_modify_opts_get_bypass_document_validation ( const mongoc_find_and_modify_opts_t *opts) { BSON_ASSERT (opts); return opts->bypass_document_validation; } bool mongoc_find_and_modify_opts_set_max_time_ms ( mongoc_find_and_modify_opts_t *opts, uint32_t max_time_ms) { BSON_ASSERT (opts); opts->max_time_ms = max_time_ms; return true; } uint32_t mongoc_find_and_modify_opts_get_max_time_ms ( const mongoc_find_and_modify_opts_t *opts) { BSON_ASSERT (opts); return opts->max_time_ms; } bool mongoc_find_and_modify_opts_append (mongoc_find_and_modify_opts_t *opts, const bson_t *extra) { BSON_ASSERT (opts); BSON_ASSERT (extra); return bson_concat (&opts->extra, extra); } void mongoc_find_and_modify_opts_get_extra ( const mongoc_find_and_modify_opts_t *opts, bson_t *extra) { BSON_ASSERT (opts); BSON_ASSERT (extra); bson_copy_to (&opts->extra, extra); } /** * mongoc_find_and_modify_opts_destroy: * @opts: A mongoc_find_and_modify_opts_t. * * Releases a mongoc_find_and_modify_opts_t and all associated memory. */ void mongoc_find_and_modify_opts_destroy (mongoc_find_and_modify_opts_t *opts) { if (opts) { bson_destroy (opts->sort); bson_destroy (opts->update); bson_destroy (opts->fields); bson_destroy (&opts->extra); bson_free (opts); } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-find-and-modify.h0000644000076500000240000000632113572250757026432 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
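 *
 * Illustrative usage sketch for the options API declared below (not
 * upstream code). mongoc_collection_find_and_modify_with_opts () is
 * assumed from the collection API elsewhere in the library:
 *
 *   bson_error_t error;
 *   bson_t reply;
 *   bson_t *query = BCON_NEW ("_id", BCON_INT32 (1));
 *   bson_t *update = BCON_NEW ("$inc", "{", "visits", BCON_INT32 (1), "}");
 *   mongoc_find_and_modify_opts_t *opts = mongoc_find_and_modify_opts_new ();
 *
 *   mongoc_find_and_modify_opts_set_update (opts, update);
 *   mongoc_find_and_modify_opts_set_flags (
 *      opts,
 *      MONGOC_FIND_AND_MODIFY_UPSERT | MONGOC_FIND_AND_MODIFY_RETURN_NEW);
 *   mongoc_find_and_modify_opts_set_max_time_ms (opts, 1000);
 *
 *   if (!mongoc_collection_find_and_modify_with_opts (
 *          collection, query, opts, &reply, &error)) {
 *      fprintf (stderr, "findAndModify failed: %s\n", error.message);
 *   }
 *
 *   bson_destroy (&reply);
 *   bson_destroy (query);
 *   bson_destroy (update);
 *   mongoc_find_and_modify_opts_destroy (opts);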
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_FIND_AND_MODIFY_H #define MONGOC_FIND_AND_MODIFY_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS typedef enum { MONGOC_FIND_AND_MODIFY_NONE = 0, MONGOC_FIND_AND_MODIFY_REMOVE = 1 << 0, MONGOC_FIND_AND_MODIFY_UPSERT = 1 << 1, MONGOC_FIND_AND_MODIFY_RETURN_NEW = 1 << 2, } mongoc_find_and_modify_flags_t; typedef struct _mongoc_find_and_modify_opts_t mongoc_find_and_modify_opts_t; MONGOC_EXPORT (mongoc_find_and_modify_opts_t *) mongoc_find_and_modify_opts_new (void); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_set_sort (mongoc_find_and_modify_opts_t *opts, const bson_t *sort); MONGOC_EXPORT (void) mongoc_find_and_modify_opts_get_sort (const mongoc_find_and_modify_opts_t *opts, bson_t *sort); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_set_update (mongoc_find_and_modify_opts_t *opts, const bson_t *update); MONGOC_EXPORT (void) mongoc_find_and_modify_opts_get_update ( const mongoc_find_and_modify_opts_t *opts, bson_t *update); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_set_fields (mongoc_find_and_modify_opts_t *opts, const bson_t *fields); MONGOC_EXPORT (void) mongoc_find_and_modify_opts_get_fields ( const mongoc_find_and_modify_opts_t *opts, bson_t *fields); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_set_flags ( mongoc_find_and_modify_opts_t *opts, const mongoc_find_and_modify_flags_t flags); MONGOC_EXPORT (mongoc_find_and_modify_flags_t) mongoc_find_and_modify_opts_get_flags ( const mongoc_find_and_modify_opts_t *opts); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_set_bypass_document_validation ( mongoc_find_and_modify_opts_t *opts, bool bypass); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_get_bypass_document_validation ( const mongoc_find_and_modify_opts_t *opts); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_set_max_time_ms ( mongoc_find_and_modify_opts_t *opts, uint32_t max_time_ms); MONGOC_EXPORT (uint32_t) mongoc_find_and_modify_opts_get_max_time_ms ( const mongoc_find_and_modify_opts_t *opts); MONGOC_EXPORT (bool) mongoc_find_and_modify_opts_append (mongoc_find_and_modify_opts_t *opts, const bson_t *extra); MONGOC_EXPORT (void) mongoc_find_and_modify_opts_get_extra ( const mongoc_find_and_modify_opts_t *opts, bson_t *extra); MONGOC_EXPORT (void) mongoc_find_and_modify_opts_destroy (mongoc_find_and_modify_opts_t *opts); BSON_END_DECLS #endif /* MONGOC_FIND_AND_MODIFY_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-flags.h0000644000076500000240000001074413572250757024565 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_FLAGS_H #define MONGOC_FLAGS_H #include BSON_BEGIN_DECLS /** * mongoc_delete_flags_t: * @MONGOC_DELETE_NONE: Specify no delete flags. * @MONGOC_DELETE_SINGLE_REMOVE: Only remove the first document matching the * document selector. * * This type is only for use with deprecated functions and should not be * used in new code. Use mongoc_remove_flags_t instead. 
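 *
 * For example, instead of passing MONGOC_DELETE_SINGLE_REMOVE to a
 * deprecated delete function, new code would use the equivalent remove
 * flag below (illustrative sketch; mongoc_collection_remove () is
 * declared elsewhere in the library):
 *
 *   mongoc_collection_remove (collection,
 *                             MONGOC_REMOVE_SINGLE_REMOVE,
 *                             selector,
 *                             NULL,   (default write concern)
 *                             &error);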
* * #mongoc_delete_flags_t are used when performing a delete operation. */ typedef enum { MONGOC_DELETE_NONE = 0, MONGOC_DELETE_SINGLE_REMOVE = 1 << 0, } mongoc_delete_flags_t; /** * mongoc_remove_flags_t: * @MONGOC_REMOVE_NONE: Specify no delete flags. * @MONGOC_REMOVE_SINGLE_REMOVE: Only remove the first document matching the * document selector. * * #mongoc_remove_flags_t are used when performing a remove operation. */ typedef enum { MONGOC_REMOVE_NONE = 0, MONGOC_REMOVE_SINGLE_REMOVE = 1 << 0, } mongoc_remove_flags_t; /** * mongoc_insert_flags_t: * @MONGOC_INSERT_NONE: Specify no insert flags. * @MONGOC_INSERT_CONTINUE_ON_ERROR: Continue inserting documents from * the insertion set even if one fails. * * #mongoc_insert_flags_t are used when performing an insert operation. */ typedef enum { MONGOC_INSERT_NONE = 0, MONGOC_INSERT_CONTINUE_ON_ERROR = 1 << 0, } mongoc_insert_flags_t; #define MONGOC_INSERT_NO_VALIDATE (1U << 31) /** * mongoc_query_flags_t: * @MONGOC_QUERY_NONE: No query flags supplied. * @MONGOC_QUERY_TAILABLE_CURSOR: Cursor will not be closed when the last * data is retrieved. You can resume this cursor later. * @MONGOC_QUERY_SLAVE_OK: Allow query of replica slave. * @MONGOC_QUERY_OPLOG_REPLAY: Used internally by Mongo. * @MONGOC_QUERY_NO_CURSOR_TIMEOUT: The server normally times out idle * cursors after an inactivity period (10 minutes). This prevents that. * @MONGOC_QUERY_AWAIT_DATA: Use with %MONGOC_QUERY_TAILABLE_CURSOR. Block * rather than returning no data. After a period, time out. * @MONGOC_QUERY_EXHAUST: Stream the data down full blast in multiple * "more" packages. Faster when you are pulling a lot of data and * know you want to pull it all down. * @MONGOC_QUERY_PARTIAL: Get partial results from mongos if some shards * are down (instead of throwing an error). * * #mongoc_query_flags_t is used for querying a Mongo instance. */ typedef enum { MONGOC_QUERY_NONE = 0, MONGOC_QUERY_TAILABLE_CURSOR = 1 << 1, MONGOC_QUERY_SLAVE_OK = 1 << 2, MONGOC_QUERY_OPLOG_REPLAY = 1 << 3, MONGOC_QUERY_NO_CURSOR_TIMEOUT = 1 << 4, MONGOC_QUERY_AWAIT_DATA = 1 << 5, MONGOC_QUERY_EXHAUST = 1 << 6, MONGOC_QUERY_PARTIAL = 1 << 7, } mongoc_query_flags_t; /** * mongoc_reply_flags_t: * @MONGOC_REPLY_NONE: No flags set. * @MONGOC_REPLY_CURSOR_NOT_FOUND: Cursor was not found. * @MONGOC_REPLY_QUERY_FAILURE: Query failed, error document provided. * @MONGOC_REPLY_SHARD_CONFIG_STALE: Shard configuration is stale. * @MONGOC_REPLY_AWAIT_CAPABLE: Wait for data to be returned until timeout * has passed. Used with %MONGOC_QUERY_TAILABLE_CURSOR. * * #mongoc_reply_flags_t contains flags supplied by the Mongo server in reply * to a request. */ typedef enum { MONGOC_REPLY_NONE = 0, MONGOC_REPLY_CURSOR_NOT_FOUND = 1 << 0, MONGOC_REPLY_QUERY_FAILURE = 1 << 1, MONGOC_REPLY_SHARD_CONFIG_STALE = 1 << 2, MONGOC_REPLY_AWAIT_CAPABLE = 1 << 3, } mongoc_reply_flags_t; /** * mongoc_update_flags_t: * @MONGOC_UPDATE_NONE: No update flags specified. * @MONGOC_UPDATE_UPSERT: Perform an upsert. * @MONGOC_UPDATE_MULTI_UPDATE: Continue updating after first match. * * #mongoc_update_flags_t is used when updating documents found in Mongo. 
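 *
 * For example, an upsert that also updates every matching document would
 * combine the flags below (illustrative sketch; mongoc_collection_update ()
 * is declared elsewhere in the library):
 *
 *   mongoc_collection_update (collection,
 *                             MONGOC_UPDATE_UPSERT | MONGOC_UPDATE_MULTI_UPDATE,
 *                             selector,
 *                             update,
 *                             NULL,   (default write concern)
 *                             &error);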
*/ typedef enum { MONGOC_UPDATE_NONE = 0, MONGOC_UPDATE_UPSERT = 1 << 0, MONGOC_UPDATE_MULTI_UPDATE = 1 << 1, } mongoc_update_flags_t; #define MONGOC_UPDATE_NO_VALIDATE (1U << 31) BSON_END_DECLS #endif /* MONGOC_FLAGS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-bucket-file-private.h0000644000076500000240000000370413572250757030605 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_BUCKET_FILE_PRIVATE_H #define MONGOC_GRIDFS_BUCKET_FILE_PRIVATE_H #include "bson/bson.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-gridfs-bucket.h" BSON_BEGIN_DECLS typedef struct { /* corresponding bucket */ mongoc_gridfs_bucket_t *bucket; /* file data */ char *filename; bson_value_t *file_id; bson_t *metadata; int32_t chunk_size; int64_t length; /* fields for reading and writing */ uint8_t *buffer; size_t in_buffer; int32_t curr_chunk; /* for writing */ bool saved; /* for reading */ mongoc_cursor_t *cursor; int32_t bytes_read; bool finished; /* Error */ bson_error_t err; } mongoc_gridfs_bucket_file_t; ssize_t _mongoc_gridfs_bucket_file_writev (mongoc_gridfs_bucket_file_t *file, const mongoc_iovec_t *iov, size_t iovcnt); ssize_t _mongoc_gridfs_bucket_file_readv (mongoc_gridfs_bucket_file_t *file, mongoc_iovec_t *iov, size_t iovcnt); bool _mongoc_gridfs_bucket_file_save (mongoc_gridfs_bucket_file_t *file); void _mongoc_gridfs_bucket_file_destroy (mongoc_gridfs_bucket_file_t *file); BSON_END_DECLS #endif /* MONGOC_GRIDFS_BUCKET_FILE_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-bucket-file.c0000644000076500000240000003750013572250757027131 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc.h" #include "mongoc-gridfs-bucket-file-private.h" #include "mongoc-gridfs-bucket-private.h" #include "mongoc-trace-private.h" #include "mongoc-stream-gridfs-download-private.h" #include "mongoc-stream-gridfs-upload-private.h" /* Returns the minimum of two numbers */ static size_t _mongoc_min (const size_t a, const size_t b) { return a < b ? a : b; } /*-------------------------------------------------------------------------- * * _mongoc_create_index_if_not_present -- * * Creates an index in the given collection if it doesn't exist. * * Return: * True if the index was already present or was successfully created. 
* False if an error occurred while trying to create the index. * *-------------------------------------------------------------------------- */ static bool _mongoc_create_index_if_not_present (mongoc_collection_t *col, const bson_t *index, bool unique, bson_error_t *error) { mongoc_cursor_t *cursor; bool index_exists; bool r; const bson_t *doc; bson_iter_t iter; bson_t inner_doc; char *index_name; bson_t index_command; uint32_t data_len; const uint8_t *data; BSON_ASSERT (col); BSON_ASSERT (index); cursor = mongoc_collection_find_indexes_with_opts (col, NULL); index_exists = false; while (mongoc_cursor_next (cursor, &doc)) { r = bson_iter_init_find (&iter, doc, "key"); if (!r) { continue; } bson_iter_document (&iter, &data_len, &data); bson_init_static (&inner_doc, data, data_len); if (bson_compare (&inner_doc, index) == 0) { index_exists = true; } bson_destroy (&inner_doc); } mongoc_cursor_destroy (cursor); if (index_exists) { return true; } index_name = mongoc_collection_keys_to_index_string (index); bson_init (&index_command); BCON_APPEND (&index_command, "createIndexes", BCON_UTF8 (mongoc_collection_get_name (col)), "indexes", "[", "{", "key", BCON_DOCUMENT (index), "name", BCON_UTF8 (index_name), "unique", BCON_BOOL (unique), "}", "]"); r = mongoc_collection_write_command_with_opts ( col, &index_command, NULL, NULL, error); bson_destroy (&index_command); bson_free (index_name); if (!r) { return false; } return true; } /*-------------------------------------------------------------------------- * * _mongoc_gridfs_bucket_create_indexes -- * * Creates the indexes needed for GridFS on the 'files' and 'chunks' * collections. * * Return: * True if creating the indexes was successful, otherwise returns * false. * *-------------------------------------------------------------------------- */ static bool _mongoc_gridfs_bucket_create_indexes (mongoc_gridfs_bucket_t *bucket, bson_error_t *error) { mongoc_read_prefs_t *prefs; bson_t filter; bson_t opts; mongoc_cursor_t *cursor; const bson_t *doc; bson_t files_index; bson_t chunks_index; bool r; /* Check to see if there already exists a document in the files collection */ bson_init (&filter); bson_append_int32 (&filter, "_id", 3, 1); bson_init (&opts); bson_append_bool (&opts, "singleBatch", 11, true); bson_append_int32 (&opts, "limit", 5, 1); prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); cursor = mongoc_collection_find_with_opts (bucket->files, &filter, &opts, prefs); bson_destroy (&filter); bson_destroy (&opts); r = mongoc_cursor_next (cursor, &doc); mongoc_read_prefs_destroy (prefs); if (r) { /* Files exist */ mongoc_cursor_destroy (cursor); return true; } else if (mongoc_cursor_error (cursor, error)) { mongoc_cursor_destroy (cursor); return false; } mongoc_cursor_destroy (cursor); /* Create files index */ bson_init (&files_index); BSON_APPEND_INT32 (&files_index, "filename", 1); BSON_APPEND_INT32 (&files_index, "uploadDate", 1); r = _mongoc_create_index_if_not_present ( bucket->files, &files_index, false, error); bson_destroy (&files_index); if (!r) { return false; } /* Create unique chunks index */ bson_init (&chunks_index); BSON_APPEND_INT32 (&chunks_index, "files_id", 1); BSON_APPEND_INT32 (&chunks_index, "n", 1); r = _mongoc_create_index_if_not_present ( bucket->chunks, &chunks_index, true, error); bson_destroy (&chunks_index); if (!r) { return false; } return true; } /*-------------------------------------------------------------------------- * * _mongoc_gridfs_bucket_write_chunk -- * * Writes a chunk from the buffer into the chunks 
collection. * * Return: * Returns true if the chunk was successfully written. Otherwise, * returns false and sets an error on the bucket file. * *-------------------------------------------------------------------------- */ static bool _mongoc_gridfs_bucket_write_chunk (mongoc_gridfs_bucket_file_t *file) { bson_t chunk; bool r; BSON_ASSERT (file); bson_init (&chunk); BSON_APPEND_INT32 (&chunk, "n", file->curr_chunk); BSON_APPEND_VALUE (&chunk, "files_id", file->file_id); BSON_APPEND_BINARY (&chunk, "data", BSON_SUBTYPE_BINARY, file->buffer, (uint32_t) file->in_buffer); r = mongoc_collection_insert_one (file->bucket->chunks, &chunk, NULL /* opts */, NULL /* reply */, &file->err); bson_destroy (&chunk); if (!r) { return false; } file->curr_chunk++; file->in_buffer = 0; return true; } /*-------------------------------------------------------------------------- * * _mongoc_gridfs_bucket_init_cursor -- * * Initializes the cursor at file->cursor for the given file. * *-------------------------------------------------------------------------- */ static void _mongoc_gridfs_bucket_init_cursor (mongoc_gridfs_bucket_file_t *file) { bson_t filter; bson_t opts; bson_t sort; BSON_ASSERT (file); bson_init (&filter); bson_init (&opts); bson_init (&sort); BSON_APPEND_VALUE (&filter, "files_id", file->file_id); BSON_APPEND_INT32 (&sort, "n", 1); BSON_APPEND_DOCUMENT (&opts, "sort", &sort); file->cursor = mongoc_collection_find_with_opts ( file->bucket->chunks, &filter, &opts, NULL); bson_destroy (&filter); bson_destroy (&opts); bson_destroy (&sort); } /*-------------------------------------------------------------------------- * * _mongoc_gridfs_bucket_read_chunk -- * * Reads a chunk from the server and places it into the file's buffer * * Return: * True if the buffer has been filled with any available data. * Otherwise, false and sets the error on the bucket file. * *-------------------------------------------------------------------------- */ static bool _mongoc_gridfs_bucket_read_chunk (mongoc_gridfs_bucket_file_t *file) { const bson_t *next; bool r; bson_iter_t iter; int32_t n; const uint8_t *data; uint32_t data_len; int64_t total_chunks; int64_t expected_size; BSON_ASSERT (file); if (file->length == 0) { /* This file has zero length */ file->in_buffer = 0; file->finished = true; return true; } /* Calculate the total number of chunks for this file */ total_chunks = (file->length / file->chunk_size); if (file->length % file->chunk_size != 0) { total_chunks++; } if (file->curr_chunk == total_chunks) { /* All chunks have been read! 
*/ file->in_buffer = 0; file->finished = true; return true; } if (file->cursor == NULL) { _mongoc_gridfs_bucket_init_cursor (file); } r = mongoc_cursor_next (file->cursor, &next); if (mongoc_cursor_error (file->cursor, &file->err)) { return false; } if (!r) { bson_set_error (&file->err, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CHUNK_MISSING, "Missing chunk %d.", file->curr_chunk); return false; } r = bson_iter_init_find (&iter, next, "n"); if (!r) { bson_set_error (&file->err, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CORRUPT, "Chunk %d missing a required field 'n'.", file->curr_chunk); return false; } n = bson_iter_int32 (&iter); if (n != file->curr_chunk) { bson_set_error (&file->err, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CHUNK_MISSING, "Missing chunk %d.", file->curr_chunk); return false; } r = bson_iter_init_find (&iter, next, "data"); if (!r) { bson_set_error (&file->err, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CORRUPT, "Chunk %d missing a required field 'data'.", file->curr_chunk); return false; } bson_iter_binary (&iter, NULL, &data_len, &data); /* Assert that the data is the correct length */ if (file->curr_chunk != total_chunks - 1) { expected_size = file->chunk_size; } else { expected_size = file->length - ((total_chunks - 1) * file->chunk_size); } if (data_len != expected_size) { bson_set_error (&file->err, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CORRUPT, "Chunk %d expected to have size %" PRId64 " but is size %d.", file->curr_chunk, expected_size, data_len); return false; } memcpy (file->buffer, data, data_len); file->in_buffer = data_len; file->bytes_read = 0; file->curr_chunk++; return true; } ssize_t _mongoc_gridfs_bucket_file_writev (mongoc_gridfs_bucket_file_t *file, const mongoc_iovec_t *iov, size_t iovcnt) { uint32_t total; size_t bytes_available; size_t space_available; int32_t written_this_iov; size_t to_write; size_t i; bool r; BSON_ASSERT (file); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); total = 0; if (file->err.code) { return -1; } if (file->saved) { bson_set_error (&file->err, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_PROTOCOL_ERROR, "Cannot write after saving/aborting on a GridFS file."); return -1; } if (!file->bucket->indexed) { r = _mongoc_gridfs_bucket_create_indexes (file->bucket, &file->err); if (!r) { /* Error is set on file. 
*/ return -1; } else { file->bucket->indexed = true; } } for (i = 0; i < iovcnt; i++) { written_this_iov = 0; while (written_this_iov < iov[i].iov_len) { bytes_available = iov[i].iov_len - written_this_iov; space_available = file->chunk_size - file->in_buffer; to_write = _mongoc_min (bytes_available, space_available); memcpy (file->buffer + file->in_buffer, iov[i].iov_base + written_this_iov, to_write); file->in_buffer += to_write; written_this_iov += to_write; total += to_write; if (file->in_buffer == file->chunk_size) { /* Buffer is filled, write the chunk */ _mongoc_gridfs_bucket_write_chunk (file); } } } return total; } ssize_t _mongoc_gridfs_bucket_file_readv (mongoc_gridfs_bucket_file_t *file, mongoc_iovec_t *iov, size_t iovcnt) { uint32_t total; size_t bytes_available; size_t space_available; int32_t read_this_iov; size_t to_read; bool r; size_t i; BSON_ASSERT (file); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); total = 0; if (file->err.code) { return -1; } if (file->finished) { return 0; } for (i = 0; i < iovcnt; i++) { read_this_iov = 0; while (read_this_iov < iov[i].iov_len) { bytes_available = file->in_buffer - file->bytes_read; space_available = iov[i].iov_len - read_this_iov; to_read = _mongoc_min (bytes_available, space_available); memcpy (iov[i].iov_base + read_this_iov, file->buffer + file->bytes_read, to_read); file->bytes_read += to_read; read_this_iov += to_read; total += to_read; if (file->bytes_read == file->in_buffer) { /* Everything in the current chunk has been read, so read a new * chunk */ r = _mongoc_gridfs_bucket_read_chunk (file); if (!r) { /* an error occured while reading the chunk */ return -1; } if (file->finished) { /* There's nothing left to read */ RETURN (total); } } } } RETURN (total); } /*-------------------------------------------------------------------------- * * _mongoc_gridfs_bucket_file_save -- * * Saves the file to the files collection in gridFS. This locks the * file into GridFS, and no more chunks are allowed to be written. * * Return: * True if saved or no-op. False otherwise, and sets the file error. * *-------------------------------------------------------------------------- */ bool _mongoc_gridfs_bucket_file_save (mongoc_gridfs_bucket_file_t *file) { bson_t new_doc; int64_t length; bool r; BSON_ASSERT (file); if (file->saved) { /* Already saved, or aborted. */ return true; } if (file->err.code) { return false; } length = ((int64_t) file->curr_chunk) * file->chunk_size; if (file->in_buffer != 0) { length += file->in_buffer; _mongoc_gridfs_bucket_write_chunk (file); } file->length = length; bson_init (&new_doc); BSON_APPEND_VALUE (&new_doc, "_id", file->file_id); BSON_APPEND_INT64 (&new_doc, "length", file->length); BSON_APPEND_INT32 (&new_doc, "chunkSize", file->chunk_size); BSON_APPEND_DATE_TIME (&new_doc, "uploadDate", bson_get_monotonic_time ()); BSON_APPEND_UTF8 (&new_doc, "filename", file->filename); if (file->metadata) { BSON_APPEND_DOCUMENT (&new_doc, "metadata", file->metadata); } r = mongoc_collection_insert_one ( file->bucket->files, &new_doc, NULL, NULL, &file->err); bson_destroy (&new_doc); file->saved = r; return (file->err.code) ? 
false : true; } void _mongoc_gridfs_bucket_file_destroy (mongoc_gridfs_bucket_file_t *file) { if (file) { bson_value_destroy (file->file_id); bson_free (file->file_id); bson_destroy (file->metadata); mongoc_cursor_destroy (file->cursor); bson_free (file->buffer); bson_free (file->filename); bson_free (file); } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-bucket-private.h0000644000076500000240000000174513572250757027673 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_BUCKET_PRIVATE_H #define MONGOC_GRIDFS_BUCKET_PRIVATE_H #include "mongoc/mongoc-collection.h" BSON_BEGIN_DECLS struct _mongoc_gridfs_bucket_t { mongoc_collection_t *chunks; mongoc_collection_t *files; int32_t chunk_size; char *bucket_name; bool indexed; }; BSON_END_DECLS #endif /* MONGOC_GRIDFS_BUCKET_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-bucket.c0000644000076500000240000004006413572250757026213 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "bson/bson.h" #include "mongoc/mongoc.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-database-private.h" #include "mongoc/mongoc-gridfs-bucket-private.h" #include "mongoc/mongoc-gridfs-bucket-file-private.h" #include "mongoc/mongoc-opts-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-stream-gridfs-download-private.h" #include "mongoc/mongoc-stream-gridfs-upload-private.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-write-concern-private.h" /*-------------------------------------------------------------------------- * * _mongoc_gridfs_find_file_with_id -- * * Attempts to find the file corresponding to the given file_id in * GridFS. * * Return: * True on success and initializes file. Otherwise, returns false * and sets error. 
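 *
 * Note:
 *    The lookup issues a find on the bucket's files collection with the
 *    filter { "_id": <file_id> }; when no document matches and the cursor
 *    reports no other error, the function sets MONGOC_ERROR_GRIDFS /
 *    MONGOC_ERROR_GRIDFS_BUCKET_FILE_NOT_FOUND.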
* *-------------------------------------------------------------------------- */ static bool _mongoc_gridfs_find_file_with_id (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, bson_t *file, bson_error_t *error) { mongoc_cursor_t *cursor; bson_t filter; const bson_t *doc; bool r; BSON_ASSERT (bucket); BSON_ASSERT (file_id); bson_init (&filter); BSON_APPEND_VALUE (&filter, "_id", file_id); cursor = mongoc_collection_find_with_opts (bucket->files, &filter, NULL, NULL); bson_destroy (&filter); r = mongoc_cursor_next (cursor, &doc); if (!r) { if (!mongoc_cursor_error (cursor, error)) { bson_set_error (error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_BUCKET_FILE_NOT_FOUND, "No file with given id exists"); } } else { if (file) { bson_copy_to (doc, file); } } mongoc_cursor_destroy (cursor); return r; } mongoc_gridfs_bucket_t * mongoc_gridfs_bucket_new (mongoc_database_t *db, const bson_t *opts, const mongoc_read_prefs_t *read_prefs, bson_error_t *error) { mongoc_gridfs_bucket_t *bucket; char buf[128]; mongoc_gridfs_bucket_opts_t gridfs_opts; BSON_ASSERT (db); if (!_mongoc_gridfs_bucket_opts_parse ( db->client, opts, &gridfs_opts, error)) { _mongoc_gridfs_bucket_opts_cleanup (&gridfs_opts); return NULL; } /* Initialize the bucket fields */ if (strlen (gridfs_opts.bucketName) + strlen (".chunks") + 1 > sizeof (buf)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "bucketName \"%s\" must have fewer than %d characters", gridfs_opts.bucketName, (int) (sizeof (buf) - (strlen (".chunks") + 1))); } bucket = (mongoc_gridfs_bucket_t *) bson_malloc0 (sizeof *bucket); bson_snprintf (buf, sizeof (buf), "%s.chunks", gridfs_opts.bucketName); bucket->chunks = mongoc_database_get_collection (db, buf); bson_snprintf (buf, sizeof (buf), "%s.files", gridfs_opts.bucketName); bucket->files = mongoc_database_get_collection (db, buf); if (gridfs_opts.writeConcern) { mongoc_collection_set_write_concern (bucket->chunks, gridfs_opts.writeConcern); mongoc_collection_set_write_concern (bucket->files, gridfs_opts.writeConcern); } if (gridfs_opts.readConcern) { mongoc_collection_set_read_concern (bucket->chunks, gridfs_opts.readConcern); mongoc_collection_set_read_concern (bucket->files, gridfs_opts.readConcern); } if (read_prefs) { mongoc_collection_set_read_prefs (bucket->chunks, read_prefs); mongoc_collection_set_read_prefs (bucket->files, read_prefs); } bucket->chunk_size = gridfs_opts.chunkSizeBytes; bucket->bucket_name = bson_strdup (gridfs_opts.bucketName); _mongoc_gridfs_bucket_opts_cleanup (&gridfs_opts); return bucket; } mongoc_stream_t * mongoc_gridfs_bucket_open_upload_stream_with_id (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, const char *filename, const bson_t *opts, bson_error_t *error) { mongoc_gridfs_bucket_file_t *file; size_t len; mongoc_gridfs_bucket_upload_opts_t gridfs_opts; BSON_ASSERT (bucket); BSON_ASSERT (file_id); BSON_ASSERT (filename); if (!_mongoc_gridfs_bucket_upload_opts_parse ( NULL /* not needed. */, opts, &gridfs_opts, error)) { _mongoc_gridfs_bucket_upload_opts_cleanup (&gridfs_opts); return NULL; } /* default to bucket's chunk size. 
*/ if (!gridfs_opts.chunkSizeBytes) { gridfs_opts.chunkSizeBytes = bucket->chunk_size; } /* Initialize the file's fields */ len = strlen (filename); file = (mongoc_gridfs_bucket_file_t *) bson_malloc0 (sizeof *file); file->filename = bson_malloc0 (len + 1); bson_strncpy (file->filename, filename, len + 1); file->file_id = (bson_value_t *) bson_malloc0 (sizeof *(file->file_id)); bson_value_copy (file_id, file->file_id); file->bucket = bucket; file->chunk_size = gridfs_opts.chunkSizeBytes; file->metadata = bson_copy (&gridfs_opts.metadata); file->buffer = bson_malloc ((size_t) gridfs_opts.chunkSizeBytes); file->in_buffer = 0; _mongoc_gridfs_bucket_upload_opts_cleanup (&gridfs_opts); return _mongoc_upload_stream_gridfs_new (file); } mongoc_stream_t * mongoc_gridfs_bucket_open_upload_stream (mongoc_gridfs_bucket_t *bucket, const char *filename, const bson_t *opts, bson_value_t *file_id /* OUT */, bson_error_t *error) { mongoc_stream_t *stream; bson_oid_t object_id; bson_value_t val; BSON_ASSERT (bucket); BSON_ASSERT (filename); /* Create an objectId to use as the file's id */ bson_oid_init (&object_id, NULL); val.value_type = BSON_TYPE_OID; val.value.v_oid = object_id; stream = mongoc_gridfs_bucket_open_upload_stream_with_id ( bucket, &val, filename, opts, error); if (!stream) { return NULL; } if (file_id) { bson_value_copy (&val, file_id); } return stream; } bool mongoc_gridfs_bucket_upload_from_stream_with_id (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, const char *filename, mongoc_stream_t *source, const bson_t *opts, bson_error_t *error) { mongoc_stream_t *upload_stream; ssize_t bytes_read; ssize_t bytes_written; char buf[512]; BSON_ASSERT (bucket); BSON_ASSERT (file_id); BSON_ASSERT (filename); BSON_ASSERT (source); upload_stream = mongoc_gridfs_bucket_open_upload_stream_with_id ( bucket, file_id, filename, opts, error); if (!upload_stream) { return false; } while ((bytes_read = mongoc_stream_read (source, buf, 512, 1, 0)) > 0) { bytes_written = mongoc_stream_write (upload_stream, buf, bytes_read, 0); if (bytes_written < 0) { BSON_ASSERT (mongoc_gridfs_bucket_stream_error (upload_stream, error)); mongoc_gridfs_bucket_abort_upload (upload_stream); mongoc_stream_destroy (upload_stream); return false; } } if (bytes_read < 0) { mongoc_gridfs_bucket_abort_upload (upload_stream); bson_set_error (error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_BUCKET_STREAM, "Error occurred on the provided stream."); mongoc_stream_destroy (upload_stream); return false; } else { mongoc_stream_destroy (upload_stream); return true; } } bool mongoc_gridfs_bucket_upload_from_stream (mongoc_gridfs_bucket_t *bucket, const char *filename, mongoc_stream_t *source, const bson_t *opts, bson_value_t *file_id /* OUT */, bson_error_t *error) { bool r; bson_oid_t object_id; bson_value_t val; BSON_ASSERT (bucket); BSON_ASSERT (filename); BSON_ASSERT (source); /* Create an objectId to use as the file's id */ bson_oid_init (&object_id, bson_context_get_default ()); val.value_type = BSON_TYPE_OID; val.value.v_oid = object_id; r = mongoc_gridfs_bucket_upload_from_stream_with_id ( bucket, &val, filename, source, opts, error); if (!r) { return false; } if (file_id) { bson_value_copy (&val, file_id); } return true; } mongoc_stream_t * mongoc_gridfs_bucket_open_download_stream (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, bson_error_t *error) { mongoc_gridfs_bucket_file_t *file; bson_t file_doc; const char *key; bson_iter_t iter; uint32_t data_len; const uint8_t *data; bool r; BSON_ASSERT (bucket); 
BSON_ASSERT (file_id); r = _mongoc_gridfs_find_file_with_id (bucket, file_id, &file_doc, error); if (!r) { /* Error should already be set. */ return NULL; } if (!bson_iter_init (&iter, &file_doc)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "File document malformed"); return NULL; } file = (mongoc_gridfs_bucket_file_t *) bson_malloc0 (sizeof *file); while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); if (strcmp (key, "length") == 0) { file->length = bson_iter_as_int64 (&iter); } else if (strcmp (key, "chunkSize") == 0) { file->chunk_size = bson_iter_int32 (&iter); } else if (strcmp (key, "filename") == 0) { file->filename = bson_strdup (bson_iter_utf8 (&iter, NULL)); } else if (strcmp (key, "metadata") == 0) { bson_iter_document (&iter, &data_len, &data); file->metadata = bson_new_from_data (data, data_len); } } bson_destroy (&file_doc); file->file_id = (bson_value_t *) bson_malloc0 (sizeof *(file->file_id)); bson_value_copy (file_id, file->file_id); file->bucket = bucket; file->buffer = bson_malloc0 ((size_t) file->chunk_size); BSON_ASSERT (file->file_id); return _mongoc_download_stream_gridfs_new (file); } bool mongoc_gridfs_bucket_download_to_stream (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, mongoc_stream_t *destination, bson_error_t *error) { mongoc_stream_t *download_stream; ssize_t bytes_read; ssize_t bytes_written; char buf[512]; BSON_ASSERT (bucket); BSON_ASSERT (file_id); BSON_ASSERT (destination); /* Make the download stream */ download_stream = mongoc_gridfs_bucket_open_download_stream (bucket, file_id, error); while ((bytes_read = mongoc_stream_read (download_stream, buf, 256, 1, 0)) > 0) { bytes_written = mongoc_stream_write (destination, buf, bytes_read, 0); if (bytes_written < 0) { bson_set_error (error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_BUCKET_STREAM, "Error occurred on the provided stream."); mongoc_stream_destroy (download_stream); return false; } } mongoc_stream_destroy (download_stream); return bytes_read != -1; } bool mongoc_gridfs_bucket_delete_by_id (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, bson_error_t *error) { bson_t files_selector; bson_t chunks_selector; bson_t reply; bson_iter_t iter; bool r; BSON_ASSERT (bucket); BSON_ASSERT (file_id); bson_init (&files_selector); BSON_APPEND_VALUE (&files_selector, "_id", file_id); r = mongoc_collection_delete_one ( bucket->files, &files_selector, NULL, &reply, error); bson_destroy (&files_selector); if (!r) { bson_destroy (&reply); return false; } BSON_ASSERT (bson_iter_init_find (&iter, &reply, "deletedCount")); if (bson_iter_as_int64 (&iter) != 1) { bson_set_error (error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_BUCKET_FILE_NOT_FOUND, "File not found"); bson_destroy (&reply); return false; } bson_destroy (&reply); bson_init (&chunks_selector); BSON_APPEND_VALUE (&chunks_selector, "files_id", file_id); r = mongoc_collection_delete_many ( bucket->chunks, &chunks_selector, NULL, NULL, error); bson_destroy (&chunks_selector); if (!r) { return false; } return true; } mongoc_cursor_t * mongoc_gridfs_bucket_find (mongoc_gridfs_bucket_t *bucket, const bson_t *filter, const bson_t *opts) { mongoc_cursor_t *cursor; BSON_ASSERT (bucket); BSON_ASSERT (filter); cursor = mongoc_collection_find_with_opts (bucket->files, filter, NULL, NULL); if (!cursor->error.code && bson_has_field (opts, "sessionId")) { bson_set_error (&cursor->error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "Cannot pass sessionId as an option"); } return cursor; } bool 
mongoc_gridfs_bucket_stream_error (mongoc_stream_t *stream, bson_error_t *error) { bson_error_t *stream_err; BSON_ASSERT (stream); BSON_ASSERT (error); if (stream->type == MONGOC_STREAM_GRIDFS_UPLOAD) { stream_err = &((mongoc_gridfs_upload_stream_t *) stream)->file->err; } else if (stream->type == MONGOC_STREAM_GRIDFS_DOWNLOAD) { stream_err = &((mongoc_gridfs_download_stream_t *) stream)->file->err; } else { return false; } if (stream_err->code) { memcpy (error, stream_err, sizeof (*stream_err)); return true; } else { return false; } } void mongoc_gridfs_bucket_destroy (mongoc_gridfs_bucket_t *bucket) { if (bucket) { mongoc_collection_destroy (bucket->chunks); mongoc_collection_destroy (bucket->files); bson_free (bucket->bucket_name); bson_free (bucket); } } bool mongoc_gridfs_bucket_abort_upload (mongoc_stream_t *stream) { mongoc_gridfs_bucket_file_t *file; bson_t chunks_selector; bool r; BSON_ASSERT (stream); BSON_ASSERT (stream->type == MONGOC_STREAM_GRIDFS_UPLOAD); file = ((mongoc_gridfs_upload_stream_t *) stream)->file; /* Pretend we've already saved. This way we won't add an entry to the files * collection when the stream is closed */ file->saved = true; bson_init (&chunks_selector); BSON_APPEND_VALUE (&chunks_selector, "files_id", file->file_id); r = mongoc_collection_delete_many ( file->bucket->chunks, &chunks_selector, NULL, NULL, &file->err); bson_destroy (&chunks_selector); return r; }mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-bucket.h0000644000076500000240000001002313572250757026210 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
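 *
 * Illustrative usage sketch for the GridFS bucket API declared below (not
 * upstream code). mongoc_client_get_database () and
 * mongoc_stream_file_new_for_path () are assumed from elsewhere in the
 * library, and "/tmp/report.pdf" is a made-up path:
 *
 *   bson_error_t error;
 *   bson_value_t file_id;
 *   mongoc_database_t *db = mongoc_client_get_database (client, "test");
 *   mongoc_gridfs_bucket_t *bucket =
 *      mongoc_gridfs_bucket_new (db, NULL, NULL, &error);
 *   mongoc_stream_t *src =
 *      mongoc_stream_file_new_for_path ("/tmp/report.pdf", O_RDONLY, 0);
 *
 *   if (bucket && src &&
 *       mongoc_gridfs_bucket_upload_from_stream (
 *          bucket, "report.pdf", src, NULL, &file_id, &error)) {
 *      bson_value_destroy (&file_id);
 *   }
 *
 *   mongoc_stream_destroy (src);
 *   mongoc_gridfs_bucket_destroy (bucket);
 *   mongoc_database_destroy (db);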
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_BUCKET_H #define MONGOC_GRIDFS_BUCKET_H #include "bson/bson.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-database.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS typedef struct _mongoc_gridfs_bucket_t mongoc_gridfs_bucket_t; MONGOC_EXPORT (mongoc_gridfs_bucket_t *) mongoc_gridfs_bucket_new (mongoc_database_t *db, const bson_t *opts, const mongoc_read_prefs_t *read_prefs, bson_error_t *error); MONGOC_EXPORT (mongoc_stream_t *) mongoc_gridfs_bucket_open_upload_stream (mongoc_gridfs_bucket_t *bucket, const char *filename, const bson_t *opts, bson_value_t *file_id, bson_error_t *error); MONGOC_EXPORT (mongoc_stream_t *) mongoc_gridfs_bucket_open_upload_stream_with_id (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, const char *filename, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_gridfs_bucket_upload_from_stream (mongoc_gridfs_bucket_t *bucket, const char *filename, mongoc_stream_t *source, const bson_t *opts, bson_value_t *file_id, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_gridfs_bucket_upload_from_stream_with_id (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, const char *filename, mongoc_stream_t *source, const bson_t *opts, bson_error_t *error); MONGOC_EXPORT (mongoc_stream_t *) mongoc_gridfs_bucket_open_download_stream (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_gridfs_bucket_download_to_stream (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, mongoc_stream_t *destination, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_gridfs_bucket_delete_by_id (mongoc_gridfs_bucket_t *bucket, const bson_value_t *file_id, bson_error_t *error); MONGOC_EXPORT (mongoc_cursor_t *) mongoc_gridfs_bucket_find (mongoc_gridfs_bucket_t *bucket, const bson_t *filter, const bson_t *opts); MONGOC_EXPORT (bool) mongoc_gridfs_bucket_stream_error (mongoc_stream_t *stream, bson_error_t *error); MONGOC_EXPORT (void) mongoc_gridfs_bucket_destroy (mongoc_gridfs_bucket_t *bucket); MONGOC_EXPORT (bool) mongoc_gridfs_bucket_abort_upload (mongoc_stream_t *stream); BSON_END_DECLS #endif /* MONGOC_GRIDFS_BUCKET_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file-list-private.h0000644000076500000240000000265313572250757030305 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
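 *
 * Illustrative sketch of consuming a file list through the public API
 * declared in mongoc-gridfs-file-list.h (not upstream code). The list is
 * assumed to come from mongoc_gridfs_find_with_opts () or a similar
 * helper elsewhere in the library, and mongoc_gridfs_file_destroy () is
 * part of the file API:
 *
 *   mongoc_gridfs_file_t *file;
 *   bson_error_t error;
 *
 *   while ((file = mongoc_gridfs_file_list_next (list))) {
 *      printf ("%s\n", mongoc_gridfs_file_get_filename (file));
 *      mongoc_gridfs_file_destroy (file);
 *   }
 *
 *   if (mongoc_gridfs_file_list_error (list, &error)) {
 *      fprintf (stderr, "listing files failed: %s\n", error.message);
 *   }
 *
 *   mongoc_gridfs_file_list_destroy (list);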
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_FILE_LIST_PRIVATE_H #define MONGOC_GRIDFS_FILE_LIST_PRIVATE_H #include #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-cursor.h" BSON_BEGIN_DECLS struct _mongoc_gridfs_file_list_t { mongoc_gridfs_t *gridfs; mongoc_cursor_t *cursor; bson_error_t error; }; mongoc_gridfs_file_list_t * _mongoc_gridfs_file_list_new (mongoc_gridfs_t *gridfs, const bson_t *query, uint32_t limit); mongoc_gridfs_file_list_t * _mongoc_gridfs_file_list_new_with_opts (mongoc_gridfs_t *gridfs, const bson_t *filter, const bson_t *opts); BSON_END_DECLS #endif /* MONGOC_GRIDFS_FILE_LIST_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.c0000644000076500000240000000675613572250757026640 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-collection-private.h" #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-gridfs-private.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-gridfs-file-private.h" #include "mongoc/mongoc-gridfs-file-list.h" #include "mongoc/mongoc-gridfs-file-list-private.h" #include "mongoc/mongoc-trace-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "gridfs_file_list" mongoc_gridfs_file_list_t * _mongoc_gridfs_file_list_new (mongoc_gridfs_t *gridfs, const bson_t *query, uint32_t limit) { mongoc_gridfs_file_list_t *list; mongoc_cursor_t *cursor; bool use_unwrapped; bson_t opts; bson_t unwrapped; bson_error_t error; bson_init (&opts); use_unwrapped = _mongoc_cursor_translate_dollar_query_opts ( query, &opts, &unwrapped, &error); cursor = _mongoc_cursor_find_new (gridfs->client, gridfs->files->ns, use_unwrapped ? 
&unwrapped : query, &opts, NULL, gridfs->files->read_prefs, gridfs->files->read_concern); BSON_ASSERT (cursor); bson_destroy (&opts); if (limit) { (void) mongoc_cursor_set_limit (cursor, limit); } bson_destroy (&unwrapped); if (error.domain) { memcpy (&cursor->error, &error, sizeof (bson_error_t)); } list = (mongoc_gridfs_file_list_t *) bson_malloc0 (sizeof *list); list->cursor = cursor; list->gridfs = gridfs; return list; } mongoc_gridfs_file_list_t * _mongoc_gridfs_file_list_new_with_opts (mongoc_gridfs_t *gridfs, const bson_t *filter, const bson_t *opts) { mongoc_gridfs_file_list_t *list; mongoc_cursor_t *cursor; cursor = mongoc_collection_find_with_opts ( gridfs->files, filter, opts, NULL /* read prefs */); BSON_ASSERT (cursor); list = (mongoc_gridfs_file_list_t *) bson_malloc0 (sizeof *list); list->cursor = cursor; list->gridfs = gridfs; return list; } mongoc_gridfs_file_t * mongoc_gridfs_file_list_next (mongoc_gridfs_file_list_t *list) { const bson_t *bson; BSON_ASSERT (list); if (mongoc_cursor_next (list->cursor, &bson)) { return _mongoc_gridfs_file_new_from_bson (list->gridfs, bson); } else { return NULL; } } bool mongoc_gridfs_file_list_error (mongoc_gridfs_file_list_t *list, bson_error_t *error) { return mongoc_cursor_error (list->cursor, error); } void mongoc_gridfs_file_list_destroy (mongoc_gridfs_file_list_t *list) { if (!list) { return; } mongoc_cursor_destroy (list->cursor); bson_free (list); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file-list.h0000644000076500000240000000237413572250757026635 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_FILE_LIST_H #define MONGOC_GRIDFS_FILE_LIST_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-gridfs-file.h" BSON_BEGIN_DECLS typedef struct _mongoc_gridfs_file_list_t mongoc_gridfs_file_list_t; MONGOC_EXPORT (mongoc_gridfs_file_t *) mongoc_gridfs_file_list_next (mongoc_gridfs_file_list_t *list); MONGOC_EXPORT (void) mongoc_gridfs_file_list_destroy (mongoc_gridfs_file_list_t *list); MONGOC_EXPORT (bool) mongoc_gridfs_file_list_error (mongoc_gridfs_file_list_t *list, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_GRIDFS_FILE_LIST_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file-page-private.h0000644000076500000240000000413713572250757030245 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
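 *
 * Note on the buffers in the struct below: read_buf points at borrowed,
 * read-only chunk data supplied when the page is created, while buf is
 * only allocated (and read_buf redirected to it) on the first write or
 * memset, implementing the copy-on-write behaviour described in
 * mongoc-gridfs-file-page.c.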
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_FILE_PAGE_PRIVATE_H #define MONGOC_GRIDFS_FILE_PAGE_PRIVATE_H #include #include "mongoc/mongoc-gridfs-file.h" BSON_BEGIN_DECLS struct _mongoc_gridfs_file_page_t { const uint8_t *read_buf; uint8_t *buf; uint32_t len; uint32_t chunk_size; uint32_t offset; }; mongoc_gridfs_file_page_t * _mongoc_gridfs_file_page_new (const uint8_t *data, uint32_t len, uint32_t chunk_size); void _mongoc_gridfs_file_page_destroy (mongoc_gridfs_file_page_t *page); bool _mongoc_gridfs_file_page_seek (mongoc_gridfs_file_page_t *page, uint32_t offset); int32_t _mongoc_gridfs_file_page_read (mongoc_gridfs_file_page_t *page, void *dst, uint32_t len); int32_t _mongoc_gridfs_file_page_write (mongoc_gridfs_file_page_t *page, const void *src, uint32_t len); uint32_t _mongoc_gridfs_file_page_memset0 (mongoc_gridfs_file_page_t *page, uint32_t len); uint32_t _mongoc_gridfs_file_page_tell (mongoc_gridfs_file_page_t *page); const uint8_t * _mongoc_gridfs_file_page_get_data (mongoc_gridfs_file_page_t *page); uint32_t _mongoc_gridfs_file_page_get_len (mongoc_gridfs_file_page_t *page); bool _mongoc_gridfs_file_page_is_dirty (mongoc_gridfs_file_page_t *page); BSON_END_DECLS #endif /* MONGOC_GRIDFS_FILE_PAGE_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.c0000644000076500000240000001177313572250757026574 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "gridfs_file_page" #include "mongoc/mongoc-gridfs-file-page.h" #include "mongoc/mongoc-gridfs-file-page-private.h" #include "mongoc/mongoc-trace-private.h" /** create a new page from a buffer * * The buffer should stick around for the life of the page */ mongoc_gridfs_file_page_t * _mongoc_gridfs_file_page_new (const uint8_t *data, uint32_t len, uint32_t chunk_size) { mongoc_gridfs_file_page_t *page; ENTRY; BSON_ASSERT (data); BSON_ASSERT (len <= chunk_size); page = (mongoc_gridfs_file_page_t *) bson_malloc0 (sizeof *page); page->chunk_size = chunk_size; page->read_buf = data; page->len = len; RETURN (page); } bool _mongoc_gridfs_file_page_seek (mongoc_gridfs_file_page_t *page, uint32_t offset) { ENTRY; BSON_ASSERT (page); page->offset = offset; RETURN (1); } int32_t _mongoc_gridfs_file_page_read (mongoc_gridfs_file_page_t *page, void *dst, uint32_t len) { int bytes_read; const uint8_t *src; ENTRY; BSON_ASSERT (page); BSON_ASSERT (dst); bytes_read = BSON_MIN (len, page->len - page->offset); src = page->read_buf ? page->read_buf : page->buf; memcpy (dst, src + page->offset, bytes_read); page->offset += bytes_read; RETURN (bytes_read); } /** * _mongoc_gridfs_file_page_write: * * Write to a page. * * Writes are copy-on-write with regards to the buffer that was passed to the * mongoc_gridfs_file_page_t during construction. In other words, the first * write allocates a large enough buffer for file->chunk_size, which becomes * authoritative from then on. 
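 *
 * For example (illustrative numbers only): with chunk_size 8, len 8 and
 * offset 6, a request to write 5 bytes copies the read buffer on first
 * use, writes BSON_MIN (5, 8 - 6) = 2 bytes, advances the offset to 8 and
 * returns 2, leaving the remaining bytes for a subsequent write.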
* * A write of zero bytes will trigger the copy-on-write mechanism. */ int32_t _mongoc_gridfs_file_page_write (mongoc_gridfs_file_page_t *page, const void *src, uint32_t len) { int bytes_written; ENTRY; BSON_ASSERT (page); BSON_ASSERT (src); bytes_written = BSON_MIN (len, page->chunk_size - page->offset); if (!page->buf) { page->buf = (uint8_t *) bson_malloc (page->chunk_size); memcpy ( page->buf, page->read_buf, BSON_MIN (page->chunk_size, page->len)); } /* Copy bytes and adjust the page position */ memcpy (page->buf + page->offset, src, bytes_written); page->offset += bytes_written; page->len = BSON_MAX (page->offset, page->len); /* Don't use the old read buffer, which is no longer current */ page->read_buf = page->buf; RETURN (bytes_written); } /** * _mongoc_gridfs_file_page_memset0: * * Write zeros to a page, starting from the page's current position. Up to * `len` bytes will be set to zero or until the page is full, whichever * comes first. * * Like _mongoc_gridfs_file_page_write, operations are copy-on-write with * regards to the page buffer. * * Returns: * Number of bytes set. */ uint32_t _mongoc_gridfs_file_page_memset0 (mongoc_gridfs_file_page_t *page, uint32_t len) { uint32_t bytes_set; ENTRY; BSON_ASSERT (page); bytes_set = BSON_MIN (page->chunk_size - page->offset, len); if (!page->buf) { page->buf = (uint8_t *) bson_malloc0 (page->chunk_size); memcpy ( page->buf, page->read_buf, BSON_MIN (page->chunk_size, page->len)); } /* Set bytes and adjust the page position */ memset (page->buf + page->offset, '\0', bytes_set); page->offset += bytes_set; page->len = BSON_MAX (page->offset, page->len); /* Don't use the old read buffer, which is no longer current */ page->read_buf = page->buf; RETURN (bytes_set); } const uint8_t * _mongoc_gridfs_file_page_get_data (mongoc_gridfs_file_page_t *page) { ENTRY; BSON_ASSERT (page); RETURN (page->buf ? page->buf : page->read_buf); } uint32_t _mongoc_gridfs_file_page_get_len (mongoc_gridfs_file_page_t *page) { ENTRY; BSON_ASSERT (page); RETURN (page->len); } uint32_t _mongoc_gridfs_file_page_tell (mongoc_gridfs_file_page_t *page) { ENTRY; BSON_ASSERT (page); RETURN (page->offset); } bool _mongoc_gridfs_file_page_is_dirty (mongoc_gridfs_file_page_t *page) { ENTRY; BSON_ASSERT (page); RETURN (page->buf ? 1 : 0); } void _mongoc_gridfs_file_page_destroy (mongoc_gridfs_file_page_t *page) { ENTRY; if (page->buf) { bson_free (page->buf); } bson_free (page); EXIT; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file-page.h0000644000076500000240000000173513572250757026576 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
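 *
 * Illustrative lifecycle sketch for the page type declared below, using
 * the private helpers defined in mongoc-gridfs-file-page.c above (not
 * upstream code; chunk_data, chunk_len and chunk_size stand for one
 * chunk's payload and the configured chunk size):
 *
 *   mongoc_gridfs_file_page_t *page =
 *      _mongoc_gridfs_file_page_new (chunk_data, chunk_len, chunk_size);
 *   uint8_t out[64];
 *
 *   _mongoc_gridfs_file_page_seek (page, 0);
 *   _mongoc_gridfs_file_page_read (page, out, sizeof (out));
 *   _mongoc_gridfs_file_page_write (page, "new bytes", 9);   (copy-on-write)
 *   if (_mongoc_gridfs_file_page_is_dirty (page)) {
 *      (flush the data returned by _mongoc_gridfs_file_page_get_data ())
 *   }
 *   _mongoc_gridfs_file_page_destroy (page);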
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_FILE_PAGE_H #define MONGOC_GRIDFS_FILE_PAGE_H #include #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-gridfs-file-list.h" BSON_BEGIN_DECLS typedef struct _mongoc_gridfs_file_page_t mongoc_gridfs_file_page_t; BSON_END_DECLS #endif /* MONGOC_GRIDFS_FILE_PAGE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file-private.h0000644000076500000240000000341213572250757027326 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_FILE_PRIVATE_H #define MONGOC_GRIDFS_FILE_PRIVATE_H #include #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-gridfs-file-page.h" #include "mongoc/mongoc-cursor.h" BSON_BEGIN_DECLS struct _mongoc_gridfs_file_t { mongoc_gridfs_t *gridfs; bson_t bson; mongoc_gridfs_file_page_t *page; uint64_t pos; int32_t n; bson_error_t error; mongoc_cursor_t *cursor; uint32_t cursor_range[2]; /* current chunk, # of chunks */ bool is_dirty; bson_value_t files_id; int64_t length; int32_t chunk_size; int64_t upload_date; char *md5; char *filename; char *content_type; bson_t aliases; bson_t metadata; const char *bson_md5; const char *bson_filename; const char *bson_content_type; bson_t bson_aliases; bson_t bson_metadata; }; mongoc_gridfs_file_t * _mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data); mongoc_gridfs_file_t * _mongoc_gridfs_file_new (mongoc_gridfs_t *gridfs, mongoc_gridfs_file_opt_t *opt); BSON_END_DECLS #endif /* MONGOC_GRIDFS_FILE_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file.c0000644000076500000240000007066313572250757025665 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "gridfs_file" #include #include #include #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-gridfs-private.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-gridfs-file-private.h" #include "mongoc/mongoc-gridfs-file-page.h" #include "mongoc/mongoc-gridfs-file-page-private.h" #include "mongoc/mongoc-iovec.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-error.h" static bool _mongoc_gridfs_file_refresh_page (mongoc_gridfs_file_t *file); static bool _mongoc_gridfs_file_flush_page (mongoc_gridfs_file_t *file); static ssize_t _mongoc_gridfs_file_extend (mongoc_gridfs_file_t *file); /***************************************************************** * Magic accessor generation * * We need some accessors to get and set properties on files, to handle memory * ownership and to determine dirtiness. These macros produce the getters and * setters we need *****************************************************************/ #define MONGOC_GRIDFS_FILE_STR_ACCESSOR(name) \ const char *mongoc_gridfs_file_get_##name (mongoc_gridfs_file_t *file) \ { \ return file->name ? file->name : file->bson_##name; \ } \ void mongoc_gridfs_file_set_##name (mongoc_gridfs_file_t *file, \ const char *str) \ { \ if (file->name) { \ bson_free (file->name); \ } \ file->name = bson_strdup (str); \ file->is_dirty = 1; \ } #define MONGOC_GRIDFS_FILE_BSON_ACCESSOR(name) \ const bson_t *mongoc_gridfs_file_get_##name (mongoc_gridfs_file_t *file) \ { \ if (file->name.len) { \ return &file->name; \ } else if (file->bson_##name.len) { \ return &file->bson_##name; \ } else { \ return NULL; \ } \ } \ void mongoc_gridfs_file_set_##name (mongoc_gridfs_file_t *file, \ const bson_t *bson) \ { \ if (file->name.len) { \ bson_destroy (&file->name); \ } \ bson_copy_to (bson, &(file->name)); \ file->is_dirty = 1; \ } MONGOC_GRIDFS_FILE_STR_ACCESSOR (md5) MONGOC_GRIDFS_FILE_STR_ACCESSOR (filename) MONGOC_GRIDFS_FILE_STR_ACCESSOR (content_type) MONGOC_GRIDFS_FILE_BSON_ACCESSOR (aliases) MONGOC_GRIDFS_FILE_BSON_ACCESSOR (metadata) /** * mongoc_gridfs_file_set_id: * * the user can set the files_id to an id of any type. Must be called before * mongoc_gridfs_file_save. 
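 *
 * Illustrative sketch (not part of the original comment): to store a file
 * under an integer id instead of the ObjectId generated by
 * _mongoc_gridfs_file_new:
 *
 *   bson_value_t id;
 *   id.value_type = BSON_TYPE_INT32;
 *   id.value.v_int32 = 42;
 *   mongoc_gridfs_file_set_id (file, &id, &error);  // fails once saved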
* */ bool mongoc_gridfs_file_set_id (mongoc_gridfs_file_t *file, const bson_value_t *id, bson_error_t *error) { if (!file->is_dirty) { bson_set_error (error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_PROTOCOL_ERROR, "Cannot set file id after saving file."); return false; } bson_value_copy (id, &file->files_id); return true; } /** save a gridfs file */ bool mongoc_gridfs_file_save (mongoc_gridfs_file_t *file) { bson_t *selector, *update, child; const char *md5; const char *filename; const char *content_type; const bson_t *aliases; const bson_t *metadata; bool r; ENTRY; if (!file->is_dirty) { return 1; } if (file->page && _mongoc_gridfs_file_page_is_dirty (file->page)) { if (!_mongoc_gridfs_file_flush_page (file)) { RETURN (false); } } md5 = mongoc_gridfs_file_get_md5 (file); filename = mongoc_gridfs_file_get_filename (file); content_type = mongoc_gridfs_file_get_content_type (file); aliases = mongoc_gridfs_file_get_aliases (file); metadata = mongoc_gridfs_file_get_metadata (file); selector = bson_new (); bson_append_value (selector, "_id", -1, &file->files_id); update = bson_new (); bson_append_document_begin (update, "$set", -1, &child); bson_append_int64 (&child, "length", -1, file->length); bson_append_int32 (&child, "chunkSize", -1, file->chunk_size); bson_append_date_time (&child, "uploadDate", -1, file->upload_date); if (md5) { bson_append_utf8 (&child, "md5", -1, md5, -1); } if (filename) { bson_append_utf8 (&child, "filename", -1, filename, -1); } if (content_type) { bson_append_utf8 (&child, "contentType", -1, content_type, -1); } if (aliases) { bson_append_array (&child, "aliases", -1, aliases); } if (metadata) { bson_append_document (&child, "metadata", -1, metadata); } bson_append_document_end (update, &child); r = mongoc_collection_update (file->gridfs->files, MONGOC_UPDATE_UPSERT, selector, update, NULL, &file->error); bson_destroy (selector); bson_destroy (update); file->is_dirty = 0; RETURN (r); } /** * _mongoc_gridfs_file_new_from_bson: * * creates a gridfs file from a bson object * * This is only really useful for instantiating a gridfs file from a server * side object */ mongoc_gridfs_file_t * _mongoc_gridfs_file_new_from_bson (mongoc_gridfs_t *gridfs, const bson_t *data) { mongoc_gridfs_file_t *file; const bson_value_t *value; const char *key; bson_iter_t iter; const uint8_t *buf; uint32_t buf_len; ENTRY; BSON_ASSERT (gridfs); BSON_ASSERT (data); file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file); file->gridfs = gridfs; bson_copy_to (data, &file->bson); if (!bson_iter_init (&iter, &file->bson)) { GOTO (failure); } while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); if (0 == strcmp (key, "_id")) { value = bson_iter_value (&iter); bson_value_copy (value, &file->files_id); } else if (0 == strcmp (key, "length")) { if (!BSON_ITER_HOLDS_NUMBER (&iter)) { GOTO (failure); } file->length = bson_iter_as_int64 (&iter); } else if (0 == strcmp (key, "chunkSize")) { if (!BSON_ITER_HOLDS_NUMBER (&iter)) { GOTO (failure); } if (bson_iter_as_int64 (&iter) > INT32_MAX) { GOTO (failure); } file->chunk_size = (int32_t) bson_iter_as_int64 (&iter); } else if (0 == strcmp (key, "uploadDate")) { if (!BSON_ITER_HOLDS_DATE_TIME (&iter)) { GOTO (failure); } file->upload_date = bson_iter_date_time (&iter); } else if (0 == strcmp (key, "md5")) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) { GOTO (failure); } file->bson_md5 = bson_iter_utf8 (&iter, NULL); } else if (0 == strcmp (key, "filename")) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) { GOTO (failure); } file->bson_filename = bson_iter_utf8 
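      /* Note: bson_iter_utf8 () returns a pointer into file->bson, which this
       * file owns for its lifetime, so the bson_md5 / bson_filename /
       * bson_content_type fields are borrowed views and are not freed
       * individually in mongoc_gridfs_file_destroy (). */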
(&iter, NULL); } else if (0 == strcmp (key, "contentType")) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) { GOTO (failure); } file->bson_content_type = bson_iter_utf8 (&iter, NULL); } else if (0 == strcmp (key, "aliases")) { if (!BSON_ITER_HOLDS_ARRAY (&iter)) { GOTO (failure); } bson_iter_array (&iter, &buf_len, &buf); if (!bson_init_static (&file->bson_aliases, buf, buf_len)) { GOTO (failure); } } else if (0 == strcmp (key, "metadata")) { if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) { GOTO (failure); } bson_iter_document (&iter, &buf_len, &buf); if (!bson_init_static (&file->bson_metadata, buf, buf_len)) { GOTO (failure); } } } /* TODO: is there are a minimal object we should be verifying that we * actually have here? */ RETURN (file); failure: bson_destroy (&file->bson); RETURN (NULL); } /** * _mongoc_gridfs_file_new: * * Create a new empty gridfs file */ mongoc_gridfs_file_t * _mongoc_gridfs_file_new (mongoc_gridfs_t *gridfs, mongoc_gridfs_file_opt_t *opt) { mongoc_gridfs_file_t *file; mongoc_gridfs_file_opt_t default_opt = {0}; ENTRY; BSON_ASSERT (gridfs); if (!opt) { opt = &default_opt; } file = (mongoc_gridfs_file_t *) bson_malloc0 (sizeof *file); file->gridfs = gridfs; file->is_dirty = 1; if (opt->chunk_size) { file->chunk_size = opt->chunk_size; } else { /* * The default chunk size is now 255kb. This used to be 256k but has been * reduced to allow for them to fit within power of two sizes in mongod. * * See CDRIVER-322. */ file->chunk_size = (1 << 18) - 1024; } file->files_id.value_type = BSON_TYPE_OID; bson_oid_init (&file->files_id.value.v_oid, NULL); file->upload_date = ((int64_t) time (NULL)) * 1000; if (opt->md5) { file->md5 = bson_strdup (opt->md5); } if (opt->filename) { file->filename = bson_strdup (opt->filename); } if (opt->content_type) { file->content_type = bson_strdup (opt->content_type); } if (opt->aliases) { bson_copy_to (opt->aliases, &(file->aliases)); } if (opt->metadata) { bson_copy_to (opt->metadata, &(file->metadata)); } file->pos = 0; file->n = 0; RETURN (file); } void mongoc_gridfs_file_destroy (mongoc_gridfs_file_t *file) { ENTRY; if (!file) { EXIT; } if (file->page) { _mongoc_gridfs_file_page_destroy (file->page); } if (file->bson.len) { bson_destroy (&file->bson); } if (file->cursor) { mongoc_cursor_destroy (file->cursor); } if (file->files_id.value_type) { bson_value_destroy (&file->files_id); } if (file->md5) { bson_free (file->md5); } if (file->filename) { bson_free (file->filename); } if (file->content_type) { bson_free (file->content_type); } if (file->aliases.len) { bson_destroy (&file->aliases); } if (file->bson_aliases.len) { bson_destroy (&file->bson_aliases); } if (file->metadata.len) { bson_destroy (&file->metadata); } if (file->bson_metadata.len) { bson_destroy (&file->bson_metadata); } bson_free (file); EXIT; } /** readv against a gridfs file * timeout_msec is unused */ ssize_t mongoc_gridfs_file_readv (mongoc_gridfs_file_t *file, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, uint32_t timeout_msec) { uint32_t bytes_read = 0; int32_t r; size_t i; uint32_t iov_pos; ENTRY; BSON_ASSERT (file); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); /* Reading when positioned past the end does nothing */ if (file->pos >= file->length) { return 0; } /* Try to get the current chunk */ if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) { return -1; } for (i = 0; i < iovcnt; i++) { iov_pos = 0; for (;;) { r = _mongoc_gridfs_file_page_read ( file->page, (uint8_t *) iov[i].iov_base + iov_pos, (uint32_t) (iov[i].iov_len - iov_pos)); BSON_ASSERT (r >= 0); iov_pos += 
r; file->pos += r; bytes_read += r; if (iov_pos == iov[i].iov_len) { /* filled a bucket, keep going */ break; } else if (file->length == file->pos) { /* we're at the end of the file. So we're done */ RETURN (bytes_read); } else if (bytes_read >= min_bytes) { /* we need a new page, but we've read enough bytes to stop */ RETURN (bytes_read); } else if (!_mongoc_gridfs_file_refresh_page (file)) { return -1; } } } RETURN (bytes_read); } /** writev against a gridfs file * timeout_msec is unused */ ssize_t mongoc_gridfs_file_writev (mongoc_gridfs_file_t *file, const mongoc_iovec_t *iov, size_t iovcnt, uint32_t timeout_msec) { uint32_t bytes_written = 0; int32_t r; size_t i; uint32_t iov_pos; ENTRY; BSON_ASSERT (file); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); /* Pull in the correct page */ if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) { return -1; } /* When writing past the end-of-file, fill the gap with zeros */ if (file->pos > file->length && !_mongoc_gridfs_file_extend (file)) { return -1; } for (i = 0; i < iovcnt; i++) { iov_pos = 0; for (;;) { if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) { return -1; } /* write bytes until an iov is exhausted or the page is full */ r = _mongoc_gridfs_file_page_write ( file->page, (uint8_t *) iov[i].iov_base + iov_pos, (uint32_t) (iov[i].iov_len - iov_pos)); BSON_ASSERT (r >= 0); iov_pos += r; file->pos += r; bytes_written += r; file->length = BSON_MAX (file->length, (int64_t) file->pos); if (iov_pos == iov[i].iov_len) { /** filled a bucket, keep going */ break; } else { /** flush the buffer, the next pass through will bring in a new page */ if (!_mongoc_gridfs_file_flush_page (file)) { return -1; } } } } file->is_dirty = 1; RETURN (bytes_written); } /** * _mongoc_gridfs_file_extend: * * Extend a GridFS file to the current position pointer. Zeros will be * appended to the end of the file until file->length is even with * file->pos. * * If file->length >= file->pos, the function exits successfully with no * operation performed. * * Parameters: * @file: A mongoc_gridfs_file_t. * * Returns: * The number of zero bytes written, or -1 on failure. */ static ssize_t _mongoc_gridfs_file_extend (mongoc_gridfs_file_t *file) { int64_t target_length; ssize_t diff; ENTRY; BSON_ASSERT (file); if (file->length >= file->pos) { RETURN (0); } diff = (ssize_t) (file->pos - file->length); target_length = file->pos; if (-1 == mongoc_gridfs_file_seek (file, 0, SEEK_END)) { RETURN (-1); } while (true) { if (!file->page && !_mongoc_gridfs_file_refresh_page (file)) { RETURN (-1); } /* Set bytes until we reach the limit or fill a page */ file->pos += _mongoc_gridfs_file_page_memset0 (file->page, target_length - file->pos); if (file->pos == target_length) { /* We're done */ break; } else if (!_mongoc_gridfs_file_flush_page (file)) { /* We tried to flush a full buffer, but an error occurred */ RETURN (-1); } } file->length = target_length; file->is_dirty = true; RETURN (diff); } /** * _mongoc_gridfs_file_flush_page: * * Unconditionally flushes the file's current page to the database. * The page to flush is determined by page->n. * * Side Effects: * * On success, file->page is properly destroyed and set to NULL. * * Returns: * * True on success; false otherwise. 
*/ static bool _mongoc_gridfs_file_flush_page (mongoc_gridfs_file_t *file) { bson_t *selector, *update; bool r; const uint8_t *buf; uint32_t len; ENTRY; BSON_ASSERT (file); BSON_ASSERT (file->page); buf = _mongoc_gridfs_file_page_get_data (file->page); len = _mongoc_gridfs_file_page_get_len (file->page); selector = bson_new (); bson_append_value (selector, "files_id", -1, &file->files_id); bson_append_int32 (selector, "n", -1, file->n); update = bson_sized_new (file->chunk_size + 100); bson_append_value (update, "files_id", -1, &file->files_id); bson_append_int32 (update, "n", -1, file->n); bson_append_binary (update, "data", -1, BSON_SUBTYPE_BINARY, buf, len); r = mongoc_collection_update (file->gridfs->chunks, MONGOC_UPDATE_UPSERT, selector, update, NULL, &file->error); bson_destroy (selector); bson_destroy (update); if (r) { _mongoc_gridfs_file_page_destroy (file->page); file->page = NULL; r = mongoc_gridfs_file_save (file); } RETURN (r); } /** * _mongoc_gridfs_file_keep_cursor: * * After a seek, decide if the next read should use the current cursor or * start a new query. * * Preconditions: * * file has a cursor and cursor range. * * Side Effects: * * None. */ static bool _mongoc_gridfs_file_keep_cursor (mongoc_gridfs_file_t *file) { uint32_t chunk_no; uint32_t chunks_per_batch; if (file->n < 0 || file->chunk_size <= 0) { return false; } chunk_no = (uint32_t) file->n; /* server returns roughly 4 MB batches by default */ chunks_per_batch = (4 * 1024 * 1024) / (uint32_t) file->chunk_size; return ( /* cursor is on or before the desired chunk */ file->cursor_range[0] <= chunk_no && /* chunk_no is before end of file */ chunk_no <= file->cursor_range[1] && /* desired chunk is in this batch or next one */ chunk_no < file->cursor_range[0] + 2 * chunks_per_batch); } static int64_t divide_round_up (int64_t num, int64_t denom) { return (num + denom - 1) / denom; } static void missing_chunk (mongoc_gridfs_file_t *file) { bson_set_error (&file->error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CHUNK_MISSING, "missing chunk number %" PRId32, file->n); if (file->cursor) { mongoc_cursor_destroy (file->cursor); file->cursor = NULL; } } /** * _mongoc_gridfs_file_refresh_page: * * Refresh a GridFS file's underlying page. This recalculates the current * page number based on the file's stream position, then fetches that page * from the database. * * Note that this fetch is unconditional and the page is queried from the * database even if the current page covers the same theoretical chunk. * * * Side Effects: * * file->page is loaded with the appropriate buffer, fetched from the * database. If the file position is at the end of the file and on a new * chunk boundary, a new page is created. If the position is far past the * end of the file, _mongoc_gridfs_file_extend is responsible for creating * chunks to file the gap. * * file->n is set based on file->pos. file->error is set on error. */ static bool _mongoc_gridfs_file_refresh_page (mongoc_gridfs_file_t *file) { bson_t query; bson_t child; bson_t opts; const bson_t *chunk; const char *key; bson_iter_t iter; int64_t existing_chunks; int64_t required_chunks; const uint8_t *data = NULL; uint32_t len; ENTRY; BSON_ASSERT (file); file->n = (int32_t) (file->pos / file->chunk_size); if (file->page) { _mongoc_gridfs_file_page_destroy (file->page); file->page = NULL; } /* if the file pointer is past the end of the current file (i.e. pointing to * a new chunk), we'll pass the page constructor a new empty page. 
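 * For illustration: divide_round_up () is a ceiling division, so with a
 * hypothetical chunk_size of 10 and length of 25 there are
 * existing_chunks = 3 (chunks 0..2); a position of 30 needs
 * required_chunks = ceil (31 / 10) = 4 > 3, so the page is constructed over
 * an empty buffer rather than a fetched chunk.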
*/ existing_chunks = divide_round_up (file->length, file->chunk_size); required_chunks = divide_round_up (file->pos + 1, file->chunk_size); if (required_chunks > existing_chunks) { data = (uint8_t *) ""; len = 0; } else { /* if we have a cursor, but the cursor doesn't have the chunk we're going * to need, destroy it (we'll grab a new one immediately there after) */ if (file->cursor && !_mongoc_gridfs_file_keep_cursor (file)) { mongoc_cursor_destroy (file->cursor); file->cursor = NULL; } if (!file->cursor) { bson_init (&query); BSON_APPEND_VALUE (&query, "files_id", &file->files_id); BSON_APPEND_DOCUMENT_BEGIN (&query, "n", &child); BSON_APPEND_INT32 (&child, "$gte", file->n); bson_append_document_end (&query, &child); bson_init (&opts); BSON_APPEND_DOCUMENT_BEGIN (&opts, "sort", &child); BSON_APPEND_INT32 (&child, "n", 1); bson_append_document_end (&opts, &child); BSON_APPEND_DOCUMENT_BEGIN (&opts, "projection", &child); BSON_APPEND_INT32 (&child, "n", 1); BSON_APPEND_INT32 (&child, "data", 1); BSON_APPEND_INT32 (&child, "_id", 0); bson_append_document_end (&opts, &child); /* find all chunks greater than or equal to our current file pos */ file->cursor = mongoc_collection_find_with_opts ( file->gridfs->chunks, &query, &opts, NULL); file->cursor_range[0] = file->n; file->cursor_range[1] = (uint32_t) (file->length / file->chunk_size); bson_destroy (&query); bson_destroy (&opts); BSON_ASSERT (file->cursor); } /* we might have had a cursor before, then seeked ahead past a chunk. * iterate until we're on the right chunk */ while (file->cursor_range[0] <= file->n) { if (!mongoc_cursor_next (file->cursor, &chunk)) { /* copy cursor error; if there's none, we're missing a chunk */ if (!mongoc_cursor_error (file->cursor, &file->error)) { missing_chunk (file); } RETURN (0); } file->cursor_range[0]++; } BSON_ASSERT (bson_iter_init (&iter, chunk)); /* grab out what we need from the chunk */ while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); if (strcmp (key, "n") == 0) { if (file->n != bson_iter_int32 (&iter)) { missing_chunk (file); RETURN (0); } } else if (strcmp (key, "data") == 0) { bson_iter_binary (&iter, NULL, &len, &data); } else { /* Unexpected key. This should never happen */ RETURN (0); } } if (file->n != file->pos / file->chunk_size) { return 0; } } if (!data) { bson_set_error (&file->error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CHUNK_MISSING, "corrupt chunk number %" PRId32, file->n); RETURN (0); } if (len > file->chunk_size) { bson_set_error (&file->error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_CORRUPT, "corrupt chunk number %" PRId32 ": bad size", file->n); RETURN (0); } file->page = _mongoc_gridfs_file_page_new (data, len, file->chunk_size); /* seek in the page towards wherever we're supposed to be */ RETURN ( _mongoc_gridfs_file_page_seek (file->page, file->pos % file->chunk_size)); } /** * mongoc_gridfs_file_seek: * * Adjust the file position pointer in `file` by `delta`, starting from the * position `whence`. The `whence` argument is interpreted as in fseek(2): * * SEEK_SET Set the position relative to the start of the file. * SEEK_CUR Move `delta` from the current file position. * SEEK_END Move `delta` from the end-of-file. * * Parameters: * * @file: A mongoc_gridfs_file_t. * @delta: The amount to move. May be positive or negative. * @whence: One of SEEK_SET, SEEK_CUR or SEEK_END. * * Errors: * * [EINVAL] `whence` is not one of SEEK_SET, SEEK_CUR or SEEK_END. * [EINVAL] Resulting file position would be negative. 
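 *
 * Illustrative example (not part of the original comment): to re-read the
 * final 16 bytes of a file, with iov prepared to receive 16 bytes,
 *
 *   mongoc_gridfs_file_seek (file, -16, SEEK_END);
 *   mongoc_gridfs_file_readv (file, &iov, 1, 16, 0);
 *
 * The seek itself only updates the position; the matching chunk is fetched
 * lazily on the next read or write.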
* * Side Effects: * * On success, the file's underlying position pointer is set appropriately. * On failure, the file position is NOT changed and errno is set. * * Returns: * * 0 on success. * -1 on error, and errno set to indicate the error. */ int mongoc_gridfs_file_seek (mongoc_gridfs_file_t *file, int64_t delta, int whence) { int64_t offset; BSON_ASSERT (file); switch (whence) { case SEEK_SET: offset = delta; break; case SEEK_CUR: offset = file->pos + delta; break; case SEEK_END: offset = file->length + delta; break; default: errno = EINVAL; return -1; break; } if (offset < 0) { errno = EINVAL; return -1; } if (offset / file->chunk_size != file->n) { /** no longer on the same page */ if (file->page) { if (_mongoc_gridfs_file_page_is_dirty (file->page)) { if (!_mongoc_gridfs_file_flush_page (file)) { return -1; } } else { _mongoc_gridfs_file_page_destroy (file->page); file->page = NULL; } } /** we'll pick up the seek when we fetch a page on the next action. We * lazily load */ } else if (file->page) { BSON_ASSERT ( _mongoc_gridfs_file_page_seek (file->page, offset % file->chunk_size)); } file->pos = offset; file->n = file->pos / file->chunk_size; return 0; } uint64_t mongoc_gridfs_file_tell (mongoc_gridfs_file_t *file) { BSON_ASSERT (file); return file->pos; } bool mongoc_gridfs_file_error (mongoc_gridfs_file_t *file, bson_error_t *error) { BSON_ASSERT (file); BSON_ASSERT (error); if (BSON_UNLIKELY (file->error.domain)) { bson_set_error (error, file->error.domain, file->error.code, "%s", file->error.message); RETURN (true); } RETURN (false); } const bson_value_t * mongoc_gridfs_file_get_id (mongoc_gridfs_file_t *file) { BSON_ASSERT (file); return &file->files_id; } int64_t mongoc_gridfs_file_get_length (mongoc_gridfs_file_t *file) { BSON_ASSERT (file); return file->length; } int32_t mongoc_gridfs_file_get_chunk_size (mongoc_gridfs_file_t *file) { BSON_ASSERT (file); return file->chunk_size; } int64_t mongoc_gridfs_file_get_upload_date (mongoc_gridfs_file_t *file) { BSON_ASSERT (file); return file->upload_date; } bool mongoc_gridfs_file_remove (mongoc_gridfs_file_t *file, bson_error_t *error) { bson_t sel = BSON_INITIALIZER; bool ret = false; BSON_ASSERT (file); BSON_APPEND_VALUE (&sel, "_id", &file->files_id); if (!mongoc_collection_delete_one ( file->gridfs->files, &sel, NULL, NULL, error)) { goto cleanup; } bson_reinit (&sel); BSON_APPEND_VALUE (&sel, "files_id", &file->files_id); if (!mongoc_collection_delete_many ( file->gridfs->chunks, &sel, NULL, NULL, error)) { goto cleanup; } ret = true; cleanup: bson_destroy (&sel); return ret; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-file.h0000644000076500000240000000711113572250757025656 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_FILE_H #define MONGOC_GRIDFS_FILE_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-socket.h" BSON_BEGIN_DECLS #define MONGOC_GRIDFS_FILE_STR_HEADER(name) \ MONGOC_EXPORT (const char *) \ mongoc_gridfs_file_get_##name (mongoc_gridfs_file_t *file); \ MONGOC_EXPORT (void) \ mongoc_gridfs_file_set_##name (mongoc_gridfs_file_t *file, const char *str); #define MONGOC_GRIDFS_FILE_BSON_HEADER(name) \ MONGOC_EXPORT (const bson_t *) \ mongoc_gridfs_file_get_##name (mongoc_gridfs_file_t *file); \ MONGOC_EXPORT (void) \ mongoc_gridfs_file_set_##name (mongoc_gridfs_file_t *file, \ const bson_t *bson); typedef struct _mongoc_gridfs_file_t mongoc_gridfs_file_t; typedef struct _mongoc_gridfs_file_opt_t mongoc_gridfs_file_opt_t; struct _mongoc_gridfs_file_opt_t { const char *md5; const char *filename; const char *content_type; const bson_t *aliases; const bson_t *metadata; uint32_t chunk_size; }; MONGOC_GRIDFS_FILE_STR_HEADER (md5) MONGOC_GRIDFS_FILE_STR_HEADER (filename) MONGOC_GRIDFS_FILE_STR_HEADER (content_type) MONGOC_GRIDFS_FILE_BSON_HEADER (aliases) MONGOC_GRIDFS_FILE_BSON_HEADER (metadata) MONGOC_EXPORT (const bson_value_t *) mongoc_gridfs_file_get_id (mongoc_gridfs_file_t *file); MONGOC_EXPORT (int64_t) mongoc_gridfs_file_get_length (mongoc_gridfs_file_t *file); MONGOC_EXPORT (int32_t) mongoc_gridfs_file_get_chunk_size (mongoc_gridfs_file_t *file); MONGOC_EXPORT (int64_t) mongoc_gridfs_file_get_upload_date (mongoc_gridfs_file_t *file); MONGOC_EXPORT (ssize_t) mongoc_gridfs_file_writev (mongoc_gridfs_file_t *file, const mongoc_iovec_t *iov, size_t iovcnt, uint32_t timeout_msec); MONGOC_EXPORT (ssize_t) mongoc_gridfs_file_readv (mongoc_gridfs_file_t *file, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, uint32_t timeout_msec); MONGOC_EXPORT (int) mongoc_gridfs_file_seek (mongoc_gridfs_file_t *file, int64_t delta, int whence); MONGOC_EXPORT (uint64_t) mongoc_gridfs_file_tell (mongoc_gridfs_file_t *file); MONGOC_EXPORT (bool) mongoc_gridfs_file_set_id (mongoc_gridfs_file_t *file, const bson_value_t *id, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_gridfs_file_save (mongoc_gridfs_file_t *file); MONGOC_EXPORT (void) mongoc_gridfs_file_destroy (mongoc_gridfs_file_t *file); MONGOC_EXPORT (bool) mongoc_gridfs_file_error (mongoc_gridfs_file_t *file, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_gridfs_file_remove (mongoc_gridfs_file_t *file, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_GRIDFS_FILE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs-private.h0000644000076500000240000000227513572250757026417 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_PRIVATE_H #define MONGOC_GRIDFS_PRIVATE_H #include #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-client.h" BSON_BEGIN_DECLS struct _mongoc_gridfs_t { mongoc_client_t *client; mongoc_collection_t *files; mongoc_collection_t *chunks; }; mongoc_gridfs_t * _mongoc_gridfs_new (mongoc_client_t *client, const char *db, const char *prefix, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_GRIDFS_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs.c0000644000076500000240000002772113572250757024745 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "gridfs" #include "mongoc/mongoc-bulk-operation.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-collection-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-index.h" #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-gridfs-private.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-gridfs-file-private.h" #include "mongoc/mongoc-gridfs-file-list.h" #include "mongoc/mongoc-gridfs-file-list-private.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-cursor-private.h" #include "mongoc/mongoc-util-private.h" #define MONGOC_GRIDFS_STREAM_CHUNK 4096 /** * _mongoc_gridfs_ensure_index: * * ensure gridfs indexes * * Ensure fast searches for chunks via [ files_id, n ] * Ensure fast searches for files via [ filename ] */ static bool _mongoc_gridfs_ensure_index (mongoc_gridfs_t *gridfs, bson_error_t *error) { bson_t keys; mongoc_index_opt_t opt; bool r; ENTRY; bson_init (&keys); bson_append_int32 (&keys, "files_id", -1, 1); bson_append_int32 (&keys, "n", -1, 1); mongoc_index_opt_init (&opt); opt.unique = 1; BEGIN_IGNORE_DEPRECATIONS r = mongoc_collection_create_index (gridfs->chunks, &keys, &opt, error); END_IGNORE_DEPRECATIONS bson_destroy (&keys); if (!r) { RETURN (r); } bson_init (&keys); bson_append_int32 (&keys, "filename", -1, 1); bson_append_int32 (&keys, "uploadDate", -1, 1); opt.unique = 0; BEGIN_IGNORE_DEPRECATIONS r = mongoc_collection_create_index (gridfs->files, &keys, &opt, error); END_IGNORE_DEPRECATIONS bson_destroy (&keys); if (!r) { RETURN (r); } RETURN (1); } mongoc_gridfs_t * _mongoc_gridfs_new (mongoc_client_t *client, const char *db, const char *prefix, bson_error_t *error) { mongoc_gridfs_t *gridfs; char buf[128]; bool r; uint32_t prefix_len; ENTRY; BSON_ASSERT (client); BSON_ASSERT (db); if (!prefix) { prefix = "fs"; } /* make sure prefix is short enough to bucket the chunks and files * collections */ prefix_len = (uint32_t) strlen (prefix); BSON_ASSERT (prefix_len + sizeof (".chunks") < sizeof (buf)); gridfs = (mongoc_gridfs_t *) bson_malloc0 (sizeof *gridfs); gridfs->client = client; bson_snprintf (buf, sizeof (buf), "%s.chunks", prefix); gridfs->chunks = 
mongoc_client_get_collection (client, db, buf); bson_snprintf (buf, sizeof (buf), "%s.files", prefix); gridfs->files = mongoc_client_get_collection (client, db, buf); r = _mongoc_gridfs_ensure_index (gridfs, error); if (!r) { mongoc_gridfs_destroy (gridfs); RETURN (NULL); } RETURN (gridfs); } bool mongoc_gridfs_drop (mongoc_gridfs_t *gridfs, bson_error_t *error) { bool r; ENTRY; r = mongoc_collection_drop (gridfs->files, error); if (!r) { RETURN (0); } r = mongoc_collection_drop (gridfs->chunks, error); if (!r) { RETURN (0); } RETURN (1); } void mongoc_gridfs_destroy (mongoc_gridfs_t *gridfs) { ENTRY; if (!gridfs) { EXIT; } mongoc_collection_destroy (gridfs->files); mongoc_collection_destroy (gridfs->chunks); bson_free (gridfs); EXIT; } /** find all matching gridfs files */ mongoc_gridfs_file_list_t * mongoc_gridfs_find (mongoc_gridfs_t *gridfs, const bson_t *query) { return _mongoc_gridfs_file_list_new (gridfs, query, 0); } /** find a single gridfs file */ mongoc_gridfs_file_t * mongoc_gridfs_find_one (mongoc_gridfs_t *gridfs, const bson_t *query, bson_error_t *error) { mongoc_gridfs_file_list_t *list; mongoc_gridfs_file_t *file; ENTRY; list = _mongoc_gridfs_file_list_new (gridfs, query, 1); file = mongoc_gridfs_file_list_next (list); if (!mongoc_gridfs_file_list_error (list, error) && error) { /* no error, but an error out-pointer was provided - clear it */ memset (error, 0, sizeof (*error)); } mongoc_gridfs_file_list_destroy (list); RETURN (file); } /** find all matching gridfs files */ mongoc_gridfs_file_list_t * mongoc_gridfs_find_with_opts (mongoc_gridfs_t *gridfs, const bson_t *filter, const bson_t *opts) { return _mongoc_gridfs_file_list_new_with_opts (gridfs, filter, opts); } /** find a single gridfs file */ mongoc_gridfs_file_t * mongoc_gridfs_find_one_with_opts (mongoc_gridfs_t *gridfs, const bson_t *filter, const bson_t *opts, bson_error_t *error) { mongoc_gridfs_file_list_t *list; mongoc_gridfs_file_t *file; bson_t new_opts; ENTRY; bson_init (&new_opts); if (opts) { bson_copy_to_excluding_noinit (opts, &new_opts, "limit", (char *) NULL); } BSON_APPEND_INT32 (&new_opts, "limit", 1); list = _mongoc_gridfs_file_list_new_with_opts (gridfs, filter, &new_opts); file = mongoc_gridfs_file_list_next (list); if (!mongoc_gridfs_file_list_error (list, error) && error) { /* no error, but an error out-pointer was provided - clear it */ memset (error, 0, sizeof (*error)); } mongoc_gridfs_file_list_destroy (list); bson_destroy (&new_opts); RETURN (file); } /** find a single gridfs file by filename */ mongoc_gridfs_file_t * mongoc_gridfs_find_one_by_filename (mongoc_gridfs_t *gridfs, const char *filename, bson_error_t *error) { mongoc_gridfs_file_t *file; bson_t filter; bson_init (&filter); bson_append_utf8 (&filter, "filename", -1, filename, -1); file = mongoc_gridfs_find_one_with_opts (gridfs, &filter, NULL, error); bson_destroy (&filter); return file; } /** create a gridfs file from a stream * * The stream is fully consumed in creating the file */ mongoc_gridfs_file_t * mongoc_gridfs_create_file_from_stream (mongoc_gridfs_t *gridfs, mongoc_stream_t *stream, mongoc_gridfs_file_opt_t *opt) { mongoc_gridfs_file_t *file; ssize_t r; uint8_t buf[MONGOC_GRIDFS_STREAM_CHUNK]; mongoc_iovec_t iov; int timeout; ENTRY; BSON_ASSERT (gridfs); BSON_ASSERT (stream); iov.iov_base = (void *) buf; iov.iov_len = 0; file = _mongoc_gridfs_file_new (gridfs, opt); timeout = gridfs->client->cluster.sockettimeoutms; for (;;) { r = mongoc_stream_read ( stream, iov.iov_base, MONGOC_GRIDFS_STREAM_CHUNK, 0, timeout); if 
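      /* mongoc_stream_read () returns the byte count: a positive count is
       * appended to the GridFS file via writev, zero means the source stream
       * reached EOF, and a negative value is treated as a read error. */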
(r > 0) { iov.iov_len = r; if (mongoc_gridfs_file_writev (file, &iov, 1, timeout) < 0) { MONGOC_ERROR ("%s", file->error.message); mongoc_gridfs_file_destroy (file); RETURN (NULL); } } else if (r == 0) { break; } else { MONGOC_ERROR ("Error reading from GridFS file source stream"); mongoc_gridfs_file_destroy (file); RETURN (NULL); } } mongoc_stream_failed (stream); if (-1 == mongoc_gridfs_file_seek (file, 0, SEEK_SET)) { MONGOC_ERROR ("%s", file->error.message); mongoc_gridfs_file_destroy (file); RETURN (NULL); } RETURN (file); } /** create an empty gridfs file */ mongoc_gridfs_file_t * mongoc_gridfs_create_file (mongoc_gridfs_t *gridfs, mongoc_gridfs_file_opt_t *opt) { mongoc_gridfs_file_t *file; ENTRY; BSON_ASSERT (gridfs); file = _mongoc_gridfs_file_new (gridfs, opt); RETURN (file); } /** accessor functions for collections */ mongoc_collection_t * mongoc_gridfs_get_files (mongoc_gridfs_t *gridfs) { BSON_ASSERT (gridfs); return gridfs->files; } mongoc_collection_t * mongoc_gridfs_get_chunks (mongoc_gridfs_t *gridfs) { BSON_ASSERT (gridfs); return gridfs->chunks; } bool mongoc_gridfs_remove_by_filename (mongoc_gridfs_t *gridfs, const char *filename, bson_error_t *error) { mongoc_bulk_operation_t *bulk_files = NULL; mongoc_bulk_operation_t *bulk_chunks = NULL; mongoc_cursor_t *cursor = NULL; bson_error_t files_error; bson_error_t chunks_error; const bson_t *doc; const char *key; char keybuf[16]; int count = 0; bool chunks_ret; bool files_ret; bool ret = false; bson_iter_t iter; bson_t *files_q = NULL; bson_t *chunks_q = NULL; bson_t find_filter = BSON_INITIALIZER; bson_t find_opts = BSON_INITIALIZER; bson_t find_opts_project; bson_t ar = BSON_INITIALIZER; bson_t opts = BSON_INITIALIZER; BSON_ASSERT (gridfs); if (!filename) { bson_set_error (error, MONGOC_ERROR_GRIDFS, MONGOC_ERROR_GRIDFS_INVALID_FILENAME, "A non-NULL filename must be specified."); return false; } /* * Find all files matching this filename. Hopefully just one, but not * strictly required! 
*/ BSON_APPEND_UTF8 (&find_filter, "filename", filename); BSON_APPEND_DOCUMENT_BEGIN (&find_opts, "projection", &find_opts_project); BSON_APPEND_INT32 (&find_opts_project, "_id", 1); bson_append_document_end (&find_opts, &find_opts_project); cursor = _mongoc_cursor_find_new (gridfs->client, gridfs->files->ns, &find_filter, &find_opts, NULL /* user_prefs */, NULL /* default_prefs */, NULL /* read_concern */); BSON_ASSERT (cursor); while (mongoc_cursor_next (cursor, &doc)) { if (bson_iter_init_find (&iter, doc, "_id")) { const bson_value_t *value = bson_iter_value (&iter); bson_uint32_to_string (count, &key, keybuf, sizeof keybuf); BSON_APPEND_VALUE (&ar, key, value); } } if (mongoc_cursor_error (cursor, error)) { goto failure; } bson_append_bool (&opts, "ordered", 7, false); bulk_files = mongoc_collection_create_bulk_operation_with_opts (gridfs->files, &opts); bulk_chunks = mongoc_collection_create_bulk_operation_with_opts (gridfs->chunks, &opts); bson_destroy (&opts); files_q = BCON_NEW ("_id", "{", "$in", BCON_ARRAY (&ar), "}"); chunks_q = BCON_NEW ("files_id", "{", "$in", BCON_ARRAY (&ar), "}"); mongoc_bulk_operation_remove (bulk_files, files_q); mongoc_bulk_operation_remove (bulk_chunks, chunks_q); files_ret = mongoc_bulk_operation_execute (bulk_files, NULL, &files_error); chunks_ret = mongoc_bulk_operation_execute (bulk_chunks, NULL, &chunks_error); if (error) { if (!files_ret) { memcpy (error, &files_error, sizeof *error); } else if (!chunks_ret) { memcpy (error, &chunks_error, sizeof *error); } } ret = (files_ret && chunks_ret); failure: if (cursor) { mongoc_cursor_destroy (cursor); } if (bulk_files) { mongoc_bulk_operation_destroy (bulk_files); } if (bulk_chunks) { mongoc_bulk_operation_destroy (bulk_chunks); } bson_destroy (&find_filter); bson_destroy (&find_opts); bson_destroy (&ar); if (files_q) { bson_destroy (files_q); } if (chunks_q) { bson_destroy (chunks_q); } return ret; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-gridfs.h0000644000076500000240000000601513572250757024743 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_GRIDFS_H #define MONGOC_GRIDFS_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-gridfs-file-list.h" BSON_BEGIN_DECLS typedef struct _mongoc_gridfs_t mongoc_gridfs_t; MONGOC_EXPORT (mongoc_gridfs_file_t *) mongoc_gridfs_create_file_from_stream (mongoc_gridfs_t *gridfs, mongoc_stream_t *stream, mongoc_gridfs_file_opt_t *opt); MONGOC_EXPORT (mongoc_gridfs_file_t *) mongoc_gridfs_create_file (mongoc_gridfs_t *gridfs, mongoc_gridfs_file_opt_t *opt); MONGOC_EXPORT (mongoc_gridfs_file_list_t *) mongoc_gridfs_find (mongoc_gridfs_t *gridfs, const bson_t *query) BSON_GNUC_DEPRECATED_FOR (mongoc_gridfs_find_with_opts); MONGOC_EXPORT (mongoc_gridfs_file_t *) mongoc_gridfs_find_one (mongoc_gridfs_t *gridfs, const bson_t *query, bson_error_t *error) BSON_GNUC_DEPRECATED_FOR (mongoc_gridfs_find_one_with_opts); MONGOC_EXPORT (mongoc_gridfs_file_list_t *) mongoc_gridfs_find_with_opts (mongoc_gridfs_t *gridfs, const bson_t *filter, const bson_t *opts) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (mongoc_gridfs_file_t *) mongoc_gridfs_find_one_with_opts (mongoc_gridfs_t *gridfs, const bson_t *filter, const bson_t *opts, bson_error_t *error) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (mongoc_gridfs_file_t *) mongoc_gridfs_find_one_by_filename (mongoc_gridfs_t *gridfs, const char *filename, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_gridfs_drop (mongoc_gridfs_t *gridfs, bson_error_t *error); MONGOC_EXPORT (void) mongoc_gridfs_destroy (mongoc_gridfs_t *gridfs); MONGOC_EXPORT (mongoc_collection_t *) mongoc_gridfs_get_files (mongoc_gridfs_t *gridfs); MONGOC_EXPORT (mongoc_collection_t *) mongoc_gridfs_get_chunks (mongoc_gridfs_t *gridfs); MONGOC_EXPORT (bool) mongoc_gridfs_remove_by_filename (mongoc_gridfs_t *gridfs, const char *filename, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_GRIDFS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-handshake-compiler-private.h0000644000076500000240000000452413572250757030676 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_HANDSHAKE_COMPILER_PRIVATE_H #define MONGOC_HANDSHAKE_COMPILER_PRIVATE_H #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-util-private.h" /* * Thanks to: * http://nadeausoftware.com/articles/2012/10/c_c_tip_how_detect_compiler_name_and_version_using_compiler_predefined_macros */ #if defined(__clang__) #define MONGOC_COMPILER "clang" #define MONGOC_COMPILER_VERSION __clang_version__ #elif defined(__ICC) || defined(__INTEL_COMPILER) #define MONGOC_COMPILER "ICC" #define MONGOC_COMPILER_VERSION __VERSION__ #elif defined(__GNUC__) || defined(__GNUG__) #define MONGOC_COMPILER "GCC" #define MONGOC_COMPILER_VERSION __VERSION__ #elif defined(__HP_cc) || defined(__HP_aCC) #define MONGOC_COMPILER "aCC" #define MONGOC_COMPILER_VERSION MONGOC_EVALUATE_STR (__HP_cc) #elif defined(__IBMC__) || defined(__IBMCPP__) #define MONGOC_COMPILER "xlc" #define MONGOC_COMPILER_VERSION __xlc__ #elif defined(_MSC_VER) #define MONGOC_COMPILER "MSVC" #define MONGOC_COMPILER_VERSION MONGOC_EVALUATE_STR (_MSC_VER) #elif defined(__PGI) #define MONGOC_COMPILER "Portland PGCC" #define MONGOC_COMPILER_VERSION \ MONGOC_EVALUATE_STR (__PGIC__) \ "." MONGOC_EVALUATE_STR (__PGIC_MINOR) "." MONGOC_EVALUATE_STR ( \ __PGIC_PATCHLEVEL__) #elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) #define MONGOC_COMPILER "Solaris Studio" #define MONGOC_COMPILER_VERSION MONGOC_EVALUATE_STR (__SUNPRO_C) #elif defined(__PCC__) /* Portable C Compiler. Version may not be available */ #define MONGOC_COMPILER "PCC" #else #define MONGOC_COMPILER MONGOC_EVALUATE_STR (MONGOC_CC) /* Not defining COMPILER_VERSION. We'll fall back to values set at * configure-time */ #endif #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-handshake-os-private.h0000644000076500000240000000540313572250757027502 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_HANDSHAKE_OS_PRIVATE #define MONGOC_HANDSHAKE_OS_PRIVATE /* Based on tables from * http://nadeausoftware.com/articles/2012/01/c_c_tip_how_use_compiler_predefined_macros_detect_operating_system */ #if defined(_WIN32) || defined(__CYGWIN__) #define MONGOC_OS_TYPE "Windows" #if defined(__CYGWIN__) #define MONGOC_OS_NAME "Cygwin" #else #define MONGOC_OS_NAME "Windows" #endif /* osx and iphone defines __APPLE__ and __MACH__, but not __unix__ */ #elif defined(__APPLE__) && defined(__MACH__) && !defined(__unix__) #define MONGOC_OS_TYPE "Darwin" #include #if defined(TARGET_IPHONE_SIMULATOR) && TARGET_IPHONE_SIMULATOR == 1 #define MONGOC_OS_NAME "iOS Simulator" #elif defined(TARGET_OS_IOS) && TARGET_OS_IOS == 1 #define MONGOC_OS_NAME "iOS" #elif defined(TARGET_OS_MAC) && TARGET_OS_MAC == 1 #define MONGOC_OS_NAME "macOS" #elif defined(TARGET_OS_TV) && TARGET_OS_TV == 1 #define MONGOC_OS_NAME "tvOS" #elif defined(TARGET_OS_WATCH) && TARGET_OS_WATCH == 1 #define MONGOC_OS_NAME "watchOS" #else /* Fall back to uname () */ #endif /* Need to check if __unix is defined since sun and hpux always have __unix, * but not necessarily __unix__ defined. */ #elif defined(__unix__) || defined(__unix) #include #if defined(__linux__) #define MONGOC_OS_IS_LINUX #if defined(__ANDROID__) #define MONGOC_OS_TYPE "Linux (Android)" #else #define MONGOC_OS_TYPE "Linux" #endif /* Don't define OS_NAME. We'll scan the file system to determine distro. */ #elif defined(BSD) #define MONGOC_OS_TYPE "BSD" #if defined(__FreeBSD__) #define MONGOC_OS_NAME "FreeBSD" #elif defined(__NetBSD__) #define MONGOC_OS_NAME "NetBSD" #elif defined(__OpenBSD__) #define MONGOC_OS_NAME "OpenBSD" #elif defined(__DragonFly__) #define MONGOC_OS_NAME "DragonFlyBSD" #else /* Don't define OS_NAME. We'll use uname to figure it out. */ #endif #else #define MONGOC_OS_TYPE "Unix" #if defined(_AIX) #define MONGOC_OS_NAME "AIX" #elif defined(__sun) && defined(__SVR4) #define MONGOC_OS_NAME "Solaris" #elif defined(__hpux) #define MONGOC_OS_NAME "HP-UX" #else /* Don't set OS name. We'll just fall back to uname. */ #endif #endif #endif #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-handshake-private.h0000644000076500000240000000732213572250757027065 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_HANDSHAKE_PRIVATE_H #define MONGOC_HANDSHAKE_PRIVATE_H #include BSON_BEGIN_DECLS #define HANDSHAKE_FIELD "client" #define HANDSHAKE_PLATFORM_FIELD "platform" #define HANDSHAKE_MAX_SIZE 512 #define HANDSHAKE_OS_TYPE_MAX 32 #define HANDSHAKE_OS_NAME_MAX 32 #define HANDSHAKE_OS_VERSION_MAX 32 #define HANDSHAKE_OS_ARCHITECTURE_MAX 32 #define HANDSHAKE_DRIVER_NAME_MAX 64 #define HANDSHAKE_DRIVER_VERSION_MAX 32 /* platform has no fixed max size. It can just occupy the remaining * available space in the document. */ /* When adding a new field to mongoc-config.h.in, update this! 
*/ typedef enum { /* The bit position (from the RHS) of each config flag. Do not reorder. */ MONGOC_MD_FLAG_ENABLE_CRYPTO = 0, MONGOC_MD_FLAG_ENABLE_CRYPTO_CNG, MONGOC_MD_FLAG_ENABLE_CRYPTO_COMMON_CRYPTO, MONGOC_MD_FLAG_ENABLE_CRYPTO_LIBCRYPTO, MONGOC_MD_FLAG_ENABLE_CRYPTO_SYSTEM_PROFILE, MONGOC_MD_FLAG_ENABLE_SASL, MONGOC_MD_FLAG_ENABLE_SSL, MONGOC_MD_FLAG_ENABLE_SSL_OPENSSL, MONGOC_MD_FLAG_ENABLE_SSL_SECURE_CHANNEL, MONGOC_MD_FLAG_ENABLE_SSL_SECURE_TRANSPORT, MONGOC_MD_FLAG_EXPERIMENTAL_FEATURES, MONGOC_MD_FLAG_HAVE_SASL_CLIENT_DONE, MONGOC_MD_FLAG_HAVE_WEAK_SYMBOLS, MONGOC_MD_FLAG_NO_AUTOMATIC_GLOBALS, MONGOC_MD_FLAG_ENABLE_SSL_LIBRESSL, MONGOC_MD_FLAG_ENABLE_SASL_CYRUS, MONGOC_MD_FLAG_ENABLE_SASL_SSPI, MONGOC_MD_FLAG_HAVE_SOCKLEN, MONGOC_MD_FLAG_ENABLE_COMPRESSION, MONGOC_MD_FLAG_ENABLE_COMPRESSION_SNAPPY, MONGOC_MD_FLAG_ENABLE_COMPRESSION_ZLIB, MONGOC_MD_FLAG_ENABLE_SASL_GSSAPI_UNUSED, /* CDRIVER-2654 removed this . */ MONGOC_MD_FLAG_ENABLE_RES_NSEARCH, MONGOC_MD_FLAG_ENABLE_RES_NDESTROY, MONGOC_MD_FLAG_ENABLE_RES_NCLOSE, MONGOC_MD_FLAG_ENABLE_RES_SEARCH, MONGOC_MD_FLAG_ENABLE_DNSAPI, MONGOC_MD_FLAG_ENABLE_RDTSCP, MONGOC_MD_FLAG_HAVE_SCHED_GETCPU, MONGOC_MD_FLAG_ENABLE_SHM_COUNTERS, MONGOC_MD_FLAG_TRACE, MONGOC_MD_FLAG_ENABLE_ICU, /* Add additional config flags here, above LAST_MONGOC_MD_FLAG. */ LAST_MONGOC_MD_FLAG } mongoc_handshake_config_flag_bit_t; typedef struct _mongoc_handshake_t { char *os_type; char *os_name; char *os_version; char *os_architecture; char *driver_name; char *driver_version; char *platform; char *compiler_info; char *flags; bool frozen; } mongoc_handshake_t; void _mongoc_handshake_init (void); void _mongoc_handshake_cleanup (void); bool _mongoc_handshake_build_doc_with_application (bson_t *doc, const char *application); void _mongoc_handshake_freeze (void); mongoc_handshake_t * _mongoc_handshake_get (void); bool _mongoc_handshake_appname_is_valid (const char *appname); typedef struct { bool scram_sha_256; bool scram_sha_1; } mongoc_handshake_sasl_supported_mechs_t; void _mongoc_handshake_append_sasl_supported_mechs (const mongoc_uri_t *uri, bson_t *ismaster); void _mongoc_handshake_parse_sasl_supported_mechs ( const bson_t *ismaster, mongoc_handshake_sasl_supported_mechs_t *sasl_supported_mechs); BSON_END_DECLS #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-handshake.c0000644000076500000240000004450013572250757025407 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #ifdef _POSIX_VERSION #include #endif #ifdef _WIN32 #include #endif #include "mongoc/mongoc-linux-distro-scanner-private.h" #include "mongoc/mongoc-handshake.h" #include "mongoc/mongoc-handshake-compiler-private.h" #include "mongoc/mongoc-handshake-os-private.h" #include "mongoc/mongoc-handshake-private.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-version.h" #include "mongoc/mongoc-util-private.h" /* * Global handshake data instance. Initialized at startup from mongoc_init * * Can be modified by calls to mongoc_handshake_data_append */ static mongoc_handshake_t gMongocHandshake; /* * Used for thread-safety in mongoc_handshake_data_append */ static bson_mutex_t gHandshakeLock; static void _set_bit (uint8_t *bf, uint32_t byte_count, uint32_t bit) { uint32_t byte = bit / 8; uint32_t bit_of_byte = (bit) % 8; /* byte 0 is the last location in bf. */ bf[(byte_count - 1) - byte] |= 1u << bit_of_byte; } /* returns a hex string for all config flag bits, which must be freed. */ char * _mongoc_handshake_get_config_hex_string (void) { uint32_t byte_count; uint8_t *bf; bson_string_t *str; int i; byte_count = (LAST_MONGOC_MD_FLAG + 7) / 8; /* ceil (num_bits / 8) */ /* allocate enough bytes to fit all config bits. */ bf = (uint8_t *) bson_malloc0 (byte_count); #ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL _set_bit (bf, byte_count, MONGOC_ENABLE_SSL_SECURE_CHANNEL); #endif #ifdef MONGOC_ENABLE_CRYPTO_CNG _set_bit (bf, byte_count, MONGOC_ENABLE_CRYPTO_CNG); #endif #ifdef MONGOC_ENABLE_SSL_SECURE_TRANSPORT _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SSL_SECURE_TRANSPORT); #endif #ifdef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_CRYPTO_COMMON_CRYPTO); #endif #ifdef MONGOC_ENABLE_SSL_OPENSSL _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SSL_OPENSSL); #endif #ifdef MONGOC_ENABLE_CRYPTO_LIBCRYPTO _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_CRYPTO_LIBCRYPTO); #endif #ifdef MONGOC_ENABLE_SSL _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SSL); #endif #ifdef MONGOC_ENABLE_CRYPTO _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_CRYPTO); #endif #ifdef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_CRYPTO_SYSTEM_PROFILE); #endif #ifdef MONGOC_ENABLE_SASL _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SASL); #endif #ifdef MONGOC_HAVE_SASL_CLIENT_DONE _set_bit (bf, byte_count, MONGOC_MD_FLAG_HAVE_SASL_CLIENT_DONE); #endif #ifdef MONGOC_NO_AUTOMATIC_GLOBALS _set_bit (bf, byte_count, MONGOC_MD_FLAG_NO_AUTOMATIC_GLOBALS); #endif #ifdef MONGOC_EXPERIMENTAL_FEATURES _set_bit (bf, byte_count, MONGOC_MD_FLAG_EXPERIMENTAL_FEATURES); #endif #ifdef MONGOC_ENABLE_SSL_LIBRESSL _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SSL_LIBRESSL); #endif #ifdef MONGOC_ENABLE_SASL_CYRUS _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SASL_CYRUS); #endif #ifdef MONGOC_ENABLE_SASL_SSPI _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SASL_SSPI); #endif #ifdef MONGOC_HAVE_SOCKLEN _set_bit (bf, byte_count, MONGOC_MD_FLAG_HAVE_SOCKLEN); #endif #ifdef MONGOC_ENABLE_COMPRESSION _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_COMPRESSION); #endif #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_COMPRESSION_SNAPPY); #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_COMPRESSION_ZLIB); #endif #ifdef MONGOC_HAVE_RES_NSEARCH _set_bit (bf, byte_count, 
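   /* Note: _set_bit () maps bit 0 to the least-significant bit of the last
    * byte in bf, so the "0x..." string built below reads with the
    * highest-numbered flags on the left and MONGOC_MD_FLAG_ENABLE_CRYPTO in
    * the rightmost bit. */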
MONGOC_MD_FLAG_ENABLE_RES_NSEARCH); #endif #ifdef MONGOC_HAVE_RES_NDESTROY _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_RES_NDESTROY); #endif #ifdef MONGOC_HAVE_RES_NCLOSE _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_RES_NCLOSE); #endif #ifdef MONGOC_HAVE_RES_SEARCH _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_RES_SEARCH); #endif #ifdef MONGOC_HAVE_DNSAPI _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_DNSAPI); #endif #ifdef MONGOC_HAVE_RDTSCP _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_RDTSCP); #endif #ifdef MONGOC_HAVE_SCHED_GETCPU _set_bit (bf, byte_count, MONGOC_MD_FLAG_HAVE_SCHED_GETCPU); #endif #ifdef MONGOC_ENABLE_SHM_COUNTERS _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_SHM_COUNTERS); #endif #ifdef MONGOC_TRACE _set_bit (bf, byte_count, MONGOC_MD_FLAG_TRACE); #endif #ifdef MONGOC_ENABLE_ICU _set_bit (bf, byte_count, MONGOC_MD_FLAG_ENABLE_ICU); #endif str = bson_string_new ("0x"); for (i = 0; i < byte_count; i++) { bson_string_append_printf (str, "%02x", bf[i]); } bson_free (bf); /* free the bson_string_t, but keep the underlying char* alive. */ return bson_string_free (str, false); } static char * _get_os_type (void) { #ifdef MONGOC_OS_TYPE return bson_strndup (MONGOC_OS_TYPE, HANDSHAKE_OS_TYPE_MAX); #else return bson_strndup ("unknown", HANDSHAKE_OS_TYPE_MAX); #endif } static char * _get_os_architecture (void) { const char *ret = NULL; #ifdef _WIN32 SYSTEM_INFO system_info; DWORD arch; GetSystemInfo (&system_info); arch = system_info.wProcessorArchitecture; switch (arch) { case PROCESSOR_ARCHITECTURE_AMD64: ret = "x86_64"; break; case PROCESSOR_ARCHITECTURE_ARM: ret = "ARM"; break; case PROCESSOR_ARCHITECTURE_IA64: ret = "IA64"; break; case PROCESSOR_ARCHITECTURE_INTEL: ret = "x86"; break; case PROCESSOR_ARCHITECTURE_UNKNOWN: ret = "Unknown"; break; default: ret = "Other"; break; } #elif defined(_POSIX_VERSION) struct utsname system_info; if (uname (&system_info) >= 0) { ret = system_info.machine; } #endif if (ret) { return bson_strndup (ret, HANDSHAKE_OS_ARCHITECTURE_MAX); } return NULL; } #ifndef MONGOC_OS_IS_LINUX static char * _get_os_name (void) { #ifdef MONGOC_OS_NAME return bson_strndup (MONGOC_OS_NAME, HANDSHAKE_OS_NAME_MAX); #elif defined(_POSIX_VERSION) struct utsname system_info; if (uname (&system_info) >= 0) { return bson_strndup (system_info.sysname, HANDSHAKE_OS_NAME_MAX); } #endif return NULL; } static char * _get_os_version (void) { char *ret = bson_malloc (HANDSHAKE_OS_VERSION_MAX); bool found = false; #ifdef _WIN32 OSVERSIONINFO osvi; ZeroMemory (&osvi, sizeof (OSVERSIONINFO)); osvi.dwOSVersionInfoSize = sizeof (OSVERSIONINFO); if (GetVersionEx (&osvi)) { bson_snprintf (ret, HANDSHAKE_OS_VERSION_MAX, "%lu.%lu (%lu)", osvi.dwMajorVersion, osvi.dwMinorVersion, osvi.dwBuildNumber); found = true; } else { MONGOC_WARNING ("Error with GetVersionEx(): %lu", GetLastError ()); } #elif defined(_POSIX_VERSION) struct utsname system_info; if (uname (&system_info) >= 0) { bson_strncpy (ret, system_info.release, HANDSHAKE_OS_VERSION_MAX); found = true; } else { MONGOC_WARNING ("Error with uname(): %d", errno); } #endif if (!found) { bson_free (ret); ret = NULL; } return ret; } #endif static void _get_system_info (mongoc_handshake_t *handshake) { handshake->os_type = _get_os_type (); #ifdef MONGOC_OS_IS_LINUX _mongoc_linux_distro_scanner_get_distro (&handshake->os_name, &handshake->os_version); #else handshake->os_name = _get_os_name (); handshake->os_version = _get_os_version (); #endif handshake->os_architecture = _get_os_architecture (); } static void 
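/* Frees the os_type / os_name / os_version / os_architecture strings
 * allocated by _get_system_info () above. */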
_free_system_info (mongoc_handshake_t *handshake) { bson_free (handshake->os_type); bson_free (handshake->os_name); bson_free (handshake->os_version); bson_free (handshake->os_architecture); } static void _get_driver_info (mongoc_handshake_t *handshake) { handshake->driver_name = bson_strndup ("mongoc", HANDSHAKE_DRIVER_NAME_MAX); handshake->driver_version = bson_strndup (MONGOC_VERSION_S, HANDSHAKE_DRIVER_VERSION_MAX); } static void _free_driver_info (mongoc_handshake_t *handshake) { bson_free (handshake->driver_name); bson_free (handshake->driver_version); } static void _set_platform_string (mongoc_handshake_t *handshake) { bson_string_t *str; str = bson_string_new (""); handshake->platform = bson_string_free (str, false); } static void _set_compiler_info (mongoc_handshake_t *handshake) { bson_string_t *str; char *config_str; str = bson_string_new (""); config_str = _mongoc_handshake_get_config_hex_string (); bson_string_append_printf (str, "cfg=%s", config_str); bson_free (config_str); #ifdef _POSIX_VERSION bson_string_append_printf (str, " posix=%ld", _POSIX_VERSION); #endif #ifdef __STDC_VERSION__ bson_string_append_printf (str, " stdc=%ld", __STDC_VERSION__); #endif bson_string_append_printf (str, " CC=%s", MONGOC_COMPILER); #ifdef MONGOC_COMPILER_VERSION bson_string_append_printf (str, " %s", MONGOC_COMPILER_VERSION); #endif handshake->compiler_info = bson_string_free (str, false); } static void _set_flags (mongoc_handshake_t *handshake) { bson_string_t *str; str = bson_string_new (""); if (strlen (MONGOC_EVALUATE_STR (MONGOC_USER_SET_CFLAGS)) > 0) { bson_string_append_printf ( str, " CFLAGS=%s", MONGOC_EVALUATE_STR (MONGOC_USER_SET_CFLAGS)); } if (strlen (MONGOC_EVALUATE_STR (MONGOC_USER_SET_LDFLAGS)) > 0) { bson_string_append_printf ( str, " LDFLAGS=%s", MONGOC_EVALUATE_STR (MONGOC_USER_SET_LDFLAGS)); } handshake->flags = bson_string_free (str, false); } static void _free_platform_string (mongoc_handshake_t *handshake) { bson_free (handshake->platform); bson_free (handshake->compiler_info); bson_free (handshake->flags); } void _mongoc_handshake_init (void) { _get_system_info (_mongoc_handshake_get ()); _get_driver_info (_mongoc_handshake_get ()); _set_platform_string (_mongoc_handshake_get ()); _set_compiler_info (_mongoc_handshake_get ()); _set_flags (_mongoc_handshake_get ()); _mongoc_handshake_get ()->frozen = false; bson_mutex_init (&gHandshakeLock); } void _mongoc_handshake_cleanup (void) { _free_system_info (_mongoc_handshake_get ()); _free_driver_info (_mongoc_handshake_get ()); _free_platform_string (_mongoc_handshake_get ()); bson_mutex_destroy (&gHandshakeLock); } static void _append_platform_field (bson_t *doc, const char *platform) { int max_platform_str_size; char *compiler_info = _mongoc_handshake_get ()->compiler_info; char *flags = _mongoc_handshake_get ()->flags; bson_string_t *combined_platform = bson_string_new (platform); /* Compute space left for platform field */ max_platform_str_size = HANDSHAKE_MAX_SIZE - ((int) doc->len + /* 1 byte for utf8 tag */ 1 + /* key size */ (int) strlen (HANDSHAKE_PLATFORM_FIELD) + 1 + /* 4 bytes for length of string */ 4); if (max_platform_str_size <= 0) { return; } /* We opt to drop compiler info and flags if they can't fit, while the * platform information is truncated * Try to drop flags first, and if there is still not enough space also drop * compiler info */ if (max_platform_str_size > combined_platform->len + strlen (compiler_info) + 1) { bson_string_append (combined_platform, compiler_info); } if (max_platform_str_size > 
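/* A rough worked example of the budget computed above, with assumed sizes:
 * if HANDSHAKE_MAX_SIZE is 512 and the handshake document built so far
 * (doc->len) is 200 bytes, then
 *
 *    max_platform_str_size = 512 - (200 + 1 + strlen ("platform") + 1 + 4)
 *                          = 512 - 214 = 298
 *
 * i.e. at most 297 bytes of platform text plus the trailing NUL can fit.
 * The surrounding checks only append compiler_info and flags when they fit
 * entirely within that budget. */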
combined_platform->len + strlen (flags) + 1) { bson_string_append (combined_platform, flags); } /* The checks above drop the compiler info and CFLAGS/LDFLAGS entirely when * they would not fit; only the platform string itself is truncated here */ bson_append_utf8 ( doc, HANDSHAKE_PLATFORM_FIELD, -1, combined_platform->str, BSON_MIN (max_platform_str_size - 1, combined_platform->len)); bson_string_free (combined_platform, true); BSON_ASSERT (doc->len <= HANDSHAKE_MAX_SIZE); } /* * Return true if we build the document, and it's not too big * false if there's no way to prevent the doc from being too big. In this * case, the caller shouldn't include it with isMaster */ bool _mongoc_handshake_build_doc_with_application (bson_t *doc, const char *appname) { const mongoc_handshake_t *md = _mongoc_handshake_get (); bson_t child; if (appname) { BSON_APPEND_DOCUMENT_BEGIN (doc, "application", &child); BSON_APPEND_UTF8 (&child, "name", appname); bson_append_document_end (doc, &child); } BSON_APPEND_DOCUMENT_BEGIN (doc, "driver", &child); BSON_APPEND_UTF8 (&child, "name", md->driver_name); BSON_APPEND_UTF8 (&child, "version", md->driver_version); bson_append_document_end (doc, &child); BSON_APPEND_DOCUMENT_BEGIN (doc, "os", &child); BSON_ASSERT (md->os_type); BSON_APPEND_UTF8 (&child, "type", md->os_type); if (md->os_name) { BSON_APPEND_UTF8 (&child, "name", md->os_name); } if (md->os_version) { BSON_APPEND_UTF8 (&child, "version", md->os_version); } if (md->os_architecture) { BSON_APPEND_UTF8 (&child, "architecture", md->os_architecture); } bson_append_document_end (doc, &child); if (doc->len > HANDSHAKE_MAX_SIZE) { /* We've done all we can possibly do to ensure the current * document is below the maxsize, so if it overflows there is * nothing else we can do, so we fail */ return false; } if (md->platform) { _append_platform_field (doc, md->platform); } return true; } void _mongoc_handshake_freeze (void) { _mongoc_handshake_get ()->frozen = true; } /* * free (*s) and make *s point to *s concatenated with suffix. * If *s is NULL it's treated like it's an empty string. * If suffix is NULL, nothing happens. */ static void _append_and_truncate (char **s, const char *suffix, int max_len) { char *old_str = *s; char *prefix; const int delim_len = (int) strlen (" / "); int space_for_suffix; BSON_ASSERT (s); prefix = old_str ? old_str : ""; if (!suffix) { return; } space_for_suffix = max_len - (int) strlen (prefix) - delim_len; if (space_for_suffix <= 0) { /* the old string already takes the whole allotted space */ return; } *s = bson_strdup_printf ("%s / %.*s", prefix, space_for_suffix, suffix); BSON_ASSERT (strlen (*s) <= max_len); bson_free (old_str); } /* * Set some values in our global handshake struct. These values will be sent * to the server as part of the initial connection handshake (isMaster). * If this function is called more than once, or after we've connected to a * mongod, then it will do nothing and return false. It will return true if it * successfully sets the values. * * All arguments are optional. 
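 *
 * A minimal usage sketch (the wrapper name, version and platform strings
 * below are hypothetical):
 *
 *    mongoc_init ();
 *    if (!mongoc_handshake_data_append ("my-wrapper", "0.1.0",
 *                                       "wrapper-specific build info")) {
 *       MONGOC_ERROR ("handshake data is already frozen");
 *    }
 *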
*/ bool mongoc_handshake_data_append (const char *driver_name, const char *driver_version, const char *platform) { int platform_space; bson_mutex_lock (&gHandshakeLock); if (_mongoc_handshake_get ()->frozen) { bson_mutex_unlock (&gHandshakeLock); return false; } /* allow practically any size for "platform", we'll trim it down in * _mongoc_handshake_build_doc_with_application */ platform_space = HANDSHAKE_MAX_SIZE - (int) strlen (_mongoc_handshake_get ()->platform); /* we check for an empty string as a special case to avoid an unnecessary * delimiter being added in front of the string by _append_and_truncate */ if (strcmp (_mongoc_handshake_get ()->platform, "") == 0) { bson_free (_mongoc_handshake_get ()->platform); _mongoc_handshake_get ()->platform = bson_strdup_printf ("%.*s", platform_space, platform); } else { _append_and_truncate ( &_mongoc_handshake_get ()->platform, platform, HANDSHAKE_MAX_SIZE); } _append_and_truncate (&_mongoc_handshake_get ()->driver_name, driver_name, HANDSHAKE_DRIVER_NAME_MAX); _append_and_truncate (&_mongoc_handshake_get ()->driver_version, driver_version, HANDSHAKE_DRIVER_VERSION_MAX); _mongoc_handshake_freeze (); bson_mutex_unlock (&gHandshakeLock); return true; } mongoc_handshake_t * _mongoc_handshake_get (void) { return &gMongocHandshake; } bool _mongoc_handshake_appname_is_valid (const char *appname) { return strlen (appname) <= MONGOC_HANDSHAKE_APPNAME_MAX; } void _mongoc_handshake_append_sasl_supported_mechs (const mongoc_uri_t *uri, bson_t *cmd) { const char *username; char *db_user; username = mongoc_uri_get_username (uri); db_user = bson_strdup_printf ("%s.%s", mongoc_uri_get_auth_source (uri), username); bson_append_utf8 (cmd, "saslSupportedMechs", 18, db_user, -1); bson_free (db_user); } void _mongoc_handshake_parse_sasl_supported_mechs ( const bson_t *ismaster, mongoc_handshake_sasl_supported_mechs_t *sasl_supported_mechs) { bson_iter_t iter; memset (sasl_supported_mechs, 0, sizeof (*sasl_supported_mechs)); if (bson_iter_init_find (&iter, ismaster, "saslSupportedMechs")) { bson_iter_t array_iter; if (BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &array_iter)) { while (bson_iter_next (&array_iter)) { if (BSON_ITER_HOLDS_UTF8 (&array_iter)) { const char *mechanism_name = bson_iter_utf8 (&array_iter, NULL); if (0 == strcmp (mechanism_name, "SCRAM-SHA-256")) { sasl_supported_mechs->scram_sha_256 = true; } else if (0 == strcmp (mechanism_name, "SCRAM-SHA-1")) { sasl_supported_mechs->scram_sha_1 = true; } } } } } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-handshake.h0000644000076500000240000000612213572250757025412 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_HANDSHAKE_H #define MONGOC_HANDSHAKE_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS #define MONGOC_HANDSHAKE_APPNAME_MAX 128 /** * mongoc_handshake_data_append: * * This function is intended for use by drivers which wrap the C Driver. 
* Calling this function will store the given strings as handshake data about * the system and driver by appending them to the handshake data for the * underlying C Driver. These values, along with other handshake data collected * during mongoc_init will be sent to the server as part of the initial * connection handshake in the "client" document. This function cannot be * called more than once, or after a handshake has been initiated. * * The passed in strings are copied, and don't have to remain valid after the * call to mongoc_handshake_data_append(). The driver may store truncated * versions of the passed in strings. * * Note: * This function allocates memory, and therefore caution should be used when * using this in conjunction with bson_mem_set_vtable. If this function is * called before bson_mem_set_vtable, then bson_mem_restore_vtable must be * called before calling mongoc_cleanup. Failure to do so will result in * memory being freed by the wrong allocator. * * * @driver_name: An optional string storing the name of the wrapping driver * @driver_version: An optional string storing the version of the wrapping * driver. * @platform: An optional string storing any information about the current * platform, for example configure options or compile flags. * * * Returns true if the given fields are set successfully. Otherwise, it returns * false and logs an error. * * The default handshake data the driver sends with "isMaster" looks something * like: * client: { * driver: { * name: "mongoc", * version: "1.5.0" * }, * os: {...}, * platform: "CC=gcc CFLAGS=-Wall -pedantic" * } * * If we call * mongoc_handshake_data_append ("phongo", "1.1.8", "CC=clang") * and it returns true, the driver sends handshake data like: * client: { * driver: { * name: "mongoc / phongo", * version: "1.5.0 / 1.1.8" * }, * os: {...}, * platform: "CC=gcc CFLAGS=-Wall -pedantic / CC=clang" * } * */ MONGOC_EXPORT (bool) mongoc_handshake_data_append (const char *driver_name, const char *driver_version, const char *platform); BSON_END_DECLS #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-host-list-private.h0000644000076500000240000000403713572250757027065 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_HOST_LIST_PRIVATE_H #define MONGOC_HOST_LIST_PRIVATE_H #include "mongoc/mongoc-host-list.h" BSON_BEGIN_DECLS mongoc_host_list_t * _mongoc_host_list_push (const char *host, uint16_t port, int family, mongoc_host_list_t *next); mongoc_host_list_t * _mongoc_host_list_copy (const mongoc_host_list_t *src, mongoc_host_list_t *next); bool _mongoc_host_list_from_string (mongoc_host_list_t *host_list, const char *host_and_port); bool _mongoc_host_list_from_string_with_err (mongoc_host_list_t *host_list, const char *host_and_port, bson_error_t *error); bool _mongoc_host_list_from_hostport_with_err (mongoc_host_list_t *host_list, const char *host, uint16_t port, bson_error_t *error); bool _mongoc_host_list_equal (const mongoc_host_list_t *host_a, const mongoc_host_list_t *host_b); void _mongoc_host_list_remove_host (mongoc_host_list_t **phosts, const char *host, uint16_t port); void _mongoc_host_list_destroy_all (mongoc_host_list_t *host); BSON_END_DECLS #endif /* MONGOC_HOST_LIST_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-host-list.c0000644000076500000240000001753213572250757025414 0ustar alcaeusstaff/* * Copyright 2015 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-host-list-private.h" /* strcasecmp on windows */ #include "mongoc/mongoc-util-private.h" #include "mongoc/utlist.h" /* *-------------------------------------------------------------------------- * * _mongoc_host_list_push -- * * Add a host to the front of the list and return it. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_host_list_t * _mongoc_host_list_push (const char *host, uint16_t port, int family, mongoc_host_list_t *next) { mongoc_host_list_t *h; BSON_ASSERT (host); h = bson_malloc0 (sizeof (mongoc_host_list_t)); bson_strncpy (h->host, host, sizeof h->host); h->port = port; bson_snprintf ( h->host_and_port, sizeof h->host_and_port, "%s:%hu", host, port); h->family = family; h->next = next; return h; } /* Duplicates the elements of {src}, creating a new chain, * optionally prepended to an existing chain {next}. * * Note that as a side-effect of the implementation, * this reverses the order of src's copy in the destination. */ mongoc_host_list_t * _mongoc_host_list_copy (const mongoc_host_list_t *src, mongoc_host_list_t *next) { mongoc_host_list_t *h = NULL; const mongoc_host_list_t *src_iter; LL_FOREACH (src, src_iter) { h = bson_malloc0 (sizeof (mongoc_host_list_t)); memcpy (h, src_iter, sizeof (mongoc_host_list_t)); LL_PREPEND (next, h); } return h; } /* *-------------------------------------------------------------------------- * * _mongoc_host_list_equal -- * * Check two hosts have the same domain (case-insensitive), port, * and address family. * * Side effects: * None. 
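 *
 *       For example (hypothetical values), "LocalHost:27017" and
 *       "localhost:27017" with the same family compare equal, because the
 *       host_and_port comparison is case-insensitive, while
 *       "localhost:27017" and "localhost:27018" do not.
 *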
* *-------------------------------------------------------------------------- */ bool _mongoc_host_list_equal (const mongoc_host_list_t *host_a, const mongoc_host_list_t *host_b) { return (!strcasecmp (host_a->host_and_port, host_b->host_and_port) && host_a->family == host_b->family); } /* *-------------------------------------------------------------------------- * * _mongoc_host_list_destroy_all -- * * Destroy whole linked list of hosts. * *-------------------------------------------------------------------------- */ void _mongoc_host_list_destroy_all (mongoc_host_list_t *host) { mongoc_host_list_t *tmp; while (host) { tmp = host->next; bson_free (host); host = tmp; } } /* *-------------------------------------------------------------------------- * * _mongoc_host_list_from_string -- * * Populate a mongoc_host_list_t from a fully qualified address * *-------------------------------------------------------------------------- */ bool _mongoc_host_list_from_string (mongoc_host_list_t *link_, const char *address) { bson_error_t error = {0}; bool r = _mongoc_host_list_from_string_with_err (link_, address, &error); if (!r) { MONGOC_ERROR ("Could not parse address %s: %s", address, error.message); return false; } return true; } bool _mongoc_host_list_from_string_with_err (mongoc_host_list_t *link_, const char *address, bson_error_t *error) { char *close_bracket; char *sport; uint16_t port; char *host; bool ret; bool ipv6 = false; close_bracket = strchr (address, ']'); /* if this is an ipv6 address. */ if (close_bracket) { /* if present, the port should immediately follow after ] */ sport = strchr (close_bracket, ':'); if (sport > close_bracket + 1) { return false; } /* otherwise ] should be the last char. */ if (!sport && *(close_bracket + 1) != '\0') { return false; } if (*address != '[') { return false; } ipv6 = true; } /* otherwise, just find the first : */ else { sport = strchr (address, ':'); } /* like "example.com:27019" or "[fe80::1]:27019", but not "[fe80::1]" */ if (sport) { if (sport == address) { /* bad address like ":27017" */ return false; } if (!mongoc_parse_port (&port, sport + 1)) { return false; } /* if this is an ipv6 address, strip the [ and ] */ if (ipv6) { host = bson_strndup (address + 1, close_bracket - address - 1); } else { host = bson_strndup (address, sport - address); } } else { /* if this is an ipv6 address, strip the [ and ] */ if (ipv6) { host = bson_strndup (address + 1, close_bracket - address - 1); } else { host = bson_strdup (address); } port = MONGOC_DEFAULT_PORT; } ret = _mongoc_host_list_from_hostport_with_err (link_, host, port, error); bson_free (host); return ret; } bool _mongoc_host_list_from_hostport_with_err (mongoc_host_list_t *link_, const char *host, uint16_t port, bson_error_t *error) { size_t host_len = strlen (host); link_->port = port; if (host_len == 0) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NAME_RESOLUTION, "Empty hostname in URI"); return false; } if (host_len > BSON_HOST_NAME_MAX) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NAME_RESOLUTION, "Hostname provided in URI is too long, max is %d chars", BSON_HOST_NAME_MAX); return false; } bson_strncpy (link_->host, host, host_len + 1); /* like "fe80::1" or "::1" */ if (strchr (host, ':')) { link_->family = AF_INET6; mongoc_lowercase (link_->host, link_->host); bson_snprintf (link_->host_and_port, sizeof link_->host_and_port, "[%s]:%hu", link_->host, link_->port); } else if (strchr (host, '/') && strstr (host, ".sock")) { link_->family = AF_UNIX; 
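/* Illustrative inputs for the parsing above (values are examples only):
 *
 *    "example.com:27019"        -> host "example.com", port 27019, AF_UNSPEC
 *    "[fe80::1]:27018"          -> host "fe80::1",     port 27018, AF_INET6
 *    "/tmp/mongodb-27017.sock"  -> host is the socket path,        AF_UNIX
 *    "example.com"              -> host "example.com", port MONGOC_DEFAULT_PORT
 */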
bson_strncpy (link_->host_and_port, link_->host, host_len + 1); } else { /* This is either an IPv4 or hostname. */ link_->family = AF_UNSPEC; mongoc_lowercase (link_->host, link_->host); bson_snprintf (link_->host_and_port, sizeof link_->host_and_port, "%s:%hu", link_->host, link_->port); } link_->next = NULL; return true; } void _mongoc_host_list_remove_host (mongoc_host_list_t **hosts, const char *host, uint16_t port) { mongoc_host_list_t *current; mongoc_host_list_t *prev = NULL; for (current = *hosts; current; prev = current, current = current->next) { if ((current->port == port) && (strcmp (current->host, host) == 0)) { /* Node found, unlink. */ if (prev) { prev->next = current->next; } else { /* No previous, unlinking at head. */ *hosts = current->next; } bson_free (current); break; } } }mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-host-list.h0000644000076500000240000000221013572250757025404 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_HOST_LIST_H #define MONGOC_HOST_LIST_H #include BSON_BEGIN_DECLS #ifdef _POSIX_HOST_NAME_MAX #define BSON_HOST_NAME_MAX _POSIX_HOST_NAME_MAX #else #define BSON_HOST_NAME_MAX 255 #endif typedef struct _mongoc_host_list_t mongoc_host_list_t; struct _mongoc_host_list_t { mongoc_host_list_t *next; char host[BSON_HOST_NAME_MAX + 1]; char host_and_port[BSON_HOST_NAME_MAX + 7]; uint16_t port; int family; void *padding[4]; }; BSON_END_DECLS #endif /* MONGOC_HOST_LIST_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-index.c0000644000076500000240000000503513572250757024570 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-index.h" #include "mongoc/mongoc-trace-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "gridfs_index" static mongoc_index_opt_t gMongocIndexOptDefault = { 1, /* is_initialized */ 0, /* background */ 0, /* unique */ NULL, /* name */ 0, /* drop_dups */ 0, /* sparse */ -1, /* expire_after_seconds */ -1, /* v */ NULL, /* weights */ NULL, /* default_language */ NULL, /* language_override */ NULL, /* mongoc_index_opt_geo_t geo_options */ NULL, /* mongoc_index_opt_storage_t storage_options */ NULL, /* partial_filter_expression */ NULL, /* collation */ {NULL} /* struct padding */ }; static mongoc_index_opt_geo_t gMongocIndexOptGeoDefault = { 26, /* twod_sphere_version */ -90, /* twod_bits_precision */ 90, /* twod_location_min */ -1, /* twod_location_max */ 2, /* haystack_bucket_size */ {NULL} /* struct padding */ }; static mongoc_index_opt_wt_t gMongocIndexOptWTDefault = { {MONGOC_INDEX_STORAGE_OPT_WIREDTIGER}, /* mongoc_index_opt_storage_t */ "", /* config_str */ {NULL} /* struct padding */ }; const mongoc_index_opt_t * mongoc_index_opt_get_default (void) { return &gMongocIndexOptDefault; } const mongoc_index_opt_geo_t * mongoc_index_opt_geo_get_default (void) { return &gMongocIndexOptGeoDefault; } const mongoc_index_opt_wt_t * mongoc_index_opt_wt_get_default (void) { return &gMongocIndexOptWTDefault; } void mongoc_index_opt_init (mongoc_index_opt_t *opt) { BSON_ASSERT (opt); memcpy (opt, &gMongocIndexOptDefault, sizeof *opt); } void mongoc_index_opt_geo_init (mongoc_index_opt_geo_t *opt) { BSON_ASSERT (opt); memcpy (opt, &gMongocIndexOptGeoDefault, sizeof *opt); } void mongoc_index_opt_wt_init (mongoc_index_opt_wt_t *opt) { BSON_ASSERT (opt); memcpy (opt, &gMongocIndexOptWTDefault, sizeof *opt); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-index.h0000644000076500000240000000444513572250757024601 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
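 *
 * A brief usage sketch for the option structs declared below (field values
 * are illustrative):
 *
 *    mongoc_index_opt_t opt;
 *    mongoc_index_opt_init (&opt);
 *    opt.unique = true;
 *    opt.name = "my_unique_idx";
 *
 * mongoc_index_opt_init() copies the defaults returned by
 * mongoc_index_opt_get_default() into the caller-supplied struct.
 *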
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_INDEX_H #define MONGOC_INDEX_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS typedef struct { uint8_t twod_sphere_version; uint8_t twod_bits_precision; double twod_location_min; double twod_location_max; double haystack_bucket_size; uint8_t *padding[32]; } mongoc_index_opt_geo_t; typedef struct { int type; } mongoc_index_opt_storage_t; typedef enum { MONGOC_INDEX_STORAGE_OPT_MMAPV1, MONGOC_INDEX_STORAGE_OPT_WIREDTIGER, } mongoc_index_storage_opt_type_t; typedef struct { mongoc_index_opt_storage_t base; const char *config_str; void *padding[8]; } mongoc_index_opt_wt_t; typedef struct { bool is_initialized; bool background; bool unique; const char *name; bool drop_dups; bool sparse; int32_t expire_after_seconds; int32_t v; const bson_t *weights; const char *default_language; const char *language_override; mongoc_index_opt_geo_t *geo_options; mongoc_index_opt_storage_t *storage_options; const bson_t *partial_filter_expression; const bson_t *collation; void *padding[4]; } mongoc_index_opt_t; MONGOC_EXPORT (const mongoc_index_opt_t *) mongoc_index_opt_get_default (void) BSON_GNUC_PURE; MONGOC_EXPORT (const mongoc_index_opt_geo_t *) mongoc_index_opt_geo_get_default (void) BSON_GNUC_PURE; MONGOC_EXPORT (const mongoc_index_opt_wt_t *) mongoc_index_opt_wt_get_default (void) BSON_GNUC_PURE; MONGOC_EXPORT (void) mongoc_index_opt_init (mongoc_index_opt_t *opt); MONGOC_EXPORT (void) mongoc_index_opt_geo_init (mongoc_index_opt_geo_t *opt); MONGOC_EXPORT (void) mongoc_index_opt_wt_init (mongoc_index_opt_wt_t *opt); BSON_END_DECLS #endif /* MONGOC_INDEX_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-init.c0000644000076500000240000001027413572250757024425 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-init.h" #include "mongoc/mongoc-handshake-private.h" #ifdef MONGOC_ENABLE_SSL_OPENSSL #include "mongoc/mongoc-openssl-private.h" #elif defined(MONGOC_ENABLE_SSL_LIBRESSL) #include "tls.h" #endif #include "mongoc/mongoc-thread-private.h" #include "common-b64-private.h" #if defined(MONGOC_ENABLE_CRYPTO_CNG) #include "mongoc/mongoc-crypto-private.h" #include "mongoc/mongoc-crypto-cng-private.h" #endif #ifndef MONGOC_NO_AUTOMATIC_GLOBALS #pragma message( \ "Configure the driver with ENABLE_AUTOMATIC_INIT_AND_CLEANUP=OFF.\ Automatic cleanup is deprecated and will be removed in version 2.0.") #endif #ifdef MONGOC_ENABLE_SASL_CYRUS #include static void * mongoc_cyrus_mutex_alloc (void) { bson_mutex_t *mutex; mutex = (bson_mutex_t *) bson_malloc0 (sizeof (bson_mutex_t)); bson_mutex_init (mutex); return (void *) mutex; } static int mongoc_cyrus_mutex_lock (void *mutex) { bson_mutex_lock ((bson_mutex_t *) mutex); return SASL_OK; } static int mongoc_cyrus_mutex_unlock (void *mutex) { bson_mutex_unlock ((bson_mutex_t *) mutex); return SASL_OK; } static void mongoc_cyrus_mutex_free (void *mutex) { bson_mutex_destroy ((bson_mutex_t *) mutex); bson_free (mutex); } #endif /* MONGOC_ENABLE_SASL_CYRUS */ static BSON_ONCE_FUN (_mongoc_do_init) { #ifdef MONGOC_ENABLE_SASL_CYRUS int status; #endif #ifdef MONGOC_ENABLE_SSL_OPENSSL _mongoc_openssl_init (); #elif defined(MONGOC_ENABLE_SSL_LIBRESSL) tls_init (); #endif #ifdef MONGOC_ENABLE_SASL_CYRUS /* The following functions should not use tracing, as they may be invoked * before mongoc_log_set_handler() can complete. */ sasl_set_mutex (mongoc_cyrus_mutex_alloc, mongoc_cyrus_mutex_lock, mongoc_cyrus_mutex_unlock, mongoc_cyrus_mutex_free); status = sasl_client_init (NULL); BSON_ASSERT (status == SASL_OK); #endif _mongoc_counters_init (); #ifdef _WIN32 { WORD wVersionRequested; WSADATA wsaData; int err; wVersionRequested = MAKEWORD (2, 2); err = WSAStartup (wVersionRequested, &wsaData); /* check the version perhaps? */ BSON_ASSERT (err == 0); } #endif #if defined(MONGOC_ENABLE_CRYPTO_CNG) mongoc_crypto_cng_init (); #endif _mongoc_handshake_init (); BSON_ONCE_RETURN; } void mongoc_init (void) { static bson_once_t once = BSON_ONCE_INIT; bson_once (&once, _mongoc_do_init); } static BSON_ONCE_FUN (_mongoc_do_cleanup) { #ifdef MONGOC_ENABLE_SSL_OPENSSL _mongoc_openssl_cleanup (); #endif #ifdef MONGOC_ENABLE_SASL_CYRUS #ifdef MONGOC_HAVE_SASL_CLIENT_DONE sasl_client_done (); #else /* fall back to deprecated function */ sasl_done (); #endif #endif #ifdef _WIN32 WSACleanup (); #endif #if defined(MONGOC_ENABLE_CRYPTO_CNG) mongoc_crypto_cng_cleanup (); #endif _mongoc_counters_cleanup (); _mongoc_handshake_cleanup (); BSON_ONCE_RETURN; } void mongoc_cleanup (void) { static bson_once_t once = BSON_ONCE_INIT; bson_once (&once, _mongoc_do_cleanup); } /* * On GCC, just use __attribute__((constructor)) to perform initialization * automatically for the application. 
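 *
 * When the library is built with automatic init/cleanup disabled
 * (MONGOC_NO_AUTOMATIC_GLOBALS defined), the application is expected to call
 * these entry points itself, roughly:
 *
 *    mongoc_init ();
 *    ... use the driver ...
 *    mongoc_cleanup ();
 *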
*/ #if defined(__GNUC__) && !defined(MONGOC_NO_AUTOMATIC_GLOBALS) static void _mongoc_init_ctor (void) __attribute__ ((constructor)); static void _mongoc_init_ctor (void) { mongoc_init (); } static void _mongoc_init_dtor (void) __attribute__ ((destructor)); static void _mongoc_init_dtor (void) { bson_mem_restore_vtable (); mongoc_cleanup (); } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-init.h0000644000076500000240000000156613572250757024436 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_INIT_H #define MONGOC_INIT_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS MONGOC_EXPORT (void) mongoc_init (void); MONGOC_EXPORT (void) mongoc_cleanup (void); BSON_END_DECLS #endif /* MONGOC_INIT_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-iovec.h0000644000076500000240000000254713572250757024600 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_IOVEC_H #define MONGOC_IOVEC_H #include #ifdef _WIN32 #include #else #include #endif BSON_BEGIN_DECLS #ifdef _WIN32 typedef struct { size_t iov_len; char *iov_base; } mongoc_iovec_t; BSON_STATIC_ASSERT2 (sizeof_iovect_t, sizeof (mongoc_iovec_t) == sizeof (WSABUF)); BSON_STATIC_ASSERT2 (offsetof_iovec_base, offsetof (mongoc_iovec_t, iov_base) == offsetof (WSABUF, buf)); BSON_STATIC_ASSERT2 (offsetof_iovec_len, offsetof (mongoc_iovec_t, iov_len) == offsetof (WSABUF, len)); #else typedef struct iovec mongoc_iovec_t; #endif BSON_END_DECLS #endif /* MONGOC_IOVEC_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-libressl-private.h0000644000076500000240000000221613572250757026753 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_LIBRESSL_PRIVATE_H #define MONGOC_LIBRESSL_PRIVATE_H #include #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream-tls-libressl-private.h" #include BSON_BEGIN_DECLS bool mongoc_libressl_setup_ca (mongoc_stream_tls_libressl_t *libressl, mongoc_ssl_opt_t *opt); bool mongoc_libressl_setup_certificate (mongoc_stream_tls_libressl_t *libressl, mongoc_ssl_opt_t *opt); BSON_END_DECLS #endif /* MONGOC_LIBRESSL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-libressl.c0000644000076500000240000000375213572250757025304 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_LIBRESSL #include #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-libressl-private.h" #include "mongoc/mongoc-stream-tls-libressl-private.h" #include #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-libressl" bool mongoc_libressl_setup_certificate (mongoc_stream_tls_libressl_t *libressl, mongoc_ssl_opt_t *opt) { uint8_t *file; size_t file_len; if (!opt->pem_file) { return false; } file = tls_load_file (opt->pem_file, &file_len, (char *) opt->pem_pwd); if (!file) { MONGOC_ERROR ("Cannot load private key: '%s'", opt->pem_file); return false; } if (tls_config_set_keypair_mem ( libressl->config, file, file_len, file, file_len) == -1) { MONGOC_ERROR ("%s", tls_config_error (libressl->config)); return false; } return true; } bool mongoc_libressl_setup_ca (mongoc_stream_tls_libressl_t *libressl, mongoc_ssl_opt_t *opt) { if (opt->ca_file) { tls_config_set_ca_file (libressl->config, opt->ca_file); } if (opt->ca_dir) { tls_config_set_ca_path (libressl->config, opt->ca_dir); } return true; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner-private.h0000644000076500000240000000371113572250757031225 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_LINUX_DISTRO_SCANNER_PRIVATE_H #define MONGOC_LINUX_DISTRO_SCANNER_PRIVATE_H #include "mongoc/mongoc-handshake-os-private.h" #ifdef MONGOC_OS_IS_LINUX BSON_BEGIN_DECLS bool _mongoc_linux_distro_scanner_get_distro (char **name, char **version); /* These functions are exposed so we can test them separately. 
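 *
 * For example, given a hypothetical /etc/os-release containing:
 *
 *    NAME="Ubuntu"
 *    VERSION_ID="16.04"
 *
 * _mongoc_linux_distro_scanner_read_key_value_file (path, "NAME", -1, &name,
 * "VERSION_ID", -1, &version) is expected to set *name to "Ubuntu" and
 * *version to "16.04" (surrounding quotes are stripped).
 *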
*/ void _mongoc_linux_distro_scanner_read_key_value_file (const char *path, const char *name_key, ssize_t name_key_len, char **name, const char *version_key, ssize_t version_key_len, char **version); void _mongoc_linux_distro_scanner_read_generic_release_file (const char **paths, char **name, char **version); void _mongoc_linux_distro_scanner_split_line_by_release (const char *line, ssize_t line_len, char **name, char **version); BSON_END_DECLS #endif #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-linux-distro-scanner.c0000644000076500000240000002566113572250757027560 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-handshake-os-private.h" #ifdef MONGOC_OS_IS_LINUX #include #include #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-linux-distro-scanner-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-handshake-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-version.h" #define LINE_BUFFER_SIZE 1024 /* * fgets() wrapper which removes '\n' at the end of the string * Return 0 on failure or EOF. */ static size_t _fgets_wrapper (char *buffer, size_t buffer_size, FILE *f) { char *fgets_res; size_t len; fgets_res = fgets (buffer, buffer_size, f); if (!fgets_res) { /* Didn't read anything. Empty file or error. */ if (ferror (f)) { TRACE ("fgets() failed with error %d", errno); } return 0; } /* Chop off trailing \n */ len = strlen (buffer); if (len > 0 && buffer[len - 1] == '\n') { buffer[len - 1] = '\0'; len--; } else if (len == buffer_size - 1) { /* We read buffer_size bytes without hitting a newline * therefore the line is super long, so we say this file is invalid. * This is important since if we are in this situation, the NEXT call to * fgets() will keep reading where we left off. * * This protects us from files like: * aaaaa...DISTRIB_ID=nasal demons */ TRACE ("Found line of length %ld, bailing out", len); return 0; } return len; } static void _process_line (const char *name_key, size_t name_key_len, char **name, const char *version_key, size_t version_key_len, char **version, const char *line, size_t line_len) { size_t key_len; const char *equal_sign; const char *value; const char *needle = "="; size_t value_len = 0; ENTRY; /* Figure out where = is. Everything before is the key, * and everything after is the value */ equal_sign = strstr (line, needle); if (equal_sign == NULL) { TRACE ("Encountered malformed line: %s", line); /* This line is malformed/incomplete, so skip it */ EXIT; } /* Should never happen since we null terminated this line */ BSON_ASSERT (equal_sign < line + line_len); key_len = equal_sign - line; value = equal_sign + strlen (needle); value_len = strlen (value); if (value_len > 2 && value[0] == '"' && value[value_len - 1] == '"') { value_len -= 2; value++; } /* If we find two copies of either key, the *name == NULL check will fail * so we will just keep the first value encountered. 
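 *
 * Worked example (illustrative): for the line "DISTRIB_ID=Ubuntu" with
 * name_key "DISTRIB_ID", key_len is 10, value points at "Ubuntu" and
 * value_len is 6, so *name becomes "Ubuntu"; a later duplicate
 * "DISTRIB_ID=..." line is ignored because *name is no longer NULL.
 *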
*/ if (name_key_len == key_len && strncmp (line, name_key, key_len) == 0 && !(*name)) { *name = bson_strndup (value, value_len); TRACE ("Found name: %s", *name); } else if (version_key_len == key_len && strncmp (line, version_key, key_len) == 0 && !(*version)) { *version = bson_strndup (value, value_len); TRACE ("Found version: %s", *version); } EXIT; } /* * Parse a file of the form: * KEY=VALUE * Looking for name_key and version_key, and storing * their values into *name and *version. * The values in *name and *version must be freed with bson_free. */ void _mongoc_linux_distro_scanner_read_key_value_file (const char *path, const char *name_key, ssize_t name_key_len, char **name, const char *version_key, ssize_t version_key_len, char **version) { const int max_lines = 100; int lines_read = 0; char buffer[LINE_BUFFER_SIZE]; size_t buflen; FILE *f; ENTRY; *name = NULL; *version = NULL; if (name_key_len < 0) { name_key_len = strlen (name_key); } if (version_key_len < 0) { version_key_len = strlen (version_key); } if (access (path, R_OK)) { TRACE ("No permission to read from %s: errno: %d", path, errno); EXIT; } f = fopen (path, "r"); if (!f) { TRACE ("fopen failed on %s: %d", path, errno); EXIT; } while (lines_read < max_lines) { buflen = _fgets_wrapper (buffer, sizeof (buffer), f); if (buflen == 0) { /* Error or eof */ break; } _process_line (name_key, name_key_len, name, version_key, version_key_len, version, buffer, buflen); if (*version && *name) { /* No point in reading any more */ break; } lines_read++; } fclose (f); EXIT; } /* * Find the first string in a list which is a valid file. Assumes * passed in list is NULL terminated! */ const char * _get_first_existing (const char **paths) { const char **p = &paths[0]; ENTRY; for (; *p != NULL; p++) { if (access (*p, F_OK)) { /* Just doesn't exist */ continue; } if (access (*p, R_OK)) { TRACE ("file %s exists, but cannot be read: error %d", *p, errno); continue; } RETURN (*p); } RETURN (NULL); } /* * Given a line of text, split it by the word "release." For example: * Ubuntu release 14.04 => * *name = Ubuntu * *version = 14.04 * If the word "release" isn't found then we put the whole string into *name * (even if the string is empty). */ void _mongoc_linux_distro_scanner_split_line_by_release (const char *line, ssize_t line_len, char **name, char **version) { const char *needle_loc; const char *const needle = " release "; const char *version_string; *name = NULL; *version = NULL; if (line_len < 0) { line_len = strlen (line); } needle_loc = strstr (line, needle); if (!needle_loc) { *name = bson_strdup (line); return; } else if (needle_loc == line) { /* The file starts with the word " release " * This file is weird enough we will just abandon it. */ return; } *name = bson_strndup (line, needle_loc - line); version_string = needle_loc + strlen (needle); if (version_string == line + line_len) { /* Weird. The file just ended with "release " */ return; } *version = bson_strdup (version_string); } /* * Search for a *-release file, and read its contents. 
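 *
 * For example (hypothetical contents), if /etc/centos-release is the first
 * readable path and its first line is "CentOS release 6.7 (Final)", the line
 * is split on " release " so that *name becomes "CentOS" and *version
 * becomes "6.7 (Final)".
 *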
*/ void _mongoc_linux_distro_scanner_read_generic_release_file (const char **paths, char **name, char **version) { const char *path; size_t buflen; char buffer[LINE_BUFFER_SIZE]; FILE *f; ENTRY; *name = NULL; *version = NULL; path = _get_first_existing (paths); if (!path) { EXIT; } f = fopen (path, "r"); if (!f) { TRACE ("Found %s exists and readable but couldn't open: %d", path, errno); EXIT; } /* Read the first line of the file, look for the word "release" */ buflen = _fgets_wrapper (buffer, sizeof (buffer), f); if (buflen > 0) { TRACE ("Trying to split buffer with contents %s", buffer); /* Try splitting the string. If we can't it'll store everything in * *name. */ _mongoc_linux_distro_scanner_split_line_by_release ( buffer, buflen, name, version); } fclose (f); EXIT; } static void _get_kernel_version_from_uname (char **version) { struct utsname system_info; if (uname (&system_info) >= 0) { *version = bson_strdup_printf ("kernel %s", system_info.release); } else { *version = NULL; } } /* * Some boilerplate logic that tries to set *name and *version to new_name * and new_version if it's not already set. Values of new_name and new_version * should not be used after this call. */ static bool _set_name_and_version_if_needed (char **name, char **version, char *new_name, char *new_version) { if (new_name && !(*name)) { *name = new_name; } else { bson_free (new_name); } if (new_version && !(*version)) { *version = new_version; } else { bson_free (new_version); } return (*name) && (*version); } bool _mongoc_linux_distro_scanner_get_distro (char **name, char **version) { /* In case we decide to try looking up name/version again */ char *new_name; char *new_version; const char *generic_release_paths[] = { "/etc/redhat-release", "/etc/novell-release", "/etc/gentoo-release", "/etc/SuSE-release", "/etc/SUSE-release", "/etc/sles-release", "/etc/debian_release", "/etc/slackware-version", "/etc/centos-release", NULL, }; ENTRY; *name = NULL; *version = NULL; _mongoc_linux_distro_scanner_read_key_value_file ( "/etc/os-release", "NAME", -1, name, "VERSION_ID", -1, version); if (*name && *version) { RETURN (true); } _mongoc_linux_distro_scanner_read_key_value_file ("/etc/lsb-release", "DISTRIB_ID", -1, &new_name, "DISTRIB_RELEASE", -1, &new_version); if (_set_name_and_version_if_needed (name, version, new_name, new_version)) { RETURN (true); } /* Try to read from a generic release file */ _mongoc_linux_distro_scanner_read_generic_release_file ( generic_release_paths, &new_name, &new_version); if (_set_name_and_version_if_needed (name, version, new_name, new_version)) { RETURN (true); } if (*version == NULL) { _get_kernel_version_from_uname (version); } if (*name && *version) { RETURN (true); } bson_free (*name); bson_free (*version); *name = NULL; *version = NULL; RETURN (false); } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-list-private.h0000644000076500000240000000242313572250757026107 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_LIST_H #define MONGOC_LIST_H #include BSON_BEGIN_DECLS typedef struct _mongoc_list_t mongoc_list_t; struct _mongoc_list_t { mongoc_list_t *next; void *data; }; mongoc_list_t * _mongoc_list_append (mongoc_list_t *list, void *data); mongoc_list_t * _mongoc_list_prepend (mongoc_list_t *list, void *data); mongoc_list_t * _mongoc_list_remove (mongoc_list_t *list, void *data); void _mongoc_list_foreach (mongoc_list_t *list, void (*func) (void *data, void *user_data), void *user_data); void _mongoc_list_destroy (mongoc_list_t *list); BSON_END_DECLS #endif /* MONGOC_LIST_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-list.c0000644000076500000240000000604613572250757024437 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-list-private.h" /** * mongoc_list_append: * @list: A list to append to, or NULL. * @data: Data to append to @list. * * Appends a new link onto the linked list. * * Returns: @list or a new list if @list is NULL. */ mongoc_list_t * _mongoc_list_append (mongoc_list_t *list, void *data) { mongoc_list_t *item; mongoc_list_t *iter; item = (mongoc_list_t *) bson_malloc0 (sizeof *item); item->data = (void *) data; if (!list) { return item; } for (iter = list; iter->next; iter = iter->next) { } iter->next = item; return list; } /** * mongoc_list_prepend: * @list: A mongoc_list_t or NULL. * @data: data to prepend to the list. * * Prepends to @list a new link containing @data. * * Returns: A new link containing data with @list following. */ mongoc_list_t * _mongoc_list_prepend (mongoc_list_t *list, void *data) { mongoc_list_t *item; item = (mongoc_list_t *) bson_malloc0 (sizeof *item); item->data = (void *) data; item->next = list; return item; } /** * mongoc_list_remove: * @list: A mongoc_list_t. * @data: Data to remove from @list. * * Removes the link containing @data from @list. * * Returns: @list with the link containing @data removed. */ mongoc_list_t * _mongoc_list_remove (mongoc_list_t *list, void *data) { mongoc_list_t *iter; mongoc_list_t *prev = NULL; mongoc_list_t *ret = list; BSON_ASSERT (list); for (iter = list; iter; iter = iter->next) { if (iter->data == data) { if (iter != list) { prev->next = iter->next; } else { ret = iter->next; } bson_free (iter); break; } prev = iter; } return ret; } /** * mongoc_list_foreach: * @list: A mongoc_list_t or NULL. * @func: A func to call for each link in @list. * @user_data: User data for @func. * * Calls @func for each item in @list. */ void _mongoc_list_foreach (mongoc_list_t *list, void (*func) (void *data, void *user_data), void *user_data) { mongoc_list_t *iter; BSON_ASSERT (func); for (iter = list; iter; iter = iter->next) { func (iter->data, user_data); } } /** * mongoc_list_destroy: * @list: A mongoc_list_t. * * Destroys @list and releases any resources. 
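 *
 * Typical usage of this private list API (the data pointers and callback
 * are illustrative):
 *
 *    mongoc_list_t *list = NULL;
 *    list = _mongoc_list_append (list, item_a);
 *    list = _mongoc_list_prepend (list, item_b);
 *    _mongoc_list_foreach (list, visit_fn, user_data);
 *    _mongoc_list_destroy (list);
 *
 * Note that the list owns only its links, not the pointed-to data.
 *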
*/ void _mongoc_list_destroy (mongoc_list_t *list) { mongoc_list_t *tmp = list; while (list) { tmp = list->next; bson_free (list); list = tmp; } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-log-private.h0000644000076500000240000000215013572250757025712 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_LOG_PRIVATE_H #define MONGOC_LOG_PRIVATE_H #include "mongoc/mongoc-iovec.h" /* just for testing */ void _mongoc_log_get_handler (mongoc_log_func_t *log_func, void **user_data); bool _mongoc_log_trace_is_enabled (void); void mongoc_log_trace_bytes (const char *domain, const uint8_t *_b, size_t _l); void mongoc_log_trace_iovec (const char *domain, const mongoc_iovec_t *_iov, size_t _iovcnt); #endif /* MONGOC_LOG_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-log.c0000644000076500000240000001617413572250757024250 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if defined(__linux__) #include #elif defined(_WIN32) #include #elif defined(__FreeBSD__) #include #else #include #endif #include #include #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-log-private.h" #include "mongoc/mongoc-thread-private.h" static bson_once_t once = BSON_ONCE_INIT; static bson_mutex_t gLogMutex; static mongoc_log_func_t gLogFunc = mongoc_log_default_handler; #ifdef MONGOC_TRACE static bool gLogTrace = true; #endif static void *gLogData; static BSON_ONCE_FUN (_mongoc_ensure_mutex_once) { bson_mutex_init (&gLogMutex); BSON_ONCE_RETURN; } void mongoc_log_set_handler (mongoc_log_func_t log_func, void *user_data) { bson_once (&once, &_mongoc_ensure_mutex_once); bson_mutex_lock (&gLogMutex); gLogFunc = log_func; gLogData = user_data; bson_mutex_unlock (&gLogMutex); } /* just for testing */ void _mongoc_log_get_handler (mongoc_log_func_t *log_func, void **user_data) { *log_func = gLogFunc; *user_data = gLogData; } void mongoc_log (mongoc_log_level_t log_level, const char *log_domain, const char *format, ...) 
{ va_list args; char *message; int stop_logging; bson_once (&once, &_mongoc_ensure_mutex_once); stop_logging = !gLogFunc; #ifdef MONGOC_TRACE stop_logging = stop_logging || (log_level == MONGOC_LOG_LEVEL_TRACE && !gLogTrace); #endif if (stop_logging) { return; } BSON_ASSERT (format); va_start (args, format); message = bson_strdupv_printf (format, args); va_end (args); bson_mutex_lock (&gLogMutex); gLogFunc (log_level, log_domain, message, gLogData); bson_mutex_unlock (&gLogMutex); bson_free (message); } const char * mongoc_log_level_str (mongoc_log_level_t log_level) { switch (log_level) { case MONGOC_LOG_LEVEL_ERROR: return "ERROR"; case MONGOC_LOG_LEVEL_CRITICAL: return "CRITICAL"; case MONGOC_LOG_LEVEL_WARNING: return "WARNING"; case MONGOC_LOG_LEVEL_MESSAGE: return "MESSAGE"; case MONGOC_LOG_LEVEL_INFO: return "INFO"; case MONGOC_LOG_LEVEL_DEBUG: return "DEBUG"; case MONGOC_LOG_LEVEL_TRACE: return "TRACE"; default: return "UNKNOWN"; } } void mongoc_log_default_handler (mongoc_log_level_t log_level, const char *log_domain, const char *message, void *user_data) { struct timeval tv; struct tm tt; time_t t; FILE *stream; char nowstr[32]; int pid; bson_gettimeofday (&tv); t = tv.tv_sec; #ifdef _WIN32 #ifdef _MSC_VER localtime_s (&tt, &t); #else tt = *(localtime (&t)); #endif #else localtime_r (&t, &tt); #endif strftime (nowstr, sizeof nowstr, "%Y/%m/%d %H:%M:%S", &tt); switch (log_level) { case MONGOC_LOG_LEVEL_ERROR: case MONGOC_LOG_LEVEL_CRITICAL: case MONGOC_LOG_LEVEL_WARNING: stream = stderr; break; case MONGOC_LOG_LEVEL_MESSAGE: case MONGOC_LOG_LEVEL_INFO: case MONGOC_LOG_LEVEL_DEBUG: case MONGOC_LOG_LEVEL_TRACE: default: stream = stdout; } #ifdef __linux__ pid = syscall (SYS_gettid); #elif defined(_WIN32) pid = (int) _getpid (); #elif defined(__FreeBSD__) long tid; thr_self (&tid); pid = (int) tid; #elif defined(__OpenBSD__) pid = (int) getthrid (); #else pid = (int) getpid (); #endif fprintf (stream, "%s.%04ld: [%5d]: %8s: %12s: %s\n", nowstr, tv.tv_usec / 1000L, pid, mongoc_log_level_str (log_level), log_domain, message); } bool _mongoc_log_trace_is_enabled (void) { #ifdef MONGOC_TRACE return gLogTrace; #else return false; #endif } void mongoc_log_trace_enable (void) { #ifdef MONGOC_TRACE gLogTrace = true; #endif } void mongoc_log_trace_disable (void) { #ifdef MONGOC_TRACE gLogTrace = false; #endif } void mongoc_log_trace_bytes (const char *domain, const uint8_t *_b, size_t _l) { bson_string_t *str, *astr; int32_t _i; uint8_t _v; #ifdef MONGOC_TRACE if (!gLogTrace) { return; } #endif str = bson_string_new (NULL); astr = bson_string_new (NULL); for (_i = 0; _i < _l; _i++) { _v = *(_b + _i); if ((_i % 16) == 0) { bson_string_append_printf (str, "%05x: ", _i); } bson_string_append_printf (str, " %02x", _v); if (isprint (_v)) { bson_string_append_printf (astr, " %c", _v); } else { bson_string_append (astr, " ."); } if ((_i % 16) == 15) { mongoc_log ( MONGOC_LOG_LEVEL_TRACE, domain, "%s %s", str->str, astr->str); bson_string_truncate (str, 0); bson_string_truncate (astr, 0); } else if ((_i % 16) == 7) { bson_string_append (str, " "); bson_string_append (astr, " "); } } if (_i != 16) { mongoc_log ( MONGOC_LOG_LEVEL_TRACE, domain, "%-56s %s", str->str, astr->str); } bson_string_free (str, true); bson_string_free (astr, true); } void mongoc_log_trace_iovec (const char *domain, const mongoc_iovec_t *_iov, size_t _iovcnt) { bson_string_t *str, *astr; const char *_b; unsigned _i = 0; unsigned _j = 0; unsigned _k = 0; size_t _l = 0; uint8_t _v; #ifdef MONGOC_TRACE if (!gLogTrace) { return; } 
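/* Roughly what the hex/ASCII output of these trace helpers looks like for
 * the 5 input bytes "hello" (illustrative, spacing approximate):
 *
 *    00000:  68 65 6c 6c 6f        h e l l o
 *
 * Each row covers up to 16 bytes: an offset, the hex values, and a dot in
 * the right-hand column for any byte that is not printable. */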
#endif for (_i = 0; _i < _iovcnt; _i++) { _l += _iov[_i].iov_len; } _i = 0; str = bson_string_new (NULL); astr = bson_string_new (NULL); for (_j = 0; _j < _iovcnt; _j++) { _b = (char *) _iov[_j].iov_base; _l = _iov[_j].iov_len; for (_k = 0; _k < _l; _k++, _i++) { _v = *(_b + _k); if ((_i % 16) == 0) { bson_string_append_printf (str, "%05x: ", _i); } bson_string_append_printf (str, " %02x", _v); if (isprint (_v)) { bson_string_append_printf (astr, " %c", _v); } else { bson_string_append (astr, " ."); } if ((_i % 16) == 15) { mongoc_log ( MONGOC_LOG_LEVEL_TRACE, domain, "%s %s", str->str, astr->str); bson_string_truncate (str, 0); bson_string_truncate (astr, 0); } else if ((_i % 16) == 7) { bson_string_append (str, " "); bson_string_append (astr, " "); } } } if (_i != 16) { mongoc_log ( MONGOC_LOG_LEVEL_TRACE, domain, "%-56s %s", str->str, astr->str); } bson_string_free (str, true); bson_string_free (astr, true); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-log.h0000644000076500000240000000766613572250757024263 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_LOG_H #define MONGOC_LOG_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS #ifndef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "mongoc" #endif #define MONGOC_ERROR(...) \ mongoc_log (MONGOC_LOG_LEVEL_ERROR, MONGOC_LOG_DOMAIN, __VA_ARGS__) #define MONGOC_CRITICAL(...) \ mongoc_log (MONGOC_LOG_LEVEL_CRITICAL, MONGOC_LOG_DOMAIN, __VA_ARGS__) #define MONGOC_WARNING(...) \ mongoc_log (MONGOC_LOG_LEVEL_WARNING, MONGOC_LOG_DOMAIN, __VA_ARGS__) #define MONGOC_MESSAGE(...) \ mongoc_log (MONGOC_LOG_LEVEL_MESSAGE, MONGOC_LOG_DOMAIN, __VA_ARGS__) #define MONGOC_INFO(...) \ mongoc_log (MONGOC_LOG_LEVEL_INFO, MONGOC_LOG_DOMAIN, __VA_ARGS__) #define MONGOC_DEBUG(...) \ mongoc_log (MONGOC_LOG_LEVEL_DEBUG, MONGOC_LOG_DOMAIN, __VA_ARGS__) typedef enum { MONGOC_LOG_LEVEL_ERROR, MONGOC_LOG_LEVEL_CRITICAL, MONGOC_LOG_LEVEL_WARNING, MONGOC_LOG_LEVEL_MESSAGE, MONGOC_LOG_LEVEL_INFO, MONGOC_LOG_LEVEL_DEBUG, MONGOC_LOG_LEVEL_TRACE, } mongoc_log_level_t; /** * mongoc_log_func_t: * @log_level: The level of the log message. * @log_domain: The domain of the log message, such as "client". * @message: The message generated. * @user_data: User data provided to mongoc_log_set_handler(). * * This function prototype can be used to set a custom log handler for the * libmongoc library. This is useful if you would like to show them in a * user interface or alternate storage. */ typedef void (*mongoc_log_func_t) (mongoc_log_level_t log_level, const char *log_domain, const char *message, void *user_data); /** * mongoc_log_set_handler: * @log_func: A function to handle log messages. * @user_data: User data for @log_func. * * Sets the function to be called to handle logging. */ MONGOC_EXPORT (void) mongoc_log_set_handler (mongoc_log_func_t log_func, void *user_data); /** * mongoc_log: * @log_level: The log level. * @log_domain: The log domain (such as "client"). 
* @format: The format string for the log message. * * Logs a message using the currently configured logger. * * This method will hold a logging lock to prevent concurrent calls to the * logging infrastructure. It is important that your configured log function * does not re-enter the logging system or deadlock will occur. * */ MONGOC_EXPORT (void) mongoc_log (mongoc_log_level_t log_level, const char *log_domain, const char *format, ...) BSON_GNUC_PRINTF (3, 4); MONGOC_EXPORT (void) mongoc_log_default_handler (mongoc_log_level_t log_level, const char *log_domain, const char *message, void *user_data); /** * mongoc_log_level_str: * @log_level: The log level. * * Returns: The string representation of log_level */ MONGOC_EXPORT (const char *) mongoc_log_level_str (mongoc_log_level_t log_level); /** * mongoc_log_trace_enable: * * Enables tracing at runtime (if it has been enabled at compile time). */ MONGOC_EXPORT (void) mongoc_log_trace_enable (void); /** * mongoc_log_trace_disable: * * Disables tracing at runtime (if it has been enabled at compile time). */ MONGOC_EXPORT (void) mongoc_log_trace_disable (void); BSON_END_DECLS #endif /* MONGOC_LOG_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-macros.h0000644000076500000240000000340413572250757024750 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_MACROS_H #define MONGOC_MACROS_H /* Decorate public functions: * - if MONGOC_STATIC, we're compiling a program that uses libmongoc as * a static library, don't decorate functions * - else if MONGOC_COMPILATION, we're compiling a static or shared libmongoc, * mark public functions for export from the shared lib (which has no effect * on the static lib) * - else, we're compiling a program that uses libmongoc as a shared library, * mark public functions as DLL imports for Microsoft Visual C. */ #ifdef _MSC_VER /* * Microsoft Visual C */ #ifdef MONGOC_STATIC #define MONGOC_API #elif defined(MONGOC_COMPILATION) #define MONGOC_API __declspec(dllexport) #else #define MONGOC_API __declspec(dllimport) #endif #define MONGOC_CALL __cdecl #elif defined(__GNUC__) /* * GCC */ #ifdef MONGOC_STATIC #define MONGOC_API #elif defined(MONGOC_COMPILATION) #define MONGOC_API __attribute__ ((visibility ("default"))) #else #define MONGOC_API #endif #define MONGOC_CALL #else /* * Other compilers */ #define MONGOC_API #define MONGOC_CALL #endif #define MONGOC_EXPORT(type) MONGOC_API type MONGOC_CALL #endif /* MONGOC_MACROS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-matcher-op-private.h0000644000076500000240000000672313572250757027202 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_MATCHER_OP_PRIVATE_H #define MONGOC_MATCHER_OP_PRIVATE_H #include BSON_BEGIN_DECLS typedef union _mongoc_matcher_op_t mongoc_matcher_op_t; typedef struct _mongoc_matcher_op_base_t mongoc_matcher_op_base_t; typedef struct _mongoc_matcher_op_logical_t mongoc_matcher_op_logical_t; typedef struct _mongoc_matcher_op_compare_t mongoc_matcher_op_compare_t; typedef struct _mongoc_matcher_op_exists_t mongoc_matcher_op_exists_t; typedef struct _mongoc_matcher_op_type_t mongoc_matcher_op_type_t; typedef struct _mongoc_matcher_op_not_t mongoc_matcher_op_not_t; typedef enum { MONGOC_MATCHER_OPCODE_EQ, MONGOC_MATCHER_OPCODE_GT, MONGOC_MATCHER_OPCODE_GTE, MONGOC_MATCHER_OPCODE_IN, MONGOC_MATCHER_OPCODE_LT, MONGOC_MATCHER_OPCODE_LTE, MONGOC_MATCHER_OPCODE_NE, MONGOC_MATCHER_OPCODE_NIN, MONGOC_MATCHER_OPCODE_OR, MONGOC_MATCHER_OPCODE_AND, MONGOC_MATCHER_OPCODE_NOT, MONGOC_MATCHER_OPCODE_NOR, MONGOC_MATCHER_OPCODE_EXISTS, MONGOC_MATCHER_OPCODE_TYPE, } mongoc_matcher_opcode_t; struct _mongoc_matcher_op_base_t { mongoc_matcher_opcode_t opcode; }; struct _mongoc_matcher_op_logical_t { mongoc_matcher_op_base_t base; mongoc_matcher_op_t *left; mongoc_matcher_op_t *right; }; struct _mongoc_matcher_op_compare_t { mongoc_matcher_op_base_t base; char *path; bson_iter_t iter; }; struct _mongoc_matcher_op_exists_t { mongoc_matcher_op_base_t base; char *path; bool exists; }; struct _mongoc_matcher_op_type_t { mongoc_matcher_op_base_t base; bson_type_t type; char *path; }; struct _mongoc_matcher_op_not_t { mongoc_matcher_op_base_t base; mongoc_matcher_op_t *child; char *path; }; union _mongoc_matcher_op_t { mongoc_matcher_op_base_t base; mongoc_matcher_op_logical_t logical; mongoc_matcher_op_compare_t compare; mongoc_matcher_op_exists_t exists; mongoc_matcher_op_type_t type; mongoc_matcher_op_not_t not_; }; mongoc_matcher_op_t * _mongoc_matcher_op_logical_new (mongoc_matcher_opcode_t opcode, mongoc_matcher_op_t *left, mongoc_matcher_op_t *right); mongoc_matcher_op_t * _mongoc_matcher_op_compare_new (mongoc_matcher_opcode_t opcode, const char *path, const bson_iter_t *iter); mongoc_matcher_op_t * _mongoc_matcher_op_exists_new (const char *path, bool exists); mongoc_matcher_op_t * _mongoc_matcher_op_type_new (const char *path, bson_type_t type); mongoc_matcher_op_t * _mongoc_matcher_op_not_new (const char *path, mongoc_matcher_op_t *child); bool _mongoc_matcher_op_match (mongoc_matcher_op_t *op, const bson_t *bson); void _mongoc_matcher_op_destroy (mongoc_matcher_op_t *op); void _mongoc_matcher_op_to_bson (mongoc_matcher_op_t *op, bson_t *bson); BSON_END_DECLS #endif /* MONGOC_MATCHER_OP_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-matcher-op.c0000644000076500000240000010235013572250757025516 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-matcher-op-private.h" #include "mongoc/mongoc-util-private.h" /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_exists_new -- * * Create a new op for checking {$exists: bool}. * * Returns: * A newly allocated mongoc_matcher_op_t that should be freed with * _mongoc_matcher_op_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_matcher_op_t * _mongoc_matcher_op_exists_new (const char *path, /* IN */ bool exists) /* IN */ { mongoc_matcher_op_t *op; BSON_ASSERT (path); op = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *op); op->exists.base.opcode = MONGOC_MATCHER_OPCODE_EXISTS; op->exists.path = bson_strdup (path); op->exists.exists = exists; return op; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_type_new -- * * Create a new op for checking {$type: int}. * * Returns: * A newly allocated mongoc_matcher_op_t that should be freed with * _mongoc_matcher_op_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_matcher_op_t * _mongoc_matcher_op_type_new (const char *path, /* IN */ bson_type_t type) /* IN */ { mongoc_matcher_op_t *op; BSON_ASSERT (path); BSON_ASSERT (type); op = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *op); op->type.base.opcode = MONGOC_MATCHER_OPCODE_TYPE; op->type.path = bson_strdup (path); op->type.type = type; return op; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_logical_new -- * * Create a new op for checking any of: * * {$or: []} * {$nor: []} * {$and: []} * * Returns: * A newly allocated mongoc_matcher_op_t that should be freed with * _mongoc_matcher_op_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_matcher_op_t * _mongoc_matcher_op_logical_new (mongoc_matcher_opcode_t opcode, /* IN */ mongoc_matcher_op_t *left, /* IN */ mongoc_matcher_op_t *right) /* IN */ { mongoc_matcher_op_t *op; BSON_ASSERT (left); BSON_ASSERT ((opcode >= MONGOC_MATCHER_OPCODE_OR) && (opcode <= MONGOC_MATCHER_OPCODE_NOR)); op = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *op); op->logical.base.opcode = opcode; op->logical.left = left; op->logical.right = right; return op; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_compare_new -- * * Create a new op for checking any of: * * {"abc": "def"} * {$gt: {...} * {$gte: {...} * {$lt: {...} * {$lte: {...} * {$ne: {...} * {$in: [...]} * {$nin: [...]} * * Returns: * A newly allocated mongoc_matcher_op_t that should be freed with * _mongoc_matcher_op_destroy(). * * Side effects: * None. 
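 *    As an illustrative sketch, a spec such as {"age": {"$gt": 21}} would
 *    be represented by calling this with MONGOC_MATCHER_OPCODE_GT, the
 *    path "age", and an iterator positioned on the value 21 ("age" and
 *    value_iter below are placeholder names for this example):
 *
 *       op = _mongoc_matcher_op_compare_new (MONGOC_MATCHER_OPCODE_GT,
 *                                            "age", &value_iter);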
* *-------------------------------------------------------------------------- */ mongoc_matcher_op_t * _mongoc_matcher_op_compare_new (mongoc_matcher_opcode_t opcode, /* IN */ const char *path, /* IN */ const bson_iter_t *iter) /* IN */ { mongoc_matcher_op_t *op; BSON_ASSERT (path); BSON_ASSERT (iter); op = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *op); op->compare.base.opcode = opcode; op->compare.path = bson_strdup (path); memcpy (&op->compare.iter, iter, sizeof *iter); return op; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_not_new -- * * Create a new op for checking {$not: {...}} * * Returns: * A newly allocated mongoc_matcher_op_t that should be freed with * _mongoc_matcher_op_destroy(). * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_matcher_op_t * _mongoc_matcher_op_not_new (const char *path, /* IN */ mongoc_matcher_op_t *child) /* IN */ { mongoc_matcher_op_t *op; BSON_ASSERT (path); BSON_ASSERT (child); op = (mongoc_matcher_op_t *) bson_malloc0 (sizeof *op); op->not_.base.opcode = MONGOC_MATCHER_OPCODE_NOT; op->not_.path = bson_strdup (path); op->not_.child = child; return op; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_destroy -- * * Free a mongoc_matcher_op_t structure and all children structures. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void _mongoc_matcher_op_destroy (mongoc_matcher_op_t *op) /* IN */ { BSON_ASSERT (op); switch (op->base.opcode) { case MONGOC_MATCHER_OPCODE_EQ: case MONGOC_MATCHER_OPCODE_GT: case MONGOC_MATCHER_OPCODE_GTE: case MONGOC_MATCHER_OPCODE_IN: case MONGOC_MATCHER_OPCODE_LT: case MONGOC_MATCHER_OPCODE_LTE: case MONGOC_MATCHER_OPCODE_NE: case MONGOC_MATCHER_OPCODE_NIN: bson_free (op->compare.path); break; case MONGOC_MATCHER_OPCODE_OR: case MONGOC_MATCHER_OPCODE_AND: case MONGOC_MATCHER_OPCODE_NOR: if (op->logical.left) _mongoc_matcher_op_destroy (op->logical.left); if (op->logical.right) _mongoc_matcher_op_destroy (op->logical.right); break; case MONGOC_MATCHER_OPCODE_NOT: _mongoc_matcher_op_destroy (op->not_.child); bson_free (op->not_.path); break; case MONGOC_MATCHER_OPCODE_EXISTS: bson_free (op->exists.path); break; case MONGOC_MATCHER_OPCODE_TYPE: bson_free (op->type.path); break; default: break; } bson_free (op); } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_exists_match -- * * Checks to see if @bson matches @exists requirements. The * {$exists: bool} query can be either true or fase so we must * handle false as "not exists". * * Returns: * true if the field exists and the spec expected it. * true if the field does not exist and the spec expected it to not * exist. * Otherwise, false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_exists_match (mongoc_matcher_op_exists_t *exists, /* IN */ const bson_t *bson) /* IN */ { bson_iter_t iter; bson_iter_t desc; bool found; BSON_ASSERT (exists); BSON_ASSERT (bson); found = (bson_iter_init (&iter, bson) && bson_iter_find_descendant (&iter, exists->path, &desc)); return (found == exists->exists); } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_type_match -- * * Checks if @bson matches the {$type: ...} op. 
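 *    The stored type is compared against the BSON type code of the field,
 *    so a spec like {"name": {"$type": 2}} matches documents whose "name"
 *    field holds a UTF-8 string (type code 0x02); "name" is only an
 *    illustrative field here.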
* * Returns: * true if the requested field was found and the type matched * the requested type. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_type_match (mongoc_matcher_op_type_t *type, /* IN */ const bson_t *bson) /* IN */ { bson_iter_t iter; bson_iter_t desc; BSON_ASSERT (type); BSON_ASSERT (bson); if (bson_iter_init (&iter, bson) && bson_iter_find_descendant (&iter, type->path, &desc)) { return (bson_iter_type (&iter) == type->type); } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_not_match -- * * Checks if the {$not: ...} expression matches by negating the * child expression. * * Returns: * true if the child expression returned false. * Otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_not_match (mongoc_matcher_op_not_t *not_, /* IN */ const bson_t *bson) /* IN */ { BSON_ASSERT (not_); BSON_ASSERT (bson); return !_mongoc_matcher_op_match (not_->child, bson); } #define _TYPE_CODE(l, r) ((((int) (l)) << 8) | ((int) (r))) #define _NATIVE_COMPARE(op, t1, t2) \ (bson_iter##t2 (iter) op bson_iter##t1 (compare_iter)) #define _EQ_COMPARE(t1, t2) _NATIVE_COMPARE (==, t1, t2) #define _NE_COMPARE(t1, t2) _NATIVE_COMPARE (!=, t1, t2) #define _GT_COMPARE(t1, t2) _NATIVE_COMPARE (>, t1, t2) #define _GTE_COMPARE(t1, t2) _NATIVE_COMPARE (>=, t1, t2) #define _LT_COMPARE(t1, t2) _NATIVE_COMPARE (<, t1, t2) #define _LTE_COMPARE(t1, t2) _NATIVE_COMPARE (<=, t1, t2) /* *-------------------------------------------------------------------------- * * _mongoc_matcher_iter_eq_match -- * * Performs equality match for all types on either left or right * side of the equation. * * We try to default to what the compiler would do for comparing * things like integers. Therefore, we just have MACRO'tized * everything so that the compiler sees the native values. (Such * as (double == int64). * * The _TYPE_CODE() stuff allows us to shove the type of the left * and the right into a single integer and then do a jump table * with a switch/case for all our supported types. * * I imagine a bunch more of these will need to be added, so feel * free to submit patches. * * Returns: * true if the equality match succeeded. * * Side effects: * None. 
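 *    _TYPE_CODE(l, r) packs both BSON type codes into one integer,
 *    ((l << 8) | r), so a case label such as
 *    _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32) selects the
 *    (double == int32) comparison in the switch below.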
* *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_iter_eq_match (bson_iter_t *compare_iter, /* IN */ bson_iter_t *iter) /* IN */ { int code; BSON_ASSERT (compare_iter); BSON_ASSERT (iter); code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter)); switch (code) { /* Double on Left Side */ case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE): return _EQ_COMPARE (_double, _double); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL): return _EQ_COMPARE (_double, _bool); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32): return _EQ_COMPARE (_double, _int32); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64): return _EQ_COMPARE (_double, _int64); /* UTF8 on Left Side */ case _TYPE_CODE (BSON_TYPE_UTF8, BSON_TYPE_UTF8): { uint32_t llen; uint32_t rlen; const char *lstr; const char *rstr; lstr = bson_iter_utf8 (compare_iter, &llen); rstr = bson_iter_utf8 (iter, &rlen); return ((llen == rlen) && (0 == memcmp (lstr, rstr, llen))); } /* Int32 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE): return _EQ_COMPARE (_int32, _double); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL): return _EQ_COMPARE (_int32, _bool); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32): return _EQ_COMPARE (_int32, _int32); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64): return _EQ_COMPARE (_int32, _int64); /* Int64 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE): return _EQ_COMPARE (_int64, _double); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL): return _EQ_COMPARE (_int64, _bool); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32): return _EQ_COMPARE (_int64, _int32); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64): return _EQ_COMPARE (_int64, _int64); /* Null on Left Side */ case _TYPE_CODE (BSON_TYPE_NULL, BSON_TYPE_NULL): case _TYPE_CODE (BSON_TYPE_NULL, BSON_TYPE_UNDEFINED): return true; case _TYPE_CODE (BSON_TYPE_ARRAY, BSON_TYPE_ARRAY): { bson_iter_t left_array; bson_iter_t right_array; BSON_ASSERT (bson_iter_recurse (compare_iter, &left_array)); BSON_ASSERT (bson_iter_recurse (iter, &right_array)); while (true) { bool left_has_next = bson_iter_next (&left_array); bool right_has_next = bson_iter_next (&right_array); if (left_has_next != right_has_next) { /* different lengths */ return false; } if (!left_has_next) { /* finished */ return true; } if (!_mongoc_matcher_iter_eq_match (&left_array, &right_array)) { return false; } } } case _TYPE_CODE (BSON_TYPE_DOCUMENT, BSON_TYPE_DOCUMENT): { uint32_t llen; uint32_t rlen; const uint8_t *ldoc; const uint8_t *rdoc; bson_iter_document (compare_iter, &llen, &ldoc); bson_iter_document (iter, &rlen, &rdoc); return ((llen == rlen) && (0 == memcmp (ldoc, rdoc, llen))); } default: return false; } } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_eq_match -- * * Performs equality match for all types on either left or right * side of the equation. * * Returns: * true if the equality match succeeded. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_eq_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { BSON_ASSERT (compare); BSON_ASSERT (iter); return _mongoc_matcher_iter_eq_match (&compare->iter, iter); } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_gt_match -- * * Perform {$gt: ...} match using @compare. 
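 *    For example, a spec of {"count": {"$gt": 10}} compares the document's
 *    "count" field against 10 using the native C comparison for the two
 *    BSON types involved ("count" is just an illustrative field name).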
* * In general, we try to default to what the compiler would do * for comparison between different types. * * Returns: * true if the document field was > the spec value. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_gt_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { int code; bson_iter_t *compare_iter = &compare->iter; BSON_ASSERT (compare); BSON_ASSERT (iter); code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter)); switch (code) { /* Double on Left Side */ case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE): return _GT_COMPARE (_double, _double); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL): return _GT_COMPARE (_double, _bool); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32): return _GT_COMPARE (_double, _int32); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64): return _GT_COMPARE (_double, _int64); /* Int32 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE): return _GT_COMPARE (_int32, _double); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL): return _GT_COMPARE (_int32, _bool); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32): return _GT_COMPARE (_int32, _int32); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64): return _GT_COMPARE (_int32, _int64); /* Int64 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE): return _GT_COMPARE (_int64, _double); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL): return _GT_COMPARE (_int64, _bool); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32): return _GT_COMPARE (_int64, _int32); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64): return _GT_COMPARE (_int64, _int64); default: MONGOC_WARNING ("Implement for (Type(%d) > Type(%d))", bson_iter_type (compare_iter), bson_iter_type (iter)); break; } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_gte_match -- * * Perform a match of {"path": {"$gte": value}}. * * Returns: * true if the spec matches, otherwise false. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_gte_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { bson_iter_t *compare_iter; int code; BSON_ASSERT (compare); BSON_ASSERT (iter); compare_iter = &compare->iter; code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter)); switch (code) { /* Double on Left Side */ case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE): return _GTE_COMPARE (_double, _double); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL): return _GTE_COMPARE (_double, _bool); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32): return _GTE_COMPARE (_double, _int32); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64): return _GTE_COMPARE (_double, _int64); /* Int32 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE): return _GTE_COMPARE (_int32, _double); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL): return _GTE_COMPARE (_int32, _bool); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32): return _GTE_COMPARE (_int32, _int32); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64): return _GTE_COMPARE (_int32, _int64); /* Int64 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE): return _GTE_COMPARE (_int64, _double); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL): return _GTE_COMPARE (_int64, _bool); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32): return _GTE_COMPARE (_int64, _int32); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64): return _GTE_COMPARE (_int64, _int64); default: MONGOC_WARNING ("Implement for (Type(%d) >= Type(%d))", bson_iter_type (compare_iter), bson_iter_type (iter)); break; } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_in_match -- * * Checks the spec {"path": {"$in": [value1, value2, ...]}}. * * Returns: * true if the spec matched, otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_in_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { mongoc_matcher_op_compare_t op; op.base.opcode = MONGOC_MATCHER_OPCODE_EQ; op.path = compare->path; if (!BSON_ITER_HOLDS_ARRAY (&compare->iter) || !bson_iter_recurse (&compare->iter, &op.iter)) { return false; } while (bson_iter_next (&op.iter)) { if (_mongoc_matcher_op_eq_match (&op, iter)) { return true; } } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_lt_match -- * * Perform a {"path": "$lt": {value}} match. * * Returns: * true if the spec matched, otherwise false. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_lt_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { bson_iter_t *compare_iter; int code; BSON_ASSERT (compare); BSON_ASSERT (iter); compare_iter = &compare->iter; code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter)); switch (code) { /* Double on Left Side */ case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE): return _LT_COMPARE (_double, _double); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL): return _LT_COMPARE (_double, _bool); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32): return _LT_COMPARE (_double, _int32); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64): return _LT_COMPARE (_double, _int64); /* Int32 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE): return _LT_COMPARE (_int32, _double); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL): return _LT_COMPARE (_int32, _bool); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32): return _LT_COMPARE (_int32, _int32); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64): return _LT_COMPARE (_int32, _int64); /* Int64 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE): return _LT_COMPARE (_int64, _double); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL): return _LT_COMPARE (_int64, _bool); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32): return _LT_COMPARE (_int64, _int32); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64): return _LT_COMPARE (_int64, _int64); default: MONGOC_WARNING ("Implement for (Type(%d) < Type(%d))", bson_iter_type (compare_iter), bson_iter_type (iter)); break; } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_lte_match -- * * Perform a {"$path": {"$lte": value}} match. * * Returns: * true if the spec matched, otherwise false. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_lte_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { bson_iter_t *compare_iter; int code; BSON_ASSERT (compare); BSON_ASSERT (iter); compare_iter = &compare->iter; code = _TYPE_CODE (bson_iter_type (compare_iter), bson_iter_type (iter)); switch (code) { /* Double on Left Side */ case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_DOUBLE): return _LTE_COMPARE (_double, _double); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_BOOL): return _LTE_COMPARE (_double, _bool); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT32): return _LTE_COMPARE (_double, _int32); case _TYPE_CODE (BSON_TYPE_DOUBLE, BSON_TYPE_INT64): return _LTE_COMPARE (_double, _int64); /* Int32 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_DOUBLE): return _LTE_COMPARE (_int32, _double); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_BOOL): return _LTE_COMPARE (_int32, _bool); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT32): return _LTE_COMPARE (_int32, _int32); case _TYPE_CODE (BSON_TYPE_INT32, BSON_TYPE_INT64): return _LTE_COMPARE (_int32, _int64); /* Int64 on Left Side */ case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_DOUBLE): return _LTE_COMPARE (_int64, _double); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_BOOL): return _LTE_COMPARE (_int64, _bool); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT32): return _LTE_COMPARE (_int64, _int32); case _TYPE_CODE (BSON_TYPE_INT64, BSON_TYPE_INT64): return _LTE_COMPARE (_int64, _int64); default: MONGOC_WARNING ("Implement for (Type(%d) <= Type(%d))", bson_iter_type (compare_iter), bson_iter_type (iter)); break; } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_ne_match -- * * Perform a {"path": {"$ne": value}} match. * * Returns: * true if the field "path" was not found or the value is not-equal * to value. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_ne_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { return !_mongoc_matcher_op_eq_match (compare, iter); } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_nin_match -- * * Perform a {"path": {"$nin": value}} match. * * Returns: * true if value was not found in the array at "path". * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_nin_match (mongoc_matcher_op_compare_t *compare, /* IN */ bson_iter_t *iter) /* IN */ { return !_mongoc_matcher_op_in_match (compare, iter); } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_compare_match -- * * Dispatch function for mongoc_matcher_op_compare_t operations * to perform a match. * * Returns: * Opcode dependent. * * Side effects: * None. 
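 *    The stored path may be dotted: a compare op with path "address.city"
 *    is resolved via bson_iter_find_descendant() before the opcode
 *    specific match runs ("address.city" is only an example path).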
* *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_compare_match (mongoc_matcher_op_compare_t *compare, /* IN */ const bson_t *bson) /* IN */ { bson_iter_t tmp; bson_iter_t iter; BSON_ASSERT (compare); BSON_ASSERT (bson); if (strchr (compare->path, '.')) { if (!bson_iter_init (&tmp, bson) || !bson_iter_find_descendant (&tmp, compare->path, &iter)) { return false; } } else if (!bson_iter_init_find (&iter, bson, compare->path)) { return false; } switch ((int) compare->base.opcode) { case MONGOC_MATCHER_OPCODE_EQ: return _mongoc_matcher_op_eq_match (compare, &iter); case MONGOC_MATCHER_OPCODE_GT: return _mongoc_matcher_op_gt_match (compare, &iter); case MONGOC_MATCHER_OPCODE_GTE: return _mongoc_matcher_op_gte_match (compare, &iter); case MONGOC_MATCHER_OPCODE_IN: return _mongoc_matcher_op_in_match (compare, &iter); case MONGOC_MATCHER_OPCODE_LT: return _mongoc_matcher_op_lt_match (compare, &iter); case MONGOC_MATCHER_OPCODE_LTE: return _mongoc_matcher_op_lte_match (compare, &iter); case MONGOC_MATCHER_OPCODE_NE: return _mongoc_matcher_op_ne_match (compare, &iter); case MONGOC_MATCHER_OPCODE_NIN: return _mongoc_matcher_op_nin_match (compare, &iter); default: BSON_ASSERT (false); break; } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_logical_match -- * * Dispatch function for mongoc_matcher_op_logical_t operations * to perform a match. * * Returns: * Opcode specific. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_matcher_op_logical_match (mongoc_matcher_op_logical_t *logical, /* IN */ const bson_t *bson) /* IN */ { BSON_ASSERT (logical); BSON_ASSERT (bson); switch ((int) logical->base.opcode) { case MONGOC_MATCHER_OPCODE_OR: return (_mongoc_matcher_op_match (logical->left, bson) || _mongoc_matcher_op_match (logical->right, bson)); case MONGOC_MATCHER_OPCODE_AND: return (_mongoc_matcher_op_match (logical->left, bson) && _mongoc_matcher_op_match (logical->right, bson)); case MONGOC_MATCHER_OPCODE_NOR: return !(_mongoc_matcher_op_match (logical->left, bson) || _mongoc_matcher_op_match (logical->right, bson)); default: BSON_ASSERT (false); break; } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_match -- * * Dispatch function for all operation types to perform a match. * * Returns: * Opcode specific. * * Side effects: * None. 
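 *    This is the entry point used by mongoc_matcher_match(); it walks the
 *    optree recursively, delegating to the compare, logical, $not, $exists
 *    and $type handlers according to op->base.opcode.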
* *-------------------------------------------------------------------------- */ bool _mongoc_matcher_op_match (mongoc_matcher_op_t *op, /* IN */ const bson_t *bson) /* IN */ { BSON_ASSERT (op); BSON_ASSERT (bson); switch (op->base.opcode) { case MONGOC_MATCHER_OPCODE_EQ: case MONGOC_MATCHER_OPCODE_GT: case MONGOC_MATCHER_OPCODE_GTE: case MONGOC_MATCHER_OPCODE_IN: case MONGOC_MATCHER_OPCODE_LT: case MONGOC_MATCHER_OPCODE_LTE: case MONGOC_MATCHER_OPCODE_NE: case MONGOC_MATCHER_OPCODE_NIN: return _mongoc_matcher_op_compare_match (&op->compare, bson); case MONGOC_MATCHER_OPCODE_OR: case MONGOC_MATCHER_OPCODE_AND: case MONGOC_MATCHER_OPCODE_NOR: return _mongoc_matcher_op_logical_match (&op->logical, bson); case MONGOC_MATCHER_OPCODE_NOT: return _mongoc_matcher_op_not_match (&op->not_, bson); case MONGOC_MATCHER_OPCODE_EXISTS: return _mongoc_matcher_op_exists_match (&op->exists, bson); case MONGOC_MATCHER_OPCODE_TYPE: return _mongoc_matcher_op_type_match (&op->type, bson); default: break; } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_op_to_bson -- * * Convert the optree specified by @op to a bson document similar * to what the query would have been. This is not perfectly the * same, and so should not be used as such. * * Returns: * None. * * Side effects: * @bson is appended to, and therefore must be initialized before * calling this function. * *-------------------------------------------------------------------------- */ void _mongoc_matcher_op_to_bson (mongoc_matcher_op_t *op, /* IN */ bson_t *bson) /* IN */ { const char *str; bson_t child; bson_t child2; BSON_ASSERT (op); BSON_ASSERT (bson); switch (op->base.opcode) { case MONGOC_MATCHER_OPCODE_EQ: (void) bson_append_iter (bson, op->compare.path, -1, &op->compare.iter); break; case MONGOC_MATCHER_OPCODE_GT: case MONGOC_MATCHER_OPCODE_GTE: case MONGOC_MATCHER_OPCODE_IN: case MONGOC_MATCHER_OPCODE_LT: case MONGOC_MATCHER_OPCODE_LTE: case MONGOC_MATCHER_OPCODE_NE: case MONGOC_MATCHER_OPCODE_NIN: switch ((int) op->base.opcode) { case MONGOC_MATCHER_OPCODE_GT: str = "$gt"; break; case MONGOC_MATCHER_OPCODE_GTE: str = "$gte"; break; case MONGOC_MATCHER_OPCODE_IN: str = "$in"; break; case MONGOC_MATCHER_OPCODE_LT: str = "$lt"; break; case MONGOC_MATCHER_OPCODE_LTE: str = "$lte"; break; case MONGOC_MATCHER_OPCODE_NE: str = "$ne"; break; case MONGOC_MATCHER_OPCODE_NIN: str = "$nin"; break; default: str = "???"; break; } if (bson_append_document_begin (bson, op->compare.path, -1, &child)) { (void) bson_append_iter (&child, str, -1, &op->compare.iter); bson_append_document_end (bson, &child); } break; case MONGOC_MATCHER_OPCODE_OR: case MONGOC_MATCHER_OPCODE_AND: case MONGOC_MATCHER_OPCODE_NOR: if (op->base.opcode == MONGOC_MATCHER_OPCODE_OR) { str = "$or"; } else if (op->base.opcode == MONGOC_MATCHER_OPCODE_AND) { str = "$and"; } else if (op->base.opcode == MONGOC_MATCHER_OPCODE_NOR) { str = "$nor"; } else { BSON_ASSERT (false); str = NULL; } bson_append_array_begin (bson, str, -1, &child); bson_append_document_begin (&child, "0", 1, &child2); _mongoc_matcher_op_to_bson (op->logical.left, &child2); bson_append_document_end (&child, &child2); if (op->logical.right) { bson_append_document_begin (&child, "1", 1, &child2); _mongoc_matcher_op_to_bson (op->logical.right, &child2); bson_append_document_end (&child, &child2); } bson_append_array_end (bson, &child); break; case MONGOC_MATCHER_OPCODE_NOT: bson_append_document_begin (bson, op->not_.path, -1, &child); 
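      /* Rebuild {"<path>": {"$not": {<child spec>}}}: the child expression
       * is serialized into the inner "$not" document opened below. */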
bson_append_document_begin (&child, "$not", 4, &child2); _mongoc_matcher_op_to_bson (op->not_.child, &child2); bson_append_document_end (&child, &child2); bson_append_document_end (bson, &child); break; case MONGOC_MATCHER_OPCODE_EXISTS: BSON_APPEND_BOOL (bson, "$exists", op->exists.exists); break; case MONGOC_MATCHER_OPCODE_TYPE: BSON_APPEND_INT32 (bson, "$type", (int) op->type.type); break; default: BSON_ASSERT (false); break; } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-matcher-private.h0000644000076500000240000000163613572250757026564 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_MATCHER_PRIVATE_H #define MONGOC_MATCHER_PRIVATE_H #include #include "mongoc/mongoc-matcher-op-private.h" BSON_BEGIN_DECLS struct _mongoc_matcher_t { bson_t query; mongoc_matcher_op_t *optree; }; BSON_END_DECLS #endif /* MONGOC_MATCHER_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-matcher.c0000644000076500000240000003025113572250757025102 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-matcher.h" #include "mongoc/mongoc-matcher-private.h" #include "mongoc/mongoc-matcher-op-private.h" static mongoc_matcher_op_t * _mongoc_matcher_parse_logical (mongoc_matcher_opcode_t opcode, bson_iter_t *iter, bool is_root, bson_error_t *error); /* *-------------------------------------------------------------------------- * * _mongoc_matcher_parse_compare -- * * Parse a compare spec such as $gt or $in. * * See the following link for more information. * * http://docs.mongodb.org/manual/reference/operator/query/ * * Returns: * A newly allocated mongoc_matcher_op_t if successful; otherwise * NULL and @error is set. * * Side effects: * @error may be set. 
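 *    A bare value such as {"name": "alice"} is parsed as an implicit
 *    equality match, while a sub-document whose first key starts with '$'
 *    selects the corresponding operator ("name" and "alice" are
 *    illustrative only).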
* *-------------------------------------------------------------------------- */ static mongoc_matcher_op_t * _mongoc_matcher_parse_compare (bson_iter_t *iter, /* IN */ const char *path, /* IN */ bson_error_t *error) /* OUT */ { const char *key; mongoc_matcher_op_t *op = NULL, *op_child; bson_iter_t child; BSON_ASSERT (iter); BSON_ASSERT (path); if (bson_iter_type (iter) == BSON_TYPE_DOCUMENT) { if (!bson_iter_recurse (iter, &child) || !bson_iter_next (&child)) { bson_set_error (error, MONGOC_ERROR_MATCHER, MONGOC_ERROR_MATCHER_INVALID, "Document contains no operations."); return NULL; } key = bson_iter_key (&child); if (key[0] != '$') { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_EQ, path, iter); } else if (strcmp (key, "$not") == 0) { if (!(op_child = _mongoc_matcher_parse_compare (&child, path, error))) { return NULL; } op = _mongoc_matcher_op_not_new (path, op_child); } else if (strcmp (key, "$gt") == 0) { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_GT, path, &child); } else if (strcmp (key, "$gte") == 0) { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_GTE, path, &child); } else if (strcmp (key, "$in") == 0) { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_IN, path, &child); } else if (strcmp (key, "$lt") == 0) { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_LT, path, &child); } else if (strcmp (key, "$lte") == 0) { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_LTE, path, &child); } else if (strcmp (key, "$ne") == 0) { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_NE, path, &child); } else if (strcmp (key, "$nin") == 0) { op = _mongoc_matcher_op_compare_new ( MONGOC_MATCHER_OPCODE_NIN, path, &child); } else if (strcmp (key, "$exists") == 0) { op = _mongoc_matcher_op_exists_new (path, bson_iter_bool (&child)); } else if (strcmp (key, "$type") == 0) { op = _mongoc_matcher_op_type_new (path, bson_iter_type (&child)); } else { bson_set_error (error, MONGOC_ERROR_MATCHER, MONGOC_ERROR_MATCHER_INVALID, "Invalid operator \"%s\"", key); return NULL; } } else { op = _mongoc_matcher_op_compare_new (MONGOC_MATCHER_OPCODE_EQ, path, iter); } BSON_ASSERT (op); return op; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_parse -- * * Parse a query spec observed by the current key of @iter. * * Returns: * A newly allocated mongoc_matcher_op_t if successful; otherwise * NULL an @error is set. * * Side effects: * @error may be set. 
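 *    Keys that do not start with '$' are handed to
 *    _mongoc_matcher_parse_compare(); the logical operators $or, $and and
 *    $nor must hold an array and are parsed recursively by
 *    _mongoc_matcher_parse_logical().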
* *-------------------------------------------------------------------------- */ static mongoc_matcher_op_t * _mongoc_matcher_parse (bson_iter_t *iter, /* IN */ bson_error_t *error) /* OUT */ { bson_iter_t child; const char *key; BSON_ASSERT (iter); key = bson_iter_key (iter); if (*key != '$') { return _mongoc_matcher_parse_compare (iter, key, error); } else { BSON_ASSERT (bson_iter_type (iter) == BSON_TYPE_ARRAY); if (!bson_iter_recurse (iter, &child)) { bson_set_error (error, MONGOC_ERROR_MATCHER, MONGOC_ERROR_MATCHER_INVALID, "Invalid value for operator \"%s\"", key); return NULL; } if (strcmp (key, "$or") == 0) { return _mongoc_matcher_parse_logical ( MONGOC_MATCHER_OPCODE_OR, &child, false, error); } else if (strcmp (key, "$and") == 0) { return _mongoc_matcher_parse_logical ( MONGOC_MATCHER_OPCODE_AND, &child, false, error); } else if (strcmp (key, "$nor") == 0) { return _mongoc_matcher_parse_logical ( MONGOC_MATCHER_OPCODE_NOR, &child, false, error); } } bson_set_error (error, MONGOC_ERROR_MATCHER, MONGOC_ERROR_MATCHER_INVALID, "Invalid operator \"%s\"", key); return NULL; } /* *-------------------------------------------------------------------------- * * _mongoc_matcher_parse_logical -- * * Parse a query spec containing a logical operator such as * $or, $and, $not, and $nor. * * See the following link for more information. * * http://docs.mongodb.org/manual/reference/operator/query/ * * Returns: * A newly allocated mongoc_matcher_op_t if successful; otherwise * NULL and @error is set. * * Side effects: * @error may be set. * *-------------------------------------------------------------------------- */ static mongoc_matcher_op_t * _mongoc_matcher_parse_logical (mongoc_matcher_opcode_t opcode, /* IN */ bson_iter_t *iter, /* IN */ bool is_root, /* IN */ bson_error_t *error) /* OUT */ { mongoc_matcher_op_t *left; mongoc_matcher_op_t *right; mongoc_matcher_op_t *more; mongoc_matcher_op_t *more_wrap; bson_iter_t child; BSON_ASSERT (opcode); BSON_ASSERT (iter); BSON_ASSERT (iter); if (!bson_iter_next (iter)) { bson_set_error (error, MONGOC_ERROR_MATCHER, MONGOC_ERROR_MATCHER_INVALID, "Invalid logical operator."); return NULL; } if (is_root) { if (!(left = _mongoc_matcher_parse (iter, error))) { return NULL; } } else { if (!BSON_ITER_HOLDS_DOCUMENT (iter)) { bson_set_error (error, MONGOC_ERROR_MATCHER, MONGOC_ERROR_MATCHER_INVALID, "Expected document in value."); return NULL; } if (!bson_iter_recurse (iter, &child)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "corrupt BSON"); return NULL; } if (!bson_iter_next (&child)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "corrupt BSON"); return NULL; } if (!(left = _mongoc_matcher_parse (&child, error))) { return NULL; } } if (!bson_iter_next (iter)) { return left; } if (is_root) { if (!(right = _mongoc_matcher_parse (iter, error))) { return NULL; } } else { if (!BSON_ITER_HOLDS_DOCUMENT (iter)) { bson_set_error (error, MONGOC_ERROR_MATCHER, MONGOC_ERROR_MATCHER_INVALID, "Expected document in value."); return NULL; } if (!bson_iter_recurse (iter, &child)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "bson_iter_recurse failed."); return NULL; } if (!bson_iter_next (&child)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "corrupt BSON"); return NULL; } if (!(right = _mongoc_matcher_parse (&child, error))) { return NULL; } } more = _mongoc_matcher_parse_logical (opcode, iter, is_root, error); if (more) { more_wrap = _mongoc_matcher_op_logical_new 
(opcode, right, more); return _mongoc_matcher_op_logical_new (opcode, left, more_wrap); } return _mongoc_matcher_op_logical_new (opcode, left, right); } /* *-------------------------------------------------------------------------- * * mongoc_matcher_new -- * * Create a new mongoc_matcher_t using the query specification * provided in @query. * * This will build an operation tree that can be applied to arbitrary * bson documents using mongoc_matcher_match(). * * Returns: * A newly allocated mongoc_matcher_t if successful; otherwise NULL * and @error is set. * * The mongoc_matcher_t should be freed with * mongoc_matcher_destroy(). * * Side effects: * @error may be set. * *-------------------------------------------------------------------------- */ mongoc_matcher_t * mongoc_matcher_new (const bson_t *query, /* IN */ bson_error_t *error) /* OUT */ { mongoc_matcher_op_t *op; mongoc_matcher_t *matcher; bson_iter_t iter; BSON_ASSERT (query); matcher = (mongoc_matcher_t *) bson_malloc0 (sizeof *matcher); bson_copy_to (query, &matcher->query); if (!bson_iter_init (&iter, &matcher->query)) { goto failure; } if (!(op = _mongoc_matcher_parse_logical ( MONGOC_MATCHER_OPCODE_AND, &iter, true, error))) { goto failure; } matcher->optree = op; return matcher; failure: bson_destroy (&matcher->query); bson_free (matcher); return NULL; } /* *-------------------------------------------------------------------------- * * mongoc_matcher_match -- * * Checks to see if @bson matches the query specified when creating * @matcher. * * Returns: * TRUE if @bson matched the query, otherwise FALSE. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool mongoc_matcher_match (const mongoc_matcher_t *matcher, /* IN */ const bson_t *document) /* IN */ { BSON_ASSERT (matcher); BSON_ASSERT (matcher->optree); BSON_ASSERT (document); return _mongoc_matcher_op_match (matcher->optree, document); } /* *-------------------------------------------------------------------------- * * mongoc_matcher_destroy -- * * Release all resources associated with @matcher. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_matcher_destroy (mongoc_matcher_t *matcher) /* IN */ { BSON_ASSERT (matcher); _mongoc_matcher_op_destroy (matcher->optree); bson_destroy (&matcher->query); bson_free (matcher); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-matcher.h0000644000076500000240000000233013572250757025104 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_MATCHER_H #define MONGOC_MATCHER_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS typedef struct _mongoc_matcher_t mongoc_matcher_t; MONGOC_EXPORT (mongoc_matcher_t *) mongoc_matcher_new (const bson_t *query, bson_error_t *error) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (bool) mongoc_matcher_match (const mongoc_matcher_t *matcher, const bson_t *document) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (void) mongoc_matcher_destroy (mongoc_matcher_t *matcher) BSON_GNUC_DEPRECATED; BSON_END_DECLS #endif /* MONGOC_MATCHER_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-memcmp-private.h0000644000076500000240000000211713572250757026412 0ustar alcaeusstaff/* * Copyright 2015 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_MEMCMP_PRIVATE_H #define MONGOC_MEMCMP_PRIVATE_H #include #include "mongoc/mongoc-config.h" /* WARNING: mongoc_memcmp() must be used to verify if two secret keys * are equal, in constant time. * It returns 0 if the keys are equal, and -1 if they differ. * This function is not designed for lexicographical comparisons. */ int mongoc_memcmp (const void *const b1, const void *const b2, size_t len); #endif /* MONGOC_MEMCMP_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-memcmp.c0000644000076500000240000000206113572250757024733 0ustar alcaeusstaff/* * Copyright (c) 2013-2015 * Frank Denis * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "mongoc/mongoc-memcmp-private.h" int mongoc_memcmp (const void *const b1, const void *const b2, size_t len) { const unsigned char *p1 = b1, *p2 = b2; int ret = 0; for (; len > 0; len--) { ret |= *p1++ ^ *p2++; } return ret ? 1 : 0; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-opcode.h0000644000076500000240000000211313572250757024731 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_OPCODE_H #define MONGOC_OPCODE_H #include BSON_BEGIN_DECLS typedef enum { MONGOC_OPCODE_REPLY = 1, MONGOC_OPCODE_UPDATE = 2001, MONGOC_OPCODE_INSERT = 2002, MONGOC_OPCODE_QUERY = 2004, MONGOC_OPCODE_GET_MORE = 2005, MONGOC_OPCODE_DELETE = 2006, MONGOC_OPCODE_KILL_CURSORS = 2007, MONGOC_OPCODE_COMPRESSED = 2012, MONGOC_OPCODE_MSG = 2013, } mongoc_opcode_t; BSON_END_DECLS #endif /* MONGOC_OPCODE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-openssl-private.h0000644000076500000240000000236713572250757026626 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_OPENSSL_PRIVATE_H #define MONGOC_OPENSSL_PRIVATE_H #include #include #include #include #include "mongoc/mongoc-ssl.h" BSON_BEGIN_DECLS bool _mongoc_openssl_check_cert (SSL *ssl, const char *host, bool allow_invalid_hostname); SSL_CTX * _mongoc_openssl_ctx_new (mongoc_ssl_opt_t *opt); char * _mongoc_openssl_extract_subject (const char *filename, const char *passphrase); void _mongoc_openssl_init (void); void _mongoc_openssl_cleanup (void); BSON_END_DECLS #endif /* MONGOC_OPENSSL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-openssl.c0000644000076500000240000004354713572250757025156 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_OPENSSL #include #include #include #include #include #include #include #include #include "mongoc/mongoc-init.h" #include "mongoc/mongoc-socket.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-openssl-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-thread-private.h" #include "mongoc/mongoc-util-private.h" #ifdef _WIN32 #include #endif #if OPENSSL_VERSION_NUMBER < 0x10100000L static bson_mutex_t *gMongocOpenSslThreadLocks; static void _mongoc_openssl_thread_startup (void); static void _mongoc_openssl_thread_cleanup (void); #endif #ifndef MONGOC_HAVE_ASN1_STRING_GET0_DATA #define ASN1_STRING_get0_data ASN1_STRING_data #endif /** * _mongoc_openssl_init: * * initialization function for SSL * * This needs to get called early on and is not threadsafe. Called by * mongoc_init. 
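 * Applications normally reach this indirectly by calling mongoc_init()
 * exactly once at program start-up (and mongoc_cleanup() once at
 * shutdown) rather than invoking it directly.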
*/ void _mongoc_openssl_init (void) { SSL_CTX *ctx; SSL_library_init (); SSL_load_error_strings (); ERR_load_BIO_strings (); OpenSSL_add_all_algorithms (); #if OPENSSL_VERSION_NUMBER < 0x10100000L _mongoc_openssl_thread_startup (); #endif ctx = SSL_CTX_new (SSLv23_method ()); if (!ctx) { MONGOC_ERROR ("Failed to initialize OpenSSL."); } SSL_CTX_free (ctx); } void _mongoc_openssl_cleanup (void) { #if OPENSSL_VERSION_NUMBER < 0x10100000L _mongoc_openssl_thread_cleanup (); #endif } static int _mongoc_openssl_password_cb (char *buf, int num, int rwflag, void *user_data) { char *pass = (char *) user_data; int pass_len = (int) strlen (pass); if (num < pass_len + 1) { return 0; } bson_strncpy (buf, pass, num); return pass_len; } #ifdef _WIN32 bool _mongoc_openssl_import_cert_store (LPWSTR store_name, DWORD dwFlags, X509_STORE *openssl_store) { PCCERT_CONTEXT cert = NULL; HCERTSTORE cert_store; cert_store = CertOpenStore ( CERT_STORE_PROV_SYSTEM, /* provider */ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, /* certificate encoding */ 0, /* unused */ dwFlags, /* dwFlags */ store_name); /* system store name. "My" or "Root" */ if (cert_store == NULL) { LPTSTR msg = NULL; FormatMessage (FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ARGUMENT_ARRAY, NULL, GetLastError (), LANG_NEUTRAL, (LPTSTR) &msg, 0, NULL); MONGOC_ERROR ("Can't open CA store: 0x%.8X: '%s'", GetLastError (), msg); LocalFree (msg); return false; } while ((cert = CertEnumCertificatesInStore (cert_store, cert)) != NULL) { X509 *x509Obj = d2i_X509 (NULL, (const unsigned char **) &cert->pbCertEncoded, cert->cbCertEncoded); if (x509Obj == NULL) { MONGOC_WARNING ( "Error parsing X509 object from Windows certificate store"); continue; } X509_STORE_add_cert (openssl_store, x509Obj); X509_free (x509Obj); } CertCloseStore (cert_store, 0); return true; } bool _mongoc_openssl_import_cert_stores (SSL_CTX *context) { bool retval; X509_STORE *store = SSL_CTX_get_cert_store (context); if (!store) { MONGOC_WARNING ("no X509 store found for SSL context while loading " "system certificates"); return false; } retval = _mongoc_openssl_import_cert_store (L"root", CERT_SYSTEM_STORE_CURRENT_USER | CERT_STORE_READONLY_FLAG, store); retval &= _mongoc_openssl_import_cert_store ( L"CA", CERT_SYSTEM_STORE_CURRENT_USER | CERT_STORE_READONLY_FLAG, store); return retval; } #endif /** mongoc_openssl_hostcheck * * rfc 6125 match a given hostname against a given pattern * * Patterns come from DNS common names or subjectAltNames. 
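 *
 * For example, the pattern "*.example.com" matches "www.example.com" but not
 * "example.com" or "a.b.example.com": the wildcard must cover at least one
 * character and only ever spans the left-most label. Wildcarding is also
 * skipped when the pattern has fewer than two dots or is an IDNA A-label
 * ("xn--..."), in which case a plain case-insensitive comparison is used.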
* * This code is meant to implement RFC 6125 6.4.[1-3] * */ static bool _mongoc_openssl_hostcheck (const char *pattern, const char *hostname) { const char *pattern_label_end; const char *pattern_wildcard; const char *hostname_label_end; size_t prefixlen; size_t suffixlen; TRACE ("Comparing '%s' == '%s'", pattern, hostname); pattern_wildcard = strchr (pattern, '*'); if (pattern_wildcard == NULL) { return strcasecmp (pattern, hostname) == 0; } pattern_label_end = strchr (pattern, '.'); /* Bail out on wildcarding in a couple of situations: * o we don't have 2 dots - we're not going to wildcard root tlds * o the wildcard isn't in the left most group (separated by dots) * o the pattern is embedded in an A-label or U-label */ if (pattern_label_end == NULL || strchr (pattern_label_end + 1, '.') == NULL || pattern_wildcard > pattern_label_end || strncasecmp (pattern, "xn--", 4) == 0) { return strcasecmp (pattern, hostname) == 0; } hostname_label_end = strchr (hostname, '.'); /* we know we have a dot in the pattern, we need one in the hostname */ if (hostname_label_end == NULL || strcasecmp (pattern_label_end, hostname_label_end)) { return 0; } /* The wildcard must match at least one character, so the left part of the * hostname is at least as large as the left part of the pattern. */ if ((hostname_label_end - hostname) < (pattern_label_end - pattern)) { return 0; } /* If the left prefix group before the star matches and right of the star * matches... we have a wildcard match */ prefixlen = pattern_wildcard - pattern; suffixlen = pattern_label_end - (pattern_wildcard + 1); return strncasecmp (pattern, hostname, prefixlen) == 0 && strncasecmp (pattern_wildcard + 1, hostname_label_end - suffixlen, suffixlen) == 0; } /** check if a provided cert matches a passed hostname */ bool _mongoc_openssl_check_cert (SSL *ssl, const char *host, bool allow_invalid_hostname) { X509 *peer; X509_NAME *subject_name; X509_NAME_ENTRY *entry; ASN1_STRING *entry_data; int length; int idx; int r = 0; long verify_status; size_t addrlen = 0; unsigned char addr4[sizeof (struct in_addr)]; unsigned char addr6[sizeof (struct in6_addr)]; int i; int n_sans = -1; int target = GEN_DNS; STACK_OF (GENERAL_NAME) *sans = NULL; ENTRY; BSON_ASSERT (ssl); BSON_ASSERT (host); if (allow_invalid_hostname) { RETURN (true); } /** if the host looks like an IP address, match that, otherwise we assume we * have a DNS name */ if (inet_pton (AF_INET, host, &addr4)) { target = GEN_IPADD; addrlen = sizeof addr4; } else if (inet_pton (AF_INET6, host, &addr6)) { target = GEN_IPADD; addrlen = sizeof addr6; } peer = SSL_get_peer_certificate (ssl); if (!peer) { MONGOC_WARNING ("SSL Certification verification failed: %s", ERR_error_string (ERR_get_error (), NULL)); RETURN (false); } verify_status = SSL_get_verify_result (ssl); if (verify_status == X509_V_OK) { /* gets a stack of alt names that we can iterate through */ sans = (STACK_OF (GENERAL_NAME) *) X509_get_ext_d2i ( (X509 *) peer, NID_subject_alt_name, NULL, NULL); if (sans) { n_sans = sk_GENERAL_NAME_num (sans); /* loop through the stack, or until we find a match */ for (i = 0; i < n_sans && !r; i++) { const GENERAL_NAME *name = sk_GENERAL_NAME_value (sans, i); /* skip entries that can't apply, I.e. 
IP entries if we've got a * DNS host */ if (name->type == target) { const char *check; check = (const char *) ASN1_STRING_get0_data (name->d.ia5); length = ASN1_STRING_length (name->d.ia5); switch (target) { case GEN_DNS: /* check that we don't have an embedded null byte */ if ((length == bson_strnlen (check, length)) && _mongoc_openssl_hostcheck (check, host)) { r = 1; } break; case GEN_IPADD: if (length == addrlen) { if (length == sizeof addr6 && !memcmp (check, &addr6, length)) { r = 1; } else if (length == sizeof addr4 && !memcmp (check, &addr4, length)) { r = 1; } } break; default: BSON_ASSERT (0); break; } } } GENERAL_NAMES_free (sans); } else { subject_name = X509_get_subject_name (peer); if (subject_name) { i = -1; /* skip to the last common name */ while ((idx = X509_NAME_get_index_by_NID ( subject_name, NID_commonName, i)) >= 0) { i = idx; } if (i >= 0) { entry = X509_NAME_get_entry (subject_name, i); entry_data = X509_NAME_ENTRY_get_data (entry); if (entry_data) { char *check; /* TODO: I've heard tell that old versions of SSL crap out * when calling ASN1_STRING_to_UTF8 on already utf8 data. * Check up on that */ length = ASN1_STRING_to_UTF8 ((unsigned char **) &check, entry_data); if (length >= 0) { /* check for embedded nulls */ if ((length == bson_strnlen (check, length)) && _mongoc_openssl_hostcheck (check, host)) { r = 1; } OPENSSL_free (check); } } } } } } X509_free (peer); RETURN (r); } static bool _mongoc_openssl_setup_ca (SSL_CTX *ctx, const char *cert, const char *cert_dir) { BSON_ASSERT (ctx); BSON_ASSERT (cert || cert_dir); if (!SSL_CTX_load_verify_locations (ctx, cert, cert_dir)) { MONGOC_ERROR ("Cannot load Certificate Authorities from '%s' and '%s'", cert, cert_dir); return 0; } return 1; } static bool _mongoc_openssl_setup_crl (SSL_CTX *ctx, const char *crlfile) { X509_STORE *store; X509_LOOKUP *lookup; int status; store = SSL_CTX_get_cert_store (ctx); X509_STORE_set_flags (store, X509_V_FLAG_CRL_CHECK); lookup = X509_STORE_add_lookup (store, X509_LOOKUP_file ()); status = X509_load_crl_file (lookup, crlfile, X509_FILETYPE_PEM); return status != 0; } static bool _mongoc_openssl_setup_pem_file (SSL_CTX *ctx, const char *pem_file, const char *password) { if (!SSL_CTX_use_certificate_chain_file (ctx, pem_file)) { MONGOC_ERROR ("Cannot find certificate in '%s'", pem_file); return 0; } if (password) { SSL_CTX_set_default_passwd_cb_userdata (ctx, (void *) password); SSL_CTX_set_default_passwd_cb (ctx, _mongoc_openssl_password_cb); } if (!(SSL_CTX_use_PrivateKey_file (ctx, pem_file, SSL_FILETYPE_PEM))) { MONGOC_ERROR ("Cannot find private key in: '%s'", pem_file); return 0; } if (!(SSL_CTX_check_private_key (ctx))) { MONGOC_ERROR ("Cannot load private key: '%s'", pem_file); return 0; } return 1; } /** * _mongoc_openssl_ctx_new: * * Create a new ssl context declaratively * * The opt.pem_pwd parameter, if passed, must exist for the life of this * context object (for storing and loading the associated pem file) */ SSL_CTX * _mongoc_openssl_ctx_new (mongoc_ssl_opt_t *opt) { SSL_CTX *ctx = NULL; int ssl_ctx_options = 0; /* * Ensure we are initialized. This is safe to call multiple times. */ mongoc_init (); ctx = SSL_CTX_new (SSLv23_method ()); BSON_ASSERT (ctx); /* SSL_OP_ALL - Activate all bug workaround options, to support buggy client * SSL's. */ ssl_ctx_options |= SSL_OP_ALL; /* SSL_OP_NO_SSLv2 - Disable SSL v2 support */ ssl_ctx_options |= SSL_OP_NO_SSLv2; /* Disable compression, if we can. 
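 * (SSL_OP_NO_COMPRESSION itself is only available from OpenSSL 1.0.0 on,
 * hence the version guard below.)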
* OpenSSL 0.9.x added compression support which was always enabled when built * against zlib * OpenSSL 1.0.0 added the ability to disable it, while keeping it enabled by * default * OpenSSL 1.1.0 disabled it by default. */ #if OPENSSL_VERSION_NUMBER >= 0x10000000L ssl_ctx_options |= SSL_OP_NO_COMPRESSION; #endif SSL_CTX_set_options (ctx, ssl_ctx_options); /* only defined in special build, using: * --enable-system-crypto-profile (autotools) * -DENABLE_CRYPTO_SYSTEM_PROFILE:BOOL=ON (cmake) */ #ifndef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE /* HIGH - Enable strong ciphers * !EXPORT - Disable export ciphers (40/56 bit) * !aNULL - Disable anonymous auth ciphers * @STRENGTH - Sort ciphers based on strength */ SSL_CTX_set_cipher_list (ctx, "HIGH:!EXPORT:!aNULL@STRENGTH"); #endif /* If renegotiation is needed, don't return from recv() or send() until it's * successful. * Note: this is for blocking sockets only. */ SSL_CTX_set_mode (ctx, SSL_MODE_AUTO_RETRY); /* Load my private keys to present to the server */ if (opt->pem_file && !_mongoc_openssl_setup_pem_file (ctx, opt->pem_file, opt->pem_pwd)) { SSL_CTX_free (ctx); return NULL; } /* Load in my Certificate Authority, to verify the server against * If none provided, fallback to the distro defaults */ if (opt->ca_file || opt->ca_dir) { if (!_mongoc_openssl_setup_ca (ctx, opt->ca_file, opt->ca_dir)) { SSL_CTX_free (ctx); return NULL; } } else { /* If the server certificate is issued by known CA we trust it by default */ #ifdef _WIN32 _mongoc_openssl_import_cert_stores (ctx); #else SSL_CTX_set_default_verify_paths (ctx); #endif } /* Load my revocation list, to verify the server against */ if (opt->crl_file && !_mongoc_openssl_setup_crl (ctx, opt->crl_file)) { SSL_CTX_free (ctx); return NULL; } return ctx; } char * _mongoc_openssl_extract_subject (const char *filename, const char *passphrase) { X509_NAME *subject = NULL; X509 *cert = NULL; BIO *certbio = NULL; BIO *strbio = NULL; char *str = NULL; int ret; if (!filename) { return NULL; } certbio = BIO_new (BIO_s_file ()); strbio = BIO_new (BIO_s_mem ()); ; BSON_ASSERT (certbio); BSON_ASSERT (strbio); if (BIO_read_filename (certbio, filename) && (cert = PEM_read_bio_X509 (certbio, NULL, 0, NULL))) { if ((subject = X509_get_subject_name (cert))) { ret = X509_NAME_print_ex (strbio, subject, 0, XN_FLAG_RFC2253); if ((ret > 0) && (ret < INT_MAX)) { str = (char *) bson_malloc (ret + 2); BIO_gets (strbio, str, ret + 1); str[ret] = '\0'; } } } if (cert) { X509_free (cert); } if (certbio) { BIO_free (certbio); } if (strbio) { BIO_free (strbio); } return str; } #if OPENSSL_VERSION_NUMBER < 0x10100000L #ifdef _WIN32 static unsigned long _mongoc_openssl_thread_id_callback (void) { unsigned long ret; ret = (unsigned long) GetCurrentThreadId (); return ret; } #else static unsigned long _mongoc_openssl_thread_id_callback (void) { unsigned long ret; ret = (unsigned long) pthread_self (); return ret; } #endif static void _mongoc_openssl_thread_locking_callback (int mode, int type, const char *file, int line) { if (mode & CRYPTO_LOCK) { bson_mutex_lock (&gMongocOpenSslThreadLocks[type]); } else { bson_mutex_unlock (&gMongocOpenSslThreadLocks[type]); } } static void _mongoc_openssl_thread_startup (void) { int i; gMongocOpenSslThreadLocks = (bson_mutex_t *) OPENSSL_malloc ( CRYPTO_num_locks () * sizeof (bson_mutex_t)); for (i = 0; i < CRYPTO_num_locks (); i++) { bson_mutex_init (&gMongocOpenSslThreadLocks[i]); } if (!CRYPTO_get_locking_callback ()) { CRYPTO_set_locking_callback (_mongoc_openssl_thread_locking_callback); 
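      /* also register the thread-id callback so pre-1.1.0 OpenSSL can tell
         the calling threads apart; OpenSSL >= 1.1.0 handles its own locking
         and this whole block is compiled out */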
CRYPTO_set_id_callback (_mongoc_openssl_thread_id_callback); } } static void _mongoc_openssl_thread_cleanup (void) { int i; if (CRYPTO_get_locking_callback () == _mongoc_openssl_thread_locking_callback) { CRYPTO_set_locking_callback (NULL); } if (CRYPTO_get_id_callback () == _mongoc_openssl_thread_id_callback) { CRYPTO_set_id_callback (NULL); } for (i = 0; i < CRYPTO_num_locks (); i++) { bson_mutex_destroy (&gMongocOpenSslThreadLocks[i]); } OPENSSL_free (gMongocOpenSslThreadLocks); } #endif #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-opts-helpers-private.h0000644000076500000240000001047713572250757027571 0ustar alcaeusstaff/* * Copyright 2019-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #include #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-collection-private.h" #include "mongoc/mongoc-write-command-private.h" #ifndef LIBMONGOC_MONGOC_OPTS_HELPERS_H #define LIBMONGOC_MONGOC_OPTS_HELPERS_H #define _mongoc_convert_session_id _mongoc_client_session_from_iter typedef struct _mongoc_timestamp_t { uint32_t timestamp; uint32_t increment; } mongoc_timestamp_t; bool _mongoc_timestamp_empty (mongoc_timestamp_t *timestamp); void _mongoc_timestamp_set (mongoc_timestamp_t *dst, mongoc_timestamp_t *src); void _mongoc_timestamp_set_from_bson (mongoc_timestamp_t *timestamp, bson_iter_t *iter); void _mongoc_timestamp_append (mongoc_timestamp_t *timestamp, bson_t *bson, char *key); void _mongoc_timestamp_clear (mongoc_timestamp_t *timestamp); bool _mongoc_convert_document (mongoc_client_t *client, const bson_iter_t *iter, bson_t *doc, bson_error_t *error); bool _mongoc_convert_array (mongoc_client_t *client, const bson_iter_t *iter, bson_t *doc, bson_error_t *error); bool _mongoc_convert_int64_positive (mongoc_client_t *client, const bson_iter_t *iter, int64_t *num, bson_error_t *error); bool _mongoc_convert_int32_t (mongoc_client_t *client, const bson_iter_t *iter, int32_t *num, bson_error_t *error); bool _mongoc_convert_int32_positive (mongoc_client_t *client, const bson_iter_t *iter, int32_t *num, bson_error_t *error); bool _mongoc_convert_bool (mongoc_client_t *client, const bson_iter_t *iter, bool *flag, bson_error_t *error); bool _mongoc_convert_bson_value_t (mongoc_client_t *client, const bson_iter_t *iter, bson_value_t *value, bson_error_t *error); bool _mongoc_convert_timestamp (mongoc_client_t *client, const bson_iter_t *iter, mongoc_timestamp_t *timestamp, bson_error_t *error); bool _mongoc_convert_utf8 (mongoc_client_t *client, const bson_iter_t *iter, const char **comment, bson_error_t *error); bool _mongoc_convert_validate_flags (mongoc_client_t *client, const bson_iter_t *iter, bson_validate_flags_t *flags, bson_error_t *error); bool _mongoc_convert_mongoc_write_bypass_document_validation_t ( mongoc_client_t *client, const bson_iter_t *iter, bool *bdv, bson_error_t *error); bool _mongoc_convert_write_concern (mongoc_client_t *client, const bson_iter_t *iter, mongoc_write_concern_t **wc, 
bson_error_t *error); bool _mongoc_convert_server_id (mongoc_client_t *client, const bson_iter_t *iter, uint32_t *server_id, bson_error_t *error); bool _mongoc_convert_read_concern (mongoc_client_t *client, const bson_iter_t *iter, mongoc_read_concern_t **rc, bson_error_t *error); #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-opts-helpers.c0000644000076500000240000002407113572250757026107 0ustar alcaeusstaff/* * Copyright 2019-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-opts-helpers-private.h" #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-read-concern-private.h" #define BSON_ERR(...) \ do { \ bson_set_error ( \ error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, __VA_ARGS__); \ return false; \ } while (0) #define CONVERSION_ERR(...) \ do { \ bson_set_error (error, \ MONGOC_ERROR_COMMAND, \ MONGOC_ERROR_COMMAND_INVALID_ARG, \ __VA_ARGS__); \ return false; \ } while (0) bool _mongoc_timestamp_empty (mongoc_timestamp_t *timestamp) { return (timestamp->timestamp == 0 && timestamp->increment == 0); } void _mongoc_timestamp_set (mongoc_timestamp_t *dst, mongoc_timestamp_t *src) { dst->timestamp = src->timestamp; dst->increment = src->increment; } void _mongoc_timestamp_set_from_bson (mongoc_timestamp_t *timestamp, bson_iter_t *iter) { bson_iter_timestamp (iter, &(timestamp->timestamp), &(timestamp->increment)); } void _mongoc_timestamp_append (mongoc_timestamp_t *timestamp, bson_t *bson, char *key) { bson_append_timestamp ( bson, key, strlen (key), timestamp->timestamp, timestamp->increment); } void _mongoc_timestamp_clear (mongoc_timestamp_t *timestamp) { timestamp->timestamp = 0; timestamp->increment = 0; } bool _mongoc_convert_document (mongoc_client_t *client, const bson_iter_t *iter, bson_t *doc, bson_error_t *error) { uint32_t len; const uint8_t *data; bson_t value; if (!BSON_ITER_HOLDS_DOCUMENT (iter)) { CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain document," " not %s", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter))); } bson_iter_document (iter, &len, &data); if (!bson_init_static (&value, data, len)) { BSON_ERR ("Corrupt BSON in field \"%s\" in opts", bson_iter_key (iter)); } bson_destroy (doc); bson_copy_to (&value, doc); return true; } bool _mongoc_convert_array (mongoc_client_t *client, const bson_iter_t *iter, bson_t *doc, bson_error_t *error) { uint32_t len; const uint8_t *data; bson_t value; if (!BSON_ITER_HOLDS_ARRAY (iter)) { CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain array," " not %s", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter))); } bson_iter_array (iter, &len, &data); if (!bson_init_static (&value, data, len)) { BSON_ERR ("Corrupt BSON in field \"%s\" in opts", bson_iter_key (iter)); } bson_destroy (doc); bson_copy_to (&value, doc); return true; } bool _mongoc_convert_int64_positive (mongoc_client_t *client, const 
bson_iter_t *iter, int64_t *num, bson_error_t *error) { int64_t i; if (!BSON_ITER_HOLDS_NUMBER (iter)) { CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain number," " not %s", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter))); } i = bson_iter_as_int64 (iter); if (i <= 0) { CONVERSION_ERR ("Invalid field \"%s\" in opts, should be greater than 0," " not %" PRId64, bson_iter_key (iter), i); } *num = bson_iter_as_int64 (iter); return true; } bool _mongoc_convert_int32_t (mongoc_client_t *client, const bson_iter_t *iter, int32_t *num, bson_error_t *error) { int64_t i; if (!BSON_ITER_HOLDS_NUMBER (iter)) { CONVERSION_ERR ("Invalid field \"%s\" in opts", bson_iter_key (iter)); } i = bson_iter_as_int64 (iter); if (i > INT32_MAX || i < INT32_MIN) { CONVERSION_ERR ("Invalid field \"%s\" in opts: %" PRId64 " out of range for int32", bson_iter_key (iter), i); } *num = (int32_t) i; return true; } bool _mongoc_convert_int32_positive (mongoc_client_t *client, const bson_iter_t *iter, int32_t *num, bson_error_t *error) { int32_t i; if (!_mongoc_convert_int32_t (client, iter, &i, error)) { return false; } if (i <= 0) { CONVERSION_ERR ( "Invalid field \"%s\" in opts, should be greater than 0, not %d", bson_iter_key (iter), i); } *num = i; return true; } bool _mongoc_convert_bool (mongoc_client_t *client, const bson_iter_t *iter, bool *flag, bson_error_t *error) { if (BSON_ITER_HOLDS_BOOL (iter)) { *flag = bson_iter_bool (iter); return true; } CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain bool," " not %s", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter))); } bool _mongoc_convert_bson_value_t (mongoc_client_t *client, const bson_iter_t *iter, bson_value_t *value, bson_error_t *error) { bson_value_copy (bson_iter_value ((bson_iter_t *) iter), value); return true; } bool _mongoc_convert_timestamp (mongoc_client_t *client, const bson_iter_t *iter, mongoc_timestamp_t *timestamp, bson_error_t *error) { bson_iter_timestamp (iter, ×tamp->timestamp, ×tamp->increment); return true; } bool _mongoc_convert_utf8 (mongoc_client_t *client, const bson_iter_t *iter, const char **str, bson_error_t *error) { if (BSON_ITER_HOLDS_UTF8 (iter)) { *str = bson_iter_utf8 (iter, NULL); return true; } CONVERSION_ERR ("Invalid field \"%s\" in opts, should contain string," " not %s", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter))); } bool _mongoc_convert_validate_flags (mongoc_client_t *client, const bson_iter_t *iter, bson_validate_flags_t *flags, bson_error_t *error) { if (BSON_ITER_HOLDS_BOOL (iter)) { if (!bson_iter_as_bool (iter)) { *flags = BSON_VALIDATE_NONE; return true; } else { /* validate: false is ok but validate: true is prohibited */ CONVERSION_ERR ("Invalid option \"%s\": true, must be a bitwise-OR of" " bson_validate_flags_t values.", bson_iter_key (iter)); } } else if (BSON_ITER_HOLDS_INT32 (iter)) { if (bson_iter_int32 (iter) <= 0x1F) { *flags = (bson_validate_flags_t) bson_iter_int32 (iter); return true; } else { CONVERSION_ERR ("Invalid field \"%s\" in opts, must be a bitwise-OR of" " bson_validate_flags_t values.", bson_iter_key (iter)); } } CONVERSION_ERR ("Invalid type for option \"%s\": \"%s\"." 
" \"%s\" must be a boolean or a bitwise-OR of" " bson_validate_flags_t values.", bson_iter_key (iter), _mongoc_bson_type_to_str (bson_iter_type (iter)), bson_iter_key (iter)); } bool _mongoc_convert_write_concern (mongoc_client_t *client, const bson_iter_t *iter, mongoc_write_concern_t **wc, bson_error_t *error) { mongoc_write_concern_t *tmp; tmp = _mongoc_write_concern_new_from_iter (iter, error); if (tmp) { *wc = tmp; return true; } return false; } bool _mongoc_convert_server_id (mongoc_client_t *client, const bson_iter_t *iter, uint32_t *server_id, bson_error_t *error) { int64_t tmp; if (!BSON_ITER_HOLDS_INT (iter)) { CONVERSION_ERR ("The serverId option must be an integer"); } tmp = bson_iter_as_int64 (iter); if (tmp <= 0) { CONVERSION_ERR ("The serverId option must be >= 1"); } *server_id = (uint32_t) tmp; return true; } bool _mongoc_convert_read_concern (mongoc_client_t *client, const bson_iter_t *iter, mongoc_read_concern_t **rc, bson_error_t *error) { *rc = _mongoc_read_concern_new_from_iter (iter, error); if (!*rc) { return false; } return true; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-opts-private.h0000644000076500000240000002235713572250757026131 0ustar alcaeusstaff#include "mongoc/mongoc-prelude.h" #ifndef MONGOC_OPTS_H #define MONGOC_OPTS_H #include #include "mongoc/mongoc-client-session.h" #include "mongoc/mongoc-bulk-operation-private.h" #include "mongoc/mongoc-opts-helpers-private.h" /************************************************** * * Generated by build/generate-opts.py. * * DO NOT EDIT THIS FILE. * *************************************************/ /* clang-format off */ typedef struct _mongoc_crud_opts_t { mongoc_write_concern_t *writeConcern; bool write_concern_owned; mongoc_client_session_t *client_session; bson_validate_flags_t validate; } mongoc_crud_opts_t; typedef struct _mongoc_update_opts_t { mongoc_crud_opts_t crud; bool bypass; bson_t collation; bool upsert; } mongoc_update_opts_t; typedef struct _mongoc_insert_one_opts_t { mongoc_crud_opts_t crud; bool bypass; bson_t extra; } mongoc_insert_one_opts_t; typedef struct _mongoc_insert_many_opts_t { mongoc_crud_opts_t crud; bool ordered; bool bypass; bson_t extra; } mongoc_insert_many_opts_t; typedef struct _mongoc_delete_one_opts_t { mongoc_crud_opts_t crud; bson_t collation; bson_t extra; } mongoc_delete_one_opts_t; typedef struct _mongoc_delete_many_opts_t { mongoc_crud_opts_t crud; bson_t collation; bson_t extra; } mongoc_delete_many_opts_t; typedef struct _mongoc_update_one_opts_t { mongoc_update_opts_t update; bson_t arrayFilters; bson_t extra; } mongoc_update_one_opts_t; typedef struct _mongoc_update_many_opts_t { mongoc_update_opts_t update; bson_t arrayFilters; bson_t extra; } mongoc_update_many_opts_t; typedef struct _mongoc_replace_one_opts_t { mongoc_update_opts_t update; bson_t extra; } mongoc_replace_one_opts_t; typedef struct _mongoc_bulk_opts_t { mongoc_write_concern_t *writeConcern; bool write_concern_owned; bool ordered; mongoc_client_session_t *client_session; bson_t extra; } mongoc_bulk_opts_t; typedef struct _mongoc_bulk_insert_opts_t { bson_validate_flags_t validate; bson_t extra; } mongoc_bulk_insert_opts_t; typedef struct _mongoc_bulk_update_opts_t { bson_validate_flags_t validate; bson_t collation; bool upsert; bool multi; } mongoc_bulk_update_opts_t; typedef struct _mongoc_bulk_update_one_opts_t { mongoc_bulk_update_opts_t update; bson_t arrayFilters; bson_t extra; } mongoc_bulk_update_one_opts_t; typedef struct _mongoc_bulk_update_many_opts_t { 
mongoc_bulk_update_opts_t update; bson_t arrayFilters; bson_t extra; } mongoc_bulk_update_many_opts_t; typedef struct _mongoc_bulk_replace_one_opts_t { mongoc_bulk_update_opts_t update; bson_t extra; } mongoc_bulk_replace_one_opts_t; typedef struct _mongoc_bulk_remove_opts_t { bson_t collation; int32_t limit; } mongoc_bulk_remove_opts_t; typedef struct _mongoc_bulk_remove_one_opts_t { mongoc_bulk_remove_opts_t remove; bson_t extra; } mongoc_bulk_remove_one_opts_t; typedef struct _mongoc_bulk_remove_many_opts_t { mongoc_bulk_remove_opts_t remove; bson_t extra; } mongoc_bulk_remove_many_opts_t; typedef struct _mongoc_change_stream_opts_t { int32_t batchSize; bson_t resumeAfter; bson_t startAfter; mongoc_timestamp_t startAtOperationTime; int64_t maxAwaitTimeMS; const char *fullDocument; bson_t extra; } mongoc_change_stream_opts_t; typedef struct _mongoc_create_index_opts_t { mongoc_write_concern_t *writeConcern; bool write_concern_owned; mongoc_client_session_t *client_session; bson_t extra; } mongoc_create_index_opts_t; typedef struct _mongoc_read_write_opts_t { bson_t readConcern; mongoc_write_concern_t *writeConcern; bool write_concern_owned; mongoc_client_session_t *client_session; bson_t collation; uint32_t serverId; bson_t extra; } mongoc_read_write_opts_t; typedef struct _mongoc_gridfs_bucket_opts_t { const char *bucketName; int32_t chunkSizeBytes; mongoc_write_concern_t *writeConcern; bool write_concern_owned; mongoc_read_concern_t *readConcern; bson_t extra; } mongoc_gridfs_bucket_opts_t; typedef struct _mongoc_gridfs_bucket_upload_opts_t { int32_t chunkSizeBytes; bson_t metadata; bson_t extra; } mongoc_gridfs_bucket_upload_opts_t; bool _mongoc_insert_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_insert_one_opts_t *mongoc_insert_one_opts, bson_error_t *error); void _mongoc_insert_one_opts_cleanup (mongoc_insert_one_opts_t *mongoc_insert_one_opts); bool _mongoc_insert_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_insert_many_opts_t *mongoc_insert_many_opts, bson_error_t *error); void _mongoc_insert_many_opts_cleanup (mongoc_insert_many_opts_t *mongoc_insert_many_opts); bool _mongoc_delete_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_delete_one_opts_t *mongoc_delete_one_opts, bson_error_t *error); void _mongoc_delete_one_opts_cleanup (mongoc_delete_one_opts_t *mongoc_delete_one_opts); bool _mongoc_delete_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_delete_many_opts_t *mongoc_delete_many_opts, bson_error_t *error); void _mongoc_delete_many_opts_cleanup (mongoc_delete_many_opts_t *mongoc_delete_many_opts); bool _mongoc_update_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_update_one_opts_t *mongoc_update_one_opts, bson_error_t *error); void _mongoc_update_one_opts_cleanup (mongoc_update_one_opts_t *mongoc_update_one_opts); bool _mongoc_update_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_update_many_opts_t *mongoc_update_many_opts, bson_error_t *error); void _mongoc_update_many_opts_cleanup (mongoc_update_many_opts_t *mongoc_update_many_opts); bool _mongoc_replace_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_replace_one_opts_t *mongoc_replace_one_opts, bson_error_t *error); void _mongoc_replace_one_opts_cleanup (mongoc_replace_one_opts_t *mongoc_replace_one_opts); bool _mongoc_bulk_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_opts_t *mongoc_bulk_opts, bson_error_t *error); void 
_mongoc_bulk_opts_cleanup (mongoc_bulk_opts_t *mongoc_bulk_opts); bool _mongoc_bulk_insert_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_insert_opts_t *mongoc_bulk_insert_opts, bson_error_t *error); void _mongoc_bulk_insert_opts_cleanup (mongoc_bulk_insert_opts_t *mongoc_bulk_insert_opts); bool _mongoc_bulk_update_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_update_one_opts_t *mongoc_bulk_update_one_opts, bson_error_t *error); void _mongoc_bulk_update_one_opts_cleanup (mongoc_bulk_update_one_opts_t *mongoc_bulk_update_one_opts); bool _mongoc_bulk_update_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_update_many_opts_t *mongoc_bulk_update_many_opts, bson_error_t *error); void _mongoc_bulk_update_many_opts_cleanup (mongoc_bulk_update_many_opts_t *mongoc_bulk_update_many_opts); bool _mongoc_bulk_replace_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_replace_one_opts_t *mongoc_bulk_replace_one_opts, bson_error_t *error); void _mongoc_bulk_replace_one_opts_cleanup (mongoc_bulk_replace_one_opts_t *mongoc_bulk_replace_one_opts); bool _mongoc_bulk_remove_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_remove_one_opts_t *mongoc_bulk_remove_one_opts, bson_error_t *error); void _mongoc_bulk_remove_one_opts_cleanup (mongoc_bulk_remove_one_opts_t *mongoc_bulk_remove_one_opts); bool _mongoc_bulk_remove_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_remove_many_opts_t *mongoc_bulk_remove_many_opts, bson_error_t *error); void _mongoc_bulk_remove_many_opts_cleanup (mongoc_bulk_remove_many_opts_t *mongoc_bulk_remove_many_opts); bool _mongoc_change_stream_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_change_stream_opts_t *mongoc_change_stream_opts, bson_error_t *error); void _mongoc_change_stream_opts_cleanup (mongoc_change_stream_opts_t *mongoc_change_stream_opts); bool _mongoc_create_index_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_create_index_opts_t *mongoc_create_index_opts, bson_error_t *error); void _mongoc_create_index_opts_cleanup (mongoc_create_index_opts_t *mongoc_create_index_opts); bool _mongoc_read_write_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_read_write_opts_t *mongoc_read_write_opts, bson_error_t *error); void _mongoc_read_write_opts_cleanup (mongoc_read_write_opts_t *mongoc_read_write_opts); bool _mongoc_gridfs_bucket_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_gridfs_bucket_opts_t *mongoc_gridfs_bucket_opts, bson_error_t *error); void _mongoc_gridfs_bucket_opts_cleanup (mongoc_gridfs_bucket_opts_t *mongoc_gridfs_bucket_opts); bool _mongoc_gridfs_bucket_upload_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_gridfs_bucket_upload_opts_t *mongoc_gridfs_bucket_upload_opts, bson_error_t *error); void _mongoc_gridfs_bucket_upload_opts_cleanup (mongoc_gridfs_bucket_upload_opts_t *mongoc_gridfs_bucket_upload_opts); #endif /* MONGOC_OPTS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-opts.c0000644000076500000240000014642013572250757024452 0ustar alcaeusstaff#include "mongoc/mongoc-opts-helpers-private.h" #include "mongoc/mongoc-opts-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-client-private.h" /************************************************** * * Generated by build/generate-opts.py. * * DO NOT EDIT THIS FILE. 
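 * (It implements the *_opts_parse and *_opts_cleanup helpers declared in
 * mongoc-opts-private.h.)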
* *************************************************/ /* clang-format off */ bool _mongoc_insert_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_insert_one_opts_t *mongoc_insert_one_opts, bson_error_t *error) { bson_iter_t iter; mongoc_insert_one_opts->crud.writeConcern = NULL; mongoc_insert_one_opts->crud.write_concern_owned = false; mongoc_insert_one_opts->crud.client_session = NULL; mongoc_insert_one_opts->crud.validate = _mongoc_default_insert_vflags; mongoc_insert_one_opts->bypass = false; bson_init (&mongoc_insert_one_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_insert_one_opts->crud.writeConcern, error)) { return false; } mongoc_insert_one_opts->crud.write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_insert_one_opts->crud.client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_insert_one_opts->crud.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "bypassDocumentValidation")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_insert_one_opts->bypass, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_insert_one_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_insert_one_opts_cleanup (mongoc_insert_one_opts_t *mongoc_insert_one_opts) { if (mongoc_insert_one_opts->crud.write_concern_owned) { mongoc_write_concern_destroy (mongoc_insert_one_opts->crud.writeConcern); } bson_destroy (&mongoc_insert_one_opts->extra); } bool _mongoc_insert_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_insert_many_opts_t *mongoc_insert_many_opts, bson_error_t *error) { bson_iter_t iter; mongoc_insert_many_opts->crud.writeConcern = NULL; mongoc_insert_many_opts->crud.write_concern_owned = false; mongoc_insert_many_opts->crud.client_session = NULL; mongoc_insert_many_opts->crud.validate = _mongoc_default_insert_vflags; mongoc_insert_many_opts->ordered = true; mongoc_insert_many_opts->bypass = false; bson_init (&mongoc_insert_many_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_insert_many_opts->crud.writeConcern, error)) { return false; } mongoc_insert_many_opts->crud.write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_insert_many_opts->crud.client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_insert_many_opts->crud.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "ordered")) { if (!_mongoc_convert_bool ( client, 
&iter, &mongoc_insert_many_opts->ordered, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "bypassDocumentValidation")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_insert_many_opts->bypass, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_insert_many_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_insert_many_opts_cleanup (mongoc_insert_many_opts_t *mongoc_insert_many_opts) { if (mongoc_insert_many_opts->crud.write_concern_owned) { mongoc_write_concern_destroy (mongoc_insert_many_opts->crud.writeConcern); } bson_destroy (&mongoc_insert_many_opts->extra); } bool _mongoc_delete_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_delete_one_opts_t *mongoc_delete_one_opts, bson_error_t *error) { bson_iter_t iter; mongoc_delete_one_opts->crud.writeConcern = NULL; mongoc_delete_one_opts->crud.write_concern_owned = false; mongoc_delete_one_opts->crud.client_session = NULL; mongoc_delete_one_opts->crud.validate = BSON_VALIDATE_NONE; bson_init (&mongoc_delete_one_opts->collation); bson_init (&mongoc_delete_one_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_delete_one_opts->crud.writeConcern, error)) { return false; } mongoc_delete_one_opts->crud.write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_delete_one_opts->crud.client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_delete_one_opts->crud.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_delete_one_opts->collation, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_delete_one_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_delete_one_opts_cleanup (mongoc_delete_one_opts_t *mongoc_delete_one_opts) { if (mongoc_delete_one_opts->crud.write_concern_owned) { mongoc_write_concern_destroy (mongoc_delete_one_opts->crud.writeConcern); } bson_destroy (&mongoc_delete_one_opts->collation); bson_destroy (&mongoc_delete_one_opts->extra); } bool _mongoc_delete_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_delete_many_opts_t *mongoc_delete_many_opts, bson_error_t *error) { bson_iter_t iter; mongoc_delete_many_opts->crud.writeConcern = NULL; mongoc_delete_many_opts->crud.write_concern_owned = false; mongoc_delete_many_opts->crud.client_session = NULL; mongoc_delete_many_opts->crud.validate = BSON_VALIDATE_NONE; bson_init (&mongoc_delete_many_opts->collation); bson_init (&mongoc_delete_many_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' 
parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_delete_many_opts->crud.writeConcern, error)) { return false; } mongoc_delete_many_opts->crud.write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_delete_many_opts->crud.client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_delete_many_opts->crud.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_delete_many_opts->collation, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_delete_many_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_delete_many_opts_cleanup (mongoc_delete_many_opts_t *mongoc_delete_many_opts) { if (mongoc_delete_many_opts->crud.write_concern_owned) { mongoc_write_concern_destroy (mongoc_delete_many_opts->crud.writeConcern); } bson_destroy (&mongoc_delete_many_opts->collation); bson_destroy (&mongoc_delete_many_opts->extra); } bool _mongoc_update_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_update_one_opts_t *mongoc_update_one_opts, bson_error_t *error) { bson_iter_t iter; mongoc_update_one_opts->update.crud.writeConcern = NULL; mongoc_update_one_opts->update.crud.write_concern_owned = false; mongoc_update_one_opts->update.crud.client_session = NULL; mongoc_update_one_opts->update.crud.validate = _mongoc_default_update_vflags; mongoc_update_one_opts->update.bypass = false; bson_init (&mongoc_update_one_opts->update.collation); mongoc_update_one_opts->update.upsert = false; bson_init (&mongoc_update_one_opts->arrayFilters); bson_init (&mongoc_update_one_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_update_one_opts->update.crud.writeConcern, error)) { return false; } mongoc_update_one_opts->update.crud.write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_update_one_opts->update.crud.client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_update_one_opts->update.crud.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "bypassDocumentValidation")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_update_one_opts->update.bypass, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_update_one_opts->update.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "upsert")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_update_one_opts->update.upsert, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "arrayFilters")) { if 
(!_mongoc_convert_array ( client, &iter, &mongoc_update_one_opts->arrayFilters, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_update_one_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_update_one_opts_cleanup (mongoc_update_one_opts_t *mongoc_update_one_opts) { if (mongoc_update_one_opts->update.crud.write_concern_owned) { mongoc_write_concern_destroy (mongoc_update_one_opts->update.crud.writeConcern); } bson_destroy (&mongoc_update_one_opts->update.collation); bson_destroy (&mongoc_update_one_opts->arrayFilters); bson_destroy (&mongoc_update_one_opts->extra); } bool _mongoc_update_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_update_many_opts_t *mongoc_update_many_opts, bson_error_t *error) { bson_iter_t iter; mongoc_update_many_opts->update.crud.writeConcern = NULL; mongoc_update_many_opts->update.crud.write_concern_owned = false; mongoc_update_many_opts->update.crud.client_session = NULL; mongoc_update_many_opts->update.crud.validate = _mongoc_default_update_vflags; mongoc_update_many_opts->update.bypass = false; bson_init (&mongoc_update_many_opts->update.collation); mongoc_update_many_opts->update.upsert = false; bson_init (&mongoc_update_many_opts->arrayFilters); bson_init (&mongoc_update_many_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_update_many_opts->update.crud.writeConcern, error)) { return false; } mongoc_update_many_opts->update.crud.write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_update_many_opts->update.crud.client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_update_many_opts->update.crud.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "bypassDocumentValidation")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_update_many_opts->update.bypass, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_update_many_opts->update.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "upsert")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_update_many_opts->update.upsert, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "arrayFilters")) { if (!_mongoc_convert_array ( client, &iter, &mongoc_update_many_opts->arrayFilters, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_update_many_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_update_many_opts_cleanup (mongoc_update_many_opts_t *mongoc_update_many_opts) { if (mongoc_update_many_opts->update.crud.write_concern_owned) { mongoc_write_concern_destroy 
(mongoc_update_many_opts->update.crud.writeConcern); } bson_destroy (&mongoc_update_many_opts->update.collation); bson_destroy (&mongoc_update_many_opts->arrayFilters); bson_destroy (&mongoc_update_many_opts->extra); } bool _mongoc_replace_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_replace_one_opts_t *mongoc_replace_one_opts, bson_error_t *error) { bson_iter_t iter; mongoc_replace_one_opts->update.crud.writeConcern = NULL; mongoc_replace_one_opts->update.crud.write_concern_owned = false; mongoc_replace_one_opts->update.crud.client_session = NULL; mongoc_replace_one_opts->update.crud.validate = _mongoc_default_replace_vflags; mongoc_replace_one_opts->update.bypass = false; bson_init (&mongoc_replace_one_opts->update.collation); mongoc_replace_one_opts->update.upsert = false; bson_init (&mongoc_replace_one_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_replace_one_opts->update.crud.writeConcern, error)) { return false; } mongoc_replace_one_opts->update.crud.write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_replace_one_opts->update.crud.client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_replace_one_opts->update.crud.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "bypassDocumentValidation")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_replace_one_opts->update.bypass, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_replace_one_opts->update.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "upsert")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_replace_one_opts->update.upsert, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_replace_one_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_replace_one_opts_cleanup (mongoc_replace_one_opts_t *mongoc_replace_one_opts) { if (mongoc_replace_one_opts->update.crud.write_concern_owned) { mongoc_write_concern_destroy (mongoc_replace_one_opts->update.crud.writeConcern); } bson_destroy (&mongoc_replace_one_opts->update.collation); bson_destroy (&mongoc_replace_one_opts->extra); } bool _mongoc_bulk_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_opts_t *mongoc_bulk_opts, bson_error_t *error) { bson_iter_t iter; mongoc_bulk_opts->writeConcern = NULL; mongoc_bulk_opts->write_concern_owned = false; mongoc_bulk_opts->ordered = true; mongoc_bulk_opts->client_session = NULL; bson_init (&mongoc_bulk_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, 
&mongoc_bulk_opts->writeConcern, error)) { return false; } mongoc_bulk_opts->write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "ordered")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_bulk_opts->ordered, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_bulk_opts->client_session, error)) { return false; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid option '%s'", bson_iter_key (&iter)); return false; } } return true; } void _mongoc_bulk_opts_cleanup (mongoc_bulk_opts_t *mongoc_bulk_opts) { if (mongoc_bulk_opts->write_concern_owned) { mongoc_write_concern_destroy (mongoc_bulk_opts->writeConcern); } bson_destroy (&mongoc_bulk_opts->extra); } bool _mongoc_bulk_insert_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_insert_opts_t *mongoc_bulk_insert_opts, bson_error_t *error) { bson_iter_t iter; mongoc_bulk_insert_opts->validate = _mongoc_default_insert_vflags; bson_init (&mongoc_bulk_insert_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_bulk_insert_opts->validate, error)) { return false; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid option '%s'", bson_iter_key (&iter)); return false; } } return true; } void _mongoc_bulk_insert_opts_cleanup (mongoc_bulk_insert_opts_t *mongoc_bulk_insert_opts) { bson_destroy (&mongoc_bulk_insert_opts->extra); } bool _mongoc_bulk_update_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_update_one_opts_t *mongoc_bulk_update_one_opts, bson_error_t *error) { bson_iter_t iter; mongoc_bulk_update_one_opts->update.validate = _mongoc_default_update_vflags; bson_init (&mongoc_bulk_update_one_opts->update.collation); mongoc_bulk_update_one_opts->update.upsert = false; mongoc_bulk_update_one_opts->update.multi = false; bson_init (&mongoc_bulk_update_one_opts->arrayFilters); bson_init (&mongoc_bulk_update_one_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_bulk_update_one_opts->update.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_bulk_update_one_opts->update.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "upsert")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_bulk_update_one_opts->update.upsert, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "multi")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_bulk_update_one_opts->update.multi, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "arrayFilters")) { if (!_mongoc_convert_array ( client, &iter, &mongoc_bulk_update_one_opts->arrayFilters, error)) { return false; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid option '%s'", bson_iter_key (&iter)); 
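         /* note that the bulk per-operation parsers are strict: unknown keys
            are rejected here instead of being copied into "extra" as the
            collection-level parsers above do */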
return false; } } return true; } void _mongoc_bulk_update_one_opts_cleanup (mongoc_bulk_update_one_opts_t *mongoc_bulk_update_one_opts) { bson_destroy (&mongoc_bulk_update_one_opts->update.collation); bson_destroy (&mongoc_bulk_update_one_opts->arrayFilters); bson_destroy (&mongoc_bulk_update_one_opts->extra); } bool _mongoc_bulk_update_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_update_many_opts_t *mongoc_bulk_update_many_opts, bson_error_t *error) { bson_iter_t iter; mongoc_bulk_update_many_opts->update.validate = _mongoc_default_update_vflags; bson_init (&mongoc_bulk_update_many_opts->update.collation); mongoc_bulk_update_many_opts->update.upsert = false; mongoc_bulk_update_many_opts->update.multi = true; bson_init (&mongoc_bulk_update_many_opts->arrayFilters); bson_init (&mongoc_bulk_update_many_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_bulk_update_many_opts->update.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_bulk_update_many_opts->update.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "upsert")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_bulk_update_many_opts->update.upsert, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "multi")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_bulk_update_many_opts->update.multi, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "arrayFilters")) { if (!_mongoc_convert_array ( client, &iter, &mongoc_bulk_update_many_opts->arrayFilters, error)) { return false; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid option '%s'", bson_iter_key (&iter)); return false; } } return true; } void _mongoc_bulk_update_many_opts_cleanup (mongoc_bulk_update_many_opts_t *mongoc_bulk_update_many_opts) { bson_destroy (&mongoc_bulk_update_many_opts->update.collation); bson_destroy (&mongoc_bulk_update_many_opts->arrayFilters); bson_destroy (&mongoc_bulk_update_many_opts->extra); } bool _mongoc_bulk_replace_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_replace_one_opts_t *mongoc_bulk_replace_one_opts, bson_error_t *error) { bson_iter_t iter; mongoc_bulk_replace_one_opts->update.validate = _mongoc_default_replace_vflags; bson_init (&mongoc_bulk_replace_one_opts->update.collation); mongoc_bulk_replace_one_opts->update.upsert = false; mongoc_bulk_replace_one_opts->update.multi = false; bson_init (&mongoc_bulk_replace_one_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "validate")) { if (!_mongoc_convert_validate_flags ( client, &iter, &mongoc_bulk_replace_one_opts->update.validate, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_bulk_replace_one_opts->update.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "upsert")) { if (!_mongoc_convert_bool ( client, 
&iter, &mongoc_bulk_replace_one_opts->update.upsert, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "multi")) { if (!_mongoc_convert_bool ( client, &iter, &mongoc_bulk_replace_one_opts->update.multi, error)) { return false; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid option '%s'", bson_iter_key (&iter)); return false; } } return true; } void _mongoc_bulk_replace_one_opts_cleanup (mongoc_bulk_replace_one_opts_t *mongoc_bulk_replace_one_opts) { bson_destroy (&mongoc_bulk_replace_one_opts->update.collation); bson_destroy (&mongoc_bulk_replace_one_opts->extra); } bool _mongoc_bulk_remove_one_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_remove_one_opts_t *mongoc_bulk_remove_one_opts, bson_error_t *error) { bson_iter_t iter; bson_init (&mongoc_bulk_remove_one_opts->remove.collation); mongoc_bulk_remove_one_opts->remove.limit = 1; bson_init (&mongoc_bulk_remove_one_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_bulk_remove_one_opts->remove.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "limit")) { if (!_mongoc_convert_int32_t ( client, &iter, &mongoc_bulk_remove_one_opts->remove.limit, error)) { return false; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid option '%s'", bson_iter_key (&iter)); return false; } } return true; } void _mongoc_bulk_remove_one_opts_cleanup (mongoc_bulk_remove_one_opts_t *mongoc_bulk_remove_one_opts) { bson_destroy (&mongoc_bulk_remove_one_opts->remove.collation); bson_destroy (&mongoc_bulk_remove_one_opts->extra); } bool _mongoc_bulk_remove_many_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_bulk_remove_many_opts_t *mongoc_bulk_remove_many_opts, bson_error_t *error) { bson_iter_t iter; bson_init (&mongoc_bulk_remove_many_opts->remove.collation); mongoc_bulk_remove_many_opts->remove.limit = 0; bson_init (&mongoc_bulk_remove_many_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_bulk_remove_many_opts->remove.collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "limit")) { if (!_mongoc_convert_int32_t ( client, &iter, &mongoc_bulk_remove_many_opts->remove.limit, error)) { return false; } } else { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid option '%s'", bson_iter_key (&iter)); return false; } } return true; } void _mongoc_bulk_remove_many_opts_cleanup (mongoc_bulk_remove_many_opts_t *mongoc_bulk_remove_many_opts) { bson_destroy (&mongoc_bulk_remove_many_opts->remove.collation); bson_destroy (&mongoc_bulk_remove_many_opts->extra); } bool _mongoc_change_stream_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_change_stream_opts_t *mongoc_change_stream_opts, bson_error_t *error) { bson_iter_t iter; mongoc_change_stream_opts->batchSize = 0; bson_init (&mongoc_change_stream_opts->resumeAfter); bson_init 
(&mongoc_change_stream_opts->startAfter); memset (&mongoc_change_stream_opts->startAtOperationTime, 0, sizeof (mongoc_timestamp_t)); mongoc_change_stream_opts->maxAwaitTimeMS = 0; mongoc_change_stream_opts->fullDocument = "default"; bson_init (&mongoc_change_stream_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "batchSize")) { if (!_mongoc_convert_int32_t ( client, &iter, &mongoc_change_stream_opts->batchSize, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "resumeAfter")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_change_stream_opts->resumeAfter, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "startAfter")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_change_stream_opts->startAfter, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "startAtOperationTime")) { if (!_mongoc_convert_timestamp ( client, &iter, &mongoc_change_stream_opts->startAtOperationTime, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "maxAwaitTimeMS")) { if (!_mongoc_convert_int64_positive ( client, &iter, &mongoc_change_stream_opts->maxAwaitTimeMS, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "fullDocument")) { if (!_mongoc_convert_utf8 ( client, &iter, &mongoc_change_stream_opts->fullDocument, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_change_stream_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_change_stream_opts_cleanup (mongoc_change_stream_opts_t *mongoc_change_stream_opts) { bson_destroy (&mongoc_change_stream_opts->resumeAfter); bson_destroy (&mongoc_change_stream_opts->startAfter); bson_destroy (&mongoc_change_stream_opts->extra); } bool _mongoc_create_index_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_create_index_opts_t *mongoc_create_index_opts, bson_error_t *error) { bson_iter_t iter; mongoc_create_index_opts->writeConcern = NULL; mongoc_create_index_opts->write_concern_owned = false; mongoc_create_index_opts->client_session = NULL; bson_init (&mongoc_create_index_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_create_index_opts->writeConcern, error)) { return false; } mongoc_create_index_opts->write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_create_index_opts->client_session, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_create_index_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_create_index_opts_cleanup (mongoc_create_index_opts_t *mongoc_create_index_opts) { if 
(mongoc_create_index_opts->write_concern_owned) { mongoc_write_concern_destroy (mongoc_create_index_opts->writeConcern); } bson_destroy (&mongoc_create_index_opts->extra); } bool _mongoc_read_write_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_read_write_opts_t *mongoc_read_write_opts, bson_error_t *error) { bson_iter_t iter; bson_init (&mongoc_read_write_opts->readConcern); mongoc_read_write_opts->writeConcern = NULL; mongoc_read_write_opts->write_concern_owned = false; mongoc_read_write_opts->client_session = NULL; bson_init (&mongoc_read_write_opts->collation); mongoc_read_write_opts->serverId = 0; bson_init (&mongoc_read_write_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "readConcern")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_read_write_opts->readConcern, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "writeConcern")) { if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_read_write_opts->writeConcern, error)) { return false; } mongoc_read_write_opts->write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "sessionId")) { if (!_mongoc_convert_session_id ( client, &iter, &mongoc_read_write_opts->client_session, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "collation")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_read_write_opts->collation, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "serverId")) { if (!_mongoc_convert_server_id ( client, &iter, &mongoc_read_write_opts->serverId, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_read_write_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_read_write_opts_cleanup (mongoc_read_write_opts_t *mongoc_read_write_opts) { bson_destroy (&mongoc_read_write_opts->readConcern); if (mongoc_read_write_opts->write_concern_owned) { mongoc_write_concern_destroy (mongoc_read_write_opts->writeConcern); } bson_destroy (&mongoc_read_write_opts->collation); bson_destroy (&mongoc_read_write_opts->extra); } bool _mongoc_gridfs_bucket_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_gridfs_bucket_opts_t *mongoc_gridfs_bucket_opts, bson_error_t *error) { bson_iter_t iter; mongoc_gridfs_bucket_opts->bucketName = "fs"; mongoc_gridfs_bucket_opts->chunkSizeBytes = 261120; mongoc_gridfs_bucket_opts->writeConcern = NULL; mongoc_gridfs_bucket_opts->write_concern_owned = false; mongoc_gridfs_bucket_opts->readConcern = NULL; bson_init (&mongoc_gridfs_bucket_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "bucketName")) { if (!_mongoc_convert_utf8 ( client, &iter, &mongoc_gridfs_bucket_opts->bucketName, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "chunkSizeBytes")) { if (!_mongoc_convert_int32_positive ( client, &iter, &mongoc_gridfs_bucket_opts->chunkSizeBytes, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "writeConcern")) 
{ if (!_mongoc_convert_write_concern ( client, &iter, &mongoc_gridfs_bucket_opts->writeConcern, error)) { return false; } mongoc_gridfs_bucket_opts->write_concern_owned = true; } else if (!strcmp (bson_iter_key (&iter), "readConcern")) { if (!_mongoc_convert_read_concern ( client, &iter, &mongoc_gridfs_bucket_opts->readConcern, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_gridfs_bucket_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_gridfs_bucket_opts_cleanup (mongoc_gridfs_bucket_opts_t *mongoc_gridfs_bucket_opts) { if (mongoc_gridfs_bucket_opts->write_concern_owned) { mongoc_write_concern_destroy (mongoc_gridfs_bucket_opts->writeConcern); } mongoc_read_concern_destroy (mongoc_gridfs_bucket_opts->readConcern); bson_destroy (&mongoc_gridfs_bucket_opts->extra); } bool _mongoc_gridfs_bucket_upload_opts_parse ( mongoc_client_t *client, const bson_t *opts, mongoc_gridfs_bucket_upload_opts_t *mongoc_gridfs_bucket_upload_opts, bson_error_t *error) { bson_iter_t iter; mongoc_gridfs_bucket_upload_opts->chunkSizeBytes = 0; bson_init (&mongoc_gridfs_bucket_upload_opts->metadata); bson_init (&mongoc_gridfs_bucket_upload_opts->extra); if (!opts) { return true; } if (!bson_iter_init (&iter, opts)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_key (&iter), "chunkSizeBytes")) { if (!_mongoc_convert_int32_positive ( client, &iter, &mongoc_gridfs_bucket_upload_opts->chunkSizeBytes, error)) { return false; } } else if (!strcmp (bson_iter_key (&iter), "metadata")) { if (!_mongoc_convert_document ( client, &iter, &mongoc_gridfs_bucket_upload_opts->metadata, error)) { return false; } } else { /* unrecognized values are copied to "extra" */ if (!BSON_APPEND_VALUE ( &mongoc_gridfs_bucket_upload_opts->extra, bson_iter_key (&iter), bson_iter_value (&iter))) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Invalid 'opts' parameter."); return false; } } } return true; } void _mongoc_gridfs_bucket_upload_opts_cleanup (mongoc_gridfs_bucket_upload_opts_t *mongoc_gridfs_bucket_upload_opts) { bson_destroy (&mongoc_gridfs_bucket_upload_opts->metadata); bson_destroy (&mongoc_gridfs_bucket_upload_opts->extra); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-prelude.h0000644000076500000240000000133013572250757025120 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined(MONGOC_INSIDE) && !defined(MONGOC_COMPILATION) #error "Only can be included directly." #endifmongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-queue-private.h0000644000076500000240000000307713572250757026266 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_QUEUE_PRIVATE_H #define MONGOC_QUEUE_PRIVATE_H #include #include "mongoc/mongoc-list-private.h" BSON_BEGIN_DECLS #define MONGOC_QUEUE_INITIALIZER \ { \ NULL, NULL \ } typedef struct _mongoc_queue_t mongoc_queue_t; typedef struct _mongoc_queue_item_t mongoc_queue_item_t; struct _mongoc_queue_t { mongoc_queue_item_t *head; mongoc_queue_item_t *tail; uint32_t length; }; struct _mongoc_queue_item_t { mongoc_queue_item_t *next; void *data; }; void _mongoc_queue_init (mongoc_queue_t *queue); void * _mongoc_queue_pop_head (mongoc_queue_t *queue); void * _mongoc_queue_pop_tail (mongoc_queue_t *queue); void _mongoc_queue_push_head (mongoc_queue_t *queue, void *data); void _mongoc_queue_push_tail (mongoc_queue_t *queue, void *data); uint32_t _mongoc_queue_get_length (const mongoc_queue_t *queue); BSON_END_DECLS #endif /* MONGOC_QUEUE_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-queue.c0000644000076500000240000000512713572250757024607 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-queue-private.h" void _mongoc_queue_init (mongoc_queue_t *queue) { BSON_ASSERT (queue); memset (queue, 0, sizeof *queue); } void _mongoc_queue_push_head (mongoc_queue_t *queue, void *data) { mongoc_queue_item_t *item; BSON_ASSERT (queue); BSON_ASSERT (data); item = (mongoc_queue_item_t *) bson_malloc0 (sizeof *item); item->next = queue->head; item->data = data; queue->head = item; if (!queue->tail) { queue->tail = item; } queue->length++; } void _mongoc_queue_push_tail (mongoc_queue_t *queue, void *data) { mongoc_queue_item_t *item; BSON_ASSERT (queue); BSON_ASSERT (data); item = (mongoc_queue_item_t *) bson_malloc0 (sizeof *item); item->data = data; if (queue->tail) { queue->tail->next = item; } else { queue->head = item; } queue->tail = item; queue->length++; } void * _mongoc_queue_pop_head (mongoc_queue_t *queue) { mongoc_queue_item_t *item; void *data = NULL; BSON_ASSERT (queue); if ((item = queue->head)) { if (!item->next) { queue->tail = NULL; } queue->head = item->next; data = item->data; bson_free (item); queue->length--; } return data; } void * _mongoc_queue_pop_tail (mongoc_queue_t *queue) { mongoc_queue_item_t *item; void *data = NULL; BSON_ASSERT (queue); if (queue->length == 0) { return NULL; } data = queue->tail->data; if (queue->length == 1) { bson_free (queue->tail); queue->head = queue->tail = NULL; } else { /* find item pointing at tail */ for (item = queue->head; item; item = item->next) { if (item->next == queue->tail) { item->next = NULL; bson_free (queue->tail); queue->tail = item; break; } } } queue->length--; return data; } uint32_t _mongoc_queue_get_length (const mongoc_queue_t *queue) { BSON_ASSERT (queue); return queue->length; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-rand-cng.c0000644000076500000240000000333513572250757025153 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL #include "mongoc/mongoc-rand.h" #include "mongoc/mongoc-rand-private.h" #include "mongoc/mongoc.h" #include #include #include #define NT_SUCCESS(Status) (((NTSTATUS) (Status)) >= 0) #define STATUS_UNSUCCESSFUL ((NTSTATUS) 0xC0000001L) int _mongoc_rand_bytes (uint8_t *buf, int num) { static BCRYPT_ALG_HANDLE algorithm = 0; NTSTATUS status = 0; if (!algorithm) { status = BCryptOpenAlgorithmProvider ( &algorithm, BCRYPT_RNG_ALGORITHM, NULL, 0); if (!NT_SUCCESS (status)) { MONGOC_ERROR ("BCryptOpenAlgorithmProvider(): %d", status); return 0; } } status = BCryptGenRandom (algorithm, buf, num, 0); if (NT_SUCCESS (status)) { return 1; } MONGOC_ERROR ("BCryptGenRandom(): %d", status); return 0; } void mongoc_rand_seed (const void *buf, int num) { /* N/A - OS Does not need entropy seed */ } void mongoc_rand_add (const void *buf, int num, double entropy) { /* N/A - OS Does not need entropy seed */ } int mongoc_rand_status (void) { return 1; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-rand-common-crypto.c0000644000076500000240000000237113572250757027211 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO #include "mongoc/mongoc-rand.h" #include "mongoc/mongoc-rand-private.h" #include "mongoc/mongoc.h" #include /* rumour has it this wasn't in standard Security.h in ~10.8 */ #include int _mongoc_rand_bytes (uint8_t *buf, int num) { return !SecRandomCopyBytes (kSecRandomDefault, num, buf); } void mongoc_rand_seed (const void *buf, int num) { /* No such thing in Common Crypto */ } void mongoc_rand_add (const void *buf, int num, double entropy) { /* No such thing in Common Crypto */ } int mongoc_rand_status (void) { return 1; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-rand-openssl.c0000644000076500000240000000215413572250757026065 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_CRYPTO_LIBCRYPTO #include "mongoc/mongoc-rand.h" #include "mongoc/mongoc-rand-private.h" #include "mongoc/mongoc.h" #include int _mongoc_rand_bytes (uint8_t *buf, int num) { return RAND_bytes (buf, num); } void mongoc_rand_seed (const void *buf, int num) { RAND_seed (buf, num); } void mongoc_rand_add (const void *buf, int num, double entropy) { RAND_add (buf, num, entropy); } int mongoc_rand_status (void) { return RAND_status (); } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-rand-private.h0000644000076500000240000000150713572250757026062 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_RAND_PRIVATE_H #define MONGOC_RAND_PRIVATE_H #include BSON_BEGIN_DECLS int _mongoc_rand_bytes (uint8_t *buf, int num); BSON_END_DECLS #endif /* MONGOC_RAND_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-rand.h0000644000076500000240000000174313572250757024414 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_RAND_H #define MONGOC_RAND_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS MONGOC_EXPORT (void) mongoc_rand_seed (const void *buf, int num); MONGOC_EXPORT (void) mongoc_rand_add (const void *buf, int num, double entropy); MONGOC_EXPORT (int) mongoc_rand_status (void); BSON_END_DECLS #endif /* MONGOC_RAND_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-read-concern-private.h0000644000076500000240000000222413572250757027473 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_READ_CONCERN_PRIVATE_H #define MONGOC_READ_CONCERN_PRIVATE_H #include #include "mongoc/mongoc-read-concern.h" BSON_BEGIN_DECLS struct _mongoc_read_concern_t { char *level; bool frozen; bson_t compiled; }; const bson_t * _mongoc_read_concern_get_bson (mongoc_read_concern_t *read_concern); mongoc_read_concern_t * _mongoc_read_concern_new_from_iter (const bson_iter_t *iter, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_READ_CONCERN_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-read-concern.c0000644000076500000240000001367013572250757026025 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-read-concern.h" #include "mongoc/mongoc-read-concern-private.h" static void _mongoc_read_concern_freeze (mongoc_read_concern_t *read_concern); /** * mongoc_read_concern_new: * * Create a new mongoc_read_concern_t. * * Returns: A newly allocated mongoc_read_concern_t. This should be freed * with mongoc_read_concern_destroy(). */ mongoc_read_concern_t * mongoc_read_concern_new (void) { mongoc_read_concern_t *read_concern; read_concern = (mongoc_read_concern_t *) bson_malloc0 (sizeof *read_concern); bson_init (&read_concern->compiled); return read_concern; } mongoc_read_concern_t * mongoc_read_concern_copy (const mongoc_read_concern_t *read_concern) { mongoc_read_concern_t *ret = NULL; if (read_concern) { ret = mongoc_read_concern_new (); ret->level = bson_strdup (read_concern->level); } return ret; } /** * mongoc_read_concern_destroy: * @read_concern: A mongoc_read_concern_t. * * Releases a mongoc_read_concern_t and all associated memory. */ void mongoc_read_concern_destroy (mongoc_read_concern_t *read_concern) { if (read_concern) { bson_destroy (&read_concern->compiled); bson_free (read_concern->level); bson_free (read_concern); } } const char * mongoc_read_concern_get_level (const mongoc_read_concern_t *read_concern) { BSON_ASSERT (read_concern); return read_concern->level; } /** * mongoc_read_concern_set_level: * @read_concern: A mongoc_read_concern_t. * @level: The read concern level * * Sets the read concern level. Any string is supported for future compatibility * but MongoDB 3.2 only accepts "local" and "majority", aka: * - MONGOC_READ_CONCERN_LEVEL_LOCAL * - MONGOC_READ_CONCERN_LEVEL_MAJORITY * MongoDB 3.4 added * - MONGOC_READ_CONCERN_LEVEL_LINEARIZABLE * * See the MongoDB docs for more information on readConcernLevel */ bool mongoc_read_concern_set_level (mongoc_read_concern_t *read_concern, const char *level) { BSON_ASSERT (read_concern); bson_free (read_concern->level); read_concern->level = bson_strdup (level); read_concern->frozen = false; return true; } /** * mongoc_read_concern_append: * @read_concern: (in): A mongoc_read_concern_t. * @opts: (out): A pointer to a bson document. * * Appends a read_concern document to command options to send to * a server. 
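 *
 * A minimal usage sketch (illustrative only; the "count" command and the
 * "majority" level are arbitrary examples, not taken from this file):
 *
 *    bson_t cmd = BSON_INITIALIZER;
 *    mongoc_read_concern_t *rc = mongoc_read_concern_new ();
 *
 *    BSON_APPEND_INT32 (&cmd, "count", 1);
 *    mongoc_read_concern_set_level (rc, MONGOC_READ_CONCERN_LEVEL_MAJORITY);
 *    if (!mongoc_read_concern_append (rc, &cmd)) {
 *       MONGOC_ERROR ("could not append readConcern");
 *    }
 *    mongoc_read_concern_destroy (rc);
 *    bson_destroy (&cmd);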
* * Returns true on success, false on failure. * */ bool mongoc_read_concern_append (mongoc_read_concern_t *read_concern, bson_t *command) { BSON_ASSERT (read_concern); if (!read_concern->level) { return true; } if (!bson_append_document (command, "readConcern", 11, _mongoc_read_concern_get_bson (read_concern))) { MONGOC_ERROR ("Could not append readConcern to command."); return false; } return true; } /** * mongoc_read_concern_is_default: * @read_concern: A const mongoc_read_concern_t. * * Returns true when read_concern has not been modified. */ bool mongoc_read_concern_is_default (const mongoc_read_concern_t *read_concern) { return !read_concern || !read_concern->level; } /** * mongoc_read_concern_get_bson: * @read_concern: A mongoc_read_concern_t. * * This is an internal function. * * Returns: A bson_t representing the read concern, which is owned by the * mongoc_read_concern_t instance and should not be modified or freed. */ const bson_t * _mongoc_read_concern_get_bson (mongoc_read_concern_t *read_concern) { if (!read_concern->frozen) { _mongoc_read_concern_freeze (read_concern); } return &read_concern->compiled; } /** * _mongoc_read_concern_new_from_iter: * * Create a new mongoc_read_concern_t from an iterator positioned on * a "readConcern" document. * * Returns: A newly allocated mongoc_read_concern_t. This should be freed * with mongoc_read_concern_destroy(). */ mongoc_read_concern_t * _mongoc_read_concern_new_from_iter (const bson_iter_t *iter, bson_error_t *error) { bson_iter_t inner; mongoc_read_concern_t *read_concern; BSON_ASSERT (iter); read_concern = mongoc_read_concern_new (); if (!BSON_ITER_HOLDS_DOCUMENT (iter)) { goto fail; } BSON_ASSERT (bson_iter_recurse (iter, &inner)); if (!bson_iter_find (&inner, "level") || !BSON_ITER_HOLDS_UTF8 (&inner)) { goto fail; } mongoc_read_concern_set_level (read_concern, bson_iter_utf8 (&inner, NULL)); return read_concern; fail: bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid readConcern"); mongoc_read_concern_destroy (read_concern); return NULL; } /** * mongoc_read_concern_freeze: * @read_concern: A mongoc_read_concern_t. * * This is an internal function. * * Encodes the read concern into a bson_t, which may then be returned by * mongoc_read_concern_get_bson(). */ static void _mongoc_read_concern_freeze (mongoc_read_concern_t *read_concern) { bson_t *compiled; BSON_ASSERT (read_concern); compiled = &read_concern->compiled; read_concern->frozen = true; bson_reinit (compiled); if (read_concern->level) { BSON_APPEND_UTF8 (compiled, "level", read_concern->level); } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-read-concern.h0000644000076500000240000000352113572250757026024 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_READ_CONCERN_H #define MONGOC_READ_CONCERN_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS #define MONGOC_READ_CONCERN_LEVEL_AVAILABLE "available" #define MONGOC_READ_CONCERN_LEVEL_LOCAL "local" #define MONGOC_READ_CONCERN_LEVEL_MAJORITY "majority" #define MONGOC_READ_CONCERN_LEVEL_LINEARIZABLE "linearizable" #define MONGOC_READ_CONCERN_LEVEL_SNAPSHOT "snapshot" typedef struct _mongoc_read_concern_t mongoc_read_concern_t; MONGOC_EXPORT (mongoc_read_concern_t *) mongoc_read_concern_new (void); MONGOC_EXPORT (mongoc_read_concern_t *) mongoc_read_concern_copy (const mongoc_read_concern_t *read_concern); MONGOC_EXPORT (void) mongoc_read_concern_destroy (mongoc_read_concern_t *read_concern); MONGOC_EXPORT (const char *) mongoc_read_concern_get_level (const mongoc_read_concern_t *read_concern); MONGOC_EXPORT (bool) mongoc_read_concern_set_level (mongoc_read_concern_t *read_concern, const char *level); MONGOC_EXPORT (bool) mongoc_read_concern_append (mongoc_read_concern_t *read_concern, bson_t *doc); MONGOC_EXPORT (bool) mongoc_read_concern_is_default (const mongoc_read_concern_t *read_concern); BSON_END_DECLS #endif /* MONGOC_READ_CONCERN_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-read-prefs-private.h0000644000076500000240000000360113572250760027155 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_READ_PREFS_PRIVATE_H #define MONGOC_READ_PREFS_PRIVATE_H #include #include "mongoc/mongoc-cluster-private.h" #include "mongoc/mongoc-read-prefs.h" BSON_BEGIN_DECLS struct _mongoc_read_prefs_t { mongoc_read_mode_t mode; bson_t tags; int64_t max_staleness_seconds; }; typedef struct _mongoc_assemble_query_result_t { bson_t *assembled_query; bool query_owned; mongoc_query_flags_t flags; } mongoc_assemble_query_result_t; #define ASSEMBLE_QUERY_RESULT_INIT \ { \ NULL, false, MONGOC_QUERY_NONE \ } const char * _mongoc_read_mode_as_str (mongoc_read_mode_t mode); void assemble_query (const mongoc_read_prefs_t *read_prefs, const mongoc_server_stream_t *server_stream, const bson_t *query_bson, mongoc_query_flags_t initial_flags, mongoc_assemble_query_result_t *result); void assemble_query_result_cleanup (mongoc_assemble_query_result_t *result); bool _mongoc_read_prefs_validate (const mongoc_read_prefs_t *read_prefs, bson_error_t *error); #define IS_PREF_PRIMARY(_pref) \ (!(_pref) || ((_pref)->mode == MONGOC_READ_PRIMARY)) BSON_END_DECLS #endif /* MONGOC_READ_PREFS_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-read-prefs.c0000644000076500000240000002430213572250760025501 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-trace-private.h" mongoc_read_prefs_t * mongoc_read_prefs_new (mongoc_read_mode_t mode) { mongoc_read_prefs_t *read_prefs; read_prefs = (mongoc_read_prefs_t *) bson_malloc0 (sizeof *read_prefs); read_prefs->mode = mode; bson_init (&read_prefs->tags); read_prefs->max_staleness_seconds = MONGOC_NO_MAX_STALENESS; return read_prefs; } mongoc_read_mode_t mongoc_read_prefs_get_mode (const mongoc_read_prefs_t *read_prefs) { return read_prefs ? read_prefs->mode : MONGOC_READ_PRIMARY; } void mongoc_read_prefs_set_mode (mongoc_read_prefs_t *read_prefs, mongoc_read_mode_t mode) { BSON_ASSERT (read_prefs); BSON_ASSERT (mode <= MONGOC_READ_NEAREST); read_prefs->mode = mode; } const bson_t * mongoc_read_prefs_get_tags (const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (read_prefs); return &read_prefs->tags; } void mongoc_read_prefs_set_tags (mongoc_read_prefs_t *read_prefs, const bson_t *tags) { BSON_ASSERT (read_prefs); bson_destroy (&read_prefs->tags); if (tags) { bson_copy_to (tags, &read_prefs->tags); } else { bson_init (&read_prefs->tags); } } void mongoc_read_prefs_add_tag (mongoc_read_prefs_t *read_prefs, const bson_t *tag) { bson_t empty = BSON_INITIALIZER; char str[16]; int key; BSON_ASSERT (read_prefs); key = bson_count_keys (&read_prefs->tags); bson_snprintf (str, sizeof str, "%d", key); if (tag) { bson_append_document (&read_prefs->tags, str, -1, tag); } else { bson_append_document (&read_prefs->tags, str, -1, &empty); } bson_destroy (&empty); } int64_t mongoc_read_prefs_get_max_staleness_seconds ( const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (read_prefs); return read_prefs->max_staleness_seconds; } void mongoc_read_prefs_set_max_staleness_seconds (mongoc_read_prefs_t *read_prefs, int64_t max_staleness_seconds) { BSON_ASSERT (read_prefs); read_prefs->max_staleness_seconds = max_staleness_seconds; } bool mongoc_read_prefs_is_valid (const mongoc_read_prefs_t *read_prefs) { BSON_ASSERT (read_prefs); /* * Tags or maxStalenessSeconds are not supported with PRIMARY mode. 
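 *
 * For example (illustrative only), a preference configured as
 *
 *    prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY);
 *    mongoc_read_prefs_set_max_staleness_seconds (prefs, 120);
 *
 * is rejected by this function, because maxStalenessSeconds is combined
 * with the PRIMARY mode.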
*/ if (read_prefs->mode == MONGOC_READ_PRIMARY) { if (!bson_empty (&read_prefs->tags) || read_prefs->max_staleness_seconds != MONGOC_NO_MAX_STALENESS) { return false; } } if (read_prefs->max_staleness_seconds != MONGOC_NO_MAX_STALENESS && read_prefs->max_staleness_seconds <= 0) { return false; } return true; } void mongoc_read_prefs_destroy (mongoc_read_prefs_t *read_prefs) { if (read_prefs) { bson_destroy (&read_prefs->tags); bson_free (read_prefs); } } mongoc_read_prefs_t * mongoc_read_prefs_copy (const mongoc_read_prefs_t *read_prefs) { mongoc_read_prefs_t *ret = NULL; if (read_prefs) { ret = mongoc_read_prefs_new (read_prefs->mode); bson_destroy (&ret->tags); bson_copy_to (&read_prefs->tags, &ret->tags); ret->max_staleness_seconds = read_prefs->max_staleness_seconds; } return ret; } const char * _mongoc_read_mode_as_str (mongoc_read_mode_t mode) { switch (mode) { case MONGOC_READ_PRIMARY: return "primary"; case MONGOC_READ_PRIMARY_PREFERRED: return "primaryPreferred"; case MONGOC_READ_SECONDARY: return "secondary"; case MONGOC_READ_SECONDARY_PREFERRED: return "secondaryPreferred"; case MONGOC_READ_NEAREST: return "nearest"; default: return ""; } } /* Update result with the read prefs, following Server Selection Spec. * The driver must have discovered the server is a mongos. */ static void _apply_read_preferences_mongos ( const mongoc_read_prefs_t *read_prefs, const bson_t *query_bson, mongoc_assemble_query_result_t *result /* OUT */) { mongoc_read_mode_t mode; const bson_t *tags = NULL; bson_t child; const char *mode_str; int64_t max_staleness_seconds; mode = mongoc_read_prefs_get_mode (read_prefs); if (read_prefs) { tags = mongoc_read_prefs_get_tags (read_prefs); } /* Server Selection Spec says: * * For mode 'primary', drivers MUST NOT set the slaveOK wire protocol flag * and MUST NOT use $readPreference * * For mode 'secondary', drivers MUST set the slaveOK wire protocol flag and * MUST also use $readPreference * * For mode 'primaryPreferred', drivers MUST set the slaveOK wire protocol * flag and MUST also use $readPreference * * For mode 'secondaryPreferred', drivers MUST set the slaveOK wire protocol * flag. If the read preference contains a non-empty tag_sets parameter, * drivers MUST use $readPreference; otherwise, drivers MUST NOT use * $readPreference * * For mode 'nearest', drivers MUST set the slaveOK wire protocol flag and * MUST also use $readPreference */ if (mode == MONGOC_READ_SECONDARY_PREFERRED && bson_empty0 (tags)) { result->flags |= MONGOC_QUERY_SLAVE_OK; } else if (mode != MONGOC_READ_PRIMARY) { result->flags |= MONGOC_QUERY_SLAVE_OK; /* Server Selection Spec: "When any $ modifier is used, including the * $readPreference modifier, the query MUST be provided using the $query * modifier". * * This applies to commands, too. 
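 *
 * As an illustration of the shape produced below (for a plain find with
 * mode 'secondary' and no tags or maxStalenessSeconds):
 *
 *    { "$query" : { ...original query... },
 *      "$readPreference" : { "mode" : "secondary" } }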
*/ result->assembled_query = bson_new (); result->query_owned = true; if (bson_has_field (query_bson, "$query")) { bson_concat (result->assembled_query, query_bson); } else { bson_append_document ( result->assembled_query, "$query", 6, query_bson); } bson_append_document_begin ( result->assembled_query, "$readPreference", 15, &child); mode_str = _mongoc_read_mode_as_str (mode); bson_append_utf8 (&child, "mode", 4, mode_str, -1); if (!bson_empty0 (tags)) { bson_append_array (&child, "tags", 4, tags); } max_staleness_seconds = mongoc_read_prefs_get_max_staleness_seconds (read_prefs); if (max_staleness_seconds != MONGOC_NO_MAX_STALENESS) { bson_append_int64 ( &child, "maxStalenessSeconds", 19, max_staleness_seconds); } bson_append_document_end (result->assembled_query, &child); } } /* *-------------------------------------------------------------------------- * * assemble_query -- * * Update @result based on @read_prefs, following the Server Selection * Spec. * * Side effects: * Sets @result->assembled_query and @result->flags. * * Note: * This function, the mongoc_assemble_query_result_t struct, and all * related functions are only used for find operations with OP_QUERY. * Remove them once we have implemented exhaust cursors with OP_MSG in * the server, and all previous server versions are EOL. * *-------------------------------------------------------------------------- */ void assemble_query (const mongoc_read_prefs_t *read_prefs, const mongoc_server_stream_t *server_stream, const bson_t *query_bson, mongoc_query_flags_t initial_flags, mongoc_assemble_query_result_t *result /* OUT */) { mongoc_server_description_type_t server_type; ENTRY; BSON_ASSERT (server_stream); BSON_ASSERT (query_bson); BSON_ASSERT (result); /* default values */ result->assembled_query = (bson_t *) query_bson; result->query_owned = false; result->flags = initial_flags; server_type = server_stream->sd->type; switch (server_stream->topology_type) { case MONGOC_TOPOLOGY_SINGLE: if (server_type == MONGOC_SERVER_MONGOS) { _apply_read_preferences_mongos (read_prefs, query_bson, result); } else { /* Server Selection Spec: for topology type single and server types * besides mongos, "clients MUST always set the slaveOK wire protocol * flag on reads to ensure that any server type can handle the * request." */ result->flags |= MONGOC_QUERY_SLAVE_OK; } break; case MONGOC_TOPOLOGY_RS_NO_PRIMARY: case MONGOC_TOPOLOGY_RS_WITH_PRIMARY: /* Server Selection Spec: for RS topology types, "For all read * preferences modes except primary, clients MUST set the slaveOK wire * protocol flag to ensure that any suitable server can handle the * request. Clients MUST NOT set the slaveOK wire protocol flag if the * read preference mode is primary. 
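 *
 * Concretely (illustrative): for any non-primary mode the slaveOK flag is
 * OR'd into result->flags just below; for MONGOC_READ_PRIMARY, or a NULL
 * read preference, result->flags keeps the initial_flags passed in.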
*/ if (read_prefs && read_prefs->mode != MONGOC_READ_PRIMARY) { result->flags |= MONGOC_QUERY_SLAVE_OK; } break; case MONGOC_TOPOLOGY_SHARDED: _apply_read_preferences_mongos (read_prefs, query_bson, result); break; case MONGOC_TOPOLOGY_UNKNOWN: case MONGOC_TOPOLOGY_DESCRIPTION_TYPES: default: /* must not call _apply_read_preferences with unknown topology type */ BSON_ASSERT (false); } EXIT; } void assemble_query_result_cleanup (mongoc_assemble_query_result_t *result) { ENTRY; BSON_ASSERT (result); if (result->query_owned) { bson_destroy (result->assembled_query); } EXIT; } bool _mongoc_read_prefs_validate (const mongoc_read_prefs_t *read_prefs, bson_error_t *error) { if (read_prefs && !mongoc_read_prefs_is_valid (read_prefs)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid mongoc_read_prefs_t"); return false; } return true; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-read-prefs.h0000644000076500000240000000474113572250760025513 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_READ_PREFS_H #define MONGOC_READ_PREFS_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-config.h" BSON_BEGIN_DECLS #define MONGOC_NO_MAX_STALENESS -1 #define MONGOC_SMALLEST_MAX_STALENESS_SECONDS 90 typedef struct _mongoc_read_prefs_t mongoc_read_prefs_t; typedef enum { MONGOC_READ_PRIMARY = (1 << 0), MONGOC_READ_SECONDARY = (1 << 1), MONGOC_READ_PRIMARY_PREFERRED = (1 << 2) | MONGOC_READ_PRIMARY, MONGOC_READ_SECONDARY_PREFERRED = (1 << 2) | MONGOC_READ_SECONDARY, MONGOC_READ_NEAREST = (1 << 3) | MONGOC_READ_SECONDARY, } mongoc_read_mode_t; MONGOC_EXPORT (mongoc_read_prefs_t *) mongoc_read_prefs_new (mongoc_read_mode_t read_mode); MONGOC_EXPORT (mongoc_read_prefs_t *) mongoc_read_prefs_copy (const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (void) mongoc_read_prefs_destroy (mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (mongoc_read_mode_t) mongoc_read_prefs_get_mode (const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (void) mongoc_read_prefs_set_mode (mongoc_read_prefs_t *read_prefs, mongoc_read_mode_t mode); MONGOC_EXPORT (const bson_t *) mongoc_read_prefs_get_tags (const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (void) mongoc_read_prefs_set_tags (mongoc_read_prefs_t *read_prefs, const bson_t *tags); MONGOC_EXPORT (void) mongoc_read_prefs_add_tag (mongoc_read_prefs_t *read_prefs, const bson_t *tag); MONGOC_EXPORT (int64_t) mongoc_read_prefs_get_max_staleness_seconds ( const mongoc_read_prefs_t *read_prefs); MONGOC_EXPORT (void) mongoc_read_prefs_set_max_staleness_seconds (mongoc_read_prefs_t *read_prefs, int64_t max_staleness_seconds); MONGOC_EXPORT (bool) mongoc_read_prefs_is_valid (const mongoc_read_prefs_t *read_prefs); BSON_END_DECLS #endif /* MONGOC_READ_PREFS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-rpc-private.h0000644000076500000240000001223013572250760025707 0ustar alcaeusstaff/* * Copyright 2013 
MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_RPC_PRIVATE_H #define MONGOC_RPC_PRIVATE_H #include #include #include "mongoc/mongoc-array-private.h" #include "mongoc/mongoc-cmd-private.h" #include "mongoc/mongoc-iovec.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-flags.h" /* forward declaration */ struct _mongoc_cluster_t; BSON_BEGIN_DECLS typedef struct _mongoc_rpc_section_t { uint8_t payload_type; union { /* payload_type == 0 */ const uint8_t *bson_document; /* payload_type == 1 */ struct { int32_t size; uint32_t size_le; const char *identifier; const uint8_t *bson_documents; } sequence; } payload; } mongoc_rpc_section_t; #define RPC(_name, _code) \ typedef struct { \ _code \ } mongoc_rpc_##_name##_t; #define ENUM_FIELD(_name) uint32_t _name; #define INT32_FIELD(_name) int32_t _name; #define UINT8_FIELD(_name) uint8_t _name; #define INT64_FIELD(_name) int64_t _name; #define INT64_ARRAY_FIELD(_len, _name) \ int32_t _len; \ int64_t *_name; #define CSTRING_FIELD(_name) const char *_name; #define BSON_FIELD(_name) const uint8_t *_name; #define BSON_ARRAY_FIELD(_name) \ const uint8_t *_name; \ int32_t _name##_len; #define IOVEC_ARRAY_FIELD(_name) \ const mongoc_iovec_t *_name; \ int32_t n_##_name; \ mongoc_iovec_t _name##_recv; #define SECTION_ARRAY_FIELD(_name) \ mongoc_rpc_section_t _name[2]; \ int32_t n_##_name; #define RAW_BUFFER_FIELD(_name) \ const uint8_t *_name; \ int32_t _name##_len; #define BSON_OPTIONAL(_check, _code) _code #pragma pack(1) #include "op-delete.def" #include "op-get-more.def" #include "op-header.def" #include "op-insert.def" #include "op-kill-cursors.def" #include "op-query.def" #include "op-reply.def" #include "op-reply-header.def" #include "op-update.def" #include "op-compressed.def" /* restore default packing */ #pragma pack() #include "op-msg.def" typedef union { mongoc_rpc_delete_t delete_; mongoc_rpc_get_more_t get_more; mongoc_rpc_header_t header; mongoc_rpc_insert_t insert; mongoc_rpc_kill_cursors_t kill_cursors; mongoc_rpc_msg_t msg; mongoc_rpc_query_t query; mongoc_rpc_reply_t reply; mongoc_rpc_reply_header_t reply_header; mongoc_rpc_update_t update; mongoc_rpc_compressed_t compressed; } mongoc_rpc_t; BSON_STATIC_ASSERT2 (sizeof_rpc_header, sizeof (mongoc_rpc_header_t) == 16); BSON_STATIC_ASSERT2 (offsetof_rpc_header, offsetof (mongoc_rpc_header_t, opcode) == offsetof (mongoc_rpc_reply_t, opcode)); BSON_STATIC_ASSERT2 (sizeof_reply_header, sizeof (mongoc_rpc_reply_header_t) == 36); #undef RPC #undef ENUM_FIELD #undef UINT8_FIELD #undef INT32_FIELD #undef INT64_FIELD #undef INT64_ARRAY_FIELD #undef CSTRING_FIELD #undef BSON_FIELD #undef BSON_ARRAY_FIELD #undef IOVEC_ARRAY_FIELD #undef SECTION_ARRAY_FIELD #undef BSON_OPTIONAL #undef RAW_BUFFER_FIELD void _mongoc_rpc_gather (mongoc_rpc_t *rpc, mongoc_array_t *array); void _mongoc_rpc_swab_to_le (mongoc_rpc_t *rpc); void _mongoc_rpc_swab_from_le (mongoc_rpc_t *rpc); void _mongoc_rpc_printf (mongoc_rpc_t *rpc); bool _mongoc_rpc_scatter 
(mongoc_rpc_t *rpc, const uint8_t *buf, size_t buflen); bool _mongoc_rpc_scatter_reply_header_only (mongoc_rpc_t *rpc, const uint8_t *buf, size_t buflen); bool _mongoc_rpc_get_first_document (mongoc_rpc_t *rpc, bson_t *reply); bool _mongoc_rpc_reply_get_first (mongoc_rpc_reply_t *reply, bson_t *bson); void _mongoc_rpc_prep_command (mongoc_rpc_t *rpc, const char *cmd_ns, mongoc_cmd_t *cmd); bool _mongoc_rpc_check_ok (mongoc_rpc_t *rpc, int32_t error_api_version, bson_error_t *error /* OUT */, bson_t *error_doc /* OUT */); bool _mongoc_cmd_check_ok (const bson_t *doc, int32_t error_api_version, bson_error_t *error); bool _mongoc_cmd_check_ok_no_wce (const bson_t *doc, int32_t error_api_version, bson_error_t *error); bool _mongoc_rpc_decompress (mongoc_rpc_t *rpc_le, uint8_t *buf, size_t buflen); char * _mongoc_rpc_compress (struct _mongoc_cluster_t *cluster, int32_t compressor_id, mongoc_rpc_t *rpc_le, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_RPC_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-rpc.c0000644000076500000240000013716713572250760024253 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-compression-private.h" #include "mongoc/mongoc-cluster-private.h" #define RPC(_name, _code) \ static void _mongoc_rpc_gather_##_name (mongoc_rpc_##_name##_t *rpc, \ mongoc_rpc_header_t *header, \ mongoc_array_t *array) \ { \ mongoc_iovec_t iov; \ BSON_ASSERT (rpc); \ BSON_ASSERT (array); \ header->msg_len = 0; \ _code \ } #define UINT8_FIELD(_name) \ iov.iov_base = (void *) &rpc->_name; \ iov.iov_len = 1; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); #define INT32_FIELD(_name) \ iov.iov_base = (void *) &rpc->_name; \ iov.iov_len = 4; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); #define ENUM_FIELD INT32_FIELD #define INT64_FIELD(_name) \ iov.iov_base = (void *) &rpc->_name; \ iov.iov_len = 8; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); #define CSTRING_FIELD(_name) \ BSON_ASSERT (rpc->_name); \ iov.iov_base = (void *) rpc->_name; \ iov.iov_len = strlen (rpc->_name) + 1; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); #define BSON_FIELD(_name) \ do { \ int32_t __l; \ memcpy (&__l, rpc->_name, 4); \ __l = BSON_UINT32_FROM_LE (__l); \ iov.iov_base = (void *) rpc->_name; \ iov.iov_len = __l; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); \ } while (0); #define BSON_OPTIONAL(_check, _code) \ if (rpc->_check) { \ _code \ } #define BSON_ARRAY_FIELD(_name) \ if (rpc->_name##_len) { \ iov.iov_base = (void *) rpc->_name; \ iov.iov_len = rpc->_name##_len; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val 
(array, iov); \ } #define IOVEC_ARRAY_FIELD(_name) \ do { \ ssize_t _i; \ BSON_ASSERT (rpc->n_##_name); \ for (_i = 0; _i < rpc->n_##_name; _i++) { \ BSON_ASSERT (rpc->_name[_i].iov_len); \ header->msg_len += (int32_t) rpc->_name[_i].iov_len; \ _mongoc_array_append_val (array, rpc->_name[_i]); \ } \ } while (0); #define SECTION_ARRAY_FIELD(_name) \ do { \ ssize_t _i; \ BSON_ASSERT (rpc->n_##_name); \ for (_i = 0; _i < rpc->n_##_name; _i++) { \ int32_t __l; \ iov.iov_base = (void *) &rpc->_name[_i].payload_type; \ iov.iov_len = 1; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); \ switch (rpc->_name[_i].payload_type) { \ case 0: \ memcpy (&__l, rpc->_name[_i].payload.bson_document, 4); \ __l = BSON_UINT32_FROM_LE (__l); \ iov.iov_base = (void *) rpc->_name[_i].payload.bson_document; \ iov.iov_len = __l; \ break; \ case 1: \ rpc->_name[_i].payload.sequence.size_le = \ BSON_UINT32_TO_LE (rpc->_name[_i].payload.sequence.size); \ iov.iov_base = (void *) &rpc->_name[_i].payload.sequence.size_le; \ iov.iov_len = 4; \ header->msg_len += 4; \ _mongoc_array_append_val (array, iov); \ iov.iov_base = \ (void *) rpc->_name[_i].payload.sequence.identifier; \ iov.iov_len = \ strlen (rpc->_name[_i].payload.sequence.identifier) + 1; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); \ iov.iov_base = \ (void *) rpc->_name[_i].payload.sequence.bson_documents; \ iov.iov_len = \ rpc->_name[_i].payload.sequence.size - iov.iov_len - 4; \ break; \ default: \ MONGOC_ERROR ("Unknown Payload Type: %d", \ rpc->_name[_i].payload_type); \ BSON_ASSERT (0); \ } \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); \ } \ } while (0); #define RAW_BUFFER_FIELD(_name) \ iov.iov_base = (void *) rpc->_name; \ iov.iov_len = rpc->_name##_len; \ BSON_ASSERT (iov.iov_len); \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); #define INT64_ARRAY_FIELD(_len, _name) \ iov.iov_base = (void *) &rpc->_len; \ iov.iov_len = 4; \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); \ iov.iov_base = (void *) rpc->_name; \ iov.iov_len = rpc->_len * 8; \ BSON_ASSERT (iov.iov_len); \ header->msg_len += (int32_t) iov.iov_len; \ _mongoc_array_append_val (array, iov); #include "op-delete.def" #include "op-get-more.def" #include "op-insert.def" #include "op-kill-cursors.def" #include "op-msg.def" #include "op-query.def" #include "op-reply.def" #include "op-compressed.def" #include "op-update.def" #undef RPC #undef ENUM_FIELD #undef UINT8_FIELD #undef INT32_FIELD #undef INT64_FIELD #undef INT64_ARRAY_FIELD #undef CSTRING_FIELD #undef BSON_FIELD #undef BSON_ARRAY_FIELD #undef IOVEC_ARRAY_FIELD #undef SECTION_ARRAY_FIELD #undef RAW_BUFFER_FIELD #undef BSON_OPTIONAL #if BSON_BYTE_ORDER == BSON_BIG_ENDIAN #define RPC(_name, _code) \ static void _mongoc_rpc_swab_to_le_##_name (mongoc_rpc_##_name##_t *rpc) \ { \ BSON_ASSERT (rpc); \ _code \ } #define UINT8_FIELD(_name) #define INT32_FIELD(_name) rpc->_name = BSON_UINT32_FROM_LE (rpc->_name); #define ENUM_FIELD INT32_FIELD #define INT64_FIELD(_name) rpc->_name = BSON_UINT64_FROM_LE (rpc->_name); #define CSTRING_FIELD(_name) #define BSON_FIELD(_name) #define BSON_ARRAY_FIELD(_name) #define IOVEC_ARRAY_FIELD(_name) #define SECTION_ARRAY_FIELD(_name) #define BSON_OPTIONAL(_check, _code) \ if (rpc->_check) { \ _code \ } #define RAW_BUFFER_FIELD(_name) #define INT64_ARRAY_FIELD(_len, _name) \ do { \ ssize_t i; \ for (i = 0; i < rpc->_len; i++) { \ 
rpc->_name[i] = BSON_UINT64_FROM_LE (rpc->_name[i]); \ } \ rpc->_len = BSON_UINT32_FROM_LE (rpc->_len); \ } while (0); #include "op-delete.def" #include "op-get-more.def" #include "op-insert.def" #include "op-kill-cursors.def" #include "op-msg.def" #include "op-query.def" #include "op-reply.def" #include "op-compressed.def" #include "op-update.def" #undef RPC #undef INT64_ARRAY_FIELD #define RPC(_name, _code) \ static void _mongoc_rpc_swab_from_le_##_name (mongoc_rpc_##_name##_t *rpc) \ { \ BSON_ASSERT (rpc); \ _code \ } #define INT64_ARRAY_FIELD(_len, _name) \ do { \ ssize_t i; \ rpc->_len = BSON_UINT32_FROM_LE (rpc->_len); \ for (i = 0; i < rpc->_len; i++) { \ rpc->_name[i] = BSON_UINT64_FROM_LE (rpc->_name[i]); \ } \ } while (0); #include "op-delete.def" #include "op-get-more.def" #include "op-insert.def" #include "op-kill-cursors.def" #include "op-msg.def" #include "op-query.def" #include "op-reply.def" #include "op-compressed.def" #include "op-update.def" #undef RPC #undef ENUM_FIELD #undef UINT8_FIELD #undef INT32_FIELD #undef INT64_FIELD #undef INT64_ARRAY_FIELD #undef CSTRING_FIELD #undef BSON_FIELD #undef BSON_ARRAY_FIELD #undef IOVEC_ARRAY_FIELD #undef SECTION_ARRAY_FIELD #undef BSON_OPTIONAL #undef RAW_BUFFER_FIELD #endif /* BSON_BYTE_ORDER == BSON_BIG_ENDIAN */ #define RPC(_name, _code) \ static void _mongoc_rpc_printf_##_name (mongoc_rpc_##_name##_t *rpc) \ { \ BSON_ASSERT (rpc); \ _code \ } #define UINT8_FIELD(_name) printf (" " #_name " : %u\n", rpc->_name); #define INT32_FIELD(_name) printf (" " #_name " : %d\n", rpc->_name); #define ENUM_FIELD(_name) printf (" " #_name " : %u\n", rpc->_name); #define INT64_FIELD(_name) \ printf (" " #_name " : %" PRIi64 "\n", (int64_t) rpc->_name); #define CSTRING_FIELD(_name) printf (" " #_name " : %s\n", rpc->_name); #define BSON_FIELD(_name) \ do { \ bson_t b; \ char *s; \ int32_t __l; \ memcpy (&__l, rpc->_name, 4); \ __l = BSON_UINT32_FROM_LE (__l); \ BSON_ASSERT (bson_init_static (&b, rpc->_name, __l)); \ s = bson_as_relaxed_extended_json (&b, NULL); \ printf (" " #_name " : %s\n", s); \ bson_free (s); \ bson_destroy (&b); \ } while (0); #define BSON_ARRAY_FIELD(_name) \ do { \ bson_reader_t *__r; \ bool __eof; \ const bson_t *__b; \ __r = bson_reader_new_from_data (rpc->_name, rpc->_name##_len); \ while ((__b = bson_reader_read (__r, &__eof))) { \ char *s = bson_as_relaxed_extended_json (__b, NULL); \ printf (" " #_name " : %s\n", s); \ bson_free (s); \ } \ bson_reader_destroy (__r); \ } while (0); #define IOVEC_ARRAY_FIELD(_name) \ do { \ ssize_t _i; \ size_t _j; \ for (_i = 0; _i < rpc->n_##_name; _i++) { \ printf (" " #_name " : "); \ for (_j = 0; _j < rpc->_name[_i].iov_len; _j++) { \ uint8_t u; \ u = ((char *) rpc->_name[_i].iov_base)[_j]; \ printf (" %02x", u); \ } \ printf ("\n"); \ } \ } while (0); #define SECTION_ARRAY_FIELD(_name) \ do { \ ssize_t _i; \ printf (" " #_name " : %d\n", rpc->n_##_name); \ for (_i = 0; _i < rpc->n_##_name; _i++) { \ if (rpc->_name[_i].payload_type == 0) { \ do { \ bson_t b; \ char *s; \ int32_t __l; \ memcpy (&__l, rpc->_name[_i].payload.bson_document, 4); \ __l = BSON_UINT32_FROM_LE (__l); \ BSON_ASSERT (bson_init_static ( \ &b, rpc->_name[_i].payload.bson_document, __l)); \ s = bson_as_relaxed_extended_json (&b, NULL); \ printf (" Type %d: %s\n", rpc->_name[_i].payload_type, s); \ bson_free (s); \ bson_destroy (&b); \ } while (0); \ } else if (rpc->_name[_i].payload_type == 1) { \ bson_reader_t *__r; \ int max = rpc->_name[_i].payload.sequence.size - \ strlen 
(rpc->_name[_i].payload.sequence.identifier) - \ 1 - sizeof (int32_t); \ bool __eof; \ const bson_t *__b; \ printf (" Identifier: %s\n", \ rpc->_name[_i].payload.sequence.identifier); \ printf (" Size: %d\n", max); \ __r = bson_reader_new_from_data ( \ rpc->_name[_i].payload.sequence.bson_documents, max); \ while ((__b = bson_reader_read (__r, &__eof))) { \ char *s = bson_as_relaxed_extended_json (__b, NULL); \ bson_free (s); \ } \ bson_reader_destroy (__r); \ } \ } \ } while (0); #define BSON_OPTIONAL(_check, _code) \ if (rpc->_check) { \ _code \ } #define RAW_BUFFER_FIELD(_name) \ { \ ssize_t __i; \ printf (" " #_name " :"); \ for (__i = 0; __i < rpc->_name##_len; __i++) { \ uint8_t u; \ u = ((char *) rpc->_name)[__i]; \ printf (" %02x", u); \ } \ printf ("\n"); \ } #define INT64_ARRAY_FIELD(_len, _name) \ do { \ ssize_t i; \ for (i = 0; i < rpc->_len; i++) { \ printf (" " #_name " : %" PRIi64 "\n", (int64_t) rpc->_name[i]); \ } \ rpc->_len = BSON_UINT32_FROM_LE (rpc->_len); \ } while (0); #include "op-delete.def" #include "op-get-more.def" #include "op-insert.def" #include "op-kill-cursors.def" #include "op-msg.def" #include "op-query.def" #include "op-reply.def" #include "op-compressed.def" #include "op-update.def" #undef RPC #undef ENUM_FIELD #undef UINT8_FIELD #undef INT32_FIELD #undef INT64_FIELD #undef INT64_ARRAY_FIELD #undef CSTRING_FIELD #undef BSON_FIELD #undef BSON_ARRAY_FIELD #undef IOVEC_ARRAY_FIELD #undef SECTION_ARRAY_FIELD #undef BSON_OPTIONAL #undef RAW_BUFFER_FIELD #define RPC(_name, _code) \ static bool _mongoc_rpc_scatter_##_name ( \ mongoc_rpc_##_name##_t *rpc, const uint8_t *buf, size_t buflen) \ { \ BSON_ASSERT (rpc); \ BSON_ASSERT (buf); \ BSON_ASSERT (buflen); \ _code return true; \ } #define UINT8_FIELD(_name) \ if (buflen < 1) { \ return false; \ } \ memcpy (&rpc->_name, buf, 1); \ buflen -= 1; \ buf += 1; #define INT32_FIELD(_name) \ if (buflen < 4) { \ return false; \ } \ memcpy (&rpc->_name, buf, 4); \ buflen -= 4; \ buf += 4; #define ENUM_FIELD INT32_FIELD #define INT64_FIELD(_name) \ if (buflen < 8) { \ return false; \ } \ memcpy (&rpc->_name, buf, 8); \ buflen -= 8; \ buf += 8; #define INT64_ARRAY_FIELD(_len, _name) \ do { \ size_t needed; \ if (buflen < 4) { \ return false; \ } \ memcpy (&rpc->_len, buf, 4); \ buflen -= 4; \ buf += 4; \ needed = BSON_UINT32_FROM_LE (rpc->_len) * 8; \ if (needed > buflen) { \ return false; \ } \ rpc->_name = (int64_t *) buf; \ buf += needed; \ buflen -= needed; \ } while (0); #define CSTRING_FIELD(_name) \ do { \ size_t __i; \ bool found = false; \ for (__i = 0; __i < buflen; __i++) { \ if (!buf[__i]) { \ rpc->_name = (const char *) buf; \ buflen -= __i + 1; \ buf += __i + 1; \ found = true; \ break; \ } \ } \ if (!found) { \ return false; \ } \ } while (0); #define BSON_FIELD(_name) \ do { \ uint32_t __l; \ if (buflen < 4) { \ return false; \ } \ memcpy (&__l, buf, 4); \ __l = BSON_UINT32_FROM_LE (__l); \ if (__l < 5 || __l > buflen) { \ return false; \ } \ rpc->_name = (uint8_t *) buf; \ buf += __l; \ buflen -= __l; \ } while (0); #define BSON_ARRAY_FIELD(_name) \ rpc->_name = (uint8_t *) buf; \ rpc->_name##_len = (int32_t) buflen; \ buf = NULL; \ buflen = 0; #define BSON_OPTIONAL(_check, _code) \ if (buflen) { \ _code \ } #define IOVEC_ARRAY_FIELD(_name) \ rpc->_name##_recv.iov_base = (void *) buf; \ rpc->_name##_recv.iov_len = buflen; \ rpc->_name = &rpc->_name##_recv; \ rpc->n_##_name = 1; \ buf = NULL; \ buflen = 0; #define SECTION_ARRAY_FIELD(_name) \ do { \ uint32_t __l; \ mongoc_rpc_section_t *section = 
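/* Every scatter macro above follows the same cursor discipline: check the
 * remaining length, copy or alias the field, then advance buf and shrink
 * buflen so the next field starts on clean input.  A minimal standalone
 * sketch of that pattern (demo_read_int32_le is hypothetical):
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
demo_read_int32_le (const uint8_t **buf, size_t *buflen, int32_t *out)
{
   if (*buflen < 4) {
      return false; /* never read past the supplied buffer */
   }
   memcpy (out, *buf, 4); /* still little endian; swabbing happens later */
   *buf += 4;
   *buflen -= 4;
   return true;
}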
&rpc->_name[rpc->n_##_name]; \ section->payload_type = buf[0]; \ buf++; \ buflen -= 1; \ memcpy (&__l, buf, 4); \ __l = BSON_UINT32_FROM_LE (__l); \ if (section->payload_type == 0) { \ section->payload.bson_document = buf; \ } else { \ const uint8_t *section_buf = buf + 4; \ section->payload.sequence.size = __l; \ section->payload.sequence.identifier = (const char *) section_buf; \ section_buf += strlen ((const char *) section_buf) + 1; \ section->payload.sequence.bson_documents = section_buf; \ } \ buf += __l; \ buflen -= __l; \ rpc->n_##_name++; \ } while (buflen); #define RAW_BUFFER_FIELD(_name) \ rpc->_name = (void *) buf; \ rpc->_name##_len = (int32_t) buflen; \ buf = NULL; \ buflen = 0; #include "op-delete.def" #include "op-get-more.def" #include "op-header.def" #include "op-insert.def" #include "op-kill-cursors.def" #include "op-msg.def" #include "op-query.def" #include "op-reply.def" #include "op-reply-header.def" #include "op-compressed.def" #include "op-update.def" #undef RPC #undef ENUM_FIELD #undef UINT8_FIELD #undef INT32_FIELD #undef INT64_FIELD #undef INT64_ARRAY_FIELD #undef CSTRING_FIELD #undef BSON_FIELD #undef BSON_ARRAY_FIELD #undef IOVEC_ARRAY_FIELD #undef SECTION_ARRAY_FIELD #undef BSON_OPTIONAL #undef RAW_BUFFER_FIELD /* *-------------------------------------------------------------------------- * * _mongoc_rpc_gather -- * * Takes a (native endian) rpc struct and gathers the buffer. * Caller should swab to little endian after calling gather. * * Gather, swab, compress write. * Read, scatter, uncompress, swab * *-------------------------------------------------------------------------- */ void _mongoc_rpc_gather (mongoc_rpc_t *rpc, mongoc_array_t *array) { mongoc_counter_op_egress_total_inc (); switch ((mongoc_opcode_t) rpc->header.opcode) { case MONGOC_OPCODE_REPLY: _mongoc_rpc_gather_reply (&rpc->reply, &rpc->header, array); return; case MONGOC_OPCODE_MSG: _mongoc_rpc_gather_msg (&rpc->msg, &rpc->header, array); mongoc_counter_op_egress_msg_inc (); return; case MONGOC_OPCODE_UPDATE: _mongoc_rpc_gather_update (&rpc->update, &rpc->header, array); mongoc_counter_op_egress_update_inc (); return; case MONGOC_OPCODE_INSERT: _mongoc_rpc_gather_insert (&rpc->insert, &rpc->header, array); mongoc_counter_op_egress_insert_inc (); return; case MONGOC_OPCODE_QUERY: _mongoc_rpc_gather_query (&rpc->query, &rpc->header, array); mongoc_counter_op_egress_query_inc (); return; case MONGOC_OPCODE_GET_MORE: _mongoc_rpc_gather_get_more (&rpc->get_more, &rpc->header, array); mongoc_counter_op_egress_getmore_inc (); return; case MONGOC_OPCODE_DELETE: _mongoc_rpc_gather_delete (&rpc->delete_, &rpc->header, array); mongoc_counter_op_egress_delete_inc (); return; case MONGOC_OPCODE_KILL_CURSORS: _mongoc_rpc_gather_kill_cursors (&rpc->kill_cursors, &rpc->header, array); mongoc_counter_op_egress_killcursors_inc (); return; case MONGOC_OPCODE_COMPRESSED: _mongoc_rpc_gather_compressed (&rpc->compressed, &rpc->header, array); mongoc_counter_op_egress_compressed_inc (); return; default: MONGOC_WARNING ("Unknown rpc type: 0x%08x", rpc->header.opcode); BSON_ASSERT (false); break; } } void _mongoc_rpc_swab_to_le (mongoc_rpc_t *rpc) { #if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN mongoc_opcode_t opcode; opcode = rpc->header.opcode; switch (opcode) { case MONGOC_OPCODE_REPLY: _mongoc_rpc_swab_to_le_reply (&rpc->reply); break; case MONGOC_OPCODE_MSG: _mongoc_rpc_swab_to_le_msg (&rpc->msg); break; case MONGOC_OPCODE_UPDATE: _mongoc_rpc_swab_to_le_update (&rpc->update); break; case MONGOC_OPCODE_INSERT: 
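/* A minimal sketch of the outbound order described in the gather comment
 * above: gather iovecs from the native-endian rpc, swab the struct to
 * little endian (a no-op on little-endian hosts), then hand the iovec
 * array to the transport.  demo_writev () is a hypothetical stand-in for
 * the cluster's write path; compression, when negotiated, happens between
 * the swab and the write via _mongoc_rpc_compress.
 */
bool
demo_writev (const mongoc_iovec_t *iov, size_t n_iov); /* hypothetical */

static bool
demo_send_rpc (mongoc_rpc_t *rpc, mongoc_array_t *iov)
{
   _mongoc_array_init (iov, sizeof (mongoc_iovec_t));
   _mongoc_rpc_gather (rpc, iov);
   _mongoc_rpc_swab_to_le (rpc);
   return demo_writev ((const mongoc_iovec_t *) iov->data, iov->len);
}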
_mongoc_rpc_swab_to_le_insert (&rpc->insert); break; case MONGOC_OPCODE_QUERY: _mongoc_rpc_swab_to_le_query (&rpc->query); break; case MONGOC_OPCODE_GET_MORE: _mongoc_rpc_swab_to_le_get_more (&rpc->get_more); break; case MONGOC_OPCODE_DELETE: _mongoc_rpc_swab_to_le_delete (&rpc->delete_); break; case MONGOC_OPCODE_KILL_CURSORS: _mongoc_rpc_swab_to_le_kill_cursors (&rpc->kill_cursors); break; case MONGOC_OPCODE_COMPRESSED: _mongoc_rpc_swab_to_le_compressed (&rpc->compressed); break; default: MONGOC_WARNING ("Unknown rpc type: 0x%08x", opcode); break; } #endif #if 0 _mongoc_rpc_printf (rpc); #endif } void _mongoc_rpc_swab_from_le (mongoc_rpc_t *rpc) { #if BSON_BYTE_ORDER != BSON_LITTLE_ENDIAN mongoc_opcode_t opcode; opcode = BSON_UINT32_FROM_LE (rpc->header.opcode); switch (opcode) { case MONGOC_OPCODE_REPLY: _mongoc_rpc_swab_from_le_reply (&rpc->reply); break; case MONGOC_OPCODE_MSG: _mongoc_rpc_swab_from_le_msg (&rpc->msg); break; case MONGOC_OPCODE_UPDATE: _mongoc_rpc_swab_from_le_update (&rpc->update); break; case MONGOC_OPCODE_INSERT: _mongoc_rpc_swab_from_le_insert (&rpc->insert); break; case MONGOC_OPCODE_QUERY: _mongoc_rpc_swab_from_le_query (&rpc->query); break; case MONGOC_OPCODE_GET_MORE: _mongoc_rpc_swab_from_le_get_more (&rpc->get_more); break; case MONGOC_OPCODE_DELETE: _mongoc_rpc_swab_from_le_delete (&rpc->delete_); break; case MONGOC_OPCODE_KILL_CURSORS: _mongoc_rpc_swab_from_le_kill_cursors (&rpc->kill_cursors); break; case MONGOC_OPCODE_COMPRESSED: _mongoc_rpc_swab_from_le_compressed (&rpc->compressed); break; default: MONGOC_WARNING ("Unknown rpc type: 0x%08x", rpc->header.opcode); break; } #endif #if 0 _mongoc_rpc_printf (rpc); #endif } void _mongoc_rpc_printf (mongoc_rpc_t *rpc) { switch ((mongoc_opcode_t) rpc->header.opcode) { case MONGOC_OPCODE_REPLY: _mongoc_rpc_printf_reply (&rpc->reply); break; case MONGOC_OPCODE_MSG: _mongoc_rpc_printf_msg (&rpc->msg); break; case MONGOC_OPCODE_UPDATE: _mongoc_rpc_printf_update (&rpc->update); break; case MONGOC_OPCODE_INSERT: _mongoc_rpc_printf_insert (&rpc->insert); break; case MONGOC_OPCODE_QUERY: _mongoc_rpc_printf_query (&rpc->query); break; case MONGOC_OPCODE_GET_MORE: _mongoc_rpc_printf_get_more (&rpc->get_more); break; case MONGOC_OPCODE_DELETE: _mongoc_rpc_printf_delete (&rpc->delete_); break; case MONGOC_OPCODE_KILL_CURSORS: _mongoc_rpc_printf_kill_cursors (&rpc->kill_cursors); break; case MONGOC_OPCODE_COMPRESSED: _mongoc_rpc_printf_compressed (&rpc->compressed); break; default: MONGOC_WARNING ("Unknown rpc type: 0x%08x", rpc->header.opcode); break; } printf ("\n"); } /* *-------------------------------------------------------------------------- * * _mongoc_rpc_decompress -- * * Takes a (little endian) rpc struct assumed to be OP_COMPRESSED * and decompresses the opcode into its original opcode. * The in-place updated rpc struct remains little endian. * * Side effects: * Overwrites the RPC, along with the provided buf with the * compressed results. 
* *-------------------------------------------------------------------------- */ bool _mongoc_rpc_decompress (mongoc_rpc_t *rpc_le, uint8_t *buf, size_t buflen) { size_t uncompressed_size = BSON_UINT32_FROM_LE (rpc_le->compressed.uncompressed_size); bool ok; size_t msg_len = BSON_UINT32_TO_LE (buflen); const size_t original_uncompressed_size = uncompressed_size; BSON_ASSERT (uncompressed_size <= buflen); memcpy (buf, (void *) (&msg_len), 4); memcpy (buf + 4, (void *) (&rpc_le->header.request_id), 4); memcpy (buf + 8, (void *) (&rpc_le->header.response_to), 4); memcpy (buf + 12, (void *) (&rpc_le->compressed.original_opcode), 4); ok = mongoc_uncompress (rpc_le->compressed.compressor_id, rpc_le->compressed.compressed_message, rpc_le->compressed.compressed_message_len, buf + 16, &uncompressed_size); BSON_ASSERT (original_uncompressed_size == uncompressed_size); if (ok) { return _mongoc_rpc_scatter (rpc_le, buf, buflen); } return false; } /* *-------------------------------------------------------------------------- * * _mongoc_rpc_compress -- * * Takes a (little endian) rpc struct and creates a OP_COMPRESSED * compressed opcode based on the provided compressor_id. * The in-place updated rpc struct remains little endian. * * Side effects: * Overwrites the RPC, and clears and overwrites the cluster buffer * with the compressed results. * *-------------------------------------------------------------------------- */ char * _mongoc_rpc_compress (struct _mongoc_cluster_t *cluster, int32_t compressor_id, mongoc_rpc_t *rpc_le, bson_error_t *error) { char *output; size_t output_length = 0; size_t allocate = BSON_UINT32_FROM_LE (rpc_le->header.msg_len) - 16; char *data; int size; int32_t compression_level = -1; if (compressor_id == MONGOC_COMPRESSOR_ZLIB_ID) { compression_level = mongoc_uri_get_option_as_int32 ( cluster->uri, MONGOC_URI_ZLIBCOMPRESSIONLEVEL, -1); } BSON_ASSERT (allocate > 0); data = bson_malloc0 (allocate); size = _mongoc_cluster_buffer_iovec ( cluster->iov.data, cluster->iov.len, 16, data); BSON_ASSERT (size); output_length = mongoc_compressor_max_compressed_length (compressor_id, size); if (!output_length) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Could not determine compression bounds for %s", mongoc_compressor_id_to_name (compressor_id)); bson_free (data); return NULL; } output = (char *) bson_malloc0 (output_length); if (mongoc_compress (compressor_id, compression_level, data, size, output, &output_length)) { rpc_le->header.msg_len = 0; rpc_le->compressed.original_opcode = BSON_UINT32_FROM_LE (rpc_le->header.opcode); rpc_le->header.opcode = MONGOC_OPCODE_COMPRESSED; rpc_le->header.request_id = BSON_UINT32_FROM_LE (rpc_le->header.request_id); rpc_le->header.response_to = BSON_UINT32_FROM_LE (rpc_le->header.response_to); rpc_le->compressed.uncompressed_size = size; rpc_le->compressed.compressor_id = compressor_id; rpc_le->compressed.compressed_message = (const uint8_t *) output; rpc_le->compressed.compressed_message_len = output_length; bson_free (data); _mongoc_array_destroy (&cluster->iov); _mongoc_array_init (&cluster->iov, sizeof (mongoc_iovec_t)); _mongoc_rpc_gather (rpc_le, &cluster->iov); _mongoc_rpc_swab_to_le (rpc_le); return output; } else { MONGOC_WARNING ("Could not compress data with %s", mongoc_compressor_id_to_name (compressor_id)); } bson_free (data); bson_free (output); return NULL; } /* *-------------------------------------------------------------------------- * * _mongoc_rpc_scatter -- * * Takes a (little endian) rpc 
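/* For reference, an OP_COMPRESSED message is the standard 16-byte header
 * (opcode MONGOC_OPCODE_COMPRESSED) followed by the original opcode
 * (int32), the uncompressed size (int32), the compressor id (uint8) and
 * the compressed payload.  _mongoc_rpc_decompress above rebuilds a plain
 * 16-byte header in front of the inflated body so the result can be fed
 * straight back into _mongoc_rpc_scatter.  The struct below only
 * illustrates that header layout; the driver writes the four
 * little-endian int32s with memcpy rather than through a struct.
 */
#include <stdint.h>

typedef struct {
   int32_t msg_len;     /* total length, including these 16 bytes */
   int32_t request_id;  /* copied from the compressed envelope */
   int32_t response_to; /* copied from the compressed envelope */
   int32_t opcode;      /* restored to the original (pre-compression) opcode */
} demo_msg_header_t;    /* illustration only; not a libmongoc type */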
struct and scatters the buffer. * Caller should check if resulting opcode is OP_COMPRESSED * BEFORE swabbing to native endianness. * *-------------------------------------------------------------------------- */ bool _mongoc_rpc_scatter (mongoc_rpc_t *rpc, const uint8_t *buf, size_t buflen) { mongoc_opcode_t opcode; memset (rpc, 0, sizeof *rpc); if (BSON_UNLIKELY (buflen < 16)) { return false; } mongoc_counter_op_ingress_total_inc (); if (!_mongoc_rpc_scatter_header (&rpc->header, buf, 16)) { return false; } opcode = (mongoc_opcode_t) BSON_UINT32_FROM_LE (rpc->header.opcode); switch (opcode) { case MONGOC_OPCODE_COMPRESSED: mongoc_counter_op_ingress_compressed_inc (); return _mongoc_rpc_scatter_compressed (&rpc->compressed, buf, buflen); case MONGOC_OPCODE_REPLY: mongoc_counter_op_ingress_reply_inc (); return _mongoc_rpc_scatter_reply (&rpc->reply, buf, buflen); case MONGOC_OPCODE_MSG: mongoc_counter_op_ingress_msg_inc (); return _mongoc_rpc_scatter_msg (&rpc->msg, buf, buflen); /* useless, we are never *getting* these opcodes */ case MONGOC_OPCODE_UPDATE: return _mongoc_rpc_scatter_update (&rpc->update, buf, buflen); case MONGOC_OPCODE_INSERT: return _mongoc_rpc_scatter_insert (&rpc->insert, buf, buflen); case MONGOC_OPCODE_QUERY: return _mongoc_rpc_scatter_query (&rpc->query, buf, buflen); case MONGOC_OPCODE_GET_MORE: return _mongoc_rpc_scatter_get_more (&rpc->get_more, buf, buflen); case MONGOC_OPCODE_DELETE: return _mongoc_rpc_scatter_delete (&rpc->delete_, buf, buflen); case MONGOC_OPCODE_KILL_CURSORS: return _mongoc_rpc_scatter_kill_cursors (&rpc->kill_cursors, buf, buflen); default: MONGOC_WARNING ("Unknown rpc type: 0x%08x", opcode); return false; } } bool _mongoc_rpc_scatter_reply_header_only (mongoc_rpc_t *rpc, const uint8_t *buf, size_t buflen) { if (BSON_UNLIKELY (buflen < sizeof (mongoc_rpc_reply_header_t))) { return false; } mongoc_counter_op_ingress_reply_inc (); mongoc_counter_op_ingress_total_inc (); return _mongoc_rpc_scatter_reply_header (&rpc->reply_header, buf, buflen); } bool _mongoc_rpc_get_first_document (mongoc_rpc_t *rpc, bson_t *reply) { if (rpc->header.opcode == MONGOC_OPCODE_REPLY && _mongoc_rpc_reply_get_first (&rpc->reply, reply)) { return true; } return false; } bool _mongoc_rpc_reply_get_first (mongoc_rpc_reply_t *reply, bson_t *bson) { int32_t len; if (!reply->documents || reply->documents_len < 4) { return false; } memcpy (&len, reply->documents, 4); len = BSON_UINT32_FROM_LE (len); if (reply->documents_len < len) { return false; } return bson_init_static (bson, reply->documents, len); } /* *-------------------------------------------------------------------------- * * _mongoc_rpc_prep_command -- * * Prepare an RPC for mongoc_cluster_run_command_rpc. @cmd_ns and * @cmd must not be freed or modified while the RPC is in use. * * Side effects: * Fills out the RPC, including pointers into @cmd_ns and @command. * *-------------------------------------------------------------------------- */ void _mongoc_rpc_prep_command (mongoc_rpc_t *rpc, const char *cmd_ns, mongoc_cmd_t *cmd) { rpc->header.msg_len = 0; rpc->header.request_id = 0; rpc->header.response_to = 0; rpc->header.opcode = MONGOC_OPCODE_QUERY; rpc->query.collection = cmd_ns; rpc->query.skip = 0; rpc->query.n_return = -1; rpc->query.fields = NULL; rpc->query.query = bson_get_data (cmd->command); /* Find, getMore And killCursors Commands Spec: "When sending a find command * rather than a legacy OP_QUERY find, only the slaveOk flag is honored." 
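/* A minimal sketch of the inbound order described above: scatter first,
 * test for OP_COMPRESSED while the struct is still little endian,
 * decompress if needed (which re-scatters from the inflated buffer), then
 * swab to host byte order.  `scratch` is a hypothetical caller-owned
 * buffer large enough for the 16-byte header plus the uncompressed body.
 */
static bool
demo_recv_rpc (mongoc_rpc_t *rpc,
               const uint8_t *wire,
               size_t wire_len,
               uint8_t *scratch,
               size_t scratch_len)
{
   if (!_mongoc_rpc_scatter (rpc, wire, wire_len)) {
      return false;
   }

   if (BSON_UINT32_FROM_LE (rpc->header.opcode) == MONGOC_OPCODE_COMPRESSED &&
       !_mongoc_rpc_decompress (rpc, scratch, scratch_len)) {
      return false;
   }

   _mongoc_rpc_swab_from_le (rpc); /* no-op on little-endian hosts */
   return true;
}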
* For other cursor-typed commands like aggregate, only slaveOk can be set. * Clear bits except slaveOk; leave slaveOk set only if it is already. */ rpc->query.flags = cmd->query_flags & MONGOC_QUERY_SLAVE_OK; } /* returns true if an error was found. */ static bool _parse_error_reply (const bson_t *doc, bool check_wce, uint32_t *code, const char **msg) { bson_iter_t iter; bool found_error = false; ENTRY; BSON_ASSERT (doc); BSON_ASSERT (code); *code = 0; if (bson_iter_init_find (&iter, doc, "code") && BSON_ITER_HOLDS_INT32 (&iter)) { *code = (uint32_t) bson_iter_int32 (&iter); found_error = true; } if (bson_iter_init_find (&iter, doc, "errmsg") && BSON_ITER_HOLDS_UTF8 (&iter)) { *msg = bson_iter_utf8 (&iter, NULL); found_error = true; } else if (bson_iter_init_find (&iter, doc, "$err") && BSON_ITER_HOLDS_UTF8 (&iter)) { *msg = bson_iter_utf8 (&iter, NULL); found_error = true; } if (found_error) { /* there was a command error */ RETURN (true); } if (check_wce) { /* check for a write concern error */ if (bson_iter_init_find (&iter, doc, "writeConcernError") && BSON_ITER_HOLDS_DOCUMENT (&iter)) { bson_iter_t child; BSON_ASSERT (bson_iter_recurse (&iter, &child)); if (bson_iter_find (&child, "code") && BSON_ITER_HOLDS_INT32 (&child)) { *code = (uint32_t) bson_iter_int32 (&child); found_error = true; } BSON_ASSERT (bson_iter_recurse (&iter, &child)); if (bson_iter_find (&child, "errmsg") && BSON_ITER_HOLDS_UTF8 (&child)) { *msg = bson_iter_utf8 (&child, NULL); found_error = true; } } } RETURN (found_error); } /* *-------------------------------------------------------------------------- * * _mongoc_cmd_check_ok -- * * Check if a server reply document is an error message. * Optionally fill out a bson_error_t from the server error. * Does *not* check for writeConcernError. * * Returns: * false if @doc is an error message, true otherwise. * * Side effects: * If @doc is an error reply and @error is not NULL, set its * domain, code, and message. * *-------------------------------------------------------------------------- */ bool _mongoc_cmd_check_ok (const bson_t *doc, int32_t error_api_version, bson_error_t *error) { mongoc_error_domain_t domain = error_api_version >= MONGOC_ERROR_API_VERSION_2 ? MONGOC_ERROR_SERVER : MONGOC_ERROR_QUERY; uint32_t code; bson_iter_t iter; const char *msg = "Unknown command error"; ENTRY; BSON_ASSERT (doc); if (bson_iter_init_find (&iter, doc, "ok") && bson_iter_as_bool (&iter)) { /* no error */ RETURN (true); } if (!_parse_error_reply (doc, false /* check_wce */, &code, &msg)) { RETURN (true); } if (code == MONGOC_ERROR_PROTOCOL_ERROR || code == 13390) { code = MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND; } else if (code == 0) { code = MONGOC_ERROR_QUERY_FAILURE; } bson_set_error (error, domain, code, "%s", msg); /* there was a command error */ RETURN (false); } /* *-------------------------------------------------------------------------- * * _mongoc_cmd_check_ok_no_wce -- * * Check if a server reply document is an error message. * Optionally fill out a bson_error_t from the server error. * If the response contains a writeConcernError, this is considered * an error and returns false. * * Returns: * false if @doc is an error message, true otherwise. * * Side effects: * If @doc is an error reply and @error is not NULL, set its * domain, code, and message. 
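/* A simplified sketch of the "is this reply an error?" test performed by
 * _parse_error_reply and _mongoc_cmd_check_ok above, written out against
 * the public libbson iterator API.  It ignores the numeric "code" field
 * and writeConcernError handling; demo_reply_is_error is not a libmongoc
 * function.
 */
#include <bson/bson.h>

static bool
demo_reply_is_error (const bson_t *reply, const char **msg /* OUT */)
{
   bson_iter_t iter;
   bool found = false;

   /* A present and truthy "ok" field means the command succeeded. */
   if (bson_iter_init_find (&iter, reply, "ok") && bson_iter_as_bool (&iter)) {
      return false;
   }

   /* Otherwise look for the server's message: "errmsg" for command
    * errors, "$err" for legacy query failures. */
   if (bson_iter_init_find (&iter, reply, "errmsg") &&
       BSON_ITER_HOLDS_UTF8 (&iter)) {
      *msg = bson_iter_utf8 (&iter, NULL);
      found = true;
   } else if (bson_iter_init_find (&iter, reply, "$err") &&
              BSON_ITER_HOLDS_UTF8 (&iter)) {
      *msg = bson_iter_utf8 (&iter, NULL);
      found = true;
   }

   return found; /* no error fields at all is treated as success */
}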
* *-------------------------------------------------------------------------- */ bool _mongoc_cmd_check_ok_no_wce (const bson_t *doc, int32_t error_api_version, bson_error_t *error) { mongoc_error_domain_t domain = error_api_version >= MONGOC_ERROR_API_VERSION_2 ? MONGOC_ERROR_SERVER : MONGOC_ERROR_QUERY; uint32_t code; const char *msg = "Unknown command error"; ENTRY; BSON_ASSERT (doc); if (!_parse_error_reply (doc, true /* check_wce */, &code, &msg)) { RETURN (true); } if (code == MONGOC_ERROR_PROTOCOL_ERROR || code == 13390) { code = MONGOC_ERROR_QUERY_COMMAND_NOT_FOUND; } else if (code == 0) { code = MONGOC_ERROR_QUERY_FAILURE; } bson_set_error (error, domain, code, "%s", msg); /* there was a command error */ RETURN (false); } /* helper function to parse error reply document to an OP_QUERY */ static void _mongoc_populate_query_error (const bson_t *doc, int32_t error_api_version, bson_error_t *error) { mongoc_error_domain_t domain = error_api_version >= MONGOC_ERROR_API_VERSION_2 ? MONGOC_ERROR_SERVER : MONGOC_ERROR_QUERY; uint32_t code = MONGOC_ERROR_QUERY_FAILURE; bson_iter_t iter; const char *msg = "Unknown query failure"; ENTRY; BSON_ASSERT (doc); if (bson_iter_init_find (&iter, doc, "code") && BSON_ITER_HOLDS_INT32 (&iter)) { code = (uint32_t) bson_iter_int32 (&iter); } if (bson_iter_init_find (&iter, doc, "$err") && BSON_ITER_HOLDS_UTF8 (&iter)) { msg = bson_iter_utf8 (&iter, NULL); } bson_set_error (error, domain, code, "%s", msg); EXIT; } /* *-------------------------------------------------------------------------- * * _mongoc_rpc_check_ok -- * * Check if a server OP_REPLY is an error message. * Optionally fill out a bson_error_t from the server error. * @error_document must be an initialized bson_t or NULL. * Does *not* check for writeConcernError. * * Returns: * false if the reply is an error message, true otherwise. * * Side effects: * If rpc is an error reply and @error is not NULL, set its * domain, code, and message. * * If rpc is an error reply and @error_document is not NULL, * it is reinitialized with the server reply. * *-------------------------------------------------------------------------- */ bool _mongoc_rpc_check_ok (mongoc_rpc_t *rpc, int32_t error_api_version, bson_error_t *error /* OUT */, bson_t *error_doc /* OUT */) { bson_t b; ENTRY; BSON_ASSERT (rpc); if (rpc->header.opcode != MONGOC_OPCODE_REPLY) { bson_set_error (error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_INVALID_REPLY, "Received rpc other than OP_REPLY."); RETURN (false); } if (rpc->reply.flags & MONGOC_REPLY_QUERY_FAILURE) { if (_mongoc_rpc_get_first_document (rpc, &b)) { _mongoc_populate_query_error (&b, error_api_version, error); if (error_doc) { bson_destroy (error_doc); bson_copy_to (&b, error_doc); } bson_destroy (&b); } else { bson_set_error (error, MONGOC_ERROR_QUERY, MONGOC_ERROR_QUERY_FAILURE, "Unknown query failure."); } RETURN (false); } else if (rpc->reply.flags & MONGOC_REPLY_CURSOR_NOT_FOUND) { bson_set_error (error, MONGOC_ERROR_CURSOR, MONGOC_ERROR_CURSOR_INVALID_CURSOR, "The cursor is invalid or has expired."); RETURN (false); } RETURN (true); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-sasl-private.h0000644000076500000240000000327013572250760026071 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SASL_PRIVATE_H #define MONGOC_SASL_PRIVATE_H #include #include "mongoc/mongoc-uri.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-stream-socket.h" BSON_BEGIN_DECLS typedef struct { char *user; char *pass; char *service_name; char *service_host; bool canonicalize_host_name; char *mechanism; } mongoc_sasl_t; void _mongoc_sasl_set_pass (mongoc_sasl_t *sasl, const char *pass); void _mongoc_sasl_set_user (mongoc_sasl_t *sasl, const char *user); void _mongoc_sasl_set_service_name (mongoc_sasl_t *sasl, const char *service_name); void _mongoc_sasl_set_service_host (mongoc_sasl_t *sasl, const char *service_host); void _mongoc_sasl_set_properties (mongoc_sasl_t *sasl, const mongoc_uri_t *uri); bool _mongoc_sasl_get_canonicalized_name (mongoc_stream_t *node_stream, /* IN */ char *name, /* OUT */ size_t namelen); /* IN */ BSON_END_DECLS #endif /* MONGOC_SASL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-sasl.c0000644000076500000240000001201513572250760024411 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SASL #include "mongoc/mongoc-sasl-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-change-stream-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "SASL" void _mongoc_sasl_set_user (mongoc_sasl_t *sasl, const char *user) { BSON_ASSERT (sasl); bson_free (sasl->user); sasl->user = user ? bson_strdup (user) : NULL; } void _mongoc_sasl_set_pass (mongoc_sasl_t *sasl, const char *pass) { BSON_ASSERT (sasl); bson_free (sasl->pass); sasl->pass = pass ? bson_strdup (pass) : NULL; } void _mongoc_sasl_set_service_host (mongoc_sasl_t *sasl, const char *service_host) { BSON_ASSERT (sasl); bson_free (sasl->service_host); sasl->service_host = service_host ? bson_strdup (service_host) : NULL; } void _mongoc_sasl_set_service_name (mongoc_sasl_t *sasl, const char *service_name) { BSON_ASSERT (sasl); bson_free (sasl->service_name); sasl->service_name = service_name ? 
bson_strdup (service_name) : NULL; } void _mongoc_sasl_set_properties (mongoc_sasl_t *sasl, const mongoc_uri_t *uri) { const bson_t *options; bson_iter_t iter; bson_t properties; const char *service_name = NULL; bool canonicalize = false; _mongoc_sasl_set_pass(sasl, mongoc_uri_get_password(uri)); _mongoc_sasl_set_user(sasl, mongoc_uri_get_username(uri)); options = mongoc_uri_get_options (uri); if (!mongoc_uri_get_mechanism_properties (uri, &properties)) { bson_init (&properties); } if (bson_iter_init_find_case ( &iter, options, MONGOC_URI_GSSAPISERVICENAME) && BSON_ITER_HOLDS_UTF8 (&iter)) { service_name = bson_iter_utf8 (&iter, NULL); } if (bson_iter_init_find_case (&iter, &properties, "SERVICE_NAME") && BSON_ITER_HOLDS_UTF8 (&iter)) { /* newer "authMechanismProperties" URI syntax takes precedence */ service_name = bson_iter_utf8 (&iter, NULL); } _mongoc_sasl_set_service_name (sasl, service_name); /* * Driver Authentication Spec: "Drivers MAY allow the user to request * canonicalization of the hostname. This might be required when the hosts * report different hostnames than what is used in the kerberos database. * The default is "false". * * Some underlying GSSAPI layers will do this for us, but can be disabled in * their config (krb.conf). * * See CDRIVER-323 for more information. */ if (bson_iter_init_find_case ( &iter, options, MONGOC_URI_CANONICALIZEHOSTNAME) && BSON_ITER_HOLDS_BOOL (&iter)) { canonicalize = bson_iter_bool (&iter); } if (bson_iter_init_find_case ( &iter, &properties, "CANONICALIZE_HOST_NAME") && BSON_ITER_HOLDS_UTF8 (&iter)) { /* newer "authMechanismProperties" URI syntax takes precedence */ canonicalize = !strcasecmp (bson_iter_utf8 (&iter, NULL), "true"); } sasl->canonicalize_host_name = canonicalize; bson_destroy (&properties); } /* *-------------------------------------------------------------------------- * * _mongoc_sasl_get_canonicalized_name -- * * Query the node to get the canonicalized name. This may happen if * the node has been accessed via an alias. * * The gssapi code will use this if canonicalizeHostname is true. * * Some underlying layers of krb might do this for us, but they can * be disabled in krb.conf. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ bool _mongoc_sasl_get_canonicalized_name (mongoc_stream_t *node_stream, /* IN */ char *name, /* OUT */ size_t namelen) /* OUT */ { mongoc_stream_t *stream; mongoc_socket_t *sock = NULL; char *canonicalized; ENTRY; BSON_ASSERT (node_stream); BSON_ASSERT (name); stream = mongoc_stream_get_root_stream (node_stream); BSON_ASSERT (stream); if (stream->type == MONGOC_STREAM_SOCKET) { sock = mongoc_stream_socket_get_socket ((mongoc_stream_socket_t *) stream); if (sock) { canonicalized = mongoc_socket_getnameinfo (sock); if (canonicalized) { bson_snprintf (name, namelen, "%s", canonicalized); bson_free (canonicalized); RETURN (true); } } } RETURN (false); } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-scram-private.h0000644000076500000240000000741213572250760026236 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
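/* A connection-string sketch of the precedence implemented by
 * _mongoc_sasl_set_properties above: values inside authMechanismProperties
 * win over the older gssapiServiceName / canonicalizeHostname options.
 * The principal and host below are placeholders.
 */
static const char *demo_gssapi_uri =
   "mongodb://user%40EXAMPLE.COM@db.example.com/"
   "?authMechanism=GSSAPI"
   "&authMechanismProperties=SERVICE_NAME:mongodb,CANONICALIZE_HOST_NAME:true";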
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SCRAM_PRIVATE_H #define MONGOC_SCRAM_PRIVATE_H #include #include "mongoc/mongoc-crypto-private.h" BSON_BEGIN_DECLS #define MONGOC_SCRAM_SHA_1_HASH_SIZE 20 #define MONGOC_SCRAM_SHA_256_HASH_SIZE 32 /* SCRAM-SHA-1 uses a hash size of 20, and SCRAM-SHA-256 uses a hash size * of 32. Stack allocations should be large enough for either. */ #define MONGOC_SCRAM_HASH_MAX_SIZE MONGOC_SCRAM_SHA_256_HASH_SIZE #define MONGOC_SCRAM_B64_ENCODED_SIZE(n) (2 * n) #define MONGOC_SCRAM_B64_HASH_MAX_SIZE \ MONGOC_SCRAM_B64_ENCODED_SIZE (MONGOC_SCRAM_HASH_MAX_SIZE) typedef struct _mongoc_scram_cache_t { /* pre-secrets */ char *hashed_password; uint8_t decoded_salt[MONGOC_SCRAM_B64_HASH_MAX_SIZE]; uint32_t iterations; /* secrets */ uint8_t client_key[MONGOC_SCRAM_HASH_MAX_SIZE]; uint8_t server_key[MONGOC_SCRAM_HASH_MAX_SIZE]; uint8_t salted_password[MONGOC_SCRAM_HASH_MAX_SIZE]; } mongoc_scram_cache_t; typedef struct _mongoc_scram_t { bool done; int step; char *user; char *pass; char *hashed_password; uint8_t decoded_salt[MONGOC_SCRAM_B64_HASH_MAX_SIZE]; uint32_t iterations; uint8_t client_key[MONGOC_SCRAM_HASH_MAX_SIZE]; uint8_t server_key[MONGOC_SCRAM_HASH_MAX_SIZE]; uint8_t salted_password[MONGOC_SCRAM_HASH_MAX_SIZE]; char encoded_nonce[48]; int32_t encoded_nonce_len; uint8_t *auth_message; uint32_t auth_messagemax; uint32_t auth_messagelen; #ifdef MONGOC_ENABLE_CRYPTO mongoc_crypto_t crypto; #endif mongoc_scram_cache_t *cache; } mongoc_scram_t; #ifdef MONGOC_ENABLE_CRYPTO void _mongoc_scram_init (mongoc_scram_t *scram, mongoc_crypto_hash_algorithm_t algo); #endif mongoc_scram_cache_t * _mongoc_scram_get_cache (mongoc_scram_t *scram); void _mongoc_scram_set_cache (mongoc_scram_t *scram, mongoc_scram_cache_t *cache); void _mongoc_scram_set_pass (mongoc_scram_t *scram, const char *pass); void _mongoc_scram_set_user (mongoc_scram_t *scram, const char *user); void _mongoc_scram_set_server_key (mongoc_scram_t *scram, const uint8_t *server_key, size_t len); void _mongoc_scram_set_salted_password (mongoc_scram_t *scram, const uint8_t *salted_password, size_t len); void _mongoc_scram_destroy (mongoc_scram_t *scram); bool _mongoc_scram_step (mongoc_scram_t *scram, const uint8_t *inbuf, uint32_t inbuflen, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error); void _mongoc_scram_cache_destroy (mongoc_scram_cache_t *cache); /* returns false if this string does not need SASLPrep. It returns true * conservatively, if str might need to be SASLPrep'ed. */ bool _mongoc_sasl_prep_required (const char *str); /* returns the output of SASLPrep as a new string which must be freed. Returns * null on error and sets err. * `name` should be "username" or "password". */ char * _mongoc_sasl_prep (const char *in_utf8, int in_utf8_len, bson_error_t *err); BSON_END_DECLS #endif /* MONGOC_SCRAM_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-scram.c0000644000076500000240000010102313572250760024552 0ustar alcaeusstaff/* Copyright 2014 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_CRYPTO #include #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-scram-private.h" #include "mongoc/mongoc-rand-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-crypto-private.h" #include "common-b64-private.h" #include "mongoc/mongoc-memcmp-private.h" #define MONGOC_SCRAM_SERVER_KEY "Server Key" #define MONGOC_SCRAM_CLIENT_KEY "Client Key" static int _scram_hash_size (mongoc_scram_t *scram) { if (scram->crypto.algorithm == MONGOC_CRYPTO_ALGORITHM_SHA_1) { return MONGOC_SCRAM_SHA_1_HASH_SIZE; } else if (scram->crypto.algorithm == MONGOC_CRYPTO_ALGORITHM_SHA_256) { return MONGOC_SCRAM_SHA_256_HASH_SIZE; } return 0; } /* Copies the cache's secrets to scram */ static void _mongoc_scram_cache_apply_secrets (mongoc_scram_cache_t *cache, mongoc_scram_t *scram) { BSON_ASSERT (cache); BSON_ASSERT (scram); memcpy (scram->client_key, cache->client_key, sizeof (scram->client_key)); memcpy (scram->server_key, cache->server_key, sizeof (scram->server_key)); memcpy (scram->salted_password, cache->salted_password, sizeof (scram->salted_password)); } static mongoc_scram_cache_t * _mongoc_scram_cache_copy (const mongoc_scram_cache_t *cache) { mongoc_scram_cache_t *ret = NULL; if (cache) { ret = (mongoc_scram_cache_t *) bson_malloc0 (sizeof (*ret)); ret->hashed_password = bson_strdup (cache->hashed_password); memcpy ( ret->decoded_salt, cache->decoded_salt, sizeof (ret->decoded_salt)); ret->iterations = cache->iterations; memcpy (ret->client_key, cache->client_key, sizeof (ret->client_key)); memcpy (ret->server_key, cache->server_key, sizeof (ret->server_key)); memcpy (ret->salted_password, cache->salted_password, sizeof (ret->salted_password)); } return ret; } #ifdef MONGOC_ENABLE_ICU #include #include #endif void _mongoc_scram_cache_destroy (mongoc_scram_cache_t *cache) { BSON_ASSERT (cache); if (cache->hashed_password) { bson_zero_free (cache->hashed_password, strlen (cache->hashed_password)); } bson_free (cache); } /* Checks whether the cache contains scram's pre-secrets */ static bool _mongoc_scram_cache_has_presecrets (mongoc_scram_cache_t *cache, mongoc_scram_t *scram) { BSON_ASSERT (cache); BSON_ASSERT (scram); return cache->hashed_password && scram->hashed_password && !strcmp (cache->hashed_password, scram->hashed_password) && cache->iterations == scram->iterations && !memcmp (cache->decoded_salt, scram->decoded_salt, sizeof (cache->decoded_salt)); } mongoc_scram_cache_t * _mongoc_scram_get_cache (mongoc_scram_t *scram) { BSON_ASSERT (scram); return _mongoc_scram_cache_copy (scram->cache); } void _mongoc_scram_set_cache (mongoc_scram_t *scram, mongoc_scram_cache_t *cache) { BSON_ASSERT (scram); if (scram->cache) { _mongoc_scram_cache_destroy (scram->cache); } scram->cache = _mongoc_scram_cache_copy (cache); } void _mongoc_scram_set_pass (mongoc_scram_t *scram, const char *pass) { BSON_ASSERT (scram); if (scram->pass) { bson_zero_free 
(scram->pass, strlen (scram->pass)); } scram->pass = pass ? bson_strdup (pass) : NULL; } void _mongoc_scram_set_user (mongoc_scram_t *scram, const char *user) { BSON_ASSERT (scram); bson_free (scram->user); scram->user = user ? bson_strdup (user) : NULL; } void _mongoc_scram_init (mongoc_scram_t *scram, mongoc_crypto_hash_algorithm_t algo) { BSON_ASSERT (scram); memset (scram, 0, sizeof *scram); mongoc_crypto_init (&scram->crypto, algo); } void _mongoc_scram_destroy (mongoc_scram_t *scram) { BSON_ASSERT (scram); bson_free (scram->user); if (scram->pass) { bson_zero_free (scram->pass, strlen (scram->pass)); } if (scram->hashed_password) { bson_zero_free (scram->hashed_password, strlen (scram->hashed_password)); } bson_free (scram->auth_message); if (scram->cache) { _mongoc_scram_cache_destroy (scram->cache); } } /* Updates the cache with scram's last-used pre-secrets and secrets */ static void _mongoc_scram_update_cache (mongoc_scram_t *scram) { mongoc_scram_cache_t *cache; BSON_ASSERT (scram); if (scram->cache) { _mongoc_scram_cache_destroy (scram->cache); } cache = (mongoc_scram_cache_t *) bson_malloc0 (sizeof (*cache)); cache->hashed_password = bson_strdup (scram->hashed_password); memcpy ( cache->decoded_salt, scram->decoded_salt, sizeof (cache->decoded_salt)); cache->iterations = scram->iterations; memcpy (cache->client_key, scram->client_key, sizeof (cache->client_key)); memcpy (cache->server_key, scram->server_key, sizeof (cache->server_key)); memcpy (cache->salted_password, scram->salted_password, sizeof (cache->salted_password)); scram->cache = cache; } static bool _mongoc_scram_buf_write (const char *src, int32_t src_len, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen) { if (src_len < 0) { src_len = (int32_t) strlen (src); } if (*outbuflen + src_len >= outbufmax) { return false; } memcpy (outbuf + *outbuflen, src, src_len); *outbuflen += src_len; return true; } /* generate client-first-message: * n,a=authzid,n=encoded-username,r=client-nonce * * note that a= is optional, so we aren't dealing with that here */ static bool _mongoc_scram_start (mongoc_scram_t *scram, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error) { uint8_t nonce[24]; const char *ptr; bool rval = true; BSON_ASSERT (scram); BSON_ASSERT (outbuf); BSON_ASSERT (outbufmax); BSON_ASSERT (outbuflen); if (!scram->user) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: username is not set"); goto FAIL; } /* auth message is as big as the outbuf just because */ scram->auth_message = (uint8_t *) bson_malloc (outbufmax); scram->auth_messagemax = outbufmax; /* the server uses a 24 byte random nonce. 
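/* A sketch of the client-first-message this function produces for a user
 * named "app,admin=rw": the "n,," GS2 header, the username with ',' and
 * '=' escaped per RFC 5802, and the base64 client nonce (the nonce value
 * below is only an example).
 */
static const char *demo_client_first_message =
   "n,,n=app=2Cadmin=3Drw,r=fyko+d2lbbFgONRv9qkxdawL";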
so we do as well */ if (1 != _mongoc_rand_bytes (nonce, sizeof (nonce))) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: could not generate a cryptographically " "secure nonce in sasl step 1"); goto FAIL; } scram->encoded_nonce_len = bson_b64_ntop (nonce, sizeof (nonce), scram->encoded_nonce, sizeof (scram->encoded_nonce)); if (-1 == scram->encoded_nonce_len) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: could not encode nonce"); goto FAIL; } if (!_mongoc_scram_buf_write ("n,,n=", -1, outbuf, outbufmax, outbuflen)) { goto BUFFER; } for (ptr = scram->user; *ptr; ptr++) { /* RFC 5802 specifies that ',' and '=' and encoded as '=2C' and '=3D' * respectively in the user name */ switch (*ptr) { case ',': if (!_mongoc_scram_buf_write ( "=2C", -1, outbuf, outbufmax, outbuflen)) { goto BUFFER; } break; case '=': if (!_mongoc_scram_buf_write ( "=3D", -1, outbuf, outbufmax, outbuflen)) { goto BUFFER; } break; default: if (!_mongoc_scram_buf_write (ptr, 1, outbuf, outbufmax, outbuflen)) { goto BUFFER; } break; } } if (!_mongoc_scram_buf_write (",r=", -1, outbuf, outbufmax, outbuflen)) { goto BUFFER; } if (!_mongoc_scram_buf_write (scram->encoded_nonce, scram->encoded_nonce_len, outbuf, outbufmax, outbuflen)) { goto BUFFER; } /* we have to keep track of the conversation to create a client proof later * on. This copies the message we're crafting from the 'n=' portion onwards * into a buffer we're managing */ if (!_mongoc_scram_buf_write ((char *) outbuf + 3, *outbuflen - 3, scram->auth_message, scram->auth_messagemax, &scram->auth_messagelen)) { goto BUFFER_AUTH; } if (!_mongoc_scram_buf_write (",", -1, scram->auth_message, scram->auth_messagemax, &scram->auth_messagelen)) { goto BUFFER_AUTH; } goto CLEANUP; BUFFER_AUTH: bson_set_error ( error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: could not buffer auth message in sasl step1"); goto FAIL; BUFFER: bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: could not buffer sasl step1"); goto FAIL; FAIL: rval = false; CLEANUP: return rval; } /* Compute the SCRAM step Hi() as defined in RFC5802 */ static void _mongoc_scram_salt_password (mongoc_scram_t *scram, const char *password, uint32_t password_len, const uint8_t *salt, uint32_t salt_len, uint32_t iterations) { uint8_t intermediate_digest[MONGOC_SCRAM_HASH_MAX_SIZE]; uint8_t start_key[MONGOC_SCRAM_HASH_MAX_SIZE]; int i; int k; uint8_t *output = scram->salted_password; memcpy (start_key, salt, salt_len); start_key[salt_len] = 0; start_key[salt_len + 1] = 0; start_key[salt_len + 2] = 0; start_key[salt_len + 3] = 1; mongoc_crypto_hmac (&scram->crypto, password, password_len, start_key, _scram_hash_size (scram), output); memcpy (intermediate_digest, output, _scram_hash_size (scram)); /* intermediateDigest contains Ui and output contains the accumulated XOR:ed * result */ for (i = 2; i <= iterations; i++) { mongoc_crypto_hmac (&scram->crypto, password, password_len, intermediate_digest, _scram_hash_size (scram), intermediate_digest); for (k = 0; k < _scram_hash_size (scram); k++) { output[k] ^= intermediate_digest[k]; } } } static bool _mongoc_scram_generate_client_proof (mongoc_scram_t *scram, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen) { uint8_t stored_key[MONGOC_SCRAM_HASH_MAX_SIZE]; uint8_t client_signature[MONGOC_SCRAM_HASH_MAX_SIZE]; unsigned char client_proof[MONGOC_SCRAM_HASH_MAX_SIZE]; int i; int r = 0; if 
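/* The loop above computes Hi() from RFC 5802, i.e. PBKDF2 with HMAC as the
 * PRF and a single output block:
 *
 *    U1 = HMAC (password, salt || INT(1))
 *    Ui = HMAC (password, U(i-1))
 *    Hi = U1 XOR U2 XOR ... XOR U(iterations)
 *
 * A standalone sketch, assuming a 32-byte (SHA-256) digest and a
 * hypothetical demo_hmac () helper in place of mongoc_crypto_hmac:
 */
#include <stdint.h>
#include <string.h>

#define DEMO_HASH_SIZE 32

/* assumed helper: HMAC (key, data) with a DEMO_HASH_SIZE-byte digest */
void
demo_hmac (const uint8_t *key, size_t key_len,
           const uint8_t *data, size_t data_len,
           uint8_t out[DEMO_HASH_SIZE]);

static void
demo_hi (const uint8_t *password, size_t password_len,
         const uint8_t *salt, size_t salt_len, /* salt_len <= DEMO_HASH_SIZE */
         uint32_t iterations,
         uint8_t out[DEMO_HASH_SIZE])
{
   uint8_t block[DEMO_HASH_SIZE + 4];
   uint8_t u[DEMO_HASH_SIZE];
   uint32_t i;
   int k;

   /* U1 = HMAC (password, salt || INT(1)), INT(1) written big endian */
   memcpy (block, salt, salt_len);
   block[salt_len] = 0;
   block[salt_len + 1] = 0;
   block[salt_len + 2] = 0;
   block[salt_len + 3] = 1;
   demo_hmac (password, password_len, block, salt_len + 4, u);
   memcpy (out, u, DEMO_HASH_SIZE);

   /* Ui = HMAC (password, U(i-1)); Hi accumulates the XOR of every Ui. */
   for (i = 2; i <= iterations; i++) {
      uint8_t next[DEMO_HASH_SIZE];

      demo_hmac (password, password_len, u, DEMO_HASH_SIZE, next);
      memcpy (u, next, DEMO_HASH_SIZE);
      for (k = 0; k < DEMO_HASH_SIZE; k++) {
         out[k] ^= u[k];
      }
   }
}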
(!*scram->client_key) { /* ClientKey := HMAC(saltedPassword, "Client Key") */ mongoc_crypto_hmac (&scram->crypto, scram->salted_password, _scram_hash_size (scram), (uint8_t *) MONGOC_SCRAM_CLIENT_KEY, (int) strlen (MONGOC_SCRAM_CLIENT_KEY), scram->client_key); } /* StoredKey := H(client_key) */ mongoc_crypto_hash (&scram->crypto, scram->client_key, (size_t) _scram_hash_size (scram), stored_key); /* ClientSignature := HMAC(StoredKey, AuthMessage) */ mongoc_crypto_hmac (&scram->crypto, stored_key, _scram_hash_size (scram), scram->auth_message, scram->auth_messagelen, client_signature); /* ClientProof := ClientKey XOR ClientSignature */ for (i = 0; i < _scram_hash_size (scram); i++) { client_proof[i] = scram->client_key[i] ^ client_signature[i]; } r = bson_b64_ntop (client_proof, _scram_hash_size (scram), (char *) outbuf + *outbuflen, outbufmax - *outbuflen); if (-1 == r) { return false; } *outbuflen += r; return true; } /* Parse server-first-message of the form: * r=client-nonce|server-nonce,s=user-salt,i=iteration-count * * Generate client-final-message of the form: * c=channel-binding(base64),r=client-nonce|server-nonce,p=client-proof */ static bool _mongoc_scram_step2 (mongoc_scram_t *scram, const uint8_t *inbuf, uint32_t inbuflen, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error) { uint8_t *val_r = NULL; uint32_t val_r_len; uint8_t *val_s = NULL; uint32_t val_s_len; uint8_t *val_i = NULL; uint32_t val_i_len; uint8_t **current_val; uint32_t *current_val_len; const uint8_t *ptr; const uint8_t *next_comma; char *tmp; char *hashed_password; uint8_t decoded_salt[MONGOC_SCRAM_B64_HASH_MAX_SIZE] = {0}; int32_t decoded_salt_len; /* the decoded salt leaves four trailing bytes to add the int32 0x00000001 */ const int32_t expected_salt_length = _scram_hash_size (scram) - 4; bool rval = true; int iterations; BSON_ASSERT (scram); BSON_ASSERT (outbuf); BSON_ASSERT (outbufmax); BSON_ASSERT (outbuflen); if (scram->crypto.algorithm == MONGOC_CRYPTO_ALGORITHM_SHA_1) { /* Auth spec for SCRAM-SHA-1: "The password variable MUST be the mongodb * hashed variant. The mongo hashed variant is computed as hash = HEX( * MD5( UTF8( username + ':mongo:' + plain_text_password )))" */ tmp = bson_strdup_printf ("%s:mongo:%s", scram->user, scram->pass); hashed_password = _mongoc_hex_md5 (tmp); bson_zero_free (tmp, strlen (tmp)); } else if (scram->crypto.algorithm == MONGOC_CRYPTO_ALGORITHM_SHA_256) { /* Auth spec for SCRAM-SHA-256: "Passwords MUST be prepared with SASLprep, * per RFC 5802. Passwords are used directly for key derivation; they * MUST NOT be digested as they are in SCRAM-SHA-1." 
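/* For reference, the full RFC 5802 derivation that the client-proof and
 * server-signature code in this file implements:
 *
 *    SaltedPassword  := Hi (Normalize (password), salt, i)
 *    ClientKey       := HMAC (SaltedPassword, "Client Key")
 *    StoredKey       := H (ClientKey)
 *    AuthMessage     := client-first-message-bare + "," +
 *                       server-first-message + "," +
 *                       client-final-message-without-proof
 *    ClientSignature := HMAC (StoredKey, AuthMessage)
 *    ClientProof     := ClientKey XOR ClientSignature
 *    ServerKey       := HMAC (SaltedPassword, "Server Key")
 *    ServerSignature := HMAC (ServerKey, AuthMessage)
 */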
*/ hashed_password = _mongoc_sasl_prep (scram->pass, (int) strlen (scram->pass), error); if (!hashed_password) { goto FAIL; } } else { BSON_ASSERT (false); } /* we need all of the incoming message for the final client proof */ if (!_mongoc_scram_buf_write ((char *) inbuf, inbuflen, scram->auth_message, scram->auth_messagemax, &scram->auth_messagelen)) { goto BUFFER_AUTH; } if (!_mongoc_scram_buf_write (",", -1, scram->auth_message, scram->auth_messagemax, &scram->auth_messagelen)) { goto BUFFER_AUTH; } for (ptr = inbuf; ptr < inbuf + inbuflen;) { switch (*ptr) { case 'r': current_val = &val_r; current_val_len = &val_r_len; break; case 's': current_val = &val_s; current_val_len = &val_s_len; break; case 'i': current_val = &val_i; current_val_len = &val_i_len; break; default: bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: unknown key (%c) in sasl step 2", *ptr); goto FAIL; } ptr++; if (*ptr != '=') { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: invalid parse state in sasl step 2"); goto FAIL; } ptr++; next_comma = (const uint8_t *) memchr (ptr, ',', (inbuf + inbuflen) - ptr); if (next_comma) { *current_val_len = (uint32_t) (next_comma - ptr); } else { *current_val_len = (uint32_t) ((inbuf + inbuflen) - ptr); } *current_val = (uint8_t *) bson_malloc (*current_val_len + 1); memcpy (*current_val, ptr, *current_val_len); (*current_val)[*current_val_len] = '\0'; if (next_comma) { ptr = next_comma + 1; } else { break; } } if (!val_r) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: no r param in sasl step 2"); goto FAIL; } if (!val_s) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: no s param in sasl step 2"); goto FAIL; } if (!val_i) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: no i param in sasl step 2"); goto FAIL; } /* verify our nonce */ if (val_r_len < scram->encoded_nonce_len || mongoc_memcmp (val_r, scram->encoded_nonce, scram->encoded_nonce_len)) { bson_set_error ( error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: client nonce not repeated in sasl step 2"); } *outbuflen = 0; if (!_mongoc_scram_buf_write ( "c=biws,r=", -1, outbuf, outbufmax, outbuflen)) { goto BUFFER; } if (!_mongoc_scram_buf_write ( (char *) val_r, val_r_len, outbuf, outbufmax, outbuflen)) { goto BUFFER; } if (!_mongoc_scram_buf_write ((char *) outbuf, *outbuflen, scram->auth_message, scram->auth_messagemax, &scram->auth_messagelen)) { goto BUFFER_AUTH; } if (!_mongoc_scram_buf_write (",p=", -1, outbuf, outbufmax, outbuflen)) { goto BUFFER; } decoded_salt_len = bson_b64_pton ((char *) val_s, decoded_salt, sizeof (decoded_salt)); if (-1 == decoded_salt_len) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: unable to decode salt in sasl step2"); goto FAIL; } if (expected_salt_length != decoded_salt_len) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: invalid salt length of %d in sasl step2", decoded_salt_len); goto FAIL; } iterations = (int) bson_ascii_strtoll ((char *) val_i, &tmp, 10); /* tmp holds the location of the failed to parse character. 
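/* Sample messages for the parsing and construction done in this step,
 * taken from the RFC 5802 example exchange ("biws" is simply
 * base64 ("n,,"), the channel-binding header echoed back):
 */
static const char *demo_server_first_message =
   "r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096";
static const char *demo_client_final_without_proof =
   "c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j";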
So if it's * null, we got to the end of the string and didn't have a parse error */ if (*tmp) { bson_set_error ( error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: unable to parse iterations in sasl step2"); goto FAIL; } if (iterations < 0) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: iterations is negative in sasl step2"); goto FAIL; } /* drivers MUST enforce a minimum iteration count of 4096 and MUST error if * the authentication conversation specifies a lower count. This mitigates * downgrade attacks by a man-in-the-middle attacker. */ if (iterations < 4096) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: iterations must be at least 4096"); goto FAIL; } /* Save the presecrets for caching */ scram->hashed_password = bson_strdup (hashed_password); scram->iterations = iterations; memcpy (scram->decoded_salt, decoded_salt, sizeof (scram->decoded_salt)); if (scram->cache && _mongoc_scram_cache_has_presecrets (scram->cache, scram)) { _mongoc_scram_cache_apply_secrets (scram->cache, scram); } if (!*scram->salted_password) { _mongoc_scram_salt_password (scram, hashed_password, (uint32_t) strlen (hashed_password), decoded_salt, decoded_salt_len, (uint32_t) iterations); } _mongoc_scram_generate_client_proof (scram, outbuf, outbufmax, outbuflen); goto CLEANUP; BUFFER_AUTH: bson_set_error ( error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: could not buffer auth message in sasl step2"); goto FAIL; BUFFER: bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: could not buffer sasl step2"); goto FAIL; FAIL: rval = false; CLEANUP: bson_free (val_r); bson_free (val_s); bson_free (val_i); if (hashed_password) { bson_zero_free (hashed_password, strlen (hashed_password)); } return rval; } static bool _mongoc_scram_verify_server_signature (mongoc_scram_t *scram, uint8_t *verification, uint32_t len) { char encoded_server_signature[MONGOC_SCRAM_B64_HASH_MAX_SIZE]; int32_t encoded_server_signature_len; uint8_t server_signature[MONGOC_SCRAM_HASH_MAX_SIZE]; if (!*scram->server_key) { /* ServerKey := HMAC(SaltedPassword, "Server Key") */ mongoc_crypto_hmac (&scram->crypto, scram->salted_password, _scram_hash_size (scram), (uint8_t *) MONGOC_SCRAM_SERVER_KEY, strlen (MONGOC_SCRAM_SERVER_KEY), scram->server_key); } /* ServerSignature := HMAC(ServerKey, AuthMessage) */ mongoc_crypto_hmac (&scram->crypto, scram->server_key, _scram_hash_size (scram), scram->auth_message, scram->auth_messagelen, server_signature); encoded_server_signature_len = bson_b64_ntop (server_signature, _scram_hash_size (scram), encoded_server_signature, sizeof (encoded_server_signature)); if (encoded_server_signature_len == -1) { return false; } return (len == encoded_server_signature_len) && (mongoc_memcmp (verification, encoded_server_signature, len) == 0); } static bool _mongoc_scram_step3 (mongoc_scram_t *scram, const uint8_t *inbuf, uint32_t inbuflen, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error) { uint8_t *val_e = NULL; uint32_t val_e_len; uint8_t *val_v = NULL; uint32_t val_v_len; uint8_t **current_val; uint32_t *current_val_len; const uint8_t *ptr; const uint8_t *next_comma; bool rval = true; BSON_ASSERT (scram); BSON_ASSERT (outbuf); BSON_ASSERT (outbufmax); BSON_ASSERT (outbuflen); for (ptr = inbuf; ptr < inbuf + inbuflen;) { switch (*ptr) { case 'e': current_val = &val_e; current_val_len = &val_e_len; break; 
case 'v': current_val = &val_v; current_val_len = &val_v_len; break; default: bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: unknown key (%c) in sasl step 3", *ptr); goto FAIL; } ptr++; if (*ptr != '=') { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: invalid parse state in sasl step 3"); goto FAIL; } ptr++; next_comma = (const uint8_t *) memchr (ptr, ',', (inbuf + inbuflen) - ptr); if (next_comma) { *current_val_len = (uint32_t) (next_comma - ptr); } else { *current_val_len = (uint32_t) ((inbuf + inbuflen) - ptr); } *current_val = (uint8_t *) bson_malloc (*current_val_len + 1); memcpy (*current_val, ptr, *current_val_len); (*current_val)[*current_val_len] = '\0'; if (next_comma) { ptr = next_comma + 1; } else { break; } } *outbuflen = 0; if (val_e) { bson_set_error ( error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: authentication failure in sasl step 3 : %s", val_e); goto FAIL; } if (!val_v) { bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: no v param in sasl step 3"); goto FAIL; } if (!_mongoc_scram_verify_server_signature (scram, val_v, val_v_len)) { bson_set_error ( error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: could not verify server signature in sasl step 3"); goto FAIL; } /* Update the cache if authentication succeeds */ _mongoc_scram_update_cache (scram); goto CLEANUP; FAIL: rval = false; CLEANUP: bson_free (val_e); bson_free (val_v); return rval; } bool _mongoc_scram_step (mongoc_scram_t *scram, const uint8_t *inbuf, uint32_t inbuflen, uint8_t *outbuf, uint32_t outbufmax, uint32_t *outbuflen, bson_error_t *error) { BSON_ASSERT (scram); BSON_ASSERT (inbuf); BSON_ASSERT (outbuf); BSON_ASSERT (outbuflen); scram->step++; switch (scram->step) { case 1: return _mongoc_scram_start (scram, outbuf, outbufmax, outbuflen, error); case 2: return _mongoc_scram_step2 ( scram, inbuf, inbuflen, outbuf, outbufmax, outbuflen, error); case 3: return _mongoc_scram_step3 ( scram, inbuf, inbuflen, outbuf, outbufmax, outbuflen, error); default: bson_set_error (error, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_NOT_DONE, "SCRAM Failure: maximum steps detected"); return false; } } bool _mongoc_sasl_prep_required (const char *str) { unsigned char c; while (*str) { c = (unsigned char) *str; /* characters below 32 contain all of the control characters. * characters above 127 are multibyte UTF-8 characters. * character 127 is the DEL character. */ if (c < 32 || c >= 127) { return true; } str++; } return false; } #ifdef MONGOC_ENABLE_ICU char * _mongoc_sasl_prep_impl (const char *name, const char *in_utf8, int in_utf8_len, bson_error_t *err) { /* The flow is in_utf8 -> in_utf16 -> SASLPrep -> out_utf16 -> out_utf8. */ UChar *in_utf16, *out_utf16; char *out_utf8; int32_t in_utf16_len, out_utf16_len, out_utf8_len; UErrorCode error_code = U_ZERO_ERROR; UStringPrepProfile *prep; #define SASL_PREP_ERR_RETURN(msg) \ do { \ bson_set_error (err, \ MONGOC_ERROR_SCRAM, \ MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, \ (msg), \ name); \ return NULL; \ } while (0) /* 1. convert str to UTF-16. */ /* preflight to get the destination length. */ (void) u_strFromUTF8 ( NULL, 0, &in_utf16_len, in_utf8, in_utf8_len, &error_code); if (error_code != U_BUFFER_OVERFLOW_ERROR) { SASL_PREP_ERR_RETURN ("could not calculate UTF-16 length of %s"); } /* convert to UTF-16. 
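/* The conversions in this function all use ICU's two-pass "preflight"
 * idiom: call once with a NULL buffer to learn the required length (ICU
 * reports U_BUFFER_OVERFLOW_ERROR), then allocate and call again.  A
 * minimal sketch for the UTF-8 -> UTF-16 step only (demo_utf8_to_utf16 is
 * hypothetical):
 */
#include <stdlib.h>
#include <unicode/ustring.h>

static UChar *
demo_utf8_to_utf16 (const char *in_utf8, int32_t in_len, int32_t *out_len)
{
   UErrorCode status = U_ZERO_ERROR;
   UChar *out;

   /* Pass 1: preflight to get the destination length. */
   u_strFromUTF8 (NULL, 0, out_len, in_utf8, in_len, &status);
   if (status != U_BUFFER_OVERFLOW_ERROR) {
      return NULL;
   }

   /* Pass 2: allocate (+1 for the terminator) and convert for real. */
   status = U_ZERO_ERROR;
   out = malloc (sizeof (UChar) * (*out_len + 1));
   if (!out) {
      return NULL;
   }
   u_strFromUTF8 (out, *out_len + 1, NULL, in_utf8, in_len, &status);
   if (U_FAILURE (status)) {
      free (out);
      return NULL;
   }
   return out;
}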
*/ error_code = U_ZERO_ERROR; in_utf16 = bson_malloc (sizeof (UChar) * (in_utf16_len + 1)); /* add one for null byte. */ (void) u_strFromUTF8 ( in_utf16, in_utf16_len + 1, NULL, in_utf8, in_utf8_len, &error_code); if (error_code) { bson_free (in_utf16); SASL_PREP_ERR_RETURN ("could not convert %s to UTF-16"); } /* 2. perform SASLPrep. */ prep = usprep_openByType (USPREP_RFC4013_SASLPREP, &error_code); if (error_code) { bson_free (in_utf16); SASL_PREP_ERR_RETURN ("could not start SASLPrep for %s"); } /* preflight. */ out_utf16_len = usprep_prepare ( prep, in_utf16, in_utf16_len, NULL, 0, USPREP_DEFAULT, NULL, &error_code); if (error_code != U_BUFFER_OVERFLOW_ERROR) { bson_free (in_utf16); usprep_close (prep); SASL_PREP_ERR_RETURN ("could not calculate SASLPrep length of %s"); } /* convert. */ error_code = U_ZERO_ERROR; out_utf16 = bson_malloc (sizeof (UChar) * (out_utf16_len + 1)); (void) usprep_prepare (prep, in_utf16, in_utf16_len, out_utf16, out_utf16_len + 1, USPREP_DEFAULT, NULL, &error_code); if (error_code) { bson_free (in_utf16); bson_free (out_utf16); usprep_close (prep); SASL_PREP_ERR_RETURN ("could not execute SASLPrep for %s"); } bson_free (in_utf16); usprep_close (prep); /* 3. convert back to UTF-8. */ /* preflight. */ (void) u_strToUTF8 ( NULL, 0, &out_utf8_len, out_utf16, out_utf16_len, &error_code); if (error_code != U_BUFFER_OVERFLOW_ERROR) { bson_free (out_utf16); SASL_PREP_ERR_RETURN ("could not calculate UTF-8 length of %s"); } /* convert. */ error_code = U_ZERO_ERROR; out_utf8 = (char *) bson_malloc ( sizeof (char) * (out_utf8_len + 1)); /* add one for null byte. */ (void) u_strToUTF8 ( out_utf8, out_utf8_len + 1, NULL, out_utf16, out_utf16_len, &error_code); if (error_code) { bson_free (out_utf8); bson_free (out_utf16); SASL_PREP_ERR_RETURN ("could not convert %s back to UTF-8"); } bson_free (out_utf16); return out_utf8; #undef SASL_PREP_ERR_RETURN } #endif char * _mongoc_sasl_prep (const char *in_utf8, int in_utf8_len, bson_error_t *err) { #ifdef MONGOC_ENABLE_ICU return _mongoc_sasl_prep_impl ("password", in_utf8, in_utf8_len, err); #else if (_mongoc_sasl_prep_required (in_utf8)) { bson_set_error (err, MONGOC_ERROR_SCRAM, MONGOC_ERROR_SCRAM_PROTOCOL_ERROR, "SCRAM Failure: ICU required to SASLPrep password"); return NULL; } return bson_strdup (in_utf8); #endif } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-secure-channel-private.h0000644000076500000240000000547713572250760030036 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SECURE_CHANNEL_PRIVATE_H #define MONGOC_SECURE_CHANNEL_PRIVATE_H #include #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-tls-secure-channel-private.h" #define SECURITY_WIN32 #include #include #include BSON_BEGIN_DECLS char * _mongoc_secure_channel_extract_subject (const char *filename, const char *passphrase); bool mongoc_secure_channel_setup_ca ( mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt); bool mongoc_secure_channel_setup_crl ( mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt); size_t mongoc_secure_channel_read (mongoc_stream_tls_t *tls, void *data, size_t data_length); PCCERT_CONTEXT mongoc_secure_channel_setup_certificate ( mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt); /* it may require 16k + some overhead to hold one decryptable block of data - do * what cURL does, add 1k */ #define MONGOC_SCHANNEL_BUFFER_INIT_SIZE (17 * 1024) void _mongoc_secure_channel_init_sec_buffer (SecBuffer *buffer, unsigned long buf_type, void *buf_data_ptr, unsigned long buf_byte_size); void _mongoc_secure_channel_init_sec_buffer_desc (SecBufferDesc *desc, SecBuffer *buffer_array, unsigned long buffer_count); void mongoc_secure_channel_realloc_buf (size_t *size, uint8_t **buf, size_t new_size); bool mongoc_secure_channel_handshake_step_1 (mongoc_stream_tls_t *tls, char *hostname); bool mongoc_secure_channel_handshake_step_2 (mongoc_stream_tls_t *tls, char *hostname); bool mongoc_secure_channel_handshake_step_3 (mongoc_stream_tls_t *tls, char *hostname); BSON_END_DECLS #endif /* MONGOC_SECURE_CHANNEL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-secure-channel.c0000644000076500000240000010127013572250760026345 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL #include #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-secure-channel-private.h" #include "mongoc/mongoc-stream-tls-secure-channel-private.h" #include "mongoc/mongoc-errno-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-secure-channel" /* mingw doesn't define this */ #ifndef SECBUFFER_ALERT #define SECBUFFER_ALERT 17 #endif PCCERT_CONTEXT mongoc_secure_channel_setup_certificate_from_file (const char *filename) { char *pem; FILE *file; bool success; HCRYPTKEY hKey; long pem_length; HCRYPTPROV provider; CERT_BLOB public_blob; const char *pem_public; const char *pem_private; LPBYTE blob_private = NULL; PCCERT_CONTEXT cert = NULL; DWORD blob_private_len = 0; HCERTSTORE cert_store = NULL; DWORD encrypted_cert_len = 0; LPBYTE encrypted_cert = NULL; DWORD encrypted_private_len = 0; LPBYTE encrypted_private = NULL; file = fopen (filename, "rb"); if (!file) { MONGOC_ERROR ("Couldn't open file '%s'", filename); return false; } fseek (file, 0, SEEK_END); pem_length = ftell (file); fseek (file, 0, SEEK_SET); if (pem_length < 1) { MONGOC_ERROR ("Couldn't determine file size of '%s'", filename); return false; } pem = (char *) bson_malloc0 (pem_length); fread ((void *) pem, 1, pem_length, file); fclose (file); pem_public = strstr (pem, "-----BEGIN CERTIFICATE-----"); pem_private = strstr (pem, "-----BEGIN ENCRYPTED PRIVATE KEY-----"); if (pem_private) { MONGOC_ERROR ("Detected unsupported encrypted private key"); goto fail; } pem_private = strstr (pem, "-----BEGIN RSA PRIVATE KEY-----"); if (!pem_private) { pem_private = strstr (pem, "-----BEGIN PRIVATE KEY-----"); } if (!pem_private) { MONGOC_ERROR ("Can't find private key in '%s'", filename); goto fail; } public_blob.cbData = (DWORD) strlen (pem_public); public_blob.pbData = (BYTE *) pem_public; /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380264%28v=vs.85%29.aspx */ CryptQueryObject ( CERT_QUERY_OBJECT_BLOB, /* dwObjectType, blob or file */ &public_blob, /* pvObject, Unicode filename */ CERT_QUERY_CONTENT_FLAG_ALL, /* dwExpectedContentTypeFlags */ CERT_QUERY_FORMAT_FLAG_ALL, /* dwExpectedFormatTypeFlags */ 0, /* dwFlags, reserved for "future use" */ NULL, /* pdwMsgAndCertEncodingType, OUT, unused */ NULL, /* pdwContentType (dwExpectedContentTypeFlags), OUT, unused */ NULL, /* pdwFormatType (dwExpectedFormatTypeFlags,), OUT, unused */ NULL, /* phCertStore, OUT, HCERTSTORE.., unused, for now */ NULL, /* phMsg, OUT, HCRYPTMSG, only for PKC7, unused */ (const void **) &cert /* ppvContext, OUT, the Certificate Context */ ); if (!cert) { MONGOC_ERROR ("Failed to extract public key from '%s'. Error 0x%.8X", filename, GetLastError ()); goto fail; } /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380285%28v=vs.85%29.aspx */ success = CryptStringToBinaryA (pem_private, /* pszString */ 0, /* cchString */ CRYPT_STRING_BASE64HEADER, /* dwFlags */ NULL, /* pbBinary */ &encrypted_private_len, /* pcBinary, IN/OUT */ NULL, /* pdwSkip */ NULL); /* pdwFlags */ if (!success) { MONGOC_ERROR ("Failed to convert base64 private key. 
Error 0x%.8X", GetLastError ()); goto fail; } encrypted_private = (LPBYTE) bson_malloc0 (encrypted_private_len); success = CryptStringToBinaryA (pem_private, 0, CRYPT_STRING_BASE64HEADER, encrypted_private, &encrypted_private_len, NULL, NULL); if (!success) { MONGOC_ERROR ("Failed to convert base64 private key. Error 0x%.8X", GetLastError ()); goto fail; } /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa379912%28v=vs.85%29.aspx */ success = CryptDecodeObjectEx ( X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, /* dwCertEncodingType */ PKCS_RSA_PRIVATE_KEY, /* lpszStructType */ encrypted_private, /* pbEncoded */ encrypted_private_len, /* cbEncoded */ 0, /* dwFlags */ NULL, /* pDecodePara */ NULL, /* pvStructInfo */ &blob_private_len); /* pcbStructInfo */ if (!success) { LPTSTR msg = NULL; FormatMessage (FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ARGUMENT_ARRAY, NULL, GetLastError (), LANG_NEUTRAL, (LPTSTR) &msg, 0, NULL); MONGOC_ERROR ( "Failed to parse private key. %s (0x%.8X)", msg, GetLastError ()); LocalFree (msg); goto fail; } blob_private = (LPBYTE) bson_malloc0 (blob_private_len); success = CryptDecodeObjectEx (X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, PKCS_RSA_PRIVATE_KEY, encrypted_private, encrypted_private_len, 0, NULL, blob_private, &blob_private_len); if (!success) { MONGOC_ERROR ("Failed to parse private key. Error 0x%.8X", GetLastError ()); goto fail; } /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa379886%28v=vs.85%29.aspx */ success = CryptAcquireContext (&provider, /* phProv */ NULL, /* pszContainer */ MS_ENHANCED_PROV, /* pszProvider */ PROV_RSA_FULL, /* dwProvType */ CRYPT_VERIFYCONTEXT); /* dwFlags */ if (!success) { MONGOC_ERROR ("CryptAcquireContext failed with error 0x%.8X", GetLastError ()); goto fail; } /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380207%28v=vs.85%29.aspx */ success = CryptImportKey (provider, /* hProv */ blob_private, /* pbData */ blob_private_len, /* dwDataLen */ 0, /* hPubKey */ 0, /* dwFlags */ &hKey); /* phKey, OUT */ if (!success) { MONGOC_ERROR ("CryptImportKey for private key failed with error 0x%.8X", GetLastError ()); goto fail; } /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa376573%28v=vs.85%29.aspx */ success = CertSetCertificateContextProperty ( cert, /* pCertContext */ CERT_KEY_PROV_HANDLE_PROP_ID, /* dwPropId */ 0, /* dwFlags */ (const void *) provider); /* pvData */ if (success) { TRACE ("%s", "Successfully loaded client certificate"); return cert; } MONGOC_ERROR ("Can't associate private key with public key: 0x%.8X", GetLastError ()); fail: SecureZeroMemory (pem, pem_length); bson_free (pem); if (encrypted_private) { SecureZeroMemory (encrypted_private, encrypted_private_len); bson_free (encrypted_private); } if (blob_private) { SecureZeroMemory (blob_private, blob_private_len); bson_free (blob_private); } return NULL; } PCCERT_CONTEXT mongoc_secure_channel_setup_certificate ( mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt) { return mongoc_secure_channel_setup_certificate_from_file (opt->pem_file); } void _bson_append_szoid (bson_string_t *retval, PCCERT_CONTEXT cert, const char *label, void *oid) { DWORD oid_len = CertGetNameString (cert, CERT_NAME_ATTR_TYPE, 0, oid, NULL, 0); if (oid_len > 1) { char *tmp = bson_malloc0 (oid_len); CertGetNameString (cert, CERT_NAME_ATTR_TYPE, 0, oid, tmp, oid_len); bson_string_append_printf (retval, "%s%s", label, tmp); bson_free (tmp); } } char * _mongoc_secure_channel_extract_subject 
(const char *filename, const char *passphrase) { bson_string_t *retval; PCCERT_CONTEXT cert; cert = mongoc_secure_channel_setup_certificate_from_file (filename); if (!cert) { return NULL; } retval = bson_string_new (""); ; _bson_append_szoid (retval, cert, "C=", szOID_COUNTRY_NAME); _bson_append_szoid (retval, cert, ",ST=", szOID_STATE_OR_PROVINCE_NAME); _bson_append_szoid (retval, cert, ",L=", szOID_LOCALITY_NAME); _bson_append_szoid (retval, cert, ",O=", szOID_ORGANIZATION_NAME); _bson_append_szoid (retval, cert, ",OU=", szOID_ORGANIZATIONAL_UNIT_NAME); _bson_append_szoid (retval, cert, ",CN=", szOID_COMMON_NAME); _bson_append_szoid (retval, cert, ",STREET=", szOID_STREET_ADDRESS); return bson_string_free (retval, false); } bool mongoc_secure_channel_setup_ca ( mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt) { FILE *file; long length; const char *pem_key; HCERTSTORE cert_store = NULL; PCCERT_CONTEXT cert = NULL; DWORD encrypted_cert_len = 0; LPBYTE encrypted_cert = NULL; file = fopen (opt->ca_file, "rb"); if (!file) { MONGOC_ERROR ("Couldn't open file '%s'", opt->ca_file); return false; } fseek (file, 0, SEEK_END); length = ftell (file); fseek (file, 0, SEEK_SET); if (length < 1) { MONGOC_WARNING ("Couldn't determine file size of '%s'", opt->ca_file); return false; } pem_key = (const char *) bson_malloc0 (length); fread ((void *) pem_key, 1, length, file); fclose (file); /* If we have private keys or other fuzz, seek to the good stuff */ pem_key = strstr (pem_key, "-----BEGIN CERTIFICATE-----"); /*printf ("%s\n", pem_key);*/ if (!pem_key) { MONGOC_WARNING ("Couldn't find certificate in '%d'", opt->ca_file); return false; } if (!CryptStringToBinaryA (pem_key, 0, CRYPT_STRING_BASE64HEADER, NULL, &encrypted_cert_len, NULL, NULL)) { MONGOC_ERROR ("Failed to convert BASE64 public key. Error 0x%.8X", GetLastError ()); return false; } encrypted_cert = (LPBYTE) LocalAlloc (0, encrypted_cert_len); if (!CryptStringToBinaryA (pem_key, 0, CRYPT_STRING_BASE64HEADER, encrypted_cert, &encrypted_cert_len, NULL, NULL)) { MONGOC_ERROR ("Failed to convert BASE64 public key. Error 0x%.8X", GetLastError ()); return false; } cert = CertCreateCertificateContext ( X509_ASN_ENCODING, encrypted_cert, encrypted_cert_len); if (!cert) { MONGOC_WARNING ("Could not convert certificate"); return false; } cert_store = CertOpenStore ( CERT_STORE_PROV_SYSTEM, /* provider */ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, /* certificate encoding */ 0, /* unused */ CERT_SYSTEM_STORE_LOCAL_MACHINE, /* dwFlags */ L"Root"); /* system store name. 
"My" or "Root" */ if (cert_store == NULL) { MONGOC_ERROR ("Error opening certificate store"); return false; } if (CertAddCertificateContextToStore ( cert_store, cert, CERT_STORE_ADD_USE_EXISTING, NULL)) { TRACE ("%s", "Added the certificate !"); CertCloseStore (cert_store, 0); return true; } MONGOC_WARNING ("Failed adding the cert"); CertCloseStore (cert_store, 0); return false; } bool mongoc_secure_channel_setup_crl ( mongoc_stream_tls_secure_channel_t *secure_channel, mongoc_ssl_opt_t *opt) { HCERTSTORE cert_store = NULL; PCCERT_CONTEXT cert = NULL; LPWSTR str; int chars; chars = MultiByteToWideChar (CP_ACP, 0, opt->crl_file, -1, NULL, 0); if (chars < 1) { MONGOC_WARNING ("Can't determine opt->crl_file length"); return false; } str = (LPWSTR) bson_malloc0 (chars); MultiByteToWideChar (CP_ACP, 0, opt->crl_file, -1, str, chars); /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa380264%28v=vs.85%29.aspx */ CryptQueryObject ( CERT_QUERY_OBJECT_FILE, /* dwObjectType, blob or file */ str, /* pvObject, Unicode filename */ CERT_QUERY_CONTENT_FLAG_CRL, /* dwExpectedContentTypeFlags */ CERT_QUERY_FORMAT_FLAG_ALL, /* dwExpectedFormatTypeFlags */ 0, /* dwFlags, reserved for "future use" */ NULL, /* pdwMsgAndCertEncodingType, OUT, unused */ NULL, /* pdwContentType (dwExpectedContentTypeFlags), OUT, unused */ NULL, /* pdwFormatType (dwExpectedFormatTypeFlags,), OUT, unused */ NULL, /* phCertStore, OUT, HCERTSTORE.., unused, for now */ NULL, /* phMsg, OUT, HCRYPTMSG, only for PKC7, unused */ (const void **) &cert /* ppvContext, OUT, the Certificate Context */ ); bson_free (str); if (!cert) { MONGOC_WARNING ("Can't extract CRL from '%s'", opt->crl_file); return false; } cert_store = CertOpenStore ( CERT_STORE_PROV_SYSTEM, /* provider */ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, /* certificate encoding */ 0, /* unused */ CERT_SYSTEM_STORE_LOCAL_MACHINE, /* dwFlags */ L"Root"); /* system store name. "My" or "Root" */ if (cert_store == NULL) { MONGOC_ERROR ("Error opening certificate store"); CertFreeCertificateContext (cert); return false; } if (CertAddCertificateContextToStore ( cert_store, cert, CERT_STORE_ADD_USE_EXISTING, NULL)) { TRACE ("%s", "Added the certificate !"); CertFreeCertificateContext (cert); CertCloseStore (cert_store, 0); return true; } MONGOC_WARNING ("Failed adding the cert"); CertFreeCertificateContext (cert); CertCloseStore (cert_store, 0); return false; } size_t mongoc_secure_channel_read (mongoc_stream_tls_t *tls, void *data, size_t data_length) { ssize_t length; errno = 0; TRACE ("Wanting to read: %d, timeout is %d", data_length, tls->timeout_msec); /* 4th argument is minimum bytes, while the data_length is the * size of the buffer. We are totally fine with just one TLS record (few *bytes) **/ length = mongoc_stream_read ( tls->base_stream, data, data_length, 0, tls->timeout_msec); TRACE ("Got %d", length); if (length > 0) { return length; } return 0; } size_t mongoc_secure_channel_write (mongoc_stream_tls_t *tls, const void *data, size_t data_length) { ssize_t length; errno = 0; TRACE ("Wanting to write: %d", data_length); length = mongoc_stream_write ( tls->base_stream, (void *) data, data_length, tls->timeout_msec); TRACE ("Wrote: %d", length); return length; } void mongoc_secure_channel_realloc_buf (size_t *size, uint8_t **buf, size_t new_size) { *size = bson_next_power_of_two (new_size); *buf = bson_realloc (*buf, *size); } /** * The follow functions comes from one of my favorite project, cURL! 
* Thank you so much for having gone through the Secure Channel pain for me. * * * Copyright (C) 2012 - 2015, Marc Hoersken, * Copyright (C) 2012, Mark Salisbury, * Copyright (C) 2012 - 2015, Daniel Stenberg, , et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* * Based upon the PolarSSL implementation in polarssl.c and polarssl.h: * Copyright (C) 2010, 2011, Hoi-Ho Chan, * * Based upon the CyaSSL implementation in cyassl.c and cyassl.h: * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. * * Thanks for code and inspiration! */ void _mongoc_secure_channel_init_sec_buffer (SecBuffer *buffer, unsigned long buf_type, void *buf_data_ptr, unsigned long buf_byte_size) { buffer->cbBuffer = buf_byte_size; buffer->BufferType = buf_type; buffer->pvBuffer = buf_data_ptr; } void _mongoc_secure_channel_init_sec_buffer_desc (SecBufferDesc *desc, SecBuffer *buffer_array, unsigned long buffer_count) { desc->ulVersion = SECBUFFER_VERSION; desc->pBuffers = buffer_array; desc->cBuffers = buffer_count; } bool mongoc_secure_channel_handshake_step_1 (mongoc_stream_tls_t *tls, char *hostname) { SecBuffer outbuf; ssize_t written = -1; SecBufferDesc outbuf_desc; SECURITY_STATUS sspi_status = SEC_E_OK; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; TRACE ("SSL/TLS connection with '%s' (step 1/3)", hostname); /* setup output buffer */ _mongoc_secure_channel_init_sec_buffer (&outbuf, SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer_desc (&outbuf_desc, &outbuf, 1); /* setup request flags */ secure_channel->req_flags = ISC_REQ_SEQUENCE_DETECT | ISC_REQ_REPLAY_DETECT | ISC_REQ_CONFIDENTIALITY | ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_STREAM; /* allocate memory for the security context handle */ secure_channel->ctxt = (mongoc_secure_channel_ctxt *) bson_malloc0 ( sizeof (mongoc_secure_channel_ctxt)); /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa375924.aspx */ sspi_status = InitializeSecurityContext ( &secure_channel->cred->cred_handle, /* phCredential */ NULL, /* phContext */ hostname, /* pszTargetName */ secure_channel->req_flags, /* fContextReq */ 0, /* Reserved1, must be 0 */ 0, /* TargetDataRep, unused */ NULL, /* pInput */ 0, /* Reserved2, must be 0 */ &secure_channel->ctxt->ctxt_handle, /* phNewContext OUT param */ &outbuf_desc, /* pOutput OUT param */ &secure_channel->ret_flags, /* pfContextAttr OUT param */ &secure_channel->ctxt->time_stamp /* ptsExpiry OUT param */ ); if (sspi_status != SEC_I_CONTINUE_NEEDED) { MONGOC_ERROR ("initial InitializeSecurityContext failed: %d", sspi_status); return false; } TRACE ("sending initial handshake data: sending %lu bytes...", outbuf.cbBuffer); /* send initial handshake data which is now stored in output buffer */ written = mongoc_secure_channel_write (tls, outbuf.pvBuffer, outbuf.cbBuffer); FreeContextBuffer (outbuf.pvBuffer); if (outbuf.cbBuffer != (size_t) written) { MONGOC_ERROR ("failed to send initial handshake data: " "sent %zd of %lu 
bytes", written, outbuf.cbBuffer); return false; } TRACE ("sent initial handshake data: sent %zd bytes", written); secure_channel->recv_unrecoverable_err = 0; secure_channel->recv_sspi_close_notify = false; secure_channel->recv_connection_closed = false; /* continue to second handshake step */ secure_channel->connecting_state = ssl_connect_2; return true; } bool mongoc_secure_channel_handshake_step_2 (mongoc_stream_tls_t *tls, char *hostname) { mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; SECURITY_STATUS sspi_status = SEC_E_OK; ssize_t nread = -1, written = -1; SecBufferDesc outbuf_desc; SecBufferDesc inbuf_desc; SecBuffer outbuf[3]; SecBuffer inbuf[2]; bool doread; int i; doread = (secure_channel->connecting_state != ssl_connect_2_writing) ? true : false; TRACE ("%s", "SSL/TLS connection with endpoint (step 2/3)"); if (!secure_channel->cred || !secure_channel->ctxt) { return false; } /* grow the buffer if necessary */ if (secure_channel->encdata_length == secure_channel->encdata_offset) { mongoc_secure_channel_realloc_buf (&secure_channel->encdata_length, &secure_channel->encdata_buffer, secure_channel->encdata_length + 1); } for (;;) { if (doread) { /* read encrypted handshake data from socket */ nread = mongoc_secure_channel_read ( tls, (char *) (secure_channel->encdata_buffer + secure_channel->encdata_offset), secure_channel->encdata_length - secure_channel->encdata_offset); if (!nread) { if (MONGOC_ERRNO_IS_AGAIN (errno)) { if (secure_channel->connecting_state != ssl_connect_2_writing) { secure_channel->connecting_state = ssl_connect_2_reading; } TRACE ("%s", "failed to receive handshake, need more data"); return true; } MONGOC_ERROR ( "failed to receive handshake, SSL/TLS connection failed"); return false; } /* increase encrypted data buffer offset */ secure_channel->encdata_offset += nread; } TRACE ("encrypted data buffer: offset %d length %d", (int) secure_channel->encdata_offset, (int) secure_channel->encdata_length); /* setup input buffers */ _mongoc_secure_channel_init_sec_buffer ( &inbuf[0], SECBUFFER_TOKEN, malloc (secure_channel->encdata_offset), (unsigned long) (secure_channel->encdata_offset & (size_t) 0xFFFFFFFFUL)); _mongoc_secure_channel_init_sec_buffer ( &inbuf[1], SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer_desc (&inbuf_desc, inbuf, 2); /* setup output buffers */ _mongoc_secure_channel_init_sec_buffer ( &outbuf[0], SECBUFFER_TOKEN, NULL, 0); _mongoc_secure_channel_init_sec_buffer ( &outbuf[1], SECBUFFER_ALERT, NULL, 0); _mongoc_secure_channel_init_sec_buffer ( &outbuf[2], SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer_desc (&outbuf_desc, outbuf, 3); if (inbuf[0].pvBuffer == NULL) { MONGOC_ERROR ("unable to allocate memory"); return false; } /* copy received handshake data into input buffer */ memcpy (inbuf[0].pvBuffer, secure_channel->encdata_buffer, secure_channel->encdata_offset); /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa375924.aspx */ sspi_status = InitializeSecurityContext (&secure_channel->cred->cred_handle, &secure_channel->ctxt->ctxt_handle, hostname, secure_channel->req_flags, 0, 0, &inbuf_desc, 0, NULL, &outbuf_desc, &secure_channel->ret_flags, &secure_channel->ctxt->time_stamp); /* free buffer for received handshake data */ free (inbuf[0].pvBuffer); /* check if the handshake was incomplete */ if (sspi_status == SEC_E_INCOMPLETE_MESSAGE) { secure_channel->connecting_state = ssl_connect_2_reading; TRACE ("%s", "received incomplete message, need 
more data"); return true; } /* If the server has requested a client certificate, attempt to continue * the handshake without one. This will allow connections to servers which * request a client certificate but do not require it. */ if (sspi_status == SEC_I_INCOMPLETE_CREDENTIALS && !(secure_channel->req_flags & ISC_REQ_USE_SUPPLIED_CREDS)) { secure_channel->req_flags |= ISC_REQ_USE_SUPPLIED_CREDS; secure_channel->connecting_state = ssl_connect_2_writing; TRACE ("%s", "A client certificate has been requested"); return true; } /* check if the handshake needs to be continued */ if (sspi_status == SEC_I_CONTINUE_NEEDED || sspi_status == SEC_E_OK) { for (i = 0; i < 3; i++) { /* search for handshake tokens that need to be send */ if (outbuf[i].BufferType == SECBUFFER_TOKEN && outbuf[i].cbBuffer > 0) { TRACE ("sending next handshake data: sending %lu bytes...", outbuf[i].cbBuffer); /* send handshake token to server */ written = mongoc_secure_channel_write ( tls, outbuf[i].pvBuffer, outbuf[i].cbBuffer); if (outbuf[i].cbBuffer != (size_t) written) { MONGOC_ERROR ("failed to send next handshake data: " "sent %zd of %lu bytes", written, outbuf[i].cbBuffer); return false; } } /* free obsolete buffer */ if (outbuf[i].pvBuffer != NULL) { FreeContextBuffer (outbuf[i].pvBuffer); } } } else { switch (sspi_status) { case SEC_E_WRONG_PRINCIPAL: MONGOC_ERROR ("SSL Certification verification failed: hostname " "doesn't match certificate"); break; case SEC_E_UNTRUSTED_ROOT: MONGOC_ERROR ("SSL Certification verification failed: Untrusted " "root certificate"); break; case SEC_E_CERT_EXPIRED: MONGOC_ERROR ("SSL Certification verification failed: certificate " "has expired"); break; case CRYPT_E_NO_REVOCATION_CHECK: /* This seems to be raised also when hostname doesn't match the * certificate */ MONGOC_ERROR ("SSL Certification verification failed: failed " "revocation/hostname check"); break; case SEC_E_INSUFFICIENT_MEMORY: case SEC_E_INTERNAL_ERROR: case SEC_E_INVALID_HANDLE: case SEC_E_INVALID_TOKEN: case SEC_E_LOGON_DENIED: case SEC_E_NO_AUTHENTICATING_AUTHORITY: case SEC_E_NO_CREDENTIALS: case SEC_E_TARGET_UNKNOWN: case SEC_E_UNSUPPORTED_FUNCTION: #ifdef SEC_E_APPLICATION_PROTOCOL_MISMATCH /* Not available in VS2010 */ case SEC_E_APPLICATION_PROTOCOL_MISMATCH: #endif default: { LPTSTR msg = NULL; FormatMessage (FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ARGUMENT_ARRAY, NULL, GetLastError (), LANG_NEUTRAL, (LPTSTR) &msg, 0, NULL); MONGOC_ERROR ("Failed to initialize security context, error code: " "0x%04X%04X: %s", (sspi_status >> 16) & 0xffff, sspi_status & 0xffff, msg); LocalFree (msg); } } return false; } /* check if there was additional remaining encrypted data */ if (inbuf[1].BufferType == SECBUFFER_EXTRA && inbuf[1].cbBuffer > 0) { TRACE ("encrypted data length: %lu", inbuf[1].cbBuffer); /* * There are two cases where we could be getting extra data here: * 1) If we're renegotiating a connection and the handshake is already * complete (from the server perspective), it can encrypted app data * (not handshake data) in an extra buffer at this point. * 2) (sspi_status == SEC_I_CONTINUE_NEEDED) We are negotiating a * connection and this extra data is part of the handshake. * We should process the data immediately; waiting for the socket to * be ready may fail since the server is done sending handshake data. 
*/ /* check if the remaining data is less than the total amount * and therefore begins after the already processed data */ if (secure_channel->encdata_offset > inbuf[1].cbBuffer) { memmove (secure_channel->encdata_buffer, (secure_channel->encdata_buffer + secure_channel->encdata_offset) - inbuf[1].cbBuffer, inbuf[1].cbBuffer); secure_channel->encdata_offset = inbuf[1].cbBuffer; if (sspi_status == SEC_I_CONTINUE_NEEDED) { doread = FALSE; continue; } } } else { secure_channel->encdata_offset = 0; } break; } /* check if the handshake needs to be continued */ if (sspi_status == SEC_I_CONTINUE_NEEDED) { secure_channel->connecting_state = ssl_connect_2_reading; return true; } /* check if the handshake is complete */ if (sspi_status == SEC_E_OK) { secure_channel->connecting_state = ssl_connect_3; TRACE ("%s", "SSL/TLS handshake complete"); } return true; } bool mongoc_secure_channel_handshake_step_3 (mongoc_stream_tls_t *tls, char *hostname) { mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; BSON_ASSERT (ssl_connect_3 == secure_channel->connecting_state); TRACE ("SSL/TLS connection with %s (step 3/3)", hostname); if (!secure_channel->cred) { return false; } /* check if the required context attributes are met */ if (secure_channel->ret_flags != secure_channel->req_flags) { MONGOC_ERROR ("Failed handshake"); return false; } secure_channel->connecting_state = ssl_connect_done; return true; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-secure-transport-private.h0000644000076500000240000000332013572250760030443 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SECURE_TRANSPORT_PRIVATE_H #define MONGOC_SECURE_TRANSPORT_PRIVATE_H #include #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream-tls-secure-transport-private.h" #include BSON_BEGIN_DECLS char * _mongoc_cfstringref_to_cstring (CFStringRef ref); char * _mongoc_secure_transport_extract_subject (const char *filename, const char *passphrase); OSStatus mongoc_secure_transport_write (SSLConnectionRef connection, const void *data, size_t *data_length); OSStatus mongoc_secure_transport_read (SSLConnectionRef connection, void *data, size_t *data_length); bool mongoc_secure_transport_setup_ca ( mongoc_stream_tls_secure_transport_t *secure_transport, mongoc_ssl_opt_t *opt); bool mongoc_secure_transport_setup_certificate ( mongoc_stream_tls_secure_transport_t *secure_transport, mongoc_ssl_opt_t *opt); BSON_END_DECLS #endif /* MONGOC_SECURE_TRANSPORT_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-secure-transport.c0000644000076500000240000003424313572250760026776 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_SECURE_TRANSPORT #include #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-secure-transport-private.h" #include "mongoc/mongoc-stream-tls-secure-transport-private.h" #include #include #include #include #include #include #include #include /* Jailbreak Darwin Private API */ /* * An alternative to using SecIdentityCreate is to use * SecIdentityCreateWithCertificate with a temporary keychain. However, doing so * leads to memory bugs. Unfortunately, using this private API seems to be the * best solution. */ SecIdentityRef SecIdentityCreate (CFAllocatorRef allocator, SecCertificateRef certificate, SecKeyRef privateKey); #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-secure_transport" char * _mongoc_cfstringref_to_cstring (CFStringRef str) { CFIndex length; CFStringEncoding encoding; CFIndex max_size; char *cs; if (!str) { return NULL; } if (CFGetTypeID (str) != CFStringGetTypeID ()) { return NULL; } length = CFStringGetLength (str); encoding = kCFStringEncodingASCII; max_size = CFStringGetMaximumSizeForEncoding (length, encoding) + 1; cs = bson_malloc ((size_t) max_size); if (CFStringGetCString (str, cs, max_size, encoding)) { return cs; } bson_free (cs); return NULL; } static void _bson_append_cftyperef (bson_string_t *retval, const char *label, CFTypeRef str) { char *cs; if (str) { cs = _mongoc_cfstringref_to_cstring (str); if (cs) { bson_string_append_printf (retval, "%s%s", label, cs); bson_free (cs); } else { bson_string_append_printf (retval, "%s(null)", label); } } } CFTypeRef _mongoc_secure_transport_dict_get (CFArrayRef values, CFStringRef label) { if (!values || CFGetTypeID (values) != CFArrayGetTypeID ()) { return NULL; } for (CFIndex i = 0; i < CFArrayGetCount (values); ++i) { CFStringRef item_label; CFDictionaryRef item = CFArrayGetValueAtIndex (values, i); if (CFGetTypeID (item) != CFDictionaryGetTypeID ()) { continue; } item_label = CFDictionaryGetValue (item, kSecPropertyKeyLabel); if (item_label && CFStringCompare (item_label, label, 0) == kCFCompareEqualTo) { return CFDictionaryGetValue (item, kSecPropertyKeyValue); } } return NULL; } char * _mongoc_secure_transport_RFC2253_from_cert (SecCertificateRef cert) { CFTypeRef value; bson_string_t *retval; CFTypeRef subject_name; CFDictionaryRef cert_dict; cert_dict = SecCertificateCopyValues (cert, NULL, NULL); if (!cert_dict) { return NULL; } subject_name = CFDictionaryGetValue (cert_dict, kSecOIDX509V1SubjectName); if (!subject_name) { CFRelease (cert_dict); return NULL; } subject_name = CFDictionaryGetValue (subject_name, kSecPropertyKeyValue); if (!subject_name) { CFRelease (cert_dict); return NULL; } retval = bson_string_new (""); ; value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDCountryName); _bson_append_cftyperef (retval, "C=", value); value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDStateProvinceName); _bson_append_cftyperef (retval, ",ST=", value); value = 
_mongoc_secure_transport_dict_get (subject_name, kSecOIDLocalityName); _bson_append_cftyperef (retval, ",L=", value); value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDOrganizationName); _bson_append_cftyperef (retval, ",O=", value); value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDOrganizationalUnitName); if (value) { /* Can be either one unit name, or array of unit names */ if (CFGetTypeID (value) == CFStringGetTypeID ()) { _bson_append_cftyperef (retval, ",OU=", value); } else if (CFGetTypeID (value) == CFArrayGetTypeID ()) { CFIndex len = CFArrayGetCount (value); if (len > 0) { _bson_append_cftyperef ( retval, ",OU=", CFArrayGetValueAtIndex (value, 0)); } if (len > 1) { _bson_append_cftyperef ( retval, ",", CFArrayGetValueAtIndex (value, 1)); } if (len > 2) { _bson_append_cftyperef ( retval, ",", CFArrayGetValueAtIndex (value, 2)); } } } value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDCommonName); _bson_append_cftyperef (retval, ",CN=", value); value = _mongoc_secure_transport_dict_get (subject_name, kSecOIDStreetAddress); _bson_append_cftyperef (retval, ",STREET", value); CFRelease (cert_dict); return bson_string_free (retval, false); } static void safe_release (CFTypeRef ref) { if (ref) { CFRelease (ref); } } bool _mongoc_secure_transport_import_pem (const char *filename, const char *passphrase, CFArrayRef *items, SecExternalItemType *type) { SecExternalFormat format = kSecFormatPEMSequence; SecItemImportExportKeyParameters params = {0}; SecTransformRef sec_transform = NULL; CFReadStreamRef read_stream = NULL; CFDataRef dataref = NULL; CFErrorRef error = NULL; CFURLRef url = NULL; OSStatus res; bool r = false; if (!filename) { TRACE ("%s", "No certificate provided"); return false; } params.version = SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION; params.flags = 0; params.passphrase = NULL; params.alertTitle = NULL; params.alertPrompt = NULL; params.accessRef = NULL; params.keyUsage = NULL; params.keyAttributes = NULL; if (passphrase) { params.passphrase = CFStringCreateWithCString ( kCFAllocatorDefault, passphrase, kCFStringEncodingUTF8); } url = CFURLCreateFromFileSystemRepresentation ( kCFAllocatorDefault, (const UInt8 *) filename, strlen (filename), false); read_stream = CFReadStreamCreateWithFile (kCFAllocatorDefault, url); if (!CFReadStreamOpen (read_stream)) { MONGOC_ERROR ("Cannot find certificate in '%s', error reading file", filename); goto done; } sec_transform = SecTransformCreateReadTransformWithReadStream (read_stream); dataref = SecTransformExecute (sec_transform, &error); if (error) { CFStringRef str = CFErrorCopyDescription (error); MONGOC_ERROR ( "Failed importing PEM '%s': %s", filename, CFStringGetCStringPtr (str, CFStringGetFastestEncoding (str))); CFRelease (str); goto done; } res = SecItemImport ( dataref, CFSTR (".pem"), &format, type, 0, ¶ms, NULL, items); if (res) { MONGOC_ERROR ("Failed importing PEM '%s' (code: %d)", filename, res); goto done; } r = true; done: safe_release (dataref); safe_release (sec_transform); safe_release (read_stream); safe_release (url); safe_release (params.passphrase); return r; } char * _mongoc_secure_transport_extract_subject (const char *filename, const char *passphrase) { bool success; char *retval = NULL; CFArrayRef items = NULL; SecExternalItemType type = kSecItemTypeCertificate; success = _mongoc_secure_transport_import_pem (filename, passphrase, &items, &type); if (!success) { return NULL; } if (type == kSecItemTypeAggregate) { for (CFIndex i = 0; i < CFArrayGetCount (items); ++i) { 
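/* A PEM file imported as an aggregate can contain both certificates and
 * private keys; scan the items and take the subject of the first
 * certificate found. */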
CFTypeID item_id = CFGetTypeID (CFArrayGetValueAtIndex (items, i)); if (item_id == SecCertificateGetTypeID ()) { retval = _mongoc_secure_transport_RFC2253_from_cert ( (SecCertificateRef) CFArrayGetValueAtIndex (items, i)); break; } } } else if (type == kSecItemTypeCertificate) { retval = _mongoc_secure_transport_RFC2253_from_cert ((SecCertificateRef) items); } if (items) { CFRelease (items); } return retval; } bool mongoc_secure_transport_setup_certificate ( mongoc_stream_tls_secure_transport_t *secure_transport, mongoc_ssl_opt_t *opt) { bool success; CFArrayRef items; SecIdentityRef id; SecKeyRef key = NULL; SecCertificateRef cert = NULL; SecExternalItemType type = kSecItemTypeCertificate; if (!opt->pem_file) { TRACE ("%s", "No private key provided, the server won't be able to verify us"); return false; } success = _mongoc_secure_transport_import_pem ( opt->pem_file, opt->pem_pwd, &items, &type); if (!success) { /* caller will log an error */ return false; } if (type != kSecItemTypeAggregate) { MONGOC_ERROR ("Cannot work with keys of type \"%d\". Please file a JIRA", type); CFRelease (items); return false; } for (CFIndex i = 0; i < CFArrayGetCount (items); ++i) { CFTypeID item_id = CFGetTypeID (CFArrayGetValueAtIndex (items, i)); if (item_id == SecCertificateGetTypeID ()) { cert = (SecCertificateRef) CFArrayGetValueAtIndex (items, i); } else if (item_id == SecKeyGetTypeID ()) { key = (SecKeyRef) CFArrayGetValueAtIndex (items, i); } } if (!cert || !key) { MONGOC_ERROR ("Couldn't find valid private key"); CFRelease (items); return false; } id = SecIdentityCreate (kCFAllocatorDefault, cert, key); secure_transport->my_cert = CFArrayCreateMutable (kCFAllocatorDefault, 2, &kCFTypeArrayCallBacks); CFArrayAppendValue (secure_transport->my_cert, id); CFArrayAppendValue (secure_transport->my_cert, cert); CFRelease (id); /* * Secure Transport assumes the following: * * The certificate references remain valid for the lifetime of the * session. * * The identity specified in certRefs[0] is capable of signing. */ success = !SSLSetCertificate (secure_transport->ssl_ctx_ref, secure_transport->my_cert); TRACE ("Setting client certificate %s", success ? "succeeded" : "failed"); CFRelease (items); return true; } bool mongoc_secure_transport_setup_ca ( mongoc_stream_tls_secure_transport_t *secure_transport, mongoc_ssl_opt_t *opt) { CFArrayRef items; SecExternalItemType type = kSecItemTypeCertificate; bool success; if (!opt->ca_file) { TRACE ("%s", "No CA provided, using defaults"); return false; } success = _mongoc_secure_transport_import_pem (opt->ca_file, NULL, &items, &type); if (!success) { MONGOC_ERROR ("Cannot load Certificate Authorities from file \'%s\'", opt->ca_file); return false; } if (type == kSecItemTypeAggregate) { CFMutableArrayRef anchors = CFArrayCreateMutable (kCFAllocatorDefault, 0, &kCFTypeArrayCallBacks); for (CFIndex i = 0; i < CFArrayGetCount (items); ++i) { CFTypeID item_id = CFGetTypeID (CFArrayGetValueAtIndex (items, i)); if (item_id == SecCertificateGetTypeID ()) { CFArrayAppendValue (anchors, CFArrayGetValueAtIndex (items, i)); } } secure_transport->anchors = anchors; CFRelease (items); } else if (type == kSecItemTypeCertificate) { secure_transport->anchors = items; } else { CFRelease (items); } /* This should be SSLSetCertificateAuthorities But the /TLS/ tests fail * when it is */ success = !SSLSetTrustedRoots ( secure_transport->ssl_ctx_ref, secure_transport->anchors, true); TRACE ("Setting certificate authority %s (%s)", success ? 
"succeeded" : "failed", opt->ca_file); return true; } OSStatus mongoc_secure_transport_read (SSLConnectionRef connection, void *data, size_t *data_length) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) connection; ssize_t length; ENTRY; errno = 0; /* 4 arguments is *min_bytes* -- This is not a negotiation. * Secure Transport wants all or nothing. We must continue reading until * we get this amount, or timeout */ length = mongoc_stream_read ( tls->base_stream, data, *data_length, *data_length, tls->timeout_msec); if (length > 0) { *data_length = length; RETURN (noErr); } if (length == 0) { RETURN (errSSLClosedGraceful); } switch (errno) { case ENOENT: RETURN (errSSLClosedGraceful); break; case ECONNRESET: RETURN (errSSLClosedAbort); break; case EAGAIN: RETURN (errSSLWouldBlock); break; default: RETURN (-36); /* ioErr */ break; } } OSStatus mongoc_secure_transport_write (SSLConnectionRef connection, const void *data, size_t *data_length) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) connection; ssize_t length; ENTRY; errno = 0; length = mongoc_stream_write ( tls->base_stream, (void *) data, *data_length, tls->timeout_msec); if (length >= 0) { *data_length = length; RETURN (noErr); } switch (errno) { case EAGAIN: RETURN (errSSLWouldBlock); default: RETURN (-36); /* ioErr */ } } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-server-description-private.h0000644000076500000240000001031313572250760030752 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SERVER_DESCRIPTION_PRIVATE_H #define MONGOC_SERVER_DESCRIPTION_PRIVATE_H #include "mongoc/mongoc-server-description.h" #define MONGOC_DEFAULT_WIRE_VERSION 0 #define MONGOC_DEFAULT_WRITE_BATCH_SIZE 1000 #define MONGOC_DEFAULT_BSON_OBJ_SIZE 16 * 1024 * 1024 #define MONGOC_DEFAULT_MAX_MSG_SIZE 48000000 #define MONGOC_NO_SESSIONS -1 #define MONGOC_IDLE_WRITE_PERIOD_MS 10 * 1000 /* represent a server or topology with no replica set config version */ #define MONGOC_NO_SET_VERSION -1 typedef enum { MONGOC_SERVER_UNKNOWN, MONGOC_SERVER_STANDALONE, MONGOC_SERVER_MONGOS, MONGOC_SERVER_POSSIBLE_PRIMARY, MONGOC_SERVER_RS_PRIMARY, MONGOC_SERVER_RS_SECONDARY, MONGOC_SERVER_RS_ARBITER, MONGOC_SERVER_RS_OTHER, MONGOC_SERVER_RS_GHOST, MONGOC_SERVER_DESCRIPTION_TYPES, } mongoc_server_description_type_t; struct _mongoc_server_description_t { uint32_t id; mongoc_host_list_t host; int64_t round_trip_time_msec; int64_t last_update_time_usec; bson_t last_is_master; bool has_is_master; const char *connection_address; const char *me; /* whether an APM server-opened callback has been fired before */ bool opened; const char *set_name; bson_error_t error; mongoc_server_description_type_t type; int32_t min_wire_version; int32_t max_wire_version; int32_t max_msg_size; int32_t max_bson_obj_size; int32_t max_write_batch_size; int64_t session_timeout_minutes; bson_t hosts; bson_t passives; bson_t arbiters; bson_t tags; const char *current_primary; int64_t set_version; bson_oid_t election_id; int64_t last_write_date_ms; bson_t compressors; }; void mongoc_server_description_init (mongoc_server_description_t *sd, const char *address, uint32_t id); bool mongoc_server_description_has_rs_member ( mongoc_server_description_t *description, const char *address); bool mongoc_server_description_has_set_version ( mongoc_server_description_t *description); bool mongoc_server_description_has_election_id ( mongoc_server_description_t *description); void mongoc_server_description_cleanup (mongoc_server_description_t *sd); void mongoc_server_description_reset (mongoc_server_description_t *sd); void mongoc_server_description_set_state (mongoc_server_description_t *description, mongoc_server_description_type_t type); void mongoc_server_description_set_set_version ( mongoc_server_description_t *description, int64_t set_version); void mongoc_server_description_set_election_id ( mongoc_server_description_t *description, const bson_oid_t *election_id); void mongoc_server_description_update_rtt (mongoc_server_description_t *server, int64_t rtt_msec); void mongoc_server_description_handle_ismaster (mongoc_server_description_t *sd, const bson_t *reply, int64_t rtt_msec, const bson_error_t *error /* IN */); void mongoc_server_description_filter_stale (mongoc_server_description_t **sds, size_t sds_len, mongoc_server_description_t *primary, int64_t heartbeat_frequency_ms, const mongoc_read_prefs_t *read_prefs); void mongoc_server_description_filter_tags ( mongoc_server_description_t **descriptions, size_t description_len, const mongoc_read_prefs_t *read_prefs); #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-server-description.c0000644000076500000240000007377513572250760027322 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-host-list-private.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-server-description-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-uri.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-compression-private.h" #include #define ALPHA 0.2 static bson_oid_t kObjectIdZero = {{0}}; static bool _match_tag_set (const mongoc_server_description_t *sd, bson_iter_t *tag_set_iter); /* Destroy allocated resources within @description, but don't free it */ void mongoc_server_description_cleanup (mongoc_server_description_t *sd) { BSON_ASSERT (sd); bson_destroy (&sd->last_is_master); bson_destroy (&sd->hosts); bson_destroy (&sd->passives); bson_destroy (&sd->arbiters); bson_destroy (&sd->tags); bson_destroy (&sd->compressors); } /* Reset fields inside this sd, but keep same id, host information, and RTT, and leave ismaster in empty inited state */ void mongoc_server_description_reset (mongoc_server_description_t *sd) { BSON_ASSERT (sd); memset (&sd->error, 0, sizeof sd->error); sd->set_name = NULL; sd->type = MONGOC_SERVER_UNKNOWN; sd->min_wire_version = MONGOC_DEFAULT_WIRE_VERSION; sd->max_wire_version = MONGOC_DEFAULT_WIRE_VERSION; sd->max_msg_size = MONGOC_DEFAULT_MAX_MSG_SIZE; sd->max_bson_obj_size = MONGOC_DEFAULT_BSON_OBJ_SIZE; sd->max_write_batch_size = MONGOC_DEFAULT_WRITE_BATCH_SIZE; sd->session_timeout_minutes = MONGOC_NO_SESSIONS; sd->last_write_date_ms = -1; /* always leave last ismaster in an init-ed state until we destroy sd */ bson_destroy (&sd->last_is_master); bson_init (&sd->last_is_master); sd->has_is_master = false; sd->last_update_time_usec = bson_get_monotonic_time (); bson_destroy (&sd->hosts); bson_destroy (&sd->passives); bson_destroy (&sd->arbiters); bson_destroy (&sd->tags); bson_destroy (&sd->compressors); bson_init (&sd->hosts); bson_init (&sd->passives); bson_init (&sd->arbiters); bson_init (&sd->tags); bson_init (&sd->compressors); sd->me = NULL; sd->current_primary = NULL; sd->set_version = MONGOC_NO_SET_VERSION; bson_oid_copy_unsafe (&kObjectIdZero, &sd->election_id); } /* *-------------------------------------------------------------------------- * * mongoc_server_description_init -- * * Initialize a new server_description_t. * * Returns: * None. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ void mongoc_server_description_init (mongoc_server_description_t *sd, const char *address, uint32_t id) { ENTRY; BSON_ASSERT (sd); BSON_ASSERT (address); sd->id = id; sd->type = MONGOC_SERVER_UNKNOWN; sd->round_trip_time_msec = -1; if (!_mongoc_host_list_from_string (&sd->host, address)) { MONGOC_WARNING ("Failed to parse uri for %s", address); return; } sd->connection_address = sd->host.host_and_port; bson_init (&sd->last_is_master); bson_init (&sd->hosts); bson_init (&sd->passives); bson_init (&sd->arbiters); bson_init (&sd->tags); bson_init (&sd->compressors); mongoc_server_description_reset (sd); EXIT; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_destroy -- * * Destroy allocated resources within @description and free * @description. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_server_description_destroy (mongoc_server_description_t *description) { ENTRY; if (!description) { EXIT; } mongoc_server_description_cleanup (description); bson_free (description); EXIT; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_has_rs_member -- * * Return true if this address is included in server's list of rs * members, false otherwise. * * Returns: * true, false * * Side effects: * None * *-------------------------------------------------------------------------- */ bool mongoc_server_description_has_rs_member (mongoc_server_description_t *server, const char *address) { bson_iter_t member_iter; const bson_t *rs_members[3]; int i; if (server->type != MONGOC_SERVER_UNKNOWN) { rs_members[0] = &server->hosts; rs_members[1] = &server->arbiters; rs_members[2] = &server->passives; for (i = 0; i < 3; i++) { BSON_ASSERT (bson_iter_init (&member_iter, rs_members[i])); while (bson_iter_next (&member_iter)) { if (strcasecmp (address, bson_iter_utf8 (&member_iter, NULL)) == 0) { return true; } } } } return false; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_has_set_version -- * * Did this server's ismaster response have a "setVersion" field? * * Returns: * True if the server description's setVersion is set. * *-------------------------------------------------------------------------- */ bool mongoc_server_description_has_set_version ( mongoc_server_description_t *description) { return description->set_version != MONGOC_NO_SET_VERSION; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_has_election_id -- * * Did this server's ismaster response have an "electionId" field? * * Returns: * True if the server description's electionId is set. * *-------------------------------------------------------------------------- */ bool mongoc_server_description_has_election_id ( mongoc_server_description_t *description) { return 0 != bson_oid_compare (&description->election_id, &kObjectIdZero); } /* *-------------------------------------------------------------------------- * * mongoc_server_description_id -- * * Get the id of this server. * * Returns: * Server's id. 
* *-------------------------------------------------------------------------- */ uint32_t mongoc_server_description_id (const mongoc_server_description_t *description) { return description->id; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_host -- * * Return a reference to the host associated with this server description. * * Returns: * This server description's host, a mongoc_host_list_t * you must * not modify or free. * *-------------------------------------------------------------------------- */ mongoc_host_list_t * mongoc_server_description_host (const mongoc_server_description_t *description) { return &((mongoc_server_description_t *) description)->host; } int64_t mongoc_server_description_last_update_time ( const mongoc_server_description_t *description) { return description->last_update_time_usec; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_round_trip_time -- * * Get the round trip time of this server, which is the client's * measurement of the duration of an "ismaster" command. * * Returns: * The server's round trip time in milliseconds. * *-------------------------------------------------------------------------- */ int64_t mongoc_server_description_round_trip_time ( const mongoc_server_description_t *description) { return description->round_trip_time_msec; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_type -- * * Get this server's type, one of the types defined in the Server * Discovery And Monitoring Spec. * * Returns: * A string. * *-------------------------------------------------------------------------- */ const char * mongoc_server_description_type (const mongoc_server_description_t *description) { switch (description->type) { case MONGOC_SERVER_UNKNOWN: return "Unknown"; case MONGOC_SERVER_STANDALONE: return "Standalone"; case MONGOC_SERVER_MONGOS: return "Mongos"; case MONGOC_SERVER_POSSIBLE_PRIMARY: return "PossiblePrimary"; case MONGOC_SERVER_RS_PRIMARY: return "RSPrimary"; case MONGOC_SERVER_RS_SECONDARY: return "RSSecondary"; case MONGOC_SERVER_RS_ARBITER: return "RSArbiter"; case MONGOC_SERVER_RS_OTHER: return "RSOther"; case MONGOC_SERVER_RS_GHOST: return "RSGhost"; case MONGOC_SERVER_DESCRIPTION_TYPES: default: MONGOC_ERROR ("Invalid mongoc_server_description_t type"); return "Invalid"; } } /* *-------------------------------------------------------------------------- * * mongoc_server_description_ismaster -- * * Return this server's most recent "ismaster" command response. * * Returns: * A reference to a BSON document, owned by the server description. * *-------------------------------------------------------------------------- */ const bson_t * mongoc_server_description_ismaster ( const mongoc_server_description_t *description) { return &description->last_is_master; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_set_state -- * * Set the server description's server type. * *-------------------------------------------------------------------------- */ void mongoc_server_description_set_state (mongoc_server_description_t *description, mongoc_server_description_type_t type) { description->type = type; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_set_set_version -- * * Set the replica set version of this server. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ void mongoc_server_description_set_set_version ( mongoc_server_description_t *description, int64_t set_version) { description->set_version = set_version; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_set_election_id -- * * Set the election_id of this server. Copies the given ObjectId or, * if it is NULL, zeroes description's election_id. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_server_description_set_election_id ( mongoc_server_description_t *description, const bson_oid_t *election_id) { if (election_id) { bson_oid_copy_unsafe (election_id, &description->election_id); } else { bson_oid_copy_unsafe (&kObjectIdZero, &description->election_id); } } /* *------------------------------------------------------------------------- * * mongoc_server_description_update_rtt -- * * Calculate this server's rtt calculation using an exponentially- * weighted moving average formula. * * Side effects: * None. * *------------------------------------------------------------------------- */ void mongoc_server_description_update_rtt (mongoc_server_description_t *server, int64_t rtt_msec) { if (server->round_trip_time_msec == -1) { server->round_trip_time_msec = rtt_msec; } else { server->round_trip_time_msec = (int64_t) ( ALPHA * rtt_msec + (1 - ALPHA) * server->round_trip_time_msec); } } static void _mongoc_server_description_set_error (mongoc_server_description_t *sd, const bson_error_t *error) { if (error && error->code) { memcpy (&sd->error, error, sizeof (bson_error_t)); } else { bson_set_error (&sd->error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, "unknown error calling ismaster"); } /* Server Discovery and Monitoring Spec: if the server type changes from a * known type to Unknown its RTT is set to null. */ sd->round_trip_time_msec = -1; } /* *------------------------------------------------------------------------- * * Called during SDAM, from topology description's ismaster handler, or * when handshaking a connection in _mongoc_cluster_stream_for_server. * * If @ismaster_response is empty, @error must say why ismaster failed. * *------------------------------------------------------------------------- */ void mongoc_server_description_handle_ismaster (mongoc_server_description_t *sd, const bson_t *ismaster_response, int64_t rtt_msec, const bson_error_t *error /* IN */) { bson_iter_t iter; bson_iter_t child; bool is_master = false; bool is_shard = false; bool is_secondary = false; bool is_arbiter = false; bool is_replicaset = false; bool is_hidden = false; const uint8_t *bytes; uint32_t len; int num_keys = 0; ENTRY; BSON_ASSERT (sd); mongoc_server_description_reset (sd); if (!ismaster_response) { _mongoc_server_description_set_error (sd, error); EXIT; } bson_destroy (&sd->last_is_master); bson_copy_to (ismaster_response, &sd->last_is_master); sd->has_is_master = true; BSON_ASSERT (bson_iter_init (&iter, &sd->last_is_master)); while (bson_iter_next (&iter)) { num_keys++; if (strcmp ("ok", bson_iter_key (&iter)) == 0) { if (!bson_iter_as_bool (&iter)) { /* it doesn't really matter what error API we use. the code and * domain will be overwritten. */ (void) _mongoc_cmd_check_ok ( ismaster_response, MONGOC_ERROR_API_VERSION_2, &sd->error); /* ismaster response returned ok: 0. 
According to auth spec: "If the * isMaster of the MongoDB Handshake fails with an error, drivers * MUST treat this an authentication error." */ sd->error.domain = MONGOC_ERROR_CLIENT; sd->error.code = MONGOC_ERROR_CLIENT_AUTHENTICATE; goto failure; } } else if (strcmp ("ismaster", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure; is_master = bson_iter_bool (&iter); } else if (strcmp ("me", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) goto failure; sd->me = bson_iter_utf8 (&iter, NULL); } else if (strcmp ("maxMessageSizeBytes", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure; sd->max_msg_size = bson_iter_int32 (&iter); } else if (strcmp ("maxBsonObjectSize", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure; sd->max_bson_obj_size = bson_iter_int32 (&iter); } else if (strcmp ("maxWriteBatchSize", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure; sd->max_write_batch_size = bson_iter_int32 (&iter); } else if (strcmp ("logicalSessionTimeoutMinutes", bson_iter_key (&iter)) == 0) { if (BSON_ITER_HOLDS_NUMBER (&iter)) { sd->session_timeout_minutes = bson_iter_as_int64 (&iter); } else if (BSON_ITER_HOLDS_NULL (&iter)) { /* this arises executing standard JSON tests */ sd->session_timeout_minutes = MONGOC_NO_SESSIONS; } else { goto failure; } } else if (strcmp ("minWireVersion", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure; sd->min_wire_version = bson_iter_int32 (&iter); } else if (strcmp ("maxWireVersion", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_INT32 (&iter)) goto failure; sd->max_wire_version = bson_iter_int32 (&iter); } else if (strcmp ("msg", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) goto failure; is_shard = !!bson_iter_utf8 (&iter, NULL); } else if (strcmp ("setName", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) goto failure; sd->set_name = bson_iter_utf8 (&iter, NULL); } else if (strcmp ("setVersion", bson_iter_key (&iter)) == 0) { mongoc_server_description_set_set_version (sd, bson_iter_as_int64 (&iter)); } else if (strcmp ("electionId", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_OID (&iter)) goto failure; mongoc_server_description_set_election_id (sd, bson_iter_oid (&iter)); } else if (strcmp ("secondary", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure; is_secondary = bson_iter_bool (&iter); } else if (strcmp ("hosts", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_ARRAY (&iter)) goto failure; bson_iter_array (&iter, &len, &bytes); bson_destroy (&sd->hosts); BSON_ASSERT (bson_init_static (&sd->hosts, bytes, len)); } else if (strcmp ("passives", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_ARRAY (&iter)) goto failure; bson_iter_array (&iter, &len, &bytes); bson_destroy (&sd->passives); BSON_ASSERT (bson_init_static (&sd->passives, bytes, len)); } else if (strcmp ("arbiters", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_ARRAY (&iter)) goto failure; bson_iter_array (&iter, &len, &bytes); bson_destroy (&sd->arbiters); BSON_ASSERT (bson_init_static (&sd->arbiters, bytes, len)); } else if (strcmp ("primary", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_UTF8 (&iter)) goto failure; sd->current_primary = bson_iter_utf8 (&iter, NULL); } else if (strcmp ("arbiterOnly", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure; is_arbiter = bson_iter_bool (&iter); } else if (strcmp 
("isreplicaset", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_BOOL (&iter)) goto failure; is_replicaset = bson_iter_bool (&iter); } else if (strcmp ("tags", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_DOCUMENT (&iter)) goto failure; bson_iter_document (&iter, &len, &bytes); bson_destroy (&sd->tags); BSON_ASSERT (bson_init_static (&sd->tags, bytes, len)); } else if (strcmp ("hidden", bson_iter_key (&iter)) == 0) { is_hidden = bson_iter_bool (&iter); } else if (strcmp ("lastWrite", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_DOCUMENT (&iter) || !bson_iter_recurse (&iter, &child) || !bson_iter_find (&child, "lastWriteDate") || !BSON_ITER_HOLDS_DATE_TIME (&child)) { goto failure; } sd->last_write_date_ms = bson_iter_date_time (&child); } else if (strcmp ("idleWritePeriodMillis", bson_iter_key (&iter)) == 0) { sd->last_write_date_ms = bson_iter_as_int64 (&iter); } else if (strcmp ("compression", bson_iter_key (&iter)) == 0) { if (!BSON_ITER_HOLDS_ARRAY (&iter)) goto failure; bson_iter_array (&iter, &len, &bytes); bson_destroy (&sd->compressors); BSON_ASSERT (bson_init_static (&sd->compressors, bytes, len)); } } if (is_shard) { sd->type = MONGOC_SERVER_MONGOS; } else if (sd->set_name) { if (is_hidden) { sd->type = MONGOC_SERVER_RS_OTHER; } else if (is_master) { sd->type = MONGOC_SERVER_RS_PRIMARY; } else if (is_secondary) { sd->type = MONGOC_SERVER_RS_SECONDARY; } else if (is_arbiter) { sd->type = MONGOC_SERVER_RS_ARBITER; } else { sd->type = MONGOC_SERVER_RS_OTHER; } } else if (is_replicaset) { sd->type = MONGOC_SERVER_RS_GHOST; } else if (num_keys > 0) { sd->type = MONGOC_SERVER_STANDALONE; } else { sd->type = MONGOC_SERVER_UNKNOWN; } if (!num_keys) { /* empty reply means ismaster failed */ _mongoc_server_description_set_error (sd, error); } mongoc_server_description_update_rtt (sd, rtt_msec); EXIT; failure: sd->type = MONGOC_SERVER_UNKNOWN; sd->round_trip_time_msec = -1; EXIT; } /* *------------------------------------------------------------------------- * * mongoc_server_description_new_copy -- * * A copy of a server description that you must destroy, or NULL. * *------------------------------------------------------------------------- */ mongoc_server_description_t * mongoc_server_description_new_copy ( const mongoc_server_description_t *description) { mongoc_server_description_t *copy; if (!description) { return NULL; } copy = (mongoc_server_description_t *) bson_malloc0 (sizeof (*copy)); copy->id = description->id; copy->opened = description->opened; memcpy (©->host, &description->host, sizeof (copy->host)); copy->round_trip_time_msec = -1; copy->connection_address = copy->host.host_and_port; bson_init (©->last_is_master); bson_init (©->hosts); bson_init (©->passives); bson_init (©->arbiters); bson_init (©->tags); bson_init (©->compressors); if (description->has_is_master) { /* calls mongoc_server_description_reset */ mongoc_server_description_handle_ismaster ( copy, &description->last_is_master, description->round_trip_time_msec, &description->error); } else { mongoc_server_description_reset (copy); } /* Preserve the error */ memcpy (©->error, &description->error, sizeof copy->error); return copy; } /* *------------------------------------------------------------------------- * * mongoc_server_description_filter_stale -- * * Estimate servers' staleness according to the Server Selection Spec. * Determines the number of eligible servers, and sets any servers that * are too stale to NULL in the descriptions set. 
* *------------------------------------------------------------------------- */ void mongoc_server_description_filter_stale (mongoc_server_description_t **sds, size_t sds_len, mongoc_server_description_t *primary, int64_t heartbeat_frequency_ms, const mongoc_read_prefs_t *read_prefs) { int64_t max_staleness_seconds; size_t i; int64_t heartbeat_frequency_usec; int64_t max_last_write_date_usec; int64_t staleness_usec; int64_t max_staleness_usec; if (!read_prefs) { /* NULL read_prefs is PRIMARY, no maxStalenessSeconds to filter by */ return; } max_staleness_seconds = mongoc_read_prefs_get_max_staleness_seconds (read_prefs); if (max_staleness_seconds == MONGOC_NO_MAX_STALENESS) { return; } BSON_ASSERT (max_staleness_seconds > 0); max_staleness_usec = max_staleness_seconds * 1000 * 1000; heartbeat_frequency_usec = heartbeat_frequency_ms * 1000; if (primary) { for (i = 0; i < sds_len; i++) { if (!sds[i] || sds[i]->type != MONGOC_SERVER_RS_SECONDARY) { continue; } /* See max-staleness.rst for explanation of these formulae. */ staleness_usec = primary->last_write_date_ms * 1000 + (sds[i]->last_update_time_usec - primary->last_update_time_usec) - sds[i]->last_write_date_ms * 1000 + heartbeat_frequency_usec; if (staleness_usec > max_staleness_usec) { TRACE ("Rejected stale RSSecondary [%s]", sds[i]->host.host_and_port); sds[i] = NULL; } } } else { /* find max last_write_date */ max_last_write_date_usec = 0; for (i = 0; i < sds_len; i++) { if (sds[i] && sds[i]->type == MONGOC_SERVER_RS_SECONDARY) { max_last_write_date_usec = BSON_MAX ( max_last_write_date_usec, sds[i]->last_write_date_ms * 1000); } } /* use max last_write_date to estimate each secondary's staleness */ for (i = 0; i < sds_len; i++) { if (!sds[i] || sds[i]->type != MONGOC_SERVER_RS_SECONDARY) { continue; } staleness_usec = max_last_write_date_usec - sds[i]->last_write_date_ms * 1000 + heartbeat_frequency_usec; if (staleness_usec > max_staleness_usec) { TRACE ("Rejected stale RSSecondary [%s]", sds[i]->host.host_and_port); sds[i] = NULL; } } } } /* *------------------------------------------------------------------------- * * mongoc_server_description_filter_tags -- * * Given a set of server descriptions, set to NULL any that don't * match the read preference's tag sets. 
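 * For example, a read preference with tag sets [{"dc": "ny"}, {}] first keeps
 * only servers tagged dc:ny; if none match, the empty tag set {} matches
 * every remaining server. A server tagged {"dc": "ny", "rack": "2"} satisfies
 * {"dc": "ny"} because each key/value pair in a tag set must appear in the
 * server's tags (see _match_tag_set below).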
* * https://github.com/mongodb/specifications/blob/master/source/server-selection/server-selection.rst#tag-set * *------------------------------------------------------------------------- */ void mongoc_server_description_filter_tags ( mongoc_server_description_t **descriptions, size_t description_len, const mongoc_read_prefs_t *read_prefs) { const bson_t *rp_tags; bson_iter_t rp_tagset_iter; bson_iter_t tag_set_iter; bool *sd_matched = NULL; bool found; size_t i; if (!read_prefs) { /* NULL read_prefs is PRIMARY, no tags to filter by */ return; } rp_tags = mongoc_read_prefs_get_tags (read_prefs); if (bson_count_keys (rp_tags) == 0) { /* no tags to filter by */ return; } sd_matched = (bool *) bson_malloc0 (sizeof (bool) * description_len); bson_iter_init (&rp_tagset_iter, rp_tags); /* for each read preference tag set */ while (bson_iter_next (&rp_tagset_iter)) { found = false; for (i = 0; i < description_len; i++) { if (!descriptions[i]) { /* NULLed earlier in mongoc_topology_description_suitable_servers */ continue; } BSON_ASSERT (bson_iter_recurse (&rp_tagset_iter, &tag_set_iter)); sd_matched[i] = _match_tag_set (descriptions[i], &tag_set_iter); if (sd_matched[i]) { found = true; } } if (found) { for (i = 0; i < description_len; i++) { if (!sd_matched[i] && descriptions[i]) { TRACE ("Rejected [%s] [%s], doesn't match tags", mongoc_server_description_type (descriptions[i]), descriptions[i]->host.host_and_port); descriptions[i] = NULL; } } goto CLEANUP; } } /* tried each */ for (i = 0; i < description_len; i++) { if (!sd_matched[i]) { TRACE ("Rejected [%s] [%s], reached end of tags array without match", mongoc_server_description_type (descriptions[i]), descriptions[i]->host.host_and_port); descriptions[i] = NULL; } } CLEANUP: bson_free (sd_matched); } /* *------------------------------------------------------------------------- * * _match_tag_set -- * * Check if a server's tags match one tag set, like * {'tag1': 'value1', 'tag2': 'value2'}. * *------------------------------------------------------------------------- */ static bool _match_tag_set (const mongoc_server_description_t *sd, bson_iter_t *tag_set_iter) { bson_iter_t sd_iter; uint32_t read_pref_tag_len; uint32_t sd_len; const char *read_pref_tag; const char *read_pref_val; const char *server_val; while (bson_iter_next (tag_set_iter)) { /* one {'tag': 'value'} pair from the read preference's tag set */ read_pref_tag = bson_iter_key (tag_set_iter); read_pref_val = bson_iter_utf8 (tag_set_iter, &read_pref_tag_len); if (bson_iter_init_find (&sd_iter, &sd->tags, read_pref_tag)) { /* The server has this tag - does it have the right value? */ server_val = bson_iter_utf8 (&sd_iter, &sd_len); if (sd_len != read_pref_tag_len || memcmp (read_pref_val, server_val, read_pref_tag_len)) { /* If the values don't match, no match */ return false; } } else { /* If the server description doesn't have that key, no match */ return false; } } return true; } /* *-------------------------------------------------------------------------- * * mongoc_server_description_compressor_id -- * * Get the compressor id if compression was negotiated. * * Returns: * The compressor ID, or -1 if none was negotiated. 
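 * The compressors document is the "compression" array from the server's
 * ismaster response (see mongoc_server_description_handle_ismaster); its
 * entries are compressor names such as "snappy", "zlib" or "zstd", which
 * mongoc_compressor_name_to_id maps to numeric ids.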
* *-------------------------------------------------------------------------- */ int32_t mongoc_server_description_compressor_id ( const mongoc_server_description_t *description) { int id; bson_iter_t iter; BSON_ASSERT (bson_iter_init (&iter, &description->compressors)); while (bson_iter_next (&iter)) { id = mongoc_compressor_name_to_id (bson_iter_utf8 (&iter, NULL)); if (id != -1) { return id; } } return -1; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-server-description.h0000644000076500000240000000373013572250760027307 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SERVER_DESCRIPTION_H #define MONGOC_SERVER_DESCRIPTION_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-host-list.h" BSON_BEGIN_DECLS typedef struct _mongoc_server_description_t mongoc_server_description_t; MONGOC_EXPORT (void) mongoc_server_description_destroy (mongoc_server_description_t *description); MONGOC_EXPORT (mongoc_server_description_t *) mongoc_server_description_new_copy ( const mongoc_server_description_t *description); MONGOC_EXPORT (uint32_t) mongoc_server_description_id (const mongoc_server_description_t *description); MONGOC_EXPORT (mongoc_host_list_t *) mongoc_server_description_host (const mongoc_server_description_t *description); MONGOC_EXPORT (int64_t) mongoc_server_description_last_update_time ( const mongoc_server_description_t *description); MONGOC_EXPORT (int64_t) mongoc_server_description_round_trip_time ( const mongoc_server_description_t *description); MONGOC_EXPORT (const char *) mongoc_server_description_type (const mongoc_server_description_t *description); MONGOC_EXPORT (const bson_t *) mongoc_server_description_ismaster ( const mongoc_server_description_t *description); MONGOC_EXPORT (int32_t) mongoc_server_description_compressor_id ( const mongoc_server_description_t *description); BSON_END_DECLS #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-server-stream-private.h0000644000076500000240000000337413572250760027733 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SERVER_STREAM_H #define MONGOC_SERVER_STREAM_H #include "mongoc/mongoc-config.h" #include #include "mongoc/mongoc-topology-description-private.h" #include "mongoc/mongoc-server-description-private.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS typedef struct _mongoc_server_stream_t { mongoc_topology_description_type_t topology_type; mongoc_server_description_t *sd; /* owned */ bson_t cluster_time; /* owned */ mongoc_stream_t *stream; /* borrowed */ } mongoc_server_stream_t; mongoc_server_stream_t * mongoc_server_stream_new (const mongoc_topology_description_t *td, mongoc_server_description_t *sd, mongoc_stream_t *stream); int32_t mongoc_server_stream_max_bson_obj_size (mongoc_server_stream_t *server_stream); int32_t mongoc_server_stream_max_msg_size (mongoc_server_stream_t *server_stream); int32_t mongoc_server_stream_max_write_batch_size ( mongoc_server_stream_t *server_stream); void mongoc_server_stream_cleanup (mongoc_server_stream_t *server_stream); BSON_END_DECLS #endif /* MONGOC_SERVER_STREAM_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-server-stream.c0000644000076500000240000000605413572250760026254 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-cluster-private.h" #include "mongoc/mongoc-server-stream-private.h" #include "mongoc/mongoc-util-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "server-stream" mongoc_server_stream_t * mongoc_server_stream_new (const mongoc_topology_description_t *td, mongoc_server_description_t *sd, mongoc_stream_t *stream) { mongoc_server_stream_t *server_stream; BSON_ASSERT (sd); BSON_ASSERT (stream); server_stream = bson_malloc (sizeof (mongoc_server_stream_t)); server_stream->topology_type = td->type; bson_copy_to (&td->cluster_time, &server_stream->cluster_time); server_stream->sd = sd; /* becomes owned */ server_stream->stream = stream; /* merely borrowed */ return server_stream; } void mongoc_server_stream_cleanup (mongoc_server_stream_t *server_stream) { if (server_stream) { mongoc_server_description_destroy (server_stream->sd); bson_destroy (&server_stream->cluster_time); bson_free (server_stream); } } /* *-------------------------------------------------------------------------- * * mongoc_server_stream_max_bson_obj_size -- * * Return the max bson object size for the given server stream. * *-------------------------------------------------------------------------- */ int32_t mongoc_server_stream_max_bson_obj_size (mongoc_server_stream_t *server_stream) { return COALESCE (server_stream->sd->max_bson_obj_size, MONGOC_DEFAULT_BSON_OBJ_SIZE); } /* *-------------------------------------------------------------------------- * * mongoc_server_stream_max_msg_size -- * * Return the max message size for the given server stream. 
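 * Falls back to the driver default (MONGOC_DEFAULT_MAX_MSG_SIZE) via COALESCE
 * when the server description reports no value.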
* *-------------------------------------------------------------------------- */ int32_t mongoc_server_stream_max_msg_size (mongoc_server_stream_t *server_stream) { return COALESCE (server_stream->sd->max_msg_size, MONGOC_DEFAULT_MAX_MSG_SIZE); } /* *-------------------------------------------------------------------------- * * mongoc_server_stream_max_write_batch_size -- * * Return the max write batch size for the given server stream. * *-------------------------------------------------------------------------- */ int32_t mongoc_server_stream_max_write_batch_size ( mongoc_server_stream_t *server_stream) { return COALESCE (server_stream->sd->max_write_batch_size, MONGOC_DEFAULT_WRITE_BATCH_SIZE); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-set-private.h0000644000076500000240000000513413572250760025723 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SET_PRIVATE_H #define MONGOC_SET_PRIVATE_H #include BSON_BEGIN_DECLS typedef void (*mongoc_set_item_dtor) (void *item, void *ctx); /* return true to continue iteration, false to stop */ typedef bool (*mongoc_set_for_each_cb_t) (void *item, void *ctx); typedef bool (*mongoc_set_for_each_with_id_cb_t) (uint32_t id, void *item, void *ctx); typedef struct { uint32_t id; void *item; } mongoc_set_item_t; typedef struct { mongoc_set_item_t *items; size_t items_len; size_t items_allocated; mongoc_set_item_dtor dtor; void *dtor_ctx; } mongoc_set_t; mongoc_set_t * mongoc_set_new (size_t nitems, mongoc_set_item_dtor dtor, void *dtor_ctx); void mongoc_set_add (mongoc_set_t *set, uint32_t id, void *item); void mongoc_set_rm (mongoc_set_t *set, uint32_t id); void * mongoc_set_get (mongoc_set_t *set, uint32_t id); void * mongoc_set_get_item (mongoc_set_t *set, int idx); void * mongoc_set_get_item_and_id (mongoc_set_t *set, int idx, uint32_t *id /* OUT */); void mongoc_set_destroy (mongoc_set_t *set); /* loops over the set safe-ish. * * Caveats: * - you can add items at any iteration * - if you remove elements other than the one you're currently looking at, * you may see it later in the iteration */ void mongoc_set_for_each (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx); void mongoc_set_for_each_with_id (mongoc_set_t *set, mongoc_set_for_each_with_id_cb_t cb, void *ctx); /* first item in set for which "cb" returns true */ void * mongoc_set_find_item (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx); /* id of first item in set for which "cb" returns true, or 0. */ uint32_t mongoc_set_find_id (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx); BSON_END_DECLS #endif /* MONGOC_SET_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-set.c0000644000076500000240000001252013572250760024243 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-set-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "set" mongoc_set_t * mongoc_set_new (size_t nitems, mongoc_set_item_dtor dtor, void *dtor_ctx) { mongoc_set_t *set = (mongoc_set_t *) bson_malloc (sizeof (*set)); set->items_allocated = nitems; set->items = (mongoc_set_item_t *) bson_malloc (sizeof (*set->items) * set->items_allocated); set->items_len = 0; set->dtor = dtor; set->dtor_ctx = dtor_ctx; return set; } static int mongoc_set_id_cmp (const void *a_, const void *b_) { mongoc_set_item_t *a = (mongoc_set_item_t *) a_; mongoc_set_item_t *b = (mongoc_set_item_t *) b_; if (a->id == b->id) { return 0; } return a->id < b->id ? -1 : 1; } void mongoc_set_add (mongoc_set_t *set, uint32_t id, void *item) { if (set->items_len >= set->items_allocated) { set->items_allocated *= 2; set->items = (mongoc_set_item_t *) bson_realloc ( set->items, sizeof (*set->items) * set->items_allocated); } set->items[set->items_len].id = id; set->items[set->items_len].item = item; set->items_len++; if (set->items_len > 1 && set->items[set->items_len - 2].id > id) { qsort ( set->items, set->items_len, sizeof (*set->items), mongoc_set_id_cmp); } } void mongoc_set_rm (mongoc_set_t *set, uint32_t id) { mongoc_set_item_t *ptr; mongoc_set_item_t key; int i; key.id = id; ptr = (mongoc_set_item_t *) bsearch ( &key, set->items, set->items_len, sizeof (key), mongoc_set_id_cmp); if (ptr) { if (set->dtor) { set->dtor (ptr->item, set->dtor_ctx); } i = ptr - set->items; if (i != set->items_len - 1) { memmove (set->items + i, set->items + i + 1, (set->items_len - (i + 1)) * sizeof (key)); } set->items_len--; } } void * mongoc_set_get (mongoc_set_t *set, uint32_t id) { mongoc_set_item_t *ptr; mongoc_set_item_t key; key.id = id; ptr = (mongoc_set_item_t *) bsearch ( &key, set->items, set->items_len, sizeof (key), mongoc_set_id_cmp); return ptr ? 
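/* Note: the set keeps items sorted by id (mongoc_set_add re-sorts with qsort
   only on out-of-order insertion), so this lookup and mongoc_set_rm can use
   bsearch, i.e. O(log n) per lookup. */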
ptr->item : NULL; } void * mongoc_set_get_item (mongoc_set_t *set, int idx) { BSON_ASSERT (set); BSON_ASSERT (idx < set->items_len); return set->items[idx].item; } void * mongoc_set_get_item_and_id (mongoc_set_t *set, int idx, uint32_t *id /* OUT */) { BSON_ASSERT (set); BSON_ASSERT (id); BSON_ASSERT (idx < set->items_len); *id = set->items[idx].id; return set->items[idx].item; } void mongoc_set_destroy (mongoc_set_t *set) { int i; if (set->dtor) { for (i = 0; i < set->items_len; i++) { set->dtor (set->items[i].item, set->dtor_ctx); } } bson_free (set->items); bson_free (set); } typedef struct { mongoc_set_for_each_cb_t cb; void *ctx; } _mongoc_set_for_each_helper_t; static bool _mongoc_set_for_each_helper (uint32_t id, void *item, void *ctx) { _mongoc_set_for_each_helper_t *helper = (_mongoc_set_for_each_helper_t *) ctx; return helper->cb (item, helper->ctx); } void mongoc_set_for_each (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx) { _mongoc_set_for_each_helper_t helper; helper.cb = cb; helper.ctx = ctx; mongoc_set_for_each_with_id (set, _mongoc_set_for_each_helper, &helper); } void mongoc_set_for_each_with_id (mongoc_set_t *set, mongoc_set_for_each_with_id_cb_t cb, void *ctx) { size_t i; mongoc_set_item_t *old_set; size_t items_len; items_len = set->items_len; /* prevent undefined behavior of memcpy(NULL) */ if (items_len == 0) { return; } old_set = (mongoc_set_item_t *) bson_malloc (sizeof (*old_set) * items_len); memcpy (old_set, set->items, sizeof (*old_set) * items_len); for (i = 0; i < items_len; i++) { if (!cb (i, old_set[i].item, ctx)) { break; } } bson_free (old_set); } static mongoc_set_item_t * _mongoc_set_find (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx) { size_t i; size_t items_len; mongoc_set_item_t *item; items_len = set->items_len; for (i = 0; i < items_len; i++) { item = &set->items[i]; if (cb (item->item, ctx)) { return item; } } return NULL; } void * mongoc_set_find_item (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx) { mongoc_set_item_t *item; if ((item = _mongoc_set_find (set, cb, ctx))) { return item->item; } return NULL; } uint32_t mongoc_set_find_id (mongoc_set_t *set, mongoc_set_for_each_cb_t cb, void *ctx) { mongoc_set_item_t *item; if ((item = _mongoc_set_find (set, cb, ctx))) { return item->id; } return 0; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-socket-private.h0000644000076500000240000000207013572250760026414 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SOCKET_PRIVATE_H #define MONGOC_SOCKET_PRIVATE_H #include "mongoc/mongoc-socket.h" BSON_BEGIN_DECLS struct _mongoc_socket_t { #ifdef _WIN32 SOCKET sd; #else int sd; #endif int errno_; int domain; int pid; }; mongoc_socket_t * mongoc_socket_accept_ex (mongoc_socket_t *sock, int64_t expire_at, uint16_t *port); BSON_END_DECLS #endif /* MONGOC_SOCKET_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-socket.c0000644000076500000240000011040313572250760024737 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-errno-private.h" #include "mongoc/mongoc-socket-private.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-socket-private.h" #include "mongoc/mongoc-trace-private.h" #ifdef _WIN32 #include #include #endif #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "socket" #define OPERATION_EXPIRED(expire_at) \ ((expire_at >= 0) && (expire_at < (bson_get_monotonic_time ()))) /* either struct sockaddr or void, depending on platform */ typedef MONGOC_SOCKET_ARG2 mongoc_sockaddr_t; /* *-------------------------------------------------------------------------- * * _mongoc_socket_capture_errno -- * * Save the errno state for contextual use. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static void _mongoc_socket_capture_errno (mongoc_socket_t *sock) /* IN */ { #ifdef _WIN32 errno = sock->errno_ = WSAGetLastError (); #else sock->errno_ = errno; #endif TRACE ("setting errno: %d %s", sock->errno_, strerror (sock->errno_)); } /* *-------------------------------------------------------------------------- * * _mongoc_socket_setnonblock -- * * A helper to set a socket in nonblocking mode. * * Returns: * true if successful; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool #ifdef _WIN32 _mongoc_socket_setnonblock (SOCKET sd) #else _mongoc_socket_setnonblock (int sd) #endif { #ifdef _WIN32 u_long io_mode = 1; return (NO_ERROR == ioctlsocket (sd, FIONBIO, &io_mode)); #else int flags; flags = fcntl (sd, F_GETFL, sd); return (-1 != fcntl (sd, F_SETFL, (flags | O_NONBLOCK))); #endif } /* *-------------------------------------------------------------------------- * * _mongoc_socket_wait -- * * A single socket poll helper. * * @events: in most cases should be POLLIN or POLLOUT. * * @expire_at should be an absolute time at which to expire using * the monotonic clock (bson_get_monotonic_time(), which is in * microseconds). Or zero to not block at all. Or -1 to block * forever. * * Returns: * true if an event matched. otherwise false. * a timeout will return false. * * Side effects: * None. 
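 * For example, to wait up to five seconds for readability a caller passes
 * POLLIN and expire_at = bson_get_monotonic_time () + 5 * 1000 * 1000, since
 * the monotonic clock is expressed in microseconds.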
* *-------------------------------------------------------------------------- */ static bool _mongoc_socket_wait (mongoc_socket_t *sock, /* IN */ int events, /* IN */ int64_t expire_at) /* IN */ { #ifdef _WIN32 fd_set read_fds; fd_set write_fds; fd_set error_fds; struct timeval timeout_tv; #else struct pollfd pfd; #endif int ret; int timeout; int64_t now; ENTRY; BSON_ASSERT (sock); BSON_ASSERT (events); #ifdef _WIN32 FD_ZERO (&read_fds); FD_ZERO (&write_fds); FD_ZERO (&error_fds); if (events & POLLIN) { FD_SET (sock->sd, &read_fds); } if (events & POLLOUT) { FD_SET (sock->sd, &write_fds); } FD_SET (sock->sd, &error_fds); #else pfd.fd = sock->sd; pfd.events = events | POLLERR | POLLHUP; pfd.revents = 0; #endif now = bson_get_monotonic_time (); for (;;) { if (expire_at < 0) { timeout = -1; } else if (expire_at == 0) { timeout = 0; } else { timeout = (int) ((expire_at - now) / 1000L); if (timeout < 0) { timeout = 0; } } #ifdef _WIN32 if (timeout == -1) { /* not WSAPoll: daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken */ ret = select (0 /*unused*/, &read_fds, &write_fds, &error_fds, NULL); } else { timeout_tv.tv_sec = timeout / 1000; timeout_tv.tv_usec = (timeout % 1000) * 1000; ret = select ( 0 /*unused*/, &read_fds, &write_fds, &error_fds, &timeout_tv); } if (ret == SOCKET_ERROR) { _mongoc_socket_capture_errno (sock); ret = -1; } else if (FD_ISSET (sock->sd, &error_fds)) { errno = WSAECONNRESET; ret = -1; } #else ret = poll (&pfd, 1, timeout); #endif if (ret > 0) { /* Something happened, so return that */ #ifdef _WIN32 return (FD_ISSET (sock->sd, &read_fds) || FD_ISSET (sock->sd, &write_fds)); #else RETURN (0 != (pfd.revents & events)); #endif } else if (ret < 0) { /* poll itself failed */ TRACE ("errno is: %d", errno); if (MONGOC_ERRNO_IS_AGAIN (errno)) { if (OPERATION_EXPIRED (expire_at)) { _mongoc_socket_capture_errno (sock); RETURN (false); } else { continue; } } else { /* poll failed for some non-transient reason */ _mongoc_socket_capture_errno (sock); RETURN (false); } } else { /* ret == 0, poll timed out */ if (timeout) { mongoc_counter_streams_timeout_inc (); } #ifdef _WIN32 sock->errno_ = timeout ? WSAETIMEDOUT : EAGAIN; #else sock->errno_ = timeout ? ETIMEDOUT : EAGAIN; #endif RETURN (false); } } } /* *-------------------------------------------------------------------------- * * mongoc_socket_poll -- * * A multi-socket poll helper. * * @expire_at should be an absolute time at which to expire using * the monotonic clock (bson_get_monotonic_time(), which is in * microseconds). Or zero to not block at all. Or -1 to block * forever. * * Returns: * The number of sockets ready. * * Side effects: * None. 
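 * Illustrative usage (a sketch, not a complete program): with
 *   mongoc_socket_poll_t pd = { sock, POLLIN, 0 };
 * a call such as mongoc_socket_poll (&pd, 1, 1000) waits up to one second
 * and, on a positive return, pd.revents indicates readiness. Unlike
 * _mongoc_socket_wait above, the timeout is a relative number of
 * milliseconds, not an absolute expiration time.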
* *-------------------------------------------------------------------------- */ ssize_t mongoc_socket_poll (mongoc_socket_poll_t *sds, /* IN */ size_t nsds, /* IN */ int32_t timeout) /* IN */ { #ifdef _WIN32 fd_set read_fds; fd_set write_fds; fd_set error_fds; struct timeval timeout_tv; #else struct pollfd *pfds; #endif int ret; int i; ENTRY; BSON_ASSERT (sds); #ifdef _WIN32 FD_ZERO (&read_fds); FD_ZERO (&write_fds); FD_ZERO (&error_fds); for (i = 0; i < nsds; i++) { if (sds[i].events & POLLIN) { FD_SET (sds[i].socket->sd, &read_fds); } if (sds[i].events & POLLOUT) { FD_SET (sds[i].socket->sd, &write_fds); } FD_SET (sds[i].socket->sd, &error_fds); } timeout_tv.tv_sec = timeout / 1000; timeout_tv.tv_usec = (timeout % 1000) * 1000; /* not WSAPoll: daniel.haxx.se/blog/2012/10/10/wsapoll-is-broken */ ret = select (0 /*unused*/, &read_fds, &write_fds, &error_fds, &timeout_tv); if (ret == SOCKET_ERROR) { errno = WSAGetLastError (); return -1; } for (i = 0; i < nsds; i++) { if (FD_ISSET (sds[i].socket->sd, &read_fds)) { sds[i].revents = POLLIN; } else if (FD_ISSET (sds[i].socket->sd, &write_fds)) { sds[i].revents = POLLOUT; } else if (FD_ISSET (sds[i].socket->sd, &error_fds)) { sds[i].revents = POLLHUP; } else { sds[i].revents = 0; } } #else pfds = (struct pollfd *) bson_malloc (sizeof (*pfds) * nsds); for (i = 0; i < nsds; i++) { pfds[i].fd = sds[i].socket->sd; pfds[i].events = sds[i].events | POLLERR | POLLHUP; pfds[i].revents = 0; } ret = poll (pfds, nsds, timeout); for (i = 0; i < nsds; i++) { sds[i].revents = pfds[i].revents; } bson_free (pfds); #endif return ret; } /* https://jira.mongodb.org/browse/CDRIVER-2176 */ #define MONGODB_KEEPALIVEINTVL 10 #define MONGODB_KEEPIDLE 300 #define MONGODB_KEEPALIVECNT 9 #ifdef _WIN32 static void _mongoc_socket_setkeepalive_windows (SOCKET sd) { struct tcp_keepalive keepalive; DWORD lpcbBytesReturned = 0; HKEY hKey; DWORD type; DWORD data; DWORD data_size = sizeof data; const char *reg_key = "SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters"; keepalive.onoff = true; keepalive.keepalivetime = MONGODB_KEEPIDLE * 1000; keepalive.keepaliveinterval = MONGODB_KEEPALIVEINTVL * 1000; /* * Windows hardcodes probes to 10: * https://msdn.microsoft.com/en-us/library/windows/desktop/dd877220(v=vs.85).aspx * "On Windows Vista and later, the number of keep-alive probes (data * retransmissions) is set to 10 and cannot be changed." * * Note that win2k (and seeminly all versions thereafter) do not set the * registry value by default so there is no way to derive the default value * programmatically. It is however listed in the docs. A user can however * change the default value by setting the registry values. 
*/ if (RegOpenKeyExA (HKEY_LOCAL_MACHINE, reg_key, 0, KEY_QUERY_VALUE, &hKey) == ERROR_SUCCESS) { /* https://technet.microsoft.com/en-us/library/cc957549.aspx */ DWORD default_keepalivetime = 7200000; /* 2 hours */ /* https://technet.microsoft.com/en-us/library/cc957548.aspx */ DWORD default_keepaliveinterval = 1000; /* 1 second */ if (RegQueryValueEx ( hKey, "KeepAliveTime", NULL, &type, (LPBYTE) &data, &data_size) == ERROR_SUCCESS) { if (type == REG_DWORD && data < keepalive.keepalivetime) { keepalive.keepalivetime = data; } } else if (default_keepalivetime < keepalive.keepalivetime) { keepalive.keepalivetime = default_keepalivetime; } if (RegQueryValueEx (hKey, "KeepAliveInterval", NULL, &type, (LPBYTE) &data, &data_size) == ERROR_SUCCESS) { if (type == REG_DWORD && data < keepalive.keepaliveinterval) { keepalive.keepaliveinterval = data; } } else if (default_keepaliveinterval < keepalive.keepaliveinterval) { keepalive.keepaliveinterval = default_keepaliveinterval; } RegCloseKey (hKey); } if (WSAIoctl (sd, SIO_KEEPALIVE_VALS, &keepalive, sizeof keepalive, NULL, 0, &lpcbBytesReturned, NULL, NULL) == SOCKET_ERROR) { TRACE ("%s", "Could not set keepalive values"); } else { TRACE ("%s", "KeepAlive values updated"); TRACE ("KeepAliveTime: %d", keepalive.keepalivetime); TRACE ("KeepAliveInterval: %d", keepalive.keepaliveinterval); } } #else #ifdef MONGOC_TRACE static const char * _mongoc_socket_sockopt_value_to_name (int value) { switch (value) { #ifdef TCP_KEEPIDLE case TCP_KEEPIDLE: return "TCP_KEEPIDLE"; #endif #ifdef TCP_KEEPALIVE case TCP_KEEPALIVE: return "TCP_KEEPALIVE"; #endif #ifdef TCP_KEEPINTVL case TCP_KEEPINTVL: return "TCP_KEEPINTVL"; #endif #ifdef TCP_KEEPCNT case TCP_KEEPCNT: return "TCP_KEEPCNT"; #endif default: MONGOC_WARNING ("Don't know what socketopt %d is", value); return "Unknown option name"; } } #endif static void _mongoc_socket_set_sockopt_if_less (int sd, int name, int value) { int optval = 1; mongoc_socklen_t optlen; optlen = sizeof optval; if (getsockopt (sd, IPPROTO_TCP, name, (char *) &optval, &optlen)) { TRACE ("Getting '%s' failed, errno: %d", _mongoc_socket_sockopt_value_to_name (name), errno); } else { TRACE ("'%s' is %d, target value is %d", _mongoc_socket_sockopt_value_to_name (name), optval, value); if (optval > value) { optval = value; if (setsockopt ( sd, IPPROTO_TCP, name, (char *) &optval, sizeof optval)) { TRACE ("Setting '%s' failed, errno: %d", _mongoc_socket_sockopt_value_to_name (name), errno); } else { TRACE ("'%s' value changed to %d", _mongoc_socket_sockopt_value_to_name (name), optval); } } } } static void _mongoc_socket_setkeepalive_nix (int sd) { #if defined(TCP_KEEPIDLE) _mongoc_socket_set_sockopt_if_less (sd, TCP_KEEPIDLE, MONGODB_KEEPIDLE); #elif defined(TCP_KEEPALIVE) _mongoc_socket_set_sockopt_if_less (sd, TCP_KEEPALIVE, MONGODB_KEEPIDLE); #else TRACE ("%s", "Neither TCP_KEEPIDLE nor TCP_KEEPALIVE available"); #endif #ifdef TCP_KEEPINTVL _mongoc_socket_set_sockopt_if_less ( sd, TCP_KEEPINTVL, MONGODB_KEEPALIVEINTVL); #else TRACE ("%s", "TCP_KEEPINTVL not available"); #endif #ifdef TCP_KEEPCNT _mongoc_socket_set_sockopt_if_less (sd, TCP_KEEPCNT, MONGODB_KEEPALIVECNT); #else TRACE ("%s", "TCP_KEEPCNT not available"); #endif } #endif static void #ifdef _WIN32 _mongoc_socket_setkeepalive (SOCKET sd) /* IN */ #else _mongoc_socket_setkeepalive (int sd) /* IN */ #endif { #ifdef SO_KEEPALIVE int optval = 1; ENTRY; #ifdef SO_KEEPALIVE if (!setsockopt ( sd, SOL_SOCKET, SO_KEEPALIVE, (char *) &optval, sizeof optval)) { TRACE ("%s", "Setting 
SO_KEEPALIVE"); #ifdef _WIN32 _mongoc_socket_setkeepalive_windows (sd); #else _mongoc_socket_setkeepalive_nix (sd); #endif } else { TRACE ("%s", "Failed setting SO_KEEPALIVE"); } #else TRACE ("%s", "SO_KEEPALIVE not available"); #endif EXIT; #endif } static bool #ifdef _WIN32 _mongoc_socket_setnodelay (SOCKET sd) /* IN */ #else _mongoc_socket_setnodelay (int sd) /* IN */ #endif { #ifdef _WIN32 BOOL optval = 1; #else int optval = 1; #endif int ret; ENTRY; errno = 0; ret = setsockopt ( sd, IPPROTO_TCP, TCP_NODELAY, (char *) &optval, sizeof optval); #ifdef _WIN32 if (ret == SOCKET_ERROR) { MONGOC_WARNING ("WSAGetLastError(): %d", (int) WSAGetLastError ()); } #endif RETURN (ret == 0); } /* *-------------------------------------------------------------------------- * * mongoc_socket_errno -- * * Returns the last error on the socket. * * Returns: * An integer errno, or 0 on no error. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int mongoc_socket_errno (mongoc_socket_t *sock) /* IN */ { BSON_ASSERT (sock); TRACE ("Current errno: %d", sock->errno_); return sock->errno_; } /* *-------------------------------------------------------------------------- * * _mongoc_socket_errno_is_again -- * * Check to see if we should attempt to make further progress * based on the error of the last operation. * * Returns: * true if we should try again. otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_socket_errno_is_again (mongoc_socket_t *sock) /* IN */ { TRACE ("errno is: %d", sock->errno_); return MONGOC_ERRNO_IS_AGAIN (sock->errno_); } /* *-------------------------------------------------------------------------- * * mongoc_socket_accept -- * * Wrapper for BSD socket accept(). Handles portability between * BSD sockets and WinSock2 on Windows Vista and newer. * * Returns: * NULL upon failure to accept or timeout. * A newly allocated mongoc_socket_t on success. * * Side effects: * *port contains the client port number. * *-------------------------------------------------------------------------- */ mongoc_socket_t * mongoc_socket_accept (mongoc_socket_t *sock, /* IN */ int64_t expire_at) /* IN */ { return mongoc_socket_accept_ex (sock, expire_at, NULL); } /* *-------------------------------------------------------------------------- * * mongoc_socket_accept_ex -- * * Private synonym for mongoc_socket_accept, returning client port. * * Returns: * NULL upon failure to accept or timeout. * A newly allocated mongoc_socket_t on success. * * Side effects: * *port contains the client port number. 
* *-------------------------------------------------------------------------- */ mongoc_socket_t * mongoc_socket_accept_ex (mongoc_socket_t *sock, /* IN */ int64_t expire_at, /* IN */ uint16_t *port) /* OUT */ { mongoc_socket_t *client; struct sockaddr_storage addr = {0}; mongoc_socklen_t addrlen = sizeof addr; bool try_again = false; bool failed = false; #ifdef _WIN32 SOCKET sd; #else int sd; #endif ENTRY; BSON_ASSERT (sock); again: errno = 0; sd = accept (sock->sd, (mongoc_sockaddr_t *) &addr, &addrlen); _mongoc_socket_capture_errno (sock); #ifdef _WIN32 failed = (sd == INVALID_SOCKET); #else failed = (sd == -1); #endif try_again = (failed && _mongoc_socket_errno_is_again (sock)); if (failed && try_again) { if (_mongoc_socket_wait (sock, POLLIN, expire_at)) { GOTO (again); } RETURN (NULL); } else if (failed) { RETURN (NULL); } else if (!_mongoc_socket_setnonblock (sd)) { #ifdef _WIN32 closesocket (sd); #else close (sd); #endif RETURN (NULL); } client = (mongoc_socket_t *) bson_malloc0 (sizeof *client); client->sd = sd; if (port) { if (addr.ss_family == AF_INET) { struct sockaddr_in *tmp = (struct sockaddr_in *) &addr; *port = ntohs (tmp->sin_port); } else { struct sockaddr_in6 *tmp = (struct sockaddr_in6 *) &addr; *port = ntohs (tmp->sin6_port); } } if (!_mongoc_socket_setnodelay (client->sd)) { MONGOC_WARNING ("Failed to enable TCP_NODELAY."); } RETURN (client); } /* *-------------------------------------------------------------------------- * * mongo_socket_bind -- * * A wrapper around bind(). * * Returns: * 0 on success, -1 on failure and errno is set. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int mongoc_socket_bind (mongoc_socket_t *sock, /* IN */ const struct sockaddr *addr, /* IN */ mongoc_socklen_t addrlen) /* IN */ { int ret; ENTRY; BSON_ASSERT (sock); BSON_ASSERT (addr); BSON_ASSERT (addrlen); ret = bind (sock->sd, addr, addrlen); _mongoc_socket_capture_errno (sock); RETURN (ret); } int mongoc_socket_close (mongoc_socket_t *sock) /* IN */ { bool owned; ENTRY; BSON_ASSERT (sock); #ifdef _WIN32 owned = (sock->pid == (int) _getpid ()); if (sock->sd != INVALID_SOCKET) { if (owned) { shutdown (sock->sd, SD_BOTH); } if (0 == closesocket (sock->sd)) { sock->sd = INVALID_SOCKET; } else { _mongoc_socket_capture_errno (sock); RETURN (-1); } } RETURN (0); #else owned = (sock->pid == (int) getpid ()); if (sock->sd != -1) { if (owned) { shutdown (sock->sd, SHUT_RDWR); } if (0 == close (sock->sd)) { sock->sd = -1; } else { _mongoc_socket_capture_errno (sock); RETURN (-1); } } RETURN (0); #endif } /* *-------------------------------------------------------------------------- * * mongoc_socket_connect -- * * Performs a socket connection but will fail if @expire_at is * reached by the monotonic clock. * * Returns: * 0 if success, otherwise -1 and errno is set. * * Side effects: * None. 
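 * Illustrative usage (a sketch, assuming the peer address came from
 * getaddrinfo()): create the socket with
 * mongoc_socket_new (rp->ai_family, SOCK_STREAM, 0) and call
 * mongoc_socket_connect (sock, rp->ai_addr, (mongoc_socklen_t) rp->ai_addrlen,
 * bson_get_monotonic_time () + timeout_usec), where timeout_usec is whatever
 * budget the caller allows.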
* *-------------------------------------------------------------------------- */ int mongoc_socket_connect (mongoc_socket_t *sock, /* IN */ const struct sockaddr *addr, /* IN */ mongoc_socklen_t addrlen, /* IN */ int64_t expire_at) /* IN */ { bool try_again = false; bool failed = false; int ret; int optval; /* getsockopt parameter types vary, we check in CheckCompiler.m4 */ mongoc_socklen_t optlen = (mongoc_socklen_t) sizeof optval; ENTRY; BSON_ASSERT (sock); BSON_ASSERT (addr); BSON_ASSERT (addrlen); ret = connect (sock->sd, addr, addrlen); #ifdef _WIN32 if (ret == SOCKET_ERROR) { #else if (ret == -1) { #endif _mongoc_socket_capture_errno (sock); failed = true; try_again = _mongoc_socket_errno_is_again (sock); } if (failed && try_again) { if (_mongoc_socket_wait (sock, POLLOUT, expire_at)) { optval = -1; ret = getsockopt ( sock->sd, SOL_SOCKET, SO_ERROR, (char *) &optval, &optlen); if ((ret == 0) && (optval == 0)) { RETURN (0); } else { errno = sock->errno_ = optval; } } RETURN (-1); } else if (failed) { RETURN (-1); } else { RETURN (0); } } /* *-------------------------------------------------------------------------- * * mongoc_socket_destroy -- * * Cleanup after a mongoc_socket_t structure, possibly closing * underlying sockets. * * Returns: * None. * * Side effects: * @sock is freed and should be considered invalid. * *-------------------------------------------------------------------------- */ void mongoc_socket_destroy (mongoc_socket_t *sock) /* IN */ { if (sock) { mongoc_socket_close (sock); bson_free (sock); } } /* *-------------------------------------------------------------------------- * * mongoc_socket_listen -- * * Listen for incoming requests with a backlog up to @backlog. * * If @backlog is zero, a sensible default will be chosen. * * Returns: * true if successful; otherwise false. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int mongoc_socket_listen (mongoc_socket_t *sock, /* IN */ unsigned int backlog) /* IN */ { int ret; ENTRY; BSON_ASSERT (sock); if (backlog == 0) { backlog = 10; } ret = listen (sock->sd, backlog); _mongoc_socket_capture_errno (sock); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_socket_new -- * * Create a new socket and store the current process id on it. * * Free the result with mongoc_socket_destroy(). * * Returns: * A newly allocated socket. * NULL on failure. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_socket_t * mongoc_socket_new (int domain, /* IN */ int type, /* IN */ int protocol) /* IN */ { mongoc_socket_t *sock; #ifdef _WIN32 SOCKET sd; #else int sd; #endif ENTRY; sd = socket (domain, type, protocol); #ifdef _WIN32 if (sd == INVALID_SOCKET) { #else if (sd == -1) { #endif RETURN (NULL); } if (!_mongoc_socket_setnonblock (sd)) { GOTO (fail); } if (domain != AF_UNIX) { if (!_mongoc_socket_setnodelay (sd)) { MONGOC_WARNING ("Failed to enable TCP_NODELAY."); } _mongoc_socket_setkeepalive (sd); } sock = (mongoc_socket_t *) bson_malloc0 (sizeof *sock); sock->sd = sd; sock->domain = domain; #ifdef _WIN32 sock->pid = (int) _getpid (); #else sock->pid = (int) getpid (); #endif RETURN (sock); fail: #ifdef _WIN32 closesocket (sd); #else close (sd); #endif RETURN (NULL); } /* *-------------------------------------------------------------------------- * * mongoc_socket_recv -- * * A portable wrapper around recv() that also respects an absolute * timeout. 
* * @expire_at is 0 for no blocking, -1 for infinite blocking, * or a time using the monotonic clock to expire. Calculate this * using bson_get_monotonic_time() + N_MICROSECONDS. * * Returns: * The number of bytes received on success. * 0 on end of stream. * -1 on failure. * * Side effects: * @buf will be read into. * *-------------------------------------------------------------------------- */ ssize_t mongoc_socket_recv (mongoc_socket_t *sock, /* IN */ void *buf, /* OUT */ size_t buflen, /* IN */ int flags, /* IN */ int64_t expire_at) /* IN */ { ssize_t ret = 0; bool failed = false; ENTRY; BSON_ASSERT (sock); BSON_ASSERT (buf); BSON_ASSERT (buflen); again: sock->errno_ = 0; #ifdef _WIN32 ret = recv (sock->sd, (char *) buf, (int) buflen, flags); failed = (ret == SOCKET_ERROR); #else ret = recv (sock->sd, buf, buflen, flags); failed = (ret == -1); #endif if (failed) { _mongoc_socket_capture_errno (sock); if (_mongoc_socket_errno_is_again (sock) && _mongoc_socket_wait (sock, POLLIN, expire_at)) { GOTO (again); } } if (failed) { RETURN (-1); } mongoc_counter_streams_ingress_add (ret); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_socket_setsockopt -- * * A wrapper around setsockopt(). * * Returns: * 0 on success, -1 on failure. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int mongoc_socket_setsockopt (mongoc_socket_t *sock, /* IN */ int level, /* IN */ int optname, /* IN */ const void *optval, /* IN */ mongoc_socklen_t optlen) /* IN */ { int ret; ENTRY; BSON_ASSERT (sock); ret = setsockopt (sock->sd, level, optname, optval, optlen); _mongoc_socket_capture_errno (sock); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_socket_send -- * * A simplified wrapper around mongoc_socket_sendv(). * * @expire_at is 0 for no blocking, -1 for infinite blocking, * or a time using the monotonic clock to expire. Calculate this * using bson_get_monotonic_time() + N_MICROSECONDS. * * Returns: * -1 on failure. number of bytes written on success. * * Side effects: * None. * *-------------------------------------------------------------------------- */ ssize_t mongoc_socket_send (mongoc_socket_t *sock, /* IN */ const void *buf, /* IN */ size_t buflen, /* IN */ int64_t expire_at) /* IN */ { mongoc_iovec_t iov; BSON_ASSERT (sock); BSON_ASSERT (buf); BSON_ASSERT (buflen); iov.iov_base = (void *) buf; iov.iov_len = buflen; return mongoc_socket_sendv (sock, &iov, 1, expire_at); } /* *-------------------------------------------------------------------------- * * _mongoc_socket_try_sendv_slow -- * * A slow variant of _mongoc_socket_try_sendv() that sends each * iovec entry one by one. This can happen if we hit EMSGSIZE * with sendmsg() on various POSIX systems or WSASend()+WSAEMSGSIZE * on Windows. * * Returns: * the number of bytes sent or -1 and errno is set. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ static ssize_t _mongoc_socket_try_sendv_slow (mongoc_socket_t *sock, /* IN */ mongoc_iovec_t *iov, /* IN */ size_t iovcnt) /* IN */ { ssize_t ret = 0; size_t i; ssize_t wrote; ENTRY; BSON_ASSERT (sock); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); for (i = 0; i < iovcnt; i++) { wrote = send (sock->sd, iov[i].iov_base, iov[i].iov_len, 0); #ifdef _WIN32 if (wrote == SOCKET_ERROR) { #else if (wrote == -1) { #endif _mongoc_socket_capture_errno (sock); if (!_mongoc_socket_errno_is_again (sock)) { RETURN (-1); } RETURN (ret ? ret : -1); } ret += wrote; if (wrote != iov[i].iov_len) { RETURN (ret); } } RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_socket_try_sendv -- * * Helper used by mongoc_socket_sendv() to try to write as many * bytes to the underlying socket until the socket buffer is full. * * This is performed in a non-blocking fashion. * * Returns: * -1 on failure. the number of bytes written on success. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static ssize_t _mongoc_socket_try_sendv (mongoc_socket_t *sock, /* IN */ mongoc_iovec_t *iov, /* IN */ size_t iovcnt) /* IN */ { #ifdef _WIN32 DWORD dwNumberofBytesSent = 0; int ret; #else struct msghdr msg; ssize_t ret; #endif ENTRY; BSON_ASSERT (sock); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); DUMP_IOVEC (sendbuf, iov, iovcnt); #ifdef _WIN32 ret = WSASend ( sock->sd, (LPWSABUF) iov, iovcnt, &dwNumberofBytesSent, 0, NULL, NULL); TRACE ("WSASend sent: %ld (out of: %ld), ret: %d", dwNumberofBytesSent, iov->iov_len, ret); #else memset (&msg, 0, sizeof msg); msg.msg_iov = iov; msg.msg_iovlen = (int) iovcnt; ret = sendmsg (sock->sd, &msg, #ifdef MSG_NOSIGNAL MSG_NOSIGNAL); #else 0); #endif TRACE ("Send %ld out of %ld bytes", ret, iov->iov_len); #endif #ifdef _WIN32 if (ret == SOCKET_ERROR) { #else if (ret == -1) { #endif _mongoc_socket_capture_errno (sock); /* * Check to see if we have sent an iovec too large for sendmsg to * complete. If so, we need to fallback to the slow path of multiple * send() commands. */ #ifdef _WIN32 if (mongoc_socket_errno (sock) == WSAEMSGSIZE) { #else if (mongoc_socket_errno (sock) == EMSGSIZE) { #endif RETURN (_mongoc_socket_try_sendv_slow (sock, iov, iovcnt)); } RETURN (-1); } #ifdef _WIN32 RETURN (dwNumberofBytesSent); #else RETURN (ret); #endif } /* *-------------------------------------------------------------------------- * * mongoc_socket_sendv -- * * A wrapper around using sendmsg() to send an iovec. * This also deals with the structure differences between * WSABUF and struct iovec. * * @expire_at is 0 for no blocking, -1 for infinite blocking, * or a time using the monotonic clock to expire. Calculate this * using bson_get_monotonic_time() + N_MICROSECONDS. * * Returns: * -1 on failure. * the number of bytes written on success. * * Side effects: * None. 
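 * Illustrative usage (a sketch): fill iov[i].iov_base and iov[i].iov_len for
 * each buffer and call
 * mongoc_socket_sendv (sock, iov, iovcnt, bson_get_monotonic_time () + timeout_usec).
 * Partial writes are handled here by advancing iov_base/iov_len and polling
 * for POLLOUT until every byte is sent, the deadline passes, or a hard error
 * occurs.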
* *-------------------------------------------------------------------------- */ ssize_t mongoc_socket_sendv (mongoc_socket_t *sock, /* IN */ mongoc_iovec_t *in_iov, /* IN */ size_t iovcnt, /* IN */ int64_t expire_at) /* IN */ { ssize_t ret = 0; ssize_t sent; size_t cur = 0; mongoc_iovec_t *iov; ENTRY; BSON_ASSERT (sock); BSON_ASSERT (in_iov); BSON_ASSERT (iovcnt); iov = bson_malloc (sizeof (*iov) * iovcnt); memcpy (iov, in_iov, sizeof (*iov) * iovcnt); for (;;) { sent = _mongoc_socket_try_sendv (sock, &iov[cur], iovcnt - cur); TRACE ( "Sent %ld (of %ld) out of iovcnt=%ld", sent, iov[cur].iov_len, iovcnt); /* * If we failed with anything other than EAGAIN or EWOULDBLOCK, * we should fail immediately as there is another issue with the * underlying socket. */ if (sent == -1) { if (!_mongoc_socket_errno_is_again (sock)) { ret = -1; GOTO (CLEANUP); } } /* * Update internal stream counters. */ if (sent > 0) { ret += sent; mongoc_counter_streams_egress_add (sent); /* * Subtract the sent amount from what we still need to send. */ while ((cur < iovcnt) && (sent >= (ssize_t) iov[cur].iov_len)) { TRACE ("still got bytes left: sent -= iov_len: %ld -= %ld", sent, iov[cur].iov_len); sent -= iov[cur++].iov_len; } /* * Check if that made us finish all of the iovecs. If so, we are done * sending data over the socket. */ if (cur == iovcnt) { TRACE ("%s", "Finished the iovecs"); break; } /* * Increment the current iovec buffer to its proper offset and adjust * the number of bytes to write. */ TRACE ("Seeked io_base+%ld", sent); TRACE ( "Subtracting iov_len -= sent; %ld -= %ld", iov[cur].iov_len, sent); iov[cur].iov_base = ((char *) iov[cur].iov_base) + sent; iov[cur].iov_len -= sent; TRACE ("iov_len remaining %ld", iov[cur].iov_len); BSON_ASSERT (iovcnt - cur); BSON_ASSERT (iov[cur].iov_len); } else if (OPERATION_EXPIRED (expire_at)) { if (expire_at > 0) { mongoc_counter_streams_timeout_inc (); } GOTO (CLEANUP); } /* * Block on poll() until our desired condition is met. */ if (!_mongoc_socket_wait (sock, POLLOUT, expire_at)) { GOTO (CLEANUP); } } CLEANUP: bson_free (iov); RETURN (ret); } int mongoc_socket_getsockname (mongoc_socket_t *sock, /* IN */ struct sockaddr *addr, /* OUT */ mongoc_socklen_t *addrlen) /* INOUT */ { int ret; ENTRY; BSON_ASSERT (sock); ret = getsockname (sock->sd, addr, addrlen); _mongoc_socket_capture_errno (sock); RETURN (ret); } char * mongoc_socket_getnameinfo (mongoc_socket_t *sock) /* IN */ { /* getpeername parameter types vary, we check in CheckCompiler.m4 */ struct sockaddr_storage addr; mongoc_socklen_t len = (mongoc_socklen_t) sizeof addr; char *ret; char host[BSON_HOST_NAME_MAX + 1]; ENTRY; BSON_ASSERT (sock); if (getpeername (sock->sd, (struct sockaddr *) &addr, &len)) { RETURN (NULL); } if (getnameinfo ( (struct sockaddr *) &addr, len, host, sizeof host, NULL, 0, 0)) { RETURN (NULL); } ret = bson_strdup (host); RETURN (ret); } bool mongoc_socket_check_closed (mongoc_socket_t *sock) /* IN */ { bool closed = false; char buf[1]; ssize_t r; if (_mongoc_socket_wait (sock, POLLIN, 0)) { sock->errno_ = 0; r = recv (sock->sd, buf, 1, MSG_PEEK); if (r < 0) { _mongoc_socket_capture_errno (sock); } if (r < 1) { closed = true; } } return closed; } /* * *-------------------------------------------------------------------------- * * mongoc_socket_inet_ntop -- * * Convert the ip from addrinfo into a c string. * * Returns: * The value is returned into 'buffer'. The memory has to be allocated * by the caller * * Side effects: * None. 
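 *
 *    Illustrative call (a sketch only; 'rp' is assumed to come from a
 *    prior getaddrinfo() lookup):
 *
 *       char buf[64];
 *       mongoc_socket_inet_ntop (rp, buf, sizeof buf);
 *       // buf now holds e.g. "ipv4 127.0.0.1" or "ipv6 ::1"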
* *-------------------------------------------------------------------------- */ void mongoc_socket_inet_ntop (struct addrinfo *rp, /* IN */ char *buf, /* INOUT */ size_t buflen) /* IN */ { void *ptr; char tmp[256]; switch (rp->ai_family) { case AF_INET: ptr = &((struct sockaddr_in *) rp->ai_addr)->sin_addr; inet_ntop (rp->ai_family, ptr, tmp, sizeof (tmp)); bson_snprintf (buf, buflen, "ipv4 %s", tmp); break; case AF_INET6: ptr = &((struct sockaddr_in6 *) rp->ai_addr)->sin6_addr; inet_ntop (rp->ai_family, ptr, tmp, sizeof (tmp)); bson_snprintf (buf, buflen, "ipv6 %s", tmp); break; default: bson_snprintf (buf, buflen, "unknown ip %d", rp->ai_family); break; } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-socket.h0000644000076500000240000000702613572250760024752 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SOCKET_H #define MONGOC_SOCKET_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-config.h" #ifdef _WIN32 #include #include #else #include #include #include #include #include #include #include #include #include #endif #if defined(_AIX) && !defined(HAVE_SA_SS_FAMILY) # define ss_family __ss_family #endif #include "mongoc/mongoc-iovec.h" BSON_BEGIN_DECLS typedef MONGOC_SOCKET_ARG3 mongoc_socklen_t; typedef struct _mongoc_socket_t mongoc_socket_t; typedef struct { mongoc_socket_t *socket; int events; int revents; } mongoc_socket_poll_t; MONGOC_EXPORT (mongoc_socket_t *) mongoc_socket_accept (mongoc_socket_t *sock, int64_t expire_at); MONGOC_EXPORT (int) mongoc_socket_bind (mongoc_socket_t *sock, const struct sockaddr *addr, mongoc_socklen_t addrlen); MONGOC_EXPORT (int) mongoc_socket_close (mongoc_socket_t *socket); MONGOC_EXPORT (int) mongoc_socket_connect (mongoc_socket_t *sock, const struct sockaddr *addr, mongoc_socklen_t addrlen, int64_t expire_at); MONGOC_EXPORT (char *) mongoc_socket_getnameinfo (mongoc_socket_t *sock); MONGOC_EXPORT (void) mongoc_socket_destroy (mongoc_socket_t *sock); MONGOC_EXPORT (int) mongoc_socket_errno (mongoc_socket_t *sock); MONGOC_EXPORT (int) mongoc_socket_getsockname (mongoc_socket_t *sock, struct sockaddr *addr, mongoc_socklen_t *addrlen); MONGOC_EXPORT (int) mongoc_socket_listen (mongoc_socket_t *sock, unsigned int backlog); MONGOC_EXPORT (mongoc_socket_t *) mongoc_socket_new (int domain, int type, int protocol); MONGOC_EXPORT (ssize_t) mongoc_socket_recv (mongoc_socket_t *sock, void *buf, size_t buflen, int flags, int64_t expire_at); MONGOC_EXPORT (int) mongoc_socket_setsockopt (mongoc_socket_t *sock, int level, int optname, const void *optval, mongoc_socklen_t optlen); MONGOC_EXPORT (ssize_t) mongoc_socket_send (mongoc_socket_t *sock, const void *buf, size_t buflen, int64_t expire_at); MONGOC_EXPORT (ssize_t) mongoc_socket_sendv (mongoc_socket_t *sock, mongoc_iovec_t *iov, size_t iovcnt, int64_t expire_at); MONGOC_EXPORT (bool) mongoc_socket_check_closed (mongoc_socket_t *sock); MONGOC_EXPORT (void) mongoc_socket_inet_ntop 
(struct addrinfo *rp, char *buf, size_t buflen); MONGOC_EXPORT (ssize_t) mongoc_socket_poll (mongoc_socket_poll_t *sds, size_t nsds, int32_t timeout); BSON_END_DECLS #endif /* MONGOC_SOCKET_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-ssl-private.h0000644000076500000240000000214713572250760025732 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SSL_PRIVATE_H #define MONGOC_SSL_PRIVATE_H #include #include "mongoc/mongoc-uri-private.h" BSON_BEGIN_DECLS char * mongoc_ssl_extract_subject (const char *filename, const char *passphrase); void _mongoc_ssl_opts_from_uri (mongoc_ssl_opt_t *ssl_opt, mongoc_uri_t *uri); void _mongoc_ssl_opts_copy_to (const mongoc_ssl_opt_t *src, mongoc_ssl_opt_t *dst); void _mongoc_ssl_opts_cleanup (mongoc_ssl_opt_t *opt); BSON_END_DECLS #endif /* MONGOC_SSL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-ssl.c0000644000076500000240000001033013572250760024246 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL #include #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-ssl-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-uri.h" #if defined(MONGOC_ENABLE_SSL_OPENSSL) #include "mongoc/mongoc-openssl-private.h" #elif defined(MONGOC_ENABLE_SSL_LIBRESSL) #include "mongoc/mongoc-libressl-private.h" #elif defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT) #include "mongoc/mongoc-secure-transport-private.h" #elif defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) #include "mongoc/mongoc-secure-channel-private.h" #endif /* TODO: we could populate these from a config or something further down the * road for providing defaults */ #ifndef MONGOC_SSL_DEFAULT_TRUST_FILE #define MONGOC_SSL_DEFAULT_TRUST_FILE NULL #endif #ifndef MONGOC_SSL_DEFAULT_TRUST_DIR #define MONGOC_SSL_DEFAULT_TRUST_DIR NULL #endif static mongoc_ssl_opt_t gMongocSslOptDefault = { NULL, NULL, MONGOC_SSL_DEFAULT_TRUST_FILE, MONGOC_SSL_DEFAULT_TRUST_DIR, }; const mongoc_ssl_opt_t * mongoc_ssl_opt_get_default (void) { return &gMongocSslOptDefault; } char * mongoc_ssl_extract_subject (const char *filename, const char *passphrase) { char *retval; if (!filename) { MONGOC_ERROR ("No filename provided to extract subject from"); return NULL; } #ifdef _WIN32 if (_access (filename, 0) != 0) { #else if (access (filename, R_OK) != 0) { #endif MONGOC_ERROR ("Can't extract subject from unreadable file: '%s'", filename); return NULL; } #if defined(MONGOC_ENABLE_SSL_OPENSSL) retval = _mongoc_openssl_extract_subject (filename, passphrase); #elif defined(MONGOC_ENABLE_SSL_LIBRESSL) MONGOC_WARNING ( "libtls doesn't support automatically extracting subject from " "certificate to use with authentication"); retval = NULL; #elif defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT) retval = _mongoc_secure_transport_extract_subject (filename, passphrase); #elif defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) retval = _mongoc_secure_channel_extract_subject (filename, passphrase); #endif if (!retval) { MONGOC_ERROR ("Can't extract subject from file '%s'", filename); } return retval; } void _mongoc_ssl_opts_from_uri (mongoc_ssl_opt_t *ssl_opt, mongoc_uri_t *uri) { bool insecure = mongoc_uri_get_option_as_bool (uri, MONGOC_URI_TLSINSECURE, false); ssl_opt->pem_file = mongoc_uri_get_option_as_utf8 ( uri, MONGOC_URI_TLSCERTIFICATEKEYFILE, NULL); ssl_opt->pem_pwd = mongoc_uri_get_option_as_utf8 ( uri, MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD, NULL); ssl_opt->ca_file = mongoc_uri_get_option_as_utf8 (uri, MONGOC_URI_TLSCAFILE, NULL); ssl_opt->weak_cert_validation = mongoc_uri_get_option_as_bool ( uri, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES, insecure); ssl_opt->allow_invalid_hostname = mongoc_uri_get_option_as_bool ( uri, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES, insecure); } void _mongoc_ssl_opts_copy_to (const mongoc_ssl_opt_t *src, mongoc_ssl_opt_t *dst) { BSON_ASSERT (src); BSON_ASSERT (dst); dst->pem_file = bson_strdup (src->pem_file); dst->pem_pwd = bson_strdup (src->pem_pwd); dst->ca_file = bson_strdup (src->ca_file); dst->ca_dir = bson_strdup (src->ca_dir); dst->crl_file = bson_strdup (src->crl_file); dst->weak_cert_validation = src->weak_cert_validation; dst->allow_invalid_hostname = src->allow_invalid_hostname; } void _mongoc_ssl_opts_cleanup (mongoc_ssl_opt_t *opt) { bson_free ((char *) opt->pem_file); bson_free ((char *) opt->pem_pwd); bson_free ((char *) opt->ca_file); bson_free ((char *) opt->ca_dir); bson_free ((char *) opt->crl_file); } #endif 
mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-ssl.h0000644000076500000240000000223213572250760024255 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SSL_H #define MONGOC_SSL_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS typedef struct _mongoc_ssl_opt_t mongoc_ssl_opt_t; struct _mongoc_ssl_opt_t { const char *pem_file; const char *pem_pwd; const char *ca_file; const char *ca_dir; const char *crl_file; bool weak_cert_validation; bool allow_invalid_hostname; void *padding[7]; }; MONGOC_EXPORT (const mongoc_ssl_opt_t *) mongoc_ssl_opt_get_default (void) BSON_GNUC_PURE; BSON_END_DECLS #endif /* MONGOC_SSL_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-sspi-private.h0000644000076500000240000000467513572250760026117 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_SSPI_PRIVATE_H #define MONGOC_SSPI_PRIVATE_H #include #include "mongoc/mongoc-sasl-private.h" BSON_BEGIN_DECLS #define SECURITY_WIN32 1 /* Required for SSPI */ #include #include #include #include #define MONGOC_SSPI_AUTH_GSS_ERROR -1 #define MONGOC_SSPI_AUTH_GSS_COMPLETE 1 #define MONGOC_SSPI_AUTH_GSS_CONTINUE 0 typedef struct { mongoc_sasl_t sasl; CredHandle cred; CtxtHandle ctx; WCHAR *spn; SEC_CHAR *response; SEC_CHAR *username; ULONG flags; UCHAR haveCred; UCHAR haveCtx; ULONG qop; } mongoc_sspi_client_state_t; void _mongoc_sspi_set_gsserror (DWORD errCode, const SEC_CHAR *msg); void _mongoc_sspi_destroy_sspi_client_state (mongoc_sspi_client_state_t *state); int _mongoc_sspi_auth_sspi_client_init (WCHAR *service, ULONG flags, WCHAR *user, ULONG ulen, WCHAR *domain, ULONG dlen, WCHAR *password, ULONG plen, mongoc_sspi_client_state_t *state); int _mongoc_sspi_auth_sspi_client_step (mongoc_sspi_client_state_t *state, SEC_CHAR *challenge); int _mongoc_sspi_auth_sspi_client_unwrap (mongoc_sspi_client_state_t *state, SEC_CHAR *challenge); int _mongoc_sspi_auth_sspi_client_wrap (mongoc_sspi_client_state_t *state, SEC_CHAR *data, SEC_CHAR *user, ULONG ulen, INT protect); BSON_END_DECLS #endif /* MONGOC_SSPI_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-sspi.c0000644000076500000240000004025113572250760024430 0ustar alcaeusstaff/* * Copyright 2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * This file originates from https://github.com/mongodb-labs/winkerberos */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SASL_SSPI /* mingw doesn't define this */ #ifndef CRYPT_STRING_NOCRLF #define CRYPT_STRING_NOCRLF 0x40000000 #endif #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-sspi-private.h" void _mongoc_sspi_destroy_sspi_client_state (mongoc_sspi_client_state_t *state) { if (state->haveCtx) { DeleteSecurityContext (&state->ctx); state->haveCtx = 0; } if (state->haveCred) { FreeCredentialsHandle (&state->cred); state->haveCred = 0; } if (state->spn != NULL) { free (state->spn); state->spn = NULL; } if (state->response != NULL) { free (state->response); state->response = NULL; } if (state->username != NULL) { free (state->username); state->username = NULL; } } void _mongoc_sspi_set_gsserror (DWORD errCode, const SEC_CHAR *msg) { SEC_CHAR *err; DWORD status; DWORD flags = FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS; status = FormatMessageA (flags, NULL, errCode, MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) &err, 0, NULL); if (status) { MONGOC_ERROR ("SSPI: %s: %s", msg, err); LocalFree (err); } else { MONGOC_ERROR ("SSPI: %s", msg); } } static SEC_CHAR * _mongoc_sspi_base64_encode (const SEC_CHAR *value, DWORD vlen) { SEC_CHAR *out = NULL; DWORD len; /* Get the correct size for the out buffer. */ if (CryptBinaryToStringA ((BYTE *) value, vlen, CRYPT_STRING_BASE64 | CRYPT_STRING_NOCRLF, NULL, &len)) { out = (SEC_CHAR *) malloc (sizeof (SEC_CHAR) * len); if (out) { /* Encode to the out buffer. */ if (CryptBinaryToStringA ((BYTE *) value, vlen, CRYPT_STRING_BASE64 | CRYPT_STRING_NOCRLF, out, &len)) { return out; } else { free (out); } } } MONGOC_ERROR ("%s", "CryptBinaryToString failed."); return NULL; } static SEC_CHAR * _mongoc_sspi_base64_decode (const SEC_CHAR *value, DWORD *rlen) { SEC_CHAR *out = NULL; /* Get the correct size for the out buffer. */ if (CryptStringToBinaryA ( value, 0, CRYPT_STRING_BASE64, NULL, rlen, NULL, NULL)) { out = (SEC_CHAR *) malloc (sizeof (SEC_CHAR) * *rlen); if (out) { /* Decode to the out buffer. 
*/ if (CryptStringToBinaryA (value, 0, CRYPT_STRING_BASE64, (BYTE *) out, rlen, NULL, NULL)) { return out; } else { free (out); } } } MONGOC_ERROR ("%s", "CryptStringToBinary failed."); return NULL; } static CHAR * _mongoc_sspi_wide_to_utf8 (WCHAR *value) { CHAR *out; int len = WideCharToMultiByte (CP_UTF8, 0, value, -1, NULL, 0, NULL, NULL); if (len) { out = (CHAR *) malloc (sizeof (CHAR) * len); if (WideCharToMultiByte (CP_UTF8, 0, value, -1, out, len, NULL, NULL)) { return out; } else { free (out); } } _mongoc_sspi_set_gsserror (GetLastError (), "WideCharToMultiByte"); return NULL; } int _mongoc_sspi_auth_sspi_client_init (WCHAR *service, ULONG flags, WCHAR *user, ULONG ulen, WCHAR *domain, ULONG dlen, WCHAR *password, ULONG plen, mongoc_sspi_client_state_t *state) { SECURITY_STATUS status; SEC_WINNT_AUTH_IDENTITY_W authIdentity; TimeStamp ignored; state->response = NULL; state->username = NULL; state->qop = SECQOP_WRAP_NO_ENCRYPT; state->flags = flags; state->haveCred = 0; state->haveCtx = 0; state->spn = _wcsdup (service); if (state->spn == NULL) { return MONGOC_SSPI_AUTH_GSS_ERROR; } /* Convert RFC-2078 format to SPN */ if (!wcschr (state->spn, L'/')) { WCHAR *ptr = wcschr (state->spn, L'@'); if (ptr) { *ptr = L'/'; } } if (user) { authIdentity.User = user; authIdentity.UserLength = ulen; authIdentity.Domain = domain; authIdentity.DomainLength = dlen; authIdentity.Password = password; authIdentity.PasswordLength = plen; authIdentity.Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE; } /* Note that the first parameter, pszPrincipal, appears to be * completely ignored in the Kerberos SSP. For more details see * https://github.com/mongodb-labs/winkerberos/issues/11. * */ status = AcquireCredentialsHandleW (/* Principal */ NULL, /* Security package name */ L"kerberos", /* Credentials Use */ SECPKG_CRED_OUTBOUND, /* LogonID (We don't use this) */ NULL, /* AuthData */ user ? &authIdentity : NULL, /* Always NULL */ NULL, /* Always NULL */ NULL, /* CredHandle */ &state->cred, /* Expiry (Required but unused by us) */ &ignored); if (status != SEC_E_OK) { _mongoc_sspi_set_gsserror (status, "AcquireCredentialsHandle"); return MONGOC_SSPI_AUTH_GSS_ERROR; } state->haveCred = 1; return MONGOC_SSPI_AUTH_GSS_COMPLETE; } int _mongoc_sspi_auth_sspi_client_step (mongoc_sspi_client_state_t *state, SEC_CHAR *challenge) { SecBufferDesc inbuf; SecBuffer inBufs[1]; SecBufferDesc outbuf; SecBuffer outBufs[1]; ULONG ignored; SECURITY_STATUS status = MONGOC_SSPI_AUTH_GSS_CONTINUE; DWORD len; if (state->response != NULL) { free (state->response); state->response = NULL; } inbuf.ulVersion = SECBUFFER_VERSION; inbuf.cBuffers = 1; inbuf.pBuffers = inBufs; inBufs[0].pvBuffer = NULL; inBufs[0].cbBuffer = 0; inBufs[0].BufferType = SECBUFFER_TOKEN; if (state->haveCtx) { inBufs[0].pvBuffer = _mongoc_sspi_base64_decode (challenge, &len); if (!inBufs[0].pvBuffer) { return MONGOC_SSPI_AUTH_GSS_ERROR; } inBufs[0].cbBuffer = len; } outbuf.ulVersion = SECBUFFER_VERSION; outbuf.cBuffers = 1; outbuf.pBuffers = outBufs; outBufs[0].pvBuffer = NULL; outBufs[0].cbBuffer = 0; outBufs[0].BufferType = SECBUFFER_TOKEN; status = InitializeSecurityContextW (/* CredHandle */ &state->cred, /* CtxtHandle (NULL on first call) */ state->haveCtx ? &state->ctx : NULL, /* Service Principal Name */ state->spn, /* Flags */ ISC_REQ_ALLOCATE_MEMORY | state->flags, /* Always 0 */ 0, /* Target data representation */ SECURITY_NETWORK_DREP, /* Challenge (NULL on first call) */ state->haveCtx ? 
&inbuf : NULL, /* Always 0 */ 0, /* CtxtHandle (Set on first call) */ &state->ctx, /* Output */ &outbuf, /* Context attributes */ &ignored, /* Expiry (We don't use this) */ NULL); if (status != SEC_E_OK && status != SEC_I_CONTINUE_NEEDED) { _mongoc_sspi_set_gsserror (status, "InitializeSecurityContext"); status = MONGOC_SSPI_AUTH_GSS_ERROR; goto done; } state->haveCtx = 1; if (outBufs[0].cbBuffer) { state->response = _mongoc_sspi_base64_encode (outBufs[0].pvBuffer, outBufs[0].cbBuffer); if (!state->response) { status = MONGOC_SSPI_AUTH_GSS_ERROR; goto done; } } if (status == SEC_E_OK) { /* Get authenticated username. */ SecPkgContext_NamesW names; status = QueryContextAttributesW (&state->ctx, SECPKG_ATTR_NAMES, &names); if (status != SEC_E_OK) { _mongoc_sspi_set_gsserror (status, "QueryContextAttributesW"); status = MONGOC_SSPI_AUTH_GSS_ERROR; goto done; } state->username = _mongoc_sspi_wide_to_utf8 (names.sUserName); if (state->username == NULL) { FreeContextBuffer (names.sUserName); status = MONGOC_SSPI_AUTH_GSS_ERROR; goto done; } FreeContextBuffer (names.sUserName); status = MONGOC_SSPI_AUTH_GSS_COMPLETE; } else { status = MONGOC_SSPI_AUTH_GSS_CONTINUE; } done: if (inBufs[0].pvBuffer) { free (inBufs[0].pvBuffer); } if (outBufs[0].pvBuffer) { FreeContextBuffer (outBufs[0].pvBuffer); } return status; } int _mongoc_sspi_auth_sspi_client_unwrap (mongoc_sspi_client_state_t *state, SEC_CHAR *challenge) { SECURITY_STATUS status; DWORD len; SecBuffer wrapBufs[2]; SecBufferDesc wrapBufDesc; wrapBufDesc.ulVersion = SECBUFFER_VERSION; wrapBufDesc.cBuffers = 2; wrapBufDesc.pBuffers = wrapBufs; if (state->response != NULL) { free (state->response); state->response = NULL; state->qop = SECQOP_WRAP_NO_ENCRYPT; } if (!state->haveCtx) { return MONGOC_SSPI_AUTH_GSS_ERROR; } wrapBufs[0].pvBuffer = _mongoc_sspi_base64_decode (challenge, &len); if (!wrapBufs[0].pvBuffer) { return MONGOC_SSPI_AUTH_GSS_ERROR; } wrapBufs[0].cbBuffer = len; wrapBufs[0].BufferType = SECBUFFER_STREAM; wrapBufs[1].pvBuffer = NULL; wrapBufs[1].cbBuffer = 0; wrapBufs[1].BufferType = SECBUFFER_DATA; status = DecryptMessage (&state->ctx, &wrapBufDesc, 0, &state->qop); if (status == SEC_E_OK) { status = MONGOC_SSPI_AUTH_GSS_COMPLETE; } else { _mongoc_sspi_set_gsserror (status, "DecryptMessage"); status = MONGOC_SSPI_AUTH_GSS_ERROR; goto done; } if (wrapBufs[1].cbBuffer) { state->response = _mongoc_sspi_base64_encode (wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer); if (!state->response) { status = MONGOC_SSPI_AUTH_GSS_ERROR; } } done: if (wrapBufs[0].pvBuffer) { free (wrapBufs[0].pvBuffer); } return status; } int _mongoc_sspi_auth_sspi_client_wrap (mongoc_sspi_client_state_t *state, SEC_CHAR *data, SEC_CHAR *user, ULONG ulen, int protect) { SECURITY_STATUS status; SecPkgContext_Sizes sizes; SecBuffer wrapBufs[3]; SecBufferDesc wrapBufDesc; SEC_CHAR *decodedData = NULL; SEC_CHAR *inbuf; SIZE_T inbufSize; SEC_CHAR *outbuf; DWORD outbufSize; SEC_CHAR *plaintextMessage; ULONG plaintextMessageSize; if (state->response != NULL) { free (state->response); state->response = NULL; } if (!state->haveCtx) { return MONGOC_SSPI_AUTH_GSS_ERROR; } status = QueryContextAttributes (&state->ctx, SECPKG_ATTR_SIZES, &sizes); if (status != SEC_E_OK) { _mongoc_sspi_set_gsserror (status, "QueryContextAttributes"); return MONGOC_SSPI_AUTH_GSS_ERROR; } if (user) { /* Length of user + 4 bytes for security layer (see below). 
*/ plaintextMessageSize = ulen + 4; } else { decodedData = _mongoc_sspi_base64_decode (data, &plaintextMessageSize); if (!decodedData) { return MONGOC_SSPI_AUTH_GSS_ERROR; } } inbufSize = sizes.cbSecurityTrailer + plaintextMessageSize + sizes.cbBlockSize; inbuf = (SEC_CHAR *) malloc (inbufSize); if (inbuf == NULL) { free (decodedData); return MONGOC_SSPI_AUTH_GSS_ERROR; } plaintextMessage = inbuf + sizes.cbSecurityTrailer; if (user) { /* Authenticate the provided user. Unlike pykerberos, we don't * need any information from "data" to do that. * */ plaintextMessage[0] = 1; /* No security layer */ plaintextMessage[1] = 0; plaintextMessage[2] = 0; plaintextMessage[3] = 0; memcpy_s (plaintextMessage + 4, inbufSize - sizes.cbSecurityTrailer - 4, user, strlen (user)); } else { /* No user provided. Just rewrap data. */ memcpy_s (plaintextMessage, inbufSize - sizes.cbSecurityTrailer, decodedData, plaintextMessageSize); free (decodedData); } wrapBufDesc.cBuffers = 3; wrapBufDesc.pBuffers = wrapBufs; wrapBufDesc.ulVersion = SECBUFFER_VERSION; wrapBufs[0].cbBuffer = sizes.cbSecurityTrailer; wrapBufs[0].BufferType = SECBUFFER_TOKEN; wrapBufs[0].pvBuffer = inbuf; wrapBufs[1].cbBuffer = (ULONG) plaintextMessageSize; wrapBufs[1].BufferType = SECBUFFER_DATA; wrapBufs[1].pvBuffer = inbuf + sizes.cbSecurityTrailer; wrapBufs[2].cbBuffer = sizes.cbBlockSize; wrapBufs[2].BufferType = SECBUFFER_PADDING; wrapBufs[2].pvBuffer = inbuf + (sizes.cbSecurityTrailer + plaintextMessageSize); status = EncryptMessage ( &state->ctx, protect ? 0 : SECQOP_WRAP_NO_ENCRYPT, &wrapBufDesc, 0); if (status != SEC_E_OK) { free (inbuf); _mongoc_sspi_set_gsserror (status, "EncryptMessage"); return MONGOC_SSPI_AUTH_GSS_ERROR; } outbufSize = wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer + wrapBufs[2].cbBuffer; outbuf = (SEC_CHAR *) malloc (sizeof (SEC_CHAR) * outbufSize); memcpy_s (outbuf, outbufSize, wrapBufs[0].pvBuffer, wrapBufs[0].cbBuffer); memcpy_s (outbuf + wrapBufs[0].cbBuffer, outbufSize - wrapBufs[0].cbBuffer, wrapBufs[1].pvBuffer, wrapBufs[1].cbBuffer); memcpy_s (outbuf + wrapBufs[0].cbBuffer + wrapBufs[1].cbBuffer, outbufSize - wrapBufs[0].cbBuffer - wrapBufs[1].cbBuffer, wrapBufs[2].pvBuffer, wrapBufs[2].cbBuffer); state->response = _mongoc_sspi_base64_encode (outbuf, outbufSize); if (!state->response) { status = MONGOC_SSPI_AUTH_GSS_ERROR; } else { status = MONGOC_SSPI_AUTH_GSS_COMPLETE; } free (inbuf); free (outbuf); return status; } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-buffered.c0000644000076500000240000002305313572250760026526 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-buffer-private.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-stream-buffered.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-trace-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream" typedef struct { mongoc_stream_t stream; mongoc_stream_t *base_stream; mongoc_buffer_t buffer; } mongoc_stream_buffered_t; /* *-------------------------------------------------------------------------- * * mongoc_stream_buffered_destroy -- * * Clean up after a mongoc_stream_buffered_t. Free all allocated * resources and release the base stream. * * Returns: * None. * * Side effects: * Everything. * *-------------------------------------------------------------------------- */ static void mongoc_stream_buffered_destroy (mongoc_stream_t *stream) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; BSON_ASSERT (stream); mongoc_stream_destroy (buffered->base_stream); buffered->base_stream = NULL; _mongoc_buffer_destroy (&buffered->buffer); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); } /* *-------------------------------------------------------------------------- * * mongoc_stream_buffered_failed -- * * Called when a stream fails. Useful for streams that differnciate * between failure and cleanup. * Calls mongoc_stream_buffered_destroy() on the stream. * * Returns: * None. * * Side effects: * Everything. * *-------------------------------------------------------------------------- */ static void mongoc_stream_buffered_failed (mongoc_stream_t *stream) /* IN */ { mongoc_stream_buffered_destroy (stream); } /* *-------------------------------------------------------------------------- * * mongoc_stream_buffered_close -- * * Close the underlying stream. The buffered content is still * valid. * * Returns: * The return value of mongoc_stream_close() on the underlying * stream. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static int mongoc_stream_buffered_close (mongoc_stream_t *stream) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; BSON_ASSERT (stream); return mongoc_stream_close (buffered->base_stream); } /* *-------------------------------------------------------------------------- * * mongoc_stream_buffered_flush -- * * Flushes the underlying stream. * * Returns: * The result of flush on the base stream. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static int mongoc_stream_buffered_flush (mongoc_stream_t *stream) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; BSON_ASSERT (buffered); return mongoc_stream_flush (buffered->base_stream); } /* *-------------------------------------------------------------------------- * * mongoc_stream_buffered_writev -- * * Write an iovec to the underlying stream. This write is not * buffered, it passes through to the base stream directly. * * timeout_msec should be the number of milliseconds to wait before * considering the writev as failed. * * Returns: * The number of bytes written or -1 on failure. * * Side effects: * None. 
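 *
 *    Note (added for clarity): only the read path of this stream type is
 *    buffered; writev calls are handed directly to the base stream.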
* *-------------------------------------------------------------------------- */ static ssize_t mongoc_stream_buffered_writev (mongoc_stream_t *stream, /* IN */ mongoc_iovec_t *iov, /* IN */ size_t iovcnt, /* IN */ int32_t timeout_msec) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; ssize_t ret; ENTRY; BSON_ASSERT (buffered); ret = mongoc_stream_writev (buffered->base_stream, iov, iovcnt, timeout_msec); RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_stream_buffered_readv -- * * Read from the underlying stream. The data will be buffered based * on the buffered streams target buffer size. * * When reading from the underlying stream, we read at least the * requested number of bytes, but try to also fill the stream to * the size of the underlying buffer. * * Note: * This isn't actually a huge savings since we never have more than * one reply waiting for us, but perhaps someday that will be * different. It should help for small replies, however that will * reduce our read() syscalls by 50%. * * Returns: * The number of bytes read or -1 on failure. * * Side effects: * iov[*]->iov_base buffers are filled. * *-------------------------------------------------------------------------- */ static ssize_t mongoc_stream_buffered_readv (mongoc_stream_t *stream, /* IN */ mongoc_iovec_t *iov, /* INOUT */ size_t iovcnt, /* IN */ size_t min_bytes, /* IN */ int32_t timeout_msec) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; bson_error_t error = {0}; size_t total_bytes = 0; size_t i; size_t off = 0; ENTRY; BSON_ASSERT (buffered); for (i = 0; i < iovcnt; i++) { total_bytes += iov[i].iov_len; } if (-1 == _mongoc_buffer_fill (&buffered->buffer, buffered->base_stream, total_bytes, timeout_msec, &error)) { MONGOC_WARNING ("%s", error.message); RETURN (-1); } BSON_ASSERT (buffered->buffer.len >= total_bytes); for (i = 0; i < iovcnt; i++) { memcpy (iov[i].iov_base, buffered->buffer.data + off, iov[i].iov_len); off += iov[i].iov_len; buffered->buffer.len -= iov[i].iov_len; } memmove ( buffered->buffer.data, buffered->buffer.data + off, buffered->buffer.len); RETURN (total_bytes); } static mongoc_stream_t * _mongoc_stream_buffered_get_base_stream (mongoc_stream_t *stream) /* IN */ { return ((mongoc_stream_buffered_t *) stream)->base_stream; } static bool _mongoc_stream_buffered_check_closed (mongoc_stream_t *stream) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; BSON_ASSERT (stream); return mongoc_stream_check_closed (buffered->base_stream); } static bool _mongoc_stream_buffered_timed_out (mongoc_stream_t *stream) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; BSON_ASSERT (stream); return mongoc_stream_timed_out (buffered->base_stream); } static bool _mongoc_stream_buffered_should_retry (mongoc_stream_t *stream) /* IN */ { mongoc_stream_buffered_t *buffered = (mongoc_stream_buffered_t *) stream; BSON_ASSERT (stream); return mongoc_stream_should_retry (buffered->base_stream); } /* *-------------------------------------------------------------------------- * * mongoc_stream_buffered_new -- * * Creates a new mongoc_stream_buffered_t. * * This stream will read from an underlying stream and try to read * more data than necessary. It can help lower the number of read() * or recv() syscalls performed. * * @base_stream is considered owned by the resulting stream after * calling this function. 
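 *
 *    Illustrative call (a sketch only; 'base_stream' is assumed to be an
 *    existing mongoc_stream_t *):
 *
 *       mongoc_stream_t *buffered =
 *          mongoc_stream_buffered_new (base_stream, 4096);
 *       // do not destroy base_stream yourself afterwards; destroying
 *       // 'buffered' also destroys the wrapped base stream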
* * Returns: * A newly allocated mongoc_stream_t. * * Side effects: * None. * *-------------------------------------------------------------------------- */ mongoc_stream_t * mongoc_stream_buffered_new (mongoc_stream_t *base_stream, /* IN */ size_t buffer_size) /* IN */ { mongoc_stream_buffered_t *stream; BSON_ASSERT (base_stream); stream = (mongoc_stream_buffered_t *) bson_malloc0 (sizeof *stream); stream->stream.type = MONGOC_STREAM_BUFFERED; stream->stream.destroy = mongoc_stream_buffered_destroy; stream->stream.failed = mongoc_stream_buffered_failed; stream->stream.close = mongoc_stream_buffered_close; stream->stream.flush = mongoc_stream_buffered_flush; stream->stream.writev = mongoc_stream_buffered_writev; stream->stream.readv = mongoc_stream_buffered_readv; stream->stream.get_base_stream = _mongoc_stream_buffered_get_base_stream; stream->stream.check_closed = _mongoc_stream_buffered_check_closed; stream->stream.timed_out = _mongoc_stream_buffered_timed_out; stream->stream.should_retry = _mongoc_stream_buffered_should_retry; stream->base_stream = base_stream; _mongoc_buffer_init (&stream->buffer, NULL, buffer_size, NULL, NULL); mongoc_counter_streams_active_inc (); return (mongoc_stream_t *) stream; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-buffered.h0000644000076500000240000000172613572250760026536 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_BUFFERED_H #define MONGOC_STREAM_BUFFERED_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_buffered_new (mongoc_stream_t *base_stream, size_t buffer_size); BSON_END_DECLS #endif /* MONGOC_STREAM_BUFFERED_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-file.c0000644000076500000240000001255513572250760025670 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifdef _WIN32 #include #include #endif #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-file.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-counters-private.h" /* * TODO: This does not respect timeouts or set O_NONBLOCK. 
* But that should be fine until it isn't :-) */ struct _mongoc_stream_file_t { mongoc_stream_t vtable; int fd; }; static int _mongoc_stream_file_close (mongoc_stream_t *stream) { mongoc_stream_file_t *file = (mongoc_stream_file_t *) stream; int ret; ENTRY; BSON_ASSERT (file); if (file->fd != -1) { #ifdef _WIN32 ret = _close (file->fd); #else ret = close (file->fd); #endif file->fd = -1; RETURN (ret); } RETURN (0); } static void _mongoc_stream_file_destroy (mongoc_stream_t *stream) { mongoc_stream_file_t *file = (mongoc_stream_file_t *) stream; ENTRY; BSON_ASSERT (file); if (file->fd) { _mongoc_stream_file_close (stream); } bson_free (file); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_stream_file_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_stream_file_destroy (stream); EXIT; } static int _mongoc_stream_file_flush (mongoc_stream_t *stream) /* IN */ { mongoc_stream_file_t *file = (mongoc_stream_file_t *) stream; BSON_ASSERT (file); if (file->fd != -1) { #ifdef _WIN32 return _commit (file->fd); #else return fsync (file->fd); #endif } return 0; } static ssize_t _mongoc_stream_file_readv (mongoc_stream_t *stream, /* IN */ mongoc_iovec_t *iov, /* IN */ size_t iovcnt, /* IN */ size_t min_bytes, /* IN */ int32_t timeout_msec) /* IN */ { mongoc_stream_file_t *file = (mongoc_stream_file_t *) stream; ssize_t ret = 0; #ifdef _WIN32 ssize_t nread; size_t i; ENTRY; for (i = 0; i < iovcnt; i++) { nread = _read (file->fd, iov[i].iov_base, iov[i].iov_len); if (nread < 0) { ret = ret ? ret : -1; GOTO (done); } else if (nread == 0) { ret = ret ? ret : 0; GOTO (done); } else { ret += nread; if (nread != iov[i].iov_len) { ret = ret ? ret : -1; GOTO (done); } } } GOTO (done); #else ENTRY; ret = readv (file->fd, iov, (int) iovcnt); GOTO (done); #endif done: if (ret > 0) { mongoc_counter_streams_ingress_add (ret); } return ret; } static ssize_t _mongoc_stream_file_writev (mongoc_stream_t *stream, /* IN */ mongoc_iovec_t *iov, /* IN */ size_t iovcnt, /* IN */ int32_t timeout_msec) /* IN */ { mongoc_stream_file_t *file = (mongoc_stream_file_t *) stream; ssize_t ret = 0; #ifdef _WIN32 ssize_t nwrite; size_t i; for (i = 0; i < iovcnt; i++) { nwrite = _write (file->fd, iov[i].iov_base, iov[i].iov_len); if (nwrite != iov[i].iov_len) { ret = ret ? 
ret : -1; goto done; } ret += nwrite; } goto done; #else ret = writev (file->fd, iov, (int) iovcnt); goto done; #endif done: if (ret > 0) { mongoc_counter_streams_egress_add (ret); } return ret; } static bool _mongoc_stream_file_check_closed (mongoc_stream_t *stream) /* IN */ { return false; } mongoc_stream_t * mongoc_stream_file_new (int fd) /* IN */ { mongoc_stream_file_t *stream; BSON_ASSERT (fd != -1); stream = (mongoc_stream_file_t *) bson_malloc0 (sizeof *stream); stream->vtable.type = MONGOC_STREAM_FILE; stream->vtable.close = _mongoc_stream_file_close; stream->vtable.destroy = _mongoc_stream_file_destroy; stream->vtable.failed = _mongoc_stream_file_failed; stream->vtable.flush = _mongoc_stream_file_flush; stream->vtable.readv = _mongoc_stream_file_readv; stream->vtable.writev = _mongoc_stream_file_writev; stream->vtable.check_closed = _mongoc_stream_file_check_closed; stream->fd = fd; mongoc_counter_streams_active_inc (); return (mongoc_stream_t *) stream; } mongoc_stream_t * mongoc_stream_file_new_for_path (const char *path, /* IN */ int flags, /* IN */ int mode) /* IN */ { int fd = -1; BSON_ASSERT (path); #ifdef _WIN32 if (_sopen_s (&fd, path, (flags | _O_BINARY), _SH_DENYNO, mode) != 0) { fd = -1; } #else fd = open (path, flags, mode); #endif if (fd == -1) { return NULL; } return mongoc_stream_file_new (fd); } int mongoc_stream_file_get_fd (mongoc_stream_file_t *stream) { BSON_ASSERT (stream); return stream->fd; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-file.h0000644000076500000240000000217213572250760025667 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_FILE_H #define MONGOC_STREAM_FILE_H #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS typedef struct _mongoc_stream_file_t mongoc_stream_file_t; MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_file_new (int fd); MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_file_new_for_path (const char *path, int flags, int mode); MONGOC_EXPORT (int) mongoc_stream_file_get_fd (mongoc_stream_file_t *stream); BSON_END_DECLS #endif /* MONGOC_STREAM_FILE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-gridfs-download-private.h0000644000076500000240000000206213572250760031501 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_GRIDFS_DOWNLOAD_PRIVATE_H #define MONGOC_STREAM_GRIDFS_DOWNLOAD_PRIVATE_H #include "mongoc-stream.h" #include "mongoc-gridfs-bucket-file-private.h" typedef struct { mongoc_stream_t stream; mongoc_gridfs_bucket_file_t *file; } mongoc_gridfs_download_stream_t; mongoc_stream_t * _mongoc_download_stream_gridfs_new (mongoc_gridfs_bucket_file_t *file); #endif /* MONGOC_STREAM_GRIDFS_DOWNLOAD_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-gridfs-download.c0000644000076500000240000000674113572250760030034 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc-stream-gridfs-download-private.h" #include "mongoc-gridfs-bucket-file-private.h" #include "mongoc-counters-private.h" #include "mongoc-trace-private.h" #include "mongoc-stream-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-gridfs-download" static void _mongoc_download_stream_gridfs_destroy (mongoc_stream_t *stream) { mongoc_gridfs_download_stream_t *gridfs = (mongoc_gridfs_download_stream_t *) stream; ENTRY; BSON_ASSERT (stream); mongoc_stream_close (stream); _mongoc_gridfs_bucket_file_destroy (gridfs->file); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_download_stream_gridfs_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_download_stream_gridfs_destroy (stream); EXIT; } static int _mongoc_download_stream_gridfs_close (mongoc_stream_t *stream) { mongoc_gridfs_download_stream_t *gridfs = (mongoc_gridfs_download_stream_t *) stream; int ret = 0; ENTRY; BSON_ASSERT (stream); gridfs->file->finished = true; RETURN (ret); } static ssize_t _mongoc_download_stream_gridfs_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { mongoc_gridfs_download_stream_t *gridfs = (mongoc_gridfs_download_stream_t *) stream; ssize_t ret = 0; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); (void) min_bytes; /* unused. */ (void) timeout_msec; /* unused. 
*/ /* timeout_msec is unused by mongoc_gridfs_bucket_file_readv */ ret = _mongoc_gridfs_bucket_file_readv (gridfs->file, iov, iovcnt); mongoc_counter_streams_ingress_add (ret); RETURN (ret); } static bool _mongoc_download_stream_gridfs_check_closed (mongoc_stream_t *stream) /* IN */ { mongoc_gridfs_download_stream_t *gridfs = (mongoc_gridfs_download_stream_t *) stream; ENTRY; BSON_ASSERT (stream); RETURN (gridfs->file->finished); } mongoc_stream_t * _mongoc_download_stream_gridfs_new (mongoc_gridfs_bucket_file_t *file) { mongoc_gridfs_download_stream_t *stream; ENTRY; BSON_ASSERT (file); stream = (mongoc_gridfs_download_stream_t *) bson_malloc0 (sizeof *stream); stream->file = file; stream->stream.type = MONGOC_STREAM_GRIDFS_DOWNLOAD; stream->stream.destroy = _mongoc_download_stream_gridfs_destroy; stream->stream.failed = _mongoc_download_stream_gridfs_failed; stream->stream.close = _mongoc_download_stream_gridfs_close; stream->stream.readv = _mongoc_download_stream_gridfs_readv; stream->stream.check_closed = _mongoc_download_stream_gridfs_check_closed; mongoc_counter_streams_active_inc (); RETURN ((mongoc_stream_t *) stream); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-gridfs-upload-private.h0000644000076500000240000000205013572250760031153 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_GRIDFS_UPLOAD_PRIVATE_H #define MONGOC_STREAM_GRIDFS_UPLOAD_PRIVATE_H #include "mongoc-gridfs-bucket-file-private.h" #include "mongoc-stream.h" typedef struct { mongoc_stream_t stream; mongoc_gridfs_bucket_file_t *file; } mongoc_gridfs_upload_stream_t; mongoc_stream_t * _mongoc_upload_stream_gridfs_new (mongoc_gridfs_bucket_file_t *file); #endif /* MONGOC_STREAM_GRIDFS_UPLOAD_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-gridfs-upload.c0000644000076500000240000000656313572250760027513 0ustar alcaeusstaff/* * Copyright 2018-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-gridfs-bucket-file-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-stream-gridfs-upload-private.h" #include "mongoc/mongoc-stream-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-gridfs-upload" static void _mongoc_upload_stream_gridfs_destroy (mongoc_stream_t *stream) { mongoc_gridfs_upload_stream_t *gridfs = (mongoc_gridfs_upload_stream_t *) stream; ENTRY; BSON_ASSERT (stream); mongoc_stream_close (stream); _mongoc_gridfs_bucket_file_destroy (gridfs->file); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_upload_stream_gridfs_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_upload_stream_gridfs_destroy (stream); EXIT; } static int _mongoc_upload_stream_gridfs_close (mongoc_stream_t *stream) { mongoc_gridfs_upload_stream_t *gridfs = (mongoc_gridfs_upload_stream_t *) stream; int ret = 0; ENTRY; BSON_ASSERT (stream); ret = _mongoc_gridfs_bucket_file_save (gridfs->file); RETURN (ret ? 0 : 1); } static ssize_t _mongoc_upload_stream_gridfs_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { mongoc_gridfs_upload_stream_t *gridfs = (mongoc_gridfs_upload_stream_t *) stream; ssize_t ret = 0; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); (void) timeout_msec; /* unused. */ ret = _mongoc_gridfs_bucket_file_writev (gridfs->file, iov, iovcnt); if (!ret) { RETURN (ret); } mongoc_counter_streams_egress_add (ret); RETURN (ret); } static bool _mongoc_upload_stream_gridfs_check_closed (mongoc_stream_t *stream) /* IN */ { mongoc_gridfs_upload_stream_t *gridfs = (mongoc_gridfs_upload_stream_t *) stream; ENTRY; BSON_ASSERT (stream); RETURN (gridfs->file->saved); } mongoc_stream_t * _mongoc_upload_stream_gridfs_new (mongoc_gridfs_bucket_file_t *file) { mongoc_gridfs_upload_stream_t *stream; ENTRY; BSON_ASSERT (file); stream = (mongoc_gridfs_upload_stream_t *) bson_malloc0 (sizeof *stream); stream->file = file; stream->stream.type = MONGOC_STREAM_GRIDFS_UPLOAD; stream->stream.destroy = _mongoc_upload_stream_gridfs_destroy; stream->stream.failed = _mongoc_upload_stream_gridfs_failed; stream->stream.close = _mongoc_upload_stream_gridfs_close; stream->stream.writev = _mongoc_upload_stream_gridfs_writev; stream->stream.check_closed = _mongoc_upload_stream_gridfs_check_closed; mongoc_counter_streams_active_inc (); RETURN ((mongoc_stream_t *) stream); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-gridfs.c0000644000076500000240000001007313572250760026220 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-gridfs-file-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-stream-gridfs.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-gridfs" typedef struct { mongoc_stream_t stream; mongoc_gridfs_file_t *file; } mongoc_stream_gridfs_t; static void _mongoc_stream_gridfs_destroy (mongoc_stream_t *stream) { ENTRY; BSON_ASSERT (stream); mongoc_stream_close (stream); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_stream_gridfs_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_stream_gridfs_destroy (stream); EXIT; } static int _mongoc_stream_gridfs_close (mongoc_stream_t *stream) { mongoc_stream_gridfs_t *gridfs = (mongoc_stream_gridfs_t *) stream; int ret = 0; ENTRY; BSON_ASSERT (stream); ret = mongoc_gridfs_file_save (gridfs->file); RETURN (ret); } static int _mongoc_stream_gridfs_flush (mongoc_stream_t *stream) { mongoc_stream_gridfs_t *gridfs = (mongoc_stream_gridfs_t *) stream; int ret = 0; ENTRY; BSON_ASSERT (stream); ret = mongoc_gridfs_file_save (gridfs->file); RETURN (ret); } static ssize_t _mongoc_stream_gridfs_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { mongoc_stream_gridfs_t *file = (mongoc_stream_gridfs_t *) stream; ssize_t ret = 0; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); /* timeout_msec is unused by mongoc_gridfs_file_readv */ ret = mongoc_gridfs_file_readv (file->file, iov, iovcnt, min_bytes, 0); mongoc_counter_streams_ingress_add (ret); RETURN (ret); } static ssize_t _mongoc_stream_gridfs_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { mongoc_stream_gridfs_t *file = (mongoc_stream_gridfs_t *) stream; ssize_t ret = 0; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); /* timeout_msec is unused by mongoc_gridfs_file_writev */ ret = mongoc_gridfs_file_writev (file->file, iov, iovcnt, 0); if (!ret) { RETURN (ret); } mongoc_counter_streams_egress_add (ret); RETURN (ret); } static bool _mongoc_stream_gridfs_check_closed (mongoc_stream_t *stream) /* IN */ { return false; } mongoc_stream_t * mongoc_stream_gridfs_new (mongoc_gridfs_file_t *file) { mongoc_stream_gridfs_t *stream; ENTRY; BSON_ASSERT (file); stream = (mongoc_stream_gridfs_t *) bson_malloc0 (sizeof *stream); stream->file = file; stream->stream.type = MONGOC_STREAM_GRIDFS; stream->stream.destroy = _mongoc_stream_gridfs_destroy; stream->stream.failed = _mongoc_stream_gridfs_failed; stream->stream.close = _mongoc_stream_gridfs_close; stream->stream.flush = _mongoc_stream_gridfs_flush; stream->stream.writev = _mongoc_stream_gridfs_writev; stream->stream.readv = _mongoc_stream_gridfs_readv; stream->stream.check_closed = _mongoc_stream_gridfs_check_closed; mongoc_counter_streams_active_inc (); RETURN ((mongoc_stream_t *) stream); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-gridfs.h0000644000076500000240000000173113572250760026226 0ustar alcaeusstaff/* * Copyright 2013 MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_GRIDFS_H #define MONGOC_STREAM_GRIDFS_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_gridfs_new (mongoc_gridfs_file_t *file); BSON_END_DECLS #endif /* MONGOC_STREAM_GRIDFS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-private.h0000644000076500000240000000270113572250760026420 0ustar alcaeusstaff/* * Copyright 2013-2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_PRIVATE_H #define MONGOC_STREAM_PRIVATE_H #include "mongoc/mongoc-iovec.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS #define MONGOC_STREAM_SOCKET 1 #define MONGOC_STREAM_FILE 2 #define MONGOC_STREAM_BUFFERED 3 #define MONGOC_STREAM_GRIDFS 4 #define MONGOC_STREAM_TLS 5 #define MONGOC_STREAM_GRIDFS_UPLOAD 6 #define MONGOC_STREAM_GRIDFS_DOWNLOAD 7 bool mongoc_stream_wait (mongoc_stream_t *stream, int64_t expire_at); bool _mongoc_stream_writev_full (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec, bson_error_t *error); mongoc_stream_t * mongoc_stream_get_root_stream (mongoc_stream_t *stream); BSON_END_DECLS #endif /* MONGOC_STREAM_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-socket.c0000644000076500000240000001732113572250760026235 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-socket.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-socket-private.h" #include "mongoc/mongoc-errno-private.h" #include "mongoc/mongoc-counters-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream" struct _mongoc_stream_socket_t { mongoc_stream_t vtable; mongoc_socket_t *sock; }; static BSON_INLINE int64_t get_expiration (int32_t timeout_msec) { if (timeout_msec < 0) { return -1; } else if (timeout_msec == 0) { return 0; } else { return (bson_get_monotonic_time () + ((int64_t) timeout_msec * 1000L)); } } static int _mongoc_stream_socket_close (mongoc_stream_t *stream) { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; int ret; ENTRY; BSON_ASSERT (ss); if (ss->sock) { ret = mongoc_socket_close (ss->sock); RETURN (ret); } RETURN (0); } static void _mongoc_stream_socket_destroy (mongoc_stream_t *stream) { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; ENTRY; BSON_ASSERT (ss); if (ss->sock) { mongoc_socket_destroy (ss->sock); ss->sock = NULL; } bson_free (ss); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_stream_socket_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_stream_socket_destroy (stream); EXIT; } static int _mongoc_stream_socket_setsockopt (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen) { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; int ret; ENTRY; BSON_ASSERT (ss); BSON_ASSERT (ss->sock); ret = mongoc_socket_setsockopt (ss->sock, level, optname, optval, optlen); RETURN (ret); } static int _mongoc_stream_socket_flush (mongoc_stream_t *stream) { ENTRY; RETURN (0); } static ssize_t _mongoc_stream_socket_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; int64_t expire_at; ssize_t ret = 0; ssize_t nread; size_t cur = 0; ENTRY; BSON_ASSERT (ss); BSON_ASSERT (ss->sock); expire_at = get_expiration (timeout_msec); /* * This isn't ideal, we should plumb through to recvmsg(), but we * don't actually use this in any way but to a single buffer * currently anyway, so should be just fine. 
*/ for (;;) { nread = mongoc_socket_recv ( ss->sock, iov[cur].iov_base, iov[cur].iov_len, 0, expire_at); if (nread <= 0) { if (ret >= (ssize_t) min_bytes) { RETURN (ret); } errno = mongoc_socket_errno (ss->sock); RETURN (-1); } ret += nread; while ((cur < iovcnt) && (nread >= (ssize_t) iov[cur].iov_len)) { nread -= iov[cur++].iov_len; } if (cur == iovcnt) { break; } if (ret >= (ssize_t) min_bytes) { RETURN (ret); } iov[cur].iov_base = ((char *) iov[cur].iov_base) + nread; iov[cur].iov_len -= nread; BSON_ASSERT (iovcnt - cur); BSON_ASSERT (iov[cur].iov_len); } RETURN (ret); } static ssize_t _mongoc_stream_socket_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; int64_t expire_at; ssize_t ret; ENTRY; if (ss->sock) { expire_at = get_expiration (timeout_msec); ret = mongoc_socket_sendv (ss->sock, iov, iovcnt, expire_at); errno = mongoc_socket_errno (ss->sock); RETURN (ret); } RETURN (-1); } static ssize_t _mongoc_stream_socket_poll (mongoc_stream_poll_t *streams, size_t nstreams, int32_t timeout_msec) { int i; ssize_t ret = -1; mongoc_socket_poll_t *sds; mongoc_stream_socket_t *ss; ENTRY; sds = (mongoc_socket_poll_t *) bson_malloc (sizeof (*sds) * nstreams); for (i = 0; i < nstreams; i++) { ss = (mongoc_stream_socket_t *) streams[i].stream; if (!ss->sock) { goto CLEANUP; } sds[i].socket = ss->sock; sds[i].events = streams[i].events; } ret = mongoc_socket_poll (sds, nstreams, timeout_msec); if (ret > 0) { for (i = 0; i < nstreams; i++) { streams[i].revents = sds[i].revents; } } CLEANUP: bson_free (sds); RETURN (ret); } mongoc_socket_t * mongoc_stream_socket_get_socket (mongoc_stream_socket_t *stream) /* IN */ { BSON_ASSERT (stream); return stream->sock; } static bool _mongoc_stream_socket_check_closed (mongoc_stream_t *stream) /* IN */ { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; ENTRY; BSON_ASSERT (stream); if (ss->sock) { RETURN (mongoc_socket_check_closed (ss->sock)); } RETURN (true); } static bool _mongoc_stream_socket_timed_out (mongoc_stream_t *stream) /* IN */ { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; ENTRY; BSON_ASSERT (ss); BSON_ASSERT (ss->sock); RETURN (MONGOC_ERRNO_IS_TIMEDOUT (ss->sock->errno_)); } static bool _mongoc_stream_socket_should_retry (mongoc_stream_t *stream) /* IN */ { mongoc_stream_socket_t *ss = (mongoc_stream_socket_t *) stream; ENTRY; BSON_ASSERT (ss); BSON_ASSERT (ss->sock); RETURN (MONGOC_ERRNO_IS_AGAIN (ss->sock->errno_)); } /* *-------------------------------------------------------------------------- * * mongoc_stream_socket_new -- * * Create a new mongoc_stream_t using the mongoc_socket_t for * read and write underneath. * * Returns: * None. * * Side effects: * None. 
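 *
 * Example (editorial sketch, not from the original source; assumes an
 * already-connected socket and omits error handling):
 *
 *    mongoc_socket_t *sock = mongoc_socket_new (AF_INET, SOCK_STREAM, 0);
 *    // ... mongoc_socket_connect (sock, addr, addrlen, expire_at) ...
 *
 *    mongoc_stream_t *stream = mongoc_stream_socket_new (sock);
 *
 *    mongoc_iovec_t iov;
 *    char msg[] = "ping";
 *    iov.iov_base = msg;
 *    iov.iov_len = sizeof msg - 1;
 *    mongoc_stream_writev (stream, &iov, 1, 1000);  // timeout_msec
 *
 *    mongoc_stream_destroy (stream);  // also destroys the wrapped socket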
* *-------------------------------------------------------------------------- */ mongoc_stream_t * mongoc_stream_socket_new (mongoc_socket_t *sock) /* IN */ { mongoc_stream_socket_t *stream; BSON_ASSERT (sock); stream = (mongoc_stream_socket_t *) bson_malloc0 (sizeof *stream); stream->vtable.type = MONGOC_STREAM_SOCKET; stream->vtable.close = _mongoc_stream_socket_close; stream->vtable.destroy = _mongoc_stream_socket_destroy; stream->vtable.failed = _mongoc_stream_socket_failed; stream->vtable.flush = _mongoc_stream_socket_flush; stream->vtable.readv = _mongoc_stream_socket_readv; stream->vtable.writev = _mongoc_stream_socket_writev; stream->vtable.setsockopt = _mongoc_stream_socket_setsockopt; stream->vtable.check_closed = _mongoc_stream_socket_check_closed; stream->vtable.timed_out = _mongoc_stream_socket_timed_out; stream->vtable.should_retry = _mongoc_stream_socket_should_retry; stream->vtable.poll = _mongoc_stream_socket_poll; stream->sock = sock; mongoc_counter_streams_active_inc (); return (mongoc_stream_t *) stream; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-socket.h0000644000076500000240000000214413572250760026237 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_SOCKET_H #define MONGOC_STREAM_SOCKET_H #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-socket.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS typedef struct _mongoc_stream_socket_t mongoc_stream_socket_t; MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_socket_new (mongoc_socket_t *socket); MONGOC_EXPORT (mongoc_socket_t *) mongoc_stream_socket_get_socket (mongoc_stream_socket_t *stream); BSON_END_DECLS #endif /* MONGOC_STREAM_SOCKET_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl-private.h0000644000076500000240000000212513572250760031035 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_LIBRESSL_PRIVATE_H #define MONGOC_STREAM_TLS_LIBRESSL_PRIVATE_H #ifdef MONGOC_ENABLE_SSL_LIBRESSL #include #include BSON_BEGIN_DECLS /** * mongoc_stream_tls_libressl_t: * * Private storage for LibreSSL Streams */ typedef struct { struct tls *ctx; struct tls_config *config; } mongoc_stream_tls_libressl_t; BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_LIBRESSL */ #endif /* MONGOC_STREAM_TLS_LIBRESSL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.c0000644000076500000240000003706613572250760027374 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_LIBRESSL #include #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-tls-libressl-private.h" #include "mongoc/mongoc-libressl-private.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-stream-socket.h" #include "mongoc/mongoc-socket-private.h" #include #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-tls-libressl" static void _mongoc_stream_tls_libressl_destroy (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; ENTRY; BSON_ASSERT (libressl); tls_close (libressl->ctx); tls_free (libressl->ctx); tls_config_free (libressl->config); mongoc_stream_destroy (tls->base_stream); bson_free (libressl); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_stream_tls_libressl_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_stream_tls_libressl_destroy (stream); EXIT; } static int _mongoc_stream_tls_libressl_close (mongoc_stream_t *stream) { int ret = 0; mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; ENTRY; BSON_ASSERT (libressl); ret = mongoc_stream_close (tls->base_stream); RETURN (ret); } static int _mongoc_stream_tls_libressl_flush (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; ENTRY; BSON_ASSERT (libressl); RETURN (0); } static ssize_t _mongoc_stream_tls_libressl_write (mongoc_stream_t *stream, char *buf, size_t buf_len) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; mongoc_stream_poll_t poller; ssize_t total_write = 0; ssize_t ret; int64_t now; int64_t expire = 0; ENTRY; BSON_ASSERT (libressl); if (tls->timeout_msec >= 0) { expire = bson_get_monotonic_time () + (tls->timeout_msec * 1000UL); 
} do { poller.stream = stream; poller.revents = 0; poller.events = POLLOUT; ret = tls_write (libressl->ctx, buf, buf_len); if (ret == TLS_WANT_POLLIN) { poller.events = POLLIN; mongoc_stream_poll (&poller, 1, tls->timeout_msec); } else if (ret == TLS_WANT_POLLOUT) { poller.events = POLLOUT; mongoc_stream_poll (&poller, 1, tls->timeout_msec); } else if (ret < 0) { RETURN (total_write); } else { buf += ret; buf_len -= ret; total_write += ret; } if (expire) { now = bson_get_monotonic_time (); if ((expire - now) < 0) { if (ret == 0) { mongoc_counter_streams_timeout_inc (); break; } tls->timeout_msec = 0; } else { tls->timeout_msec = (expire - now) / 1000L; } } } while (buf_len > 0); RETURN (total_write); } /* This is copypasta from _mongoc_stream_tls_openssl_writev */ #define MONGOC_STREAM_TLS_BUFFER_SIZE 4096 static ssize_t _mongoc_stream_tls_libressl_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; char buf[MONGOC_STREAM_TLS_BUFFER_SIZE]; ssize_t ret = 0; ssize_t child_ret; size_t i; size_t iov_pos = 0; /* There's a bit of a dance to coalesce vectorized writes into * MONGOC_STREAM_TLS_BUFFER_SIZE'd writes to avoid lots of small tls * packets. * * The basic idea is that we want to combine writes in the buffer if they're * smaller than the buffer, flushing as it gets full. For larger writes, or * the last write in the iovec array, we want to ignore the buffer and just * write immediately. We take care of doing buffer writes by re-invoking * ourself with a single iovec_t, pointing at our stack buffer. */ char *buf_head = buf; char *buf_tail = buf; char *buf_end = buf + MONGOC_STREAM_TLS_BUFFER_SIZE; size_t bytes; char *to_write = NULL; size_t to_write_len; BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (libressl); ENTRY; tls->timeout_msec = timeout_msec; for (i = 0; i < iovcnt; i++) { iov_pos = 0; while (iov_pos < iov[i].iov_len) { if (buf_head != buf_tail || ((i + 1 < iovcnt) && ((buf_end - buf_tail) > (iov[i].iov_len - iov_pos)))) { /* If we have either of: * - buffered bytes already * - another iovec to send after this one and we don't have more * bytes to send than the size of the buffer. 
* * copy into the buffer */ bytes = BSON_MIN (iov[i].iov_len - iov_pos, buf_end - buf_tail); memcpy (buf_tail, (char *) iov[i].iov_base + iov_pos, bytes); buf_tail += bytes; iov_pos += bytes; if (buf_tail == buf_end) { /* If we're full, request send */ to_write = buf_head; to_write_len = buf_tail - buf_head; buf_tail = buf_head = buf; } } else { /* Didn't buffer, so just write it through */ to_write = (char *) iov[i].iov_base + iov_pos; to_write_len = iov[i].iov_len - iov_pos; iov_pos += to_write_len; } if (to_write) { /* We get here if we buffered some bytes and filled the buffer, or * if we didn't buffer and have to send out of the iovec */ child_ret = _mongoc_stream_tls_libressl_write ( stream, to_write, to_write_len); if (child_ret < 0) { RETURN (ret); } ret += child_ret; if (child_ret < to_write_len) { /* we timed out, so send back what we could send */ RETURN (ret); } to_write = NULL; } } } if (buf_head != buf_tail) { /* If we have any bytes buffered, send */ child_ret = _mongoc_stream_tls_libressl_write ( stream, buf_head, buf_tail - buf_head); if (child_ret < 0) { RETURN (child_ret); } ret += child_ret; } if (ret >= 0) { mongoc_counter_streams_egress_add (ret); } TRACE ("Returning %d", (int) ret); RETURN (ret); } /* This function is copypasta of _mongoc_stream_tls_openssl_readv */ static ssize_t _mongoc_stream_tls_libressl_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; ssize_t ret = 0; ssize_t read_ret; size_t i; size_t iov_pos = 0; int64_t now; int64_t expire = 0; mongoc_stream_poll_t poller; BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (libressl); ENTRY; tls->timeout_msec = timeout_msec; if (timeout_msec >= 0) { expire = bson_get_monotonic_time () + (timeout_msec * 1000UL); } for (i = 0; i < iovcnt; i++) { iov_pos = 0; while (iov_pos < iov[i].iov_len) { poller.stream = stream; poller.revents = 0; poller.events = POLLIN; read_ret = tls_read (libressl->ctx, (char *) iov[i].iov_base + iov_pos, (int) (iov[i].iov_len - iov_pos)); if (read_ret == TLS_WANT_POLLIN) { poller.events = POLLIN; mongoc_stream_poll (&poller, 1, tls->timeout_msec); } else if (read_ret == TLS_WANT_POLLOUT) { poller.events = POLLOUT; mongoc_stream_poll (&poller, 1, tls->timeout_msec); } else if (read_ret < 0) { RETURN (ret); } else { iov_pos += read_ret; ret += read_ret; } if (expire) { now = bson_get_monotonic_time (); if ((expire - now) < 0) { if (read_ret == 0) { mongoc_counter_streams_timeout_inc (); errno = ETIMEDOUT; RETURN (-1); } tls->timeout_msec = 0; } else { tls->timeout_msec = (expire - now) / 1000L; } } if (ret > 0 && (size_t) ret >= min_bytes) { mongoc_counter_streams_ingress_add (ret); RETURN (ret); } } } if (ret >= 0) { mongoc_counter_streams_ingress_add (ret); } RETURN (ret); } static int _mongoc_stream_tls_libressl_setsockopt (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; ENTRY; BSON_ASSERT (libressl); RETURN (mongoc_stream_setsockopt ( tls->base_stream, level, optname, optval, optlen)); } static mongoc_stream_t * _mongoc_stream_tls_libressl_get_base_stream (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = 
(mongoc_stream_tls_libressl_t *) tls->ctx; ENTRY; BSON_ASSERT (libressl); RETURN (tls->base_stream); } static bool _mongoc_stream_tls_libressl_check_closed (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; ENTRY; BSON_ASSERT (libressl); RETURN (mongoc_stream_check_closed (tls->base_stream)); } bool mongoc_stream_tls_libressl_handshake (mongoc_stream_t *stream, const char *host, int *events, bson_error_t *error) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_libressl_t *libressl = (mongoc_stream_tls_libressl_t *) tls->ctx; int ret; ENTRY; BSON_ASSERT (libressl); ret = tls_handshake (libressl->ctx); if (ret == TLS_WANT_POLLIN) { *events = POLLIN; } else if (ret == TLS_WANT_POLLOUT) { *events = POLLOUT; } else if (ret < 0) { *events = 0; bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "TLS handshake failed: %s", tls_error (libressl->ctx)); RETURN (false); } else { RETURN (true); } RETURN (false); } static bool _mongoc_stream_tls_libressl_timed_out (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; ENTRY; RETURN (mongoc_stream_timed_out (tls->base_stream)); } static bool _mongoc_stream_tls_libressl_should_retry (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; ENTRY; RETURN (mongoc_stream_should_retry (tls->base_stream)); } mongoc_stream_t * mongoc_stream_tls_libressl_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client) { mongoc_stream_tls_t *tls; mongoc_stream_tls_libressl_t *libressl; ENTRY; BSON_ASSERT (base_stream); BSON_ASSERT (opt); if (opt->crl_file) { MONGOC_ERROR ( "Setting mongoc_ssl_opt_t.crl_file has no effect when built " "against libtls"); RETURN (false); } libressl = (mongoc_stream_tls_libressl_t *) bson_malloc0 (sizeof *libressl); tls = (mongoc_stream_tls_t *) bson_malloc0 (sizeof *tls); tls->parent.type = MONGOC_STREAM_TLS; tls->parent.destroy = _mongoc_stream_tls_libressl_destroy; tls->parent.failed = _mongoc_stream_tls_libressl_failed; tls->parent.close = _mongoc_stream_tls_libressl_close; tls->parent.flush = _mongoc_stream_tls_libressl_flush; tls->parent.writev = _mongoc_stream_tls_libressl_writev; tls->parent.readv = _mongoc_stream_tls_libressl_readv; tls->parent.setsockopt = _mongoc_stream_tls_libressl_setsockopt; tls->parent.get_base_stream = _mongoc_stream_tls_libressl_get_base_stream; tls->parent.check_closed = _mongoc_stream_tls_libressl_check_closed; tls->parent.timed_out = _mongoc_stream_tls_libressl_timed_out; tls->parent.should_retry = _mongoc_stream_tls_libressl_should_retry; memcpy (&tls->ssl_opts, opt, sizeof tls->ssl_opts); tls->handshake = mongoc_stream_tls_libressl_handshake; tls->ctx = (void *) libressl; tls->timeout_msec = -1; tls->base_stream = base_stream; libressl->ctx = client ? 
tls_client () : tls_server (); libressl->config = tls_config_new (); if (opt->weak_cert_validation) { tls_config_insecure_noverifycert (libressl->config); tls_config_insecure_noverifytime (libressl->config); } if (opt->allow_invalid_hostname) { tls_config_insecure_noverifyname (libressl->config); } tls_config_set_ciphers (libressl->config, "compat"); mongoc_libressl_setup_certificate (libressl, opt); mongoc_libressl_setup_ca (libressl, opt); { mongoc_stream_t *stream = base_stream; do { if (stream->type == MONGOC_STREAM_SOCKET) { int socket = mongoc_stream_socket_get_socket ( (mongoc_stream_socket_t *) stream) ->sd; if (tls_configure (libressl->ctx, libressl->config) == -1) { MONGOC_ERROR ("%s", tls_config_error (libressl->config)); RETURN (false); } if (tls_connect_socket (libressl->ctx, socket, host) == -1) { MONGOC_ERROR ("%s", tls_error (libressl->ctx)); RETURN (false); } break; } } while ((stream = mongoc_stream_get_base_stream (stream))); } mongoc_counter_streams_active_inc (); RETURN ((mongoc_stream_t *) tls); } #endif /* MONGOC_ENABLE_SSL_LIBRESSL */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-libressl.h0000644000076500000240000000221413572250760027364 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_LIBRESSL_H #define MONGOC_STREAM_TLS_LIBRESSL_H #ifdef MONGOC_ENABLE_SSL_LIBRESSL #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_tls_libressl_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client); BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_LIBRESSL */ #endif /* MONGOC_STREAM_TLS_LIBRESSL_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio-private.h0000644000076500000240000000305513572250760031453 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_OPENSSL_BIO_PRIVATE_H #define MONGOC_STREAM_TLS_OPENSSL_BIO_PRIVATE_H #ifdef MONGOC_ENABLE_SSL_OPENSSL #include #include #include #include BSON_BEGIN_DECLS BIO_METHOD * mongoc_stream_tls_openssl_bio_meth_new (); void mongoc_stream_tls_openssl_bio_set_data (BIO *b, void *ptr); int mongoc_stream_tls_openssl_bio_create (BIO *b); int mongoc_stream_tls_openssl_bio_destroy (BIO *b); int mongoc_stream_tls_openssl_bio_read (BIO *b, char *buf, int len); int mongoc_stream_tls_openssl_bio_write (BIO *b, const char *buf, int len); long mongoc_stream_tls_openssl_bio_ctrl (BIO *b, int cmd, long num, void *ptr); int mongoc_stream_tls_openssl_bio_gets (BIO *b, char *buf, int len); int mongoc_stream_tls_openssl_bio_puts (BIO *b, const char *str); BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_OPENSSL */ #endif /* MONGOC_STREAM_TLS_OPENSSL_BIO_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-bio.c0000644000076500000240000002030613572250760027774 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_OPENSSL #include #include #include #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-errno-private.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-stream-tls-openssl-bio-private.h" #include "mongoc/mongoc-stream-tls-openssl-private.h" #include "mongoc/mongoc-openssl-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-log.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-tls-openssl-bio" #if OPENSSL_VERSION_NUMBER < 0x10100000L || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) /* Magic vtable to make our BIO shim */ static BIO_METHOD gMongocStreamTlsOpenSslRawMethods = { BIO_TYPE_FILTER, "mongoc-stream-tls-glue", mongoc_stream_tls_openssl_bio_write, mongoc_stream_tls_openssl_bio_read, mongoc_stream_tls_openssl_bio_puts, mongoc_stream_tls_openssl_bio_gets, mongoc_stream_tls_openssl_bio_ctrl, mongoc_stream_tls_openssl_bio_create, mongoc_stream_tls_openssl_bio_destroy, NULL}; static void BIO_set_data (BIO *b, void *ptr) { b->ptr = ptr; } static void * BIO_get_data (BIO *b) { return b->ptr; } static void BIO_set_init (BIO *b, int init) { b->init = init; } BIO_METHOD * mongoc_stream_tls_openssl_bio_meth_new () { BIO_METHOD *meth = NULL; meth = &gMongocStreamTlsOpenSslRawMethods; return meth; } #else BIO_METHOD * mongoc_stream_tls_openssl_bio_meth_new () { BIO_METHOD *meth = NULL; meth = BIO_meth_new (BIO_TYPE_FILTER, "mongoc-stream-tls-glue"); if (meth) { BIO_meth_set_write (meth, mongoc_stream_tls_openssl_bio_write); BIO_meth_set_read (meth, mongoc_stream_tls_openssl_bio_read); BIO_meth_set_puts (meth, mongoc_stream_tls_openssl_bio_puts); BIO_meth_set_gets (meth, mongoc_stream_tls_openssl_bio_gets); BIO_meth_set_ctrl (meth, 
mongoc_stream_tls_openssl_bio_ctrl); BIO_meth_set_create (meth, mongoc_stream_tls_openssl_bio_create); BIO_meth_set_destroy (meth, mongoc_stream_tls_openssl_bio_destroy); } return meth; } #endif void mongoc_stream_tls_openssl_bio_set_data (BIO *b, void *ptr) { BIO_set_data (b, ptr); } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_bio_create -- * * BIO callback to create a new BIO instance. * * Returns: * 1 if successful. * * Side effects: * @b is initialized. * *-------------------------------------------------------------------------- */ int mongoc_stream_tls_openssl_bio_create (BIO *b) { BSON_ASSERT (b); BIO_set_init (b, 1); BIO_set_data (b, NULL); BIO_set_flags (b, 0); return 1; } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_bio_destroy -- * * Release resources associated with BIO. * * Returns: * 1 if successful. * * Side effects: * @b is destroyed. * *-------------------------------------------------------------------------- */ int mongoc_stream_tls_openssl_bio_destroy (BIO *b) { mongoc_stream_tls_t *tls; BSON_ASSERT (b); tls = (mongoc_stream_tls_t *) BIO_get_data (b); if (!tls) { return -1; } BIO_set_data (b, NULL); BIO_set_init (b, 0); BIO_set_flags (b, 0); ((mongoc_stream_tls_openssl_t *) tls->ctx)->bio = NULL; return 1; } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_bio_read -- * * Read from the underlying stream to BIO. * * Returns: * -1 on failure; otherwise the number of bytes read. * * Side effects: * @buf is filled with data read from underlying stream. * *-------------------------------------------------------------------------- */ int mongoc_stream_tls_openssl_bio_read (BIO *b, char *buf, int len) { mongoc_stream_tls_t *tls; mongoc_stream_tls_openssl_t *openssl; int ret; BSON_ASSERT (b); BSON_ASSERT (buf); ENTRY; tls = (mongoc_stream_tls_t *) BIO_get_data (b); if (!tls) { RETURN (-1); } openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; errno = 0; ret = (int) mongoc_stream_read ( tls->base_stream, buf, len, 0, tls->timeout_msec); BIO_clear_retry_flags (b); if ((ret <= 0) && MONGOC_ERRNO_IS_AGAIN (errno)) { /* this BIO is not the same as "b", which openssl passed in to this func. * set its retry flag, which we check with BIO_should_retry in * mongoc-stream-tls-openssl.c */ BIO_set_retry_read (openssl->bio); } RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_bio_write -- * * Write to the underlying stream on behalf of BIO. * * Returns: * -1 on failure; otherwise the number of bytes written. * * Side effects: * None. 
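 *
 * Editorial note (sketch; it mirrors the wiring done later in
 * mongoc_stream_tls_openssl_new below): the mongoc_stream_tls_t fetched
 * in these callbacks via BIO_get_data () is attached when the glue BIO
 * is pushed underneath the SSL BIO:
 *
 *    BIO_METHOD *meth = mongoc_stream_tls_openssl_bio_meth_new ();
 *    BIO *bio_ssl = BIO_new_ssl (ssl_ctx, 1);       // 1 = client
 *    BIO *bio_mongoc_shim = BIO_new (meth);
 *
 *    mongoc_stream_tls_openssl_bio_set_data (bio_mongoc_shim, tls);
 *    BIO_push (bio_ssl, bio_mongoc_shim);
 *    // SSL-layer reads and writes now land in the read/write shims here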
* *-------------------------------------------------------------------------- */ int mongoc_stream_tls_openssl_bio_write (BIO *b, const char *buf, int len) { mongoc_stream_tls_t *tls; mongoc_stream_tls_openssl_t *openssl; mongoc_iovec_t iov; int ret; ENTRY; BSON_ASSERT (b); BSON_ASSERT (buf); tls = (mongoc_stream_tls_t *) BIO_get_data (b); if (!tls) { RETURN (-1); } openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; iov.iov_base = (void *) buf; iov.iov_len = len; errno = 0; TRACE ("mongoc_stream_writev is expected to write: %d", len); ret = (int) mongoc_stream_writev (tls->base_stream, &iov, 1, tls->timeout_msec); BIO_clear_retry_flags (b); if (len > ret) { TRACE ("Returned short write: %d of %d", ret, len); } else { TRACE ("Completed the %d", ret); } if (ret <= 0 && MONGOC_ERRNO_IS_AGAIN (errno)) { /* this BIO is not the same as "b", which openssl passed in to this func. * set its retry flag, which we check with BIO_should_retry in * mongoc-stream-tls-openssl.c */ TRACE ("%s", "Requesting a retry"); BIO_set_retry_write (openssl->bio); } RETURN (ret); } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_bio_ctrl -- * * Handle ctrl callback for BIO. * * Returns: * ioctl dependent. * * Side effects: * ioctl dependent. * *-------------------------------------------------------------------------- */ long mongoc_stream_tls_openssl_bio_ctrl (BIO *b, int cmd, long num, void *ptr) { switch (cmd) { case BIO_CTRL_FLUSH: return 1; default: return 0; } } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_bio_gets -- * * BIO callback for gets(). Not supported. * * Returns: * -1 always. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int mongoc_stream_tls_openssl_bio_gets (BIO *b, char *buf, int len) { return -1; } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_bio_puts -- * * BIO callback to perform puts(). Just calls the actual write * callback. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ int mongoc_stream_tls_openssl_bio_puts (BIO *b, const char *str) { return mongoc_stream_tls_openssl_bio_write (b, str, (int) strlen (str)); } #endif /* MONGOC_ENABLE_SSL_OPENSSL */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl-private.h0000644000076500000240000000213413572250760030701 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_OPENSSL_PRIVATE_H #define MONGOC_STREAM_TLS_OPENSSL_PRIVATE_H #ifdef MONGOC_ENABLE_SSL_OPENSSL #include BSON_BEGIN_DECLS /** * mongoc_stream_tls_openssl_t: * * Private storage for handling callbacks from mongoc_stream and BIO_* */ typedef struct { BIO *bio; BIO_METHOD *meth; SSL_CTX *ctx; } mongoc_stream_tls_openssl_t; BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_OPENSSL */ #endif /* MONGOC_STREAM_TLS_OPENSSL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.c0000644000076500000240000005154713572250760027240 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_OPENSSL #include #include #include #include #include #include #include #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-errno-private.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-stream-tls-openssl-bio-private.h" #include "mongoc/mongoc-stream-tls-openssl-private.h" #include "mongoc/mongoc-openssl-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-error.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-tls-openssl" #define MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE 4096 #if OPENSSL_VERSION_NUMBER < 0x10100000L || (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x20700000L) static void BIO_meth_free (BIO_METHOD *meth) { /* Nothing to free pre OpenSSL 1.1.0 */ } #endif /* *-------------------------------------------------------------------------- * * _mongoc_stream_tls_openssl_destroy -- * * Cleanup after usage of a mongoc_stream_tls_openssl_t. Free all *allocated * resources and ensure connections are closed. * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static void _mongoc_stream_tls_openssl_destroy (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_openssl_t *openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; BSON_ASSERT (tls); BIO_free_all (openssl->bio); openssl->bio = NULL; BIO_meth_free (openssl->meth); openssl->meth = NULL; mongoc_stream_destroy (tls->base_stream); tls->base_stream = NULL; SSL_CTX_free (openssl->ctx); openssl->ctx = NULL; bson_free (openssl); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); } /* *-------------------------------------------------------------------------- * * _mongoc_stream_tls_openssl_failed -- * * Called on stream failure. Same as _mongoc_stream_tls_openssl_destroy() * * Returns: * None. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ static void _mongoc_stream_tls_openssl_failed (mongoc_stream_t *stream) { _mongoc_stream_tls_openssl_destroy (stream); } /* *-------------------------------------------------------------------------- * * _mongoc_stream_tls_openssl_close -- * * Close the underlying socket. * * Linus dictates that you should not check the result of close() * since there is a race condition with EAGAIN and a new file * descriptor being opened. * * Returns: * 0 on success; otherwise -1. * * Side effects: * The BIO fd is closed. * *-------------------------------------------------------------------------- */ static int _mongoc_stream_tls_openssl_close (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; int ret = 0; ENTRY; BSON_ASSERT (tls); ret = mongoc_stream_close (tls->base_stream); RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_stream_tls_openssl_flush -- * * Flush the underlying stream. * * Returns: * 0 if successful; otherwise -1. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static int _mongoc_stream_tls_openssl_flush (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_openssl_t *openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; BSON_ASSERT (openssl); return BIO_flush (openssl->bio); } static ssize_t _mongoc_stream_tls_openssl_write (mongoc_stream_tls_t *tls, char *buf, size_t buf_len) { mongoc_stream_tls_openssl_t *openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; ssize_t ret; int64_t now; int64_t expire = 0; ENTRY; BSON_ASSERT (tls); BSON_ASSERT (buf); BSON_ASSERT (buf_len); if (tls->timeout_msec >= 0) { expire = bson_get_monotonic_time () + (tls->timeout_msec * 1000UL); } ret = BIO_write (openssl->bio, buf, buf_len); if (ret <= 0) { return ret; } if (expire) { now = bson_get_monotonic_time (); if ((expire - now) < 0) { if (ret < buf_len) { mongoc_counter_streams_timeout_inc (); } tls->timeout_msec = 0; } else { tls->timeout_msec = (expire - now) / 1000L; } } RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_stream_tls_openssl_writev -- * * Write the iovec to the stream. This function will try to write * all of the bytes or fail. If the number of bytes is not equal * to the number requested, a failure or EOF has occurred. * * Returns: * -1 on failure, otherwise the number of bytes written. * * Side effects: * None. * * This function is copied as _mongoc_stream_tls_secure_transport_writev *-------------------------------------------------------------------------- */ static ssize_t _mongoc_stream_tls_openssl_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; char buf[MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE]; ssize_t ret = 0; ssize_t child_ret; size_t i; size_t iov_pos = 0; /* There's a bit of a dance to coalesce vectorized writes into * MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE'd writes to avoid lots of small tls * packets. * * The basic idea is that we want to combine writes in the buffer if they're * smaller than the buffer, flushing as it gets full. For larger writes, or * the last write in the iovec array, we want to ignore the buffer and just * write immediately. 
We take care of doing buffer writes by re-invoking * ourself with a single iovec_t, pointing at our stack buffer. */ char *buf_head = buf; char *buf_tail = buf; char *buf_end = buf + MONGOC_STREAM_TLS_OPENSSL_BUFFER_SIZE; size_t bytes; char *to_write = NULL; size_t to_write_len; BSON_ASSERT (tls); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); ENTRY; tls->timeout_msec = timeout_msec; for (i = 0; i < iovcnt; i++) { iov_pos = 0; while (iov_pos < iov[i].iov_len) { if (buf_head != buf_tail || ((i + 1 < iovcnt) && ((buf_end - buf_tail) > (iov[i].iov_len - iov_pos)))) { /* If we have either of: * - buffered bytes already * - another iovec to send after this one and we don't have more * bytes to send than the size of the buffer. * * copy into the buffer */ bytes = BSON_MIN (iov[i].iov_len - iov_pos, buf_end - buf_tail); memcpy (buf_tail, (char *) iov[i].iov_base + iov_pos, bytes); buf_tail += bytes; iov_pos += bytes; if (buf_tail == buf_end) { /* If we're full, request send */ to_write = buf_head; to_write_len = buf_tail - buf_head; buf_tail = buf_head = buf; } } else { /* Didn't buffer, so just write it through */ to_write = (char *) iov[i].iov_base + iov_pos; to_write_len = iov[i].iov_len - iov_pos; iov_pos += to_write_len; } if (to_write) { /* We get here if we buffered some bytes and filled the buffer, or * if we didn't buffer and have to send out of the iovec */ child_ret = _mongoc_stream_tls_openssl_write (tls, to_write, to_write_len); if (child_ret != to_write_len) { TRACE ("Got child_ret: %ld while to_write_len is: %ld", child_ret, to_write_len); } if (child_ret < 0) { TRACE ("Returning what I had (%ld) as apposed to the error " "(%ld, errno:%d)", ret, child_ret, errno); RETURN (ret); } ret += child_ret; if (child_ret < to_write_len) { /* we timed out, so send back what we could send */ RETURN (ret); } to_write = NULL; } } } if (buf_head != buf_tail) { /* If we have any bytes buffered, send */ child_ret = _mongoc_stream_tls_openssl_write (tls, buf_head, buf_tail - buf_head); if (child_ret < 0) { RETURN (child_ret); } ret += child_ret; } if (ret >= 0) { mongoc_counter_streams_egress_add (ret); } RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_stream_tls_openssl_readv -- * * Read from the stream into iov. This function will try to read * all of the bytes or fail. If the number of bytes is not equal * to the number requested, a failure or EOF has occurred. * * Returns: * -1 on failure, 0 on EOF, otherwise the number of bytes read. * * Side effects: * iov buffers will be written to. 
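 *
 * Editorial note (illustrative sketch, not from the original source;
 * tls_stream stands for any TLS-wrapped mongoc_stream_t): min_bytes lets
 * the caller return as soon as "enough" decrypted data has arrived
 * rather than waiting for every iovec to fill, for example:
 *
 *    char buf[4096];
 *    mongoc_iovec_t iov;
 *    ssize_t n;
 *
 *    iov.iov_base = buf;
 *    iov.iov_len = sizeof buf;
 *    // returns once at least 16 bytes (a message header) have arrived,
 *    // without waiting for the whole 4096-byte buffer to fill
 *    n = mongoc_stream_readv (tls_stream, &iov, 1, 16, 10000);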
* * This function is copied as _mongoc_stream_tls_secure_transport_readv * *-------------------------------------------------------------------------- */ static ssize_t _mongoc_stream_tls_openssl_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_openssl_t *openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; ssize_t ret = 0; size_t i; int read_ret; size_t iov_pos = 0; int64_t now; int64_t expire = 0; ENTRY; BSON_ASSERT (tls); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); tls->timeout_msec = timeout_msec; if (timeout_msec >= 0) { expire = bson_get_monotonic_time () + (timeout_msec * 1000UL); } for (i = 0; i < iovcnt; i++) { iov_pos = 0; while (iov_pos < iov[i].iov_len) { read_ret = BIO_read (openssl->bio, (char *) iov[i].iov_base + iov_pos, (int) (iov[i].iov_len - iov_pos)); /* https://www.openssl.org/docs/crypto/BIO_should_retry.html: * * If BIO_should_retry() returns false then the precise "error * condition" depends on the BIO type that caused it and the return * code of the BIO operation. For example if a call to BIO_read() on a * socket BIO returns 0 and BIO_should_retry() is false then the cause * will be that the connection closed. */ if (read_ret < 0 || (read_ret == 0 && !BIO_should_retry (openssl->bio))) { return -1; } if (expire) { now = bson_get_monotonic_time (); if ((expire - now) < 0) { if (read_ret == 0) { mongoc_counter_streams_timeout_inc (); #ifdef _WIN32 errno = WSAETIMEDOUT; #else errno = ETIMEDOUT; #endif RETURN (-1); } tls->timeout_msec = 0; } else { tls->timeout_msec = (expire - now) / 1000L; } } ret += read_ret; if ((size_t) ret >= min_bytes) { mongoc_counter_streams_ingress_add (ret); RETURN (ret); } iov_pos += read_ret; } } if (ret >= 0) { mongoc_counter_streams_ingress_add (ret); } RETURN (ret); } /* *-------------------------------------------------------------------------- * * _mongoc_stream_tls_openssl_setsockopt -- * * Perform a setsockopt on the underlying stream. * * Returns: * -1 on failure, otherwise opt specific value. * * Side effects: * None. 
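 *
 * Editorial note (sketch, not from the original source; tls_stream stands
 * for any TLS-wrapped mongoc_stream_t): the option is forwarded unchanged
 * to the wrapped stream, so callers can tune the socket at the bottom of
 * the stream chain through the TLS wrapper:
 *
 *    int one = 1;
 *
 *    // ends up at the mongoc_stream_socket_t beneath the TLS stream
 *    mongoc_stream_setsockopt (tls_stream, IPPROTO_TCP, TCP_NODELAY,
 *                              &one, sizeof one);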
* *-------------------------------------------------------------------------- */ static int _mongoc_stream_tls_openssl_setsockopt (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; BSON_ASSERT (tls); return mongoc_stream_setsockopt ( tls->base_stream, level, optname, optval, optlen); } static mongoc_stream_t * _mongoc_stream_tls_openssl_get_base_stream (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; return tls->base_stream; } static bool _mongoc_stream_tls_openssl_check_closed (mongoc_stream_t *stream) /* IN */ { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; BSON_ASSERT (stream); return mongoc_stream_check_closed (tls->base_stream); } /** * mongoc_stream_tls_openssl_handshake: */ bool mongoc_stream_tls_openssl_handshake (mongoc_stream_t *stream, const char *host, int *events, bson_error_t *error) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_openssl_t *openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; SSL *ssl; BSON_ASSERT (tls); BSON_ASSERT (host); ENTRY; BIO_get_ssl (openssl->bio, &ssl); if (BIO_do_handshake (openssl->bio) == 1) { if (_mongoc_openssl_check_cert ( ssl, host, tls->ssl_opts.allow_invalid_hostname)) { RETURN (true); } *events = 0; bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "TLS handshake failed: Failed certificate verification"); RETURN (false); } if (BIO_should_retry (openssl->bio)) { *events = BIO_should_read (openssl->bio) ? POLLIN : POLLOUT; RETURN (false); } if (!errno) { #ifdef _WIN32 errno = WSAETIMEDOUT; #else errno = ETIMEDOUT; #endif } *events = 0; bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "TLS handshake failed: %s", ERR_error_string (ERR_get_error (), NULL)); RETURN (false); } /* Callback to get the client provided SNI, if any * It is only called in SSL "server mode" (e.g. when using the Mock Server), * and we don't actually use the hostname for anything, just debug print it */ static int _mongoc_stream_tls_openssl_sni (SSL *ssl, int *ad, void *arg) { const char *hostname; if (ssl == NULL) { TRACE ("%s", "No SNI hostname provided"); return SSL_TLSEXT_ERR_NOACK; } hostname = SSL_get_servername (ssl, TLSEXT_NAMETYPE_host_name); /* This is intentionally debug since its only used by the mock test server */ MONGOC_DEBUG ("Got SNI: '%s'", hostname); return SSL_TLSEXT_ERR_OK; } static bool _mongoc_stream_tls_openssl_timed_out (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; ENTRY; RETURN (mongoc_stream_timed_out (tls->base_stream)); } static bool _mongoc_stream_tls_openssl_should_retry (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_openssl_t *openssl = (mongoc_stream_tls_openssl_t *) tls->ctx; ENTRY; if (BIO_should_retry (openssl->bio)) { RETURN (true); } RETURN (mongoc_stream_should_retry (tls->base_stream)); } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_openssl_new -- * * Creates a new mongoc_stream_tls_openssl_t to communicate with a remote * server using a TLS stream. * * @base_stream should be a stream that will become owned by the * resulting tls stream. It will be used for raw I/O. * * @trust_store_dir should be a path to the SSL cert db to use for * verifying trust of the remote server. * * Returns: * NULL on failure, otherwise a mongoc_stream_t. * * Side effects: * None. 
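 *
 * Example (editorial sketch, not from the original source; base_stream is
 * assumed to be connected, "db.example.com" is a placeholder host, and
 * error handling is abbreviated):
 *
 *    mongoc_ssl_opt_t opt = *mongoc_ssl_opt_get_default ();
 *    bson_error_t error;
 *    mongoc_stream_t *tls;
 *
 *    tls = mongoc_stream_tls_openssl_new (base_stream, "db.example.com",
 *                                         &opt, 1);   // 1 = client mode
 *
 *    // drive the handshake to completion (see mongoc-stream-tls.h)
 *    if (!tls ||
 *        !mongoc_stream_tls_handshake_block (tls, "db.example.com",
 *                                            10000, &error)) {
 *       // inspect error.message and clean up
 *    }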
* *-------------------------------------------------------------------------- */ mongoc_stream_t * mongoc_stream_tls_openssl_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client) { mongoc_stream_tls_t *tls; mongoc_stream_tls_openssl_t *openssl; SSL_CTX *ssl_ctx = NULL; BIO *bio_ssl = NULL; BIO *bio_mongoc_shim = NULL; BIO_METHOD *meth; BSON_ASSERT (base_stream); BSON_ASSERT (opt); ENTRY; ssl_ctx = _mongoc_openssl_ctx_new (opt); if (!ssl_ctx) { RETURN (NULL); } #if OPENSSL_VERSION_NUMBER >= 0x10002000L && !defined(LIBRESSL_VERSION_NUMBER) if (!opt->allow_invalid_hostname) { struct in_addr addr; X509_VERIFY_PARAM *param = X509_VERIFY_PARAM_new (); X509_VERIFY_PARAM_set_hostflags (param, X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS); if (inet_pton (AF_INET, host, &addr) || inet_pton (AF_INET6, host, &addr)) { X509_VERIFY_PARAM_set1_ip_asc (param, host); } else { X509_VERIFY_PARAM_set1_host (param, host, 0); } SSL_CTX_set1_param (ssl_ctx, param); X509_VERIFY_PARAM_free (param); } #endif if (!client) { /* Only used by the Mock Server. * Set a callback to get the SNI, if provided */ SSL_CTX_set_tlsext_servername_callback (ssl_ctx, _mongoc_stream_tls_openssl_sni); } if (opt->weak_cert_validation) { SSL_CTX_set_verify (ssl_ctx, SSL_VERIFY_NONE, NULL); } else { SSL_CTX_set_verify (ssl_ctx, SSL_VERIFY_PEER, NULL); } bio_ssl = BIO_new_ssl (ssl_ctx, client); if (!bio_ssl) { SSL_CTX_free (ssl_ctx); RETURN (NULL); } meth = mongoc_stream_tls_openssl_bio_meth_new (); bio_mongoc_shim = BIO_new (meth); if (!bio_mongoc_shim) { BIO_free_all (bio_ssl); BIO_meth_free (meth); RETURN (NULL); } /* Added in OpenSSL 0.9.8f, as a build time option */ #ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME if (client) { SSL *ssl; /* Set the SNI hostname we are expecting certificate for */ BIO_get_ssl (bio_ssl, &ssl); SSL_set_tlsext_host_name (ssl, host); #endif } BIO_push (bio_ssl, bio_mongoc_shim); openssl = (mongoc_stream_tls_openssl_t *) bson_malloc0 (sizeof *openssl); openssl->bio = bio_ssl; openssl->meth = meth; openssl->ctx = ssl_ctx; tls = (mongoc_stream_tls_t *) bson_malloc0 (sizeof *tls); tls->parent.type = MONGOC_STREAM_TLS; tls->parent.destroy = _mongoc_stream_tls_openssl_destroy; tls->parent.failed = _mongoc_stream_tls_openssl_failed; tls->parent.close = _mongoc_stream_tls_openssl_close; tls->parent.flush = _mongoc_stream_tls_openssl_flush; tls->parent.writev = _mongoc_stream_tls_openssl_writev; tls->parent.readv = _mongoc_stream_tls_openssl_readv; tls->parent.setsockopt = _mongoc_stream_tls_openssl_setsockopt; tls->parent.get_base_stream = _mongoc_stream_tls_openssl_get_base_stream; tls->parent.check_closed = _mongoc_stream_tls_openssl_check_closed; tls->parent.timed_out = _mongoc_stream_tls_openssl_timed_out; tls->parent.should_retry = _mongoc_stream_tls_openssl_should_retry; memcpy (&tls->ssl_opts, opt, sizeof tls->ssl_opts); tls->handshake = mongoc_stream_tls_openssl_handshake; tls->ctx = (void *) openssl; tls->timeout_msec = -1; tls->base_stream = base_stream; mongoc_stream_tls_openssl_bio_set_data (bio_mongoc_shim, tls); mongoc_counter_streams_active_inc (); RETURN ((mongoc_stream_t *) tls); } #endif /* MONGOC_ENABLE_SSL_OPENSSL */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-openssl.h0000644000076500000240000000220213572250760027225 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_OPENSSL_H #define MONGOC_STREAM_TLS_OPENSSL_H #ifdef MONGOC_ENABLE_SSL_OPENSSL #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_tls_openssl_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client); BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_OPENSSL */ #endif /* MONGOC_STREAM_TLS_OPENSSL_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-private.h0000644000076500000240000000266113572250760027225 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_PRIVATE_H #define MONGOC_STREAM_TLS_PRIVATE_H #include #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS /** * mongoc_stream_tls_t: * * Overloaded mongoc_stream_t with additional TLS handshake and verification * callbacks. * */ struct _mongoc_stream_tls_t { mongoc_stream_t parent; /* The TLS stream wrapper */ mongoc_stream_t *base_stream; /* The underlying actual stream */ void *ctx; /* TLS lib specific configuration or wrappers */ int32_t timeout_msec; mongoc_ssl_opt_t ssl_opts; bool (*handshake) (mongoc_stream_t *stream, const char *host, int *events /* OUT*/, bson_error_t *error); }; BSON_END_DECLS #endif /* MONGOC_STREAM_TLS_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel-private.h0000644000076500000240000000436713572250760032124 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_SECURE_CHANNEL_PRIVATE_H #define MONGOC_STREAM_TLS_SECURE_CHANNEL_PRIVATE_H #ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL #include /* Its mandatory to indicate to Windows who is compiling the code */ #define SECURITY_WIN32 #include BSON_BEGIN_DECLS /* enum for the nonblocking SSL connection state machine */ typedef enum { ssl_connect_1, ssl_connect_2, ssl_connect_2_reading, ssl_connect_2_writing, ssl_connect_3, ssl_connect_done } ssl_connect_state; /* Structs to store Schannel handles */ typedef struct { CredHandle cred_handle; TimeStamp time_stamp; } mongoc_secure_channel_cred; typedef struct { CtxtHandle ctxt_handle; TimeStamp time_stamp; } mongoc_secure_channel_ctxt; /** * mongoc_stream_tls_secure_channel_t: * * Private storage for Secure Channel Streams */ typedef struct { ssl_connect_state connecting_state; mongoc_secure_channel_cred *cred; mongoc_secure_channel_ctxt *ctxt; SecPkgContext_StreamSizes stream_sizes; size_t encdata_length, decdata_length; size_t encdata_offset, decdata_offset; unsigned char *encdata_buffer, *decdata_buffer; unsigned long req_flags, ret_flags; int recv_unrecoverable_err; /* _mongoc_stream_tls_secure_channel_read had an unrecoverable err */ bool recv_sspi_close_notify; /* true if connection closed by close_notify */ bool recv_connection_closed; /* true if connection closed, regardless how */ } mongoc_stream_tls_secure_channel_t; BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_SECURE_CHANNEL */ #endif /* MONGOC_STREAM_TLS_SECURE_CHANNEL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.c0000644000076500000240000010463713572250760030450 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * Significant portion of this file, such as * _mongoc_stream_tls_secure_channel_write & *_mongoc_stream_tls_secure_channel_read * comes straight from one of my favorite projects, cURL! * Thank you so much for having gone through the Secure Channel pain for me. * * * Copyright (C) 2012 - 2015, Marc Hoersken, * Copyright (C) 2012, Mark Salisbury, * Copyright (C) 2012 - 2015, Daniel Stenberg, , et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at https://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* * Based upon the PolarSSL implementation in polarssl.c and polarssl.h: * Copyright (C) 2010, 2011, Hoi-Ho Chan, * * Based upon the CyaSSL implementation in cyassl.c and cyassl.h: * Copyright (C) 1998 - 2012, Daniel Stenberg, , et al. 
* * Thanks for code and inspiration! */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL #include #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-tls-secure-channel-private.h" #include "mongoc/mongoc-secure-channel-private.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-errno-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-tls-secure-channel" #define SECURITY_WIN32 #include #include #include /* mingw doesn't define these */ #ifndef SP_PROT_TLS1_1_CLIENT #define SP_PROT_TLS1_1_CLIENT 0x00000200 #endif #ifndef SP_PROT_TLS1_2_CLIENT #define SP_PROT_TLS1_2_CLIENT 0x00000800 #endif size_t mongoc_secure_channel_write (mongoc_stream_tls_t *tls, const void *data, size_t data_length); static void _mongoc_stream_tls_secure_channel_destroy (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_channel); /* See https://msdn.microsoft.com/en-us/library/windows/desktop/aa380138.aspx * Shutting Down an Schannel Connection */ TRACE ("shutting down SSL/TLS connection"); if (secure_channel->cred && secure_channel->ctxt) { SecBufferDesc BuffDesc; SecBuffer Buffer; SECURITY_STATUS sspi_status; SecBuffer outbuf; SecBufferDesc outbuf_desc; DWORD dwshut = SCHANNEL_SHUTDOWN; _mongoc_secure_channel_init_sec_buffer ( &Buffer, SECBUFFER_TOKEN, &dwshut, sizeof (dwshut)); _mongoc_secure_channel_init_sec_buffer_desc (&BuffDesc, &Buffer, 1); sspi_status = ApplyControlToken (&secure_channel->ctxt->ctxt_handle, &BuffDesc); if (sspi_status != SEC_E_OK) { MONGOC_ERROR ("ApplyControlToken failure: %d", sspi_status); } /* setup output buffer */ _mongoc_secure_channel_init_sec_buffer ( &outbuf, SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer_desc (&outbuf_desc, &outbuf, 1); sspi_status = InitializeSecurityContext (&secure_channel->cred->cred_handle, &secure_channel->ctxt->ctxt_handle, /*tls->hostname*/ NULL, secure_channel->req_flags, 0, 0, NULL, 0, &secure_channel->ctxt->ctxt_handle, &outbuf_desc, &secure_channel->ret_flags, &secure_channel->ctxt->time_stamp); if ((sspi_status == SEC_E_OK) || (sspi_status == SEC_I_CONTEXT_EXPIRED)) { /* send close message which is in output buffer */ ssize_t written = mongoc_secure_channel_write (tls, outbuf.pvBuffer, outbuf.cbBuffer); FreeContextBuffer (outbuf.pvBuffer); if (outbuf.cbBuffer != (size_t) written) { TRACE ("failed to send close msg (wrote %zd out of %zd)", written, outbuf.cbBuffer); } } } /* free SSPI Schannel API security context handle */ if (secure_channel->ctxt) { TRACE ("clear security context handle"); DeleteSecurityContext (&secure_channel->ctxt->ctxt_handle); bson_free (secure_channel->ctxt); } /* free SSPI Schannel API credential handle */ if (secure_channel->cred) { /* decrement the reference counter of the credential/session handle */ /* if the handle was not cached and the refcount is zero */ TRACE ("clear credential handle"); FreeCredentialsHandle (&secure_channel->cred->cred_handle); bson_free (secure_channel->cred); } /* free internal buffer for received encrypted data */ if (secure_channel->encdata_buffer != NULL) { bson_free (secure_channel->encdata_buffer); 
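      /* note: the length/offset bookkeeping is reset just below, and the
       * whole secure_channel struct is released at the end of this destroy
       * handler, so the freed buffer pointer is never reused */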
secure_channel->encdata_length = 0; secure_channel->encdata_offset = 0; } /* free internal buffer for received decrypted data */ if (secure_channel->decdata_buffer != NULL) { bson_free (secure_channel->decdata_buffer); secure_channel->decdata_length = 0; secure_channel->decdata_offset = 0; } mongoc_stream_destroy (tls->base_stream); bson_free (secure_channel); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_stream_tls_secure_channel_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_stream_tls_secure_channel_destroy (stream); EXIT; } static int _mongoc_stream_tls_secure_channel_close (mongoc_stream_t *stream) { int ret = 0; mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_channel); ret = mongoc_stream_close (tls->base_stream); RETURN (ret); } static int _mongoc_stream_tls_secure_channel_flush (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_channel); RETURN (0); } static ssize_t _mongoc_stream_tls_secure_channel_write (mongoc_stream_t *stream, char *buf, size_t buf_len) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ssize_t written = -1; size_t data_len = 0; unsigned char *data = NULL; SecBuffer outbuf[4]; SecBufferDesc outbuf_desc; SECURITY_STATUS sspi_status = SEC_E_OK; ENTRY; BSON_ASSERT (secure_channel); TRACE ("The entire buffer is: %d", buf_len); /* check if the maximum stream sizes were queried */ if (secure_channel->stream_sizes.cbMaximumMessage == 0) { sspi_status = QueryContextAttributes (&secure_channel->ctxt->ctxt_handle, SECPKG_ATTR_STREAM_SIZES, &secure_channel->stream_sizes); if (sspi_status != SEC_E_OK) { TRACE ("failing here: %d", __LINE__); return -1; } } /* check if the buffer is longer than the maximum message length */ if (buf_len > secure_channel->stream_sizes.cbMaximumMessage) { TRACE ("SHRINKING buf_len from %lu to %lu", buf_len, secure_channel->stream_sizes.cbMaximumMessage); buf_len = secure_channel->stream_sizes.cbMaximumMessage; } /* calculate the complete message length and allocate a buffer for it */ data_len = secure_channel->stream_sizes.cbHeader + buf_len + secure_channel->stream_sizes.cbTrailer; data = (unsigned char *) bson_malloc (data_len); /* setup output buffers (header, data, trailer, empty) */ _mongoc_secure_channel_init_sec_buffer ( &outbuf[0], SECBUFFER_STREAM_HEADER, data, secure_channel->stream_sizes.cbHeader); _mongoc_secure_channel_init_sec_buffer ( &outbuf[1], SECBUFFER_DATA, data + secure_channel->stream_sizes.cbHeader, (unsigned long) (buf_len & (size_t) 0xFFFFFFFFUL)); _mongoc_secure_channel_init_sec_buffer ( &outbuf[2], SECBUFFER_STREAM_TRAILER, data + secure_channel->stream_sizes.cbHeader + buf_len, secure_channel->stream_sizes.cbTrailer); _mongoc_secure_channel_init_sec_buffer ( &outbuf[3], SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer_desc (&outbuf_desc, outbuf, 4); /* copy data into output buffer */ memcpy (outbuf[1].pvBuffer, buf, buf_len); /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa375390.aspx */ sspi_status = EncryptMessage (&secure_channel->ctxt->ctxt_handle, 0, &outbuf_desc, 0); /* check if the message 
was encrypted */ if (sspi_status == SEC_E_OK) { written = 0; /* send the encrypted message including header, data and trailer */ buf_len = outbuf[0].cbBuffer + outbuf[1].cbBuffer + outbuf[2].cbBuffer; written = mongoc_secure_channel_write (tls, data, buf_len); } else { written = -1; } bson_free (data); if (buf_len == (size_t) written) { /* Encrypted message including header, data and trailer entirely sent. * The return value is the number of unencrypted bytes that were sent. */ written = outbuf[1].cbBuffer; } return written; } /* This is copypasta from _mongoc_stream_tls_openssl_writev */ #define MONGOC_STREAM_TLS_BUFFER_SIZE 4096 static ssize_t _mongoc_stream_tls_secure_channel_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; char buf[MONGOC_STREAM_TLS_BUFFER_SIZE]; ssize_t ret = 0; ssize_t child_ret; size_t i; size_t iov_pos = 0; /* There's a bit of a dance to coalesce vectorized writes into * MONGOC_STREAM_TLS_BUFFER_SIZE'd writes to avoid lots of small tls * packets. * * The basic idea is that we want to combine writes in the buffer if they're * smaller than the buffer, flushing as it gets full. For larger writes, or * the last write in the iovec array, we want to ignore the buffer and just * write immediately. We take care of doing buffer writes by re-invoking * ourself with a single iovec_t, pointing at our stack buffer. */ char *buf_head = buf; char *buf_tail = buf; char *buf_end = buf + MONGOC_STREAM_TLS_BUFFER_SIZE; size_t bytes; char *to_write = NULL; size_t to_write_len; BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (secure_channel); ENTRY; TRACE ("Trying to write to the server"); tls->timeout_msec = timeout_msec; TRACE ("count: %d, 0th: %lu", iovcnt, iov[0].iov_len); for (i = 0; i < iovcnt; i++) { iov_pos = 0; TRACE ("iov %d size: %lu", i, iov[i].iov_len); while (iov_pos < iov[i].iov_len) { if (buf_head != buf_tail || ((i + 1 < iovcnt) && ((buf_end - buf_tail) > (iov[i].iov_len - iov_pos)))) { /* If we have either of: * - buffered bytes already * - another iovec to send after this one and we don't have more * bytes to send than the size of the buffer. 
* * copy into the buffer */ bytes = BSON_MIN (iov[i].iov_len - iov_pos, buf_end - buf_tail); memcpy (buf_tail, (char *) iov[i].iov_base + iov_pos, bytes); buf_tail += bytes; iov_pos += bytes; if (buf_tail == buf_end) { /* If we're full, request send */ to_write = buf_head; to_write_len = buf_tail - buf_head; buf_tail = buf_head = buf; } } else { /* Didn't buffer, so just write it through */ to_write = (char *) iov[i].iov_base + iov_pos; to_write_len = iov[i].iov_len - iov_pos; iov_pos += to_write_len; } if (to_write) { /* We get here if we buffered some bytes and filled the buffer, or * if we didn't buffer and have to send out of the iovec */ child_ret = _mongoc_stream_tls_secure_channel_write ( stream, to_write, to_write_len); TRACE ("Child0wrote: %d, was supposed to write: %d", child_ret, to_write_len); if (child_ret < 0) { RETURN (ret); } ret += child_ret; iov_pos -= to_write_len - child_ret; to_write = NULL; } } } if (buf_head != buf_tail) { /* If we have any bytes buffered, send */ child_ret = _mongoc_stream_tls_secure_channel_write ( stream, buf_head, buf_tail - buf_head); TRACE ("Child1wrote: %d, was supposed to write: %d", child_ret, buf_tail - buf_head); if (child_ret < 0) { RETURN (child_ret); } ret += child_ret; } if (ret >= 0) { mongoc_counter_streams_egress_add (ret); } TRACE ("Returning %d", (int) ret); RETURN (ret); } /* move up to "len" decrypted bytes to buf, return number of bytes */ static ssize_t _mongoc_stream_tls_secure_channel_debuf ( mongoc_stream_tls_secure_channel_t *secure_channel, char *buf, size_t size) { size_t s = BSON_MIN (size, secure_channel->decdata_offset); memcpy (buf, secure_channel->decdata_buffer, s); memmove (secure_channel->decdata_buffer, secure_channel->decdata_buffer + s, secure_channel->decdata_offset - s); secure_channel->decdata_offset -= s; TRACE ("decrypted data returned %d", (int) s); TRACE ("decrypted data buffer: offset %d length %d", (int) secure_channel->decdata_offset, (int) secure_channel->decdata_length); return (ssize_t) s; } /* decrypt as many received bytes as possible to secure_channel.decdata_buf */ static void _mongoc_stream_tls_secure_channel_decrypt ( mongoc_stream_tls_secure_channel_t *secure_channel) { size_t size = 0; size_t remaining; ssize_t nread = -1; bool done = false; SecBuffer inbuf[4]; SecBufferDesc inbuf_desc; SECURITY_STATUS sspi_status = SEC_E_OK; TRACE ("encrypted data buffer: offset %d length %d", (int) secure_channel->encdata_offset, (int) secure_channel->encdata_length); /* decrypt loop */ while (secure_channel->encdata_offset > 0 && sspi_status == SEC_E_OK) { /* prepare data buffer for DecryptMessage call */ _mongoc_secure_channel_init_sec_buffer ( &inbuf[0], SECBUFFER_DATA, secure_channel->encdata_buffer, (unsigned long) (secure_channel->encdata_offset & (size_t) 0xFFFFFFFFUL)); /* we need 3 more empty input buffers for possible output */ _mongoc_secure_channel_init_sec_buffer ( &inbuf[1], SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer ( &inbuf[2], SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer ( &inbuf[3], SECBUFFER_EMPTY, NULL, 0); _mongoc_secure_channel_init_sec_buffer_desc (&inbuf_desc, inbuf, 4); /* https://msdn.microsoft.com/en-us/library/windows/desktop/aa375348.aspx */ sspi_status = DecryptMessage ( &secure_channel->ctxt->ctxt_handle, &inbuf_desc, 0, NULL); /* check if everything went fine (server may want to renegotiate * or shutdown the connection context) */ if (sspi_status == SEC_E_OK || sspi_status == SEC_I_RENEGOTIATE || sspi_status == 
SEC_I_CONTEXT_EXPIRED) { /* check for successfully decrypted data, even before actual * renegotiation or shutdown of the connection context */ if (inbuf[1].BufferType == SECBUFFER_DATA) { TRACE ("decrypted data length: %lu", inbuf[1].cbBuffer); size = inbuf[1].cbBuffer; remaining = secure_channel->decdata_length - secure_channel->decdata_offset; if (remaining < size) { mongoc_secure_channel_realloc_buf ( &secure_channel->decdata_length, &secure_channel->decdata_buffer, size); } /* copy decrypted data to internal buffer */ if (size) { memcpy (secure_channel->decdata_buffer + secure_channel->decdata_offset, inbuf[1].pvBuffer, size); secure_channel->decdata_offset += size; } TRACE ("decrypted data added: %d", (int) size); TRACE ("decrypted data cached: offset %d length %d", (int) secure_channel->decdata_offset, (int) secure_channel->decdata_length); } /* check for remaining encrypted data */ if (inbuf[3].BufferType == SECBUFFER_EXTRA && inbuf[3].cbBuffer > 0) { TRACE ("encrypted data length: %lu", inbuf[3].cbBuffer); /* check if the remaining data is less than the total amount * and therefore begins after the already processed data */ if (secure_channel->encdata_offset > inbuf[3].cbBuffer) { /* move remaining encrypted data forward to the beginning of * buffer */ memmove (secure_channel->encdata_buffer, (secure_channel->encdata_buffer + secure_channel->encdata_offset) - inbuf[3].cbBuffer, inbuf[3].cbBuffer); secure_channel->encdata_offset = inbuf[3].cbBuffer; } TRACE ("encrypted data cached: offset %d length %d", (int) secure_channel->encdata_offset, (int) secure_channel->encdata_length); } else { /* reset encrypted buffer offset, because there is no data remaining */ secure_channel->encdata_offset = 0; } /* check if server wants to renegotiate the connection context */ if (sspi_status == SEC_I_RENEGOTIATE) { TRACE ("remote party requests renegotiation"); } /* check if the server closed the connection */ else if (sspi_status == SEC_I_CONTEXT_EXPIRED) { /* In Windows 2000 SEC_I_CONTEXT_EXPIRED (close_notify) is not * returned so we have to work around that in cleanup. */ secure_channel->recv_sspi_close_notify = true; if (!secure_channel->recv_connection_closed) { secure_channel->recv_connection_closed = true; TRACE ("server closed the connection"); } } } else if (sspi_status == SEC_E_INCOMPLETE_MESSAGE) { TRACE ("failed to decrypt data, need more data"); } else { TRACE ("failed to read data from server: %d", sspi_status); secure_channel->recv_unrecoverable_err = true; } } TRACE ("encrypted data buffer: offset %d length %d", (int) secure_channel->encdata_offset, (int) secure_channel->encdata_length); TRACE ("decrypted data buffer: offset %d length %d", (int) secure_channel->decdata_offset, (int) secure_channel->decdata_length); } static ssize_t _mongoc_stream_tls_secure_channel_read (mongoc_stream_t *stream, char *buf, size_t len) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ssize_t size = 0; ssize_t nread; TRACE ("client wants to read %d bytes", (int) len); BSON_ASSERT (len > 0); /* * Our priority is to always return as much decrypted data to the caller as * possible, even if an error occurs. The state of the decrypted buffer must * always be valid. */ if (secure_channel->decdata_offset) { TRACE ("decrypted data is already available"); return _mongoc_stream_tls_secure_channel_debuf (secure_channel, buf, len); } /* is a complete encrypted block left from last network read? 
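    * If so, try to decrypt it first and return any plaintext it yields,
    * before reading more encrypted bytes from the socket.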
*/ if (secure_channel->encdata_offset) { _mongoc_stream_tls_secure_channel_decrypt (secure_channel); if (secure_channel->decdata_offset) { return _mongoc_stream_tls_secure_channel_debuf ( secure_channel, buf, len); } } /* keep these checks separated, for more detailed tracing */ if (secure_channel->recv_unrecoverable_err) { TRACE ("an unrecoverable error occurred in a prior call"); return -1; } if (secure_channel->recv_sspi_close_notify) { TRACE ("server indicated shutdown in a prior call"); return -1; } if (secure_channel->recv_connection_closed) { TRACE ("connection closed"); return -1; } size = secure_channel->encdata_length - secure_channel->encdata_offset; /* read encrypted data from socket. returns 0 on shutdown or error */ nread = mongoc_secure_channel_read (tls, (char *) (secure_channel->encdata_buffer + secure_channel->encdata_offset), (size_t) size); if (!nread) { if (MONGOC_ERRNO_IS_AGAIN (errno)) { TRACE ("Try again"); return 0; } else { secure_channel->recv_connection_closed = true; TRACE ("reading failed: %d", errno); return -1; } } secure_channel->encdata_offset += (size_t) nread; TRACE ("encrypted data got %zd", nread); _mongoc_stream_tls_secure_channel_decrypt (secure_channel); return _mongoc_stream_tls_secure_channel_debuf (secure_channel, buf, len); } /* This function is copypasta of _mongoc_stream_tls_openssl_readv */ static ssize_t _mongoc_stream_tls_secure_channel_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ssize_t ret = 0; size_t i; size_t iov_pos = 0; int64_t now; int64_t expire = 0; BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (secure_channel); ENTRY; tls->timeout_msec = timeout_msec; if (timeout_msec >= 0) { expire = bson_get_monotonic_time () + (timeout_msec * 1000UL); } for (i = 0; i < iovcnt; i++) { iov_pos = 0; while (iov_pos < iov[i].iov_len) { ssize_t read_ret = _mongoc_stream_tls_secure_channel_read ( stream, (char *) iov[i].iov_base + iov_pos, (int) (iov[i].iov_len - iov_pos)); if (read_ret < 0) { RETURN (-1); } if (expire) { now = bson_get_monotonic_time (); if ((expire - now) < 0) { if (read_ret == 0) { mongoc_counter_streams_timeout_inc (); errno = ETIMEDOUT; RETURN (-1); } tls->timeout_msec = 0; } else { tls->timeout_msec = (expire - now) / 1000L; } } ret += read_ret; if ((size_t) ret >= min_bytes) { mongoc_counter_streams_ingress_add (ret); RETURN (ret); } iov_pos += read_ret; } } if (ret >= 0) { mongoc_counter_streams_ingress_add (ret); } RETURN (ret); } static int _mongoc_stream_tls_secure_channel_setsockopt (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_channel); RETURN (mongoc_stream_setsockopt ( tls->base_stream, level, optname, optval, optlen)); } static mongoc_stream_t * _mongoc_stream_tls_secure_channel_get_base_stream (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_channel); RETURN (tls->base_stream); } static bool _mongoc_stream_tls_secure_channel_check_closed ( mongoc_stream_t *stream) /* IN */ { mongoc_stream_tls_t 
*tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_channel); RETURN (mongoc_stream_check_closed (tls->base_stream)); } bool mongoc_stream_tls_secure_channel_handshake (mongoc_stream_t *stream, const char *host, int *events, bson_error_t *error) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_channel_t *secure_channel = (mongoc_stream_tls_secure_channel_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_channel); TRACE ("Getting ready for state: %d, timeout is %d", secure_channel->connecting_state + 1, tls->timeout_msec); switch (secure_channel->connecting_state) { case ssl_connect_1: if (mongoc_secure_channel_handshake_step_1 (tls, (char *) host)) { TRACE ("Step#1 Worked!\n\n"); *events = POLLIN; RETURN (false); } else { TRACE ("Step#1 FAILED!"); } break; case ssl_connect_2: case ssl_connect_2_reading: case ssl_connect_2_writing: if (mongoc_secure_channel_handshake_step_2 (tls, (char *) host)) { if (secure_channel->connecting_state == ssl_connect_2_reading) { *events = POLLIN; } else { *events = POLLOUT; } RETURN (false); } else { TRACE ("Step#2 FAILED!"); } break; case ssl_connect_3: if (mongoc_secure_channel_handshake_step_3 (tls, (char *) host)) { TRACE ("Step#3 Worked!\n\n"); *events = POLLIN | POLLOUT; RETURN (false); } else { TRACE ("Step#3 FAILED!"); } break; case ssl_connect_done: TRACE ("Connect DONE!"); /* reset our connection state machine */ secure_channel->connecting_state = ssl_connect_1; RETURN (true); break; } *events = 0; bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "TLS handshake failed"); RETURN (false); } static bool _mongoc_stream_tls_secure_channel_timed_out (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; ENTRY; RETURN (mongoc_stream_timed_out (tls->base_stream)); } static bool _mongoc_stream_tls_secure_channel_should_retry (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; ENTRY; RETURN (mongoc_stream_should_retry (tls->base_stream)); } mongoc_stream_t * mongoc_stream_tls_secure_channel_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client) { SECURITY_STATUS sspi_status = SEC_E_OK; SCHANNEL_CRED schannel_cred; mongoc_stream_tls_t *tls; mongoc_stream_tls_secure_channel_t *secure_channel; PCCERT_CONTEXT cert = NULL; ENTRY; BSON_ASSERT (base_stream); BSON_ASSERT (opt); secure_channel = (mongoc_stream_tls_secure_channel_t *) bson_malloc0 ( sizeof *secure_channel); secure_channel->decdata_buffer = bson_malloc (MONGOC_SCHANNEL_BUFFER_INIT_SIZE); secure_channel->decdata_length = MONGOC_SCHANNEL_BUFFER_INIT_SIZE; secure_channel->encdata_buffer = bson_malloc (MONGOC_SCHANNEL_BUFFER_INIT_SIZE); secure_channel->encdata_length = MONGOC_SCHANNEL_BUFFER_INIT_SIZE; tls = (mongoc_stream_tls_t *) bson_malloc0 (sizeof *tls); tls->parent.type = MONGOC_STREAM_TLS; tls->parent.destroy = _mongoc_stream_tls_secure_channel_destroy; tls->parent.failed = _mongoc_stream_tls_secure_channel_failed; tls->parent.close = _mongoc_stream_tls_secure_channel_close; tls->parent.flush = _mongoc_stream_tls_secure_channel_flush; tls->parent.writev = _mongoc_stream_tls_secure_channel_writev; tls->parent.readv = _mongoc_stream_tls_secure_channel_readv; tls->parent.setsockopt = _mongoc_stream_tls_secure_channel_setsockopt; tls->parent.get_base_stream = _mongoc_stream_tls_secure_channel_get_base_stream; tls->parent.check_closed = 
_mongoc_stream_tls_secure_channel_check_closed; tls->parent.timed_out = _mongoc_stream_tls_secure_channel_timed_out; tls->parent.should_retry = _mongoc_stream_tls_secure_channel_should_retry; memcpy (&tls->ssl_opts, opt, sizeof tls->ssl_opts); tls->handshake = mongoc_stream_tls_secure_channel_handshake; tls->ctx = (void *) secure_channel; tls->timeout_msec = -1; tls->base_stream = base_stream; TRACE ("SSL/TLS connection with endpoint AcquireCredentialsHandle"); /* setup Schannel API options */ memset (&schannel_cred, 0, sizeof (schannel_cred)); schannel_cred.dwVersion = SCHANNEL_CRED_VERSION; /* SCHANNEL_CRED: * SCH_USE_STRONG_CRYPTO is not available in VS2010 * https://msdn.microsoft.com/en-us/library/windows/desktop/aa379810.aspx */ #ifdef SCH_USE_STRONG_CRYPTO schannel_cred.dwFlags = SCH_USE_STRONG_CRYPTO; #endif if (opt->weak_cert_validation) { schannel_cred.dwFlags |= SCH_CRED_MANUAL_CRED_VALIDATION | SCH_CRED_IGNORE_NO_REVOCATION_CHECK | SCH_CRED_IGNORE_REVOCATION_OFFLINE; TRACE ("disabled server certificate checks"); } else { schannel_cred.dwFlags |= SCH_CRED_AUTO_CRED_VALIDATION | SCH_CRED_REVOCATION_CHECK_CHAIN; TRACE ("enabled server certificate checks"); } if (opt->allow_invalid_hostname) { schannel_cred.dwFlags |= SCH_CRED_NO_SERVERNAME_CHECK | SCH_CRED_IGNORE_NO_REVOCATION_CHECK; } if (opt->ca_file) { mongoc_secure_channel_setup_ca (secure_channel, opt); } if (opt->crl_file) { mongoc_secure_channel_setup_crl (secure_channel, opt); } if (opt->pem_file) { cert = mongoc_secure_channel_setup_certificate (secure_channel, opt); if (cert) { schannel_cred.cCreds = 1; schannel_cred.paCred = &cert; } } schannel_cred.grbitEnabledProtocols = SP_PROT_TLS1_1_CLIENT | SP_PROT_TLS1_2_CLIENT; secure_channel->cred = (mongoc_secure_channel_cred *) bson_malloc0 ( sizeof (mongoc_secure_channel_cred)); /* Example: * https://msdn.microsoft.com/en-us/library/windows/desktop/aa375454%28v=vs.85%29.aspx * AcquireCredentialsHandle: * https://msdn.microsoft.com/en-us/library/windows/desktop/aa374716.aspx */ sspi_status = AcquireCredentialsHandle ( NULL, /* principal */ UNISP_NAME, /* security package */ SECPKG_CRED_OUTBOUND, /* we are preparing outbound connection */ NULL, /* Optional logon */ &schannel_cred, /* TLS "configuration", "auth data" */ NULL, /* unused */ NULL, /* unused */ &secure_channel->cred->cred_handle, /* credential OUT param */ &secure_channel->cred->time_stamp); /* certificate expiration time */ if (sspi_status != SEC_E_OK) { LPTSTR msg = NULL; FormatMessage (FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ARGUMENT_ARRAY, NULL, GetLastError (), LANG_NEUTRAL, (LPTSTR) &msg, 0, NULL); MONGOC_ERROR ( "Failed to initialize security context, error code: 0x%04X%04X: '%s'", (sspi_status >> 16) & 0xffff, sspi_status & 0xffff, msg); LocalFree (msg); RETURN (NULL); } if (opt->ca_dir) { MONGOC_ERROR ("Setting mongoc_ssl_opt_t.ca_dir has no effect when built " "against Secure Channel"); } mongoc_counter_streams_active_inc (); RETURN ((mongoc_stream_t *) tls); } #endif /* MONGOC_ENABLE_SSL_SECURE_CHANNEL */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-channel.h0000644000076500000240000000230113572250760030436 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_SECURE_CHANNEL_H #define MONGOC_STREAM_TLS_SECURE_CHANNEL_H #ifdef MONGOC_ENABLE_SSL_SECURE_CHANNEL #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_tls_secure_channel_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client); BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_SECURE_CHANNEL */ #endif /* MONGOC_STREAM_TLS_SECURE_CHANNEL_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport-private.h0000644000076500000240000000230413572250760032535 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_SECURE_TRANSPORT_PRIVATE_H #define MONGOC_STREAM_TLS_SECURE_TRANSPORT_PRIVATE_H #ifdef MONGOC_ENABLE_SSL_SECURE_TRANSPORT #include #include BSON_BEGIN_DECLS /** * mongoc_stream_tls_secure_transport_t: * * Private storage for Secure Transport Streams */ typedef struct { SSLContextRef ssl_ctx_ref; CFArrayRef anchors; CFMutableArrayRef my_cert; } mongoc_stream_tls_secure_transport_t; BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_SECURE_TRANSPORT */ #endif /* MONGOC_STREAM_TLS_SECURE_TRANSPORT_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.c0000644000076500000240000004076413572250760031074 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL_SECURE_TRANSPORT #include #include #include #include #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-secure-transport-private.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-counters-private.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-tls-secure-transport-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-tls-secure_transport" static void _mongoc_stream_tls_secure_transport_destroy (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_transport); SSLClose (secure_transport->ssl_ctx_ref); CFRelease (secure_transport->ssl_ctx_ref); secure_transport->ssl_ctx_ref = NULL; /* SSLClose will do IO so destroy must come after */ mongoc_stream_destroy (tls->base_stream); if (secure_transport->anchors) { CFRelease (secure_transport->anchors); } if (secure_transport->my_cert) { CFRelease (secure_transport->my_cert); } bson_free (secure_transport); bson_free (stream); mongoc_counter_streams_active_dec (); mongoc_counter_streams_disposed_inc (); EXIT; } static void _mongoc_stream_tls_secure_transport_failed (mongoc_stream_t *stream) { ENTRY; _mongoc_stream_tls_secure_transport_destroy (stream); EXIT; } static int _mongoc_stream_tls_secure_transport_close (mongoc_stream_t *stream) { int ret = 0; mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_transport); ret = mongoc_stream_close (tls->base_stream); RETURN (ret); } static int _mongoc_stream_tls_secure_transport_flush (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_transport); RETURN (0); } static ssize_t _mongoc_stream_tls_secure_transport_write (mongoc_stream_t *stream, char *buf, size_t buf_len) { OSStatus status; mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ssize_t write_ret; int64_t now; int64_t expire = 0; ENTRY; BSON_ASSERT (secure_transport); if (tls->timeout_msec >= 0) { expire = bson_get_monotonic_time () + (tls->timeout_msec * 1000UL); } status = SSLWrite ( secure_transport->ssl_ctx_ref, buf, buf_len, (size_t *) &write_ret); switch (status) { case errSSLWouldBlock: case noErr: break; case errSSLClosedAbort: errno = ECONNRESET; default: RETURN (-1); } if (expire) { now = bson_get_monotonic_time (); if ((expire - now) < 0) { if (write_ret < buf_len) { mongoc_counter_streams_timeout_inc (); } tls->timeout_msec = 0; } else { tls->timeout_msec = (expire - now) / 1000L; } } RETURN (write_ret); } /* This is copypasta from _mongoc_stream_tls_openssl_writev */ #define MONGOC_STREAM_TLS_BUFFER_SIZE 4096 static ssize_t _mongoc_stream_tls_secure_transport_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = 
(mongoc_stream_tls_secure_transport_t *) tls->ctx; char buf[MONGOC_STREAM_TLS_BUFFER_SIZE]; ssize_t ret = 0; ssize_t child_ret; size_t i; size_t iov_pos = 0; /* There's a bit of a dance to coalesce vectorized writes into * MONGOC_STREAM_TLS_BUFFER_SIZE'd writes to avoid lots of small tls * packets. * * The basic idea is that we want to combine writes in the buffer if they're * smaller than the buffer, flushing as it gets full. For larger writes, or * the last write in the iovec array, we want to ignore the buffer and just * write immediately. We take care of doing buffer writes by re-invoking * ourself with a single iovec_t, pointing at our stack buffer. */ char *buf_head = buf; char *buf_tail = buf; char *buf_end = buf + MONGOC_STREAM_TLS_BUFFER_SIZE; size_t bytes; char *to_write = NULL; size_t to_write_len; BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (secure_transport); ENTRY; tls->timeout_msec = timeout_msec; for (i = 0; i < iovcnt; i++) { iov_pos = 0; while (iov_pos < iov[i].iov_len) { if (buf_head != buf_tail || ((i + 1 < iovcnt) && ((buf_end - buf_tail) > (iov[i].iov_len - iov_pos)))) { /* If we have either of: * - buffered bytes already * - another iovec to send after this one and we don't have more * bytes to send than the size of the buffer. * * copy into the buffer */ bytes = BSON_MIN (iov[i].iov_len - iov_pos, buf_end - buf_tail); memcpy (buf_tail, (char *) iov[i].iov_base + iov_pos, bytes); buf_tail += bytes; iov_pos += bytes; if (buf_tail == buf_end) { /* If we're full, request send */ to_write = buf_head; to_write_len = buf_tail - buf_head; buf_tail = buf_head = buf; } } else { /* Didn't buffer, so just write it through */ to_write = (char *) iov[i].iov_base + iov_pos; to_write_len = iov[i].iov_len - iov_pos; iov_pos += to_write_len; } if (to_write) { /* We get here if we buffered some bytes and filled the buffer, or * if we didn't buffer and have to send out of the iovec */ child_ret = _mongoc_stream_tls_secure_transport_write ( stream, to_write, to_write_len); if (child_ret < 0) { RETURN (ret); } ret += child_ret; if (child_ret < to_write_len) { /* we timed out, so send back what we could send */ RETURN (ret); } to_write = NULL; } } } if (buf_head != buf_tail) { /* If we have any bytes buffered, send */ child_ret = _mongoc_stream_tls_secure_transport_write ( stream, buf_head, buf_tail - buf_head); if (child_ret < 0) { RETURN (child_ret); } ret += child_ret; } if (ret >= 0) { mongoc_counter_streams_egress_add (ret); } TRACE ("Returning %d", (int) ret); RETURN (ret); } /* This function is copypasta of _mongoc_stream_tls_openssl_readv */ static ssize_t _mongoc_stream_tls_secure_transport_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ssize_t ret = 0; size_t i; size_t read_ret; size_t iov_pos = 0; int64_t now; int64_t expire = 0; BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (secure_transport); ENTRY; tls->timeout_msec = timeout_msec; if (timeout_msec >= 0) { expire = bson_get_monotonic_time () + (timeout_msec * 1000UL); } for (i = 0; i < iovcnt; i++) { iov_pos = 0; while (iov_pos < iov[i].iov_len) { OSStatus status = SSLRead (secure_transport->ssl_ctx_ref, (char *) iov[i].iov_base + iov_pos, (int) (iov[i].iov_len - iov_pos), &read_ret); if (status != noErr) { RETURN (-1); } if (expire) { now = bson_get_monotonic_time (); if ((expire - 
now) < 0) { if (read_ret == 0) { mongoc_counter_streams_timeout_inc (); errno = ETIMEDOUT; RETURN (-1); } tls->timeout_msec = 0; } else { tls->timeout_msec = (expire - now) / 1000L; } } ret += read_ret; if ((size_t) ret >= min_bytes) { mongoc_counter_streams_ingress_add (ret); RETURN (ret); } iov_pos += read_ret; } } if (ret >= 0) { mongoc_counter_streams_ingress_add (ret); } RETURN (ret); } static int _mongoc_stream_tls_secure_transport_setsockopt (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_transport); RETURN (mongoc_stream_setsockopt ( tls->base_stream, level, optname, optval, optlen)); } static mongoc_stream_t * _mongoc_stream_tls_secure_transport_get_base_stream (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_transport); RETURN (tls->base_stream); } static bool _mongoc_stream_tls_secure_transport_check_closed ( mongoc_stream_t *stream) /* IN */ { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_transport); RETURN (mongoc_stream_check_closed (tls->base_stream)); } bool mongoc_stream_tls_secure_transport_handshake (mongoc_stream_t *stream, const char *host, int *events, bson_error_t *error) { OSStatus ret = 0; CFStringRef err; char *err_str; mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; mongoc_stream_tls_secure_transport_t *secure_transport = (mongoc_stream_tls_secure_transport_t *) tls->ctx; ENTRY; BSON_ASSERT (secure_transport); ret = SSLHandshake (secure_transport->ssl_ctx_ref); /* Weak certificate validation requested, eg: none */ if (ret == errSSLServerAuthCompleted) { ret = errSSLWouldBlock; } if (ret == noErr) { RETURN (true); } if (ret == errSSLWouldBlock) { *events = POLLIN | POLLOUT; } else { *events = 0; err = SecCopyErrorMessageString (ret, NULL); err_str = _mongoc_cfstringref_to_cstring (err); bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "TLS handshake failed: %s (%d)", err_str, ret); bson_free (err_str); CFRelease (err); } RETURN (false); } static bool _mongoc_stream_tls_secure_channel_timed_out (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; ENTRY; RETURN (mongoc_stream_timed_out (tls->base_stream)); } static bool _mongoc_stream_tls_secure_channel_should_retry (mongoc_stream_t *stream) { mongoc_stream_tls_t *tls = (mongoc_stream_tls_t *) stream; ENTRY; RETURN (mongoc_stream_should_retry (tls->base_stream)); } mongoc_stream_t * mongoc_stream_tls_secure_transport_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client) { mongoc_stream_tls_t *tls; mongoc_stream_tls_secure_transport_t *secure_transport; ENTRY; BSON_ASSERT (base_stream); BSON_ASSERT (opt); if (opt->ca_dir) { MONGOC_ERROR ("Setting mongoc_ssl_opt_t.ca_dir has no effect when built " "against Secure Transport"); RETURN (NULL); } if (opt->crl_file) { MONGOC_ERROR ( "Setting mongoc_ssl_opt_t.crl_file has no effect when built " "against Secure Transport"); RETURN (NULL); } secure_transport = (mongoc_stream_tls_secure_transport_t *) bson_malloc0 ( sizeof 
*secure_transport); tls = (mongoc_stream_tls_t *) bson_malloc0 (sizeof *tls); tls->parent.type = MONGOC_STREAM_TLS; tls->parent.destroy = _mongoc_stream_tls_secure_transport_destroy; tls->parent.failed = _mongoc_stream_tls_secure_transport_failed; tls->parent.close = _mongoc_stream_tls_secure_transport_close; tls->parent.flush = _mongoc_stream_tls_secure_transport_flush; tls->parent.writev = _mongoc_stream_tls_secure_transport_writev; tls->parent.readv = _mongoc_stream_tls_secure_transport_readv; tls->parent.setsockopt = _mongoc_stream_tls_secure_transport_setsockopt; tls->parent.get_base_stream = _mongoc_stream_tls_secure_transport_get_base_stream; tls->parent.check_closed = _mongoc_stream_tls_secure_transport_check_closed; tls->parent.timed_out = _mongoc_stream_tls_secure_channel_timed_out; tls->parent.should_retry = _mongoc_stream_tls_secure_channel_should_retry; memcpy (&tls->ssl_opts, opt, sizeof tls->ssl_opts); tls->handshake = mongoc_stream_tls_secure_transport_handshake; tls->ctx = (void *) secure_transport; tls->timeout_msec = -1; secure_transport->ssl_ctx_ref = SSLCreateContext (kCFAllocatorDefault, client ? kSSLClientSide : kSSLServerSide, kSSLStreamType); SSLSetIOFuncs (secure_transport->ssl_ctx_ref, mongoc_secure_transport_read, mongoc_secure_transport_write); SSLSetProtocolVersionMin (secure_transport->ssl_ctx_ref, kTLSProtocol1); if (opt->pem_file && !mongoc_secure_transport_setup_certificate (secure_transport, opt)) { mongoc_stream_destroy ((mongoc_stream_t *) tls); RETURN (NULL); } if (opt->ca_file && !mongoc_secure_transport_setup_ca (secure_transport, opt)) { mongoc_stream_destroy ((mongoc_stream_t *) tls); RETURN (NULL); } /* don't link base_stream to tls until we're sure we won't destroy tls */ tls->base_stream = base_stream; if (client) { SSLSetSessionOption (secure_transport->ssl_ctx_ref, kSSLSessionOptionBreakOnServerAuth, opt->weak_cert_validation); } else if (!opt->allow_invalid_hostname) { /* used only in mock_server_t tests */ SSLSetClientSideAuthenticate (secure_transport->ssl_ctx_ref, kAlwaysAuthenticate); } if (!opt->allow_invalid_hostname) { SSLSetPeerDomainName (secure_transport->ssl_ctx_ref, host, strlen (host)); } SSLSetConnection (secure_transport->ssl_ctx_ref, tls); mongoc_counter_streams_active_inc (); RETURN ((mongoc_stream_t *) tls); } #endif /* MONGOC_ENABLE_SSL_SECURE_TRANSPORT */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls-secure-transport.h0000644000076500000240000000232313572250760031066 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_SECURE_TRANSPORT_H #define MONGOC_STREAM_TLS_SECURE_TRANSPORT_H #ifdef MONGOC_ENABLE_SSL_SECURE_TRANSPORT #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_tls_secure_transport_new (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client); BSON_END_DECLS #endif /* MONGOC_ENABLE_SSL_SECURE_TRANSPORT */ #endif /* MONGOC_STREAM_TLS_SECURE_TRANSPORT_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls.c0000644000076500000240000001604613572250760025552 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #ifdef MONGOC_ENABLE_SSL #include #include #include #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-stream-tls-private.h" #include "mongoc/mongoc-stream-private.h" #if defined(MONGOC_ENABLE_SSL_OPENSSL) #include "mongoc/mongoc-stream-tls-openssl.h" #include "mongoc/mongoc-openssl-private.h" #elif defined(MONGOC_ENABLE_SSL_LIBRESSL) #include "mongoc/mongoc-libressl-private.h" #include "mongoc/mongoc-stream-tls-libressl.h" #elif defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT) #include "mongoc/mongoc-secure-transport-private.h" #include "mongoc/mongoc-stream-tls-secure-transport.h" #elif defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) #include "mongoc/mongoc-secure-channel-private.h" #include "mongoc/mongoc-stream-tls-secure-channel.h" #endif #include "mongoc/mongoc-stream-tls.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream-tls" /** * mongoc_stream_tls_handshake: * * Performs TLS handshake dance */ bool mongoc_stream_tls_handshake (mongoc_stream_t *stream, const char *host, int32_t timeout_msec, int *events, bson_error_t *error) { mongoc_stream_tls_t *stream_tls = (mongoc_stream_tls_t *) mongoc_stream_get_tls_stream (stream); BSON_ASSERT (stream_tls); BSON_ASSERT (stream_tls->handshake); stream_tls->timeout_msec = timeout_msec; return stream_tls->handshake (stream, host, events, error); } bool mongoc_stream_tls_handshake_block (mongoc_stream_t *stream, const char *host, int32_t timeout_msec, bson_error_t *error) { int events; ssize_t ret = 0; mongoc_stream_poll_t poller; int64_t now; int64_t expire = 0; if (timeout_msec >= 0) { expire = bson_get_monotonic_time () + (timeout_msec * 1000UL); } /* * error variables get re-used a lot. To prevent cross-contamination of error * messages, and still be able to provide a generic failure message when * mongoc_stream_tls_handshake fails without a specific reason, we need to * init * the error code to 0. 
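    *
    * The loop below then drives the non-blocking handshake to completion:
    * each pass calls mongoc_stream_tls_handshake(), and whenever the TLS
    * layer reports that it is waiting for I/O (a non-zero events mask) we
    * poll the base stream for those events with the remaining timeout,
    * until the handshake succeeds, fails, or the deadline expires.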
*/ if (error) { error->code = 0; } do { events = 0; if (mongoc_stream_tls_handshake ( stream, host, timeout_msec, &events, error)) { return true; } if (events) { poller.stream = stream; poller.events = events; poller.revents = 0; if (expire) { now = bson_get_monotonic_time (); if ((expire - now) < 0) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "TLS handshake timed out."); return false; } else { timeout_msec = (expire - now) / 1000L; } } ret = mongoc_stream_poll (&poller, 1, timeout_msec); } } while (events && ret > 0); if (error && !error->code) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "TLS handshake failed."); } return false; } /** * Deprecated. Was never supposed to be part of the public API. * See mongoc_stream_tls_handshake. */ bool mongoc_stream_tls_do_handshake (mongoc_stream_t *stream, int32_t timeout_msec) { mongoc_stream_tls_t *stream_tls = (mongoc_stream_tls_t *) mongoc_stream_get_tls_stream (stream); BSON_ASSERT (stream_tls); MONGOC_ERROR ("This function doesn't do anything. Please call " "mongoc_stream_tls_handshake()"); return false; } /** * Deprecated. Was never supposed to be part of the public API. * See mongoc_stream_tls_handshake. */ bool mongoc_stream_tls_check_cert (mongoc_stream_t *stream, const char *host) { mongoc_stream_tls_t *stream_tls = (mongoc_stream_tls_t *) mongoc_stream_get_tls_stream (stream); BSON_ASSERT (stream_tls); MONGOC_ERROR ("This function doesn't do anything. Please call " "mongoc_stream_tls_handshake()"); return false; } /* *-------------------------------------------------------------------------- * * mongoc_stream_tls_new_with_hostname -- * * Creates a new mongoc_stream_tls_t to communicate with a remote * server using a TLS stream. * * @host the hostname we are connected to and to verify the * server certificate against * * @base_stream should be a stream that will become owned by the * resulting tls stream. It will be used for raw I/O. * * @trust_store_dir should be a path to the SSL cert db to use for * verifying trust of the remote server. * * Returns: * NULL on failure, otherwise a mongoc_stream_t. * * Side effects: * None. 
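 *
 * Example (an illustrative sketch, not part of the original source; it
 * assumes `base` is an already-connected mongoc_stream_t and that
 * "db.example.com" is the hostname in the server certificate):
 *
 *    mongoc_ssl_opt_t opt = *mongoc_ssl_opt_get_default ();
 *    bson_error_t error;
 *    mongoc_stream_t *tls =
 *       mongoc_stream_tls_new_with_hostname (base, "db.example.com", &opt, 1);
 *
 *    if (!tls || !mongoc_stream_tls_handshake_block (tls, "db.example.com",
 *                                                    5000, &error)) {
 *       ... destroy the stream and report error.message ...
 *    }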
* *-------------------------------------------------------------------------- */ mongoc_stream_t * mongoc_stream_tls_new_with_hostname (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client) { BSON_ASSERT (base_stream); /* !client is only used for testing, * when the streams are pretending to be the server */ if (!client || opt->weak_cert_validation) { opt->allow_invalid_hostname = true; } #ifndef _WIN32 /* Silly check for Unix Domain Sockets */ if (!host || (host[0] == '/' && !access (host, F_OK))) { opt->allow_invalid_hostname = true; } #endif #if defined(MONGOC_ENABLE_SSL_OPENSSL) return mongoc_stream_tls_openssl_new (base_stream, host, opt, client); #elif defined(MONGOC_ENABLE_SSL_LIBRESSL) return mongoc_stream_tls_libressl_new (base_stream, host, opt, client); #elif defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT) return mongoc_stream_tls_secure_transport_new ( base_stream, host, opt, client); #elif defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) return mongoc_stream_tls_secure_channel_new (base_stream, host, opt, client); #else #error "Don't know how to create TLS stream" #endif } mongoc_stream_t * mongoc_stream_tls_new (mongoc_stream_t *base_stream, mongoc_ssl_opt_t *opt, int client) { return mongoc_stream_tls_new_with_hostname (base_stream, NULL, opt, client); } #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream-tls.h0000644000076500000240000000433513572250760025555 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_TLS_H #define MONGOC_STREAM_TLS_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-ssl.h" #include "mongoc/mongoc-stream.h" BSON_BEGIN_DECLS typedef struct _mongoc_stream_tls_t mongoc_stream_tls_t; MONGOC_EXPORT (bool) mongoc_stream_tls_handshake (mongoc_stream_t *stream, const char *host, int32_t timeout_msec, int *events, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_stream_tls_handshake_block (mongoc_stream_t *stream, const char *host, int32_t timeout_msec, bson_error_t *error); MONGOC_EXPORT (bool) mongoc_stream_tls_do_handshake (mongoc_stream_t *stream, int32_t timeout_msec) BSON_GNUC_DEPRECATED_FOR (mongoc_stream_tls_handshake); MONGOC_EXPORT (bool) mongoc_stream_tls_check_cert (mongoc_stream_t *stream, const char *host) BSON_GNUC_DEPRECATED_FOR (mongoc_stream_tls_handshake); MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_tls_new_with_hostname (mongoc_stream_t *base_stream, const char *host, mongoc_ssl_opt_t *opt, int client); MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_tls_new (mongoc_stream_t *base_stream, mongoc_ssl_opt_t *opt, int client) BSON_GNUC_DEPRECATED_FOR (mongoc_stream_tls_new_with_hostname); BSON_END_DECLS #endif /* MONGOC_STREAM_TLS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream.c0000644000076500000240000002424013572250760024745 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-array-private.h" #include "mongoc/mongoc-buffer-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-errno-private.h" #include "mongoc/mongoc-flags.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-opcode.h" #include "mongoc/mongoc-rpc-private.h" #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "stream" #ifndef MONGOC_DEFAULT_TIMEOUT_MSEC #define MONGOC_DEFAULT_TIMEOUT_MSEC (60L * 60L * 1000L) #endif /** * mongoc_stream_close: * @stream: A mongoc_stream_t. * * Closes the underlying file-descriptor used by @stream. * * Returns: 0 on success, -1 on failure. */ int mongoc_stream_close (mongoc_stream_t *stream) { int ret; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (stream->close); ret = stream->close (stream); RETURN (ret); } /** * mongoc_stream_failed: * @stream: A mongoc_stream_t. * * Frees any resources referenced by @stream, including the memory allocation * for @stream. * This handler is called upon stream failure, such as network errors, invalid * replies * or replicaset reconfigures deleting the stream */ void mongoc_stream_failed (mongoc_stream_t *stream) { ENTRY; BSON_ASSERT (stream); if (stream->failed) { stream->failed (stream); } else { stream->destroy (stream); } EXIT; } /** * mongoc_stream_destroy: * @stream: A mongoc_stream_t. * * Frees any resources referenced by @stream, including the memory allocation * for @stream. */ void mongoc_stream_destroy (mongoc_stream_t *stream) { ENTRY; if (!stream) { EXIT; } BSON_ASSERT (stream->destroy); stream->destroy (stream); EXIT; } /** * mongoc_stream_flush: * @stream: A mongoc_stream_t. * * Flushes the data in the underlying stream to the transport. * * Returns: 0 on success, -1 on failure. */ int mongoc_stream_flush (mongoc_stream_t *stream) { BSON_ASSERT (stream); return stream->flush (stream); } /** * mongoc_stream_writev: * @stream: A mongoc_stream_t. * @iov: An array of iovec to write to the stream. * @iovcnt: The number of elements in @iov. * * Writes an array of iovec buffers to the underlying stream. * * Returns: the number of bytes written, or -1 upon failure. */ ssize_t mongoc_stream_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec) { ssize_t ret; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (stream->writev); if (timeout_msec < 0) { timeout_msec = MONGOC_DEFAULT_TIMEOUT_MSEC; } DUMP_IOVEC (writev, iov, iovcnt); ret = stream->writev (stream, iov, iovcnt, timeout_msec); RETURN (ret); } /** * mongoc_stream_write: * @stream: A mongoc_stream_t. * @buf: A buffer to write. * @count: The number of bytes to write into @buf. * * Simplified access to mongoc_stream_writev(). Creates a single iovec * with the buffer provided. * * Returns: -1 on failure, otherwise the number of bytes written. 
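 *
 * Illustrative sketch (not from the original source; `buf`, `buflen` and
 * `stream` are assumed to exist): send one already-encoded buffer with a
 * one-second timeout and treat a short write as a failure.
 *
 *    ssize_t sent = mongoc_stream_write (stream, buf, buflen, 1000);
 *    if (sent < 0 || (size_t) sent != buflen) {
 *       ... handle the network error ...
 *    }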
*/ ssize_t mongoc_stream_write (mongoc_stream_t *stream, void *buf, size_t count, int32_t timeout_msec) { mongoc_iovec_t iov; ssize_t ret; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (buf); iov.iov_base = buf; iov.iov_len = count; BSON_ASSERT (stream->writev); ret = mongoc_stream_writev (stream, &iov, 1, timeout_msec); RETURN (ret); } /** * mongoc_stream_readv: * @stream: A mongoc_stream_t. * @iov: An array of iovec containing the location and sizes to read. * @iovcnt: the number of elements in @iov. * @min_bytes: the minimum number of bytes to return, or -1. * * Reads into the various buffers pointed to by @iov and associated * buffer lengths. * * If @min_bytes is specified, then at least min_bytes will be returned unless * eof is encountered. This may result in ETIMEDOUT * * Returns: the number of bytes read or -1 on failure. */ ssize_t mongoc_stream_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec) { ssize_t ret; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (iov); BSON_ASSERT (iovcnt); BSON_ASSERT (stream->readv); ret = stream->readv (stream, iov, iovcnt, min_bytes, timeout_msec); if (ret >= 0) { DUMP_IOVEC (readv, iov, iovcnt); } RETURN (ret); } /** * mongoc_stream_read: * @stream: A mongoc_stream_t. * @buf: A buffer to write into. * @count: The number of bytes to write into @buf. * @min_bytes: The minimum number of bytes to receive * * Simplified access to mongoc_stream_readv(). Creates a single iovec * with the buffer provided. * * If @min_bytes is specified, then at least min_bytes will be returned unless * eof is encountered. This may result in ETIMEDOUT * * Returns: -1 on failure, otherwise the number of bytes read. */ ssize_t mongoc_stream_read (mongoc_stream_t *stream, void *buf, size_t count, size_t min_bytes, int32_t timeout_msec) { mongoc_iovec_t iov; ssize_t ret; ENTRY; BSON_ASSERT (stream); BSON_ASSERT (buf); iov.iov_base = buf; iov.iov_len = count; BSON_ASSERT (stream->readv); ret = mongoc_stream_readv (stream, &iov, 1, min_bytes, timeout_msec); RETURN (ret); } int mongoc_stream_setsockopt (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen) { BSON_ASSERT (stream); if (stream->setsockopt) { return stream->setsockopt (stream, level, optname, optval, optlen); } return 0; } mongoc_stream_t * mongoc_stream_get_base_stream (mongoc_stream_t *stream) /* IN */ { BSON_ASSERT (stream); if (stream->get_base_stream) { return stream->get_base_stream (stream); } return stream; } mongoc_stream_t * mongoc_stream_get_root_stream (mongoc_stream_t *stream) { BSON_ASSERT (stream); while (stream->get_base_stream) { stream = stream->get_base_stream (stream); } return stream; } mongoc_stream_t * mongoc_stream_get_tls_stream (mongoc_stream_t *stream) /* IN */ { BSON_ASSERT (stream); for (; stream && stream->type != MONGOC_STREAM_TLS; stream = stream->get_base_stream (stream)) ; return stream; } ssize_t mongoc_stream_poll (mongoc_stream_poll_t *streams, size_t nstreams, int32_t timeout) { mongoc_stream_poll_t *poller = (mongoc_stream_poll_t *) bson_malloc (sizeof (*poller) * nstreams); int i; int last_type = 0; ssize_t rval = -1; errno = 0; for (i = 0; i < nstreams; i++) { poller[i].stream = mongoc_stream_get_root_stream (streams[i].stream); poller[i].events = streams[i].events; poller[i].revents = 0; if (i == 0) { last_type = poller[i].stream->type; } else if (last_type != poller[i].stream->type) { errno = EINVAL; goto CLEANUP; } } if (!poller[0].stream->poll) { errno = EINVAL; goto CLEANUP; } rval = 
poller[0].stream->poll (poller, nstreams, timeout); if (rval > 0) { for (i = 0; i < nstreams; i++) { streams[i].revents = poller[i].revents; } } CLEANUP: bson_free (poller); return rval; } bool mongoc_stream_check_closed (mongoc_stream_t *stream) { int ret; ENTRY; if (!stream) { return true; } ret = stream->check_closed (stream); RETURN (ret); } bool mongoc_stream_timed_out (mongoc_stream_t *stream) { ENTRY; BSON_ASSERT (stream); /* for e.g. a file stream there is no timed_out function */ RETURN (stream->timed_out && stream->timed_out (stream)); } bool mongoc_stream_should_retry (mongoc_stream_t *stream) { ENTRY; BSON_ASSERT (stream); /* for e.g. a file stream there is no should_retry function */ RETURN (stream->should_retry && stream->should_retry (stream)); } bool _mongoc_stream_writev_full (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec, bson_error_t *error) { size_t total_bytes = 0; int i; ssize_t r; ENTRY; for (i = 0; i < iovcnt; i++) { total_bytes += iov[i].iov_len; } r = mongoc_stream_writev (stream, iov, iovcnt, timeout_msec); TRACE ("writev returned: %ld", r); if (r < 0) { if (error) { char buf[128]; char *errstr; errstr = bson_strerror_r (errno, buf, sizeof (buf)); bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failure during socket delivery: %s (%d)", errstr, errno); } RETURN (false); } if (r != total_bytes) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failure to send all requested bytes (only sent: %" PRIu64 "/%" PRId64 " in %dms) during socket delivery", (uint64_t) r, (int64_t) total_bytes, timeout_msec); RETURN (false); } RETURN (true); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-stream.h0000644000076500000240000000775713572250760024770 0ustar alcaeusstaff/* * Copyright 2013-2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_STREAM_H #define MONGOC_STREAM_H #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-iovec.h" #include "mongoc/mongoc-socket.h" BSON_BEGIN_DECLS typedef struct _mongoc_stream_t mongoc_stream_t; typedef struct _mongoc_stream_poll_t { mongoc_stream_t *stream; int events; int revents; } mongoc_stream_poll_t; struct _mongoc_stream_t { int type; void (*destroy) (mongoc_stream_t *stream); int (*close) (mongoc_stream_t *stream); int (*flush) (mongoc_stream_t *stream); ssize_t (*writev) (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec); ssize_t (*readv) (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec); int (*setsockopt) (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen); mongoc_stream_t *(*get_base_stream) (mongoc_stream_t *stream); bool (*check_closed) (mongoc_stream_t *stream); ssize_t (*poll) (mongoc_stream_poll_t *streams, size_t nstreams, int32_t timeout); void (*failed) (mongoc_stream_t *stream); bool (*timed_out) (mongoc_stream_t *stream); bool (*should_retry) (mongoc_stream_t *stream); void *padding[3]; }; MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_get_base_stream (mongoc_stream_t *stream); MONGOC_EXPORT (mongoc_stream_t *) mongoc_stream_get_tls_stream (mongoc_stream_t *stream); MONGOC_EXPORT (int) mongoc_stream_close (mongoc_stream_t *stream); MONGOC_EXPORT (void) mongoc_stream_destroy (mongoc_stream_t *stream); MONGOC_EXPORT (void) mongoc_stream_failed (mongoc_stream_t *stream); MONGOC_EXPORT (int) mongoc_stream_flush (mongoc_stream_t *stream); MONGOC_EXPORT (ssize_t) mongoc_stream_writev (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, int32_t timeout_msec); MONGOC_EXPORT (ssize_t) mongoc_stream_write (mongoc_stream_t *stream, void *buf, size_t count, int32_t timeout_msec); MONGOC_EXPORT (ssize_t) mongoc_stream_readv (mongoc_stream_t *stream, mongoc_iovec_t *iov, size_t iovcnt, size_t min_bytes, int32_t timeout_msec); MONGOC_EXPORT (ssize_t) mongoc_stream_read (mongoc_stream_t *stream, void *buf, size_t count, size_t min_bytes, int32_t timeout_msec); MONGOC_EXPORT (int) mongoc_stream_setsockopt (mongoc_stream_t *stream, int level, int optname, void *optval, mongoc_socklen_t optlen); MONGOC_EXPORT (bool) mongoc_stream_check_closed (mongoc_stream_t *stream); MONGOC_EXPORT (bool) mongoc_stream_timed_out (mongoc_stream_t *stream); MONGOC_EXPORT (bool) mongoc_stream_should_retry (mongoc_stream_t *stream); MONGOC_EXPORT (ssize_t) mongoc_stream_poll (mongoc_stream_poll_t *streams, size_t nstreams, int32_t timeout); BSON_END_DECLS #endif /* MONGOC_STREAM_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-thread-private.h0000644000076500000240000000462413572250760026402 0ustar alcaeusstaff/* * Copyright 2013-present MongoDB Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_THREAD_PRIVATE_H #define MONGOC_THREAD_PRIVATE_H #include #include "common-thread-private.h" #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-log.h" #if defined(BSON_OS_UNIX) #define mongoc_cond_t pthread_cond_t #define mongoc_cond_broadcast pthread_cond_broadcast #define mongoc_cond_init(_n) pthread_cond_init ((_n), NULL) #define mongoc_cond_wait pthread_cond_wait #define mongoc_cond_signal pthread_cond_signal static BSON_INLINE int mongoc_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, int64_t timeout_msec) { struct timespec to; struct timeval tv; int64_t msec; bson_gettimeofday (&tv); msec = ((int64_t) tv.tv_sec * 1000) + (tv.tv_usec / 1000) + timeout_msec; to.tv_sec = msec / 1000; to.tv_nsec = (msec % 1000) * 1000 * 1000; return pthread_cond_timedwait (cond, mutex, &to); } #define mongoc_cond_destroy pthread_cond_destroy #else #define mongoc_cond_t CONDITION_VARIABLE #define mongoc_cond_init InitializeConditionVariable #define mongoc_cond_wait(_c, _m) mongoc_cond_timedwait ((_c), (_m), INFINITE) static BSON_INLINE int mongoc_cond_timedwait (mongoc_cond_t *cond, bson_mutex_t *mutex, int64_t timeout_msec) { int r; if (SleepConditionVariableCS (cond, mutex, (DWORD) timeout_msec)) { return 0; } else { r = GetLastError (); if (r == WAIT_TIMEOUT || r == ERROR_TIMEOUT) { return WSAETIMEDOUT; } else { return EINVAL; } } } #define mongoc_cond_signal WakeConditionVariable #define mongoc_cond_broadcast WakeAllConditionVariable static BSON_INLINE int mongoc_cond_destroy (mongoc_cond_t *_ignored) { return 0; } #endif #endif /* MONGOC_THREAD_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-description-apm-private.h0000644000076500000240000000371113572250760032077 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_TOPOLOGY_DESCRIPTION_APM_PRIVATE_H #define MONGOC_TOPOLOGY_DESCRIPTION_APM_PRIVATE_H #include #include "mongoc/mongoc-topology-description-private.h" /* Application Performance Monitoring for topology events, complies with the * SDAM Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-monitoring.rst */ void _mongoc_topology_description_monitor_server_opening ( const mongoc_topology_description_t *td, mongoc_server_description_t *sd); void _mongoc_topology_description_monitor_server_changed ( const mongoc_topology_description_t *td, const mongoc_server_description_t *prev_sd, const mongoc_server_description_t *new_sd); void _mongoc_topology_description_monitor_server_closed ( const mongoc_topology_description_t *td, const mongoc_server_description_t *sd); /* td is not const: we set its "opened" field here */ void _mongoc_topology_description_monitor_opening ( mongoc_topology_description_t *td); void _mongoc_topology_description_monitor_changed ( const mongoc_topology_description_t *prev_td, const mongoc_topology_description_t *new_td); void _mongoc_topology_description_monitor_closed ( const mongoc_topology_description_t *td); #endif /* MONGOC_TOPOLOGY_DESCRIPTION_APM_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-description-apm.c0000644000076500000240000001145413572250760030425 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-topology-description-apm-private.h" #include "mongoc/mongoc-server-description-private.h" /* Application Performance Monitoring for topology events, complies with the * SDAM Monitoring Spec: https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring-monitoring.rst */ /* ServerOpeningEvent */ void _mongoc_topology_description_monitor_server_opening ( const mongoc_topology_description_t *td, mongoc_server_description_t *sd) { if (td->apm_callbacks.server_opening && !sd->opened) { mongoc_apm_server_opening_t event; bson_oid_copy (&td->topology_id, &event.topology_id); event.host = &sd->host; event.context = td->apm_context; sd->opened = true; td->apm_callbacks.server_opening (&event); } } /* ServerDescriptionChangedEvent */ void _mongoc_topology_description_monitor_server_changed ( const mongoc_topology_description_t *td, const mongoc_server_description_t *prev_sd, const mongoc_server_description_t *new_sd) { if (td->apm_callbacks.server_changed) { mongoc_apm_server_changed_t event; /* address is same in previous and new sd */ bson_oid_copy (&td->topology_id, &event.topology_id); event.host = &new_sd->host; event.previous_description = prev_sd; event.new_description = new_sd; event.context = td->apm_context; td->apm_callbacks.server_changed (&event); } } /* ServerClosedEvent */ void _mongoc_topology_description_monitor_server_closed ( const mongoc_topology_description_t *td, const mongoc_server_description_t *sd) { if (td->apm_callbacks.server_closed) { mongoc_apm_server_closed_t event; bson_oid_copy (&td->topology_id, &event.topology_id); event.host = &sd->host; event.context = td->apm_context; td->apm_callbacks.server_closed (&event); } } /* Send TopologyOpeningEvent when first called on this topology description. 
* td is not const: we set its "opened" field here */ void _mongoc_topology_description_monitor_opening (mongoc_topology_description_t *td) { mongoc_topology_description_t *prev_td = NULL; size_t i; mongoc_server_description_t *sd; if (td->opened) { return; } if (td->apm_callbacks.topology_changed) { /* prepare to call monitor_changed */ prev_td = bson_malloc0 (sizeof (mongoc_topology_description_t)); mongoc_topology_description_init (prev_td, td->heartbeat_msec); } td->opened = true; if (td->apm_callbacks.topology_opening) { mongoc_apm_topology_opening_t event; bson_oid_copy (&td->topology_id, &event.topology_id); event.context = td->apm_context; td->apm_callbacks.topology_opening (&event); } if (td->apm_callbacks.topology_changed) { /* send initial description-changed event */ _mongoc_topology_description_monitor_changed (prev_td, td); } for (i = 0; i < td->servers->items_len; i++) { sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers, (int) i); _mongoc_topology_description_monitor_server_opening (td, sd); } if (prev_td) { mongoc_topology_description_destroy (prev_td); bson_free (prev_td); } } /* TopologyDescriptionChangedEvent */ void _mongoc_topology_description_monitor_changed ( const mongoc_topology_description_t *prev_td, const mongoc_topology_description_t *new_td) { if (new_td->apm_callbacks.topology_changed) { mongoc_apm_topology_changed_t event; /* callbacks, context, and id are the same in previous and new td */ bson_oid_copy (&new_td->topology_id, &event.topology_id); event.context = new_td->apm_context; event.previous_description = prev_td; event.new_description = new_td; new_td->apm_callbacks.topology_changed (&event); } } /* TopologyClosedEvent */ void _mongoc_topology_description_monitor_closed ( const mongoc_topology_description_t *td) { if (td->apm_callbacks.topology_closed) { mongoc_apm_topology_closed_t event; bson_oid_copy (&td->topology_id, &event.topology_id); event.context = td->apm_context; td->apm_callbacks.topology_closed (&event); } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-description-private.h0000644000076500000240000001033413572250760031323 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_TOPOLOGY_DESCRIPTION_PRIVATE_H #define MONGOC_TOPOLOGY_DESCRIPTION_PRIVATE_H #include "mongoc/mongoc-set-private.h" #include "mongoc/mongoc-server-description.h" #include "mongoc/mongoc-array-private.h" #include "mongoc/mongoc-topology-description.h" #include "mongoc/mongoc-apm-private.h" typedef enum { MONGOC_TOPOLOGY_UNKNOWN, MONGOC_TOPOLOGY_SHARDED, MONGOC_TOPOLOGY_RS_NO_PRIMARY, MONGOC_TOPOLOGY_RS_WITH_PRIMARY, MONGOC_TOPOLOGY_SINGLE, MONGOC_TOPOLOGY_DESCRIPTION_TYPES } mongoc_topology_description_type_t; struct _mongoc_topology_description_t { bson_oid_t topology_id; bool opened; mongoc_topology_description_type_t type; int64_t heartbeat_msec; mongoc_set_t *servers; char *set_name; int64_t max_set_version; bson_oid_t max_election_id; bson_error_t compatibility_error; uint32_t max_server_id; bool stale; unsigned int rand_seed; /* the greatest seen cluster time, for a MongoDB 3.6+ sharded cluster. * see Driver Sessions Spec. */ bson_t cluster_time; /* smallest seen logicalSessionTimeoutMinutes, or -1 if any server has no * logicalSessionTimeoutMinutes. see Server Discovery and Monitoring Spec */ int64_t session_timeout_minutes; mongoc_apm_callbacks_t apm_callbacks; void *apm_context; }; typedef enum { MONGOC_SS_READ, MONGOC_SS_WRITE } mongoc_ss_optype_t; void mongoc_topology_description_init (mongoc_topology_description_t *description, int64_t heartbeat_msec); void _mongoc_topology_description_copy_to (const mongoc_topology_description_t *src, mongoc_topology_description_t *dst); void mongoc_topology_description_destroy ( mongoc_topology_description_t *description); void mongoc_topology_description_handle_ismaster ( mongoc_topology_description_t *topology, uint32_t server_id, const bson_t *reply, int64_t rtt_msec, const bson_error_t *error /* IN */); mongoc_server_description_t * mongoc_topology_description_select (mongoc_topology_description_t *description, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_pref, int64_t local_threshold_ms); mongoc_server_description_t * mongoc_topology_description_server_by_id ( mongoc_topology_description_t *description, uint32_t id, bson_error_t *error); int32_t mongoc_topology_description_lowest_max_wire_version ( const mongoc_topology_description_t *td); bool mongoc_topology_description_all_sds_have_write_date ( const mongoc_topology_description_t *td); bool _mongoc_topology_description_validate_max_staleness ( const mongoc_topology_description_t *td, int64_t max_staleness_seconds, bson_error_t *error); void mongoc_topology_description_suitable_servers ( mongoc_array_t *set, /* OUT */ mongoc_ss_optype_t optype, mongoc_topology_description_t *topology, const mongoc_read_prefs_t *read_pref, size_t local_threshold_ms); bool mongoc_topology_description_has_data_node (mongoc_topology_description_t *td); void mongoc_topology_description_invalidate_server ( mongoc_topology_description_t *topology, uint32_t id, const bson_error_t *error /* IN */); bool mongoc_topology_description_add_server (mongoc_topology_description_t *topology, const char *server, uint32_t *id /* OUT */); void mongoc_topology_description_update_cluster_time ( mongoc_topology_description_t *td, const bson_t *reply); #endif /* MONGOC_TOPOLOGY_DESCRIPTION_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-description.c0000644000076500000240000020135713572250760027655 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-array-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-server-description-private.h" #include "mongoc/mongoc-topology-description-apm-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-read-prefs-private.h" #include "mongoc/mongoc-set-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-thread-private.h" static bool _is_data_node (mongoc_server_description_t *sd) { switch (sd->type) { case MONGOC_SERVER_MONGOS: case MONGOC_SERVER_STANDALONE: case MONGOC_SERVER_RS_SECONDARY: case MONGOC_SERVER_RS_PRIMARY: return true; case MONGOC_SERVER_RS_OTHER: case MONGOC_SERVER_RS_ARBITER: case MONGOC_SERVER_UNKNOWN: case MONGOC_SERVER_POSSIBLE_PRIMARY: case MONGOC_SERVER_RS_GHOST: case MONGOC_SERVER_DESCRIPTION_TYPES: default: return false; } } static void _mongoc_topology_server_dtor (void *server_, void *ctx_) { mongoc_server_description_destroy ((mongoc_server_description_t *) server_); } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_init -- * * Initialize the given topology description * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_topology_description_init (mongoc_topology_description_t *description, int64_t heartbeat_msec) { ENTRY; BSON_ASSERT (description); memset (description, 0, sizeof (*description)); bson_oid_init (&description->topology_id, NULL); description->opened = false; description->type = MONGOC_TOPOLOGY_UNKNOWN; description->heartbeat_msec = heartbeat_msec; description->servers = mongoc_set_new (8, _mongoc_topology_server_dtor, NULL); description->set_name = NULL; description->max_set_version = MONGOC_NO_SET_VERSION; description->stale = true; description->rand_seed = (unsigned int) bson_get_monotonic_time (); bson_init (&description->cluster_time); description->session_timeout_minutes = MONGOC_NO_SESSIONS; EXIT; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_copy_to -- * * Deep-copy @src to an uninitialized topology description @dst. * @dst must not already point to any allocated resources. Clean * up with mongoc_topology_description_destroy. * * WARNING: @dst's rand_seed is not initialized. * * Returns: * None. * * Side effects: * None. 
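 *
 * Intended lifecycle, sketched with hypothetical locals (the destination is
 * uninitialized storage and is torn down with the normal destroy call; the
 * rand_seed assignment mirrors what mongoc_topology_description_init does,
 * since the copy deliberately leaves that field alone):
 *
 *   mongoc_topology_description_t snapshot;
 *
 *   _mongoc_topology_description_copy_to (&td, &snapshot);
 *   snapshot.rand_seed = (unsigned int) bson_get_monotonic_time ();
 *   (read-only use of the snapshot goes here)
 *   mongoc_topology_description_destroy (&snapshot);
 *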
* *-------------------------------------------------------------------------- */ void _mongoc_topology_description_copy_to (const mongoc_topology_description_t *src, mongoc_topology_description_t *dst) { size_t nitems; size_t i; mongoc_server_description_t *sd; uint32_t id; ENTRY; BSON_ASSERT (src); BSON_ASSERT (dst); bson_oid_copy (&src->topology_id, &dst->topology_id); dst->opened = src->opened; dst->type = src->type; dst->heartbeat_msec = src->heartbeat_msec; nitems = bson_next_power_of_two (src->servers->items_len); dst->servers = mongoc_set_new (nitems, _mongoc_topology_server_dtor, NULL); for (i = 0; i < src->servers->items_len; i++) { sd = mongoc_set_get_item_and_id (src->servers, (int) i, &id); mongoc_set_add ( dst->servers, id, mongoc_server_description_new_copy (sd)); } dst->set_name = bson_strdup (src->set_name); dst->max_set_version = src->max_set_version; memcpy (&dst->compatibility_error, &src->compatibility_error, sizeof (bson_error_t)); dst->max_server_id = src->max_server_id; dst->stale = src->stale; memcpy (&dst->apm_callbacks, &src->apm_callbacks, sizeof (mongoc_apm_callbacks_t)); dst->apm_context = src->apm_context; bson_copy_to (&src->cluster_time, &dst->cluster_time); dst->session_timeout_minutes = src->session_timeout_minutes; EXIT; } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_destroy -- * * Destroy allocated resources within @description * * Returns: * None. * * Side effects: * None. * *-------------------------------------------------------------------------- */ void mongoc_topology_description_destroy (mongoc_topology_description_t *description) { ENTRY; BSON_ASSERT (description); if (description->servers) { mongoc_set_destroy (description->servers); } if (description->set_name) { bson_free (description->set_name); } bson_destroy (&description->cluster_time); EXIT; } /* find the primary, then stop iterating */ static bool _mongoc_topology_description_has_primary_cb (void *item, void *ctx /* OUT */) { mongoc_server_description_t *server = (mongoc_server_description_t *) item; mongoc_server_description_t **primary = (mongoc_server_description_t **) ctx; /* TODO should this include MONGOS? */ if (server->type == MONGOC_SERVER_RS_PRIMARY || server->type == MONGOC_SERVER_STANDALONE) { *primary = (mongoc_server_description_t *) item; return false; } return true; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_has_primary -- * * If topology has a primary, return it. * * Returns: * A pointer to the primary, or NULL. * * Side effects: * None * *-------------------------------------------------------------------------- */ static mongoc_server_description_t * _mongoc_topology_description_has_primary ( mongoc_topology_description_t *description) { mongoc_server_description_t *primary = NULL; mongoc_set_for_each (description->servers, _mongoc_topology_description_has_primary_cb, &primary); return primary; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_later_election -- * * Check if we've seen a more recent election in the replica set * than this server has. * * Returns: * True if the topology description's max replica set version plus * election id is later than the server description's. 
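 *
 * Worked illustration with made-up values: if td->max_set_version is 5 and
 * sd->set_version is 4, the server's election is considered stale no matter
 * what the election ids are; if both set versions are 5, the result falls
 * back to bson_oid_compare on the election ids, and a strictly greater
 * td->max_election_id again marks the server's election as stale.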
* * Side effects: * None * *-------------------------------------------------------------------------- */ static bool _mongoc_topology_description_later_election (mongoc_topology_description_t *td, mongoc_server_description_t *sd) { /* initially max_set_version is -1 and max_election_id is zeroed */ return td->max_set_version > sd->set_version || (td->max_set_version == sd->set_version && bson_oid_compare (&td->max_election_id, &sd->election_id) > 0); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_set_max_set_version -- * * Remember that we've seen a new replica set version. Unconditionally * sets td->set_version to sd->set_version. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_set_max_set_version ( mongoc_topology_description_t *td, mongoc_server_description_t *sd) { td->max_set_version = sd->set_version; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_set_max_election_id -- * * Remember that we've seen a new election id. Unconditionally sets * td->max_election_id to sd->election_id. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_set_max_election_id ( mongoc_topology_description_t *td, mongoc_server_description_t *sd) { bson_oid_copy (&sd->election_id, &td->max_election_id); } static bool _mongoc_topology_description_server_is_candidate ( mongoc_server_description_type_t desc_type, mongoc_read_mode_t read_mode, mongoc_topology_description_type_t topology_type) { switch ((int) topology_type) { case MONGOC_TOPOLOGY_SINGLE: switch ((int) desc_type) { case MONGOC_SERVER_STANDALONE: return true; default: return false; } case MONGOC_TOPOLOGY_RS_NO_PRIMARY: case MONGOC_TOPOLOGY_RS_WITH_PRIMARY: switch ((int) read_mode) { case MONGOC_READ_PRIMARY: switch ((int) desc_type) { case MONGOC_SERVER_RS_PRIMARY: return true; default: return false; } case MONGOC_READ_SECONDARY: switch ((int) desc_type) { case MONGOC_SERVER_RS_SECONDARY: return true; default: return false; } default: switch ((int) desc_type) { case MONGOC_SERVER_RS_PRIMARY: case MONGOC_SERVER_RS_SECONDARY: return true; default: return false; } } case MONGOC_TOPOLOGY_SHARDED: switch ((int) desc_type) { case MONGOC_SERVER_MONGOS: return true; default: return false; } default: return false; } } typedef struct _mongoc_suitable_data_t { mongoc_read_mode_t read_mode; mongoc_topology_description_type_t topology_type; mongoc_server_description_t *primary; /* OUT */ mongoc_server_description_t **candidates; /* OUT */ size_t candidates_len; /* OUT */ bool has_secondary; /* OUT */ } mongoc_suitable_data_t; static bool _mongoc_replica_set_read_suitable_cb (void *item, void *ctx) { mongoc_server_description_t *server = (mongoc_server_description_t *) item; mongoc_suitable_data_t *data = (mongoc_suitable_data_t *) ctx; /* primary's used in staleness calculation, even with mode SECONDARY */ if (server->type == MONGOC_SERVER_RS_PRIMARY) { data->primary = server; } if (_mongoc_topology_description_server_is_candidate ( server->type, data->read_mode, data->topology_type)) { if (server->type == MONGOC_SERVER_RS_PRIMARY) { if (data->read_mode == MONGOC_READ_PRIMARY || data->read_mode == MONGOC_READ_PRIMARY_PREFERRED) { /* we want a primary and we have one, done! 
*/ return false; } } if (server->type == MONGOC_SERVER_RS_SECONDARY) { data->has_secondary = true; } /* add to our candidates */ data->candidates[data->candidates_len++] = server; } else { TRACE ("Rejected [%s] [%s] for mode [%s]", mongoc_server_description_type (server), server->host.host_and_port, _mongoc_read_mode_as_str (data->read_mode)); } return true; } /* if any mongos are candidates, add them to the candidates array */ static void _mongoc_try_mode_secondary (mongoc_array_t *set, /* OUT */ mongoc_topology_description_t *topology, const mongoc_read_prefs_t *read_pref, size_t local_threshold_ms) { mongoc_read_prefs_t *secondary; secondary = mongoc_read_prefs_copy (read_pref); mongoc_read_prefs_set_mode (secondary, MONGOC_READ_SECONDARY); mongoc_topology_description_suitable_servers ( set, MONGOC_SS_READ, topology, secondary, local_threshold_ms); mongoc_read_prefs_destroy (secondary); } /* if any mongos are candidates, add them to the candidates array */ static bool _mongoc_find_suitable_mongos_cb (void *item, void *ctx) { mongoc_server_description_t *server = (mongoc_server_description_t *) item; mongoc_suitable_data_t *data = (mongoc_suitable_data_t *) ctx; if (_mongoc_topology_description_server_is_candidate ( server->type, data->read_mode, data->topology_type)) { data->candidates[data->candidates_len++] = server; } return true; } /* *------------------------------------------------------------------------- * * mongoc_topology_description_lowest_max_wire_version -- * * The topology's max wire version. * * NOTE: this method should only be called while holding the mutex on * the owning topology object. * * Returns: * The minimum of all known servers' max wire versions, or INT32_MAX * if there are no known servers. * * Side effects: * None. * *------------------------------------------------------------------------- */ int32_t mongoc_topology_description_lowest_max_wire_version ( const mongoc_topology_description_t *td) { int i; int32_t ret = INT32_MAX; mongoc_server_description_t *sd; for (i = 0; (size_t) i < td->servers->items_len; i++) { sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers, i); if (sd->type != MONGOC_SERVER_UNKNOWN && sd->max_wire_version < ret) { ret = sd->max_wire_version; } } return ret; } /* *------------------------------------------------------------------------- * * mongoc_topology_description_all_sds_have_write_date -- * * Whether the primary and all secondaries' server descriptions have * last_write_date_ms. * * Side effects: * None. * *------------------------------------------------------------------------- */ bool mongoc_topology_description_all_sds_have_write_date ( const mongoc_topology_description_t *td) { int i; mongoc_server_description_t *sd; for (i = 0; (size_t) i < td->servers->items_len; i++) { sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers, i); if (sd->last_write_date_ms <= 0 && (sd->type == MONGOC_SERVER_RS_PRIMARY || sd->type == MONGOC_SERVER_RS_SECONDARY)) { return false; } } return true; } /* *------------------------------------------------------------------------- * * _mongoc_topology_description_validate_max_staleness -- * * If the provided "maxStalenessSeconds" component of the read * preference is not valid for this topology, fill out @error and * return false. * * Side effects: * None. 
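 *
 * Worked example with illustrative numbers: with heartbeatFrequencyMS of
 * 10000 and an idle write period of 10000 ms, the first rule requires
 * maxStalenessSeconds * 1000 >= 20000, i.e. at least 20 seconds; the second
 * rule then raises the effective minimum to smallestMaxStalenessSeconds
 * (90), so a setting of 60 is rejected while 120 satisfies both checks.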
* *------------------------------------------------------------------------- */ bool _mongoc_topology_description_validate_max_staleness ( const mongoc_topology_description_t *td, int64_t max_staleness_seconds, bson_error_t *error) { mongoc_topology_description_type_t td_type; /* Server Selection Spec: A driver MUST raise an error if the TopologyType * is ReplicaSetWithPrimary or ReplicaSetNoPrimary and either of these * conditions is false: * * maxStalenessSeconds * 1000 >= heartbeatFrequencyMS + idleWritePeriodMS * maxStalenessSeconds >= smallestMaxStalenessSeconds */ td_type = td->type; if (td_type != MONGOC_TOPOLOGY_RS_WITH_PRIMARY && td_type != MONGOC_TOPOLOGY_RS_NO_PRIMARY) { return true; } if (max_staleness_seconds * 1000 < td->heartbeat_msec + MONGOC_IDLE_WRITE_PERIOD_MS) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "maxStalenessSeconds is set to %" PRId64 ", it must be at least heartbeatFrequencyMS (%" PRId64 ") + server's idle write period (%d seconds)", max_staleness_seconds, td->heartbeat_msec, MONGOC_IDLE_WRITE_PERIOD_MS / 1000); return false; } if (max_staleness_seconds < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "maxStalenessSeconds is set to %" PRId64 ", it must be at least %d seconds", max_staleness_seconds, MONGOC_SMALLEST_MAX_STALENESS_SECONDS); return false; } return true; } /* *------------------------------------------------------------------------- * * mongoc_topology_description_suitable_servers -- * * Fill out an array of servers matching the read preference and * localThresholdMS. * * NOTE: this method should only be called while holding the mutex on * the owning topology object. * * Side effects: * None. * *------------------------------------------------------------------------- */ void mongoc_topology_description_suitable_servers ( mongoc_array_t *set, /* OUT */ mongoc_ss_optype_t optype, mongoc_topology_description_t *topology, const mongoc_read_prefs_t *read_pref, size_t local_threshold_ms) { mongoc_suitable_data_t data; mongoc_server_description_t **candidates; mongoc_server_description_t *server; int64_t nearest = -1; int i; mongoc_read_mode_t read_mode = mongoc_read_prefs_get_mode (read_pref); candidates = (mongoc_server_description_t **) bson_malloc0 ( sizeof (*candidates) * topology->servers->items_len); data.read_mode = read_mode; data.topology_type = topology->type; data.primary = NULL; data.candidates = candidates; data.candidates_len = 0; data.has_secondary = false; /* Single server -- * Either it is suitable or it isn't */ if (topology->type == MONGOC_TOPOLOGY_SINGLE) { server = (mongoc_server_description_t *) mongoc_set_get_item ( topology->servers, 0); if (_mongoc_topology_description_server_is_candidate ( server->type, read_mode, topology->type)) { _mongoc_array_append_val (set, server); } else { TRACE ( "Rejected [%s] [%s] for read mode [%s] with topology type Single", mongoc_server_description_type (server), server->host.host_and_port, _mongoc_read_mode_as_str (read_mode)); } goto DONE; } /* Replica sets -- * Find suitable servers based on read mode */ if (topology->type == MONGOC_TOPOLOGY_RS_NO_PRIMARY || topology->type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY) { if (optype == MONGOC_SS_READ) { mongoc_set_for_each ( topology->servers, _mongoc_replica_set_read_suitable_cb, &data); if (read_mode == MONGOC_READ_PRIMARY) { if (data.primary) { _mongoc_array_append_val (set, data.primary); } goto DONE; } if (read_mode == 
MONGOC_READ_PRIMARY_PREFERRED && data.primary) { _mongoc_array_append_val (set, data.primary); goto DONE; } if (read_mode == MONGOC_READ_SECONDARY_PREFERRED) { /* try read_mode SECONDARY */ _mongoc_try_mode_secondary ( set, topology, read_pref, local_threshold_ms); /* otherwise fall back to primary */ if (!set->len && data.primary) { _mongoc_array_append_val (set, data.primary); } goto DONE; } if (read_mode == MONGOC_READ_SECONDARY) { for (i = 0; i < data.candidates_len; i++) { if (candidates[i] && candidates[i]->type != MONGOC_SERVER_RS_SECONDARY) { TRACE ("Rejected [%s] [%s] for mode [%s] with RS topology", mongoc_server_description_type (candidates[i]), candidates[i]->host.host_and_port, _mongoc_read_mode_as_str (read_mode)); candidates[i] = NULL; } } } /* mode is SECONDARY or NEAREST, filter by staleness and tags */ mongoc_server_description_filter_stale (data.candidates, data.candidates_len, data.primary, topology->heartbeat_msec, read_pref); mongoc_server_description_filter_tags ( data.candidates, data.candidates_len, read_pref); } else if (topology->type == MONGOC_TOPOLOGY_RS_WITH_PRIMARY) { /* includes optype == MONGOC_SS_WRITE as the exclusion of the above if */ mongoc_set_for_each (topology->servers, _mongoc_topology_description_has_primary_cb, &data.primary); if (data.primary) { _mongoc_array_append_val (set, data.primary); goto DONE; } } } /* Sharded clusters -- * All candidates in the latency window are suitable */ if (topology->type == MONGOC_TOPOLOGY_SHARDED) { mongoc_set_for_each ( topology->servers, _mongoc_find_suitable_mongos_cb, &data); } /* Ways to get here: * - secondary read * - secondary preferred read * - primary_preferred and no primary read * - sharded anything * Find the nearest, then select within the window */ for (i = 0; i < data.candidates_len; i++) { if (candidates[i] && (nearest == -1 || nearest > candidates[i]->round_trip_time_msec)) { nearest = candidates[i]->round_trip_time_msec; } } for (i = 0; i < data.candidates_len; i++) { if (candidates[i] && (candidates[i]->round_trip_time_msec <= nearest + local_threshold_ms)) { _mongoc_array_append_val (set, candidates[i]); } } DONE: bson_free (candidates); return; } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_has_data_node -- * * Internal method: are any servers not Arbiter, Ghost, or Unknown? * *-------------------------------------------------------------------------- */ bool mongoc_topology_description_has_data_node (mongoc_topology_description_t *td) { int i; mongoc_server_description_t *sd; for (i = 0; i < (int) td->servers->items_len; i++) { sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers, i); if (_is_data_node (sd)) { return true; } } return false; } /* *------------------------------------------------------------------------- * * mongoc_topology_description_select -- * * Return a server description of a node that is appropriate for * the given read preference and operation type. * * NOTE: this method simply attempts to select a server from the * current topology, it does not retry or trigger topology checks. * * NOTE: this method should only be called while holding the mutex on * the owning topology object. * * Returns: * Selected server description, or NULL upon failure. * * Side effects: * None. 
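 *
 * Usage sketch (hypothetical caller that already holds the topology mutex;
 * the read preference and the 15 ms localThresholdMS would normally come
 * from the URI):
 *
 *   mongoc_server_description_t *sd;
 *
 *   sd = mongoc_topology_description_select (td, MONGOC_SS_READ,
 *                                            read_prefs, 15);
 *   if (!sd) {
 *      (no suitable server: the caller scans the topology and retries)
 *   }
 *
 * Candidates are filtered to those whose round trip time is within
 * local_threshold_ms of the fastest candidate (with a nearest RTT of 10 ms
 * and a threshold of 15 ms, servers up to 25 ms stay in the window), and
 * one of the survivors is picked at random.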
* *------------------------------------------------------------------------- */ mongoc_server_description_t * mongoc_topology_description_select (mongoc_topology_description_t *topology, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_pref, int64_t local_threshold_ms) { mongoc_array_t suitable_servers; mongoc_server_description_t *sd = NULL; int rand_n; ENTRY; if (topology->type == MONGOC_TOPOLOGY_SINGLE) { sd = (mongoc_server_description_t *) mongoc_set_get_item ( topology->servers, 0); if (sd->has_is_master) { RETURN (sd); } else { TRACE ("Topology type single, [%s] is down", sd->host.host_and_port); RETURN (NULL); } } _mongoc_array_init (&suitable_servers, sizeof (mongoc_server_description_t *)); mongoc_topology_description_suitable_servers ( &suitable_servers, optype, topology, read_pref, local_threshold_ms); if (suitable_servers.len != 0) { rand_n = _mongoc_rand_simple (&topology->rand_seed); sd = _mongoc_array_index (&suitable_servers, mongoc_server_description_t *, rand_n % suitable_servers.len); } _mongoc_array_destroy (&suitable_servers); if (sd) { TRACE ("Topology type [%s], selected [%s] [%s]", mongoc_topology_description_type (topology), mongoc_server_description_type (sd), sd->host.host_and_port); } RETURN (sd); } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_server_by_id -- * * Get the server description for @id, if that server is present * in @description. Otherwise, return NULL and fill out optional * @error. * * NOTE: In most cases, caller should create a duplicate of the * returned server description. Caller should hold the mutex on the * owning topology object while calling this method and while using * the returned reference. * * Returns: * A mongoc_server_description_t *, or NULL. * * Side effects: * Fills out optional @error if server not found. * *-------------------------------------------------------------------------- */ mongoc_server_description_t * mongoc_topology_description_server_by_id ( mongoc_topology_description_t *description, uint32_t id, bson_error_t *error) { mongoc_server_description_t *sd; BSON_ASSERT (description); sd = (mongoc_server_description_t *) mongoc_set_get (description->servers, id); if (!sd) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NOT_ESTABLISHED, "Could not find description for node %u", id); } return sd; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_remove_server -- * * If present, remove this server from this topology description. * * Returns: * None. * * Side effects: * Removes the server description from topology and destroys it. 
* *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_remove_server ( mongoc_topology_description_t *description, mongoc_server_description_t *server) { BSON_ASSERT (description); BSON_ASSERT (server); _mongoc_topology_description_monitor_server_closed (description, server); mongoc_set_rm (description->servers, server->id); /* Check if removing server resulted in an empty set of servers */ if (description->servers->items_len == 0) { MONGOC_WARNING ("Last server removed from topology"); } } typedef struct _mongoc_address_and_id_t { const char *address; /* IN */ bool found; /* OUT */ uint32_t id; /* OUT */ } mongoc_address_and_id_t; /* find the given server and stop iterating */ static bool _mongoc_topology_description_has_server_cb (void *item, void *ctx /* IN - OUT */) { mongoc_server_description_t *server = (mongoc_server_description_t *) item; mongoc_address_and_id_t *data = (mongoc_address_and_id_t *) ctx; if (strcasecmp (data->address, server->connection_address) == 0) { data->found = true; data->id = server->id; return false; } return true; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_has_set_version -- * * Whether @topology's max replica set version has been set. * * Returns: * True if the max setVersion was ever set. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_topology_description_has_set_version (mongoc_topology_description_t *td) { return td->max_set_version != MONGOC_NO_SET_VERSION; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_topology_has_server -- * * Return true if @server is in @topology. If so, place its id in * @id if given. * * Returns: * True if server is in topology, false otherwise. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_topology_description_has_server ( mongoc_topology_description_t *description, const char *address, uint32_t *id /* OUT */) { mongoc_address_and_id_t data; BSON_ASSERT (description); BSON_ASSERT (address); data.address = address; data.found = false; mongoc_set_for_each ( description->servers, _mongoc_topology_description_has_server_cb, &data); if (data.found && id) { *id = data.id; } return data.found; } typedef struct _mongoc_address_and_type_t { const char *address; mongoc_server_description_type_t type; } mongoc_address_and_type_t; static bool _mongoc_label_unknown_member_cb (void *item, void *ctx) { mongoc_server_description_t *server = (mongoc_server_description_t *) item; mongoc_address_and_type_t *data = (mongoc_address_and_type_t *) ctx; if (strcasecmp (server->connection_address, data->address) == 0 && server->type == MONGOC_SERVER_UNKNOWN) { mongoc_server_description_set_state (server, data->type); return false; } return true; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_label_unknown_member -- * * Find the server description with the given @address and if its * type is UNKNOWN, set its type to @type. * * Returns: * None. * * Side effects: * None. 
* *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_label_unknown_member ( mongoc_topology_description_t *description, const char *address, mongoc_server_description_type_t type) { mongoc_address_and_type_t data; BSON_ASSERT (description); BSON_ASSERT (address); data.type = type; data.address = address; mongoc_set_for_each ( description->servers, _mongoc_label_unknown_member_cb, &data); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_set_state -- * * Change the state of this cluster and unblock things waiting * on a change of topology type. * * Returns: * None. * * Side effects: * Unblocks anything waiting on this description to change states. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_set_state ( mongoc_topology_description_t *description, mongoc_topology_description_type_t type) { description->type = type; } static void _update_rs_type (mongoc_topology_description_t *topology) { if (_mongoc_topology_description_has_primary (topology)) { _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_RS_WITH_PRIMARY); } else { _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_RS_NO_PRIMARY); } } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_check_if_has_primary -- * * If there is a primary in topology, set topology * type to RS_WITH_PRIMARY, otherwise set it to * RS_NO_PRIMARY. * * Returns: * None. * * Side effects: * Changes the topology type. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_check_if_has_primary ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { _update_rs_type (topology); } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_invalidate_server -- * * Invalidate a server if a network error occurred while using it in * another part of the client. Server description is set to type * UNKNOWN, the error is recorded, and other parameters are reset to * defaults. Pass in the reason for invalidation in @error. * * NOTE: this method should only be called while holding the mutex on * the owning topology object. * *-------------------------------------------------------------------------- */ void mongoc_topology_description_invalidate_server ( mongoc_topology_description_t *topology, uint32_t id, const bson_error_t *error /* IN */) { BSON_ASSERT (error); /* send NULL ismaster reply */ mongoc_topology_description_handle_ismaster (topology, id, NULL, 0, error); } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_add_server -- * * Add the specified server to the cluster topology if it is not * already a member. If @id, place its id in @id. * * NOTE: this method should only be called while holding the mutex on * the owning topology object. * * Return: * True if the server was added or already existed in the topology, * false if an error occurred. * * Side effects: * None. 
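 *
 * Seeding sketch (the host strings are hypothetical):
 *
 *   uint32_t id;
 *
 *   mongoc_topology_description_add_server (td, "localhost:27017", &id);
 *   mongoc_topology_description_add_server (td, "localhost:27018", NULL);
 *
 * Each previously unknown address receives the next value of
 * td->max_server_id, and a ServerOpeningEvent is emitted once APM callbacks
 * have been registered.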
* *-------------------------------------------------------------------------- */ bool mongoc_topology_description_add_server (mongoc_topology_description_t *topology, const char *server, uint32_t *id /* OUT */) { uint32_t server_id; mongoc_server_description_t *description; BSON_ASSERT (topology); BSON_ASSERT (server); if (!_mongoc_topology_description_has_server ( topology, server, &server_id)) { /* TODO this might not be an accurate count in all cases */ server_id = ++topology->max_server_id; description = (mongoc_server_description_t *) bson_malloc0 (sizeof *description); mongoc_server_description_init (description, server, server_id); mongoc_set_add (topology->servers, server_id, description); /* if we're in topology_new then no callbacks are registered and this is * a no-op. later, if we discover a new RS member this sends an event. */ _mongoc_topology_description_monitor_server_opening (topology, description); } if (id) { *id = server_id; } return true; } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_update_cluster_time -- * * Drivers Session Spec: Drivers MUST examine responses to server commands to * see if they contain a top level field named $clusterTime formatted as * follows: * * { * ... * $clusterTime : { * clusterTime : , * signature : { * hash : , * keyId : * } * }, * ... * } * * Whenever a driver receives a clusterTime from a server it MUST compare it * to the current highest seen clusterTime for the cluster. If the new * clusterTime is higher than the highest seen clusterTime it MUST become * the new highest seen clusterTime. Two clusterTimes are compared using * only the BsonTimestamp value of the clusterTime embedded field (be sure to * include both the timestamp and the increment of the BsonTimestamp in the * comparison). The signature field does not participate in the comparison. 
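 *
 * Comparison example with made-up timestamps: a reply whose
 * $clusterTime.clusterTime is Timestamp(1575555555, 7) replaces a stored
 * Timestamp(1575555555, 3), because the increment breaks the tie once the
 * seconds are equal, whereas Timestamp(1575555500, 9) is ignored as older;
 * the signature subdocument never affects the comparison.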
* *-------------------------------------------------------------------------- */ void mongoc_topology_description_update_cluster_time ( mongoc_topology_description_t *td, const bson_t *reply) { bson_iter_t iter; bson_iter_t child; const uint8_t *data; uint32_t size; bson_t cluster_time; if (!reply || !bson_iter_init_find (&iter, reply, "$clusterTime")) { return; } if (!BSON_ITER_HOLDS_DOCUMENT (&iter) || !bson_iter_recurse (&iter, &child)) { MONGOC_ERROR ("Can't parse $clusterTime"); return; } bson_iter_document (&iter, &size, &data); BSON_ASSERT (bson_init_static (&cluster_time, data, (size_t) size)); if (bson_empty (&td->cluster_time) || _mongoc_cluster_time_greater (&cluster_time, &td->cluster_time)) { bson_destroy (&td->cluster_time); bson_copy_to (&cluster_time, &td->cluster_time); } } static void _mongoc_topology_description_add_new_servers ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { bson_iter_t member_iter; const bson_t *rs_members[3]; int i; rs_members[0] = &server->hosts; rs_members[1] = &server->arbiters; rs_members[2] = &server->passives; for (i = 0; i < 3; i++) { BSON_ASSERT (bson_iter_init (&member_iter, rs_members[i])); while (bson_iter_next (&member_iter)) { mongoc_topology_description_add_server ( topology, bson_iter_utf8 (&member_iter, NULL), NULL); } } } typedef struct _mongoc_primary_and_topology_t { mongoc_topology_description_t *topology; mongoc_server_description_t *primary; } mongoc_primary_and_topology_t; /* invalidate old primaries */ static bool _mongoc_topology_description_invalidate_primaries_cb (void *item, void *ctx) { mongoc_server_description_t *server = (mongoc_server_description_t *) item; mongoc_primary_and_topology_t *data = (mongoc_primary_and_topology_t *) ctx; if (server->id != data->primary->id && server->type == MONGOC_SERVER_RS_PRIMARY) { mongoc_server_description_set_state (server, MONGOC_SERVER_UNKNOWN); mongoc_server_description_set_set_version (server, MONGOC_NO_SET_VERSION); mongoc_server_description_set_election_id (server, NULL); } return true; } /* Remove and destroy all replica set members not in primary's hosts lists */ static void _mongoc_topology_description_remove_unreported_servers ( mongoc_topology_description_t *topology, mongoc_server_description_t *primary) { mongoc_array_t to_remove; int i; mongoc_server_description_t *member; const char *address; _mongoc_array_init (&to_remove, sizeof (mongoc_server_description_t *)); /* Accumulate servers to be removed - do this before calling * _mongoc_topology_description_remove_server, which could call * mongoc_server_description_cleanup on the primary itself if it * doesn't report its own connection_address in its hosts list. 
* See hosts_differ_from_seeds.json */ for (i = 0; i < topology->servers->items_len; i++) { member = (mongoc_server_description_t *) mongoc_set_get_item ( topology->servers, i); address = member->connection_address; if (!mongoc_server_description_has_rs_member (primary, address)) { _mongoc_array_append_val (&to_remove, member); } } /* now it's safe to call _mongoc_topology_description_remove_server, * even on the primary */ for (i = 0; i < to_remove.len; i++) { member = _mongoc_array_index (&to_remove, mongoc_server_description_t *, i); _mongoc_topology_description_remove_server (topology, member); } _mongoc_array_destroy (&to_remove); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_matches_me -- * * Server Discovery And Monitoring Spec: "Removal from the topology of * seed list members where the "me" property does not match the address * used to connect prevents clients from being able to select a server, * only to fail to re-select that server once the primary has responded. * * Returns: * True if "me" matches "connection_address". * * Side Effects: * None. * *-------------------------------------------------------------------------- */ static bool _mongoc_topology_description_matches_me (mongoc_server_description_t *server) { BSON_ASSERT (server->connection_address); if (!server->me) { /* "me" is unknown: consider it a match */ return true; } return strcasecmp (server->connection_address, server->me) == 0; } /* *-------------------------------------------------------------------------- * * _mongoc_update_rs_from_primary -- * * First, determine that this is really the primary: * -If this node isn't in the cluster, do nothing. * -If the cluster's set name is null, set it to node's set name. * Otherwise if the cluster's set name is different from node's, * we found a rogue primary, so remove it from the cluster and * check the cluster for a primary, then return. * -If any of the members of cluster reports an address different * from node's, node cannot be the primary. * Now that we know this is the primary: * -If any hosts, passives, or arbiters in node's description aren't * in the cluster, add them as UNKNOWN servers. * -If the cluster has any servers that aren't in node's description, * remove and destroy them. * Finally, check the cluster for the new primary. * * Returns: * None. * * Side effects: * Changes to the cluster, possible removal of cluster nodes. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_update_rs_from_primary ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { mongoc_primary_and_topology_t data; bson_error_t error; BSON_ASSERT (topology); BSON_ASSERT (server); if (!_mongoc_topology_description_has_server ( topology, server->connection_address, NULL)) return; /* If server->set_name was null this function wouldn't be called from * mongoc_server_description_handle_ismaster(). static code analyzers however * don't know that so we check for it explicitly. 
*/ if (server->set_name) { /* 'Server' can only be the primary if it has the right rs name */ if (!topology->set_name) { topology->set_name = bson_strdup (server->set_name); } else if (strcmp (topology->set_name, server->set_name) != 0) { _mongoc_topology_description_remove_server (topology, server); _update_rs_type (topology); return; } } if (mongoc_server_description_has_set_version (server) && mongoc_server_description_has_election_id (server)) { /* Server Discovery And Monitoring Spec: "The client remembers the * greatest electionId reported by a primary, and distrusts primaries * with lesser electionIds. This prevents the client from oscillating * between the old and new primary during a split-brain period." */ if (_mongoc_topology_description_later_election (topology, server)) { bson_set_error (&error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, "member's setVersion or electionId is stale"); mongoc_topology_description_invalidate_server ( topology, server->id, &error); _update_rs_type (topology); return; } /* server's electionId >= topology's max electionId */ _mongoc_topology_description_set_max_election_id (topology, server); } if (mongoc_server_description_has_set_version (server) && (!_mongoc_topology_description_has_set_version (topology) || server->set_version > topology->max_set_version)) { _mongoc_topology_description_set_max_set_version (topology, server); } /* 'Server' is the primary! Invalidate other primaries if found */ data.primary = server; data.topology = topology; mongoc_set_for_each (topology->servers, _mongoc_topology_description_invalidate_primaries_cb, &data); /* Add to topology description any new servers primary knows about */ _mongoc_topology_description_add_new_servers (topology, server); /* Remove from topology description any servers primary doesn't know about */ _mongoc_topology_description_remove_unreported_servers (topology, server); /* Finally, set topology type */ _update_rs_type (topology); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_update_rs_without_primary -- * * Update cluster's information when there is no primary. * * Returns: * None. * * Side Effects: * Alters cluster state, may remove node from cluster. 
* *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_update_rs_without_primary ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { BSON_ASSERT (topology); BSON_ASSERT (server); if (!_mongoc_topology_description_has_server ( topology, server->connection_address, NULL)) { return; } /* make sure we're talking about the same replica set */ if (server->set_name) { if (!topology->set_name) { topology->set_name = bson_strdup (server->set_name); } else if (strcmp (topology->set_name, server->set_name) != 0) { _mongoc_topology_description_remove_server (topology, server); return; } } /* Add new servers that this replica set member knows about */ _mongoc_topology_description_add_new_servers (topology, server); /* If this server thinks there is a primary, label it POSSIBLE_PRIMARY */ if (server->current_primary) { _mongoc_topology_description_label_unknown_member ( topology, server->current_primary, MONGOC_SERVER_POSSIBLE_PRIMARY); } if (!_mongoc_topology_description_matches_me (server)) { _mongoc_topology_description_remove_server (topology, server); return; } } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_update_rs_with_primary_from_member -- * * Update cluster's information when there is a primary, but the * update is coming from another replica set member. * * Returns: * None. * * Side Effects: * Alters cluster state. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_update_rs_with_primary_from_member ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { BSON_ASSERT (topology); BSON_ASSERT (server); if (!_mongoc_topology_description_has_server ( topology, server->connection_address, NULL)) { return; } /* set_name should never be null here */ if (strcmp (topology->set_name, server->set_name) != 0) { _mongoc_topology_description_remove_server (topology, server); _update_rs_type (topology); return; } if (!_mongoc_topology_description_matches_me (server)) { _mongoc_topology_description_remove_server (topology, server); return; } /* If there is no primary, label server's current_primary as the * POSSIBLE_PRIMARY */ if (!_mongoc_topology_description_has_primary (topology) && server->current_primary) { _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_RS_NO_PRIMARY); _mongoc_topology_description_label_unknown_member ( topology, server->current_primary, MONGOC_SERVER_POSSIBLE_PRIMARY); } } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_set_topology_type_to_sharded -- * * Sets topology's type to SHARDED. * * Returns: * None * * Side effects: * Alter's topology's type * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_set_topology_type_to_sharded ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_SHARDED); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_transition_unknown_to_rs_no_primary -- * * Encapsulates transition from cluster state UNKNOWN to * RS_NO_PRIMARY. Sets the type to RS_NO_PRIMARY, * then updates the replica set accordingly. * * Returns: * None. * * Side effects: * Changes topology state. 
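 *
 * Per the gSDAMTransitionTable below, this transition runs whenever the
 * topology type is still UNKNOWN and an ismaster response describes a
 * non-primary replica set member (a secondary, arbiter, or "other"
 * member).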
* *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_transition_unknown_to_rs_no_primary ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_RS_NO_PRIMARY); _mongoc_topology_description_update_rs_without_primary (topology, server); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_remove_and_check_primary -- * * Remove the server and check if the topology still has a primary. * * Returns: * None. * * Side effects: * Removes server from topology and destroys it. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_remove_and_check_primary ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { _mongoc_topology_description_remove_server (topology, server); _update_rs_type (topology); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_update_unknown_with_standalone -- * * If the cluster doesn't contain this server, do nothing. * Otherwise, if the topology only has one seed, change its * type to SINGLE. If the topology has multiple seeds, it does not * include us, so remove this server and destroy it. * * Returns: * None. * * Side effects: * Changes the topology type, might remove server from topology. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_update_unknown_with_standalone ( mongoc_topology_description_t *topology, mongoc_server_description_t *server) { BSON_ASSERT (topology); BSON_ASSERT (server); if (!_mongoc_topology_description_has_server ( topology, server->connection_address, NULL)) return; if (topology->servers->items_len > 1) { /* This cluster contains other servers, it cannot be a standalone. */ _mongoc_topology_description_remove_server (topology, server); } else { _mongoc_topology_description_set_state (topology, MONGOC_TOPOLOGY_SINGLE); } } /* *-------------------------------------------------------------------------- * * This table implements the 'TopologyType' table outlined in the Server * Discovery and Monitoring spec. Each row represents a server type, * and each column represents the topology type. Given a current topology * type T and a newly-observed server type S, use the function at * gSDAMTransitionTable[S][T] to transition to a new state.
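 *
 * For example, if the topology type is MONGOC_TOPOLOGY_UNKNOWN and a scan
 * reports a server of type MONGOC_SERVER_RS_SECONDARY, the entry for that
 * pair is _mongoc_topology_description_transition_unknown_to_rs_no_primary,
 * so the topology moves to RS_NO_PRIMARY. A NULL entry means no transition
 * function runs for that combination.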
* * Rows should be read like so: * { server type for this row * UNKNOWN, * SHARDED, * RS_NO_PRIMARY, * RS_WITH_PRIMARY * } * *-------------------------------------------------------------------------- */ typedef void (*transition_t) (mongoc_topology_description_t *topology, mongoc_server_description_t *server); transition_t gSDAMTransitionTable [MONGOC_SERVER_DESCRIPTION_TYPES][MONGOC_TOPOLOGY_DESCRIPTION_TYPES] = { { /* UNKNOWN */ NULL, /* MONGOC_TOPOLOGY_UNKNOWN */ NULL, /* MONGOC_TOPOLOGY_SHARDED */ NULL, /* MONGOC_TOPOLOGY_RS_NO_PRIMARY */ _mongoc_topology_description_check_if_has_primary /* MONGOC_TOPOLOGY_RS_WITH_PRIMARY */ }, {/* STANDALONE */ _mongoc_topology_description_update_unknown_with_standalone, _mongoc_topology_description_remove_server, _mongoc_topology_description_remove_server, _mongoc_topology_description_remove_and_check_primary}, {/* MONGOS */ _mongoc_topology_description_set_topology_type_to_sharded, NULL, _mongoc_topology_description_remove_server, _mongoc_topology_description_remove_and_check_primary}, {/* POSSIBLE_PRIMARY */ NULL, NULL, NULL, NULL}, {/* PRIMARY */ _mongoc_topology_description_update_rs_from_primary, _mongoc_topology_description_remove_server, _mongoc_topology_description_update_rs_from_primary, _mongoc_topology_description_update_rs_from_primary}, {/* SECONDARY */ _mongoc_topology_description_transition_unknown_to_rs_no_primary, _mongoc_topology_description_remove_server, _mongoc_topology_description_update_rs_without_primary, _mongoc_topology_description_update_rs_with_primary_from_member}, {/* ARBITER */ _mongoc_topology_description_transition_unknown_to_rs_no_primary, _mongoc_topology_description_remove_server, _mongoc_topology_description_update_rs_without_primary, _mongoc_topology_description_update_rs_with_primary_from_member}, {/* RS_OTHER */ _mongoc_topology_description_transition_unknown_to_rs_no_primary, _mongoc_topology_description_remove_server, _mongoc_topology_description_update_rs_without_primary, _mongoc_topology_description_update_rs_with_primary_from_member}, {/* RS_GHOST */ NULL, _mongoc_topology_description_remove_server, NULL, _mongoc_topology_description_check_if_has_primary}}; #ifdef MONGOC_TRACE /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_type -- * * Get this topology's type, one of the types defined in the Server * Discovery And Monitoring Spec. * * NOTE: this method should only be called while holding the mutex on * the owning topology object. * * Returns: * A string. * * Side effects: * None. * *-------------------------------------------------------------------------- */ static const char * _mongoc_topology_description_type (mongoc_topology_description_t *topology) { switch (topology->type) { case MONGOC_TOPOLOGY_UNKNOWN: return "Unknown"; case MONGOC_TOPOLOGY_SHARDED: return "Sharded"; case MONGOC_TOPOLOGY_RS_NO_PRIMARY: return "RSNoPrimary"; case MONGOC_TOPOLOGY_RS_WITH_PRIMARY: return "RSWithPrimary"; case MONGOC_TOPOLOGY_SINGLE: return "Single"; case MONGOC_TOPOLOGY_DESCRIPTION_TYPES: default: MONGOC_ERROR ("Invalid mongoc_topology_description_type_t type"); return "Invalid"; } } #endif /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_update_session_timeout -- * * Fill out td.session_timeout_minutes. * * Server Discovery and Monitoring Spec: "set logicalSessionTimeoutMinutes * to the smallest logicalSessionTimeoutMinutes value among all * ServerDescriptions of known ServerType. 
If any ServerDescription of * known ServerType has a null logicalSessionTimeoutMinutes, then * logicalSessionTimeoutMinutes MUST be set to null." * * -------------------------------------------------------------------------- */ static void _mongoc_topology_description_update_session_timeout ( mongoc_topology_description_t *td) { mongoc_set_t *set; size_t i; mongoc_server_description_t *sd; set = td->servers; td->session_timeout_minutes = MONGOC_NO_SESSIONS; for (i = 0; i < set->items_len; i++) { sd = (mongoc_server_description_t *) mongoc_set_get_item (set, (int) i); if (!_is_data_node (sd)) { continue; } if (sd->session_timeout_minutes == MONGOC_NO_SESSIONS) { td->session_timeout_minutes = MONGOC_NO_SESSIONS; return; } else if (td->session_timeout_minutes == MONGOC_NO_SESSIONS) { td->session_timeout_minutes = sd->session_timeout_minutes; } else if (td->session_timeout_minutes > sd->session_timeout_minutes) { td->session_timeout_minutes = sd->session_timeout_minutes; } } } /* *-------------------------------------------------------------------------- * * _mongoc_topology_description_check_compatible -- * * Fill out td.compatibility_error if any server's wire versions do * not overlap with ours. Otherwise clear td.compatibility_error. * * If any server is incompatible, the topology as a whole is considered * incompatible. * *-------------------------------------------------------------------------- */ static void _mongoc_topology_description_check_compatible ( mongoc_topology_description_t *td) { size_t i; mongoc_server_description_t *sd; memset (&td->compatibility_error, 0, sizeof (bson_error_t)); for (i = 0; i < td->servers->items_len; i++) { sd = (mongoc_server_description_t *) mongoc_set_get_item (td->servers, (int) i); if (sd->type == MONGOC_SERVER_UNKNOWN || sd->type == MONGOC_SERVER_POSSIBLE_PRIMARY) { continue; } if (sd->min_wire_version > WIRE_VERSION_MAX) { bson_set_error ( &td->compatibility_error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "Server at %s requires wire version %d," " but this version of libmongoc only supports up to %d", sd->host.host_and_port, sd->min_wire_version, WIRE_VERSION_MAX); } else if (sd->max_wire_version < WIRE_VERSION_MIN) { bson_set_error ( &td->compatibility_error, MONGOC_ERROR_PROTOCOL, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "Server at %s reports wire version %d, but this" " version of libmongoc requires at least 3 (MongoDB 3.0)", sd->host.host_and_port, sd->max_wire_version); } } } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_handle_ismaster -- * * Handle an ismaster. This is called by the background SDAM process, * and by client when invalidating servers. If there was an error * calling ismaster, pass it in as @error. * * NOTE: this method should only be called while holding the mutex on * the owning topology object. 
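 *
 * Roughly, the steps below are: snapshot the previous topology and server
 * descriptions if the corresponding APM callbacks are registered, apply the
 * ismaster reply (or error) to the server description, update $clusterTime,
 * emit a server-changed event, run the SDAM transition for the server's new
 * type, recompute the session timeout, re-check wire-version compatibility
 * when the ismaster succeeded, and emit a topology-changed event.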
* *-------------------------------------------------------------------------- */ void mongoc_topology_description_handle_ismaster ( mongoc_topology_description_t *topology, uint32_t server_id, const bson_t *ismaster_response, int64_t rtt_msec, const bson_error_t *error /* IN */) { mongoc_topology_description_t *prev_td = NULL; mongoc_server_description_t *prev_sd = NULL; mongoc_server_description_t *sd; BSON_ASSERT (topology); BSON_ASSERT (server_id != 0); sd = mongoc_topology_description_server_by_id (topology, server_id, NULL); if (!sd) { return; /* server already removed from topology */ } if (topology->apm_callbacks.topology_changed) { prev_td = bson_malloc0 (sizeof (mongoc_topology_description_t)); _mongoc_topology_description_copy_to (topology, prev_td); } if (topology->apm_callbacks.server_changed) { prev_sd = mongoc_server_description_new_copy (sd); } /* pass the current error in */ mongoc_server_description_handle_ismaster ( sd, ismaster_response, rtt_msec, error); mongoc_topology_description_update_cluster_time (topology, ismaster_response); _mongoc_topology_description_monitor_server_changed (topology, prev_sd, sd); if (gSDAMTransitionTable[sd->type][topology->type]) { TRACE ("Transitioning to %s for %s", _mongoc_topology_description_type (topology), mongoc_server_description_type (sd)); gSDAMTransitionTable[sd->type][topology->type](topology, sd); } else { TRACE ("No transition entry to %s for %s", _mongoc_topology_description_type (topology), mongoc_server_description_type (sd)); } _mongoc_topology_description_update_session_timeout (topology); /* Don't bother checking wire version compatibility if we already errored */ if (ismaster_response && (!error || !error->code)) { _mongoc_topology_description_check_compatible (topology); } _mongoc_topology_description_monitor_changed (prev_td, topology); if (prev_td) { mongoc_topology_description_destroy (prev_td); bson_free (prev_td); } if (prev_sd) { mongoc_server_description_destroy (prev_sd); } } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_has_readable_server -- * * SDAM Monitoring Spec: * "Determines if the topology has a readable server available." * * NOTE: this method should only be called by user code in an SDAM * Monitoring callback, while the monitoring framework holds the mutex * on the owning topology object. * *-------------------------------------------------------------------------- */ bool mongoc_topology_description_has_readable_server ( mongoc_topology_description_t *td, const mongoc_read_prefs_t *prefs) { bson_error_t error; if (!mongoc_topology_compatible (td, NULL, &error)) { return false; } /* local threshold argument doesn't matter */ return mongoc_topology_description_select (td, MONGOC_SS_READ, prefs, 0) != NULL; } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_has_writable_server -- * * SDAM Monitoring Spec: * "Determines if the topology has a writable server available." * * NOTE: this method should only be called by user code in an SDAM * Monitoring callback, while the monitoring framework holds the mutex * on the owning topology object. 
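 *
 * A minimal usage sketch (illustrative only; "td" is the topology
 * description handed to the application's monitoring callback):
 *
 *    if (mongoc_topology_description_has_writable_server (td)) {
 *       // a primary, mongos, or standalone is available for writes
 *    }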
* *-------------------------------------------------------------------------- */ bool mongoc_topology_description_has_writable_server ( mongoc_topology_description_t *td) { bson_error_t error; if (!mongoc_topology_compatible (td, NULL, &error)) { return false; } return mongoc_topology_description_select (td, MONGOC_SS_WRITE, NULL, 0) != NULL; } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_type -- * * Get this topology's type, one of the types defined in the Server * Discovery And Monitoring Spec. * * NOTE: this method should only be called by user code in an SDAM * Monitoring callback, while the monitoring framework holds the mutex * on the owning topology object. * * Returns: * A string. * *-------------------------------------------------------------------------- */ const char * mongoc_topology_description_type (const mongoc_topology_description_t *td) { switch (td->type) { case MONGOC_TOPOLOGY_UNKNOWN: return "Unknown"; case MONGOC_TOPOLOGY_SHARDED: return "Sharded"; case MONGOC_TOPOLOGY_RS_NO_PRIMARY: return "ReplicaSetNoPrimary"; case MONGOC_TOPOLOGY_RS_WITH_PRIMARY: return "ReplicaSetWithPrimary"; case MONGOC_TOPOLOGY_SINGLE: return "Single"; case MONGOC_TOPOLOGY_DESCRIPTION_TYPES: default: fprintf (stderr, "ERROR: Unknown topology type %d\n", td->type); BSON_ASSERT (0); } return NULL; } /* *-------------------------------------------------------------------------- * * mongoc_topology_description_get_servers -- * * Fetch an array of server descriptions for all known servers in the * topology. * * Returns: * An array you must free with mongoc_server_descriptions_destroy_all. * *-------------------------------------------------------------------------- */ mongoc_server_description_t ** mongoc_topology_description_get_servers ( const mongoc_topology_description_t *td, size_t *n /* OUT */) { size_t i; mongoc_set_t *set; mongoc_server_description_t **sds; mongoc_server_description_t *sd; BSON_ASSERT (td); BSON_ASSERT (n); set = td->servers; /* enough room for all descriptions, even if some are unknown */ sds = (mongoc_server_description_t **) bson_malloc0 ( sizeof (mongoc_server_description_t *) * set->items_len); *n = 0; for (i = 0; i < set->items_len; ++i) { sd = (mongoc_server_description_t *) mongoc_set_get_item (set, (int) i); if (sd->type != MONGOC_SERVER_UNKNOWN) { sds[*n] = mongoc_server_description_new_copy (sd); ++(*n); } } return sds; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-description.h0000644000076500000240000000267313572250760027662 0ustar alcaeusstaff/* * Copyright 2016 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_TOPOLOGY_DESCRIPTION_H #define MONGOC_TOPOLOGY_DESCRIPTION_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-read-prefs.h" BSON_BEGIN_DECLS typedef struct _mongoc_topology_description_t mongoc_topology_description_t; MONGOC_EXPORT (bool) mongoc_topology_description_has_readable_server ( mongoc_topology_description_t *td, const mongoc_read_prefs_t *prefs); MONGOC_EXPORT (bool) mongoc_topology_description_has_writable_server ( mongoc_topology_description_t *td); MONGOC_EXPORT (const char *) mongoc_topology_description_type (const mongoc_topology_description_t *td); MONGOC_EXPORT (mongoc_server_description_t **) mongoc_topology_description_get_servers ( const mongoc_topology_description_t *td, size_t *n); BSON_END_DECLS #endif /* MONGOC_TOPOLOGY_DESCRIPTION_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-private.h0000644000076500000240000001313613572250760027005 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_TOPOLOGY_PRIVATE_H #define MONGOC_TOPOLOGY_PRIVATE_H #include "mongoc/mongoc-topology-scanner-private.h" #include "mongoc/mongoc-server-description-private.h" #include "mongoc/mongoc-topology-description-private.h" #include "mongoc/mongoc-thread-private.h" #include "mongoc/mongoc-uri.h" #include "mongoc/mongoc-client-session-private.h" #define MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS 500 #define MONGOC_TOPOLOGY_SOCKET_CHECK_INTERVAL_MS 5000 #define MONGOC_TOPOLOGY_COOLDOWN_MS 5000 #define MONGOC_TOPOLOGY_LOCAL_THRESHOLD_MS 15 #define MONGOC_TOPOLOGY_SERVER_SELECTION_TIMEOUT_MS 30000 #define MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_MULTI_THREADED 10000 #define MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_SINGLE_THREADED 60000 #define MONGOC_TOPOLOGY_MIN_RESCAN_SRV_INTERVAL_MS 60000 typedef enum { MONGOC_TOPOLOGY_SCANNER_OFF, MONGOC_TOPOLOGY_SCANNER_BG_RUNNING, MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN, MONGOC_TOPOLOGY_SCANNER_SINGLE_THREADED, } mongoc_topology_scanner_state_t; typedef struct _mongoc_topology_t { mongoc_topology_description_t description; mongoc_uri_t *uri; mongoc_topology_scanner_t *scanner; bool server_selection_try_once; int64_t last_scan; int64_t local_threshold_msec; int64_t connect_timeout_msec; int64_t server_selection_timeout_msec; /* defaults to 500ms, configurable by tests */ int64_t min_heartbeat_frequency_msec; /* Minimum of SRV record TTLs, but no lower than 60 seconds. * May be zero for non-SRV/non-MongoS topology. 
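 * (The interval is stored in milliseconds; see
 * MONGOC_TOPOLOGY_MIN_RESCAN_SRV_INTERVAL_MS above.)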
*/ int64_t rescanSRVIntervalMS; int64_t last_srv_scan; bson_mutex_t mutex; mongoc_cond_t cond_client; mongoc_cond_t cond_server; bson_thread_t thread; mongoc_topology_scanner_state_t scanner_state; bool scan_requested; bool single_threaded; bool stale; mongoc_server_session_t *session_pool; } mongoc_topology_t; mongoc_topology_t * mongoc_topology_new (const mongoc_uri_t *uri, bool single_threaded); void mongoc_topology_set_apm_callbacks (mongoc_topology_t *topology, mongoc_apm_callbacks_t *callbacks, void *context); void mongoc_topology_destroy (mongoc_topology_t *topology); void mongoc_topology_reconcile (mongoc_topology_t *topology); bool mongoc_topology_compatible (const mongoc_topology_description_t *td, const mongoc_read_prefs_t *read_prefs, bson_error_t *error); mongoc_server_description_t * mongoc_topology_select (mongoc_topology_t *topology, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_prefs, bson_error_t *error); uint32_t mongoc_topology_select_server_id (mongoc_topology_t *topology, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_prefs, bson_error_t *error); mongoc_server_description_t * mongoc_topology_server_by_id (mongoc_topology_t *topology, uint32_t id, bson_error_t *error); mongoc_host_list_t * _mongoc_topology_host_by_id (mongoc_topology_t *topology, uint32_t id, bson_error_t *error); void mongoc_topology_invalidate_server (mongoc_topology_t *topology, uint32_t id, const bson_error_t *error); bool _mongoc_topology_update_from_handshake (mongoc_topology_t *topology, const mongoc_server_description_t *sd); void _mongoc_topology_update_last_used (mongoc_topology_t *topology, uint32_t server_id); int64_t mongoc_topology_server_timestamp (mongoc_topology_t *topology, uint32_t id); mongoc_topology_description_type_t _mongoc_topology_get_type (mongoc_topology_t *topology); bool _mongoc_topology_start_background_scanner (mongoc_topology_t *topology); void _mongoc_topology_background_thread_stop (mongoc_topology_t *topology); bool _mongoc_topology_set_appname (mongoc_topology_t *topology, const char *appname); void _mongoc_topology_update_cluster_time (mongoc_topology_t *topology, const bson_t *reply); mongoc_server_session_t * _mongoc_topology_pop_server_session (mongoc_topology_t *topology, bson_error_t *error); void _mongoc_topology_push_server_session (mongoc_topology_t *topology, mongoc_server_session_t *server_session); bool _mongoc_topology_end_sessions_cmd (mongoc_topology_t *topology, bson_t *cmd); void _mongoc_topology_clear_session_pool (mongoc_topology_t *topology); void _mongoc_topology_do_blocking_scan (mongoc_topology_t *topology, bson_error_t *error); const bson_t * _mongoc_topology_get_ismaster (mongoc_topology_t *topology); void _mongoc_topology_request_scan (mongoc_topology_t *topology); #endif mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-scanner-private.h0000644000076500000240000001463613572250760030442 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_TOPOLOGY_SCANNER_PRIVATE_H #define MONGOC_TOPOLOGY_SCANNER_PRIVATE_H /* TODO: rename to TOPOLOGY scanner */ #include #include "mongoc/mongoc-async-private.h" #include "mongoc/mongoc-async-cmd-private.h" #include "mongoc/mongoc-handshake-private.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-apm-private.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-ssl.h" #endif BSON_BEGIN_DECLS typedef void (*mongoc_topology_scanner_setup_err_cb_t) ( uint32_t id, void *data, const bson_error_t *error /* IN */); typedef void (*mongoc_topology_scanner_cb_t) ( uint32_t id, const bson_t *bson, int64_t rtt, void *data, const bson_error_t *error /* IN */); struct mongoc_topology_scanner; struct mongoc_topology_scanner_node; typedef struct mongoc_topology_scanner_node { uint32_t id; /* after scanning, this is set to the successful stream if one exists. */ mongoc_stream_t *stream; int64_t timestamp; int64_t last_used; int64_t last_failed; bool has_auth; mongoc_host_list_t host; struct mongoc_topology_scanner *ts; struct mongoc_topology_scanner_node *next; struct mongoc_topology_scanner_node *prev; bool retired; bson_error_t last_error; /* the hostname for a node may resolve to multiple DNS results. * dns_results has the full list of DNS results, ordered by host preference. * successful_dns_result is the most recent successful DNS result. */ struct addrinfo *dns_results; struct addrinfo *successful_dns_result; int64_t last_dns_cache; /* used by single-threaded clients to store negotiated sasl mechanisms on a * node. */ mongoc_handshake_sasl_supported_mechs_t sasl_supported_mechs; bool negotiated_sasl_supported_mechs; } mongoc_topology_scanner_node_t; typedef struct mongoc_topology_scanner { mongoc_async_t *async; int64_t connect_timeout_msec; mongoc_topology_scanner_node_t *nodes; bson_t ismaster_cmd; bson_t ismaster_cmd_with_handshake; bson_t cluster_time; bool handshake_ok_to_send; const char *appname; mongoc_topology_scanner_setup_err_cb_t setup_err_cb; mongoc_topology_scanner_cb_t cb; void *cb_data; const mongoc_uri_t *uri; mongoc_async_cmd_setup_t setup; mongoc_stream_initiator_t initiator; void *initiator_context; bson_error_t error; #ifdef MONGOC_ENABLE_SSL mongoc_ssl_opt_t *ssl_opts; #endif mongoc_apm_callbacks_t apm_callbacks; void *apm_context; int64_t dns_cache_timeout_ms; /* only used by single-threaded clients to negotiate auth mechanisms. 
*/ bool negotiate_sasl_supported_mechs; } mongoc_topology_scanner_t; mongoc_topology_scanner_t * mongoc_topology_scanner_new ( const mongoc_uri_t *uri, mongoc_topology_scanner_setup_err_cb_t setup_err_cb, mongoc_topology_scanner_cb_t cb, void *data, int64_t connect_timeout_msec); void mongoc_topology_scanner_destroy (mongoc_topology_scanner_t *ts); bool mongoc_topology_scanner_valid (mongoc_topology_scanner_t *ts); void mongoc_topology_scanner_add (mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host, uint32_t id); void mongoc_topology_scanner_scan (mongoc_topology_scanner_t *ts, uint32_t id); void mongoc_topology_scanner_disconnect (mongoc_topology_scanner_t *scanner); void mongoc_topology_scanner_node_retire (mongoc_topology_scanner_node_t *node); void mongoc_topology_scanner_node_disconnect (mongoc_topology_scanner_node_t *node, bool failed); void mongoc_topology_scanner_node_destroy (mongoc_topology_scanner_node_t *node, bool failed); bool mongoc_topology_scanner_in_cooldown (mongoc_topology_scanner_t *ts, int64_t when); void mongoc_topology_scanner_start (mongoc_topology_scanner_t *ts, bool obey_cooldown); void mongoc_topology_scanner_work (mongoc_topology_scanner_t *ts); void _mongoc_topology_scanner_finish (mongoc_topology_scanner_t *ts); void mongoc_topology_scanner_get_error (mongoc_topology_scanner_t *ts, bson_error_t *error); void mongoc_topology_scanner_reset (mongoc_topology_scanner_t *ts); void mongoc_topology_scanner_node_setup (mongoc_topology_scanner_node_t *node, bson_error_t *error); mongoc_topology_scanner_node_t * mongoc_topology_scanner_get_node (mongoc_topology_scanner_t *ts, uint32_t id); const bson_t * _mongoc_topology_scanner_get_ismaster (mongoc_topology_scanner_t *ts); bool mongoc_topology_scanner_has_node_for_host (mongoc_topology_scanner_t *ts, mongoc_host_list_t *host); void mongoc_topology_scanner_set_stream_initiator (mongoc_topology_scanner_t *ts, mongoc_stream_initiator_t si, void *ctx); bool _mongoc_topology_scanner_set_appname (mongoc_topology_scanner_t *ts, const char *name); void _mongoc_topology_scanner_set_cluster_time (mongoc_topology_scanner_t *ts, const bson_t *cluster_time); void _mongoc_topology_scanner_set_dns_cache_timeout (mongoc_topology_scanner_t *ts, int64_t timeout_ms); #ifdef MONGOC_ENABLE_SSL void mongoc_topology_scanner_set_ssl_opts (mongoc_topology_scanner_t *ts, mongoc_ssl_opt_t *opts); #endif bool mongoc_topology_scanner_node_in_cooldown (mongoc_topology_scanner_node_t *node, int64_t when); /* for testing. */ mongoc_stream_t * _mongoc_topology_scanner_tcp_initiate (mongoc_async_cmd_t *acmd); BSON_END_DECLS #endif /* MONGOC_TOPOLOGY_SCANNER_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology-scanner.c0000644000076500000240000010164513572250760026762 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-topology-scanner-private.h" #include "mongoc/mongoc-stream-private.h" #include "mongoc/mongoc-stream-socket.h" #include "mongoc/mongoc-handshake.h" #include "mongoc/mongoc-handshake-private.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-stream-tls.h" #endif #include "mongoc/mongoc-counters-private.h" #include "mongoc/utlist.h" #include "mongoc/mongoc-topology-private.h" #include "mongoc/mongoc-host-list-private.h" #include "mongoc/mongoc-uri-private.h" #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "topology_scanner" #define DNS_CACHE_TIMEOUT_MS 10 * 60 * 1000 #define HAPPY_EYEBALLS_DELAY_MS 250 /* forward declarations */ static void _async_connected (mongoc_async_cmd_t *acmd); static void _async_success (mongoc_async_cmd_t *acmd, const bson_t *ismaster_response, int64_t duration_usec); static void _async_error_or_timeout (mongoc_async_cmd_t *acmd, int64_t duration_usec, const char *default_err_msg); static void _async_handler (mongoc_async_cmd_t *acmd, mongoc_async_cmd_result_t async_status, const bson_t *ismaster_response, int64_t duration_usec); static void _mongoc_topology_scanner_monitor_heartbeat_started ( const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host); static void _mongoc_topology_scanner_monitor_heartbeat_succeeded ( const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host, const bson_t *reply, int64_t duration_usec); static void _mongoc_topology_scanner_monitor_heartbeat_failed ( const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host, const bson_error_t *error, int64_t duration_usec); /* reset "retired" nodes that failed or were removed in the previous scan */ static void _delete_retired_nodes (mongoc_topology_scanner_t *ts); /* cancel any pending async commands for a specific node excluding acmd. * If acmd is NULL, cancel all async commands on the node. */ static void _cancel_commands_excluding (mongoc_topology_scanner_node_t *node, mongoc_async_cmd_t *acmd); /* return the number of pending async commands for a node. */ static int _count_acmds (mongoc_topology_scanner_node_t *node); /* if acmd fails, schedule the sibling commands sooner. */ static void _jumpstart_other_acmds (mongoc_topology_scanner_node_t *node, mongoc_async_cmd_t *acmd); static void _add_ismaster (bson_t *cmd) { BSON_APPEND_INT32 (cmd, "isMaster", 1); } static bool _build_ismaster_with_handshake (mongoc_topology_scanner_t *ts) { bson_t *doc = &ts->ismaster_cmd_with_handshake; bson_t subdoc; bson_iter_t iter; const char *key; int keylen; bool res; const bson_t *compressors; int count = 0; char buf[16]; _add_ismaster (doc); BSON_APPEND_DOCUMENT_BEGIN (doc, HANDSHAKE_FIELD, &subdoc); res = _mongoc_handshake_build_doc_with_application (&subdoc, ts->appname); bson_append_document_end (doc, &subdoc); BSON_APPEND_ARRAY_BEGIN (doc, "compression", &subdoc); if (ts->uri) { compressors = mongoc_uri_get_compressors (ts->uri); if (bson_iter_init (&iter, compressors)) { while (bson_iter_next (&iter)) { keylen = bson_uint32_to_string (count++, &key, buf, sizeof buf); bson_append_utf8 ( &subdoc, key, (int) keylen, bson_iter_key (&iter), -1); } } } bson_append_array_end (doc, &subdoc); /* Return whether the handshake doc fit the size limit */ return res; } /* Caller must lock topology->mutex to protect ismaster_cmd_with_handshake. 
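 *
 * The handshake variant built by _build_ismaster_with_handshake above is
 * roughly of the form (exact fields vary by platform, appname, and URI
 * options):
 *
 *    { "isMaster": 1,
 *      "client": { "application": ..., "driver": ..., "os": ..., ... },
 *      "compression": [ ... ] }
 *
 * _mongoc_topology_scanner_get_ismaster returns either this document or the
 * bare isMaster command.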
This * is called at the start of the scan in _mongoc_topology_run_background, when a * node is added in _mongoc_topology_reconcile_add_nodes, or when running an * ismaster directly on a node in _mongoc_stream_run_ismaster. */ const bson_t * _mongoc_topology_scanner_get_ismaster (mongoc_topology_scanner_t *ts) { /* If this is the first time using the node or if it's the first time * using it after a failure, build handshake doc */ if (bson_empty (&ts->ismaster_cmd_with_handshake)) { ts->handshake_ok_to_send = _build_ismaster_with_handshake (ts); if (!ts->handshake_ok_to_send) { MONGOC_WARNING ("Handshake doc too big, not including in isMaster"); } } /* If the doc turned out to be too big */ if (!ts->handshake_ok_to_send) { return &ts->ismaster_cmd; } return &ts->ismaster_cmd_with_handshake; } static void _begin_ismaster_cmd (mongoc_topology_scanner_node_t *node, mongoc_stream_t *stream, bool is_setup_done, struct addrinfo *dns_result, int64_t initiate_delay_ms) { mongoc_topology_scanner_t *ts = node->ts; bson_t cmd; if (node->last_used != -1 && node->last_failed == -1) { /* The node's been used before and not failed recently */ bson_copy_to (&ts->ismaster_cmd, &cmd); } else { bson_copy_to (_mongoc_topology_scanner_get_ismaster (ts), &cmd); } if (node->ts->negotiate_sasl_supported_mechs && !node->negotiated_sasl_supported_mechs) { _mongoc_handshake_append_sasl_supported_mechs (ts->uri, &cmd); } if (!bson_empty (&ts->cluster_time)) { bson_append_document (&cmd, "$clusterTime", 12, &ts->cluster_time); } /* if the node should connect with a TCP socket, stream will be null, and * dns_result will be set. The async loop is responsible for calling the * _tcp_initiator to construct TCP sockets. */ mongoc_async_cmd_new (ts->async, stream, is_setup_done, dns_result, _mongoc_topology_scanner_tcp_initiate, initiate_delay_ms, ts->setup, node->host.host, "admin", &cmd, &_async_handler, node, ts->connect_timeout_msec); bson_destroy (&cmd); } mongoc_topology_scanner_t * mongoc_topology_scanner_new ( const mongoc_uri_t *uri, mongoc_topology_scanner_setup_err_cb_t setup_err_cb, mongoc_topology_scanner_cb_t cb, void *data, int64_t connect_timeout_msec) { mongoc_topology_scanner_t *ts = (mongoc_topology_scanner_t *) bson_malloc0 (sizeof (*ts)); ts->async = mongoc_async_new (); bson_init (&ts->ismaster_cmd); _add_ismaster (&ts->ismaster_cmd); bson_init (&ts->ismaster_cmd_with_handshake); bson_init (&ts->cluster_time); ts->setup_err_cb = setup_err_cb; ts->cb = cb; ts->cb_data = data; ts->uri = uri; ts->appname = NULL; ts->handshake_ok_to_send = false; ts->connect_timeout_msec = connect_timeout_msec; /* may be overridden for testing. 
*/ ts->dns_cache_timeout_ms = DNS_CACHE_TIMEOUT_MS; return ts; } #ifdef MONGOC_ENABLE_SSL void mongoc_topology_scanner_set_ssl_opts (mongoc_topology_scanner_t *ts, mongoc_ssl_opt_t *opts) { ts->ssl_opts = opts; ts->setup = mongoc_async_cmd_tls_setup; } #endif void mongoc_topology_scanner_set_stream_initiator (mongoc_topology_scanner_t *ts, mongoc_stream_initiator_t si, void *ctx) { ts->initiator = si; ts->initiator_context = ctx; ts->setup = NULL; } void mongoc_topology_scanner_destroy (mongoc_topology_scanner_t *ts) { mongoc_topology_scanner_node_t *ele, *tmp; DL_FOREACH_SAFE (ts->nodes, ele, tmp) { mongoc_topology_scanner_node_destroy (ele, false); } mongoc_async_destroy (ts->async); bson_destroy (&ts->ismaster_cmd); bson_destroy (&ts->ismaster_cmd_with_handshake); bson_destroy (&ts->cluster_time); /* This field can be set by a mongoc_client */ bson_free ((char *) ts->appname); bson_free (ts); } /* whether the scanner was successfully initialized - false if a mongodb+srv * URI failed to resolve to any hosts */ bool mongoc_topology_scanner_valid (mongoc_topology_scanner_t *ts) { return ts->nodes != NULL; } void mongoc_topology_scanner_add (mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host, uint32_t id) { mongoc_topology_scanner_node_t *node; node = (mongoc_topology_scanner_node_t *) bson_malloc0 (sizeof (*node)); memcpy (&node->host, host, sizeof (*host)); node->id = id; node->ts = ts; node->last_failed = -1; node->last_used = -1; DL_APPEND (ts->nodes, node); } void mongoc_topology_scanner_scan (mongoc_topology_scanner_t *ts, uint32_t id) { mongoc_topology_scanner_node_t *node; node = mongoc_topology_scanner_get_node (ts, id); /* begin non-blocking connection, don't wait for success */ if (node) { mongoc_topology_scanner_node_setup (node, &node->last_error); } /* if setup fails the node stays in the scanner. destroyed after the scan. */ } void mongoc_topology_scanner_disconnect (mongoc_topology_scanner_t *scanner) { mongoc_topology_scanner_node_t *node; BSON_ASSERT (scanner); node = scanner->nodes; while (node) { mongoc_topology_scanner_node_disconnect (node, false); node = node->next; } } void mongoc_topology_scanner_node_retire (mongoc_topology_scanner_node_t *node) { /* cancel any pending commands. */ _cancel_commands_excluding (node, NULL); node->retired = true; } void mongoc_topology_scanner_node_disconnect (mongoc_topology_scanner_node_t *node, bool failed) { /* the node may or may not have succeeded in finding a working stream. */ if (node->stream) { if (failed) { mongoc_stream_failed (node->stream); } else { mongoc_stream_destroy (node->stream); } node->stream = NULL; memset ( &node->sasl_supported_mechs, 0, sizeof (node->sasl_supported_mechs)); node->negotiated_sasl_supported_mechs = false; } } void mongoc_topology_scanner_node_destroy (mongoc_topology_scanner_node_t *node, bool failed) { DL_DELETE (node->ts->nodes, node); mongoc_topology_scanner_node_disconnect (node, failed); if (node->dns_results) { freeaddrinfo (node->dns_results); } bson_free (node); } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_get_node -- * * Return the scanner node with the given id. 
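 *
 * Nodes are appended in ascending id order as servers are added, so the
 * loop below can stop early once it sees an id greater than the one
 * requested.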
* *-------------------------------------------------------------------------- */ mongoc_topology_scanner_node_t * mongoc_topology_scanner_get_node (mongoc_topology_scanner_t *ts, uint32_t id) { mongoc_topology_scanner_node_t *ele, *tmp; DL_FOREACH_SAFE (ts->nodes, ele, tmp) { if (ele->id == id) { return ele; } if (ele->id > id) { break; } } return NULL; } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_has_node_for_host -- * * Whether the scanner has a node for the given host and port. * *-------------------------------------------------------------------------- */ bool mongoc_topology_scanner_has_node_for_host (mongoc_topology_scanner_t *ts, mongoc_host_list_t *host) { mongoc_topology_scanner_node_t *ele, *tmp; DL_FOREACH_SAFE (ts->nodes, ele, tmp) { if (_mongoc_host_list_equal (&ele->host, host)) { return true; } } return false; } static void _async_connected (mongoc_async_cmd_t *acmd) { mongoc_topology_scanner_node_t *node = (mongoc_topology_scanner_node_t *) acmd->data; /* this cmd connected successfully, cancel other cmds on this node. */ _cancel_commands_excluding (node, acmd); node->successful_dns_result = acmd->dns_result; } static void _async_success (mongoc_async_cmd_t *acmd, const bson_t *ismaster_response, int64_t duration_usec) { void *data = acmd->data; mongoc_topology_scanner_node_t *node = (mongoc_topology_scanner_node_t *) data; mongoc_stream_t *stream = acmd->stream; mongoc_topology_scanner_t *ts = node->ts; if (node->retired) { if (stream) { mongoc_stream_failed (stream); } return; } node->last_used = bson_get_monotonic_time (); node->last_failed = -1; _mongoc_topology_scanner_monitor_heartbeat_succeeded ( ts, &node->host, ismaster_response, duration_usec); /* set our successful stream. */ BSON_ASSERT (!node->stream); node->stream = stream; if (ts->negotiate_sasl_supported_mechs && !node->negotiated_sasl_supported_mechs) { _mongoc_handshake_parse_sasl_supported_mechs ( ismaster_response, &node->sasl_supported_mechs); } /* mongoc_topology_scanner_cb_t takes rtt_msec, not usec */ ts->cb (node->id, ismaster_response, duration_usec / 1000, ts->cb_data, &acmd->error); } static void _async_error_or_timeout (mongoc_async_cmd_t *acmd, int64_t duration_usec, const char *default_err_msg) { void *data = acmd->data; mongoc_topology_scanner_node_t *node = (mongoc_topology_scanner_node_t *) data; mongoc_stream_t *stream = acmd->stream; mongoc_topology_scanner_t *ts = node->ts; bson_error_t *error = &acmd->error; int64_t now = bson_get_monotonic_time (); const char *message; /* the stream may have failed on initiation. */ if (stream) { mongoc_stream_failed (stream); } if (node->retired) { return; } node->last_used = now; if (!node->stream && _count_acmds (node) == 1) { /* there are no remaining streams, connecting has failed. */ node->last_failed = now; if (error->code) { message = error->message; } else { message = default_err_msg; } /* invalidate any cached DNS results. */ if (node->dns_results) { freeaddrinfo (node->dns_results); node->dns_results = NULL; node->successful_dns_result = NULL; } bson_set_error (&node->last_error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_STREAM_CONNECT, "%s calling ismaster on \'%s\'", message, node->host.host_and_port); _mongoc_topology_scanner_monitor_heartbeat_failed ( ts, &node->host, &node->last_error, duration_usec); /* call the topology scanner callback. cannot connect to this node. * callback takes rtt_msec, not usec. 
*/ ts->cb (node->id, NULL, duration_usec / 1000, ts->cb_data, error); } else { /* there are still more commands left for this node or it succeeded * with another stream. skip the topology scanner callback. */ _jumpstart_other_acmds (node, acmd); } } /* *----------------------------------------------------------------------- * * This is the callback passed to async_cmd when we're running * ismasters from within the topology monitor. * *----------------------------------------------------------------------- */ static void _async_handler (mongoc_async_cmd_t *acmd, mongoc_async_cmd_result_t async_status, const bson_t *ismaster_response, int64_t duration_usec) { BSON_ASSERT (acmd->data); switch (async_status) { case MONGOC_ASYNC_CMD_CONNECTED: _async_connected (acmd); return; case MONGOC_ASYNC_CMD_SUCCESS: _async_success (acmd, ismaster_response, duration_usec); return; case MONGOC_ASYNC_CMD_TIMEOUT: _async_error_or_timeout (acmd, duration_usec, "connection timeout"); return; case MONGOC_ASYNC_CMD_ERROR: _async_error_or_timeout (acmd, duration_usec, "connection error"); return; case MONGOC_ASYNC_CMD_IN_PROGRESS: default: fprintf (stderr, "unexpected async status: %d\n", async_status); BSON_ASSERT (false); return; } } mongoc_stream_t * _mongoc_topology_scanner_node_setup_stream_for_tls ( mongoc_topology_scanner_node_t *node, mongoc_stream_t *stream) { #ifdef MONGOC_ENABLE_SSL mongoc_stream_t *tls_stream; #endif if (!stream) { return NULL; } #ifdef MONGOC_ENABLE_SSL if (node->ts->ssl_opts) { tls_stream = mongoc_stream_tls_new_with_hostname ( stream, node->host.host, node->ts->ssl_opts, 1); if (!tls_stream) { mongoc_stream_destroy (stream); return NULL; } else { return tls_stream; } } #endif return stream; } /* attempt to create a new socket stream using this dns result. */ mongoc_stream_t * _mongoc_topology_scanner_tcp_initiate (mongoc_async_cmd_t *acmd) { mongoc_topology_scanner_node_t *node = (mongoc_topology_scanner_node_t *) acmd->data; struct addrinfo *res = acmd->dns_result; mongoc_socket_t *sock = NULL; BSON_ASSERT (acmd->dns_result); /* create a new non-blocking socket. */ if (!(sock = mongoc_socket_new ( res->ai_family, res->ai_socktype, res->ai_protocol))) { return NULL; } (void) mongoc_socket_connect ( sock, res->ai_addr, (mongoc_socklen_t) res->ai_addrlen, 0); return _mongoc_topology_scanner_node_setup_stream_for_tls ( node, mongoc_stream_socket_new (sock)); } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_node_setup_tcp -- * * Create an async command for each DNS record found for this node. * * Returns: * A bool. On failure error is set. * *-------------------------------------------------------------------------- */ bool mongoc_topology_scanner_node_setup_tcp (mongoc_topology_scanner_node_t *node, bson_error_t *error) { struct addrinfo hints; struct addrinfo *iter; char portstr[8]; mongoc_host_list_t *host; int s; int64_t delay = 0; int64_t now = bson_get_monotonic_time (); ENTRY; host = &node->host; /* if cached dns results are expired, flush. 
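 * Cached results are considered fresh for ts->dns_cache_timeout_ms
 * (10 minutes by default); the comparison below multiplies by 1000 because
 * bson_get_monotonic_time () reports microseconds. When a hostname resolves
 * to several addresses, one async ismaster command is created per DNS
 * result, each delayed by an additional HAPPY_EYEBALLS_DELAY_MS (250 ms) so
 * the first, preferred address gets a head start.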
*/ if (node->dns_results && (now - node->last_dns_cache) > node->ts->dns_cache_timeout_ms * 1000) { freeaddrinfo (node->dns_results); node->dns_results = NULL; node->successful_dns_result = NULL; } if (!node->dns_results) { bson_snprintf (portstr, sizeof portstr, "%hu", host->port); memset (&hints, 0, sizeof hints); hints.ai_family = host->family; hints.ai_socktype = SOCK_STREAM; hints.ai_flags = 0; hints.ai_protocol = 0; s = getaddrinfo (host->host, portstr, &hints, &node->dns_results); if (s != 0) { mongoc_counter_dns_failure_inc (); bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_NAME_RESOLUTION, "Failed to resolve '%s'", host->host); RETURN (false); } mongoc_counter_dns_success_inc (); node->last_dns_cache = now; } if (node->successful_dns_result) { _begin_ismaster_cmd (node, NULL, false, node->successful_dns_result, 0); } else { LL_FOREACH2 (node->dns_results, iter, ai_next) { _begin_ismaster_cmd (node, NULL, false, iter, delay); /* each subsequent DNS result will have an additional 250ms delay. */ delay += HAPPY_EYEBALLS_DELAY_MS; } } RETURN (true); } bool mongoc_topology_scanner_node_connect_unix (mongoc_topology_scanner_node_t *node, bson_error_t *error) { #ifdef _WIN32 ENTRY; bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, "UNIX domain sockets not supported on win32."); RETURN (false); #else struct sockaddr_un saddr; mongoc_socket_t *sock; mongoc_stream_t *stream; mongoc_host_list_t *host; ENTRY; host = &node->host; memset (&saddr, 0, sizeof saddr); saddr.sun_family = AF_UNIX; bson_snprintf (saddr.sun_path, sizeof saddr.sun_path - 1, "%s", host->host); sock = mongoc_socket_new (AF_UNIX, SOCK_STREAM, 0); if (sock == NULL) { bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_SOCKET, "Failed to create socket."); RETURN (false); } if (-1 == mongoc_socket_connect ( sock, (struct sockaddr *) &saddr, sizeof saddr, -1)) { char buf[128]; char *errstr; errstr = bson_strerror_r (mongoc_socket_errno (sock), buf, sizeof (buf)); bson_set_error (error, MONGOC_ERROR_STREAM, MONGOC_ERROR_STREAM_CONNECT, "Failed to connect to UNIX domain socket: %s", errstr); mongoc_socket_destroy (sock); RETURN (false); } stream = _mongoc_topology_scanner_node_setup_stream_for_tls ( node, mongoc_stream_socket_new (sock)); if (stream) { _begin_ismaster_cmd (node, stream, false /* is_setup_done */, NULL /* dns result */, 0 /* delay */); RETURN (true); } RETURN (false); #endif } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_node_setup -- * * Create a stream and begin a non-blocking connect. * * Returns: * true on success, or false and error is set. * *-------------------------------------------------------------------------- */ void mongoc_topology_scanner_node_setup (mongoc_topology_scanner_node_t *node, bson_error_t *error) { bool success = false; mongoc_stream_t *stream; int64_t start; _mongoc_topology_scanner_monitor_heartbeat_started (node->ts, &node->host); start = bson_get_monotonic_time (); /* if there is already a working stream, push it back to be re-scanned. 
*/ if (node->stream) { _begin_ismaster_cmd ( node, node->stream, true /* is_setup_done */, NULL, 0); node->stream = NULL; return; } BSON_ASSERT (!node->retired); if (node->ts->initiator) { stream = node->ts->initiator ( node->ts->uri, &node->host, node->ts->initiator_context, error); if (stream) { success = true; _begin_ismaster_cmd (node, stream, false, NULL, 0); } } else { if (node->host.family == AF_UNIX) { success = mongoc_topology_scanner_node_connect_unix (node, error); } else { success = mongoc_topology_scanner_node_setup_tcp (node, error); } } if (!success) { _mongoc_topology_scanner_monitor_heartbeat_failed ( node->ts, &node->host, error, (bson_get_monotonic_time () - start) / 1000); node->ts->setup_err_cb (node->id, node->ts->cb_data, error); return; } node->has_auth = false; node->timestamp = bson_get_monotonic_time (); } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_node_in_cooldown -- * * Return true if @node has experienced a network error attempting * to call "ismaster" less than 5 seconds before @when, a timestamp in * microseconds. * * Server Discovery and Monitoring Spec: "After a single-threaded client * gets a network error trying to check a server, the client skips * re-checking the server until cooldownMS has passed. This avoids * spending connectTimeoutMS on each unavailable server during each scan. * This value MUST be 5000 ms, and it MUST NOT be configurable." * *-------------------------------------------------------------------------- */ bool mongoc_topology_scanner_node_in_cooldown (mongoc_topology_scanner_node_t *node, int64_t when) { if (node->last_failed == -1) { return false; /* node is new, or connected */ } return node->last_failed + 1000 * MONGOC_TOPOLOGY_COOLDOWN_MS >= when; } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_in_cooldown -- * * Return true if all nodes will be in cooldown at time @when, a * timestamp in microseconds. * *-------------------------------------------------------------------------- */ bool mongoc_topology_scanner_in_cooldown (mongoc_topology_scanner_t *ts, int64_t when) { mongoc_topology_scanner_node_t *node; DL_FOREACH (ts->nodes, node) { if (!mongoc_topology_scanner_node_in_cooldown (node, when)) { return false; } } return true; } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_start -- * * Initializes the scanner and begins a full topology check. This * should be called once before calling mongoc_topology_scanner_work() * to complete the scan. * * The topology mutex must be held by the caller. * * If "obey_cooldown" is true, this is a single-threaded blocking scan * that must obey the Server Discovery And Monitoring Spec's cooldownMS: * * "After a single-threaded client gets a network error trying to check * a server, the client skips re-checking the server until cooldownMS has * passed. * * "This avoids spending connectTimeoutMS on each unavailable server * during each scan. * * "This value MUST be 5000 ms, and it MUST NOT be configurable." 
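 *
 * Concretely, with MONGOC_TOPOLOGY_COOLDOWN_MS at 5000, a node whose last
 * failure was recorded at monotonic time t stays in cooldown until
 * t + 5,000,000 microseconds; mongoc_topology_scanner_node_in_cooldown
 * above implements exactly that comparison.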
* *-------------------------------------------------------------------------- */ void mongoc_topology_scanner_start (mongoc_topology_scanner_t *ts, bool obey_cooldown) { mongoc_topology_scanner_node_t *node, *tmp; bool skip; int64_t now; BSON_ASSERT (ts); _delete_retired_nodes (ts); now = bson_get_monotonic_time (); DL_FOREACH_SAFE (ts->nodes, node, tmp) { skip = obey_cooldown && mongoc_topology_scanner_node_in_cooldown (node, now); if (!skip) { mongoc_topology_scanner_node_setup (node, &node->last_error); } } } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_finish_scan -- * * Summarizes all scanner node errors into one error message, * deletes retired nodes. * *-------------------------------------------------------------------------- */ void _mongoc_topology_scanner_finish (mongoc_topology_scanner_t *ts) { mongoc_topology_scanner_node_t *node, *tmp; bson_error_t *error = &ts->error; bson_string_t *msg; memset (&ts->error, 0, sizeof (bson_error_t)); msg = bson_string_new (NULL); DL_FOREACH_SAFE (ts->nodes, node, tmp) { if (node->last_error.code) { if (msg->len) { bson_string_append_c (msg, ' '); } bson_string_append_printf (msg, "[%s]", node->last_error.message); /* last error domain and code win */ error->domain = node->last_error.domain; error->code = node->last_error.code; } } bson_strncpy ((char *) &error->message, msg->str, sizeof (error->message)); bson_string_free (msg, true); _delete_retired_nodes (ts); } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_work -- * * Crank the knob on the topology scanner state machine. This should * be called only after mongoc_topology_scanner_start() has been used * to begin the scan. * *-------------------------------------------------------------------------- */ void mongoc_topology_scanner_work (mongoc_topology_scanner_t *ts) { mongoc_async_run (ts->async); BSON_ASSERT (ts->async->ncmds == 0); } /* *-------------------------------------------------------------------------- * * mongoc_topology_scanner_get_error -- * * Copy the scanner's current error; which may no-error (code 0). * *-------------------------------------------------------------------------- */ void mongoc_topology_scanner_get_error (mongoc_topology_scanner_t *ts, bson_error_t *error) { BSON_ASSERT (ts); BSON_ASSERT (error); memcpy (error, &ts->error, sizeof (bson_error_t)); } /* * Set a field in the topology scanner. 
*/ bool _mongoc_topology_scanner_set_appname (mongoc_topology_scanner_t *ts, const char *appname) { if (!_mongoc_handshake_appname_is_valid (appname)) { MONGOC_ERROR ("Cannot set appname: %s is invalid", appname); return false; } if (ts->appname != NULL) { MONGOC_ERROR ("Cannot set appname more than once"); return false; } ts->appname = bson_strdup (appname); return true; } /* * Set the scanner's clusterTime unconditionally: don't compare with prior * @cluster_time is like {clusterTime: } */ void _mongoc_topology_scanner_set_cluster_time (mongoc_topology_scanner_t *ts, const bson_t *cluster_time) { bson_destroy (&ts->cluster_time); bson_copy_to (cluster_time, &ts->cluster_time); } /* SDAM Monitoring Spec: send HeartbeatStartedEvent */ static void _mongoc_topology_scanner_monitor_heartbeat_started ( const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host) { if (ts->apm_callbacks.server_heartbeat_started) { mongoc_apm_server_heartbeat_started_t event; event.host = host; event.context = ts->apm_context; ts->apm_callbacks.server_heartbeat_started (&event); } } /* SDAM Monitoring Spec: send HeartbeatSucceededEvent */ static void _mongoc_topology_scanner_monitor_heartbeat_succeeded ( const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host, const bson_t *reply, int64_t duration_usec) { if (ts->apm_callbacks.server_heartbeat_succeeded) { mongoc_apm_server_heartbeat_succeeded_t event; event.host = host; event.context = ts->apm_context; event.reply = reply; event.duration_usec = duration_usec; ts->apm_callbacks.server_heartbeat_succeeded (&event); } } /* SDAM Monitoring Spec: send HeartbeatFailedEvent */ static void _mongoc_topology_scanner_monitor_heartbeat_failed ( const mongoc_topology_scanner_t *ts, const mongoc_host_list_t *host, const bson_error_t *error, int64_t duration_usec) { if (ts->apm_callbacks.server_heartbeat_failed) { mongoc_apm_server_heartbeat_failed_t event; event.host = host; event.context = ts->apm_context; event.error = error; event.duration_usec = duration_usec; ts->apm_callbacks.server_heartbeat_failed (&event); } } /* this is for testing the dns cache timeout. 
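 * (Presumably shortening the timeout lets tests force a scan to re-resolve
 * host names instead of reusing cached DNS results; production code leaves
 * the default alone.)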
*/ void _mongoc_topology_scanner_set_dns_cache_timeout (mongoc_topology_scanner_t *ts, int64_t timeout_ms) { ts->dns_cache_timeout_ms = timeout_ms; } /* reset "retired" nodes that failed or were removed in the previous scan */ static void _delete_retired_nodes (mongoc_topology_scanner_t *ts) { mongoc_topology_scanner_node_t *node, *tmp; DL_FOREACH_SAFE (ts->nodes, node, tmp) { if (node->retired) { mongoc_topology_scanner_node_destroy (node, true); } } } static void _cancel_commands_excluding (mongoc_topology_scanner_node_t *node, mongoc_async_cmd_t *acmd) { mongoc_async_cmd_t *iter; DL_FOREACH (node->ts->async->cmds, iter) { if ((mongoc_topology_scanner_node_t *) iter->data == node && iter != acmd) { iter->state = MONGOC_ASYNC_CMD_CANCELED_STATE; } } } static int _count_acmds (mongoc_topology_scanner_node_t *node) { mongoc_async_cmd_t *iter; int count = 0; DL_FOREACH (node->ts->async->cmds, iter) { if ((mongoc_topology_scanner_node_t *) iter->data == node) { ++count; } } return count; } static void _jumpstart_other_acmds (mongoc_topology_scanner_node_t *node, mongoc_async_cmd_t *acmd) { mongoc_async_cmd_t *iter; DL_FOREACH (node->ts->async->cmds, iter) { if ((mongoc_topology_scanner_node_t *) iter->data == node && iter != acmd && acmd->initiate_delay_ms < iter->initiate_delay_ms) { iter->initiate_delay_ms = BSON_MAX (iter->initiate_delay_ms - HAPPY_EYEBALLS_DELAY_MS, 0); } } } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-topology.c0000644000076500000240000014263213572250760025334 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-handshake.h" #include "mongoc/mongoc-handshake-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-host-list-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-topology-private.h" #include "mongoc/mongoc-topology-description-apm-private.h" #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-cmd-private.h" #include "mongoc/mongoc-uri-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/utlist.h" static bool _mongoc_topology_reconcile_add_nodes (mongoc_server_description_t *sd, mongoc_topology_t *topology) { mongoc_topology_scanner_t *scanner = topology->scanner; /* quickly search by id, then check if a node for this host was retired in * this scan. 
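 * Both checks matter: a node that failed in this scan is merely retired, not
 * yet deleted, so matching on the host as well as the id avoids creating a
 * duplicate scanner node for the same server.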
*/ if (!mongoc_topology_scanner_get_node (scanner, sd->id) && !mongoc_topology_scanner_has_node_for_host (scanner, &sd->host)) { mongoc_topology_scanner_add (scanner, &sd->host, sd->id); mongoc_topology_scanner_scan (scanner, sd->id); } return true; } void mongoc_topology_reconcile (mongoc_topology_t *topology) { mongoc_topology_description_t *description; mongoc_set_t *servers; mongoc_server_description_t *sd; int i; mongoc_topology_scanner_node_t *ele, *tmp; description = &topology->description; servers = description->servers; /* Add newly discovered nodes */ for (i = 0; i < (int) servers->items_len; i++) { sd = (mongoc_server_description_t *) mongoc_set_get_item (servers, i); _mongoc_topology_reconcile_add_nodes (sd, topology); } /* Remove removed nodes */ DL_FOREACH_SAFE (topology->scanner->nodes, ele, tmp) { if (!mongoc_topology_description_server_by_id ( description, ele->id, NULL)) { mongoc_topology_scanner_node_retire (ele); } } } /* call this while already holding the lock */ static bool _mongoc_topology_update_no_lock (uint32_t id, const bson_t *ismaster_response, int64_t rtt_msec, mongoc_topology_t *topology, const bson_error_t *error /* IN */) { mongoc_topology_description_handle_ismaster ( &topology->description, id, ismaster_response, rtt_msec, error); /* return false if server removed from topology */ return mongoc_topology_description_server_by_id ( &topology->description, id, NULL) != NULL; } /* *------------------------------------------------------------------------- * * _mongoc_topology_scanner_setup_err_cb -- * * Callback method to handle errors during topology scanner node * setup, typically DNS or SSL errors. * *------------------------------------------------------------------------- */ void _mongoc_topology_scanner_setup_err_cb (uint32_t id, void *data, const bson_error_t *error /* IN */) { mongoc_topology_t *topology; BSON_ASSERT (data); topology = (mongoc_topology_t *) data; mongoc_topology_description_handle_ismaster (&topology->description, id, NULL /* ismaster reply */, -1 /* rtt_msec */, error); } /* *------------------------------------------------------------------------- * * _mongoc_topology_scanner_cb -- * * Callback method to handle ismaster responses received by async * command objects. * * NOTE: This method locks the given topology's mutex. * *------------------------------------------------------------------------- */ void _mongoc_topology_scanner_cb (uint32_t id, const bson_t *ismaster_response, int64_t rtt_msec, void *data, const bson_error_t *error /* IN */) { mongoc_topology_t *topology; mongoc_server_description_t *sd; BSON_ASSERT (data); topology = (mongoc_topology_t *) data; bson_mutex_lock (&topology->mutex); sd = mongoc_topology_description_server_by_id ( &topology->description, id, NULL); /* Server Discovery and Monitoring Spec: "Once a server is connected, the * client MUST change its type to Unknown only after it has retried the * server once." */ if (!ismaster_response && sd && sd->type != MONGOC_SERVER_UNKNOWN) { _mongoc_topology_update_no_lock ( id, ismaster_response, rtt_msec, topology, error); /* add another ismaster call to the current scan - the scan continues * until all commands are done */ mongoc_topology_scanner_scan (topology->scanner, sd->id); } else { _mongoc_topology_update_no_lock ( id, ismaster_response, rtt_msec, topology, error); /* The processing of the ismaster results above may have added/removed * server descriptions. 
We need to reconcile that with our monitoring * agents */ mongoc_topology_reconcile (topology); mongoc_cond_broadcast (&topology->cond_client); } bson_mutex_unlock (&topology->mutex); } /* *------------------------------------------------------------------------- * * mongoc_topology_new -- * * Creates and returns a new topology object. * * Returns: * A new topology object. * * Side effects: * None. * *------------------------------------------------------------------------- */ mongoc_topology_t * mongoc_topology_new (const mongoc_uri_t *uri, bool single_threaded) { int64_t heartbeat_default; int64_t heartbeat; mongoc_topology_t *topology; bool topology_valid; mongoc_topology_description_type_t init_type; const char *service; char *prefixed_service; uint32_t id; const mongoc_host_list_t *hl; mongoc_rr_data_t rr_data; BSON_ASSERT (uri); #ifndef MONGOC_ENABLE_CRYPTO if (mongoc_uri_get_option_as_bool ( uri, MONGOC_URI_RETRYWRITES, MONGOC_DEFAULT_RETRYWRITES)) { /* retryWrites requires sessions, which require crypto - just warn */ MONGOC_WARNING ( "retryWrites not supported without an SSL crypto library"); } #endif topology = (mongoc_topology_t *) bson_malloc0 (sizeof *topology); topology->session_pool = NULL; heartbeat_default = single_threaded ? MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_SINGLE_THREADED : MONGOC_TOPOLOGY_HEARTBEAT_FREQUENCY_MS_MULTI_THREADED; heartbeat = mongoc_uri_get_option_as_int32 ( uri, MONGOC_URI_HEARTBEATFREQUENCYMS, heartbeat_default); mongoc_topology_description_init (&topology->description, heartbeat); topology->description.set_name = bson_strdup (mongoc_uri_get_replica_set (uri)); topology->uri = mongoc_uri_copy (uri); topology->single_threaded = single_threaded; if (single_threaded) { /* Server Selection Spec: * * "Single-threaded drivers MUST provide a "serverSelectionTryOnce" * mode, in which the driver scans the topology exactly once after * server selection fails, then either selects a server or raises an * error. * * "The serverSelectionTryOnce option MUST be true by default." */ topology->server_selection_try_once = mongoc_uri_get_option_as_bool ( uri, MONGOC_URI_SERVERSELECTIONTRYONCE, true); } else { topology->server_selection_try_once = false; } topology->server_selection_timeout_msec = mongoc_uri_get_option_as_int32 ( topology->uri, MONGOC_URI_SERVERSELECTIONTIMEOUTMS, MONGOC_TOPOLOGY_SERVER_SELECTION_TIMEOUT_MS); /* tests can override this */ topology->min_heartbeat_frequency_msec = MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS; topology->local_threshold_msec = mongoc_uri_get_local_threshold_option (topology->uri); /* Total time allowed to check a server is connectTimeoutMS. * Server Discovery And Monitoring Spec: * * "The socket used to check a server MUST use the same connectTimeoutMS as * regular sockets. Multi-threaded clients SHOULD set monitoring sockets' * socketTimeoutMS to the connectTimeoutMS." */ topology->connect_timeout_msec = mongoc_uri_get_option_as_int32 (topology->uri, MONGOC_URI_CONNECTTIMEOUTMS, MONGOC_DEFAULT_CONNECTTIMEOUTMS); topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_OFF; topology->scanner = mongoc_topology_scanner_new (topology->uri, _mongoc_topology_scanner_setup_err_cb, _mongoc_topology_scanner_cb, topology, topology->connect_timeout_msec); bson_mutex_init (&topology->mutex); mongoc_cond_init (&topology->cond_client); mongoc_cond_init (&topology->cond_server); if (single_threaded) { /* single threaded clients negotiate sasl supported mechanisms during * a topology scan. 
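 * When this flag is set the scanner's initial ismaster carries a
 * "saslSupportedMechs" field (per the drivers authentication spec), so the
 * server can report which SASL mechanisms are enabled for the configured
 * user without costing an extra round trip before authentication.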
*/ if (_mongoc_uri_requires_auth_negotiation (uri)) { topology->scanner->negotiate_sasl_supported_mechs = true; } } topology_valid = true; service = mongoc_uri_get_service (uri); if (service) { memset (&rr_data, 0, sizeof (mongoc_rr_data_t)); /* a mongodb+srv URI. try SRV lookup, if no error then also try TXT */ prefixed_service = bson_strdup_printf ("_mongodb._tcp.%s", service); if (!_mongoc_client_get_rr (prefixed_service, MONGOC_RR_SRV, topology->uri, &rr_data, &topology->scanner->error) || !_mongoc_client_get_rr (service, MONGOC_RR_TXT, topology->uri, NULL, &topology->scanner->error)) { topology_valid = false; } else { topology->last_srv_scan = bson_get_monotonic_time (); topology->rescanSRVIntervalMS = BSON_MAX ( rr_data.min_ttl * 1000, MONGOC_TOPOLOGY_MIN_RESCAN_SRV_INTERVAL_MS); } bson_free (prefixed_service); } /* * Set topology type from URI: * - if we've got a replicaSet name, initialize to RS_NO_PRIMARY * - otherwise, if the seed list has a single host, initialize to SINGLE * - everything else gets initialized to UNKNOWN */ hl = mongoc_uri_get_hosts (topology->uri); if (mongoc_uri_get_replica_set (topology->uri)) { init_type = MONGOC_TOPOLOGY_RS_NO_PRIMARY; } else { if (hl && hl->next) { init_type = MONGOC_TOPOLOGY_UNKNOWN; } else { init_type = MONGOC_TOPOLOGY_SINGLE; } } topology->description.type = init_type; if (!topology_valid) { /* add no nodes */ return topology; } while (hl) { mongoc_topology_description_add_server ( &topology->description, hl->host_and_port, &id); mongoc_topology_scanner_add (topology->scanner, hl, id); hl = hl->next; } return topology; } /* *------------------------------------------------------------------------- * * mongoc_topology_set_apm_callbacks -- * * Set Application Performance Monitoring callbacks. * *------------------------------------------------------------------------- */ void mongoc_topology_set_apm_callbacks (mongoc_topology_t *topology, mongoc_apm_callbacks_t *callbacks, void *context) { if (callbacks) { memcpy (&topology->description.apm_callbacks, callbacks, sizeof (mongoc_apm_callbacks_t)); memcpy (&topology->scanner->apm_callbacks, callbacks, sizeof (mongoc_apm_callbacks_t)); } else { memset (&topology->description.apm_callbacks, 0, sizeof (mongoc_apm_callbacks_t)); memset ( &topology->scanner->apm_callbacks, 0, sizeof (mongoc_apm_callbacks_t)); } topology->description.apm_context = context; topology->scanner->apm_context = context; } /* *------------------------------------------------------------------------- * * mongoc_topology_destroy -- * * Free the memory associated with this topology object. * * Returns: * None. * * Side effects: * @topology will be cleaned up. * *------------------------------------------------------------------------- */ void mongoc_topology_destroy (mongoc_topology_t *topology) { if (!topology) { return; } _mongoc_topology_background_thread_stop (topology); _mongoc_topology_description_monitor_closed (&topology->description); mongoc_uri_destroy (topology->uri); mongoc_topology_description_destroy (&topology->description); mongoc_topology_scanner_destroy (topology->scanner); /* If we are single-threaded, the client will try to call _mongoc_topology_end_sessions_cmd when it dies. This removes sessions from the pool as it calls endSessions on them. In case this does not succeed, we clear the pool again here. 
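 * _mongoc_topology_clear_session_pool() below only frees the pooled server
 * sessions; it does not send endSessions, which is acceptable here because
 * the topology is being torn down.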
*/ _mongoc_topology_clear_session_pool (topology); mongoc_cond_destroy (&topology->cond_client); mongoc_cond_destroy (&topology->cond_server); bson_mutex_destroy (&topology->mutex); bson_free (topology); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_clear_session_pool -- * * Clears the pool of server sessions without sending endSessions. * * Returns: * Nothing. * * Side effects: * Server session pool will be emptied. * *-------------------------------------------------------------------------- */ void _mongoc_topology_clear_session_pool (mongoc_topology_t *topology) { mongoc_server_session_t *ss, *tmp1, *tmp2; CDL_FOREACH_SAFE (topology->session_pool, ss, tmp1, tmp2) { _mongoc_server_session_destroy (ss); } } /* *-------------------------------------------------------------------------- * * mongoc_topology_rescan_srv -- * * Queries SRV records for new hosts in a mongos cluster. * * NOTE: this method expects @topology's mutex to be locked on entry. * * -------------------------------------------------------------------------- */ static void mongoc_topology_rescan_srv (mongoc_topology_t *topology) { mongoc_rr_data_t rr_data = {0}; mongoc_host_list_t *h = NULL; const char *service; char *prefixed_service = NULL; int64_t scan_time; if ((topology->description.type != MONGOC_TOPOLOGY_SHARDED) && (topology->description.type != MONGOC_TOPOLOGY_UNKNOWN)) { /* Only perform rescan for sharded topology. */ return; } service = mongoc_uri_get_service (topology->uri); if (!service) { /* Only rescan if we have a mongodb+srv:// URI. */ return; } scan_time = topology->last_srv_scan + (topology->rescanSRVIntervalMS * 1000); if (bson_get_monotonic_time () < scan_time) { /* Query SRV no more frequently than rescanSRVIntervalMS. */ return; } /* Go forth and query... */ rr_data.hosts = _mongoc_host_list_copy (mongoc_uri_get_hosts (topology->uri), NULL); prefixed_service = bson_strdup_printf ("_mongodb._tcp.%s", service); if (!_mongoc_client_get_rr (prefixed_service, MONGOC_RR_SRV, topology->uri, &rr_data, &topology->scanner->error)) { /* Failed querying, soldier on and try again next time. */ topology->rescanSRVIntervalMS = topology->description.heartbeat_msec; GOTO (done); } topology->last_srv_scan = bson_get_monotonic_time (); topology->rescanSRVIntervalMS = BSON_MAX ( rr_data.min_ttl * 1000, MONGOC_TOPOLOGY_MIN_RESCAN_SRV_INTERVAL_MS); if (rr_data.count == 0) { /* Special case when DNS returns zero records successfully. * Leave the toplogy alone and perform another scan at the next interval * rather than removing all records and having nothing to connect to. * For no verified hosts drivers "MUST temporarily set rescanSRVIntervalMS * to heartbeatFrequencyMS until at least one verified SRV record is * obtained." */ topology->rescanSRVIntervalMS = topology->description.heartbeat_msec; GOTO (done); } /* rr_data.hosts was initialized to the current set of known hosts * on entry, and mongoc_client_get_rr will have stripped it down to * only include hosts which were NOT included in the most recent query. * Remove those hosts and we're left with only active servers. */ for (h = rr_data.hosts; h; h = rr_data.hosts) { rr_data.hosts = h->next; mongoc_uri_remove_host (topology->uri, h->host, h->port); bson_free (h); } done: bson_free (prefixed_service); _mongoc_host_list_destroy_all (rr_data.hosts); } /* *-------------------------------------------------------------------------- * * mongoc_topology_scan_once -- * * Runs a single complete scan. 
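 * A single scan consists of an optional SRV re-scan (mongodb+srv URIs
 * only), a reconcile of scanner nodes against the topology description,
 * and then scanner start / work / finish.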
* * NOTE: this method expects @topology's mutex to be locked on entry. * * NOTE: this method unlocks and re-locks @topology's mutex. * *-------------------------------------------------------------------------- */ static void mongoc_topology_scan_once (mongoc_topology_t *topology, bool obey_cooldown) { /* Prior to scanning hosts, update the list of SRV hosts, if applicable. */ mongoc_topology_rescan_srv (topology); /* since the last scan, members may be added or removed from the topology * description based on ismaster responses in connection handshakes, see * _mongoc_topology_update_from_handshake. retire scanner nodes for removed * members and create scanner nodes for new ones. */ mongoc_topology_reconcile (topology); mongoc_topology_scanner_start (topology->scanner, obey_cooldown); /* scanning locks and unlocks the mutex itself until the scan is done */ bson_mutex_unlock (&topology->mutex); mongoc_topology_scanner_work (topology->scanner); bson_mutex_lock (&topology->mutex); _mongoc_topology_scanner_finish (topology->scanner); topology->last_scan = bson_get_monotonic_time (); topology->stale = false; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_do_blocking_scan -- * * Monitoring entry for single-threaded use case. Assumes the caller * has checked that it's the right time to scan. * *-------------------------------------------------------------------------- */ void _mongoc_topology_do_blocking_scan (mongoc_topology_t *topology, bson_error_t *error) { topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_SINGLE_THREADED; _mongoc_handshake_freeze (); bson_mutex_lock (&topology->mutex); mongoc_topology_scan_once (topology, true /* obey cooldown */); bson_mutex_unlock (&topology->mutex); mongoc_topology_scanner_get_error (topology->scanner, error); } bool mongoc_topology_compatible (const mongoc_topology_description_t *td, const mongoc_read_prefs_t *read_prefs, bson_error_t *error) { int64_t max_staleness_seconds; int32_t max_wire_version; if (td->compatibility_error.code) { if (error) { memcpy (error, &td->compatibility_error, sizeof (bson_error_t)); } return false; } if (!read_prefs) { /* NULL means read preference Primary */ return true; } max_staleness_seconds = mongoc_read_prefs_get_max_staleness_seconds (read_prefs); if (max_staleness_seconds != MONGOC_NO_MAX_STALENESS) { max_wire_version = mongoc_topology_description_lowest_max_wire_version (td); if (max_wire_version < WIRE_VERSION_MAX_STALENESS) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "Not all servers support maxStalenessSeconds"); return false; } /* shouldn't happen if we've properly enforced wire version */ if (!mongoc_topology_description_all_sds_have_write_date (td)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "Not all servers have lastWriteDate"); return false; } if (!_mongoc_topology_description_validate_max_staleness ( td, max_staleness_seconds, error)) { return false; } } return true; } static void _mongoc_server_selection_error (const char *msg, const bson_error_t *scanner_error, bson_error_t *error) { if (scanner_error && scanner_error->code) { bson_set_error (error, MONGOC_ERROR_SERVER_SELECTION, MONGOC_ERROR_SERVER_SELECTION_FAILURE, "%s: %s", msg, scanner_error->message); } else { bson_set_error (error, MONGOC_ERROR_SERVER_SELECTION, MONGOC_ERROR_SERVER_SELECTION_FAILURE, "%s", msg); } } /* *------------------------------------------------------------------------- * * 
mongoc_topology_select -- * * Selects a server description for an operation based on @optype * and @read_prefs. * * NOTE: this method returns a copy of the original server * description. Callers must own and clean up this copy. * * NOTE: this method locks and unlocks @topology's mutex. * * Parameters: * @topology: The topology. * @optype: Whether we are selecting for a read or write operation. * @read_prefs: Required, the read preferences for the command. * @error: Required, out pointer for error info. * * Returns: * A mongoc_server_description_t, or NULL on failure, in which case * @error will be set. * * Side effects: * @error may be set. * *------------------------------------------------------------------------- */ mongoc_server_description_t * mongoc_topology_select (mongoc_topology_t *topology, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_prefs, bson_error_t *error) { uint32_t server_id = mongoc_topology_select_server_id (topology, optype, read_prefs, error); if (server_id) { /* new copy of the server description */ return mongoc_topology_server_by_id (topology, server_id, error); } else { return NULL; } } /* *------------------------------------------------------------------------- * * mongoc_topology_select_server_id -- * * Alternative to mongoc_topology_select when you only need the id. * * Returns: * A server id, or 0 on failure, in which case @error will be set. * *------------------------------------------------------------------------- */ uint32_t mongoc_topology_select_server_id (mongoc_topology_t *topology, mongoc_ss_optype_t optype, const mongoc_read_prefs_t *read_prefs, bson_error_t *error) { static const char *timeout_msg = "No suitable servers found: `serverSelectionTimeoutMS` expired"; mongoc_topology_scanner_t *ts; int r; int64_t local_threshold_ms; mongoc_server_description_t *selected_server = NULL; bool try_once; int64_t sleep_usec; bool tried_once; bson_error_t scanner_error = {0}; int64_t heartbeat_msec; uint32_t server_id; /* These names come from the Server Selection Spec pseudocode */ int64_t loop_start; /* when we entered this function */ int64_t loop_end; /* when we last completed a loop (single-threaded) */ int64_t scan_ready; /* the soonest we can do a blocking scan */ int64_t next_update; /* the latest we must do a blocking scan */ int64_t expire_at; /* when server selection timeout expires */ BSON_ASSERT (topology); ts = topology->scanner; bson_mutex_lock (&topology->mutex); /* It isn't strictly necessary to lock here, because if the topology * is invalid, it will never become valid. Lock anyway for consistency. 
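 * (A scanner is only ever invalid because construction failed, e.g. the
 * mongodb+srv SRV/TXT lookup in mongoc_topology_new() failed and no hosts
 * were added, so its validity never changes after creation.)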
*/ if (!mongoc_topology_scanner_valid (ts)) { if (error) { mongoc_topology_scanner_get_error (ts, error); error->domain = MONGOC_ERROR_SERVER_SELECTION; error->code = MONGOC_ERROR_SERVER_SELECTION_FAILURE; } bson_mutex_unlock (&topology->mutex); return 0; } bson_mutex_unlock (&topology->mutex); heartbeat_msec = topology->description.heartbeat_msec; local_threshold_ms = topology->local_threshold_msec; try_once = topology->server_selection_try_once; loop_start = loop_end = bson_get_monotonic_time (); expire_at = loop_start + ((int64_t) topology->server_selection_timeout_msec * 1000); if (topology->single_threaded) { _mongoc_topology_description_monitor_opening (&topology->description); tried_once = false; next_update = topology->last_scan + heartbeat_msec * 1000; if (next_update < loop_start) { /* we must scan now */ topology->stale = true; } /* until we find a server or time out */ for (;;) { if (topology->stale) { /* how soon are we allowed to scan? */ scan_ready = topology->last_scan + topology->min_heartbeat_frequency_msec * 1000; if (scan_ready > expire_at && !try_once) { /* selection timeout will expire before min heartbeat passes */ _mongoc_server_selection_error ( "No suitable servers found: " "`serverselectiontimeoutms` timed out", &scanner_error, error); return 0; } sleep_usec = scan_ready - loop_end; if (sleep_usec > 0) { if (try_once && mongoc_topology_scanner_in_cooldown (ts, scan_ready)) { _mongoc_server_selection_error ( "No servers yet eligible for rescan", &scanner_error, error); return 0; } _mongoc_usleep (sleep_usec); } /* takes up to connectTimeoutMS. sets "last_scan", clears "stale" */ _mongoc_topology_do_blocking_scan (topology, &scanner_error); loop_end = topology->last_scan; tried_once = true; } if (!mongoc_topology_compatible ( &topology->description, read_prefs, error)) { return 0; } selected_server = mongoc_topology_description_select ( &topology->description, optype, read_prefs, local_threshold_ms); if (selected_server) { return selected_server->id; } topology->stale = true; if (try_once) { if (tried_once) { _mongoc_server_selection_error ( "No suitable servers found (`serverSelectionTryOnce` set)", &scanner_error, error); return 0; } } else { loop_end = bson_get_monotonic_time (); if (loop_end > expire_at) { /* no time left in server_selection_timeout_msec */ _mongoc_server_selection_error ( timeout_msg, &scanner_error, error); return 0; } } } } /* With background thread */ /* we break out when we've found a server or timed out */ for (;;) { bson_mutex_lock (&topology->mutex); if (!mongoc_topology_compatible ( &topology->description, read_prefs, error)) { bson_mutex_unlock (&topology->mutex); return 0; } selected_server = mongoc_topology_description_select ( &topology->description, optype, read_prefs, local_threshold_ms); if (!selected_server) { _mongoc_topology_request_scan (topology); r = mongoc_cond_timedwait (&topology->cond_client, &topology->mutex, (expire_at - loop_start) / 1000); mongoc_topology_scanner_get_error (ts, &scanner_error); bson_mutex_unlock (&topology->mutex); #ifdef _WIN32 if (r == WSAETIMEDOUT) { #else if (r == ETIMEDOUT) { #endif /* handle timeouts */ _mongoc_server_selection_error (timeout_msg, &scanner_error, error); return 0; } else if (r) { bson_set_error (error, MONGOC_ERROR_SERVER_SELECTION, MONGOC_ERROR_SERVER_SELECTION_FAILURE, "Unknown error '%d' received while waiting on " "thread condition", r); return 0; } loop_start = bson_get_monotonic_time (); if (loop_start > expire_at) { _mongoc_server_selection_error (timeout_msg, 
&scanner_error, error); return 0; } } else { server_id = selected_server->id; bson_mutex_unlock (&topology->mutex); return server_id; } } } /* *------------------------------------------------------------------------- * * mongoc_topology_server_by_id -- * * Get the server description for @id, if that server is present * in @description. Otherwise, return NULL and fill out the optional * @error. * * NOTE: this method returns a copy of the original server * description. Callers must own and clean up this copy. * * NOTE: this method locks and unlocks @topology's mutex. * * Returns: * A mongoc_server_description_t, or NULL. * * Side effects: * Fills out optional @error if server not found. * *------------------------------------------------------------------------- */ mongoc_server_description_t * mongoc_topology_server_by_id (mongoc_topology_t *topology, uint32_t id, bson_error_t *error) { mongoc_server_description_t *sd; bson_mutex_lock (&topology->mutex); sd = mongoc_server_description_new_copy ( mongoc_topology_description_server_by_id ( &topology->description, id, error)); bson_mutex_unlock (&topology->mutex); return sd; } /* *------------------------------------------------------------------------- * * mongoc_topology_host_by_id -- * * Copy the mongoc_host_list_t for @id, if that server is present * in @description. Otherwise, return NULL and fill out the optional * @error. * * NOTE: this method returns a copy of the original mongoc_host_list_t. * Callers must own and clean up this copy. * * NOTE: this method locks and unlocks @topology's mutex. * * Returns: * A mongoc_host_list_t, or NULL. * * Side effects: * Fills out optional @error if server not found. * *------------------------------------------------------------------------- */ mongoc_host_list_t * _mongoc_topology_host_by_id (mongoc_topology_t *topology, uint32_t id, bson_error_t *error) { mongoc_server_description_t *sd; mongoc_host_list_t *host = NULL; bson_mutex_lock (&topology->mutex); /* not a copy - direct pointer into topology description data */ sd = mongoc_topology_description_server_by_id ( &topology->description, id, error); if (sd) { host = bson_malloc0 (sizeof (mongoc_host_list_t)); memcpy (host, &sd->host, sizeof (mongoc_host_list_t)); } bson_mutex_unlock (&topology->mutex); return host; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_request_scan -- * * Non-locking variant * *-------------------------------------------------------------------------- */ void _mongoc_topology_request_scan (mongoc_topology_t *topology) { topology->scan_requested = true; mongoc_cond_signal (&topology->cond_server); } /* *-------------------------------------------------------------------------- * * mongoc_topology_invalidate_server -- * * Invalidate the given server after receiving a network error in * another part of the client. * * NOTE: this method uses @topology's mutex. 
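 *
 * Invalidation marks the server Unknown in the topology description and
 * records @error as its last error; the server must be re-checked by a
 * later scan before it can be selected again.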
* *-------------------------------------------------------------------------- */ void mongoc_topology_invalidate_server (mongoc_topology_t *topology, uint32_t id, const bson_error_t *error) { BSON_ASSERT (error); bson_mutex_lock (&topology->mutex); mongoc_topology_description_invalidate_server ( &topology->description, id, error); bson_mutex_unlock (&topology->mutex); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_update_from_handshake -- * * A client opens a new connection and calls ismaster on it when it * detects a closed connection in _mongoc_cluster_check_interval, or if * mongoc_client_pool_pop creates a new client. Update the topology * description from the ismaster response. * * NOTE: this method uses @topology's mutex. * * Returns: * false if the server was removed from the topology *-------------------------------------------------------------------------- */ bool _mongoc_topology_update_from_handshake (mongoc_topology_t *topology, const mongoc_server_description_t *sd) { bool has_server; BSON_ASSERT (topology); BSON_ASSERT (sd); bson_mutex_lock (&topology->mutex); /* return false if server was removed from topology */ has_server = _mongoc_topology_update_no_lock ( sd->id, &sd->last_is_master, sd->round_trip_time_msec, topology, NULL); /* if pooled, wake threads waiting in mongoc_topology_server_by_id */ mongoc_cond_broadcast (&topology->cond_client); bson_mutex_unlock (&topology->mutex); return has_server; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_update_last_used -- * * Internal function. In single-threaded mode only, track when the socket * to a particular server was last used. This is required for * mongoc_cluster_check_interval to know when a socket has been idle. * *-------------------------------------------------------------------------- */ void _mongoc_topology_update_last_used (mongoc_topology_t *topology, uint32_t server_id) { mongoc_topology_scanner_node_t *node; if (!topology->single_threaded) { return; } node = mongoc_topology_scanner_get_node (topology->scanner, server_id); if (node) { node->last_used = bson_get_monotonic_time (); } } /* *-------------------------------------------------------------------------- * * mongoc_topology_server_timestamp -- * * Return the topology's scanner's timestamp for the given server, * or -1 if there is no scanner node for the given server. * * NOTE: this method uses @topology's mutex. * * Returns: * Timestamp, or -1 * *-------------------------------------------------------------------------- */ int64_t mongoc_topology_server_timestamp (mongoc_topology_t *topology, uint32_t id) { mongoc_topology_scanner_node_t *node; int64_t timestamp = -1; bson_mutex_lock (&topology->mutex); node = mongoc_topology_scanner_get_node (topology->scanner, id); if (node) { timestamp = node->timestamp; } bson_mutex_unlock (&topology->mutex); return timestamp; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_get_type -- * * Return the topology's description's type. * * NOTE: this method uses @topology's mutex. * * Returns: * The topology description type. 
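 *   (one of the mongoc_topology_description_type_t values, e.g.
 *   MONGOC_TOPOLOGY_UNKNOWN, MONGOC_TOPOLOGY_SINGLE,
 *   MONGOC_TOPOLOGY_RS_NO_PRIMARY, MONGOC_TOPOLOGY_RS_WITH_PRIMARY or
 *   MONGOC_TOPOLOGY_SHARDED).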
* *-------------------------------------------------------------------------- */ mongoc_topology_description_type_t _mongoc_topology_get_type (mongoc_topology_t *topology) { mongoc_topology_description_type_t td_type; bson_mutex_lock (&topology->mutex); td_type = topology->description.type; bson_mutex_unlock (&topology->mutex); return td_type; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_run_background -- * * The background topology monitoring thread runs in this loop. * * NOTE: this method uses @topology's mutex. * *-------------------------------------------------------------------------- */ static void * _mongoc_topology_run_background (void *data) { mongoc_topology_t *topology; int64_t now; int64_t last_scan; int64_t timeout; int64_t force_timeout; int64_t heartbeat_msec; int r; BSON_ASSERT (data); last_scan = 0; topology = (mongoc_topology_t *) data; heartbeat_msec = topology->description.heartbeat_msec; /* we exit this loop when shutting down, or on error */ for (;;) { /* unlocked after starting a scan or after breaking out of the loop */ bson_mutex_lock (&topology->mutex); if (!mongoc_topology_scanner_valid (topology->scanner)) { bson_mutex_unlock (&topology->mutex); goto DONE; } /* we exit this loop on error, or when we should scan immediately */ for (;;) { if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN) { bson_mutex_unlock (&topology->mutex); goto DONE; } now = bson_get_monotonic_time (); if (last_scan == 0) { /* set up the "last scan" as exactly long enough to force an * immediate scan on the first pass */ last_scan = now - (heartbeat_msec * 1000); } timeout = heartbeat_msec - ((now - last_scan) / 1000); /* if someone's specifically asked for a scan, use a shorter interval */ if (topology->scan_requested) { force_timeout = topology->min_heartbeat_frequency_msec - ((now - last_scan) / 1000); timeout = BSON_MIN (timeout, force_timeout); } /* if we can start scanning, do so immediately */ if (timeout <= 0) { break; } else { /* otherwise wait until someone: * o requests a scan * o we time out * o requests a shutdown */ r = mongoc_cond_timedwait ( &topology->cond_server, &topology->mutex, timeout); #ifdef _WIN32 if (!(r == 0 || r == WSAETIMEDOUT)) { #else if (!(r == 0 || r == ETIMEDOUT)) { #endif bson_mutex_unlock (&topology->mutex); /* handle errors */ goto DONE; } /* if we timed out, or were woken up, check if it's time to scan * again, or bail out */ } } topology->scan_requested = false; mongoc_topology_scan_once (topology, false /* obey cooldown */); bson_mutex_unlock (&topology->mutex); last_scan = bson_get_monotonic_time (); } DONE: return NULL; } /* *-------------------------------------------------------------------------- * * mongoc_topology_start_background_scanner * * Start the topology background thread running. This should only be * called once per pool. If clients are created separately (not * through a pool) the SDAM logic will not be run in a background * thread. Returns whether or not the scanner is running on termination * of the function. * * NOTE: this method uses @topology's mutex. 
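 *
 * The thread created here runs _mongoc_topology_run_background() above: it
 * wakes up every heartbeatFrequencyMS, or sooner when a scan is requested,
 * and exits once the scanner state moves to SHUTTING_DOWN.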
* *-------------------------------------------------------------------------- */ bool _mongoc_topology_start_background_scanner (mongoc_topology_t *topology) { int r; if (topology->single_threaded) { return false; } bson_mutex_lock (&topology->mutex); if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) { bson_mutex_unlock (&topology->mutex); return true; } BSON_ASSERT (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_OFF); topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_BG_RUNNING; _mongoc_handshake_freeze (); _mongoc_topology_description_monitor_opening (&topology->description); r = bson_thread_create ( &topology->thread, _mongoc_topology_run_background, topology); if (r != 0) { MONGOC_ERROR ("could not start topology scanner thread: %s", strerror (r)); abort (); } bson_mutex_unlock (&topology->mutex); return true; } /* *-------------------------------------------------------------------------- * * mongoc_topology_background_thread_stop -- * * Stop the topology background thread. Called by the owning pool at * its destruction. * * NOTE: this method uses @topology's mutex. * *-------------------------------------------------------------------------- */ void _mongoc_topology_background_thread_stop (mongoc_topology_t *topology) { bool join_thread = false; if (topology->single_threaded) { return; } bson_mutex_lock (&topology->mutex); BSON_ASSERT (topology->scanner_state != MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN); if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_BG_RUNNING) { /* if the background thread is running, request a shutdown and signal the * thread */ topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_SHUTTING_DOWN; mongoc_cond_signal (&topology->cond_server); join_thread = true; } else { /* nothing to do if it's already off */ } bson_mutex_unlock (&topology->mutex); if (join_thread) { /* if we're joining the thread, wait for it to come back and broadcast * all listeners */ bson_thread_join (topology->thread); bson_mutex_lock (&topology->mutex); topology->scanner_state = MONGOC_TOPOLOGY_SCANNER_OFF; bson_mutex_unlock (&topology->mutex); mongoc_cond_broadcast (&topology->cond_client); } } bool _mongoc_topology_set_appname (mongoc_topology_t *topology, const char *appname) { bool ret = false; bson_mutex_lock (&topology->mutex); if (topology->scanner_state == MONGOC_TOPOLOGY_SCANNER_OFF) { ret = _mongoc_topology_scanner_set_appname (topology->scanner, appname); } else { MONGOC_ERROR ("Cannot set appname after handshake initiated"); } bson_mutex_unlock (&topology->mutex); return ret; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_update_cluster_time -- * * Internal function. If the server reply has a later $clusterTime than * any seen before, update the topology's clusterTime. See the Driver * Sessions Spec. * *-------------------------------------------------------------------------- */ void _mongoc_topology_update_cluster_time (mongoc_topology_t *topology, const bson_t *reply) { bson_mutex_lock (&topology->mutex); mongoc_topology_description_update_cluster_time (&topology->description, reply); _mongoc_topology_scanner_set_cluster_time ( topology->scanner, &topology->description.cluster_time); bson_mutex_unlock (&topology->mutex); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_pop_server_session -- * * Internal function. Get a server session from the pool or create * one. On error, return NULL and fill out @error. 
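 *
 * Sessions are popped from the front of the pool; any pooled session that
 * has already timed out relative to the server's logicalSessionTimeoutMinutes
 * is destroyed rather than returned, and a fresh session is created when the
 * pool is empty.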
* *-------------------------------------------------------------------------- */ mongoc_server_session_t * _mongoc_topology_pop_server_session (mongoc_topology_t *topology, bson_error_t *error) { int64_t timeout; mongoc_server_session_t *ss = NULL; mongoc_topology_description_t *td; ENTRY; bson_mutex_lock (&topology->mutex); td = &topology->description; timeout = td->session_timeout_minutes; if (timeout == MONGOC_NO_SESSIONS) { /* if needed, connect and check for session timeout again */ if (!mongoc_topology_description_has_data_node (td)) { bson_mutex_unlock (&topology->mutex); if (!mongoc_topology_select_server_id ( topology, MONGOC_SS_READ, NULL, error)) { RETURN (NULL); } bson_mutex_lock (&topology->mutex); timeout = td->session_timeout_minutes; } if (timeout == MONGOC_NO_SESSIONS) { bson_mutex_unlock (&topology->mutex); bson_set_error (error, MONGOC_ERROR_CLIENT, MONGOC_ERROR_CLIENT_SESSION_FAILURE, "Server does not support sessions"); RETURN (NULL); } } while (topology->session_pool) { ss = topology->session_pool; CDL_DELETE (topology->session_pool, ss); if (_mongoc_server_session_timed_out (ss, timeout)) { _mongoc_server_session_destroy (ss); ss = NULL; } else { break; } } bson_mutex_unlock (&topology->mutex); if (!ss) { ss = _mongoc_server_session_new (error); } RETURN (ss); } /* *-------------------------------------------------------------------------- * * _mongoc_topology_push_server_session -- * * Internal function. Return a server session to the pool. * *-------------------------------------------------------------------------- */ void _mongoc_topology_push_server_session (mongoc_topology_t *topology, mongoc_server_session_t *server_session) { int64_t timeout; mongoc_server_session_t *ss; ENTRY; bson_mutex_lock (&topology->mutex); timeout = topology->description.session_timeout_minutes; /* start at back of queue and reap timed-out sessions */ while (topology->session_pool && topology->session_pool->prev) { ss = topology->session_pool->prev; if (_mongoc_server_session_timed_out (ss, timeout)) { BSON_ASSERT (ss->next); /* silences clang scan-build */ CDL_DELETE (topology->session_pool, ss); _mongoc_server_session_destroy (ss); } else { /* if ss is not timed out, sessions in front of it are ok too */ break; } } if (_mongoc_server_session_timed_out (server_session, timeout)) { _mongoc_server_session_destroy (server_session); } else { /* silences clang scan-build */ BSON_ASSERT (!topology->session_pool || (topology->session_pool->next && topology->session_pool->prev)); CDL_PREPEND (topology->session_pool, server_session); } bson_mutex_unlock (&topology->mutex); EXIT; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_end_sessions_cmd -- * * Internal function. End up to 10,000 server sessions. @cmd is an * uninitialized document. Sessions are destroyed as their ids are * appended to @cmd. * * Driver Sessions Spec: "If the number of sessions is very large the * endSessions command SHOULD be run multiple times to end 10,000 * sessions at a time (in order to avoid creating excessively large * commands)." * * Returns: * true if any session ids were appended to @cmd. 
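 *
 * The resulting command has roughly this shape (illustrative):
 *
 *   { "endSessions" : [ { "id" : <UUID> }, { "id" : <UUID> }, ... ] }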
* *-------------------------------------------------------------------------- */ bool _mongoc_topology_end_sessions_cmd (mongoc_topology_t *topology, bson_t *cmd) { mongoc_server_session_t *ss, *tmp1, *tmp2; char buf[16]; const char *key; uint32_t i; bson_t ar; bson_init (cmd); BSON_APPEND_ARRAY_BEGIN (cmd, "endSessions", &ar); i = 0; CDL_FOREACH_SAFE (topology->session_pool, ss, tmp1, tmp2) { bson_uint32_to_string (i, &key, buf, sizeof buf); BSON_APPEND_DOCUMENT (&ar, key, &ss->lsid); CDL_DELETE (topology->session_pool, ss); _mongoc_server_session_destroy (ss); if (++i == 10000) { break; } } bson_append_array_end (cmd, &ar); return i > 0; } /* *-------------------------------------------------------------------------- * * _mongoc_topology_get_ismaster -- * * Locks topology->mutex and retrieves (possibly constructing) the * handshake on the topology scanner. * * Returns: * A bson_t representing an ismaster command. * *-------------------------------------------------------------------------- */ const bson_t * _mongoc_topology_get_ismaster (mongoc_topology_t *topology) { const bson_t *cmd; bson_mutex_lock (&topology->mutex); cmd = _mongoc_topology_scanner_get_ismaster (topology->scanner); bson_mutex_unlock (&topology->mutex); return cmd; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-trace-private.h0000644000076500000240000001042213572250760026222 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_TRACE_PRIVATE_H #define MONGOC_TRACE_PRIVATE_H #include #include #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-log-private.h" BSON_BEGIN_DECLS #ifdef MONGOC_TRACE #define TRACE(msg, ...) \ do { \ mongoc_log (MONGOC_LOG_LEVEL_TRACE, \ MONGOC_LOG_DOMAIN, \ "TRACE: %s():%d " msg, \ BSON_FUNC, \ __LINE__, \ __VA_ARGS__); \ } while (0) #define ENTRY \ do { \ mongoc_log (MONGOC_LOG_LEVEL_TRACE, \ MONGOC_LOG_DOMAIN, \ "ENTRY: %s():%d", \ BSON_FUNC, \ __LINE__); \ } while (0) #define EXIT \ do { \ mongoc_log (MONGOC_LOG_LEVEL_TRACE, \ MONGOC_LOG_DOMAIN, \ " EXIT: %s():%d", \ BSON_FUNC, \ __LINE__); \ return; \ } while (0) #define RETURN(ret) \ do { \ mongoc_log (MONGOC_LOG_LEVEL_TRACE, \ MONGOC_LOG_DOMAIN, \ " EXIT: %s():%d", \ BSON_FUNC, \ __LINE__); \ return ret; \ } while (0) #define GOTO(label) \ do { \ mongoc_log (MONGOC_LOG_LEVEL_TRACE, \ MONGOC_LOG_DOMAIN, \ " GOTO: %s():%d %s", \ BSON_FUNC, \ __LINE__, \ #label); \ goto label; \ } while (0) #define DUMP_BYTES(_n, _b, _l) \ do { \ mongoc_log (MONGOC_LOG_LEVEL_TRACE, \ MONGOC_LOG_DOMAIN, \ "TRACE: %s():%d %s = %p [%d]", \ BSON_FUNC, \ __LINE__, \ #_n, \ _b, \ (int) _l); \ mongoc_log_trace_bytes (MONGOC_LOG_DOMAIN, _b, _l); \ } while (0) #define DUMP_IOVEC(_n, _iov, _iovcnt) \ do { \ mongoc_log (MONGOC_LOG_LEVEL_TRACE, \ MONGOC_LOG_DOMAIN, \ "TRACE: %s():%d %s = %p [%d]", \ BSON_FUNC, \ __LINE__, \ #_n, \ _iov, \ (int) _iovcnt); \ mongoc_log_trace_iovec (MONGOC_LOG_DOMAIN, _iov, _iovcnt); \ } while (0) #else #define TRACE(msg, ...) 
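/* Without MONGOC_TRACE these macros expand to nothing (EXIT and RETURN
 * reduce to plain return statements, GOTO to a plain goto), so the trace
 * instrumentation has no runtime cost in non-trace builds. */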
#define ENTRY #define EXIT return #define RETURN(ret) return ret #define GOTO(label) goto label #define DUMP_BYTES(_n, _b, _l) #define DUMP_IOVEC(_n, _iov, _iovcnt) #endif BSON_END_DECLS #endif /* MONGOC_TRACE_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-uri-private.h0000644000076500000240000000344113572250760025726 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_URI_PRIVATE_H #define MONGOC_URI_PRIVATE_H #include "mongoc/mongoc-uri.h" BSON_BEGIN_DECLS bool mongoc_uri_upsert_host_and_port (mongoc_uri_t *uri, const char *host_and_port, bson_error_t *error); bool mongoc_uri_upsert_host (mongoc_uri_t *uri, const char *host, uint16_t port, bson_error_t *error); void mongoc_uri_remove_host (mongoc_uri_t *uri, const char *host, uint16_t port); bool mongoc_uri_parse_host (mongoc_uri_t *uri, const char *str); bool mongoc_uri_parse_options (mongoc_uri_t *uri, const char *str, bool from_dns, bson_error_t *error); int32_t mongoc_uri_get_local_threshold_option (const mongoc_uri_t *uri); bool _mongoc_uri_requires_auth_negotiation (const mongoc_uri_t *uri); const char * mongoc_uri_canonicalize_option (const char *key); mongoc_uri_t * _mongoc_uri_copy_and_replace_host_list (const mongoc_uri_t *original, const char *host); BSON_END_DECLS #endif /* MONGOC_URI_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-uri.c0000644000076500000240000022261113572250760024253 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #include /* strcasecmp on windows */ #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-host-list-private.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-handshake-private.h" #include "mongoc/mongoc-socket.h" #include "mongoc/mongoc-topology-private.h" #include "mongoc/mongoc-uri-private.h" #include "mongoc/mongoc-read-concern-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-compression-private.h" #include "mongoc/utlist.h" struct _mongoc_uri_t { char *str; bool is_srv; char srv[BSON_HOST_NAME_MAX + 1]; mongoc_host_list_t *hosts; char *username; char *password; char *database; bson_t raw; /* Unparsed options, see mongoc_uri_parse_options */ bson_t options; /* Type-coerced and canonicalized options */ bson_t credentials; bson_t compressors; mongoc_read_prefs_t *read_prefs; mongoc_read_concern_t *read_concern; mongoc_write_concern_t *write_concern; }; #define MONGOC_URI_ERROR(error, format, ...) \ bson_set_error (error, \ MONGOC_ERROR_COMMAND, \ MONGOC_ERROR_COMMAND_INVALID_ARG, \ format, \ __VA_ARGS__); static const char *escape_instructions = "Percent-encode username and password" " according to RFC 3986"; static bool _mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri, const char *option, int32_t value); static bool _mongoc_uri_set_option_as_int32_with_error (mongoc_uri_t *uri, const char *option, int32_t value, bson_error_t *error); static bool ends_with (const char *str, const char *suffix); static void mongoc_uri_do_unescape (char **str) { char *tmp; if ((tmp = *str)) { *str = mongoc_uri_unescape (tmp); bson_free (tmp); } } #define VALIDATE_SRV_ERR() \ do { \ bson_set_error (error, \ MONGOC_ERROR_STREAM, \ MONGOC_ERROR_STREAM_NAME_RESOLUTION, \ "Invalid host \"%s\" returned for service \"%s\": " \ "host must be subdomain of service name", \ host, \ service); \ return false; \ } while (0) static int count_dots (const char *s) { int n = 0; const char *dot = s; while ((dot = strchr (dot + 1, '.'))) { n++; } return n; } /* at least one character, and does not start or end with dot */ static bool valid_hostname (const char *s) { size_t len = strlen (s); return len > 1 && s[0] != '.' && s[len - 1] != '.'; } static bool validate_srv_result (mongoc_uri_t *uri, const char *host, bson_error_t *error) { const char *service; const char *service_root; service = mongoc_uri_get_service (uri); BSON_ASSERT (service); if (!valid_hostname (host)) { VALIDATE_SRV_ERR (); } service_root = strchr (service, '.'); BSON_ASSERT (service_root); /* host must be descendent of service root: if service is * "a.foo.co" host can be like "a.foo.co", "b.foo.co", "a.b.foo.co", etc. */ if (strlen (host) < strlen (service_root)) { VALIDATE_SRV_ERR (); } if (!ends_with (host, service_root)) { VALIDATE_SRV_ERR (); } return true; } static mongoc_host_list_t * _mongoc_host_list_find_host_and_port (mongoc_host_list_t *hosts, const char *host_and_port) { mongoc_host_list_t *iter; LL_FOREACH (hosts, iter) { if (strcmp (iter->host_and_port, host_and_port) == 0) { return iter; } } return NULL; } /* upsert @host into @uri's host list. Side effect: modifies host->next when * inserting. 
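 * If an entry with the same "host:port" already exists it is overwritten in
 * place (its list links are preserved); otherwise a new entry is appended.
 * For mongodb+srv URIs the host is first validated against the service name.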
*/ static bool _upsert_into_host_list (mongoc_uri_t *uri, mongoc_host_list_t *host, bson_error_t *error) { mongoc_host_list_t *link; if (uri->is_srv && !validate_srv_result (uri, host->host, error)) { return false; } link = _mongoc_host_list_find_host_and_port (uri->hosts, host->host_and_port); if (!link) { link = bson_malloc0 (sizeof (mongoc_host_list_t)); LL_APPEND (uri->hosts, link); } else { /* Make sure linking is preserved when copying data into final. */ host->next = link->next; } memcpy (link, host, sizeof (mongoc_host_list_t)); return true; } bool mongoc_uri_upsert_host_and_port (mongoc_uri_t *uri, const char *host_and_port, bson_error_t *error) { mongoc_host_list_t temp; memset (&temp, 0, sizeof (mongoc_host_list_t)); if (!_mongoc_host_list_from_string_with_err (&temp, host_and_port, error)) { return false; } return _upsert_into_host_list (uri, &temp, error); } bool mongoc_uri_upsert_host (mongoc_uri_t *uri, const char *host, uint16_t port, bson_error_t *error) { mongoc_host_list_t temp; memset (&temp, 0, sizeof (mongoc_host_list_t)); if (!_mongoc_host_list_from_hostport_with_err (&temp, host, port, error)) { return false; } return _upsert_into_host_list (uri, &temp, error); } void mongoc_uri_remove_host (mongoc_uri_t *uri, const char *host, uint16_t port) { _mongoc_host_list_remove_host (&(uri->hosts), host, port); } /* *-------------------------------------------------------------------------- * * scan_to_unichar -- * * Scans 'str' until either a character matching 'match' is found, * until one of the characters in 'terminators' is encountered, or * until we reach the end of 'str'. * * NOTE: 'terminators' may not include multibyte UTF-8 characters. * * Returns: * If 'match' is found, returns a copy of the section of 'str' before * that character. Otherwise, returns NULL. * * Side Effects: * If 'match' is found, sets 'end' to begin at the matching character * in 'str'. * *-------------------------------------------------------------------------- */ static char * scan_to_unichar (const char *str, bson_unichar_t match, const char *terminators, const char **end) { bson_unichar_t c; const char *iter; for (iter = str; iter && *iter && (c = bson_utf8_get_char (iter)); iter = bson_utf8_next_char (iter)) { if (c == match) { *end = iter; return bson_strndup (str, iter - str); } else if (c == '\\') { iter = bson_utf8_next_char (iter); if (!bson_utf8_get_char (iter)) { break; } } else { const char *term_iter; for (term_iter = terminators; *term_iter; term_iter++) { if (c == *term_iter) { return NULL; } } } } return NULL; } /* *-------------------------------------------------------------------------- * * ends_with -- * * Return true if str ends with suffix. 
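 *   e.g. ends_with ("node1.example.com", ".example.com") is true. Used by
 *   validate_srv_result() to require that an SRV-returned host is a
 *   descendant of the service's root domain.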
* *-------------------------------------------------------------------------- */ static bool ends_with (const char *str, const char *suffix) { size_t str_len = strlen (str); size_t suffix_len = strlen (suffix); const char *s1, *s2; if (str_len < suffix_len) { return false; } /* start at the ends of both strings */ s1 = str + str_len; s2 = suffix + suffix_len; /* until either pointer reaches start of its string, compare the pointers */ for (; s1 >= str && s2 >= suffix; s1--, s2--) { if (*s1 != *s2) { return false; } } return true; } static bool mongoc_uri_parse_scheme (mongoc_uri_t *uri, const char *str, const char **end) { if (!strncmp (str, "mongodb+srv://", 14)) { uri->is_srv = true; *end = str + 14; return true; } if (!strncmp (str, "mongodb://", 10)) { uri->is_srv = false; *end = str + 10; return true; } return false; } static bool mongoc_uri_has_unescaped_chars (const char *str, const char *chars) { const char *c; const char *tmp; char *s; for (c = chars; *c; c++) { s = scan_to_unichar (str, (bson_unichar_t) *c, "", &tmp); if (s) { bson_free (s); return true; } } return false; } /* "str" is non-NULL, the part of URI between "mongodb://" and first "@" */ static bool mongoc_uri_parse_userpass (mongoc_uri_t *uri, const char *str, bson_error_t *error) { const char *prohibited = "@:/"; const char *end_user; BSON_ASSERT (str); if ((uri->username = scan_to_unichar (str, ':', "", &end_user))) { uri->password = bson_strdup (end_user + 1); } else { uri->username = bson_strdup (str); uri->password = NULL; } if (mongoc_uri_has_unescaped_chars (uri->username, prohibited)) { MONGOC_URI_ERROR (error, "Username \"%s\" must not have unescaped chars. %s", uri->username, escape_instructions); return false; } mongoc_uri_do_unescape (&uri->username); if (!uri->username) { MONGOC_URI_ERROR ( error, "Incorrect URI escapes in username. %s", escape_instructions); return false; } /* Providing password at all is optional */ if (uri->password) { if (mongoc_uri_has_unescaped_chars (uri->password, prohibited)) { MONGOC_URI_ERROR (error, "Password \"%s\" must not have unescaped chars. %s", uri->password, escape_instructions); return false; } mongoc_uri_do_unescape (&uri->password); if (!uri->password) { MONGOC_URI_ERROR (error, "%s", "Incorrect URI escapes in password"); return false; } } return true; } bool mongoc_uri_parse_host (mongoc_uri_t *uri, const char *host_and_port_in) { char *host_and_port = bson_strdup (host_and_port_in); bson_error_t err = {0}; bool r; /* unescape host. It doesn't hurt including port. */ if (mongoc_uri_has_unescaped_chars (host_and_port, "/")) { MONGOC_WARNING ("Unix Domain Sockets must be escaped (e.g. 
/ = %%2F)"); bson_free (host_and_port); return false; } mongoc_uri_do_unescape (&host_and_port); if (!host_and_port) { /* invalid */ bson_free (host_and_port); return false; } r = mongoc_uri_upsert_host_and_port (uri, host_and_port, &err); if (!r) { MONGOC_ERROR ("%s", err.message); bson_free (host_and_port); return false; } bson_free (host_and_port); return true; } bool mongoc_uri_parse_srv (mongoc_uri_t *uri, const char *str) { char *service; if (*str == '\0') { return false; } service = bson_strdup (str); mongoc_uri_do_unescape (&service); if (!service) { /* invalid */ return false; } if (!valid_hostname (service) || count_dots (service) < 2) { bson_free (service); return false; } bson_strncpy (uri->srv, service, sizeof uri->srv); bson_free (service); if (strchr (uri->srv, ',') || strchr (uri->srv, ':')) { /* prohibit port number or multiple service names */ return false; } return true; } /* "hosts" is non-NULL, the part between "mongodb://" or "@" and last "/" */ static bool mongoc_uri_parse_hosts (mongoc_uri_t *uri, const char *hosts) { const char *next; const char *end_hostport; char *s; BSON_ASSERT (hosts); /* * Parsing the series of hosts is a lot more complicated than you might * imagine. This is due to some characters being both separators as well as * valid characters within the "hostname". In particularly, we can have file * paths to specify paths to UNIX domain sockets. We impose the restriction * that they must be suffixed with ".sock" to simplify the parsing. * * You can separate hosts and file system paths to UNIX domain sockets with * ",". */ s = scan_to_unichar (hosts, '?', "", &end_hostport); if (s) { MONGOC_WARNING ( "%s", "A '/' is required between the host list and any options."); goto error; } next = hosts; do { /* makes a copy of the section of the string */ s = scan_to_unichar (next, ',', "", &end_hostport); if (s) { next = (char *) end_hostport + 1; } else { s = bson_strdup (next); next = NULL; } if (!mongoc_uri_parse_host (uri, s)) { goto error; } bson_free (s); } while (next); return true; error: bson_free (s); return false; } /* ----------------------------------------------------------------------------- * * mongoc_uri_parse_database -- * * Parse the database after @str. @str is expected to point after the * host list to the character immediately after the / in the uri string. * If no database is specified in the uri, e.g. the uri has a form like: * mongodb://localhost/?option=X then uri->database remains NULL after * parsing. * * Return: * True if the parsed database is valid. An empty database is considered * valid. * ----------------------------------------------------------------------------- */ static bool mongoc_uri_parse_database (mongoc_uri_t *uri, const char *str, const char **end) { const char *end_database; const char *c; char *invalid_c; const char *tmp; if ((uri->database = scan_to_unichar (str, '?', "", &end_database))) { if (strcmp (uri->database, "") == 0) { /* no database is found, don't store the empty string. */ bson_free (uri->database); uri->database = NULL; /* but it is valid to have an empty database. */ return true; } *end = end_database; } else if (*str) { uri->database = bson_strdup (str); *end = str + strlen (str); } mongoc_uri_do_unescape (&uri->database); if (!uri->database) { /* invalid */ return false; } /* invalid characters in database name */ for (c = "/\\. 
\"$"; *c; c++) { invalid_c = scan_to_unichar (uri->database, (bson_unichar_t) *c, "", &tmp); if (invalid_c) { bson_free (invalid_c); return false; } } return true; } static bool mongoc_uri_parse_auth_mechanism_properties (mongoc_uri_t *uri, const char *str) { char *field; char *value; const char *end_scan; bson_t properties; bson_init (&properties); /* build up the properties document */ while ((field = scan_to_unichar (str, ':', "&", &end_scan))) { str = end_scan + 1; if (!(value = scan_to_unichar (str, ',', ":&", &end_scan))) { value = bson_strdup (str); str = ""; } else { str = end_scan + 1; } bson_append_utf8 (&properties, field, -1, value, -1); bson_free (field); bson_free (value); } /* append our auth properties to our credentials */ if (!mongoc_uri_set_mechanism_properties (uri, &properties)) { bson_destroy (&properties); return false; } bson_destroy (&properties); return true; } static bool mongoc_uri_parse_tags (mongoc_uri_t *uri, /* IN */ const char *str) /* IN */ { const char *end_keyval; const char *end_key; bson_t b; char *keyval; char *key; bson_init (&b); again: if ((keyval = scan_to_unichar (str, ',', "", &end_keyval))) { if (!(key = scan_to_unichar (keyval, ':', "", &end_key))) { bson_free (keyval); goto fail; } bson_append_utf8 (&b, key, -1, end_key + 1, -1); bson_free (key); bson_free (keyval); str = end_keyval + 1; goto again; } else if ((key = scan_to_unichar (str, ':', "", &end_key))) { bson_append_utf8 (&b, key, -1, end_key + 1, -1); bson_free (key); } else if (strlen (str)) { /* we're not finished but we couldn't parse the string */ goto fail; } mongoc_read_prefs_add_tag (uri->read_prefs, &b); bson_destroy (&b); return true; fail: MONGOC_WARNING ("Unsupported value for \"" MONGOC_URI_READPREFERENCETAGS "\": \"%s\"", str); bson_destroy (&b); return false; } /* *-------------------------------------------------------------------------- * * mongoc_uri_bson_append_or_replace_key -- * * * Appends 'option' to the end of 'options' if not already set. * * Since we cannot grow utf8 strings inline, we have to allocate a * temporary bson variable and splice in the new value if the key * is already set. * * NOTE: This function keeps the order of the BSON keys. * * NOTE: 'option' is case*in*sensitive. 
* * *-------------------------------------------------------------------------- */ static void mongoc_uri_bson_append_or_replace_key (bson_t *options, const char *option, const char *value) { bson_iter_t iter; bool found = false; if (bson_iter_init (&iter, options)) { bson_t tmp = BSON_INITIALIZER; while (bson_iter_next (&iter)) { const bson_value_t *bvalue; if (!strcasecmp (bson_iter_key (&iter), option)) { bson_append_utf8 (&tmp, option, -1, value, -1); found = true; continue; } bvalue = bson_iter_value (&iter); BSON_APPEND_VALUE (&tmp, bson_iter_key (&iter), bvalue); } if (!found) { bson_append_utf8 (&tmp, option, -1, value, -1); } bson_destroy (options); bson_copy_to (&tmp, options); bson_destroy (&tmp); } } bool mongoc_uri_option_is_int32 (const char *key) { return !strcasecmp (key, MONGOC_URI_CONNECTTIMEOUTMS) || !strcasecmp (key, MONGOC_URI_HEARTBEATFREQUENCYMS) || !strcasecmp (key, MONGOC_URI_SERVERSELECTIONTIMEOUTMS) || !strcasecmp (key, MONGOC_URI_SOCKETCHECKINTERVALMS) || !strcasecmp (key, MONGOC_URI_SOCKETTIMEOUTMS) || !strcasecmp (key, MONGOC_URI_LOCALTHRESHOLDMS) || !strcasecmp (key, MONGOC_URI_MAXPOOLSIZE) || !strcasecmp (key, MONGOC_URI_MAXSTALENESSSECONDS) || !strcasecmp (key, MONGOC_URI_MINPOOLSIZE) || !strcasecmp (key, MONGOC_URI_MAXIDLETIMEMS) || !strcasecmp (key, MONGOC_URI_WAITQUEUEMULTIPLE) || !strcasecmp (key, MONGOC_URI_WAITQUEUETIMEOUTMS) || !strcasecmp (key, MONGOC_URI_WTIMEOUTMS) || !strcasecmp (key, MONGOC_URI_ZLIBCOMPRESSIONLEVEL); } bool mongoc_uri_option_is_bool (const char *key) { return !strcasecmp (key, MONGOC_URI_CANONICALIZEHOSTNAME) || !strcasecmp (key, MONGOC_URI_JOURNAL) || !strcasecmp (key, MONGOC_URI_RETRYREADS) || !strcasecmp (key, MONGOC_URI_RETRYWRITES) || !strcasecmp (key, MONGOC_URI_SAFE) || !strcasecmp (key, MONGOC_URI_SERVERSELECTIONTRYONCE) || !strcasecmp (key, MONGOC_URI_SLAVEOK) || !strcasecmp (key, MONGOC_URI_TLS) || !strcasecmp (key, MONGOC_URI_TLSINSECURE) || !strcasecmp (key, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES) || !strcasecmp (key, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES) || /* deprecated options */ !strcasecmp (key, MONGOC_URI_SSL) || !strcasecmp (key, MONGOC_URI_SSLALLOWINVALIDCERTIFICATES) || !strcasecmp (key, MONGOC_URI_SSLALLOWINVALIDHOSTNAMES); } bool mongoc_uri_option_is_utf8 (const char *key) { return !strcasecmp (key, MONGOC_URI_APPNAME) || !strcasecmp (key, MONGOC_URI_REPLICASET) || !strcasecmp (key, MONGOC_URI_READPREFERENCE) || !strcasecmp (key, MONGOC_URI_TLSCERTIFICATEKEYFILE) || !strcasecmp (key, MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD) || !strcasecmp (key, MONGOC_URI_TLSCAFILE) || /* deprecated options */ !strcasecmp (key, MONGOC_URI_SSLCLIENTCERTIFICATEKEYFILE) || !strcasecmp (key, MONGOC_URI_SSLCLIENTCERTIFICATEKEYPASSWORD) || !strcasecmp (key, MONGOC_URI_SSLCERTIFICATEAUTHORITYFILE); } const char * mongoc_uri_canonicalize_option (const char *key) { if (!strcasecmp (key, MONGOC_URI_SSL)) { return MONGOC_URI_TLS; } else if (!strcasecmp (key, MONGOC_URI_SSLCLIENTCERTIFICATEKEYFILE)) { return MONGOC_URI_TLSCERTIFICATEKEYFILE; } else if (!strcasecmp (key, MONGOC_URI_SSLCLIENTCERTIFICATEKEYPASSWORD)) { return MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD; } else if (!strcasecmp (key, MONGOC_URI_SSLCERTIFICATEAUTHORITYFILE)) { return MONGOC_URI_TLSCAFILE; } else if (!strcasecmp (key, MONGOC_URI_SSLALLOWINVALIDCERTIFICATES)) { return MONGOC_URI_TLSALLOWINVALIDCERTIFICATES; } else if (!strcasecmp (key, MONGOC_URI_SSLALLOWINVALIDHOSTNAMES)) { return MONGOC_URI_TLSALLOWINVALIDHOSTNAMES; } else { return key; } } static bool 
mongoc_uri_parse_int32 (const char *key, const char *value, int32_t *result) { char *endptr; int64_t i; errno = 0; i = bson_ascii_strtoll (value, &endptr, 10); if (errno || endptr < value + strlen (value)) { MONGOC_WARNING ("Invalid %s: cannot parse integer\n", key); return false; } if (i > INT32_MAX || i < INT32_MIN) { MONGOC_WARNING ("Invalid %s: cannot fit in int32\n", key); return false; } *result = (int32_t) i; return true; } static bool dns_option_allowed (const char *lkey) { /* Initial DNS Seedlist Discovery Spec: "A Client MUST only support the * authSource and replicaSet options through a TXT record, and MUST raise an * error if any other option is encountered." */ return !strcmp (lkey, MONGOC_URI_AUTHSOURCE) || !strcmp (lkey, MONGOC_URI_REPLICASET); } /* Decompose a key=val pair and place them into a document. * Includes case-folding for key portion. */ static bool mongoc_uri_split_option (mongoc_uri_t *uri, bson_t *options, const char *str, bool from_dns, bson_error_t *error) { bson_iter_t iter; const char *end_key; char *key = NULL; char *lkey = NULL; char *value = NULL; const char *opt; char *opt_end; size_t opt_len; bool ret = false; if (!(key = scan_to_unichar (str, '=', "", &end_key))) { MONGOC_URI_ERROR (error, "URI option \"%s\" contains no \"=\" sign", str); goto CLEANUP; } value = bson_strdup (end_key + 1); mongoc_uri_do_unescape (&value); if (!value) { /* do_unescape detected invalid UTF-8 and freed value */ MONGOC_URI_ERROR ( error, "Value for URI option \"%s\" contains invalid UTF-8", key); goto CLEANUP; } lkey = bson_strdup (key); mongoc_lowercase (key, lkey); /* Initial DNS Seedlist Discovery Spec: "A Client MUST only support the * authSource and replicaSet options through a TXT record, and MUST raise an * error if any other option is encountered."*/ if (from_dns && !dns_option_allowed (lkey)) { MONGOC_URI_ERROR ( error, "URI option \"%s\" prohibited in TXT record", key); goto CLEANUP; } /* Special case: READPREFERENCETAGS is a composing option. * Multiple instances should append, not overwrite. * Encode them directly to the options field, * bypassing canonicalization and duplicate checks. */ if (!strcmp (lkey, MONGOC_URI_READPREFERENCETAGS)) { if (!mongoc_uri_parse_tags (uri, value)) { MONGOC_URI_ERROR ( error, "Unsupported value for \"%s\": \"%s\"", key, value); goto CLEANUP; } } else if (bson_iter_init_find (&iter, &uri->raw, lkey) || bson_iter_init_find (&iter, options, lkey)) { /* Special case, MONGOC_URI_W == "any non-int" is not overridden * by later values. */ if (!strcmp (lkey, MONGOC_URI_W) && (opt = bson_iter_utf8_unsafe (&iter, &opt_len))) { strtol (opt, &opt_end, 10); if (*opt_end != '\0') { ret = true; goto CLEANUP; } } /* Initial DNS Seedlist Discovery Spec: "Client MUST use options * specified in the Connection String to override options provided * through TXT records." So, do NOT override existing options with TXT * options. */ if (from_dns) { MONGOC_WARNING ( "Cannot override URI option \"%s\" from TXT record \"%s\"", key, str); ret = true; goto CLEANUP; } MONGOC_WARNING ("Overwriting previously provided value for '%s'", key); } if (!(strcmp (lkey, MONGOC_URI_REPLICASET)) && *value == '\0') { MONGOC_URI_ERROR ( error, "Value for URI option \"%s\" cannot be empty string", lkey); goto CLEANUP; } mongoc_uri_bson_append_or_replace_key (options, lkey, value); ret = true; CLEANUP: bson_free (key); bson_free (lkey); bson_free (value); return ret; } /* Check for canonical/deprecated conflicts * between the option list a, and b. 
* If both names exist either way with differing values, error. */ static bool mongoc_uri_options_validate_names (const bson_t *a, const bson_t *b, bson_error_t *error) { bson_iter_t key_iter, canon_iter; const char *key = NULL; const char *canon = NULL; const char *value = NULL; const char *cval = NULL; size_t value_len = 0; size_t cval_len = 0; /* Scan `a` looking for deprecated names * where the canonical name was also used in `a`, * or was used in `b`. */ bson_iter_init (&key_iter, a); while (bson_iter_next (&key_iter)) { key = bson_iter_key (&key_iter); value = bson_iter_utf8_unsafe (&key_iter, &value_len); canon = mongoc_uri_canonicalize_option (key); if (key == canon) { /* Canonical form, no point checking `b`. */ continue; } /* Check for a conflict in `a`. */ if (bson_iter_init_find (&canon_iter, a, canon)) { cval = bson_iter_utf8_unsafe (&canon_iter, &cval_len); if ((value_len != cval_len) || strcmp (value, cval)) { goto HANDLE_CONFLICT; } } /* Check for a conflict in `b`. */ if (bson_iter_init_find (&canon_iter, b, canon)) { cval = bson_iter_utf8_unsafe (&canon_iter, &cval_len); if ((value_len != cval_len) || strcmp (value, cval)) { goto HANDLE_CONFLICT; } } } return true; HANDLE_CONFLICT: MONGOC_URI_ERROR (error, "Deprecated option '%s=%s' conflicts with " "canonical name '%s=%s'", key, value, canon, cval); return false; } #define HANDLE_DUPE() \ if (from_dns) { \ MONGOC_WARNING ("Cannot override URI option \"%s\" from TXT record", \ key); \ continue; \ } else { \ MONGOC_WARNING ("Overwriting previously provided value for '%s'", key); \ } static bool mongoc_uri_apply_options (mongoc_uri_t *uri, const bson_t *options, bool from_dns, bson_error_t *error) { bson_iter_t iter; int32_t v_int; const char *key = NULL; const char *canon = NULL; const char *value = NULL; size_t value_len; bool bval; bson_iter_init (&iter, options); while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); canon = mongoc_uri_canonicalize_option (key); value = bson_iter_utf8_unsafe (&iter, &value_len); /* Keep a record of how the option was originally presented. 
*/ mongoc_uri_bson_append_or_replace_key (&uri->raw, key, value); if (mongoc_uri_option_is_int32 (key)) { if (!mongoc_uri_parse_int32 (key, value, &v_int)) { goto UNSUPPORTED_VALUE; } if (!_mongoc_uri_set_option_as_int32_with_error ( uri, canon, v_int, error)) { return false; } } else if (!strcmp (key, MONGOC_URI_W)) { if (*value == '-' || isdigit (*value)) { v_int = (int) strtol (value, NULL, 10); _mongoc_uri_set_option_as_int32 (uri, MONGOC_URI_W, v_int); } else if (0 == strcasecmp (value, "majority")) { mongoc_uri_bson_append_or_replace_key ( &uri->options, MONGOC_URI_W, "majority"); } else if (*value) { mongoc_uri_bson_append_or_replace_key ( &uri->options, MONGOC_URI_W, value); } } else if (mongoc_uri_option_is_bool (key)) { if (0 == strcasecmp (value, "true")) { bval = true; } else if (0 == strcasecmp (value, "false")) { bval = false; } else if ((0 == strcmp (value, "1")) || (0 == strcasecmp (value, "yes")) || (0 == strcasecmp (value, "y")) || (0 == strcasecmp (value, "t"))) { MONGOC_WARNING ("Deprecated boolean value for \"%s\": \"%s\", " "please update to \"%s=true\"", key, value, key); bval = true; } else if ((0 == strcasecmp (value, "0")) || (0 == strcasecmp (value, "-1")) || (0 == strcmp (value, "no")) || (0 == strcmp (value, "n")) || (0 == strcmp (value, "f"))) { MONGOC_WARNING ("Deprecated boolean value for \"%s\": \"%s\", " "please update to \"%s=false\"", key, value, key); bval = false; } else { goto UNSUPPORTED_VALUE; } if (!mongoc_uri_set_option_as_bool (uri, canon, bval)) { return false; } } else if (!strcmp (key, MONGOC_URI_READPREFERENCETAGS)) { /* Skip this option here. * It was marshalled during mongoc_uri_split_option() * as a special case composing option. */ } else if (!strcmp (key, MONGOC_URI_AUTHMECHANISM) || !strcmp (key, MONGOC_URI_AUTHSOURCE)) { if (bson_has_field (&uri->credentials, key)) { HANDLE_DUPE (); } mongoc_uri_bson_append_or_replace_key ( &uri->credentials, canon, value); } else if (!strcmp (key, MONGOC_URI_READCONCERNLEVEL)) { if (!mongoc_read_concern_is_default (uri->read_concern)) { HANDLE_DUPE (); } mongoc_read_concern_set_level (uri->read_concern, value); } else if (!strcmp (key, MONGOC_URI_GSSAPISERVICENAME)) { char *tmp = bson_strdup_printf ("SERVICE_NAME:%s", value); if (bson_has_field (&uri->credentials, MONGOC_URI_AUTHMECHANISMPROPERTIES)) { MONGOC_WARNING ("authMechanismProperties SERVICE_NAME already set, " "ignoring '%s'", key); } else if (!mongoc_uri_parse_auth_mechanism_properties (uri, tmp)) { bson_free (tmp); goto UNSUPPORTED_VALUE; } bson_free (tmp); } else if (!strcmp (key, MONGOC_URI_AUTHMECHANISMPROPERTIES)) { if (bson_has_field (&uri->credentials, key)) { HANDLE_DUPE (); } if (!mongoc_uri_parse_auth_mechanism_properties (uri, value)) { goto UNSUPPORTED_VALUE; } } else if (!strcmp (key, MONGOC_URI_APPNAME)) { /* Part of uri->options */ if (!mongoc_uri_set_appname (uri, value)) { goto UNSUPPORTED_VALUE; } } else if (!strcmp (key, MONGOC_URI_COMPRESSORS)) { if (!bson_empty (mongoc_uri_get_compressors (uri))) { HANDLE_DUPE (); } if (!mongoc_uri_set_compressors (uri, value)) { goto UNSUPPORTED_VALUE; } } else if (mongoc_uri_option_is_utf8 (key)) { mongoc_uri_bson_append_or_replace_key (&uri->options, canon, value); } else { /* * Keys that aren't supported by a driver MUST be ignored. 
* * A WARN level logging message MUST be issued * https://github.com/mongodb/specifications/blob/master/source/connection-string/connection-string-spec.rst#keys */ MONGOC_WARNING ("Unsupported URI option \"%s\"", key); } } return true; UNSUPPORTED_VALUE: MONGOC_URI_ERROR (error, "Unsupported value for \"%s\": \"%s\"", key, value); return false; } /* Processes a query string formatted set of driver options * (i.e. tls=true&connectTimeoutMS=250 ) into a BSON dict of values. * uri->raw is initially populated with the raw split of key/value pairs, * then the keys are canonicalized and the values coerced * to their appropriate type and stored in uri->options. */ bool mongoc_uri_parse_options (mongoc_uri_t *uri, const char *str, bool from_dns, bson_error_t *error) { bson_t options; const char *end_option; char *option; bson_init (&options); while ((option = scan_to_unichar (str, '&', "", &end_option))) { if (!mongoc_uri_split_option (uri, &options, option, from_dns, error)) { bson_free (option); bson_destroy (&options); return false; } bson_free (option); str = end_option + 1; } if (*str && !mongoc_uri_split_option (uri, &options, str, from_dns, error)) { bson_destroy (&options); return false; } /* Walk both sides of this map to handle each ordering: * deprecated first canonical later, and vice-versa. * Then finalize parse by writing final values to uri->options. */ if (!mongoc_uri_options_validate_names (&uri->options, &options, error) || !mongoc_uri_options_validate_names (&options, &uri->options, error) || !mongoc_uri_apply_options (uri, &options, from_dns, error)) { bson_destroy (&options); return false; } bson_destroy (&options); return true; } static bool mongoc_uri_finalize_tls (mongoc_uri_t *uri, bson_error_t *error) { /* Initial DNS Seedlist Discovery Spec: "If mongodb+srv is used, a driver * MUST implicitly also enable TLS." 
*/ if (uri->is_srv && !bson_has_field (&uri->options, MONGOC_URI_TLS)) { mongoc_uri_set_option_as_bool (uri, MONGOC_URI_TLS, true); } if (bson_has_field (&uri->options, MONGOC_URI_TLSINSECURE) && (bson_has_field (&uri->options, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES) || bson_has_field (&uri->options, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES))) { MONGOC_URI_ERROR (error, "%s may not be specified with %s or %s", MONGOC_URI_TLSINSECURE, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES); return false; } return true; } static bool mongoc_uri_finalize_auth (mongoc_uri_t *uri, bson_error_t *error) { bson_iter_t iter; const char *source = NULL; if (bson_iter_init_find_case ( &iter, &uri->credentials, MONGOC_URI_AUTHSOURCE)) { source = bson_iter_utf8 (&iter, NULL); } /* authSource with GSSAPI or X509 should always be external */ if (mongoc_uri_get_auth_mechanism (uri)) { if (!strcasecmp (mongoc_uri_get_auth_mechanism (uri), "GSSAPI") || !strcasecmp (mongoc_uri_get_auth_mechanism (uri), "MONGODB-X509")) { if (source) { if (strcasecmp (source, "$external")) { MONGOC_URI_ERROR ( error, "%s", "GSSAPI and X509 require \"$external\" authSource"); return false; } } else { bson_append_utf8 ( &uri->credentials, MONGOC_URI_AUTHSOURCE, -1, "$external", -1); } } /* MONGODB-X509 is the only mechanism that doesn't require username */ if (strcasecmp (mongoc_uri_get_auth_mechanism (uri), "MONGODB-X509") != 0) { if (!mongoc_uri_get_username (uri) || strcmp (mongoc_uri_get_username (uri), "") == 0) { MONGOC_URI_ERROR (error, "'%s' authentication mechanism requires username", mongoc_uri_get_auth_mechanism (uri)); return false; } } /* MONGODB-X509 errors if a password is supplied. */ if (strcasecmp (mongoc_uri_get_auth_mechanism (uri), "MONGODB-X509") == 0) { if (mongoc_uri_get_password (uri)) { MONGOC_URI_ERROR ( error, "'%s' authentication mechanism does not accept a password", mongoc_uri_get_auth_mechanism (uri)); return false; } } } return true; } static bool mongoc_uri_parse_before_slash (mongoc_uri_t *uri, const char *before_slash, bson_error_t *error) { char *userpass; const char *hosts; userpass = scan_to_unichar (before_slash, '@', "", &hosts); if (userpass) { if (!mongoc_uri_parse_userpass (uri, userpass, error)) { goto error; } hosts++; /* advance past "@" */ if (*hosts == '@') { /* special case: "mongodb://alice@@localhost" */ MONGOC_URI_ERROR ( error, "Invalid username or password. 
%s", escape_instructions); goto error; } } else { hosts = before_slash; } if (uri->is_srv) { if (!mongoc_uri_parse_srv (uri, hosts)) { MONGOC_URI_ERROR (error, "%s", "Invalid service name in URI"); goto error; } } else { if (!mongoc_uri_parse_hosts (uri, hosts)) { MONGOC_URI_ERROR (error, "%s", "Invalid host string in URI"); goto error; } } bson_free (userpass); return true; error: bson_free (userpass); return false; } static bool mongoc_uri_parse (mongoc_uri_t *uri, const char *str, bson_error_t *error) { char *before_slash = NULL; const char *tmp; if (!bson_utf8_validate (str, strlen (str), false /* allow_null */)) { MONGOC_URI_ERROR (error, "%s", "Invalid UTF-8 in URI"); goto error; } if (!mongoc_uri_parse_scheme (uri, str, &str)) { MONGOC_URI_ERROR ( error, "%s", "Invalid URI Schema, expecting 'mongodb://' or 'mongodb+srv://'"); goto error; } before_slash = scan_to_unichar (str, '/', "", &tmp); if (!before_slash) { before_slash = bson_strdup (str); str += strlen (before_slash); } else { str = tmp; } if (!mongoc_uri_parse_before_slash (uri, before_slash, error)) { goto error; } if (*str) { if (*str == '/') { str++; if (*str) { if (!mongoc_uri_parse_database (uri, str, &str)) { MONGOC_URI_ERROR (error, "%s", "Invalid database name in URI"); goto error; } } if (*str == '?') { str++; if (*str) { if (!mongoc_uri_parse_options ( uri, str, false /* from DNS */, error)) { goto error; } } } } else { MONGOC_URI_ERROR (error, "%s", "Expected end of hostname delimiter"); goto error; } } if (!mongoc_uri_finalize_tls (uri, error)) { goto error; } if (!mongoc_uri_finalize_auth (uri, error)) { goto error; } bson_free (before_slash); return true; error: bson_free (before_slash); return false; } const mongoc_host_list_t * mongoc_uri_get_hosts (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return uri->hosts; } const char * mongoc_uri_get_replica_set (const mongoc_uri_t *uri) { bson_iter_t iter; BSON_ASSERT (uri); if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_REPLICASET) && BSON_ITER_HOLDS_UTF8 (&iter)) { return bson_iter_utf8 (&iter, NULL); } return NULL; } const bson_t * mongoc_uri_get_credentials (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return &uri->credentials; } const char * mongoc_uri_get_auth_mechanism (const mongoc_uri_t *uri) { bson_iter_t iter; BSON_ASSERT (uri); if (bson_iter_init_find_case ( &iter, &uri->credentials, MONGOC_URI_AUTHMECHANISM) && BSON_ITER_HOLDS_UTF8 (&iter)) { return bson_iter_utf8 (&iter, NULL); } return NULL; } bool mongoc_uri_set_auth_mechanism (mongoc_uri_t *uri, const char *value) { size_t len; BSON_ASSERT (value); len = strlen (value); if (!bson_utf8_validate (value, len, false)) { return false; } mongoc_uri_bson_append_or_replace_key ( &uri->credentials, MONGOC_URI_AUTHMECHANISM, value); return true; } bool mongoc_uri_get_mechanism_properties (const mongoc_uri_t *uri, bson_t *properties /* OUT */) { bson_iter_t iter; BSON_ASSERT (uri); BSON_ASSERT (properties); if (bson_iter_init_find_case ( &iter, &uri->credentials, MONGOC_URI_AUTHMECHANISMPROPERTIES) && BSON_ITER_HOLDS_DOCUMENT (&iter)) { uint32_t len = 0; const uint8_t *data = NULL; bson_iter_document (&iter, &len, &data); BSON_ASSERT (bson_init_static (properties, data, len)); return true; } return false; } bool mongoc_uri_set_mechanism_properties (mongoc_uri_t *uri, const bson_t *properties) { bson_iter_t iter; bson_t tmp = BSON_INITIALIZER; bool r; BSON_ASSERT (uri); BSON_ASSERT (properties); if (bson_iter_init_find ( &iter, &uri->credentials, MONGOC_URI_AUTHMECHANISMPROPERTIES)) { /* copy 
all elements to tmp besides authMechanismProperties */ bson_copy_to_excluding_noinit (&uri->credentials, &tmp, MONGOC_URI_AUTHMECHANISMPROPERTIES, (char *) NULL); r = BSON_APPEND_DOCUMENT ( &tmp, MONGOC_URI_AUTHMECHANISMPROPERTIES, properties); if (!r) { bson_destroy (&tmp); return false; } bson_destroy (&uri->credentials); bson_copy_to (&tmp, &uri->credentials); bson_destroy (&tmp); return true; } else { bson_destroy (&tmp); return BSON_APPEND_DOCUMENT ( &uri->credentials, MONGOC_URI_AUTHMECHANISMPROPERTIES, properties); } } static bool _mongoc_uri_assign_read_prefs_mode (mongoc_uri_t *uri, bson_error_t *error) { const char *str; bson_iter_t iter; BSON_ASSERT (uri); if (mongoc_uri_get_option_as_bool (uri, MONGOC_URI_SLAVEOK, false)) { mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_SECONDARY_PREFERRED); } if (bson_iter_init_find_case ( &iter, &uri->options, MONGOC_URI_READPREFERENCE) && BSON_ITER_HOLDS_UTF8 (&iter)) { str = bson_iter_utf8 (&iter, NULL); if (0 == strcasecmp ("primary", str)) { mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_PRIMARY); } else if (0 == strcasecmp ("primarypreferred", str)) { mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_PRIMARY_PREFERRED); } else if (0 == strcasecmp ("secondary", str)) { mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_SECONDARY); } else if (0 == strcasecmp ("secondarypreferred", str)) { mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_SECONDARY_PREFERRED); } else if (0 == strcasecmp ("nearest", str)) { mongoc_read_prefs_set_mode (uri->read_prefs, MONGOC_READ_NEAREST); } else { MONGOC_URI_ERROR ( error, "Unsupported readPreference value [readPreference=%s]", str); return false; } } return true; } static bool _mongoc_uri_build_write_concern (mongoc_uri_t *uri, bson_error_t *error) { mongoc_write_concern_t *write_concern; const char *str; bson_iter_t iter; int32_t wtimeoutms; int value; BSON_ASSERT (uri); write_concern = mongoc_write_concern_new (); uri->write_concern = write_concern; if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_SAFE) && BSON_ITER_HOLDS_BOOL (&iter)) { mongoc_write_concern_set_w ( write_concern, bson_iter_bool (&iter) ? 
1 : MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED); } wtimeoutms = mongoc_uri_get_option_as_int32 (uri, MONGOC_URI_WTIMEOUTMS, 0); if (wtimeoutms < 0) { MONGOC_URI_ERROR ( error, "Unsupported wtimeoutMS value [w=%d]", wtimeoutms); return false; } else if (wtimeoutms > 0) { mongoc_write_concern_set_wtimeout_int64 (write_concern, wtimeoutms); } if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_JOURNAL) && BSON_ITER_HOLDS_BOOL (&iter)) { mongoc_write_concern_set_journal (write_concern, bson_iter_bool (&iter)); } if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_W)) { if (BSON_ITER_HOLDS_INT32 (&iter)) { value = bson_iter_int32 (&iter); switch (value) { case MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED: case MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED: if (mongoc_write_concern_get_journal (write_concern)) { MONGOC_URI_ERROR ( error, "Journal conflicts with w value [w=%d]", value); return false; } mongoc_write_concern_set_w (write_concern, value); break; default: if (value > 0) { mongoc_write_concern_set_w (write_concern, value); break; } MONGOC_URI_ERROR (error, "Unsupported w value [w=%d]", value); return false; } } else if (BSON_ITER_HOLDS_UTF8 (&iter)) { str = bson_iter_utf8 (&iter, NULL); if (0 == strcasecmp ("majority", str)) { mongoc_write_concern_set_wmajority (write_concern, wtimeoutms); } else { mongoc_write_concern_set_wtag (write_concern, str); } } else { BSON_ASSERT (false); return false; } } return true; } /* can't use mongoc_uri_get_option_as_int32, it treats 0 specially */ static int32_t _mongoc_uri_get_max_staleness_option (const mongoc_uri_t *uri) { const bson_t *options; bson_iter_t iter; int32_t retval = MONGOC_NO_MAX_STALENESS; if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case ( &iter, options, MONGOC_URI_MAXSTALENESSSECONDS) && BSON_ITER_HOLDS_INT32 (&iter)) { retval = bson_iter_int32 (&iter); if (retval == 0) { MONGOC_WARNING ( "Unsupported value for \"" MONGOC_URI_MAXSTALENESSSECONDS "\": \"%d\"", retval); retval = -1; } else if (retval < 0 && retval != -1) { MONGOC_WARNING ( "Unsupported value for \"" MONGOC_URI_MAXSTALENESSSECONDS "\": \"%d\"", retval); retval = MONGOC_NO_MAX_STALENESS; } } return retval; } mongoc_uri_t * mongoc_uri_new_with_error (const char *uri_string, bson_error_t *error) { mongoc_uri_t *uri; int32_t max_staleness_seconds; uri = (mongoc_uri_t *) bson_malloc0 (sizeof *uri); bson_init (&uri->raw); bson_init (&uri->options); bson_init (&uri->credentials); bson_init (&uri->compressors); /* Initialize read_prefs, since parsing may add to it */ uri->read_prefs = mongoc_read_prefs_new (MONGOC_READ_PRIMARY); /* Initialize empty read_concern */ uri->read_concern = mongoc_read_concern_new (); if (!uri_string) { uri_string = "mongodb://127.0.0.1/"; } if (!mongoc_uri_parse (uri, uri_string, error)) { mongoc_uri_destroy (uri); return NULL; } uri->str = bson_strdup (uri_string); if (!_mongoc_uri_assign_read_prefs_mode (uri, error)) { mongoc_uri_destroy (uri); return NULL; } max_staleness_seconds = _mongoc_uri_get_max_staleness_option (uri); mongoc_read_prefs_set_max_staleness_seconds (uri->read_prefs, max_staleness_seconds); if (!mongoc_read_prefs_is_valid (uri->read_prefs)) { mongoc_uri_destroy (uri); MONGOC_URI_ERROR (error, "%s", "Invalid readPreferences"); return NULL; } if (!_mongoc_uri_build_write_concern (uri, error)) { mongoc_uri_destroy (uri); return NULL; } if (!mongoc_write_concern_is_valid (uri->write_concern)) { mongoc_uri_destroy (uri); MONGOC_URI_ERROR (error, "%s", "Invalid writeConcern"); return NULL; } return uri; } 
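/* Usage sketch (illustrative only, not part of libmongoc): one way a caller might combine mongoc_uri_new_with_error() with the option getters defined in this file. The connection string, expected option values, and the helper name example_uri_usage are arbitrary assumptions; the block is wrapped in "#if 0" so it never affects the build. */
#if 0
static void
example_uri_usage (void)
{
   bson_error_t error;
   mongoc_uri_t *uri;
   const char *appname;

   /* Parse a connection string; on failure, error.message explains why. */
   uri = mongoc_uri_new_with_error (
      "mongodb://alice:secret@localhost:27017/mydb?appname=demo&w=majority",
      &error);
   if (!uri) {
      MONGOC_ERROR ("invalid URI: %s", error.message);
      return;
   }

   /* Option lookups are case-insensitive and use canonical option names. */
   appname =
      mongoc_uri_get_option_as_utf8 (uri, MONGOC_URI_APPNAME, "(unset)");
   BSON_ASSERT (0 == strcmp (appname, "demo"));

   /* The write concern built during parsing reflects w=majority. */
   BSON_ASSERT (mongoc_write_concern_get_wmajority (
      mongoc_uri_get_write_concern (uri)));

   mongoc_uri_destroy (uri);
}
#endif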
mongoc_uri_t * mongoc_uri_new (const char *uri_string) { bson_error_t error = {0}; mongoc_uri_t *uri; uri = mongoc_uri_new_with_error (uri_string, &error); if (error.domain) { MONGOC_WARNING ("Error parsing URI: '%s'", error.message); } return uri; } mongoc_uri_t * mongoc_uri_new_for_host_port (const char *hostname, uint16_t port) { mongoc_uri_t *uri; char *str; BSON_ASSERT (hostname); BSON_ASSERT (port); str = bson_strdup_printf ("mongodb://%s:%hu/", hostname, port); uri = mongoc_uri_new (str); bson_free (str); return uri; } const char * mongoc_uri_get_username (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return uri->username; } bool mongoc_uri_set_username (mongoc_uri_t *uri, const char *username) { size_t len; BSON_ASSERT (username); len = strlen (username); if (!bson_utf8_validate (username, len, false)) { return false; } if (uri->username) { bson_free (uri->username); } uri->username = bson_strdup (username); return true; } const char * mongoc_uri_get_password (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return uri->password; } bool mongoc_uri_set_password (mongoc_uri_t *uri, const char *password) { size_t len; BSON_ASSERT (password); len = strlen (password); if (!bson_utf8_validate (password, len, false)) { return false; } if (uri->password) { bson_free (uri->password); } uri->password = bson_strdup (password); return true; } const char * mongoc_uri_get_database (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return uri->database; } bool mongoc_uri_set_database (mongoc_uri_t *uri, const char *database) { size_t len; BSON_ASSERT (database); len = strlen (database); if (!bson_utf8_validate (database, len, false)) { return false; } if (uri->database) { bson_free (uri->database); } uri->database = bson_strdup (database); return true; } const char * mongoc_uri_get_auth_source (const mongoc_uri_t *uri) { bson_iter_t iter; const char *mechanism; BSON_ASSERT (uri); if (bson_iter_init_find_case ( &iter, &uri->credentials, MONGOC_URI_AUTHSOURCE)) { return bson_iter_utf8 (&iter, NULL); } /* Auth spec: * "For GSSAPI and MONGODB-X509 authMechanisms the authSource defaults to * $external. For PLAIN the authSource defaults to the database name if * supplied on the connection string or $external. For MONGODB-CR, * SCRAM-SHA-1 and SCRAM-SHA-256 authMechanisms, the authSource defaults to * the database name if supplied on the connection string or admin." */ mechanism = mongoc_uri_get_auth_mechanism (uri); if (mechanism) { if (!strcasecmp (mechanism, "GSSAPI") || !strcasecmp (mechanism, "MONGODB-X509")) { return "$external"; } if (!strcasecmp (mechanism, "PLAIN")) { return uri->database ? uri->database : "$external"; } } return uri->database ? 
uri->database : "admin"; } bool mongoc_uri_set_auth_source (mongoc_uri_t *uri, const char *value) { size_t len; BSON_ASSERT (value); len = strlen (value); if (!bson_utf8_validate (value, len, false)) { return false; } mongoc_uri_bson_append_or_replace_key ( &uri->credentials, MONGOC_URI_AUTHSOURCE, value); return true; } const char * mongoc_uri_get_appname (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return mongoc_uri_get_option_as_utf8 (uri, MONGOC_URI_APPNAME, NULL); } bool mongoc_uri_set_appname (mongoc_uri_t *uri, const char *value) { BSON_ASSERT (value); if (!bson_utf8_validate (value, strlen (value), false)) { return false; } if (!_mongoc_handshake_appname_is_valid (value)) { return false; } mongoc_uri_bson_append_or_replace_key ( &uri->options, MONGOC_URI_APPNAME, value); return true; } bool mongoc_uri_set_compressors (mongoc_uri_t *uri, const char *value) { const char *end_compressor; char *entry; bson_destroy (&uri->compressors); bson_init (&uri->compressors); if (value && !bson_utf8_validate (value, strlen (value), false)) { return false; } while ((entry = scan_to_unichar (value, ',', "", &end_compressor))) { if (mongoc_compressor_supported (entry)) { mongoc_uri_bson_append_or_replace_key ( &uri->compressors, entry, "yes"); } else { MONGOC_WARNING ("Unsupported compressor: '%s'", entry); } value = end_compressor + 1; bson_free (entry); } if (value) { if (mongoc_compressor_supported (value)) { mongoc_uri_bson_append_or_replace_key ( &uri->compressors, value, "yes"); } else { MONGOC_WARNING ("Unsupported compressor: '%s'", value); } } return true; } const bson_t * mongoc_uri_get_compressors (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return &uri->compressors; } /* can't use mongoc_uri_get_option_as_int32, it treats 0 specially */ int32_t mongoc_uri_get_local_threshold_option (const mongoc_uri_t *uri) { const bson_t *options; bson_iter_t iter; int32_t retval = MONGOC_TOPOLOGY_LOCAL_THRESHOLD_MS; if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case (&iter, options, "localthresholdms") && BSON_ITER_HOLDS_INT32 (&iter)) { retval = bson_iter_int32 (&iter); if (retval < 0) { MONGOC_WARNING ("Invalid localThresholdMS: %d", retval); retval = MONGOC_TOPOLOGY_LOCAL_THRESHOLD_MS; } } return retval; } const char * mongoc_uri_get_service (const mongoc_uri_t *uri) { if (uri->is_srv) { return uri->srv; } return NULL; } const bson_t * mongoc_uri_get_options (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return &uri->options; } void mongoc_uri_destroy (mongoc_uri_t *uri) { if (uri) { _mongoc_host_list_destroy_all (uri->hosts); bson_free (uri->str); bson_free (uri->database); bson_free (uri->username); bson_destroy (&uri->raw); bson_destroy (&uri->options); bson_destroy (&uri->credentials); bson_destroy (&uri->compressors); mongoc_read_prefs_destroy (uri->read_prefs); mongoc_read_concern_destroy (uri->read_concern); mongoc_write_concern_destroy (uri->write_concern); if (uri->password) { bson_zero_free (uri->password, strlen (uri->password)); } bson_free (uri); } } mongoc_uri_t * mongoc_uri_copy (const mongoc_uri_t *uri) { mongoc_uri_t *copy; mongoc_host_list_t *iter; bson_error_t error; BSON_ASSERT (uri); copy = (mongoc_uri_t *) bson_malloc0 (sizeof (*copy)); copy->str = bson_strdup (uri->str); copy->is_srv = uri->is_srv; bson_strncpy (copy->srv, uri->srv, sizeof uri->srv); copy->username = bson_strdup (uri->username); copy->password = bson_strdup (uri->password); copy->database = bson_strdup (uri->database); copy->read_prefs = mongoc_read_prefs_copy (uri->read_prefs); 
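/* The remaining fields are deep-copied below so the new URI owns its own read/write concern, host list, and option documents. */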
copy->read_concern = mongoc_read_concern_copy (uri->read_concern); copy->write_concern = mongoc_write_concern_copy (uri->write_concern); LL_FOREACH (uri->hosts, iter) { if (!mongoc_uri_upsert_host (copy, iter->host, iter->port, &error)) { MONGOC_ERROR ("%s", error.message); mongoc_uri_destroy (copy); return NULL; } } bson_copy_to (&uri->raw, &copy->raw); bson_copy_to (&uri->options, &copy->options); bson_copy_to (&uri->credentials, &copy->credentials); bson_copy_to (&uri->compressors, &copy->compressors); return copy; } const char * mongoc_uri_get_string (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return uri->str; } const bson_t * mongoc_uri_get_read_prefs (const mongoc_uri_t *uri) { BSON_ASSERT (uri); return mongoc_read_prefs_get_tags (uri->read_prefs); } char * mongoc_uri_unescape (const char *escaped_string) { bson_unichar_t c; bson_string_t *str; unsigned int hex = 0; const char *ptr; const char *end; size_t len; BSON_ASSERT (escaped_string); len = strlen (escaped_string); /* * Double check that this is a UTF-8 valid string. Bail out if necessary. */ if (!bson_utf8_validate (escaped_string, len, false)) { MONGOC_WARNING ("%s(): escaped_string contains invalid UTF-8", BSON_FUNC); return NULL; } ptr = escaped_string; end = ptr + len; str = bson_string_new (NULL); for (; *ptr; ptr = bson_utf8_next_char (ptr)) { c = bson_utf8_get_char (ptr); switch (c) { case '%': if (((end - ptr) < 2) || !isxdigit (ptr[1]) || !isxdigit (ptr[2]) || #ifdef _MSC_VER (1 != sscanf_s (&ptr[1], "%02x", &hex)) || #else (1 != sscanf (&ptr[1], "%02x", &hex)) || #endif !isprint (hex)) { bson_string_free (str, true); MONGOC_WARNING ("Invalid %% escape sequence"); return NULL; } bson_string_append_c (str, hex); ptr += 2; break; default: bson_string_append_unichar (str, c); break; } } return bson_string_free (str, false); } const mongoc_read_prefs_t * mongoc_uri_get_read_prefs_t (const mongoc_uri_t *uri) /* IN */ { BSON_ASSERT (uri); return uri->read_prefs; } void mongoc_uri_set_read_prefs_t (mongoc_uri_t *uri, const mongoc_read_prefs_t *prefs) { BSON_ASSERT (uri); BSON_ASSERT (prefs); mongoc_read_prefs_destroy (uri->read_prefs); uri->read_prefs = mongoc_read_prefs_copy (prefs); } const mongoc_read_concern_t * mongoc_uri_get_read_concern (const mongoc_uri_t *uri) /* IN */ { BSON_ASSERT (uri); return uri->read_concern; } void mongoc_uri_set_read_concern (mongoc_uri_t *uri, const mongoc_read_concern_t *rc) { BSON_ASSERT (uri); BSON_ASSERT (rc); mongoc_read_concern_destroy (uri->read_concern); uri->read_concern = mongoc_read_concern_copy (rc); } const mongoc_write_concern_t * mongoc_uri_get_write_concern (const mongoc_uri_t *uri) /* IN */ { BSON_ASSERT (uri); return uri->write_concern; } void mongoc_uri_set_write_concern (mongoc_uri_t *uri, const mongoc_write_concern_t *wc) { BSON_ASSERT (uri); BSON_ASSERT (wc); mongoc_write_concern_destroy (uri->write_concern); uri->write_concern = mongoc_write_concern_copy (wc); } bool mongoc_uri_get_tls (const mongoc_uri_t *uri) /* IN */ { bson_iter_t iter; BSON_ASSERT (uri); if (bson_iter_init_find_case (&iter, &uri->options, MONGOC_URI_TLS) && BSON_ITER_HOLDS_BOOL (&iter)) { return bson_iter_bool (&iter); } if (bson_has_field (&uri->options, MONGOC_URI_TLSCERTIFICATEKEYFILE) || bson_has_field (&uri->options, MONGOC_URI_TLSCAFILE) || bson_has_field (&uri->options, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES) || bson_has_field (&uri->options, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES)) { return true; } return false; } bool mongoc_uri_get_ssl (const mongoc_uri_t *uri) /* IN */ { return mongoc_uri_get_tls 
(uri); } /* *-------------------------------------------------------------------------- * * mongoc_uri_get_option_as_int32 -- * * Checks if the URI 'option' is set and of correct type (int32). * The special value '0' is considered as "unset". * This is so users can provide * sprintf(mongodb://localhost/?option=%d, myvalue) style connection *strings, * and still apply default values. * * If not set, or set to invalid type, 'fallback' is returned. * * NOTE: 'option' is case*in*sensitive. * * Returns: * The value of 'option' if available as int32 (and not 0), or *'fallback'. * *-------------------------------------------------------------------------- */ int32_t mongoc_uri_get_option_as_int32 (const mongoc_uri_t *uri, const char *option_orig, int32_t fallback) { const char *option; const bson_t *options; bson_iter_t iter; int32_t retval = fallback; option = mongoc_uri_canonicalize_option (option_orig); if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case (&iter, options, option) && BSON_ITER_HOLDS_INT32 (&iter)) { if (!(retval = bson_iter_int32 (&iter))) { retval = fallback; } } return retval; } /* *-------------------------------------------------------------------------- * * mongoc_uri_set_option_as_int32 -- * * Sets a URI option 'after the fact'. Allows users to set individual * URI options without passing them as a connection string. * * Only allows a set of known options to be set. * @see mongoc_uri_option_is_int32 (). * * Does in-place-update of the option BSON if 'option' is already set. * Appends the option to the end otherwise. * * NOTE: If 'option' is already set, and is of invalid type, this * function will return false. * * NOTE: 'option' is case*in*sensitive. * * Returns: * true on successfully setting the option, false on failure. * *-------------------------------------------------------------------------- */ bool mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri, const char *option_orig, int32_t value) { const char *option; bson_error_t error; bool r; option = mongoc_uri_canonicalize_option (option_orig); if (!mongoc_uri_option_is_int32 (option)) { MONGOC_WARNING ( "Unsupported value for \"%s\": %d, \"%s\" is not an int32 option", option_orig, value, option); return false; } r = _mongoc_uri_set_option_as_int32_with_error (uri, option, value, &error); if (!r) { MONGOC_WARNING ("%s", error.message); } return r; } /* *-------------------------------------------------------------------------- * * _mongoc_uri_set_option_as_int32_with_error -- * * Same as mongoc_uri_set_option_as_int32, with error reporting. * * Precondition: * mongoc_uri_option_is_int32(option) must be true. * * Returns: * true on successfully setting the option, false on failure. * *-------------------------------------------------------------------------- */ static bool _mongoc_uri_set_option_as_int32_with_error (mongoc_uri_t *uri, const char *option_orig, int32_t value, bson_error_t *error) { const char *option; const bson_t *options; bson_iter_t iter; option = mongoc_uri_canonicalize_option (option_orig); /* Server Discovery and Monitoring Spec: "the driver MUST NOT permit users * to configure it less than minHeartbeatFrequencyMS (500ms)." 
*/ if (!bson_strcasecmp (option, MONGOC_URI_HEARTBEATFREQUENCYMS) && value < MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS) { MONGOC_URI_ERROR (error, "Invalid \"%s\" of %d: must be at least %d", option_orig, value, MONGOC_TOPOLOGY_MIN_HEARTBEAT_FREQUENCY_MS); return false; } /* zlib levels are from -1 (default) through 9 (best compression) */ if (!bson_strcasecmp (option, MONGOC_URI_ZLIBCOMPRESSIONLEVEL) && (value < -1 || value > 9)) { MONGOC_URI_ERROR (error, "Invalid \"%s\" of %d: must be between -1 and 9", option_orig, value); return false; } if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case (&iter, options, option)) { if (BSON_ITER_HOLDS_INT32 (&iter)) { bson_iter_overwrite_int32 (&iter, value); return true; } else { MONGOC_URI_ERROR (error, "Cannot set URI option \"%s\" to %d, it already has " "a non-integer value", option, value); return false; } } if (!bson_append_int32 (&uri->options, option, -1, value)) { MONGOC_URI_ERROR ( error, "Failed to set URI option \"%s\" to %d", option_orig, value); return false; } return true; } /* *-------------------------------------------------------------------------- * * _mongoc_uri_set_option_as_int32 -- * * Same as mongoc_uri_set_option_as_int32, except the option is not * validated against valid int32 options * * Returns: * true on successfully setting the option, false on failure. * *-------------------------------------------------------------------------- */ static bool _mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri, const char *option_orig, int32_t value) { const char *option; const bson_t *options; bson_iter_t iter; option = mongoc_uri_canonicalize_option (option_orig); if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case (&iter, options, option)) { if (BSON_ITER_HOLDS_INT32 (&iter)) { bson_iter_overwrite_int32 (&iter, value); return true; } else { return false; } } bson_append_int32 (&uri->options, option, -1, value); return true; } /* *-------------------------------------------------------------------------- * * mongoc_uri_get_option_as_bool -- * * Checks if the URI 'option' is set and of correct type (bool). * * If not set, or set to invalid type, 'fallback' is returned. * * NOTE: 'option' is case*in*sensitive. * * Returns: * The value of 'option' if available as bool, or 'fallback'. * *-------------------------------------------------------------------------- */ bool mongoc_uri_get_option_as_bool (const mongoc_uri_t *uri, const char *option_orig, bool fallback) { const char *option; const bson_t *options; bson_iter_t iter; option = mongoc_uri_canonicalize_option (option_orig); if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case (&iter, options, option) && BSON_ITER_HOLDS_BOOL (&iter)) { return bson_iter_bool (&iter); } return fallback; } /* *-------------------------------------------------------------------------- * * mongoc_uri_set_option_as_bool -- * * Sets a URI option 'after the fact'. Allows users to set individual * URI options without passing them as a connection string. * * Only allows a set of known options to be set. * @see mongoc_uri_option_is_bool (). * * Does in-place-update of the option BSON if 'option' is already set. * Appends the option to the end otherwise. * * NOTE: If 'option' is already set, and is of invalid type, this * function will return false. * * NOTE: 'option' is case*in*sensitive. * * Returns: * true on successfully setting the option, false on failure. 
* *-------------------------------------------------------------------------- */ bool mongoc_uri_set_option_as_bool (mongoc_uri_t *uri, const char *option_orig, bool value) { const char *option; const bson_t *options; bson_iter_t iter; option = mongoc_uri_canonicalize_option (option_orig); BSON_ASSERT (option); if (!mongoc_uri_option_is_bool (option)) { return false; } if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case (&iter, options, option)) { if (BSON_ITER_HOLDS_BOOL (&iter)) { bson_iter_overwrite_bool (&iter, value); return true; } else { return false; } } bson_append_bool (&uri->options, option, -1, value); return true; } /* *-------------------------------------------------------------------------- * * mongoc_uri_get_option_as_utf8 -- * * Checks if the URI 'option' is set and of correct type (utf8). * * If not set, or set to invalid type, 'fallback' is returned. * * NOTE: 'option' is case*in*sensitive. * * Returns: * The value of 'option' if available as utf8, or 'fallback'. * *-------------------------------------------------------------------------- */ const char * mongoc_uri_get_option_as_utf8 (const mongoc_uri_t *uri, const char *option_orig, const char *fallback) { const char *option; const bson_t *options; bson_iter_t iter; option = mongoc_uri_canonicalize_option (option_orig); if ((options = mongoc_uri_get_options (uri)) && bson_iter_init_find_case (&iter, options, option) && BSON_ITER_HOLDS_UTF8 (&iter)) { return bson_iter_utf8 (&iter, NULL); } return fallback; } /* *-------------------------------------------------------------------------- * * mongoc_uri_set_option_as_utf8 -- * * Sets a URI option 'after the fact'. Allows users to set individual * URI options without passing them as a connection string. * * Only allows a set of known options to be set. * @see mongoc_uri_option_is_utf8 (). * * If the option is not already set, this function will append it to *the end of the options bson. NOTE: If the option is already set the entire *options bson will be overwritten, containing the new option=value *(at the same position). * * NOTE: If 'option' is already set, and is of invalid type, this * function will return false. * * NOTE: 'option' must be valid utf8. * * NOTE: 'option' is case*in*sensitive. * * Returns: * true on successfully setting the option, false on failure. * *-------------------------------------------------------------------------- */ bool mongoc_uri_set_option_as_utf8 (mongoc_uri_t *uri, const char *option_orig, const char *value) { const char *option; size_t len; option = mongoc_uri_canonicalize_option (option_orig); BSON_ASSERT (option); len = strlen (value); if (!bson_utf8_validate (value, len, false)) { return false; } if (!mongoc_uri_option_is_utf8 (option)) { return false; } if (!bson_strcasecmp (option, MONGOC_URI_APPNAME)) { return mongoc_uri_set_appname (uri, value); } else { mongoc_uri_bson_append_or_replace_key (&uri->options, option, value); } return true; } /* *-------------------------------------------------------------------------- * * _mongoc_uri_requires_auth_negotiation -- * * Returns true if auth mechanism is necessary for this uri. According * to the auth spec: "If an application provides a username but does * not provide an authentication mechanism, drivers MUST negotiate a * mechanism". 
* * Returns: * true if the driver should negotiate the auth mechanism for the uri * *-------------------------------------------------------------------------- */ bool _mongoc_uri_requires_auth_negotiation (const mongoc_uri_t *uri) { return mongoc_uri_get_username (uri) && !mongoc_uri_get_auth_mechanism (uri); } /* A bit of a hack. Needed for multi mongos tests to create a URI with the same * auth, SSL, and compressors settings but with only one specific host. */ mongoc_uri_t * _mongoc_uri_copy_and_replace_host_list (const mongoc_uri_t *original, const char *host) { mongoc_uri_t *uri = mongoc_uri_copy (original); _mongoc_host_list_destroy_all (uri->hosts); uri->hosts = bson_malloc0 (sizeof (mongoc_host_list_t)); _mongoc_host_list_from_string (uri->hosts, host); return uri; }mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-uri.h0000644000076500000240000002050513572250760024256 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_URI_H #define MONGOC_URI_H #include #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-read-prefs.h" #include "mongoc/mongoc-read-concern.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-config.h" #ifndef MONGOC_DEFAULT_PORT #define MONGOC_DEFAULT_PORT 27017 #endif #define MONGOC_URI_APPNAME "appname" #define MONGOC_URI_AUTHMECHANISM "authmechanism" #define MONGOC_URI_AUTHMECHANISMPROPERTIES "authmechanismproperties" #define MONGOC_URI_AUTHSOURCE "authsource" #define MONGOC_URI_CANONICALIZEHOSTNAME "canonicalizehostname" #define MONGOC_URI_CONNECTTIMEOUTMS "connecttimeoutms" #define MONGOC_URI_COMPRESSORS "compressors" #define MONGOC_URI_GSSAPISERVICENAME "gssapiservicename" #define MONGOC_URI_HEARTBEATFREQUENCYMS "heartbeatfrequencyms" #define MONGOC_URI_JOURNAL "journal" #define MONGOC_URI_LOCALTHRESHOLDMS "localthresholdms" #define MONGOC_URI_MAXIDLETIMEMS "maxidletimems" #define MONGOC_URI_MAXPOOLSIZE "maxpoolsize" #define MONGOC_URI_MAXSTALENESSSECONDS "maxstalenessseconds" #define MONGOC_URI_MINPOOLSIZE "minpoolsize" #define MONGOC_URI_READCONCERNLEVEL "readconcernlevel" #define MONGOC_URI_READPREFERENCE "readpreference" #define MONGOC_URI_READPREFERENCETAGS "readpreferencetags" #define MONGOC_URI_REPLICASET "replicaset" #define MONGOC_URI_RETRYREADS "retryreads" #define MONGOC_URI_RETRYWRITES "retrywrites" #define MONGOC_URI_SAFE "safe" #define MONGOC_URI_SERVERSELECTIONTIMEOUTMS "serverselectiontimeoutms" #define MONGOC_URI_SERVERSELECTIONTRYONCE "serverselectiontryonce" #define MONGOC_URI_SLAVEOK "slaveok" #define MONGOC_URI_SOCKETCHECKINTERVALMS "socketcheckintervalms" #define MONGOC_URI_SOCKETTIMEOUTMS "sockettimeoutms" #define MONGOC_URI_TLS "tls" #define MONGOC_URI_TLSCERTIFICATEKEYFILE "tlscertificatekeyfile" #define MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD "tlscertificatekeyfilepassword" #define MONGOC_URI_TLSCAFILE "tlscafile" #define MONGOC_URI_TLSALLOWINVALIDCERTIFICATES 
"tlsallowinvalidcertificates" #define MONGOC_URI_TLSALLOWINVALIDHOSTNAMES "tlsallowinvalidhostnames" #define MONGOC_URI_TLSINSECURE "tlsinsecure" #define MONGOC_URI_W "w" #define MONGOC_URI_WAITQUEUEMULTIPLE "waitqueuemultiple" #define MONGOC_URI_WAITQUEUETIMEOUTMS "waitqueuetimeoutms" #define MONGOC_URI_WTIMEOUTMS "wtimeoutms" #define MONGOC_URI_ZLIBCOMPRESSIONLEVEL "zlibcompressionlevel" /* Deprecated in MongoDB 4.2, use "tls" variants instead. */ #define MONGOC_URI_SSL "ssl" #define MONGOC_URI_SSLCLIENTCERTIFICATEKEYFILE "sslclientcertificatekeyfile" #define MONGOC_URI_SSLCLIENTCERTIFICATEKEYPASSWORD \ "sslclientcertificatekeypassword" #define MONGOC_URI_SSLCERTIFICATEAUTHORITYFILE "sslcertificateauthorityfile" #define MONGOC_URI_SSLALLOWINVALIDCERTIFICATES "sslallowinvalidcertificates" #define MONGOC_URI_SSLALLOWINVALIDHOSTNAMES "sslallowinvalidhostnames" BSON_BEGIN_DECLS typedef struct _mongoc_uri_t mongoc_uri_t; MONGOC_EXPORT (mongoc_uri_t *) mongoc_uri_copy (const mongoc_uri_t *uri); MONGOC_EXPORT (void) mongoc_uri_destroy (mongoc_uri_t *uri); MONGOC_EXPORT (mongoc_uri_t *) mongoc_uri_new (const char *uri_string) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (mongoc_uri_t *) mongoc_uri_new_with_error (const char *uri_string, bson_error_t *error) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (mongoc_uri_t *) mongoc_uri_new_for_host_port (const char *hostname, uint16_t port) BSON_GNUC_WARN_UNUSED_RESULT; MONGOC_EXPORT (const mongoc_host_list_t *) mongoc_uri_get_hosts (const mongoc_uri_t *uri); MONGOC_EXPORT (const char *) mongoc_uri_get_service (const mongoc_uri_t *uri); MONGOC_EXPORT (const char *) mongoc_uri_get_database (const mongoc_uri_t *uri); MONGOC_EXPORT (bool) mongoc_uri_set_database (mongoc_uri_t *uri, const char *database); MONGOC_EXPORT (const bson_t *) mongoc_uri_get_compressors (const mongoc_uri_t *uri); MONGOC_EXPORT (const bson_t *) mongoc_uri_get_options (const mongoc_uri_t *uri); MONGOC_EXPORT (const char *) mongoc_uri_get_password (const mongoc_uri_t *uri); MONGOC_EXPORT (bool) mongoc_uri_set_password (mongoc_uri_t *uri, const char *password); MONGOC_EXPORT (bool) mongoc_uri_option_is_int32 (const char *key); MONGOC_EXPORT (bool) mongoc_uri_option_is_bool (const char *key); MONGOC_EXPORT (bool) mongoc_uri_option_is_utf8 (const char *key); MONGOC_EXPORT (int32_t) mongoc_uri_get_option_as_int32 (const mongoc_uri_t *uri, const char *option, int32_t fallback); MONGOC_EXPORT (bool) mongoc_uri_get_option_as_bool (const mongoc_uri_t *uri, const char *option, bool fallback); MONGOC_EXPORT (const char *) mongoc_uri_get_option_as_utf8 (const mongoc_uri_t *uri, const char *option, const char *fallback); MONGOC_EXPORT (bool) mongoc_uri_set_option_as_int32 (mongoc_uri_t *uri, const char *option, int32_t value); MONGOC_EXPORT (bool) mongoc_uri_set_option_as_bool (mongoc_uri_t *uri, const char *option, bool value); MONGOC_EXPORT (bool) mongoc_uri_set_option_as_utf8 (mongoc_uri_t *uri, const char *option, const char *value); MONGOC_EXPORT (const bson_t *) mongoc_uri_get_read_prefs (const mongoc_uri_t *uri) BSON_GNUC_DEPRECATED_FOR (mongoc_uri_get_read_prefs_t); MONGOC_EXPORT (const char *) mongoc_uri_get_replica_set (const mongoc_uri_t *uri); MONGOC_EXPORT (const char *) mongoc_uri_get_string (const mongoc_uri_t *uri); MONGOC_EXPORT (const char *) mongoc_uri_get_username (const mongoc_uri_t *uri); MONGOC_EXPORT (bool) mongoc_uri_set_username (mongoc_uri_t *uri, const char *username); MONGOC_EXPORT (const bson_t *) mongoc_uri_get_credentials (const mongoc_uri_t *uri); MONGOC_EXPORT 
(const char *) mongoc_uri_get_auth_source (const mongoc_uri_t *uri); MONGOC_EXPORT (bool) mongoc_uri_set_auth_source (mongoc_uri_t *uri, const char *value); MONGOC_EXPORT (const char *) mongoc_uri_get_appname (const mongoc_uri_t *uri); MONGOC_EXPORT (bool) mongoc_uri_set_appname (mongoc_uri_t *uri, const char *value); MONGOC_EXPORT (bool) mongoc_uri_set_compressors (mongoc_uri_t *uri, const char *value); MONGOC_EXPORT (const char *) mongoc_uri_get_auth_mechanism (const mongoc_uri_t *uri); MONGOC_EXPORT (bool) mongoc_uri_set_auth_mechanism (mongoc_uri_t *uri, const char *value); MONGOC_EXPORT (bool) mongoc_uri_get_mechanism_properties (const mongoc_uri_t *uri, bson_t *properties); MONGOC_EXPORT (bool) mongoc_uri_set_mechanism_properties (mongoc_uri_t *uri, const bson_t *properties); MONGOC_EXPORT (bool) mongoc_uri_get_ssl (const mongoc_uri_t *uri) BSON_GNUC_DEPRECATED_FOR (mongoc_uri_get_tls); MONGOC_EXPORT (bool) mongoc_uri_get_tls (const mongoc_uri_t *uri); MONGOC_EXPORT (char *) mongoc_uri_unescape (const char *escaped_string); MONGOC_EXPORT (const mongoc_read_prefs_t *) mongoc_uri_get_read_prefs_t (const mongoc_uri_t *uri); MONGOC_EXPORT (void) mongoc_uri_set_read_prefs_t (mongoc_uri_t *uri, const mongoc_read_prefs_t *prefs); MONGOC_EXPORT (const mongoc_write_concern_t *) mongoc_uri_get_write_concern (const mongoc_uri_t *uri); MONGOC_EXPORT (void) mongoc_uri_set_write_concern (mongoc_uri_t *uri, const mongoc_write_concern_t *wc); MONGOC_EXPORT (const mongoc_read_concern_t *) mongoc_uri_get_read_concern (const mongoc_uri_t *uri); MONGOC_EXPORT (void) mongoc_uri_set_read_concern (mongoc_uri_t *uri, const mongoc_read_concern_t *rc); BSON_END_DECLS #endif /* MONGOC_URI_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-util-private.h0000644000076500000240000000740713572250760026112 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_UTIL_PRIVATE_H #define MONGOC_UTIL_PRIVATE_H #include #include "mongoc/mongoc.h" #ifdef BSON_HAVE_STRINGS_H #include #endif /* string comparison functions for Windows */ #ifdef _WIN32 #define strcasecmp _stricmp #define strncasecmp _strnicmp #endif #if BSON_GNUC_CHECK_VERSION(4, 6) #define BEGIN_IGNORE_DEPRECATIONS \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wdeprecated-declarations\"") #define END_IGNORE_DEPRECATIONS _Pragma ("GCC diagnostic pop") #elif defined(__clang__) #define BEGIN_IGNORE_DEPRECATIONS \ _Pragma ("clang diagnostic push") \ _Pragma ("clang diagnostic ignored \"-Wdeprecated-declarations\"") #define END_IGNORE_DEPRECATIONS _Pragma ("clang diagnostic pop") #else #define BEGIN_IGNORE_DEPRECATIONS #define END_IGNORE_DEPRECATIONS #endif #ifndef _WIN32 #define MONGOC_PRINTF_FORMAT(a, b) __attribute__ ((format (__printf__, a, b))) #else #define MONGOC_PRINTF_FORMAT(a, b) /* no-op */ #endif #define COALESCE(x, y) ((x == 0) ? 
(y) : (x)) /* Helper macros for stringifying things */ #define MONGOC_STR(s) #s #define MONGOC_EVALUATE_STR(s) MONGOC_STR (s) BSON_BEGIN_DECLS extern const bson_validate_flags_t _mongoc_default_insert_vflags; extern const bson_validate_flags_t _mongoc_default_replace_vflags; extern const bson_validate_flags_t _mongoc_default_update_vflags; int _mongoc_rand_simple (unsigned int *seed); char * _mongoc_hex_md5 (const char *input); void _mongoc_usleep (int64_t usec); const char * _mongoc_get_command_name (const bson_t *command); const char * _mongoc_get_documents_field_name (const char *command_name); bool _mongoc_lookup_bool (const bson_t *bson, const char *key, bool default_value); void _mongoc_get_db_name (const char *ns, char *db /* OUT */); void _mongoc_bson_init_if_set (bson_t *bson); const char * _mongoc_bson_type_to_str (bson_type_t t); bool _mongoc_get_server_id_from_opts (const bson_t *opts, mongoc_error_domain_t domain, mongoc_error_code_t code, uint32_t *server_id, bson_error_t *error); bool _mongoc_validate_new_document (const bson_t *insert, bson_validate_flags_t vflags, bson_error_t *error); bool _mongoc_validate_replace (const bson_t *insert, bson_validate_flags_t vflags, bson_error_t *error); bool _mongoc_validate_update (const bson_t *update, bson_validate_flags_t vflags, bson_error_t *error); void mongoc_lowercase (const char *src, char *buf /* OUT */); bool mongoc_parse_port (uint16_t *port, const char *str); void _mongoc_bson_array_add_label (bson_t *bson, const char *label); void _mongoc_bson_array_copy_labels_to (const bson_t *reply, bson_t *dst); void _mongoc_bson_init_with_transient_txn_error (const mongoc_client_session_t *cs, bson_t *reply); bool _mongoc_document_is_pipeline (const bson_t *document); BSON_END_DECLS #endif /* MONGOC_UTIL_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-util.c0000644000076500000240000003272613572250760024437 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifdef _WIN32 #define _CRT_RAND_S #endif #include #include "common-md5-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-trace-private.h" const bson_validate_flags_t _mongoc_default_insert_vflags = BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL | BSON_VALIDATE_EMPTY_KEYS | BSON_VALIDATE_DOT_KEYS | BSON_VALIDATE_DOLLAR_KEYS; const bson_validate_flags_t _mongoc_default_replace_vflags = BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL | BSON_VALIDATE_EMPTY_KEYS | BSON_VALIDATE_DOT_KEYS | BSON_VALIDATE_DOLLAR_KEYS; const bson_validate_flags_t _mongoc_default_update_vflags = BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL | BSON_VALIDATE_EMPTY_KEYS; int _mongoc_rand_simple (unsigned int *seed) { #ifdef _WIN32 /* ignore the seed */ unsigned int ret = 0; errno_t err; err = rand_s (&ret); if (0 != err) { /* include the error code; the original format string had a bare "%" */ MONGOC_ERROR ("rand_s failed: %d", (int) err); } return (int) ret; #else return rand_r (seed); #endif } char * _mongoc_hex_md5 (const char *input) { uint8_t digest[16]; bson_md5_t md5; char digest_str[33]; int i; _bson_md5_init (&md5); _bson_md5_append (&md5, (const uint8_t *) input, (uint32_t) strlen (input)); _bson_md5_finish (&md5, digest); for (i = 0; i < sizeof digest; i++) { bson_snprintf (&digest_str[i * 2], 3, "%02x", digest[i]); } digest_str[sizeof digest_str - 1] = '\0'; return bson_strdup (digest_str); } void _mongoc_usleep (int64_t usec) { #ifdef _WIN32 LARGE_INTEGER ft; HANDLE timer; BSON_ASSERT (usec >= 0); ft.QuadPart = -(10 * usec); timer = CreateWaitableTimer (NULL, true, NULL); SetWaitableTimer (timer, &ft, 0, NULL, NULL, 0); WaitForSingleObject (timer, INFINITE); CloseHandle (timer); #else BSON_ASSERT (usec >= 0); usleep ((useconds_t) usec); #endif } const char * _mongoc_get_command_name (const bson_t *command) { bson_iter_t iter; const char *name; bson_iter_t child; const char *wrapper_name = NULL; BSON_ASSERT (command); if (!bson_iter_init (&iter, command) || !bson_iter_next (&iter)) { return NULL; } name = bson_iter_key (&iter); /* wrapped in "$query" or "query"? 
* * {$query: {count: "collection"}, $readPreference: {...}} */ if (name[0] == '$') { wrapper_name = "$query"; } else if (!strcmp (name, "query")) { wrapper_name = "query"; } if (wrapper_name && bson_iter_init_find (&iter, command, wrapper_name) && BSON_ITER_HOLDS_DOCUMENT (&iter) && bson_iter_recurse (&iter, &child) && bson_iter_next (&child)) { name = bson_iter_key (&child); } return name; } const char * _mongoc_get_documents_field_name (const char *command_name) { if (!strcmp (command_name, "insert")) { return "documents"; } if (!strcmp (command_name, "update")) { return "updates"; } if (!strcmp (command_name, "delete")) { return "deletes"; } return NULL; } bool _mongoc_lookup_bool (const bson_t *bson, const char *key, bool default_value) { bson_iter_t iter; bson_iter_t child; if (!bson) { return default_value; } BSON_ASSERT (bson_iter_init (&iter, bson)); if (!bson_iter_find_descendant (&iter, key, &child)) { return default_value; } return bson_iter_as_bool (&child); } void _mongoc_get_db_name (const char *ns, char *db /* OUT */) { size_t dblen; const char *dot; BSON_ASSERT (ns); dot = strstr (ns, "."); if (dot) { dblen = BSON_MIN (dot - ns + 1, MONGOC_NAMESPACE_MAX); bson_strncpy (db, ns, dblen); } else { bson_strncpy (db, ns, MONGOC_NAMESPACE_MAX); } } void _mongoc_bson_init_if_set (bson_t *bson) { if (bson) { bson_init (bson); } } const char * _mongoc_bson_type_to_str (bson_type_t t) { switch (t) { case BSON_TYPE_EOD: return "EOD"; case BSON_TYPE_DOUBLE: return "DOUBLE"; case BSON_TYPE_UTF8: return "UTF8"; case BSON_TYPE_DOCUMENT: return "DOCUMENT"; case BSON_TYPE_ARRAY: return "ARRAY"; case BSON_TYPE_BINARY: return "BINARY"; case BSON_TYPE_UNDEFINED: return "UNDEFINED"; case BSON_TYPE_OID: return "OID"; case BSON_TYPE_BOOL: return "BOOL"; case BSON_TYPE_DATE_TIME: return "DATE_TIME"; case BSON_TYPE_NULL: return "NULL"; case BSON_TYPE_REGEX: return "REGEX"; case BSON_TYPE_DBPOINTER: return "DBPOINTER"; case BSON_TYPE_CODE: return "CODE"; case BSON_TYPE_SYMBOL: return "SYMBOL"; case BSON_TYPE_CODEWSCOPE: return "CODEWSCOPE"; case BSON_TYPE_INT32: return "INT32"; case BSON_TYPE_TIMESTAMP: return "TIMESTAMP"; case BSON_TYPE_INT64: return "INT64"; case BSON_TYPE_MAXKEY: return "MAXKEY"; case BSON_TYPE_MINKEY: return "MINKEY"; case BSON_TYPE_DECIMAL128: return "DECIMAL128"; default: return "Unknown"; } } /* Get "serverId" from opts. Sets *server_id to the serverId from "opts" or 0 * if absent. On error, fills out *error with domain and code and return false. 
*/ bool _mongoc_get_server_id_from_opts (const bson_t *opts, mongoc_error_domain_t domain, mongoc_error_code_t code, uint32_t *server_id, bson_error_t *error) { bson_iter_t iter; ENTRY; BSON_ASSERT (server_id); *server_id = 0; if (!opts || !bson_iter_init_find (&iter, opts, "serverId")) { RETURN (true); } if (!BSON_ITER_HOLDS_INT (&iter)) { bson_set_error ( error, domain, code, "The serverId option must be an integer"); RETURN (false); } if (bson_iter_as_int64 (&iter) <= 0) { bson_set_error (error, domain, code, "The serverId option must be >= 1"); RETURN (false); } *server_id = (uint32_t) bson_iter_as_int64 (&iter); RETURN (true); } bool _mongoc_validate_new_document (const bson_t *doc, bson_validate_flags_t vflags, bson_error_t *error) { bson_error_t validate_err; if (vflags == BSON_VALIDATE_NONE) { return true; } if (!bson_validate_with_error (doc, vflags, &validate_err)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "invalid document for insert: %s", validate_err.message); return false; } return true; } bool _mongoc_validate_replace (const bson_t *doc, bson_validate_flags_t vflags, bson_error_t *error) { bson_error_t validate_err; if (vflags == BSON_VALIDATE_NONE) { return true; } if (!bson_validate_with_error (doc, vflags, &validate_err)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "invalid argument for replace: %s", validate_err.message); return false; } return true; } bool _mongoc_validate_update (const bson_t *update, bson_validate_flags_t vflags, bson_error_t *error) { bson_error_t validate_err; bson_iter_t iter; const char *key; if (vflags == BSON_VALIDATE_NONE) { return true; } if (!bson_validate_with_error (update, vflags, &validate_err)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "invalid argument for update: %s", validate_err.message); return false; } if (_mongoc_document_is_pipeline (update)) { return true; } if (!bson_iter_init (&iter, update)) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "update document is corrupt"); return false; } while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); if (key[0] != '$') { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid key '%s': update only works with $ operators" " and pipelines", key); return false; } } return true; } void mongoc_lowercase (const char *src, char *buf /* OUT */) { for (; *src; ++src, ++buf) { /* UTF8 non-ascii characters have a 1 at the leftmost bit. If this is the * case, just copy */ if ((*src & (0x1 << 7)) == 0) { *buf = (char) tolower (*src); } else { *buf = *src; } } } bool mongoc_parse_port (uint16_t *port, const char *str) { unsigned long ul_port; ul_port = strtoul (str, NULL, 10); if (ul_port == 0 || ul_port > UINT16_MAX) { /* Parse error or port number out of range. mongod prohibits port 0. */ return false; } *port = (uint16_t) ul_port; return true; } /*-------------------------------------------------------------------------- * * _mongoc_bson_array_add_label -- * * Append an error label like "TransientTransactionError" to a BSON * array iff the array does not already contain it. * * Side effects: * Aborts if the array is invalid or contains non-string elements. 
* *-------------------------------------------------------------------------- */ void _mongoc_bson_array_add_label (bson_t *bson, const char *label) { bson_iter_t iter; char buf[16]; uint32_t i = 0; const char *key; BSON_ASSERT (bson_iter_init (&iter, bson)); while (bson_iter_next (&iter)) { if (!strcmp (bson_iter_utf8 (&iter, NULL), label)) { /* already included once */ return; } i++; } bson_uint32_to_string (i, &key, buf, sizeof buf); BSON_APPEND_UTF8 (bson, key, label); } /*-------------------------------------------------------------------------- * * _mongoc_bson_array_copy_labels_to -- * * Copy error labels like "TransientTransactionError" from a server * reply to a BSON array iff the array does not already contain it. * * Side effects: * Aborts if @dst is invalid or contains non-string elements. * *-------------------------------------------------------------------------- */ void _mongoc_bson_array_copy_labels_to (const bson_t *reply, bson_t *dst) { bson_iter_t iter; bson_iter_t label; if (bson_iter_init_find (&iter, reply, "errorLabels")) { BSON_ASSERT (bson_iter_recurse (&iter, &label)); while (bson_iter_next (&label)) { if (BSON_ITER_HOLDS_UTF8 (&label)) { _mongoc_bson_array_add_label (dst, bson_iter_utf8 (&label, NULL)); } } } } /*-------------------------------------------------------------------------- * * _mongoc_bson_init_with_transient_txn_error -- * * If @reply is not NULL, initialize it. If @cs is not NULL and in a * transaction, add errorLabels: ["TransientTransactionError"] to @cs. * * Transactions Spec: TransientTransactionError includes "server * selection error encountered running any command besides * commitTransaction in a transaction. ...in the case of network errors * or server selection errors where the client receives no server reply, * the client adds the label." * * Side effects: * None. * *-------------------------------------------------------------------------- */ void _mongoc_bson_init_with_transient_txn_error (const mongoc_client_session_t *cs, bson_t *reply) { bson_t labels; if (!reply) { return; } bson_init (reply); if (_mongoc_client_session_in_txn (cs)) { BSON_APPEND_ARRAY_BEGIN (reply, "errorLabels", &labels); BSON_APPEND_UTF8 (&labels, "0", TRANSIENT_TXN_ERR); bson_append_array_end (reply, &labels); } } bool _mongoc_document_is_pipeline (const bson_t *document) { bson_iter_t iter; bson_iter_t child; const char *key; int i = 0; char *i_str; if (!bson_iter_init (&iter, document)) { return false; } while (bson_iter_next (&iter)) { key = bson_iter_key (&iter); i_str = bson_strdup_printf ("%d", i++); if (strcmp (key, i_str)) { bson_free (i_str); return false; } bson_free (i_str); if (BSON_ITER_HOLDS_DOCUMENT (&iter)) { if (!bson_iter_recurse (&iter, &child)) { return false; } if (!bson_iter_next (&child)) { return false; } key = bson_iter_key (&child); if (key[0] != '$') { return false; } } else { return false; } } /* should return false when the document is empty */ return i != 0; }mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-version-functions.c0000644000076500000240000000332113572250760027142 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-version.h" #include "mongoc/mongoc-version-functions.h" /** * mongoc_get_major_version: * * Helper function to return the runtime major version of the library. */ int mongoc_get_major_version (void) { return MONGOC_MAJOR_VERSION; } /** * mongoc_get_minor_version: * * Helper function to return the runtime minor version of the library. */ int mongoc_get_minor_version (void) { return MONGOC_MINOR_VERSION; } /** * mongoc_get_micro_version: * * Helper function to return the runtime micro version of the library. */ int mongoc_get_micro_version (void) { return MONGOC_MICRO_VERSION; } /** * mongoc_get_version: * * Helper function to return the runtime string version of the library. */ const char * mongoc_get_version (void) { return MONGOC_VERSION_S; } /** * mongoc_check_version: * * True if libmongoc's version is greater than or equal to the required * version. */ bool mongoc_check_version (int required_major, int required_minor, int required_micro) { return MONGOC_CHECK_VERSION (required_major, required_minor, required_micro); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-version-functions.h0000644000076500000240000000230213572250760027145 0ustar alcaeusstaff/* * Copyright 2015 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_VERSION_FUNCTIONS_H #define MONGOC_VERSION_FUNCTIONS_H #include /* for "bool" */ #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS MONGOC_EXPORT (int) mongoc_get_major_version (void); MONGOC_EXPORT (int) mongoc_get_minor_version (void); MONGOC_EXPORT (int) mongoc_get_micro_version (void); MONGOC_EXPORT (const char *) mongoc_get_version (void); MONGOC_EXPORT (bool) mongoc_check_version (int required_major, int required_minor, int required_micro); BSON_END_DECLS #endif /* MONGOC_VERSION_FUNCTIONS_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-version.h0000644000076500000240000000466213572250760025152 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #if !defined (MONGOC_INSIDE) && !defined (MONGOC_COMPILATION) #error "Only can be included directly." #endif #ifndef MONGOC_VERSION_H #define MONGOC_VERSION_H /** * MONGOC_MAJOR_VERSION: * * MONGOC major version component (e.g. 1 if %MONGOC_VERSION is 1.2.3) */ #define MONGOC_MAJOR_VERSION (1) /** * MONGOC_MINOR_VERSION: * * MONGOC minor version component (e.g. 2 if %MONGOC_VERSION is 1.2.3) */ #define MONGOC_MINOR_VERSION (15) /** * MONGOC_MICRO_VERSION: * * MONGOC micro version component (e.g. 3 if %MONGOC_VERSION is 1.2.3) */ #define MONGOC_MICRO_VERSION (2) /** * MONGOC_PRERELEASE_VERSION: * * MONGOC prerelease version component (e.g. pre if %MONGOC_VERSION is 1.2.3-pre) */ #define MONGOC_PRERELEASE_VERSION () /** * MONGOC_VERSION: * * MONGOC version. */ #define MONGOC_VERSION (1.15.2) /** * MONGOC_VERSION_S: * * MONGOC version, encoded as a string, useful for printing and * concatenation. */ #define MONGOC_VERSION_S "1.15.2" /** * MONGOC_VERSION_HEX: * * MONGOC version, encoded as an hexadecimal number, useful for * integer comparisons. */ #define MONGOC_VERSION_HEX (MONGOC_MAJOR_VERSION << 24 | \ MONGOC_MINOR_VERSION << 16 | \ MONGOC_MICRO_VERSION << 8) /** * MONGOC_CHECK_VERSION: * @major: required major version * @minor: required minor version * @micro: required micro version * * Compile-time version checking. Evaluates to %TRUE if the version * of MONGOC is greater than the required one. */ #define MONGOC_CHECK_VERSION(major,minor,micro) \ (MONGOC_MAJOR_VERSION > (major) || \ (MONGOC_MAJOR_VERSION == (major) && MONGOC_MINOR_VERSION > (minor)) || \ (MONGOC_MAJOR_VERSION == (major) && MONGOC_MINOR_VERSION == (minor) && \ MONGOC_MICRO_VERSION >= (micro))) #endif /* MONGOC_VERSION_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-version.h.in0000644000076500000240000000503713572250760025554 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #if !defined (MONGOC_INSIDE) && !defined (MONGOC_COMPILATION) #error "Only can be included directly." #endif #ifndef MONGOC_VERSION_H #define MONGOC_VERSION_H /** * MONGOC_MAJOR_VERSION: * * MONGOC major version component (e.g. 1 if %MONGOC_VERSION is 1.2.3) */ #define MONGOC_MAJOR_VERSION (@MONGOC_MAJOR_VERSION@) /** * MONGOC_MINOR_VERSION: * * MONGOC minor version component (e.g. 2 if %MONGOC_VERSION is 1.2.3) */ #define MONGOC_MINOR_VERSION (@MONGOC_MINOR_VERSION@) /** * MONGOC_MICRO_VERSION: * * MONGOC micro version component (e.g. 3 if %MONGOC_VERSION is 1.2.3) */ #define MONGOC_MICRO_VERSION (@MONGOC_MICRO_VERSION@) /** * MONGOC_PRERELEASE_VERSION: * * MONGOC prerelease version component (e.g. pre if %MONGOC_VERSION is 1.2.3-pre) */ #define MONGOC_PRERELEASE_VERSION (@MONGOC_PRERELEASE_VERSION@) /** * MONGOC_VERSION: * * MONGOC version. */ #define MONGOC_VERSION (@MONGOC_VERSION@) /** * MONGOC_VERSION_S: * * MONGOC version, encoded as a string, useful for printing and * concatenation. 
*/ #define MONGOC_VERSION_S "@MONGOC_VERSION@" /** * MONGOC_VERSION_HEX: * * MONGOC version, encoded as an hexadecimal number, useful for * integer comparisons. */ #define MONGOC_VERSION_HEX (MONGOC_MAJOR_VERSION << 24 | \ MONGOC_MINOR_VERSION << 16 | \ MONGOC_MICRO_VERSION << 8) /** * MONGOC_CHECK_VERSION: * @major: required major version * @minor: required minor version * @micro: required micro version * * Compile-time version checking. Evaluates to %TRUE if the version * of MONGOC is greater than the required one. */ #define MONGOC_CHECK_VERSION(major,minor,micro) \ (MONGOC_MAJOR_VERSION > (major) || \ (MONGOC_MAJOR_VERSION == (major) && MONGOC_MINOR_VERSION > (minor)) || \ (MONGOC_MAJOR_VERSION == (major) && MONGOC_MINOR_VERSION == (minor) && \ MONGOC_MICRO_VERSION >= (micro))) #endif /* MONGOC_VERSION_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-write-command-legacy-private.h0000644000076500000240000000465513572250760031147 0ustar alcaeusstaff/* * Copyright 2014-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_WRITE_COMMAND_LEGACY_PRIVATE_H #define MONGOC_WRITE_COMMAND_LEGACY_PRIVATE_H #include #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-write-command-private.h" BSON_BEGIN_DECLS void _mongoc_write_command_insert_legacy (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error); void _mongoc_write_command_update_legacy (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error); void _mongoc_write_command_delete_legacy (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_WRITE_COMMAND_LEGACY_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-write-command-legacy.c0000644000076500000240000004101613572250760027462 0ustar alcaeusstaff/* * Copyright 2014-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include "mongoc/mongoc-write-command-legacy-private.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-util-private.h" static void _mongoc_monitor_legacy_write (mongoc_client_t *client, mongoc_write_command_t *command, const char *db, const char *collection, mongoc_server_stream_t *stream, int64_t request_id) { bson_t doc; bson_t wc; mongoc_apm_command_started_t event; ENTRY; if (!client->apm_callbacks.started) { EXIT; } bson_init (&doc); _mongoc_write_command_init (&doc, command, collection); BSON_APPEND_DOCUMENT_BEGIN (&doc, "writeConcern", &wc); BSON_APPEND_INT32 (&wc, "w", 0); bson_append_document_end (&doc, &wc); _append_array_from_command (command, &doc); mongoc_apm_command_started_init ( &event, &doc, db, _mongoc_command_type_to_name (command->type), request_id, command->operation_id, &stream->sd->host, stream->sd->id, client->apm_context); client->apm_callbacks.started (&event); mongoc_apm_command_started_cleanup (&event); bson_destroy (&doc); } /* fire command-succeeded event as if we'd used a modern write command. * note, cluster.request_id was incremented once for the write, again * for the getLastError, so cluster.request_id is no longer valid; used the * passed-in request_id instead. */ static void _mongoc_monitor_legacy_write_succeeded (mongoc_client_t *client, int64_t duration, mongoc_write_command_t *command, mongoc_server_stream_t *stream, int64_t request_id) { bson_t doc; mongoc_apm_command_succeeded_t event; ENTRY; if (!client->apm_callbacks.succeeded) { EXIT; } bson_init (&doc); /* * Unacknowledged writes must provide a CommandSucceededEvent with a { ok: 1 * } reply. * https://github.com/mongodb/specifications/blob/master/source/command-monitoring/command-monitoring.rst#unacknowledged-acknowledged-writes */ bson_append_int32 (&doc, "ok", 2, 1); bson_append_int32 (&doc, "n", 1, (int32_t) command->n_documents); mongoc_apm_command_succeeded_init ( &event, duration, &doc, _mongoc_command_type_to_name (command->type), request_id, command->operation_id, &stream->sd->host, stream->sd->id, client->apm_context); client->apm_callbacks.succeeded (&event); mongoc_apm_command_succeeded_cleanup (&event); bson_destroy (&doc); EXIT; } void _mongoc_write_command_delete_legacy (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error) { int64_t started; int32_t max_bson_obj_size; const uint8_t *data; mongoc_rpc_t rpc; uint32_t request_id; bson_iter_t q_iter; uint32_t len; int64_t limit = 0; char ns[MONGOC_NAMESPACE_MAX + 1]; bool r; bson_reader_t *reader; const bson_t *bson; bool eof; ENTRY; BSON_ASSERT (command); BSON_ASSERT (client); BSON_ASSERT (database); BSON_ASSERT (server_stream); BSON_ASSERT (collection); started = bson_get_monotonic_time (); max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream); if (!command->n_documents) { bson_set_error (error, MONGOC_ERROR_COLLECTION, MONGOC_ERROR_COLLECTION_DELETE_FAILED, "Cannot do an empty delete."); result->failed = true; EXIT; } bson_snprintf (ns, sizeof ns, "%s.%s", database, collection); reader = bson_reader_new_from_data (command->payload.data, command->payload.len); while ((bson = bson_reader_read (reader, &eof))) { /* the document is like { "q": { }, limit: <0 or 1> } */ r = (bson_iter_init (&q_iter, bson) && bson_iter_find (&q_iter, "q") && BSON_ITER_HOLDS_DOCUMENT (&q_iter)); BSON_ASSERT (r); bson_iter_document (&q_iter, &len, 
&data); BSON_ASSERT (data); BSON_ASSERT (len >= 5); if (len > max_bson_obj_size) { _mongoc_write_command_too_large_error ( error, 0, len, max_bson_obj_size); result->failed = true; bson_reader_destroy (reader); EXIT; } request_id = ++client->cluster.request_id; rpc.header.msg_len = 0; rpc.header.request_id = request_id; rpc.header.response_to = 0; rpc.header.opcode = MONGOC_OPCODE_DELETE; rpc.delete_.zero = 0; rpc.delete_.collection = ns; if (bson_iter_find (&q_iter, "limit") && (BSON_ITER_HOLDS_INT (&q_iter))) { limit = bson_iter_as_int64 (&q_iter); } rpc.delete_.flags = limit ? MONGOC_DELETE_SINGLE_REMOVE : MONGOC_DELETE_NONE; rpc.delete_.selector = data; _mongoc_monitor_legacy_write ( client, command, database, collection, server_stream, request_id); if (!mongoc_cluster_legacy_rpc_sendv_to_server ( &client->cluster, &rpc, server_stream, error)) { result->failed = true; bson_reader_destroy (reader); EXIT; } _mongoc_monitor_legacy_write_succeeded (client, bson_get_monotonic_time () - started, command, server_stream, request_id); started = bson_get_monotonic_time (); } bson_reader_destroy (reader); EXIT; } void _mongoc_write_command_insert_legacy (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error) { int64_t started; mongoc_iovec_t *iov; mongoc_rpc_t rpc; uint32_t size = 0; bool has_more; char ns[MONGOC_NAMESPACE_MAX + 1]; uint32_t n_docs_in_batch; uint32_t request_id = 0; uint32_t idx = 0; int32_t max_msg_size; int32_t max_bson_obj_size; bool singly; bson_reader_t *reader; const bson_t *bson; bool eof; int data_offset = 0; ENTRY; BSON_ASSERT (command); BSON_ASSERT (client); BSON_ASSERT (database); BSON_ASSERT (server_stream); BSON_ASSERT (collection); BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT); started = bson_get_monotonic_time (); max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream); max_msg_size = mongoc_server_stream_max_msg_size (server_stream); singly = !command->u.insert.allow_bulk_op_insert; if (!command->n_documents) { bson_set_error (error, MONGOC_ERROR_COLLECTION, MONGOC_ERROR_COLLECTION_INSERT_FAILED, "Cannot do an empty insert."); result->failed = true; EXIT; } bson_snprintf (ns, sizeof ns, "%s.%s", database, collection); iov = (mongoc_iovec_t *) bson_malloc ((sizeof *iov) * command->n_documents); again: has_more = false; n_docs_in_batch = 0; size = (uint32_t) (sizeof (mongoc_rpc_header_t) + 4 + strlen (database) + 1 + strlen (collection) + 1); reader = bson_reader_new_from_data (command->payload.data + data_offset, command->payload.len - data_offset); while ((bson = bson_reader_read (reader, &eof))) { BSON_ASSERT (n_docs_in_batch <= idx); BSON_ASSERT (idx <= command->n_documents); if (bson->len > max_bson_obj_size) { /* document is too large */ _mongoc_write_command_too_large_error ( error, idx, bson->len, max_bson_obj_size); data_offset += bson->len; if (command->flags.ordered) { /* send the batch so far (if any) and return the error */ break; } } else if ((n_docs_in_batch == 1 && singly) || size > (max_msg_size - bson->len)) { /* batch is full, send it and then start the next batch */ has_more = true; break; } else { /* add document to batch and continue building the batch */ iov[n_docs_in_batch].iov_base = (void *) bson_get_data (bson); iov[n_docs_in_batch].iov_len = bson->len; size += bson->len; n_docs_in_batch++; data_offset += bson->len; } idx++; } bson_reader_destroy 
(reader); if (n_docs_in_batch) { request_id = ++client->cluster.request_id; rpc.header.msg_len = 0; rpc.header.request_id = request_id; rpc.header.response_to = 0; rpc.header.opcode = MONGOC_OPCODE_INSERT; rpc.insert.flags = ((command->flags.ordered) ? MONGOC_INSERT_NONE : MONGOC_INSERT_CONTINUE_ON_ERROR); rpc.insert.collection = ns; rpc.insert.documents = iov; rpc.insert.n_documents = n_docs_in_batch; _mongoc_monitor_legacy_write ( client, command, database, collection, server_stream, request_id); if (!mongoc_cluster_legacy_rpc_sendv_to_server ( &client->cluster, &rpc, server_stream, error)) { result->failed = true; GOTO (cleanup); } _mongoc_monitor_legacy_write_succeeded (client, bson_get_monotonic_time () - started, command, server_stream, request_id); started = bson_get_monotonic_time (); } cleanup: if (has_more) { GOTO (again); } bson_free (iov); EXIT; } void _mongoc_write_command_update_legacy (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error) { int64_t started; int32_t max_bson_obj_size; mongoc_rpc_t rpc; uint32_t request_id = 0; bson_iter_t subiter, subsubiter; bson_t doc; bson_t update, selector; const uint8_t *data = NULL; uint32_t len = 0; size_t err_offset; bool val = false; char ns[MONGOC_NAMESPACE_MAX + 1]; int vflags = (BSON_VALIDATE_UTF8 | BSON_VALIDATE_UTF8_ALLOW_NULL | BSON_VALIDATE_DOLLAR_KEYS | BSON_VALIDATE_DOT_KEYS); bson_reader_t *reader; const bson_t *bson; bool eof; ENTRY; BSON_ASSERT (command); BSON_ASSERT (client); BSON_ASSERT (database); BSON_ASSERT (server_stream); BSON_ASSERT (collection); started = bson_get_monotonic_time (); max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream); reader = bson_reader_new_from_data (command->payload.data, command->payload.len); while ((bson = bson_reader_read (reader, &eof))) { if (bson_iter_init (&subiter, bson) && bson_iter_find (&subiter, "u") && BSON_ITER_HOLDS_DOCUMENT (&subiter)) { bson_iter_document (&subiter, &len, &data); BSON_ASSERT (bson_init_static (&doc, data, len)); if (bson_iter_init (&subsubiter, &doc) && bson_iter_next (&subsubiter) && (bson_iter_key (&subsubiter)[0] != '$') && !bson_validate ( &doc, (bson_validate_flags_t) vflags, &err_offset)) { result->failed = true; bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "update document is corrupt or contains " "invalid keys including $ or ."); bson_reader_destroy (reader); EXIT; } } else { result->failed = true; bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "updates is malformed."); bson_reader_destroy (reader); EXIT; } } bson_snprintf (ns, sizeof ns, "%s.%s", database, collection); bson_reader_destroy (reader); reader = bson_reader_new_from_data (command->payload.data, command->payload.len); while ((bson = bson_reader_read (reader, &eof))) { request_id = ++client->cluster.request_id; rpc.header.msg_len = 0; rpc.header.request_id = request_id; rpc.header.response_to = 0; rpc.header.opcode = MONGOC_OPCODE_UPDATE; rpc.update.zero = 0; rpc.update.collection = ns; rpc.update.flags = MONGOC_UPDATE_NONE; BSON_ASSERT (bson_iter_init (&subiter, bson)); while (bson_iter_next (&subiter)) { if (strcmp (bson_iter_key (&subiter), "u") == 0) { bson_iter_document (&subiter, &len, &data); if (len > max_bson_obj_size) { _mongoc_write_command_too_large_error ( error, 0, len, max_bson_obj_size); result->failed = true; bson_reader_destroy (reader); EXIT; } 
rpc.update.update = data; BSON_ASSERT (bson_init_static (&update, data, len)); } else if (strcmp (bson_iter_key (&subiter), "q") == 0) { bson_iter_document (&subiter, &len, &data); if (len > max_bson_obj_size) { _mongoc_write_command_too_large_error ( error, 0, len, max_bson_obj_size); result->failed = true; bson_reader_destroy (reader); EXIT; } rpc.update.selector = data; BSON_ASSERT (bson_init_static (&selector, data, len)); } else if (strcmp (bson_iter_key (&subiter), "multi") == 0) { val = bson_iter_bool (&subiter); if (val) { rpc.update.flags = (mongoc_update_flags_t) ( rpc.update.flags | MONGOC_UPDATE_MULTI_UPDATE); } } else if (strcmp (bson_iter_key (&subiter), "upsert") == 0) { val = bson_iter_bool (&subiter); if (val) { rpc.update.flags = (mongoc_update_flags_t) ( rpc.update.flags | MONGOC_UPDATE_UPSERT); } } } _mongoc_monitor_legacy_write ( client, command, database, collection, server_stream, request_id); if (!mongoc_cluster_legacy_rpc_sendv_to_server ( &client->cluster, &rpc, server_stream, error)) { result->failed = true; bson_reader_destroy (reader); EXIT; } _mongoc_monitor_legacy_write_succeeded (client, bson_get_monotonic_time () - started, command, server_stream, request_id); started = bson_get_monotonic_time (); } bson_reader_destroy (reader); } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-write-command-private.h0000644000076500000240000002113313572250760027673 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_WRITE_COMMAND_PRIVATE_H #define MONGOC_WRITE_COMMAND_PRIVATE_H #include #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-server-stream-private.h" #include "mongoc/mongoc-buffer-private.h" BSON_BEGIN_DECLS /* forward decl */ struct _mongoc_crud_opts_t; #define MONGOC_WRITE_COMMAND_DELETE 0 #define MONGOC_WRITE_COMMAND_INSERT 1 #define MONGOC_WRITE_COMMAND_UPDATE 2 /* MongoDB has a extra allowance to allow updating 16mb document, as the update * operators would otherwise overflow the 16mb object limit. See SERVER-10643 * for context. */ #define BSON_OBJECT_ALLOWANCE (16 * 1024) struct _mongoc_bulk_write_flags_t { bool ordered; bool bypass_document_validation; bool has_collation; bool has_multi_write; bool has_array_filters; }; typedef struct { int type; mongoc_buffer_t payload; uint32_t n_documents; mongoc_bulk_write_flags_t flags; int64_t operation_id; bson_t cmd_opts; union { struct { bool allow_bulk_op_insert; } insert; } u; } mongoc_write_command_t; typedef struct { uint32_t nInserted; uint32_t nMatched; uint32_t nModified; uint32_t nRemoved; uint32_t nUpserted; /* like [{"index": int, "code": int, "errmsg": str}, ...] */ bson_t writeErrors; /* like [{"index": int, "_id": value}, ...] */ bson_t upserted; uint32_t n_writeConcernErrors; /* like [{"code": 64, "errmsg": "duplicate"}, ...] */ bson_t writeConcernErrors; /* like ["TransientTransactionError", ...] 
*/ bson_t errorLabels; bool failed; /* The command failed */ bool must_stop; /* The stream may have been disconnected */ bson_error_t error; uint32_t upsert_append_count; } mongoc_write_result_t; typedef enum { MONGOC_WRITE_ERR_NONE, MONGOC_WRITE_ERR_OTHER, MONGOC_WRITE_ERR_RETRY, MONGOC_WRITE_ERR_WRITE_CONCERN, } mongoc_write_err_type_t; const char * _mongoc_command_type_to_field_name (int command_type); const char * _mongoc_command_type_to_name (int command_type); void _mongoc_write_command_destroy (mongoc_write_command_t *command); void _mongoc_write_command_init (bson_t *doc, mongoc_write_command_t *command, const char *collection); void _mongoc_write_command_init_insert (mongoc_write_command_t *command, const bson_t *document, const bson_t *cmd_opts, mongoc_bulk_write_flags_t flags, int64_t operation_id, bool allow_bulk_op_insert); void _mongoc_write_command_init_insert_idl (mongoc_write_command_t *command, const bson_t *document, const bson_t *cmd_opts, int64_t operation_id, bool allow_bulk_op_insert); void _mongoc_write_command_init_delete (mongoc_write_command_t *command, const bson_t *selectors, const bson_t *cmd_opts, const bson_t *opts, mongoc_bulk_write_flags_t flags, int64_t operation_id); void _mongoc_write_command_init_delete_idl (mongoc_write_command_t *command, const bson_t *selector, const bson_t *cmd_opts, const bson_t *opts, int64_t operation_id); void _mongoc_write_command_init_update (mongoc_write_command_t *command, const bson_t *selector, const bson_t *update, const bson_t *opts, mongoc_bulk_write_flags_t flags, int64_t operation_id); void _mongoc_write_command_init_update_idl (mongoc_write_command_t *command, const bson_t *selector, const bson_t *update, const bson_t *opts, int64_t operation_id); void _mongoc_write_command_insert_append (mongoc_write_command_t *command, const bson_t *document); void _mongoc_write_command_update_append (mongoc_write_command_t *command, const bson_t *selector, const bson_t *update, const bson_t *opts); void _mongoc_write_command_delete_append (mongoc_write_command_t *command, const bson_t *selector, const bson_t *opts); void _mongoc_write_command_too_large_error (bson_error_t *error, int32_t idx, int32_t len, int32_t max_bson_size); void _mongoc_write_command_execute (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, const mongoc_write_concern_t *write_concern, uint32_t offset, mongoc_client_session_t *cs, mongoc_write_result_t *result); void _mongoc_write_command_execute_idl (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, const struct _mongoc_crud_opts_t *crud, mongoc_write_result_t *result); void _mongoc_write_result_init (mongoc_write_result_t *result); void _mongoc_write_result_append_upsert (mongoc_write_result_t *result, int32_t idx, const bson_value_t *value); int32_t _mongoc_write_result_merge_arrays (uint32_t offset, mongoc_write_result_t *result, bson_t *dest, bson_iter_t *iter); void _mongoc_write_result_merge (mongoc_write_result_t *result, mongoc_write_command_t *command, const bson_t *reply, uint32_t offset); #define MONGOC_WRITE_RESULT_COMPLETE(_result, ...) 
\ _mongoc_write_result_complete (_result, __VA_ARGS__, NULL) bool _mongoc_write_result_complete (mongoc_write_result_t *result, int32_t error_api_version, const mongoc_write_concern_t *wc, mongoc_error_domain_t err_domain_override, bson_t *reply, bson_error_t *error, ...); void _mongoc_write_result_destroy (mongoc_write_result_t *result); void _append_array_from_command (mongoc_write_command_t *command, bson_t *bson); mongoc_write_err_type_t _mongoc_write_error_get_type (bool cmd_ret, const bson_error_t *cmd_err, const bson_t *reply); bool _mongoc_write_error_update_if_unsupported_storage_engine (bool cmd_ret, bson_error_t *cmd_err, bson_t *reply); BSON_END_DECLS #endif /* MONGOC_WRITE_COMMAND_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-write-command.c0000644000076500000240000014257313572250760026232 0ustar alcaeusstaff/* * Copyright 2014 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include "mongoc/mongoc-client-private.h" #include "mongoc/mongoc-client-session-private.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-trace-private.h" #include "mongoc/mongoc-write-command-private.h" #include "mongoc/mongoc-write-command-legacy-private.h" #include "mongoc/mongoc-write-concern-private.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-opts-private.h" /* * TODO: * * - Remove error parameter to ops, favor result->error. */ typedef void (*mongoc_write_op_t) (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error); /* indexed by MONGOC_WRITE_COMMAND_DELETE, INSERT, UPDATE */ static const char *gCommandNames[] = {"delete", "insert", "update"}; static const char *gCommandFields[] = {"deletes", "documents", "updates"}; static const uint32_t gCommandFieldLens[] = {7, 9, 7}; static mongoc_write_op_t gLegacyWriteOps[3] = { _mongoc_write_command_delete_legacy, _mongoc_write_command_insert_legacy, _mongoc_write_command_update_legacy}; const char * _mongoc_command_type_to_name (int command_type) { return gCommandNames[command_type]; } const char * _mongoc_command_type_to_field_name (int command_type) { return gCommandFields[command_type]; } void _mongoc_write_command_insert_append (mongoc_write_command_t *command, const bson_t *document) { bson_iter_t iter; bson_oid_t oid; bson_t tmp; ENTRY; BSON_ASSERT (command); BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_INSERT); BSON_ASSERT (document); BSON_ASSERT (document->len >= 5); /* * If the document does not contain an "_id" field, we need to generate * a new oid for "_id". 
*/ if (!bson_iter_init_find (&iter, document, "_id")) { bson_init (&tmp); bson_oid_init (&oid, NULL); BSON_APPEND_OID (&tmp, "_id", &oid); bson_concat (&tmp, document); _mongoc_buffer_append (&command->payload, bson_get_data (&tmp), tmp.len); bson_destroy (&tmp); } else { _mongoc_buffer_append ( &command->payload, bson_get_data (document), document->len); } command->n_documents++; EXIT; } void _mongoc_write_command_update_append (mongoc_write_command_t *command, const bson_t *selector, const bson_t *update, const bson_t *opts) { bson_t document; ENTRY; BSON_ASSERT (command); BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_UPDATE); BSON_ASSERT (selector && update); bson_init (&document); BSON_APPEND_DOCUMENT (&document, "q", selector); if (_mongoc_document_is_pipeline (update)) { BSON_APPEND_ARRAY (&document, "u", update); } else { BSON_APPEND_DOCUMENT (&document, "u", update); } if (opts) { bson_concat (&document, opts); } _mongoc_buffer_append ( &command->payload, bson_get_data (&document), document.len); command->n_documents++; bson_destroy (&document); EXIT; } void _mongoc_write_command_delete_append (mongoc_write_command_t *command, const bson_t *selector, const bson_t *opts) { bson_t document; ENTRY; BSON_ASSERT (command); BSON_ASSERT (command->type == MONGOC_WRITE_COMMAND_DELETE); BSON_ASSERT (selector); BSON_ASSERT (selector->len >= 5); bson_init (&document); BSON_APPEND_DOCUMENT (&document, "q", selector); if (opts) { bson_concat (&document, opts); } _mongoc_buffer_append ( &command->payload, bson_get_data (&document), document.len); command->n_documents++; bson_destroy (&document); EXIT; } void _mongoc_write_command_init_bulk (mongoc_write_command_t *command, int type, mongoc_bulk_write_flags_t flags, int64_t operation_id, const bson_t *opts) { ENTRY; BSON_ASSERT (command); command->type = type; command->flags = flags; command->operation_id = operation_id; if (!bson_empty0 (opts)) { bson_copy_to (opts, &command->cmd_opts); } else { bson_init (&command->cmd_opts); } _mongoc_buffer_init (&command->payload, NULL, 0, NULL, NULL); command->n_documents = 0; EXIT; } void _mongoc_write_command_init_insert (mongoc_write_command_t *command, /* IN */ const bson_t *document, /* IN */ const bson_t *cmd_opts, /* IN */ mongoc_bulk_write_flags_t flags, /* IN */ int64_t operation_id, /* IN */ bool allow_bulk_op_insert) /* IN */ { ENTRY; BSON_ASSERT (command); _mongoc_write_command_init_bulk ( command, MONGOC_WRITE_COMMAND_INSERT, flags, operation_id, cmd_opts); command->u.insert.allow_bulk_op_insert = (uint8_t) allow_bulk_op_insert; /* must handle NULL document from mongoc_collection_insert_bulk */ if (document) { _mongoc_write_command_insert_append (command, document); } EXIT; } void _mongoc_write_command_init_insert_idl (mongoc_write_command_t *command, const bson_t *document, const bson_t *cmd_opts, int64_t operation_id, bool allow_bulk_op_insert) { mongoc_bulk_write_flags_t flags = MONGOC_BULK_WRITE_FLAGS_INIT; ENTRY; BSON_ASSERT (command); _mongoc_write_command_init_bulk ( command, MONGOC_WRITE_COMMAND_INSERT, flags, operation_id, cmd_opts); command->u.insert.allow_bulk_op_insert = (uint8_t) allow_bulk_op_insert; /* must handle NULL document from mongoc_collection_insert_bulk */ if (document) { _mongoc_write_command_insert_append (command, document); } EXIT; } void _mongoc_write_command_init_delete (mongoc_write_command_t *command, /* IN */ const bson_t *selector, /* IN */ const bson_t *cmd_opts, /* IN */ const bson_t *opts, /* IN */ mongoc_bulk_write_flags_t flags, /* IN */ int64_t 
operation_id) /* IN */ { ENTRY; BSON_ASSERT (command); BSON_ASSERT (selector); _mongoc_write_command_init_bulk ( command, MONGOC_WRITE_COMMAND_DELETE, flags, operation_id, cmd_opts); _mongoc_write_command_delete_append (command, selector, opts); EXIT; } void _mongoc_write_command_init_delete_idl (mongoc_write_command_t *command, const bson_t *selector, const bson_t *cmd_opts, const bson_t *opts, int64_t operation_id) { mongoc_bulk_write_flags_t flags = MONGOC_BULK_WRITE_FLAGS_INIT; ENTRY; BSON_ASSERT (command); BSON_ASSERT (selector); _mongoc_write_command_init_bulk ( command, MONGOC_WRITE_COMMAND_DELETE, flags, operation_id, cmd_opts); _mongoc_write_command_delete_append (command, selector, opts); EXIT; } void _mongoc_write_command_init_update (mongoc_write_command_t *command, /* IN */ const bson_t *selector, /* IN */ const bson_t *update, /* IN */ const bson_t *opts, /* IN */ mongoc_bulk_write_flags_t flags, /* IN */ int64_t operation_id) /* IN */ { ENTRY; BSON_ASSERT (command); BSON_ASSERT (selector); BSON_ASSERT (update); _mongoc_write_command_init_bulk ( command, MONGOC_WRITE_COMMAND_UPDATE, flags, operation_id, NULL); _mongoc_write_command_update_append (command, selector, update, opts); EXIT; } void _mongoc_write_command_init_update_idl (mongoc_write_command_t *command, const bson_t *selector, const bson_t *update, const bson_t *opts, int64_t operation_id) { mongoc_bulk_write_flags_t flags = MONGOC_BULK_WRITE_FLAGS_INIT; ENTRY; BSON_ASSERT (command); _mongoc_write_command_init_bulk ( command, MONGOC_WRITE_COMMAND_UPDATE, flags, operation_id, NULL); _mongoc_write_command_update_append (command, selector, update, opts); EXIT; } /* takes initialized bson_t *doc and begins formatting a write command */ void _mongoc_write_command_init (bson_t *doc, mongoc_write_command_t *command, const char *collection) { ENTRY; if (!command->n_documents) { EXIT; } BSON_APPEND_UTF8 (doc, gCommandNames[command->type], collection); BSON_APPEND_BOOL (doc, "ordered", command->flags.ordered); if (command->flags.bypass_document_validation) { BSON_APPEND_BOOL (doc, "bypassDocumentValidation", command->flags.bypass_document_validation); } EXIT; } /* *------------------------------------------------------------------------- * * _mongoc_write_command_too_large_error -- * * Fill a bson_error_t and optional bson_t with error info after * receiving a document for bulk insert, update, or remove that is * larger than max_bson_size. * * "err_doc" should be NULL or an empty initialized bson_t. * * Returns: * None. * * Side effects: * "error" and optionally "err_doc" are filled out. * *------------------------------------------------------------------------- */ void _mongoc_write_command_too_large_error (bson_error_t *error, int32_t idx, int32_t len, int32_t max_bson_size) { bson_set_error (error, MONGOC_ERROR_BSON, MONGOC_ERROR_BSON_INVALID, "Document %u is too large for the cluster. " "Document is %u bytes, max is %d.", idx, len, max_bson_size); } void _empty_error (mongoc_write_command_t *command, bson_error_t *error) { static const uint32_t codes[] = {MONGOC_ERROR_COLLECTION_DELETE_FAILED, MONGOC_ERROR_COLLECTION_INSERT_FAILED, MONGOC_ERROR_COLLECTION_UPDATE_FAILED}; bson_set_error (error, MONGOC_ERROR_COLLECTION, codes[command->type], "Cannot do an empty %s", gCommandNames[command->type]); } bool _mongoc_write_command_will_overflow (uint32_t len_so_far, uint32_t document_len, uint32_t n_documents_written, int32_t max_bson_size, int32_t max_write_batch_size) { /* max BSON object size + 16k bytes. 
* server guarantees there is enough room: SERVER-10643 */ int32_t max_cmd_size = max_bson_size + BSON_OBJECT_ALLOWANCE; BSON_ASSERT (max_bson_size); if (len_so_far + document_len > max_cmd_size) { return true; } else if (max_write_batch_size > 0 && n_documents_written >= max_write_batch_size) { return true; } return false; } static void _mongoc_write_opmsg (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, const mongoc_write_concern_t *write_concern, uint32_t index_offset, mongoc_client_session_t *cs, mongoc_write_result_t *result, bson_error_t *error) { mongoc_cmd_parts_t parts; bson_iter_t iter; bson_t cmd; bson_t reply; bool ret = false; int32_t max_msg_size; int32_t max_bson_obj_size; int32_t max_document_count; uint32_t header; uint32_t payload_batch_size = 0; uint32_t payload_total_offset = 0; bool ship_it = false; int document_count = 0; int32_t len; mongoc_server_stream_t *retry_server_stream = NULL; ENTRY; BSON_ASSERT (command); BSON_ASSERT (client); BSON_ASSERT (database); BSON_ASSERT (server_stream); BSON_ASSERT (collection); max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream); max_msg_size = mongoc_server_stream_max_msg_size (server_stream); max_document_count = mongoc_server_stream_max_write_batch_size (server_stream); bson_init (&cmd); _mongoc_write_command_init (&cmd, command, collection); mongoc_cmd_parts_init (&parts, client, database, MONGOC_QUERY_NONE, &cmd); parts.assembled.operation_id = command->operation_id; parts.is_write_command = true; if (!mongoc_cmd_parts_set_write_concern ( &parts, write_concern, server_stream->sd->max_wire_version, error)) { bson_destroy (&cmd); mongoc_cmd_parts_cleanup (&parts); EXIT; } if (parts.assembled.is_acknowledged) { mongoc_cmd_parts_set_session (&parts, cs); } /* Write commands that include multi-document operations are not retryable. * Set this explicitly so that mongoc_cmd_parts_assemble does not need to * inspect the command body later. */ parts.allow_txn_number = (command->flags.has_multi_write || !parts.assembled.is_acknowledged) ? 
MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_NO : MONGOC_CMD_PARTS_ALLOW_TXN_NUMBER_YES; BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts)); if (!mongoc_cmd_parts_append_opts ( &parts, &iter, server_stream->sd->max_wire_version, error)) { bson_destroy (&cmd); mongoc_cmd_parts_cleanup (&parts); EXIT; } if (!mongoc_cmd_parts_assemble (&parts, server_stream, error)) { bson_destroy (&cmd); mongoc_cmd_parts_cleanup (&parts); EXIT; } /* * OP_MSG header == 16 byte * + 4 bytes flagBits * + 1 byte payload type = 1 * + 1 byte payload type = 2 * + 4 byte size of payload * == 26 bytes opcode overhead * + X Full command document {insert: "test", writeConcern: {...}} * + Y command identifier ("documents", "deletes", "updates") ( + \0) */ header = 26 + parts.assembled.command->len + gCommandFieldLens[command->type] + 1; do { memcpy (&len, command->payload.data + payload_batch_size + payload_total_offset, 4); len = BSON_UINT32_FROM_LE (len); if (len > max_bson_obj_size + BSON_OBJECT_ALLOWANCE) { /* Quit if the document is too large */ _mongoc_write_command_too_large_error ( error, index_offset, len, max_bson_obj_size); result->failed = true; break; } else if ((payload_batch_size + header) + len <= max_msg_size) { /* The current batch is still under max batch size in bytes */ payload_batch_size += len; /* If this document filled the maximum document count */ if (++document_count == max_document_count) { ship_it = true; /* If this document is the last document we have */ } else if (payload_batch_size + payload_total_offset == command->payload.len) { ship_it = true; } else { ship_it = false; } } else { ship_it = true; } if (ship_it) { bool is_retryable = parts.is_retryable_write; mongoc_write_err_type_t error_type; /* Seek past the document offset we have already sent */ parts.assembled.payload = command->payload.data + payload_total_offset; /* Only send the documents up to this size */ parts.assembled.payload_size = payload_batch_size; parts.assembled.payload_identifier = gCommandFields[command->type]; /* increment the transaction number for the first attempt of each * retryable write command */ if (is_retryable) { bson_iter_t txn_number_iter; BSON_ASSERT (bson_iter_init_find ( &txn_number_iter, parts.assembled.command, "txnNumber")); bson_iter_overwrite_int64 ( &txn_number_iter, ++parts.assembled.session->server_session->txn_number); } retry: ret = mongoc_cluster_run_command_monitored ( &client->cluster, &parts.assembled, &reply, error); /* Add this batch size so we skip these documents next time */ payload_total_offset += payload_batch_size; payload_batch_size = 0; /* If a retryable error is encountered and the write is retryable, * select a new writable stream and retry. If server selection fails or * the selected server does not support retryable writes, fall through * and allow the original error to be reported. 
*/ error_type = _mongoc_write_error_get_type (ret, error, &reply); if (is_retryable) { _mongoc_write_error_update_if_unsupported_storage_engine ( ret, error, &reply); } if (is_retryable && error_type == MONGOC_WRITE_ERR_RETRY) { bson_error_t ignored_error; /* each write command may be retried at most once */ is_retryable = false; if (retry_server_stream) { mongoc_server_stream_cleanup (retry_server_stream); } retry_server_stream = mongoc_cluster_stream_for_writes ( &client->cluster, cs, NULL, &ignored_error); if (retry_server_stream && retry_server_stream->sd->max_wire_version >= WIRE_VERSION_RETRY_WRITES) { parts.assembled.server_stream = retry_server_stream; bson_destroy (&reply); GOTO (retry); } } if (!ret) { result->failed = true; /* Conservatively set must_stop to true. Per CDRIVER-3305 we * shouldn't stop for unordered bulk writes, but also need to check * if the server stream was invalidated per CDRIVER-3306. */ result->must_stop = true; } /* Result merge needs to know the absolute index for a document * so it can rewrite the error message which contains the relative * document index per batch */ _mongoc_write_result_merge (result, command, &reply, index_offset); index_offset += document_count; document_count = 0; bson_destroy (&reply); } /* While we have more documents to write */ } while (payload_total_offset < command->payload.len && !result->must_stop); bson_destroy (&cmd); mongoc_cmd_parts_cleanup (&parts); if (retry_server_stream) { mongoc_server_stream_cleanup (retry_server_stream); } if (ret) { /* if a retry succeeded, clear the initial error */ memset (&result->error, 0, sizeof (bson_error_t)); } EXIT; } void _append_array_from_command (mongoc_write_command_t *command, bson_t *bson) { bson_t ar; bson_reader_t *reader; char str[16]; uint32_t i = 0; const char *key; bool eof; const bson_t *current; reader = bson_reader_new_from_data (command->payload.data, command->payload.len); bson_append_array_begin (bson, gCommandFields[command->type], gCommandFieldLens[command->type], &ar); while ((current = bson_reader_read (reader, &eof))) { bson_uint32_to_string (i, &key, str, sizeof str); BSON_APPEND_DOCUMENT (&ar, key, current); i++; } bson_append_array_end (bson, &ar); bson_reader_destroy (reader); } /* Assemble the base @cmd with all of the command options. * @parts is always initialized, even on error. * This is called twice in _mongoc_write_opquery. * Once with no payload documents, to determine the total size. And once with * payload documents, to send the final command. 
*/ static bool _assemble_cmd (bson_t *cmd, mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const mongoc_write_concern_t *write_concern, mongoc_cmd_parts_t *parts, bson_error_t *error) { bool ret; bson_iter_t iter; mongoc_cmd_parts_init (parts, client, database, MONGOC_QUERY_NONE, cmd); parts->is_write_command = true; parts->assembled.operation_id = command->operation_id; ret = mongoc_cmd_parts_set_write_concern ( parts, write_concern, server_stream->sd->max_wire_version, error); if (ret) { BSON_ASSERT (bson_iter_init (&iter, &command->cmd_opts)); ret = mongoc_cmd_parts_append_opts ( parts, &iter, server_stream->sd->max_wire_version, error); } if (ret) { ret = mongoc_cmd_parts_assemble (parts, server_stream, error); } return ret; } static void _mongoc_write_opquery (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, const mongoc_write_concern_t *write_concern, uint32_t offset, mongoc_write_result_t *result, bson_error_t *error) { mongoc_cmd_parts_t parts; const char *key; uint32_t len = 0; bson_t ar; bson_t cmd; char str[16]; bool has_more; bool ret = false; uint32_t i; int32_t max_bson_obj_size; int32_t max_write_batch_size; uint32_t overhead; uint32_t key_len; int data_offset = 0; bson_reader_t *reader; const bson_t *bson; bool eof; ENTRY; BSON_ASSERT (command); BSON_ASSERT (client); BSON_ASSERT (database); BSON_ASSERT (server_stream); BSON_ASSERT (collection); bson_init (&cmd); max_bson_obj_size = mongoc_server_stream_max_bson_obj_size (server_stream); max_write_batch_size = mongoc_server_stream_max_write_batch_size (server_stream); again: has_more = false; i = 0; _mongoc_write_command_init (&cmd, command, collection); /* If any part of assembling failed, return with failure. */ if (!_assemble_cmd (&cmd, command, client, server_stream, database, write_concern, &parts, error)) { result->failed = true; bson_destroy (&cmd); mongoc_cmd_parts_cleanup (&parts); EXIT; } /* Use the assembled command to compute the overhead, since it may be a new * BSON document with options applied. If no options were applied, then * parts.assembled.command points to cmd. The constant 2 is due to 1 byte to * specify array type and 1 byte for field name's null terminator. */ overhead = parts.assembled.command->len + 2 + gCommandFieldLens[command->type]; /* Toss out the assembled command, we'll assemble again after adding all of * the payload documents. 
*/ mongoc_cmd_parts_cleanup (&parts); reader = bson_reader_new_from_data (command->payload.data + data_offset, command->payload.len - data_offset); bson_append_array_begin (&cmd, gCommandFields[command->type], gCommandFieldLens[command->type], &ar); while ((bson = bson_reader_read (reader, &eof))) { key_len = (uint32_t) bson_uint32_to_string (i, &key, str, sizeof str); len = bson->len; /* 1 byte to specify document type, 1 byte for key's null terminator */ if (_mongoc_write_command_will_overflow (overhead, key_len + len + 2 + ar.len, i, max_bson_obj_size, max_write_batch_size)) { has_more = true; break; } BSON_APPEND_DOCUMENT (&ar, key, bson); data_offset += len; i++; } bson_append_array_end (&cmd, &ar); if (!i) { _mongoc_write_command_too_large_error (error, i, len, max_bson_obj_size); result->failed = true; result->must_stop = true; ret = false; if (bson) { data_offset += len; } } else { bson_t reply; ret = _assemble_cmd (&cmd, command, client, server_stream, database, write_concern, &parts, error); if (ret) { ret = mongoc_cluster_run_command_monitored ( &client->cluster, &parts.assembled, &reply, error); } else { bson_init (&reply); } if (!ret) { result->failed = true; if (bson_empty (&reply)) { /* assembling failed, or a network error running the command */ result->must_stop = true; } } _mongoc_write_result_merge (result, command, &reply, offset); offset += i; bson_destroy (&reply); mongoc_cmd_parts_cleanup (&parts); } bson_reader_destroy (reader); if (has_more && (ret || !command->flags.ordered) && !result->must_stop) { bson_reinit (&cmd); GOTO (again); } bson_destroy (&cmd); EXIT; } void _mongoc_write_command_execute ( mongoc_write_command_t *command, /* IN */ mongoc_client_t *client, /* IN */ mongoc_server_stream_t *server_stream, /* IN */ const char *database, /* IN */ const char *collection, /* IN */ const mongoc_write_concern_t *write_concern, /* IN */ uint32_t offset, /* IN */ mongoc_client_session_t *cs, /* IN */ mongoc_write_result_t *result) /* OUT */ { mongoc_crud_opts_t crud = {0}; ENTRY; BSON_ASSERT (command); BSON_ASSERT (client); BSON_ASSERT (server_stream); BSON_ASSERT (database); BSON_ASSERT (collection); BSON_ASSERT (result); if (!write_concern) { write_concern = client->write_concern; } if (!mongoc_write_concern_is_valid (write_concern)) { bson_set_error (&result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "The write concern is invalid."); result->failed = true; EXIT; } crud.client_session = cs; crud.writeConcern = (mongoc_write_concern_t *) write_concern; _mongoc_write_command_execute_idl (command, client, server_stream, database, collection, offset, &crud, result); EXIT; } void _mongoc_write_command_execute_idl (mongoc_write_command_t *command, mongoc_client_t *client, mongoc_server_stream_t *server_stream, const char *database, const char *collection, uint32_t offset, const mongoc_crud_opts_t *crud, mongoc_write_result_t *result) { ENTRY; BSON_ASSERT (command); BSON_ASSERT (client); BSON_ASSERT (server_stream); BSON_ASSERT (database); BSON_ASSERT (collection); BSON_ASSERT (result); if (command->flags.has_collation) { if (!mongoc_write_concern_is_acknowledged (crud->writeConcern)) { result->failed = true; bson_set_error (&result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set collation for unacknowledged writes"); EXIT; } if (server_stream->sd->max_wire_version < WIRE_VERSION_COLLATION) { bson_set_error (&result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support 
collation"); result->failed = true; EXIT; } } if (command->flags.has_array_filters) { if (!mongoc_write_concern_is_acknowledged (crud->writeConcern)) { result->failed = true; bson_set_error (&result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot use array filters with unacknowledged writes"); EXIT; } if (server_stream->sd->max_wire_version < WIRE_VERSION_ARRAY_FILTERS) { bson_set_error (&result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION, "The selected server does not support array filters"); result->failed = true; EXIT; } } if (command->flags.bypass_document_validation) { if (!mongoc_write_concern_is_acknowledged (crud->writeConcern)) { result->failed = true; bson_set_error ( &result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot set bypassDocumentValidation for unacknowledged writes"); EXIT; } } if (crud->client_session && !mongoc_write_concern_is_acknowledged (crud->writeConcern)) { result->failed = true; bson_set_error (&result->error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Cannot use client session with unacknowledged writes"); EXIT; } if (command->payload.len == 0) { _empty_error (command, &result->error); EXIT; } if (server_stream->sd->max_wire_version >= WIRE_VERSION_OP_MSG) { _mongoc_write_opmsg (command, client, server_stream, database, collection, crud->writeConcern, offset, crud->client_session, result, &result->error); } else { if (mongoc_write_concern_is_acknowledged (crud->writeConcern)) { _mongoc_write_opquery (command, client, server_stream, database, collection, crud->writeConcern, offset, result, &result->error); } else { gLegacyWriteOps[command->type](command, client, server_stream, database, collection, offset, result, &result->error); } } EXIT; } void _mongoc_write_command_destroy (mongoc_write_command_t *command) { ENTRY; if (command) { bson_destroy (&command->cmd_opts); _mongoc_buffer_destroy (&command->payload); } EXIT; } void _mongoc_write_result_init (mongoc_write_result_t *result) /* IN */ { ENTRY; BSON_ASSERT (result); memset (result, 0, sizeof *result); bson_init (&result->upserted); bson_init (&result->writeConcernErrors); bson_init (&result->writeErrors); bson_init (&result->errorLabels); EXIT; } void _mongoc_write_result_destroy (mongoc_write_result_t *result) { ENTRY; BSON_ASSERT (result); bson_destroy (&result->upserted); bson_destroy (&result->writeConcernErrors); bson_destroy (&result->writeErrors); bson_destroy (&result->errorLabels); EXIT; } void _mongoc_write_result_append_upsert (mongoc_write_result_t *result, int32_t idx, const bson_value_t *value) { bson_t child; const char *keyptr = NULL; char key[12]; int len; BSON_ASSERT (result); BSON_ASSERT (value); len = (int) bson_uint32_to_string ( result->upsert_append_count, &keyptr, key, sizeof key); bson_append_document_begin (&result->upserted, keyptr, len, &child); BSON_APPEND_INT32 (&child, "index", idx); BSON_APPEND_VALUE (&child, "_id", value); bson_append_document_end (&result->upserted, &child); result->upsert_append_count++; } int32_t _mongoc_write_result_merge_arrays (uint32_t offset, mongoc_write_result_t *result, /* IN */ bson_t *dest, /* IN */ bson_iter_t *iter) /* IN */ { const bson_value_t *value; bson_iter_t ar; bson_iter_t citer; int32_t idx; int32_t count = 0; int32_t aridx; bson_t child; const char *keyptr = NULL; char key[12]; int len; ENTRY; BSON_ASSERT (result); BSON_ASSERT (dest); BSON_ASSERT (iter); BSON_ASSERT (BSON_ITER_HOLDS_ARRAY (iter)); aridx = bson_count_keys (dest); if 
(bson_iter_recurse (iter, &ar)) { while (bson_iter_next (&ar)) { if (BSON_ITER_HOLDS_DOCUMENT (&ar) && bson_iter_recurse (&ar, &citer)) { len = (int) bson_uint32_to_string (aridx++, &keyptr, key, sizeof key); bson_append_document_begin (dest, keyptr, len, &child); while (bson_iter_next (&citer)) { if (BSON_ITER_IS_KEY (&citer, "index")) { idx = bson_iter_int32 (&citer) + offset; BSON_APPEND_INT32 (&child, "index", idx); } else { value = bson_iter_value (&citer); BSON_APPEND_VALUE (&child, bson_iter_key (&citer), value); } } bson_append_document_end (dest, &child); count++; } } } RETURN (count); } void _mongoc_write_result_merge (mongoc_write_result_t *result, /* IN */ mongoc_write_command_t *command, /* IN */ const bson_t *reply, /* IN */ uint32_t offset) { int32_t server_index = 0; const bson_value_t *value; bson_iter_t iter; bson_iter_t citer; bson_iter_t ar; int32_t n_upserted = 0; int32_t affected = 0; ENTRY; BSON_ASSERT (result); BSON_ASSERT (reply); if (bson_iter_init_find (&iter, reply, "n") && BSON_ITER_HOLDS_INT32 (&iter)) { affected = bson_iter_int32 (&iter); } if (bson_iter_init_find (&iter, reply, "writeErrors") && BSON_ITER_HOLDS_ARRAY (&iter) && bson_iter_recurse (&iter, &citer) && bson_iter_next (&citer)) { result->failed = true; } switch (command->type) { case MONGOC_WRITE_COMMAND_INSERT: result->nInserted += affected; break; case MONGOC_WRITE_COMMAND_DELETE: result->nRemoved += affected; break; case MONGOC_WRITE_COMMAND_UPDATE: /* server returns each upserted _id with its index into this batch * look for "upserted": [{"index": 4, "_id": ObjectId()}, ...] */ if (bson_iter_init_find (&iter, reply, "upserted")) { if (BSON_ITER_HOLDS_ARRAY (&iter) && (bson_iter_recurse (&iter, &ar))) { while (bson_iter_next (&ar)) { if (BSON_ITER_HOLDS_DOCUMENT (&ar) && bson_iter_recurse (&ar, &citer) && bson_iter_find (&citer, "index") && BSON_ITER_HOLDS_INT32 (&citer)) { server_index = bson_iter_int32 (&citer); if (bson_iter_recurse (&ar, &citer) && bson_iter_find (&citer, "_id")) { value = bson_iter_value (&citer); _mongoc_write_result_append_upsert ( result, offset + server_index, value); n_upserted++; } } } } result->nUpserted += n_upserted; /* * XXX: The following addition to nMatched needs some checking. * I'm highly skeptical of it. 
*/ result->nMatched += BSON_MAX (0, (affected - n_upserted)); } else { result->nMatched += affected; } if (bson_iter_init_find (&iter, reply, "nModified") && BSON_ITER_HOLDS_INT32 (&iter)) { result->nModified += bson_iter_int32 (&iter); } break; default: BSON_ASSERT (false); break; } if (bson_iter_init_find (&iter, reply, "writeErrors") && BSON_ITER_HOLDS_ARRAY (&iter)) { _mongoc_write_result_merge_arrays ( offset, result, &result->writeErrors, &iter); } if (bson_iter_init_find (&iter, reply, "writeConcernError") && BSON_ITER_HOLDS_DOCUMENT (&iter)) { uint32_t len; const uint8_t *data; bson_t write_concern_error; char str[16]; const char *key; /* writeConcernError is a subdocument in the server response * append it to the result->writeConcernErrors array */ bson_iter_document (&iter, &len, &data); BSON_ASSERT (bson_init_static (&write_concern_error, data, len)); bson_uint32_to_string ( result->n_writeConcernErrors, &key, str, sizeof str); if (!bson_append_document ( &result->writeConcernErrors, key, -1, &write_concern_error)) { MONGOC_ERROR ("Error adding \"%s\" to writeConcernErrors.\n", key); } result->n_writeConcernErrors++; } /* inefficient if there are ever large numbers: for each label in each err, * we linear-search result->errorLabels to see if it's included yet */ _mongoc_bson_array_copy_labels_to (reply, &result->errorLabels); EXIT; } /* * If error is not set, set code from first document in array like * [{"code": 64, "errmsg": "duplicate"}, ...]. Format the error message * from all errors in array. */ static void _set_error_from_response (bson_t *bson_array, mongoc_error_domain_t domain, const char *error_type, bson_error_t *error /* OUT */) { bson_iter_t array_iter; bson_iter_t doc_iter; bson_string_t *compound_err; const char *errmsg = NULL; int32_t code = 0; uint32_t n_keys, i; compound_err = bson_string_new (NULL); n_keys = bson_count_keys (bson_array); if (n_keys > 1) { bson_string_append_printf ( compound_err, "Multiple %s errors: ", error_type); } if (!bson_empty0 (bson_array) && bson_iter_init (&array_iter, bson_array)) { /* get first code and all error messages */ i = 0; while (bson_iter_next (&array_iter)) { if (BSON_ITER_HOLDS_DOCUMENT (&array_iter) && bson_iter_recurse (&array_iter, &doc_iter)) { /* parse doc, which is like {"code": 64, "errmsg": "duplicate"} */ while (bson_iter_next (&doc_iter)) { /* use the first error code we find */ if (BSON_ITER_IS_KEY (&doc_iter, "code") && code == 0) { code = bson_iter_int32 (&doc_iter); } else if (BSON_ITER_IS_KEY (&doc_iter, "errmsg")) { errmsg = bson_iter_utf8 (&doc_iter, NULL); /* build message like 'Multiple write errors: "foo", "bar"' */ if (n_keys > 1) { bson_string_append_printf (compound_err, "\"%s\"", errmsg); if (i < n_keys - 1) { bson_string_append (compound_err, ", "); } } else { /* single error message */ bson_string_append (compound_err, errmsg); } } } i++; } } if (code && compound_err->len) { bson_set_error ( error, domain, (uint32_t) code, "%s", compound_err->str); } } bson_string_free (compound_err, true); } /* complete a write result, including only certain fields */ bool _mongoc_write_result_complete ( mongoc_write_result_t *result, /* IN */ int32_t error_api_version, /* IN */ const mongoc_write_concern_t *wc, /* IN */ mongoc_error_domain_t err_domain_override, /* IN */ bson_t *bson, /* OUT */ bson_error_t *error, /* OUT */ ...) 
{ mongoc_error_domain_t domain; va_list args; const char *field; int n_args; bson_iter_t iter; bson_iter_t child; ENTRY; BSON_ASSERT (result); if (error_api_version >= MONGOC_ERROR_API_VERSION_2) { domain = MONGOC_ERROR_SERVER; } else if (err_domain_override) { domain = err_domain_override; } else if (result->error.domain) { domain = (mongoc_error_domain_t) result->error.domain; } else { domain = MONGOC_ERROR_COLLECTION; } /* produce either old fields like nModified from the deprecated Bulk API Spec * or new fields like modifiedCount from the CRUD Spec, which we partly obey */ if (bson && mongoc_write_concern_is_acknowledged (wc)) { n_args = 0; va_start (args, error); while ((field = va_arg (args, const char *))) { n_args++; if (!strcmp (field, "nInserted")) { BSON_APPEND_INT32 (bson, field, result->nInserted); } else if (!strcmp (field, "insertedCount")) { BSON_APPEND_INT32 (bson, field, result->nInserted); } else if (!strcmp (field, "nMatched")) { BSON_APPEND_INT32 (bson, field, result->nMatched); } else if (!strcmp (field, "matchedCount")) { BSON_APPEND_INT32 (bson, field, result->nMatched); } else if (!strcmp (field, "nModified")) { BSON_APPEND_INT32 (bson, field, result->nModified); } else if (!strcmp (field, "modifiedCount")) { BSON_APPEND_INT32 (bson, field, result->nModified); } else if (!strcmp (field, "nRemoved")) { BSON_APPEND_INT32 (bson, field, result->nRemoved); } else if (!strcmp (field, "deletedCount")) { BSON_APPEND_INT32 (bson, field, result->nRemoved); } else if (!strcmp (field, "nUpserted")) { BSON_APPEND_INT32 (bson, field, result->nUpserted); } else if (!strcmp (field, "upsertedCount")) { BSON_APPEND_INT32 (bson, field, result->nUpserted); } else if (!strcmp (field, "upserted") && !bson_empty0 (&result->upserted)) { BSON_APPEND_ARRAY (bson, field, &result->upserted); } else if (!strcmp (field, "upsertedId") && !bson_empty0 (&result->upserted) && bson_iter_init_find (&iter, &result->upserted, "0") && bson_iter_recurse (&iter, &child) && bson_iter_find (&child, "_id")) { /* "upsertedId", singular, for update_one() */ BSON_APPEND_VALUE (bson, "upsertedId", bson_iter_value (&child)); } } va_end (args); /* default: a standard result includes all Bulk API fields */ if (!n_args) { BSON_APPEND_INT32 (bson, "nInserted", result->nInserted); BSON_APPEND_INT32 (bson, "nMatched", result->nMatched); BSON_APPEND_INT32 (bson, "nModified", result->nModified); BSON_APPEND_INT32 (bson, "nRemoved", result->nRemoved); BSON_APPEND_INT32 (bson, "nUpserted", result->nUpserted); if (!bson_empty0 (&result->upserted)) { BSON_APPEND_ARRAY (bson, "upserted", &result->upserted); } } /* always append errors if there are any */ if (!n_args || !bson_empty (&result->writeErrors)) { BSON_APPEND_ARRAY (bson, "writeErrors", &result->writeErrors); } if (result->n_writeConcernErrors) { BSON_APPEND_ARRAY ( bson, "writeConcernErrors", &result->writeConcernErrors); } } /* set bson_error_t from first write error or write concern error */ _set_error_from_response ( &result->writeErrors, domain, "write", &result->error); if (!result->error.code) { _set_error_from_response (&result->writeConcernErrors, MONGOC_ERROR_WRITE_CONCERN, "write concern", &result->error); } if (bson && !bson_empty (&result->errorLabels)) { BSON_APPEND_ARRAY (bson, "errorLabels", &result->errorLabels); } if (error) { memcpy (error, &result->error, sizeof *error); } RETURN (!result->failed && result->error.code == 0); } /*-------------------------------------------------------------------------- * * _mongoc_write_error_get_type -- * * 
Checks if the error or reply from a write command is considered * retryable according to the retryable writes spec. Checks both * for a client error (a network exception) and a server error in * the reply. @cmd_ret and @cmd_err come from the result of a * write_command function. * * * Return: * A mongoc_write_error_type_t indicating the type of error (if any). * *-------------------------------------------------------------------------- */ mongoc_write_err_type_t _mongoc_write_error_get_type (bool cmd_ret, const bson_error_t *cmd_err, const bson_t *reply) { bson_error_t error; /* check for a client error. */ if (!cmd_ret && cmd_err->domain == MONGOC_ERROR_STREAM) { /* Retryable writes spec: "considered retryable if [...] any network * exception (e.g. socket timeout or error) */ return MONGOC_WRITE_ERR_RETRY; } /* check for a server error. */ if (_mongoc_cmd_check_ok_no_wce ( reply, MONGOC_ERROR_API_VERSION_2, &error)) { return MONGOC_WRITE_ERR_NONE; } switch (error.code) { case 11600: /* InterruptedAtShutdown */ case 11602: /* InterruptedDueToReplStateChange */ case 10107: /* NotMaster */ case 13435: /* NotMasterNoSlaveOk */ case 13436: /* NotMasterOrSecondary */ case 189: /* PrimarySteppedDown */ case 91: /* ShutdownInProgress */ case 7: /* HostNotFound */ case 6: /* HostUnreachable */ case 89: /* NetworkTimeout */ case 9001: /* SocketException */ return MONGOC_WRITE_ERR_RETRY; case 64: /* WriteConcernFailed */ return MONGOC_WRITE_ERR_WRITE_CONCERN; default: if (strstr (error.message, "not master") || strstr (error.message, "node is recovering")) { return MONGOC_WRITE_ERR_RETRY; } return MONGOC_WRITE_ERR_OTHER; } } /* Returns true and modifies reply and cmd_err. */ bool _mongoc_write_error_update_if_unsupported_storage_engine (bool cmd_ret, bson_error_t *cmd_err, bson_t *reply) { bson_error_t server_error; if (cmd_ret) { return false; } if (_mongoc_cmd_check_ok_no_wce ( reply, MONGOC_ERROR_API_VERSION_2, &server_error)) { return false; } if (server_error.code == 20 && strstr (server_error.message, "Transaction numbers") == server_error.message) { const char *replacement = "This MongoDB deployment does not support " "retryable writes. Please add " "retryWrites=false to your connection string."; strcpy (cmd_err->message, replacement); if (reply) { bson_t *new_reply = bson_new (); bson_copy_to_excluding_noinit (reply, new_reply, "errmsg", NULL); BSON_APPEND_UTF8 (new_reply, "errmsg", replacement); bson_destroy (reply); bson_steal (reply, new_reply); } return true; } return false; }mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-write-concern-private.h0000644000076500000240000000260413572250760027706 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_WRITE_CONCERN_PRIVATE_H #define MONGOC_WRITE_CONCERN_PRIVATE_H #include BSON_BEGIN_DECLS #define MONGOC_WRITE_CONCERN_FSYNC_DEFAULT -1 #define MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT -1 struct _mongoc_write_concern_t { int8_t fsync_; /* deprecated */ int8_t journal; int32_t w; int64_t wtimeout; char *wtag; bool frozen; bson_t compiled; bool is_default; }; mongoc_write_concern_t * _mongoc_write_concern_new_from_iter (const bson_iter_t *iter, bson_error_t *error); const bson_t * _mongoc_write_concern_get_bson (mongoc_write_concern_t *write_concern); bool _mongoc_parse_wc_err (const bson_t *doc, bson_error_t *error); BSON_END_DECLS #endif /* MONGOC_WRITE_CONCERN_PRIVATE_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-write-concern.c0000644000076500000240000004014413572250760026232 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-util-private.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-write-concern-private.h" static void _mongoc_write_concern_freeze (mongoc_write_concern_t *write_concern); /** * mongoc_write_concern_new: * * Create a new mongoc_write_concern_t. * * Returns: A newly allocated mongoc_write_concern_t. This should be freed * with mongoc_write_concern_destroy(). */ mongoc_write_concern_t * mongoc_write_concern_new (void) { mongoc_write_concern_t *write_concern; write_concern = (mongoc_write_concern_t *) bson_malloc0 (sizeof *write_concern); write_concern->w = MONGOC_WRITE_CONCERN_W_DEFAULT; write_concern->fsync_ = MONGOC_WRITE_CONCERN_FSYNC_DEFAULT; write_concern->journal = MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT; write_concern->is_default = true; bson_init (&write_concern->compiled); return write_concern; } mongoc_write_concern_t * mongoc_write_concern_copy (const mongoc_write_concern_t *write_concern) { mongoc_write_concern_t *ret = NULL; if (write_concern) { ret = mongoc_write_concern_new (); ret->fsync_ = write_concern->fsync_; ret->journal = write_concern->journal; ret->w = write_concern->w; ret->wtimeout = write_concern->wtimeout; ret->frozen = false; ret->wtag = bson_strdup (write_concern->wtag); ret->is_default = write_concern->is_default; } return ret; } /** * mongoc_write_concern_destroy: * @write_concern: A mongoc_write_concern_t. * * Releases a mongoc_write_concern_t and all associated memory. */ void mongoc_write_concern_destroy (mongoc_write_concern_t *write_concern) { if (write_concern) { bson_destroy (&write_concern->compiled); bson_free (write_concern->wtag); bson_free (write_concern); } } bool mongoc_write_concern_get_fsync (const mongoc_write_concern_t *write_concern) { BSON_ASSERT (write_concern); return (write_concern->fsync_ == true); } /** * mongoc_write_concern_set_fsync: * @write_concern: A mongoc_write_concern_t. * @fsync_: If the write concern requires fsync() by the server. 
* * Set if fsync() should be called on the server before acknowledging a * write request. */ void mongoc_write_concern_set_fsync (mongoc_write_concern_t *write_concern, bool fsync_) { BSON_ASSERT (write_concern); write_concern->fsync_ = !!fsync_; write_concern->is_default = false; write_concern->frozen = false; } bool mongoc_write_concern_get_journal (const mongoc_write_concern_t *write_concern) { BSON_ASSERT (write_concern); return (write_concern->journal == true); } bool mongoc_write_concern_journal_is_set ( const mongoc_write_concern_t *write_concern) { BSON_ASSERT (write_concern); return (write_concern->journal != MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT); } /** * mongoc_write_concern_set_journal: * @write_concern: A mongoc_write_concern_t. * @journal: If the write should be journaled. * * Set if the write request should be journaled before acknowledging the * write request. */ void mongoc_write_concern_set_journal (mongoc_write_concern_t *write_concern, bool journal) { BSON_ASSERT (write_concern); write_concern->journal = !!journal; write_concern->is_default = false; write_concern->frozen = false; } int32_t mongoc_write_concern_get_w (const mongoc_write_concern_t *write_concern) { BSON_ASSERT (write_concern); return write_concern->w; } /** * mongoc_write_concern_set_w: * @w: The number of nodes for write or MONGOC_WRITE_CONCERN_W_MAJORITY * for "majority". * * Sets the number of nodes that must acknowledge the write request before * acknowledging the write request to the client. * * You may specify @w as MONGOC_WRITE_CONCERN_W_MAJORITY to request that * a "majority" of nodes acknowledge the request. */ void mongoc_write_concern_set_w (mongoc_write_concern_t *write_concern, int32_t w) { BSON_ASSERT (write_concern); BSON_ASSERT (w >= -3); write_concern->w = w; if (w != MONGOC_WRITE_CONCERN_W_DEFAULT) { write_concern->is_default = false; } write_concern->frozen = false; } int32_t mongoc_write_concern_get_wtimeout (const mongoc_write_concern_t *write_concern) { return (int32_t) mongoc_write_concern_get_wtimeout_int64 (write_concern); } int64_t mongoc_write_concern_get_wtimeout_int64 ( const mongoc_write_concern_t *write_concern) { BSON_ASSERT (write_concern); return write_concern->wtimeout; } void mongoc_write_concern_set_wtimeout (mongoc_write_concern_t *write_concern, int32_t wtimeout_msec) { mongoc_write_concern_set_wtimeout_int64 (write_concern, (int64_t) wtimeout_msec); } void mongoc_write_concern_set_wtimeout_int64 (mongoc_write_concern_t *write_concern, int64_t wtimeout_msec) { BSON_ASSERT (write_concern); if (wtimeout_msec < 0) { return; } write_concern->wtimeout = wtimeout_msec; write_concern->is_default = false; write_concern->frozen = false; } bool mongoc_write_concern_get_wmajority (const mongoc_write_concern_t *write_concern) { BSON_ASSERT (write_concern); return (write_concern->w == MONGOC_WRITE_CONCERN_W_MAJORITY); } /** * mongoc_write_concern_set_wmajority: * @write_concern: A mongoc_write_concern_t. * @wtimeout_msec: Number of milliseconds before timeout. * * Sets the "w" of a write concern to "majority". It is suggested that * you provide a reasonable @wtimeout_msec to wait before considering the * write request failed. A @wtimeout_msec value of 0 indicates no write timeout. * * The @wtimeout_msec parameter must be positive or zero. Negative values will * be ignored. 
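 *
 * A minimal usage sketch (illustrative only; it uses nothing beyond the
 * functions defined in this file). Passing 1000 sets "w" to majority and the
 * write timeout to 1000 milliseconds:
 *
 *    mongoc_write_concern_t *wc = mongoc_write_concern_new ();
 *    mongoc_write_concern_set_wmajority (wc, 1000);
 *    ... use wc with a client, collection, or bulk operation ...
 *    mongoc_write_concern_destroy (wc);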
*/ void mongoc_write_concern_set_wmajority (mongoc_write_concern_t *write_concern, int32_t wtimeout_msec) { BSON_ASSERT (write_concern); write_concern->w = MONGOC_WRITE_CONCERN_W_MAJORITY; write_concern->is_default = false; write_concern->frozen = false; if (wtimeout_msec >= 0) { write_concern->wtimeout = wtimeout_msec; } } const char * mongoc_write_concern_get_wtag (const mongoc_write_concern_t *write_concern) { BSON_ASSERT (write_concern); if (write_concern->w == MONGOC_WRITE_CONCERN_W_TAG) { return write_concern->wtag; } return NULL; } void mongoc_write_concern_set_wtag (mongoc_write_concern_t *write_concern, const char *wtag) { BSON_ASSERT (write_concern); bson_free (write_concern->wtag); write_concern->wtag = bson_strdup (wtag); write_concern->w = MONGOC_WRITE_CONCERN_W_TAG; write_concern->is_default = false; write_concern->frozen = false; } /** * mongoc_write_concern_get_bson: * @write_concern: A mongoc_write_concern_t. * * This is an internal function. * * Returns: A bson_t representing the write concern, which is owned by the * mongoc_write_concern_t instance and should not be modified or freed. */ const bson_t * _mongoc_write_concern_get_bson (mongoc_write_concern_t *write_concern) { if (!write_concern->frozen) { _mongoc_write_concern_freeze (write_concern); } return &write_concern->compiled; } /** * mongoc_write_concern_is_default: * @write_concern: A mongoc_write_concern_t. * * Returns is_default, which is true when write_concern has not been modified. * */ bool mongoc_write_concern_is_default (const mongoc_write_concern_t *write_concern) { return !write_concern || write_concern->is_default; } /** * mongoc_write_concern_freeze: * @write_concern: A mongoc_write_concern_t. * * This is an internal function. * * Encodes the write concern into a bson_t, which may then be returned by * _mongoc_write_concern_get_bson(). */ static void _mongoc_write_concern_freeze (mongoc_write_concern_t *write_concern) { bson_t *compiled; BSON_ASSERT (write_concern); compiled = &write_concern->compiled; write_concern->frozen = true; bson_reinit (compiled); if (write_concern->w == MONGOC_WRITE_CONCERN_W_TAG) { BSON_ASSERT (write_concern->wtag); BSON_APPEND_UTF8 (compiled, "w", write_concern->wtag); } else if (write_concern->w == MONGOC_WRITE_CONCERN_W_MAJORITY) { BSON_APPEND_UTF8 (compiled, "w", "majority"); } else if (write_concern->w == MONGOC_WRITE_CONCERN_W_DEFAULT) { /* Do Nothing */ } else { BSON_APPEND_INT32 (compiled, "w", write_concern->w); } if (write_concern->fsync_ != MONGOC_WRITE_CONCERN_FSYNC_DEFAULT) { bson_append_bool (compiled, "fsync", 5, !!write_concern->fsync_); } if (write_concern->journal != MONGOC_WRITE_CONCERN_JOURNAL_DEFAULT) { bson_append_bool (compiled, "j", 1, !!write_concern->journal); } if (write_concern->wtimeout) { bson_append_int64 (compiled, "wtimeout", 8, write_concern->wtimeout); } } /** * mongoc_write_concern_is_acknowledged: * @concern: (in): A mongoc_write_concern_t. * * Checks to see if @write_concern requests that a getlasterror command is to * be delivered to the MongoDB server. * * Returns: true if a getlasterror command should be sent. 
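 *
 * A minimal behavioural sketch (illustrative only; it follows the
 * implementation directly below). A freshly created write concern is
 * acknowledged; an explicit w of 0 is not:
 *
 *    mongoc_write_concern_t *wc = mongoc_write_concern_new ();
 *    BSON_ASSERT (mongoc_write_concern_is_acknowledged (wc));
 *    mongoc_write_concern_set_w (wc, MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED);
 *    BSON_ASSERT (!mongoc_write_concern_is_acknowledged (wc));
 *    mongoc_write_concern_destroy (wc);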
*/ bool mongoc_write_concern_is_acknowledged ( const mongoc_write_concern_t *write_concern) { if (write_concern) { return (((write_concern->w != MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED) && (write_concern->w != MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED)) || write_concern->fsync_ == true || mongoc_write_concern_get_journal (write_concern)); } return true; } /** * mongoc_write_concern_is_valid: * @write_concern: (in): A mongoc_write_concern_t. * * Checks to see if @write_concern is valid and does not contain conflicting * options. * * Returns: true if the write concern is valid; otherwise false. */ bool mongoc_write_concern_is_valid (const mongoc_write_concern_t *write_concern) { if (!write_concern) { return false; } /* Journal or fsync should require acknowledgement. */ if ((write_concern->fsync_ == true || mongoc_write_concern_get_journal (write_concern)) && (write_concern->w == MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED || write_concern->w == MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED)) { return false; } if (write_concern->wtimeout < 0) { return false; } return true; } static bool _mongoc_write_concern_validate (const mongoc_write_concern_t *write_concern, bson_error_t *error) { if (write_concern && !mongoc_write_concern_is_valid (write_concern)) { bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid writeConcern"); return false; } return true; } /** * _mongoc_parse_wc_err: * @doc: (in): A bson document. * @error: (out): A bson_error_t. * * Parses a document, usually a server reply, * looking for a writeConcernError. Returns true if * there is a writeConcernError, false otherwise. */ bool _mongoc_parse_wc_err (const bson_t *doc, bson_error_t *error) { bson_iter_t iter; bson_iter_t inner; if (bson_iter_init_find (&iter, doc, "writeConcernError") && BSON_ITER_HOLDS_DOCUMENT (&iter)) { const char *errmsg = NULL; int32_t code = 0; BSON_ASSERT (bson_iter_recurse (&iter, &inner)); while (bson_iter_next (&inner)) { if (BSON_ITER_IS_KEY (&inner, "code")) { code = bson_iter_int32 (&inner); } else if (BSON_ITER_IS_KEY (&inner, "errmsg")) { errmsg = bson_iter_utf8 (&inner, NULL); } } bson_set_error (error, MONGOC_ERROR_WRITE_CONCERN, code, "Write Concern error: %s", errmsg); return true; } return false; } /** * mongoc_write_concern_append: * @write_concern: (in): A mongoc_write_concern_t. * @command: (out): A pointer to a bson document. * * Appends a write_concern document to a command, to send to * a server. * * Returns true on success, false on failure. * */ bool mongoc_write_concern_append (mongoc_write_concern_t *write_concern, bson_t *command) { if (!mongoc_write_concern_is_valid (write_concern)) { MONGOC_ERROR ("Invalid writeConcern passed into " "mongoc_write_concern_append."); return false; } if (!bson_append_document (command, "writeConcern", 12, _mongoc_write_concern_get_bson (write_concern))) { MONGOC_ERROR ("Could not append writeConcern to command."); return false; } return true; } /** * _mongoc_write_concern_new_from_iter: * * Create a new mongoc_write_concern_t from an iterator positioned on * a "writeConcern" document. * * Returns: A newly allocated mongoc_write_concern_t. This should be freed * with mongoc_write_concern_destroy(). 
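 *
 * A sketch of one accepted input shape (illustrative; inferred from the
 * parser below, which recognizes "w", "fsync", "j" and "wtimeout"):
 *
 *    { "writeConcern" : { "w" : "majority", "j" : true, "wtimeout" : 5000 } }
 *
 * with the iterator positioned on the "writeConcern" field.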
*/ mongoc_write_concern_t * _mongoc_write_concern_new_from_iter (const bson_iter_t *iter, bson_error_t *error) { bson_iter_t inner; mongoc_write_concern_t *write_concern; int32_t w; BSON_ASSERT (iter); write_concern = mongoc_write_concern_new (); if (!BSON_ITER_HOLDS_DOCUMENT (iter)) { goto fail; } BSON_ASSERT (bson_iter_recurse (iter, &inner)); while (bson_iter_next (&inner)) { if (BSON_ITER_IS_KEY (&inner, "w")) { if (BSON_ITER_HOLDS_INT32 (&inner)) { w = bson_iter_int32 (&inner); if (w < MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED) { goto fail; } mongoc_write_concern_set_w (write_concern, w); } else if (BSON_ITER_HOLDS_UTF8 (&inner)) { if (!strcmp (bson_iter_utf8 (&inner, NULL), "majority")) { /* mongoc_write_concern_set_wmajority() only assigns wtimeout if * it is >= 0. Since we set wtimeout below, pass -1 here. */ mongoc_write_concern_set_wmajority (write_concern, -1); } else { mongoc_write_concern_set_wtag (write_concern, bson_iter_utf8 (&inner, NULL)); } } else { /* wrong type for "w" */ goto fail; } } else if (BSON_ITER_IS_KEY (&inner, "fsync")) { if (!BSON_ITER_HOLDS_BOOL (&inner)) { goto fail; } BEGIN_IGNORE_DEPRECATIONS; mongoc_write_concern_set_fsync (write_concern, bson_iter_bool (&inner)); END_IGNORE_DEPRECATIONS; } else if (BSON_ITER_IS_KEY (&inner, "j")) { if (!BSON_ITER_HOLDS_BOOL (&inner)) { goto fail; } mongoc_write_concern_set_journal (write_concern, bson_iter_bool (&inner)); } else if (BSON_ITER_IS_KEY (&inner, "wtimeout")) { if (!BSON_ITER_HOLDS_INT (&inner) || bson_iter_as_int64 (&inner) < 0) { goto fail; } mongoc_write_concern_set_wtimeout_int64 (write_concern, bson_iter_as_int64 (&inner)); } } if (!_mongoc_write_concern_validate (write_concern, error)) { mongoc_write_concern_destroy (write_concern); return NULL; } return write_concern; fail: bson_set_error (error, MONGOC_ERROR_COMMAND, MONGOC_ERROR_COMMAND_INVALID_ARG, "Invalid writeConcern"); mongoc_write_concern_destroy (write_concern); return NULL; } mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc-write-concern.h0000644000076500000240000000722213572250760026237 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include "mongoc/mongoc-prelude.h" #ifndef MONGOC_WRITE_CONCERN_H #define MONGOC_WRITE_CONCERN_H #include #include "mongoc/mongoc-macros.h" BSON_BEGIN_DECLS #define MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED 0 #define MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED -1 /* deprecated */ #define MONGOC_WRITE_CONCERN_W_DEFAULT -2 #define MONGOC_WRITE_CONCERN_W_MAJORITY -3 #define MONGOC_WRITE_CONCERN_W_TAG -4 typedef struct _mongoc_write_concern_t mongoc_write_concern_t; MONGOC_EXPORT (mongoc_write_concern_t *) mongoc_write_concern_new (void); MONGOC_EXPORT (mongoc_write_concern_t *) mongoc_write_concern_copy (const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (void) mongoc_write_concern_destroy (mongoc_write_concern_t *write_concern); MONGOC_EXPORT (bool) mongoc_write_concern_get_fsync (const mongoc_write_concern_t *write_concern) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (void) mongoc_write_concern_set_fsync (mongoc_write_concern_t *write_concern, bool fsync_) BSON_GNUC_DEPRECATED; MONGOC_EXPORT (bool) mongoc_write_concern_get_journal (const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (bool) mongoc_write_concern_journal_is_set ( const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (void) mongoc_write_concern_set_journal (mongoc_write_concern_t *write_concern, bool journal); MONGOC_EXPORT (int32_t) mongoc_write_concern_get_w (const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (void) mongoc_write_concern_set_w (mongoc_write_concern_t *write_concern, int32_t w); MONGOC_EXPORT (const char *) mongoc_write_concern_get_wtag (const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (void) mongoc_write_concern_set_wtag (mongoc_write_concern_t *write_concern, const char *tag); MONGOC_EXPORT (int32_t) mongoc_write_concern_get_wtimeout (const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (int64_t) mongoc_write_concern_get_wtimeout_int64 ( const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (void) mongoc_write_concern_set_wtimeout (mongoc_write_concern_t *write_concern, int32_t wtimeout_msec); MONGOC_EXPORT (void) mongoc_write_concern_set_wtimeout_int64 (mongoc_write_concern_t *write_concern, int64_t wtimeout_msec); MONGOC_EXPORT (bool) mongoc_write_concern_get_wmajority ( const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (void) mongoc_write_concern_set_wmajority (mongoc_write_concern_t *write_concern, int32_t wtimeout_msec); MONGOC_EXPORT (bool) mongoc_write_concern_is_acknowledged ( const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (bool) mongoc_write_concern_is_valid (const mongoc_write_concern_t *write_concern); MONGOC_EXPORT (bool) mongoc_write_concern_append (mongoc_write_concern_t *write_concern, bson_t *doc); MONGOC_EXPORT (bool) mongoc_write_concern_is_default (const mongoc_write_concern_t *write_concern); BSON_END_DECLS #endif /* MONGOC_WRITE_CONCERN_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/mongoc.h0000644000076500000240000000415113572250760023460 0ustar alcaeusstaff/* * Copyright 2013 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MONGOC_H #define MONGOC_H #include #define MONGOC_INSIDE #include "mongoc/mongoc-macros.h" #include "mongoc/mongoc-apm.h" #include "mongoc/mongoc-bulk-operation.h" #include "mongoc/mongoc-change-stream.h" #include "mongoc/mongoc-client.h" #include "mongoc/mongoc-client-pool.h" #include "mongoc/mongoc-collection.h" #include "mongoc/mongoc-config.h" #include "mongoc/mongoc-cursor.h" #include "mongoc/mongoc-database.h" #include "mongoc/mongoc-index.h" #include "mongoc/mongoc-error.h" #include "mongoc/mongoc-flags.h" #include "mongoc/mongoc-gridfs.h" #include "mongoc/mongoc-gridfs-bucket.h" #include "mongoc/mongoc-gridfs-file.h" #include "mongoc/mongoc-gridfs-file-list.h" #include "mongoc/mongoc-gridfs-file-page.h" #include "mongoc/mongoc-host-list.h" #include "mongoc/mongoc-init.h" #include "mongoc/mongoc-matcher.h" #include "mongoc/mongoc-handshake.h" #include "mongoc/mongoc-opcode.h" #include "mongoc/mongoc-log.h" #include "mongoc/mongoc-socket.h" #include "mongoc/mongoc-client-session.h" #include "mongoc/mongoc-stream.h" #include "mongoc/mongoc-stream-buffered.h" #include "mongoc/mongoc-stream-file.h" #include "mongoc/mongoc-stream-gridfs.h" #include "mongoc/mongoc-stream-socket.h" #include "mongoc/mongoc-uri.h" #include "mongoc/mongoc-write-concern.h" #include "mongoc/mongoc-version.h" #include "mongoc/mongoc-version-functions.h" #ifdef MONGOC_ENABLE_SSL #include "mongoc/mongoc-rand.h" #include "mongoc/mongoc-stream-tls.h" #include "mongoc/mongoc-ssl.h" #endif #undef MONGOC_INSIDE #endif /* MONGOC_H */ mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-compressed.def0000644000076500000240000000037313572250760025267 0ustar alcaeusstaffRPC( compressed, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) INT32_FIELD(original_opcode) INT32_FIELD(uncompressed_size) UINT8_FIELD(compressor_id) RAW_BUFFER_FIELD(compressed_message) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-delete.def0000644000076500000240000000031613572250760024362 0ustar alcaeusstaffRPC( delete, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) INT32_FIELD(zero) CSTRING_FIELD(collection) ENUM_FIELD(flags) BSON_FIELD(selector) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-get-more.def0000644000076500000240000000032613572250760024640 0ustar alcaeusstaffRPC( get_more, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) INT32_FIELD(zero) CSTRING_FIELD(collection) INT32_FIELD(n_return) INT64_FIELD(cursor_id) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-header.def0000644000076500000240000000016313572250760024350 0ustar alcaeusstaffRPC( header, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-insert.def0000644000076500000240000000030213572250760024417 0ustar alcaeusstaffRPC( insert, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) ENUM_FIELD(flags) CSTRING_FIELD(collection) IOVEC_ARRAY_FIELD(documents) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-kill-cursors.def0000644000076500000240000000026513572250760025554 0ustar alcaeusstaffRPC( kill_cursors, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) INT32_FIELD(zero) INT64_ARRAY_FIELD(n_cursors, cursors) ) 
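These op-*.def descriptions list wire-protocol message fields rather than C declarations; they read as X-macro inputs that a consumer expands by defining RPC() and the *_FIELD() macros first. The sketch below is illustrative only: the macro bodies are assumptions for demonstration and do not reproduce libmongoc's own RPC layer.

/* Hypothetical expansion of one field-list description into a plain struct.
 * Only the macros needed for op-kill-cursors are defined here. */
#include <stdint.h>

#define INT32_FIELD(name)            int32_t name;
#define INT64_ARRAY_FIELD(len, name) int32_t len; int64_t *name;
#define RPC(name, fields)            typedef struct { fields } rpc_##name##_t;

/* Restating the op-kill-cursors description above yields one struct member
 * per field, in wire order. */
RPC (kill_cursors,
     INT32_FIELD (msg_len)
     INT32_FIELD (request_id)
     INT32_FIELD (response_to)
     INT32_FIELD (opcode)
     INT32_FIELD (zero)
     INT64_ARRAY_FIELD (n_cursors, cursors))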
mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-msg.def0000644000076500000240000000024413572250760023706 0ustar alcaeusstaffRPC( msg, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) ENUM_FIELD(flags) SECTION_ARRAY_FIELD(sections) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-query.def0000644000076500000240000000041613572250760024266 0ustar alcaeusstaffRPC( query, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) ENUM_FIELD(flags) CSTRING_FIELD(collection) INT32_FIELD(skip) INT32_FIELD(n_return) BSON_FIELD(query) BSON_OPTIONAL(fields, BSON_FIELD(fields)) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-reply-header.def0000644000076500000240000000033213572250760025477 0ustar alcaeusstaffRPC( reply_header, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) ENUM_FIELD(flags) INT64_FIELD(cursor_id) INT32_FIELD(start_from) INT32_FIELD(n_returned) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-reply.def0000644000076500000240000000036113572250760024253 0ustar alcaeusstaffRPC( reply, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) ENUM_FIELD(flags) INT64_FIELD(cursor_id) INT32_FIELD(start_from) INT32_FIELD(n_returned) BSON_ARRAY_FIELD(documents) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/op-update.def0000644000076500000240000000034313572250760024402 0ustar alcaeusstaffRPC( update, INT32_FIELD(msg_len) INT32_FIELD(request_id) INT32_FIELD(response_to) INT32_FIELD(opcode) INT32_FIELD(zero) CSTRING_FIELD(collection) ENUM_FIELD(flags) BSON_FIELD(selector) BSON_FIELD(update) ) mongodb-1.6.1/src/libmongoc/src/libmongoc/src/mongoc/utlist.h0000644000076500000240000012575513572250760023540 0ustar alcaeusstaff/* Copyright (c) 2007-2014, Troy D. Hanson http://troydhanson.github.com/uthash/ All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "mongoc/mongoc-prelude.h" #ifndef UTLIST_H #define UTLIST_H #define UTLIST_VERSION 1.9.9 /* * This file contains macros to manipulate singly and doubly-linked lists. * * 1. LL_ macros: singly-linked lists. * 2. DL_ macros: doubly-linked lists. * 3. CDL_ macros: circular doubly-linked lists. * * To use singly-linked lists, your structure must have a "next" pointer. * To use doubly-linked lists, your structure must "prev" and "next" pointers. * Either way, the pointer to the head of the list must be initialized to NULL. 
* * ----------------.EXAMPLE ------------------------- * struct item { * int id; * struct item *prev, *next; * } * * struct item *list = NULL: * * int main() { * struct item *item; * ... allocate and populate item ... * DL_APPEND(list, item); * } * -------------------------------------------------- * * For doubly-linked lists, the append and delete macros are O(1) * For singly-linked lists, append and delete are O(n) but prepend is O(1) * The sort macro is O(n log(n)) for all types of single/double/circular lists. */ /* These macros use decltype or the earlier __typeof GNU extension. As decltype is only available in newer compilers (VS2010 or gcc 4.3+ when compiling c++ code), this code uses whatever method is needed or, for VS2008 where neither is available, uses casting workarounds. */ #ifdef _MSC_VER /* MS compiler */ #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ #define LDECLTYPE(x) decltype (x) #else /* VS2008 or older (or VS2010 in C mode) */ #define NO_DECLTYPE #define LDECLTYPE(x) char * #endif #elif defined(__ICCARM__) #define NO_DECLTYPE #define LDECLTYPE(x) char * #else /* GNU, Sun and other compilers */ #define LDECLTYPE(x) __typeof(x) #endif /* for VS2008 we use some workarounds to get around the lack of decltype, * namely, we always reassign our tmp variable to the list head if we need * to dereference its prev/next pointers, and save/restore the real head.*/ #ifdef NO_DECLTYPE #define _SV(elt, list) \ _tmp = (char *) (list); \ { \ char **_alias = (char **) &(list); \ *_alias = (elt); \ } #define _NEXT(elt, list, next) ((char *) ((list)->next)) #define _NEXTASGN(elt, list, to, next) \ { \ char **_alias = (char **) &((list)->next); \ *_alias = (char *) (to); \ } /* #define _PREV(elt,list,prev) ((char*)((list)->prev)) */ #define _PREVASGN(elt, list, to, prev) \ { \ char **_alias = (char **) &((list)->prev); \ *_alias = (char *) (to); \ } #define _RS(list) \ { \ char **_alias = (char **) &(list); \ *_alias = _tmp; \ } #define _CASTASGN(a, b) \ { \ char **_alias = (char **) &(a); \ *_alias = (char *) (b); \ } #else #define _SV(elt, list) #define _NEXT(elt, list, next) ((elt)->next) #define _NEXTASGN(elt, list, to, next) ((elt)->next) = (to) /* #define _PREV(elt,list,prev) ((elt)->prev) */ #define _PREVASGN(elt, list, to, prev) ((elt)->prev) = (to) #define _RS(list) #define _CASTASGN(a, b) (a) = (b) #endif /****************************************************************************** * The sort macro is an adaptation of Simon Tatham's O(n log(n)) mergesort * * Unwieldy variable names used here to avoid shadowing passed-in variables. 
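 *
 * A minimal usage sketch (illustrative; it reuses the struct item example
 * above, and "by_id" is an assumed comparator, not part of this header):
 *
 *    int by_id (struct item *a, struct item *b) { return a->id - b->id; }
 *
 *    DL_APPEND (list, item);
 *    DL_SORT (list, by_id);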
* *****************************************************************************/ #define LL_SORT(list, cmp) LL_SORT2 (list, cmp, next) #define LL_SORT2(list, cmp, next) \ do { \ LDECLTYPE (list) _ls_p; \ LDECLTYPE (list) _ls_q; \ LDECLTYPE (list) _ls_e; \ LDECLTYPE (list) _ls_tail; \ int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ if (list) { \ _ls_insize = 1; \ _ls_looping = 1; \ while (_ls_looping) { \ _CASTASGN (_ls_p, list); \ list = NULL; \ _ls_tail = NULL; \ _ls_nmerges = 0; \ while (_ls_p) { \ _ls_nmerges++; \ _ls_q = _ls_p; \ _ls_psize = 0; \ for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ _ls_psize++; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ if (!_ls_q) \ break; \ } \ _ls_qsize = _ls_insize; \ while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ if (_ls_psize == 0) { \ _ls_e = _ls_q; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ _ls_qsize--; \ } else if (_ls_qsize == 0 || !_ls_q) { \ _ls_e = _ls_p; \ _SV (_ls_p, list); \ _ls_p = _NEXT (_ls_p, list, next); \ _RS (list); \ _ls_psize--; \ } else if (cmp (_ls_p, _ls_q) <= 0) { \ _ls_e = _ls_p; \ _SV (_ls_p, list); \ _ls_p = _NEXT (_ls_p, list, next); \ _RS (list); \ _ls_psize--; \ } else { \ _ls_e = _ls_q; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ _ls_qsize--; \ } \ if (_ls_tail) { \ _SV (_ls_tail, list); \ _NEXTASGN (_ls_tail, list, _ls_e, next); \ _RS (list); \ } else { \ _CASTASGN (list, _ls_e); \ } \ _ls_tail = _ls_e; \ } \ _ls_p = _ls_q; \ } \ if (_ls_tail) { \ _SV (_ls_tail, list); \ _NEXTASGN (_ls_tail, list, NULL, next); \ _RS (list); \ } \ if (_ls_nmerges <= 1) { \ _ls_looping = 0; \ } \ _ls_insize *= 2; \ } \ } \ } while (0) #define DL_SORT(list, cmp) DL_SORT2 (list, cmp, prev, next) #define DL_SORT2(list, cmp, prev, next) \ do { \ LDECLTYPE (list) _ls_p; \ LDECLTYPE (list) _ls_q; \ LDECLTYPE (list) _ls_e; \ LDECLTYPE (list) _ls_tail; \ int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ if (list) { \ _ls_insize = 1; \ _ls_looping = 1; \ while (_ls_looping) { \ _CASTASGN (_ls_p, list); \ list = NULL; \ _ls_tail = NULL; \ _ls_nmerges = 0; \ while (_ls_p) { \ _ls_nmerges++; \ _ls_q = _ls_p; \ _ls_psize = 0; \ for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ _ls_psize++; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ if (!_ls_q) \ break; \ } \ _ls_qsize = _ls_insize; \ while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ if (_ls_psize == 0) { \ _ls_e = _ls_q; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ _ls_qsize--; \ } else if (_ls_qsize == 0 || !_ls_q) { \ _ls_e = _ls_p; \ _SV (_ls_p, list); \ _ls_p = _NEXT (_ls_p, list, next); \ _RS (list); \ _ls_psize--; \ } else if (cmp (_ls_p, _ls_q) <= 0) { \ _ls_e = _ls_p; \ _SV (_ls_p, list); \ _ls_p = _NEXT (_ls_p, list, next); \ _RS (list); \ _ls_psize--; \ } else { \ _ls_e = _ls_q; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ _ls_qsize--; \ } \ if (_ls_tail) { \ _SV (_ls_tail, list); \ _NEXTASGN (_ls_tail, list, _ls_e, next); \ _RS (list); \ } else { \ _CASTASGN (list, _ls_e); \ } \ _SV (_ls_e, list); \ _PREVASGN (_ls_e, list, _ls_tail, prev); \ _RS (list); \ _ls_tail = _ls_e; \ } \ _ls_p = _ls_q; \ } \ _CASTASGN (list->prev, _ls_tail); \ _SV (_ls_tail, list); \ _NEXTASGN (_ls_tail, list, NULL, next); \ _RS (list); \ if (_ls_nmerges <= 1) { \ _ls_looping = 0; \ } \ _ls_insize *= 2; \ } \ } \ } while (0) #define CDL_SORT(list, cmp) CDL_SORT2 (list, cmp, prev, 
next) #define CDL_SORT2(list, cmp, prev, next) \ do { \ LDECLTYPE (list) _ls_p; \ LDECLTYPE (list) _ls_q; \ LDECLTYPE (list) _ls_e; \ LDECLTYPE (list) _ls_tail; \ LDECLTYPE (list) _ls_oldhead; \ LDECLTYPE (list) _tmp; \ int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ if (list) { \ _ls_insize = 1; \ _ls_looping = 1; \ while (_ls_looping) { \ _CASTASGN (_ls_p, list); \ _CASTASGN (_ls_oldhead, list); \ list = NULL; \ _ls_tail = NULL; \ _ls_nmerges = 0; \ while (_ls_p) { \ _ls_nmerges++; \ _ls_q = _ls_p; \ _ls_psize = 0; \ for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ _ls_psize++; \ _SV (_ls_q, list); \ if (_NEXT (_ls_q, list, next) == _ls_oldhead) { \ _ls_q = NULL; \ } else { \ _ls_q = _NEXT (_ls_q, list, next); \ } \ _RS (list); \ if (!_ls_q) \ break; \ } \ _ls_qsize = _ls_insize; \ while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ if (_ls_psize == 0) { \ _ls_e = _ls_q; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ _ls_qsize--; \ if (_ls_q == _ls_oldhead) { \ _ls_q = NULL; \ } \ } else if (_ls_qsize == 0 || !_ls_q) { \ _ls_e = _ls_p; \ _SV (_ls_p, list); \ _ls_p = _NEXT (_ls_p, list, next); \ _RS (list); \ _ls_psize--; \ if (_ls_p == _ls_oldhead) { \ _ls_p = NULL; \ } \ } else if (cmp (_ls_p, _ls_q) <= 0) { \ _ls_e = _ls_p; \ _SV (_ls_p, list); \ _ls_p = _NEXT (_ls_p, list, next); \ _RS (list); \ _ls_psize--; \ if (_ls_p == _ls_oldhead) { \ _ls_p = NULL; \ } \ } else { \ _ls_e = _ls_q; \ _SV (_ls_q, list); \ _ls_q = _NEXT (_ls_q, list, next); \ _RS (list); \ _ls_qsize--; \ if (_ls_q == _ls_oldhead) { \ _ls_q = NULL; \ } \ } \ if (_ls_tail) { \ _SV (_ls_tail, list); \ _NEXTASGN (_ls_tail, list, _ls_e, next); \ _RS (list); \ } else { \ _CASTASGN (list, _ls_e); \ } \ _SV (_ls_e, list); \ _PREVASGN (_ls_e, list, _ls_tail, prev); \ _RS (list); \ _ls_tail = _ls_e; \ } \ _ls_p = _ls_q; \ } \ _CASTASGN (list->prev, _ls_tail); \ _CASTASGN (_tmp, list); \ _SV (_ls_tail, list); \ _NEXTASGN (_ls_tail, list, _tmp, next); \ _RS (list); \ if (_ls_nmerges <= 1) { \ _ls_looping = 0; \ } \ _ls_insize *= 2; \ } \ } \ } while (0) /****************************************************************************** * singly linked list macros (non-circular) * *****************************************************************************/ #define LL_PREPEND(head, add) LL_PREPEND2 (head, add, next) #define LL_PREPEND2(head, add, next) \ do { \ (add)->next = head; \ head = add; \ } while (0) #define LL_CONCAT(head1, head2) LL_CONCAT2 (head1, head2, next) #define LL_CONCAT2(head1, head2, next) \ do { \ LDECLTYPE (head1) _tmp; \ if (head1) { \ _tmp = head1; \ while (_tmp->next) { \ _tmp = _tmp->next; \ } \ _tmp->next = (head2); \ } else { \ (head1) = (head2); \ } \ } while (0) #define LL_APPEND(head, add) LL_APPEND2 (head, add, next) #define LL_APPEND2(head, add, next) \ do { \ LDECLTYPE (head) _tmp; \ (add)->next = NULL; \ if (head) { \ _tmp = head; \ while (_tmp->next) { \ _tmp = _tmp->next; \ } \ _tmp->next = (add); \ } else { \ (head) = (add); \ } \ } while (0) #define LL_DELETE(head, del) LL_DELETE2 (head, del, next) #define LL_DELETE2(head, del, next) \ do { \ LDECLTYPE (head) _tmp; \ if ((head) == (del)) { \ (head) = (head)->next; \ } else { \ _tmp = head; \ while (_tmp->next && (_tmp->next != (del))) { \ _tmp = _tmp->next; \ } \ if (_tmp->next) { \ _tmp->next = ((del)->next); \ } \ } \ } while (0) /* Here are VS2008 replacements for LL_APPEND and LL_DELETE */ #define LL_APPEND_VS2008(head, add) LL_APPEND2_VS2008 (head, add, next) #define 
LL_APPEND2_VS2008(head, add, next) \ do { \ if (head) { \ (add)->next = head; /* use add->next as a temp variable */ \ while ((add)->next->next) { \ (add)->next = (add)->next->next; \ } \ (add)->next->next = (add); \ } else { \ (head) = (add); \ } \ (add)->next = NULL; \ } while (0) #define LL_DELETE_VS2008(head, del) LL_DELETE2_VS2008 (head, del, next) #define LL_DELETE2_VS2008(head, del, next) \ do { \ if ((head) == (del)) { \ (head) = (head)->next; \ } else { \ char *_tmp = (char *) (head); \ while ((head)->next && ((head)->next != (del))) { \ head = (head)->next; \ } \ if ((head)->next) { \ (head)->next = ((del)->next); \ } \ { \ char **_head_alias = (char **) &(head); \ *_head_alias = _tmp; \ } \ } \ } while (0) #ifdef NO_DECLTYPE #undef LL_APPEND #define LL_APPEND LL_APPEND_VS2008 #undef LL_DELETE #define LL_DELETE LL_DELETE_VS2008 #undef LL_DELETE2 #define LL_DELETE2 LL_DELETE2_VS2008 #undef LL_APPEND2 #define LL_APPEND2 LL_APPEND2_VS2008 #undef LL_CONCAT /* no LL_CONCAT_VS2008 */ #undef DL_CONCAT /* no DL_CONCAT_VS2008 */ #endif /* end VS2008 replacements */ #define LL_COUNT(head, el, counter) LL_COUNT2 (head, el, counter, next) #define LL_COUNT2(head, el, counter, next) \ { \ counter = 0; \ LL_FOREACH2 (head, el, next) \ { \ ++counter; \ } \ } #define LL_FOREACH(head, el) LL_FOREACH2 (head, el, next) #define LL_FOREACH2(head, el, next) for (el = head; el; el = (el)->next) #define LL_FOREACH_SAFE(head, el, tmp) LL_FOREACH_SAFE2 (head, el, tmp, next) #define LL_FOREACH_SAFE2(head, el, tmp, next) \ for ((el) = (head); (el) && (tmp = (el)->next, 1); (el) = tmp) #define LL_SEARCH_SCALAR(head, out, field, val) \ LL_SEARCH_SCALAR2 (head, out, field, val, next) #define LL_SEARCH_SCALAR2(head, out, field, val, next) \ do { \ LL_FOREACH2 (head, out, next) \ { \ if ((out)->field == (val)) \ break; \ } \ } while (0) #define LL_SEARCH(head, out, elt, cmp) LL_SEARCH2 (head, out, elt, cmp, next) #define LL_SEARCH2(head, out, elt, cmp, next) \ do { \ LL_FOREACH2 (head, out, next) \ { \ if ((cmp (out, elt)) == 0) \ break; \ } \ } while (0) #define LL_REPLACE_ELEM(head, el, add) \ do { \ LDECLTYPE (head) _tmp; \ BSON_ASSERT (head != NULL); \ BSON_ASSERT (el != NULL); \ BSON_ASSERT (add != NULL); \ (add)->next = (el)->next; \ if ((head) == (el)) { \ (head) = (add); \ } else { \ _tmp = head; \ while (_tmp->next && (_tmp->next != (el))) { \ _tmp = _tmp->next; \ } \ if (_tmp->next) { \ _tmp->next = (add); \ } \ } \ } while (0) #define LL_PREPEND_ELEM(head, el, add) \ do { \ LDECLTYPE (head) _tmp; \ BSON_ASSERT (head != NULL); \ BSON_ASSERT (el != NULL); \ BSON_ASSERT (add != NULL); \ (add)->next = (el); \ if ((head) == (el)) { \ (head) = (add); \ } else { \ _tmp = head; \ while (_tmp->next && (_tmp->next != (el))) { \ _tmp = _tmp->next; \ } \ if (_tmp->next) { \ _tmp->next = (add); \ } \ } \ } while (0) /****************************************************************************** * doubly linked list macros (non-circular) * *****************************************************************************/ #define DL_PREPEND(head, add) DL_PREPEND2 (head, add, prev, next) #define DL_PREPEND2(head, add, prev, next) \ do { \ (add)->next = head; \ if (head) { \ (add)->prev = (head)->prev; \ (head)->prev = (add); \ } else { \ (add)->prev = (add); \ } \ (head) = (add); \ } while (0) #define DL_APPEND(head, add) DL_APPEND2 (head, add, prev, next) #define DL_APPEND2(head, add, prev, next) \ do { \ if (head) { \ (add)->prev = (head)->prev; \ (head)->prev->next = (add); \ (head)->prev = (add); \ (add)->next = NULL; 
\ } else { \ (head) = (add); \ (head)->prev = (head); \ (head)->next = NULL; \ } \ } while (0) #define DL_CONCAT(head1, head2) DL_CONCAT2 (head1, head2, prev, next) #define DL_CONCAT2(head1, head2, prev, next) \ do { \ LDECLTYPE (head1) _tmp; \ if (head2) { \ if (head1) { \ _tmp = (head2)->prev; \ (head2)->prev = (head1)->prev; \ (head1)->prev->next = (head2); \ (head1)->prev = _tmp; \ } else { \ (head1) = (head2); \ } \ } \ } while (0) #define DL_DELETE(head, del) DL_DELETE2 (head, del, prev, next) #define DL_DELETE2(head, del, prev, next) \ do { \ BSON_ASSERT ((del)->prev != NULL); \ if ((del)->prev == (del)) { \ (head) = NULL; \ } else if ((del) == (head)) { \ (del)->next->prev = (del)->prev; \ (head) = (del)->next; \ } else { \ (del)->prev->next = (del)->next; \ if ((del)->next) { \ (del)->next->prev = (del)->prev; \ } else { \ (head)->prev = (del)->prev; \ } \ } \ } while (0) #define DL_COUNT(head, el, counter) DL_COUNT2 (head, el, counter, next) #define DL_COUNT2(head, el, counter, next) \ { \ counter = 0; \ DL_FOREACH2 (head, el, next) \ { \ ++counter; \ } \ } #define DL_FOREACH(head, el) DL_FOREACH2 (head, el, next) #define DL_FOREACH2(head, el, next) for (el = head; el; el = (el)->next) /* this version is safe for deleting the elements during iteration */ #define DL_FOREACH_SAFE(head, el, tmp) DL_FOREACH_SAFE2 (head, el, tmp, next) #define DL_FOREACH_SAFE2(head, el, tmp, next) \ for ((el) = (head); (el) && (tmp = (el)->next, 1); (el) = tmp) /* these are identical to their singly-linked list counterparts */ #define DL_SEARCH_SCALAR LL_SEARCH_SCALAR #define DL_SEARCH LL_SEARCH #define DL_SEARCH_SCALAR2 LL_SEARCH_SCALAR2 #define DL_SEARCH2 LL_SEARCH2 #define DL_REPLACE_ELEM(head, el, add) \ do { \ BSON_ASSERT (head != NULL); \ BSON_ASSERT (el != NULL); \ BSON_ASSERT (add != NULL); \ if ((head) == (el)) { \ (head) = (add); \ (add)->next = (el)->next; \ if ((el)->next == NULL) { \ (add)->prev = (add); \ } else { \ (add)->prev = (el)->prev; \ (add)->next->prev = (add); \ } \ } else { \ (add)->next = (el)->next; \ (add)->prev = (el)->prev; \ (add)->prev->next = (add); \ if ((el)->next == NULL) { \ (head)->prev = (add); \ } else { \ (add)->next->prev = (add); \ } \ } \ } while (0) #define DL_PREPEND_ELEM(head, el, add) \ do { \ BSON_ASSERT (head != NULL); \ BSON_ASSERT (el != NULL); \ BSON_ASSERT (add != NULL); \ (add)->next = (el); \ (add)->prev = (el)->prev; \ (el)->prev = (add); \ if ((head) == (el)) { \ (head) = (add); \ } else { \ (add)->prev->next = (add); \ } \ } while (0) /****************************************************************************** * circular doubly linked list macros * *****************************************************************************/ #define CDL_PREPEND(head, add) CDL_PREPEND2 (head, add, prev, next) #define CDL_PREPEND2(head, add, prev, next) \ do { \ if (head) { \ (add)->prev = (head)->prev; \ (add)->next = (head); \ (head)->prev = (add); \ (add)->prev->next = (add); \ } else { \ (add)->prev = (add); \ (add)->next = (add); \ } \ (head) = (add); \ } while (0) #define CDL_DELETE(head, del) CDL_DELETE2 (head, del, prev, next) #define CDL_DELETE2(head, del, prev, next) \ do { \ if (((head) == (del)) && ((head)->next == (head))) { \ (head) = 0L; \ } else { \ (del)->next->prev = (del)->prev; \ (del)->prev->next = (del)->next; \ if ((del) == (head)) \ (head) = (del)->next; \ } \ } while (0) #define CDL_COUNT(head, el, counter) CDL_COUNT2 (head, el, counter, next) #define CDL_COUNT2(head, el, counter, next) \ { \ counter = 0; \ CDL_FOREACH2 (head, el, 
next) \ { \ ++counter; \ } \ } #define CDL_FOREACH(head, el) CDL_FOREACH2 (head, el, next) #define CDL_FOREACH2(head, el, next) \ for (el = head; el; el = ((el)->next == head ? 0L : (el)->next)) #define CDL_FOREACH_SAFE(head, el, tmp1, tmp2) \ CDL_FOREACH_SAFE2 (head, el, tmp1, tmp2, prev, next) #define CDL_FOREACH_SAFE2(head, el, tmp1, tmp2, prev, next) \ for ((el) = (head), ((tmp1) = (head) ? ((head)->prev) : NULL); \ (el) && ((tmp2) = (el)->next, 1); \ ((el) = (((el) == (tmp1)) ? 0L : (tmp2)))) #define CDL_SEARCH_SCALAR(head, out, field, val) \ CDL_SEARCH_SCALAR2 (head, out, field, val, next) #define CDL_SEARCH_SCALAR2(head, out, field, val, next) \ do { \ CDL_FOREACH2 (head, out, next) \ { \ if ((out)->field == (val)) \ break; \ } \ } while (0) #define CDL_SEARCH(head, out, elt, cmp) CDL_SEARCH2 (head, out, elt, cmp, next) #define CDL_SEARCH2(head, out, elt, cmp, next) \ do { \ CDL_FOREACH2 (head, out, next) \ { \ if ((cmp (out, elt)) == 0) \ break; \ } \ } while (0) #define CDL_REPLACE_ELEM(head, el, add) \ do { \ BSON_ASSERT (head != NULL); \ BSON_ASSERT (el != NULL); \ BSON_ASSERT (add != NULL); \ if ((el)->next == (el)) { \ (add)->next = (add); \ (add)->prev = (add); \ (head) = (add); \ } else { \ (add)->next = (el)->next; \ (add)->prev = (el)->prev; \ (add)->next->prev = (add); \ (add)->prev->next = (add); \ if ((head) == (el)) { \ (head) = (add); \ } \ } \ } while (0) #define CDL_PREPEND_ELEM(head, el, add) \ do { \ BSON_ASSERT (head != NULL); \ BSON_ASSERT (el != NULL); \ BSON_ASSERT (add != NULL); \ (add)->next = (el); \ (add)->prev = (el)->prev; \ (el)->prev = (add); \ (add)->prev->next = (add); \ if ((head) == (el)) { \ (head) = (add); \ } \ } while (0) #endif /* UTLIST_H */ mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/adler32.c0000644000076500000240000001212413572250760021060 0ustar alcaeusstaff/* adler32.c -- compute the Adler-32 checksum of a data stream * Copyright (C) 1995-2011, 2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #include "zutil.h" local uLong adler32_combine_ OF((uLong adler1, uLong adler2, z_off64_t len2)); #define BASE 65521U /* largest prime smaller than 65536 */ #define NMAX 5552 /* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */ #define DO1(buf,i) {adler += (buf)[i]; sum2 += adler;} #define DO2(buf,i) DO1(buf,i); DO1(buf,i+1); #define DO4(buf,i) DO2(buf,i); DO2(buf,i+2); #define DO8(buf,i) DO4(buf,i); DO4(buf,i+4); #define DO16(buf) DO8(buf,0); DO8(buf,8); /* use NO_DIVIDE if your processor does not do division in hardware -- try it both ways to see which is faster */ #ifdef NO_DIVIDE /* note that this assumes BASE is 65521, where 65536 % 65521 == 15 (thank you to John Reiser for pointing this out) */ # define CHOP(a) \ do { \ unsigned long tmp = a >> 16; \ a &= 0xffffUL; \ a += (tmp << 4) - tmp; \ } while (0) # define MOD28(a) \ do { \ CHOP(a); \ if (a >= BASE) a -= BASE; \ } while (0) # define MOD(a) \ do { \ CHOP(a); \ MOD28(a); \ } while (0) # define MOD63(a) \ do { /* this assumes a is not negative */ \ z_off64_t tmp = a >> 32; \ a &= 0xffffffffL; \ a += (tmp << 8) - (tmp << 5) + tmp; \ tmp = a >> 16; \ a &= 0xffffL; \ a += (tmp << 4) - tmp; \ tmp = a >> 16; \ a &= 0xffffL; \ a += (tmp << 4) - tmp; \ if (a >= BASE) a -= BASE; \ } while (0) #else # define MOD(a) a %= BASE # define MOD28(a) a %= BASE # define MOD63(a) a %= BASE #endif /* ========================================================================= */ uLong ZEXPORT adler32_z(adler, buf, len) uLong adler; 
const Bytef *buf; z_size_t len; { unsigned long sum2; unsigned n; /* split Adler-32 into component sums */ sum2 = (adler >> 16) & 0xffff; adler &= 0xffff; /* in case user likes doing a byte at a time, keep it fast */ if (len == 1) { adler += buf[0]; if (adler >= BASE) adler -= BASE; sum2 += adler; if (sum2 >= BASE) sum2 -= BASE; return adler | (sum2 << 16); } /* initial Adler-32 value (deferred check for len == 1 speed) */ if (buf == Z_NULL) return 1L; /* in case short lengths are provided, keep it somewhat fast */ if (len < 16) { while (len--) { adler += *buf++; sum2 += adler; } if (adler >= BASE) adler -= BASE; MOD28(sum2); /* only added so many BASE's */ return adler | (sum2 << 16); } /* do length NMAX blocks -- requires just one modulo operation */ while (len >= NMAX) { len -= NMAX; n = NMAX / 16; /* NMAX is divisible by 16 */ do { DO16(buf); /* 16 sums unrolled */ buf += 16; } while (--n); MOD(adler); MOD(sum2); } /* do remaining bytes (less than NMAX, still just one modulo) */ if (len) { /* avoid modulos if none remaining */ while (len >= 16) { len -= 16; DO16(buf); buf += 16; } while (len--) { adler += *buf++; sum2 += adler; } MOD(adler); MOD(sum2); } /* return recombined sums */ return adler | (sum2 << 16); } /* ========================================================================= */ uLong ZEXPORT adler32(adler, buf, len) uLong adler; const Bytef *buf; uInt len; { return adler32_z(adler, buf, len); } /* ========================================================================= */ local uLong adler32_combine_(adler1, adler2, len2) uLong adler1; uLong adler2; z_off64_t len2; { unsigned long sum1; unsigned long sum2; unsigned rem; /* for negative len, return invalid adler32 as a clue for debugging */ if (len2 < 0) return 0xffffffffUL; /* the derivation of this formula is left as an exercise for the reader */ MOD63(len2); /* assumes len2 >= 0 */ rem = (unsigned)len2; sum1 = adler1 & 0xffff; sum2 = rem * sum1; MOD(sum2); sum1 += (adler2 & 0xffff) + BASE - 1; sum2 += ((adler1 >> 16) & 0xffff) + ((adler2 >> 16) & 0xffff) + BASE - rem; if (sum1 >= BASE) sum1 -= BASE; if (sum1 >= BASE) sum1 -= BASE; if (sum2 >= ((unsigned long)BASE << 1)) sum2 -= ((unsigned long)BASE << 1); if (sum2 >= BASE) sum2 -= BASE; return sum1 | (sum2 << 16); } /* ========================================================================= */ uLong ZEXPORT adler32_combine(adler1, adler2, len2) uLong adler1; uLong adler2; z_off_t len2; { return adler32_combine_(adler1, adler2, len2); } uLong ZEXPORT adler32_combine64(adler1, adler2, len2) uLong adler1; uLong adler2; z_off64_t len2; { return adler32_combine_(adler1, adler2, len2); } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/compress.c0000644000076500000240000000521313572250760021460 0ustar alcaeusstaff/* compress.c -- compress a memory buffer * Copyright (C) 1995-2005, 2014, 2016 Jean-loup Gailly, Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #define ZLIB_INTERNAL #include "zlib.h" /* =========================================================================== Compresses the source buffer into the destination buffer. The level parameter has the same meaning as in deflateInit. sourceLen is the byte length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least 0.1% larger than sourceLen plus 12 bytes. Upon exit, destLen is the actual size of the compressed buffer. 
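
     A minimal calling sketch (the names "text" and "dest" are illustrative
   only; compressBound(), defined at the end of this file, gives a destination
   size guaranteed to be large enough for compress2):

       uLong  src_len  = (uLong)strlen(text);
       uLongf dest_len = compressBound(src_len);
       Bytef *dest     = (Bytef *)malloc(dest_len);
       int    rc       = compress2(dest, &dest_len,
                                   (const Bytef *)text, src_len, 6);

   On Z_OK, dest_len has been updated to the actual compressed size.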
compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer, Z_STREAM_ERROR if the level parameter is invalid. */ int ZEXPORT compress2 (dest, destLen, source, sourceLen, level) Bytef *dest; uLongf *destLen; const Bytef *source; uLong sourceLen; int level; { z_stream stream; int err; const uInt max = (uInt)-1; uLong left; left = *destLen; *destLen = 0; stream.zalloc = (alloc_func)0; stream.zfree = (free_func)0; stream.opaque = (voidpf)0; err = deflateInit(&stream, level); if (err != Z_OK) return err; stream.next_out = dest; stream.avail_out = 0; stream.next_in = (z_const Bytef *)source; stream.avail_in = 0; do { if (stream.avail_out == 0) { stream.avail_out = left > (uLong)max ? max : (uInt)left; left -= stream.avail_out; } if (stream.avail_in == 0) { stream.avail_in = sourceLen > (uLong)max ? max : (uInt)sourceLen; sourceLen -= stream.avail_in; } err = deflate(&stream, sourceLen ? Z_NO_FLUSH : Z_FINISH); } while (err == Z_OK); *destLen = stream.total_out; deflateEnd(&stream); return err == Z_STREAM_END ? Z_OK : err; } /* =========================================================================== */ int ZEXPORT compress (dest, destLen, source, sourceLen) Bytef *dest; uLongf *destLen; const Bytef *source; uLong sourceLen; { return compress2(dest, destLen, source, sourceLen, Z_DEFAULT_COMPRESSION); } /* =========================================================================== If the default memLevel or windowBits for deflateInit() is changed, then this function needs to be updated. */ uLong ZEXPORT compressBound (sourceLen) uLong sourceLen; { return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + (sourceLen >> 25) + 13; } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/crc32.c0000644000076500000240000003334513572250760020550 0ustar alcaeusstaff/* crc32.c -- compute the CRC-32 of a data stream * Copyright (C) 1995-2006, 2010, 2011, 2012, 2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h * * Thanks to Rodney Brown for his contribution of faster * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing * tables for updating the shift register in one step with three exclusive-ors * instead of four steps with four exclusive-ors. This results in about a * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3. */ /* @(#) $Id$ */ /* Note on the use of DYNAMIC_CRC_TABLE: there is no mutex or semaphore protection on the static variables used to control the first-use generation of the crc tables. Therefore, if you #define DYNAMIC_CRC_TABLE, you should first call get_crc_table() to initialize the tables before allowing more than one thread to use crc32(). DYNAMIC_CRC_TABLE and MAKECRCH can be #defined to write out crc32.h. */ #ifdef MAKECRCH # include # ifndef DYNAMIC_CRC_TABLE # define DYNAMIC_CRC_TABLE # endif /* !DYNAMIC_CRC_TABLE */ #endif /* MAKECRCH */ #include "zutil.h" /* for STDC and FAR definitions */ /* Definitions for doing the crc four data bytes at a time. 
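   (In the BYFOUR variant below, crc_table[1..3] hold the CRC of each byte
   value followed by one, two and three zero bytes, and crc_table[4..7] hold
   the byte-reversed forms of those tables for big-endian machines; DOLIT4 and
   DOBIG4 use them to fold four input bytes into the register per step.)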
*/ #if !defined(NOBYFOUR) && defined(Z_U4) # define BYFOUR #endif #ifdef BYFOUR local unsigned long crc32_little OF((unsigned long, const unsigned char FAR *, z_size_t)); local unsigned long crc32_big OF((unsigned long, const unsigned char FAR *, z_size_t)); # define TBLS 8 #else # define TBLS 1 #endif /* BYFOUR */ /* Local functions for crc concatenation */ local unsigned long gf2_matrix_times OF((unsigned long *mat, unsigned long vec)); local void gf2_matrix_square OF((unsigned long *square, unsigned long *mat)); local uLong crc32_combine_ OF((uLong crc1, uLong crc2, z_off64_t len2)); #ifdef DYNAMIC_CRC_TABLE local volatile int crc_table_empty = 1; local z_crc_t FAR crc_table[TBLS][256]; local void make_crc_table OF((void)); #ifdef MAKECRCH local void write_table OF((FILE *, const z_crc_t FAR *)); #endif /* MAKECRCH */ /* Generate tables for a byte-wise 32-bit CRC calculation on the polynomial: x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x+1. Polynomials over GF(2) are represented in binary, one bit per coefficient, with the lowest powers in the most significant bit. Then adding polynomials is just exclusive-or, and multiplying a polynomial by x is a right shift by one. If we call the above polynomial p, and represent a byte as the polynomial q, also with the lowest power in the most significant bit (so the byte 0xb1 is the polynomial x^7+x^3+x+1), then the CRC is (q*x^32) mod p, where a mod b means the remainder after dividing a by b. This calculation is done using the shift-register method of multiplying and taking the remainder. The register is initialized to zero, and for each incoming bit, x^32 is added mod p to the register if the bit is a one (where x^32 mod p is p+x^32 = x^26+...+1), and the register is multiplied mod p by x (which is shifting right by one and adding x^32 mod p if the bit shifted out is a one). We start with the highest power (least significant bit) of q and repeat for all eight bits of q. The first table is simply the CRC of all possible eight bit values. This is all the information needed to generate CRCs on data a byte at a time for all combinations of CRC register values and incoming bytes. The remaining tables allow for word-at-a-time CRC calculation for both big-endian and little- endian machines, where a word is four bytes. */ local void make_crc_table() { z_crc_t c; int n, k; z_crc_t poly; /* polynomial exclusive-or pattern */ /* terms of polynomial defining this crc (except x^32): */ static volatile int first = 1; /* flag to limit concurrent making */ static const unsigned char p[] = {0,1,2,4,5,7,8,10,11,12,16,22,23,26}; /* See if another task is already doing this (not thread-safe, but better than nothing -- significantly reduces duration of vulnerability in case the advice about DYNAMIC_CRC_TABLE is ignored) */ if (first) { first = 0; /* make exclusive-or pattern from polynomial (0xedb88320UL) */ poly = 0; for (n = 0; n < (int)(sizeof(p)/sizeof(unsigned char)); n++) poly |= (z_crc_t)1 << (31 - p[n]); /* generate a crc for every 8-bit value */ for (n = 0; n < 256; n++) { c = (z_crc_t)n; for (k = 0; k < 8; k++) c = c & 1 ? 
poly ^ (c >> 1) : c >> 1; crc_table[0][n] = c; } #ifdef BYFOUR /* generate crc for each value followed by one, two, and three zeros, and then the byte reversal of those as well as the first table */ for (n = 0; n < 256; n++) { c = crc_table[0][n]; crc_table[4][n] = ZSWAP32(c); for (k = 1; k < 4; k++) { c = crc_table[0][c & 0xff] ^ (c >> 8); crc_table[k][n] = c; crc_table[k + 4][n] = ZSWAP32(c); } } #endif /* BYFOUR */ crc_table_empty = 0; } else { /* not first */ /* wait for the other guy to finish (not efficient, but rare) */ while (crc_table_empty) ; } #ifdef MAKECRCH /* write out CRC tables to crc32.h */ { FILE *out; out = fopen("crc32.h", "w"); if (out == NULL) return; fprintf(out, "/* crc32.h -- tables for rapid CRC calculation\n"); fprintf(out, " * Generated automatically by crc32.c\n */\n\n"); fprintf(out, "local const z_crc_t FAR "); fprintf(out, "crc_table[TBLS][256] =\n{\n {\n"); write_table(out, crc_table[0]); # ifdef BYFOUR fprintf(out, "#ifdef BYFOUR\n"); for (k = 1; k < 8; k++) { fprintf(out, " },\n {\n"); write_table(out, crc_table[k]); } fprintf(out, "#endif\n"); # endif /* BYFOUR */ fprintf(out, " }\n};\n"); fclose(out); } #endif /* MAKECRCH */ } #ifdef MAKECRCH local void write_table(out, table) FILE *out; const z_crc_t FAR *table; { int n; for (n = 0; n < 256; n++) fprintf(out, "%s0x%08lxUL%s", n % 5 ? "" : " ", (unsigned long)(table[n]), n == 255 ? "\n" : (n % 5 == 4 ? ",\n" : ", ")); } #endif /* MAKECRCH */ #else /* !DYNAMIC_CRC_TABLE */ /* ======================================================================== * Tables of CRC-32s of all single-byte values, made by make_crc_table(). */ #include "crc32.h" #endif /* DYNAMIC_CRC_TABLE */ /* ========================================================================= * This function can be used by asm versions of crc32() */ const z_crc_t FAR * ZEXPORT get_crc_table() { #ifdef DYNAMIC_CRC_TABLE if (crc_table_empty) make_crc_table(); #endif /* DYNAMIC_CRC_TABLE */ return (const z_crc_t FAR *)crc_table; } /* ========================================================================= */ #define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8) #define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1 /* ========================================================================= */ unsigned long ZEXPORT crc32_z(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; z_size_t len; { if (buf == Z_NULL) return 0UL; #ifdef DYNAMIC_CRC_TABLE if (crc_table_empty) make_crc_table(); #endif /* DYNAMIC_CRC_TABLE */ #ifdef BYFOUR if (sizeof(void *) == sizeof(ptrdiff_t)) { z_crc_t endian; endian = 1; if (*((unsigned char *)(&endian))) return crc32_little(crc, buf, len); else return crc32_big(crc, buf, len); } #endif /* BYFOUR */ crc = crc ^ 0xffffffffUL; while (len >= 8) { DO8; len -= 8; } if (len) do { DO1; } while (--len); return crc ^ 0xffffffffUL; } /* ========================================================================= */ unsigned long ZEXPORT crc32(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; uInt len; { return crc32_z(crc, buf, len); } #ifdef BYFOUR /* This BYFOUR code accesses the passed unsigned char * buffer with a 32-bit integer pointer type. This violates the strict aliasing rule, where a compiler can assume, for optimization purposes, that two pointers to fundamentally different types won't ever point to the same memory. This can manifest as a problem only if one of the pointers is written to. This code only reads from those pointers. 
So long as this code remains isolated in this compilation unit, there won't be a problem. For this reason, this code should not be copied and pasted into a compilation unit in which other code writes to the buffer that is passed to these routines. */ /* ========================================================================= */ #define DOLIT4 c ^= *buf4++; \ c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \ crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24] #define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4 /* ========================================================================= */ local unsigned long crc32_little(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; z_size_t len; { register z_crc_t c; register const z_crc_t FAR *buf4; c = (z_crc_t)crc; c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); len--; } buf4 = (const z_crc_t FAR *)(const void FAR *)buf; while (len >= 32) { DOLIT32; len -= 32; } while (len >= 4) { DOLIT4; len -= 4; } buf = (const unsigned char FAR *)buf4; if (len) do { c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8); } while (--len); c = ~c; return (unsigned long)c; } /* ========================================================================= */ #define DOBIG4 c ^= *buf4++; \ c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \ crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24] #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4 /* ========================================================================= */ local unsigned long crc32_big(crc, buf, len) unsigned long crc; const unsigned char FAR *buf; z_size_t len; { register z_crc_t c; register const z_crc_t FAR *buf4; c = ZSWAP32((z_crc_t)crc); c = ~c; while (len && ((ptrdiff_t)buf & 3)) { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); len--; } buf4 = (const z_crc_t FAR *)(const void FAR *)buf; while (len >= 32) { DOBIG32; len -= 32; } while (len >= 4) { DOBIG4; len -= 4; } buf = (const unsigned char FAR *)buf4; if (len) do { c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8); } while (--len); c = ~c; return (unsigned long)(ZSWAP32(c)); } #endif /* BYFOUR */ #define GF2_DIM 32 /* dimension of GF(2) vectors (length of CRC) */ /* ========================================================================= */ local unsigned long gf2_matrix_times(mat, vec) unsigned long *mat; unsigned long vec; { unsigned long sum; sum = 0; while (vec) { if (vec & 1) sum ^= *mat; vec >>= 1; mat++; } return sum; } /* ========================================================================= */ local void gf2_matrix_square(square, mat) unsigned long *square; unsigned long *mat; { int n; for (n = 0; n < GF2_DIM; n++) square[n] = gf2_matrix_times(mat, mat[n]); } /* ========================================================================= */ local uLong crc32_combine_(crc1, crc2, len2) uLong crc1; uLong crc2; z_off64_t len2; { int n; unsigned long row; unsigned long even[GF2_DIM]; /* even-power-of-two zeros operator */ unsigned long odd[GF2_DIM]; /* odd-power-of-two zeros operator */ /* degenerate case (also disallow negative lengths) */ if (len2 <= 0) return crc1; /* put operator for one zero bit in odd */ odd[0] = 0xedb88320UL; /* CRC-32 polynomial */ row = 1; for (n = 1; n < GF2_DIM; n++) { odd[n] = row; row <<= 1; } /* put operator for two zero bits in even */ gf2_matrix_square(even, odd); /* put operator for four zero bits in odd */ gf2_matrix_square(odd, even); /* apply len2 zeros to crc1 
(first square will put the operator for one zero byte, eight zero bits, in even) */ do { /* apply zeros operator for this bit of len2 */ gf2_matrix_square(even, odd); if (len2 & 1) crc1 = gf2_matrix_times(even, crc1); len2 >>= 1; /* if no more bits set, then done */ if (len2 == 0) break; /* another iteration of the loop with odd and even swapped */ gf2_matrix_square(odd, even); if (len2 & 1) crc1 = gf2_matrix_times(odd, crc1); len2 >>= 1; /* if no more bits set, then done */ } while (len2 != 0); /* return combined crc */ crc1 ^= crc2; return crc1; } /* ========================================================================= */ uLong ZEXPORT crc32_combine(crc1, crc2, len2) uLong crc1; uLong crc2; z_off_t len2; { return crc32_combine_(crc1, crc2, len2); } uLong ZEXPORT crc32_combine64(crc1, crc2, len2) uLong crc1; uLong crc2; z_off64_t len2; { return crc32_combine_(crc1, crc2, len2); } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/crc32.h0000644000076500000240000007354213572250760020560 0ustar alcaeusstaff/* crc32.h -- tables for rapid CRC calculation * Generated automatically by crc32.c */ local const z_crc_t FAR crc_table[TBLS][256] = { { 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, 0x8bbeb8eaUL, 0xfcb9887cUL, 0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, 0x36034af6UL, 
0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 0x66063bcaUL, 0x11010b5cUL, 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, 0x2d02ef8dUL #ifdef BYFOUR }, { 0x00000000UL, 0x191b3141UL, 0x32366282UL, 0x2b2d53c3UL, 0x646cc504UL, 0x7d77f445UL, 0x565aa786UL, 0x4f4196c7UL, 0xc8d98a08UL, 0xd1c2bb49UL, 0xfaefe88aUL, 0xe3f4d9cbUL, 0xacb54f0cUL, 0xb5ae7e4dUL, 0x9e832d8eUL, 0x87981ccfUL, 0x4ac21251UL, 0x53d92310UL, 0x78f470d3UL, 0x61ef4192UL, 0x2eaed755UL, 0x37b5e614UL, 0x1c98b5d7UL, 0x05838496UL, 0x821b9859UL, 0x9b00a918UL, 0xb02dfadbUL, 0xa936cb9aUL, 0xe6775d5dUL, 0xff6c6c1cUL, 0xd4413fdfUL, 0xcd5a0e9eUL, 0x958424a2UL, 0x8c9f15e3UL, 0xa7b24620UL, 0xbea97761UL, 0xf1e8e1a6UL, 0xe8f3d0e7UL, 0xc3de8324UL, 0xdac5b265UL, 0x5d5daeaaUL, 0x44469febUL, 0x6f6bcc28UL, 0x7670fd69UL, 0x39316baeUL, 0x202a5aefUL, 0x0b07092cUL, 0x121c386dUL, 0xdf4636f3UL, 0xc65d07b2UL, 0xed705471UL, 0xf46b6530UL, 0xbb2af3f7UL, 0xa231c2b6UL, 0x891c9175UL, 0x9007a034UL, 0x179fbcfbUL, 0x0e848dbaUL, 0x25a9de79UL, 0x3cb2ef38UL, 0x73f379ffUL, 0x6ae848beUL, 0x41c51b7dUL, 0x58de2a3cUL, 0xf0794f05UL, 0xe9627e44UL, 0xc24f2d87UL, 0xdb541cc6UL, 0x94158a01UL, 0x8d0ebb40UL, 0xa623e883UL, 0xbf38d9c2UL, 0x38a0c50dUL, 0x21bbf44cUL, 0x0a96a78fUL, 0x138d96ceUL, 0x5ccc0009UL, 0x45d73148UL, 0x6efa628bUL, 0x77e153caUL, 0xbabb5d54UL, 0xa3a06c15UL, 0x888d3fd6UL, 0x91960e97UL, 0xded79850UL, 0xc7cca911UL, 0xece1fad2UL, 0xf5facb93UL, 0x7262d75cUL, 0x6b79e61dUL, 0x4054b5deUL, 0x594f849fUL, 0x160e1258UL, 0x0f152319UL, 0x243870daUL, 0x3d23419bUL, 0x65fd6ba7UL, 0x7ce65ae6UL, 0x57cb0925UL, 0x4ed03864UL, 0x0191aea3UL, 0x188a9fe2UL, 0x33a7cc21UL, 0x2abcfd60UL, 0xad24e1afUL, 0xb43fd0eeUL, 0x9f12832dUL, 0x8609b26cUL, 0xc94824abUL, 0xd05315eaUL, 0xfb7e4629UL, 0xe2657768UL, 0x2f3f79f6UL, 0x362448b7UL, 0x1d091b74UL, 0x04122a35UL, 0x4b53bcf2UL, 0x52488db3UL, 0x7965de70UL, 0x607eef31UL, 0xe7e6f3feUL, 0xfefdc2bfUL, 0xd5d0917cUL, 0xcccba03dUL, 0x838a36faUL, 0x9a9107bbUL, 0xb1bc5478UL, 0xa8a76539UL, 0x3b83984bUL, 0x2298a90aUL, 0x09b5fac9UL, 0x10aecb88UL, 0x5fef5d4fUL, 0x46f46c0eUL, 0x6dd93fcdUL, 0x74c20e8cUL, 0xf35a1243UL, 0xea412302UL, 0xc16c70c1UL, 0xd8774180UL, 0x9736d747UL, 0x8e2de606UL, 0xa500b5c5UL, 0xbc1b8484UL, 0x71418a1aUL, 0x685abb5bUL, 0x4377e898UL, 0x5a6cd9d9UL, 0x152d4f1eUL, 0x0c367e5fUL, 0x271b2d9cUL, 0x3e001cddUL, 0xb9980012UL, 0xa0833153UL, 0x8bae6290UL, 0x92b553d1UL, 0xddf4c516UL, 0xc4eff457UL, 0xefc2a794UL, 0xf6d996d5UL, 0xae07bce9UL, 0xb71c8da8UL, 0x9c31de6bUL, 0x852aef2aUL, 0xca6b79edUL, 0xd37048acUL, 0xf85d1b6fUL, 
0xe1462a2eUL, 0x66de36e1UL, 0x7fc507a0UL, 0x54e85463UL, 0x4df36522UL, 0x02b2f3e5UL, 0x1ba9c2a4UL, 0x30849167UL, 0x299fa026UL, 0xe4c5aeb8UL, 0xfdde9ff9UL, 0xd6f3cc3aUL, 0xcfe8fd7bUL, 0x80a96bbcUL, 0x99b25afdUL, 0xb29f093eUL, 0xab84387fUL, 0x2c1c24b0UL, 0x350715f1UL, 0x1e2a4632UL, 0x07317773UL, 0x4870e1b4UL, 0x516bd0f5UL, 0x7a468336UL, 0x635db277UL, 0xcbfad74eUL, 0xd2e1e60fUL, 0xf9ccb5ccUL, 0xe0d7848dUL, 0xaf96124aUL, 0xb68d230bUL, 0x9da070c8UL, 0x84bb4189UL, 0x03235d46UL, 0x1a386c07UL, 0x31153fc4UL, 0x280e0e85UL, 0x674f9842UL, 0x7e54a903UL, 0x5579fac0UL, 0x4c62cb81UL, 0x8138c51fUL, 0x9823f45eUL, 0xb30ea79dUL, 0xaa1596dcUL, 0xe554001bUL, 0xfc4f315aUL, 0xd7626299UL, 0xce7953d8UL, 0x49e14f17UL, 0x50fa7e56UL, 0x7bd72d95UL, 0x62cc1cd4UL, 0x2d8d8a13UL, 0x3496bb52UL, 0x1fbbe891UL, 0x06a0d9d0UL, 0x5e7ef3ecUL, 0x4765c2adUL, 0x6c48916eUL, 0x7553a02fUL, 0x3a1236e8UL, 0x230907a9UL, 0x0824546aUL, 0x113f652bUL, 0x96a779e4UL, 0x8fbc48a5UL, 0xa4911b66UL, 0xbd8a2a27UL, 0xf2cbbce0UL, 0xebd08da1UL, 0xc0fdde62UL, 0xd9e6ef23UL, 0x14bce1bdUL, 0x0da7d0fcUL, 0x268a833fUL, 0x3f91b27eUL, 0x70d024b9UL, 0x69cb15f8UL, 0x42e6463bUL, 0x5bfd777aUL, 0xdc656bb5UL, 0xc57e5af4UL, 0xee530937UL, 0xf7483876UL, 0xb809aeb1UL, 0xa1129ff0UL, 0x8a3fcc33UL, 0x9324fd72UL }, { 0x00000000UL, 0x01c26a37UL, 0x0384d46eUL, 0x0246be59UL, 0x0709a8dcUL, 0x06cbc2ebUL, 0x048d7cb2UL, 0x054f1685UL, 0x0e1351b8UL, 0x0fd13b8fUL, 0x0d9785d6UL, 0x0c55efe1UL, 0x091af964UL, 0x08d89353UL, 0x0a9e2d0aUL, 0x0b5c473dUL, 0x1c26a370UL, 0x1de4c947UL, 0x1fa2771eUL, 0x1e601d29UL, 0x1b2f0bacUL, 0x1aed619bUL, 0x18abdfc2UL, 0x1969b5f5UL, 0x1235f2c8UL, 0x13f798ffUL, 0x11b126a6UL, 0x10734c91UL, 0x153c5a14UL, 0x14fe3023UL, 0x16b88e7aUL, 0x177ae44dUL, 0x384d46e0UL, 0x398f2cd7UL, 0x3bc9928eUL, 0x3a0bf8b9UL, 0x3f44ee3cUL, 0x3e86840bUL, 0x3cc03a52UL, 0x3d025065UL, 0x365e1758UL, 0x379c7d6fUL, 0x35dac336UL, 0x3418a901UL, 0x3157bf84UL, 0x3095d5b3UL, 0x32d36beaUL, 0x331101ddUL, 0x246be590UL, 0x25a98fa7UL, 0x27ef31feUL, 0x262d5bc9UL, 0x23624d4cUL, 0x22a0277bUL, 0x20e69922UL, 0x2124f315UL, 0x2a78b428UL, 0x2bbade1fUL, 0x29fc6046UL, 0x283e0a71UL, 0x2d711cf4UL, 0x2cb376c3UL, 0x2ef5c89aUL, 0x2f37a2adUL, 0x709a8dc0UL, 0x7158e7f7UL, 0x731e59aeUL, 0x72dc3399UL, 0x7793251cUL, 0x76514f2bUL, 0x7417f172UL, 0x75d59b45UL, 0x7e89dc78UL, 0x7f4bb64fUL, 0x7d0d0816UL, 0x7ccf6221UL, 0x798074a4UL, 0x78421e93UL, 0x7a04a0caUL, 0x7bc6cafdUL, 0x6cbc2eb0UL, 0x6d7e4487UL, 0x6f38fadeUL, 0x6efa90e9UL, 0x6bb5866cUL, 0x6a77ec5bUL, 0x68315202UL, 0x69f33835UL, 0x62af7f08UL, 0x636d153fUL, 0x612bab66UL, 0x60e9c151UL, 0x65a6d7d4UL, 0x6464bde3UL, 0x662203baUL, 0x67e0698dUL, 0x48d7cb20UL, 0x4915a117UL, 0x4b531f4eUL, 0x4a917579UL, 0x4fde63fcUL, 0x4e1c09cbUL, 0x4c5ab792UL, 0x4d98dda5UL, 0x46c49a98UL, 0x4706f0afUL, 0x45404ef6UL, 0x448224c1UL, 0x41cd3244UL, 0x400f5873UL, 0x4249e62aUL, 0x438b8c1dUL, 0x54f16850UL, 0x55330267UL, 0x5775bc3eUL, 0x56b7d609UL, 0x53f8c08cUL, 0x523aaabbUL, 0x507c14e2UL, 0x51be7ed5UL, 0x5ae239e8UL, 0x5b2053dfUL, 0x5966ed86UL, 0x58a487b1UL, 0x5deb9134UL, 0x5c29fb03UL, 0x5e6f455aUL, 0x5fad2f6dUL, 0xe1351b80UL, 0xe0f771b7UL, 0xe2b1cfeeUL, 0xe373a5d9UL, 0xe63cb35cUL, 0xe7fed96bUL, 0xe5b86732UL, 0xe47a0d05UL, 0xef264a38UL, 0xeee4200fUL, 0xeca29e56UL, 0xed60f461UL, 0xe82fe2e4UL, 0xe9ed88d3UL, 0xebab368aUL, 0xea695cbdUL, 0xfd13b8f0UL, 0xfcd1d2c7UL, 0xfe976c9eUL, 0xff5506a9UL, 0xfa1a102cUL, 0xfbd87a1bUL, 0xf99ec442UL, 0xf85cae75UL, 0xf300e948UL, 0xf2c2837fUL, 0xf0843d26UL, 0xf1465711UL, 0xf4094194UL, 0xf5cb2ba3UL, 0xf78d95faUL, 0xf64fffcdUL, 0xd9785d60UL, 0xd8ba3757UL, 0xdafc890eUL, 0xdb3ee339UL, 
0xde71f5bcUL, 0xdfb39f8bUL, 0xddf521d2UL, 0xdc374be5UL, 0xd76b0cd8UL, 0xd6a966efUL, 0xd4efd8b6UL, 0xd52db281UL, 0xd062a404UL, 0xd1a0ce33UL, 0xd3e6706aUL, 0xd2241a5dUL, 0xc55efe10UL, 0xc49c9427UL, 0xc6da2a7eUL, 0xc7184049UL, 0xc25756ccUL, 0xc3953cfbUL, 0xc1d382a2UL, 0xc011e895UL, 0xcb4dafa8UL, 0xca8fc59fUL, 0xc8c97bc6UL, 0xc90b11f1UL, 0xcc440774UL, 0xcd866d43UL, 0xcfc0d31aUL, 0xce02b92dUL, 0x91af9640UL, 0x906dfc77UL, 0x922b422eUL, 0x93e92819UL, 0x96a63e9cUL, 0x976454abUL, 0x9522eaf2UL, 0x94e080c5UL, 0x9fbcc7f8UL, 0x9e7eadcfUL, 0x9c381396UL, 0x9dfa79a1UL, 0x98b56f24UL, 0x99770513UL, 0x9b31bb4aUL, 0x9af3d17dUL, 0x8d893530UL, 0x8c4b5f07UL, 0x8e0de15eUL, 0x8fcf8b69UL, 0x8a809decUL, 0x8b42f7dbUL, 0x89044982UL, 0x88c623b5UL, 0x839a6488UL, 0x82580ebfUL, 0x801eb0e6UL, 0x81dcdad1UL, 0x8493cc54UL, 0x8551a663UL, 0x8717183aUL, 0x86d5720dUL, 0xa9e2d0a0UL, 0xa820ba97UL, 0xaa6604ceUL, 0xaba46ef9UL, 0xaeeb787cUL, 0xaf29124bUL, 0xad6fac12UL, 0xacadc625UL, 0xa7f18118UL, 0xa633eb2fUL, 0xa4755576UL, 0xa5b73f41UL, 0xa0f829c4UL, 0xa13a43f3UL, 0xa37cfdaaUL, 0xa2be979dUL, 0xb5c473d0UL, 0xb40619e7UL, 0xb640a7beUL, 0xb782cd89UL, 0xb2cddb0cUL, 0xb30fb13bUL, 0xb1490f62UL, 0xb08b6555UL, 0xbbd72268UL, 0xba15485fUL, 0xb853f606UL, 0xb9919c31UL, 0xbcde8ab4UL, 0xbd1ce083UL, 0xbf5a5edaUL, 0xbe9834edUL }, { 0x00000000UL, 0xb8bc6765UL, 0xaa09c88bUL, 0x12b5afeeUL, 0x8f629757UL, 0x37def032UL, 0x256b5fdcUL, 0x9dd738b9UL, 0xc5b428efUL, 0x7d084f8aUL, 0x6fbde064UL, 0xd7018701UL, 0x4ad6bfb8UL, 0xf26ad8ddUL, 0xe0df7733UL, 0x58631056UL, 0x5019579fUL, 0xe8a530faUL, 0xfa109f14UL, 0x42acf871UL, 0xdf7bc0c8UL, 0x67c7a7adUL, 0x75720843UL, 0xcdce6f26UL, 0x95ad7f70UL, 0x2d111815UL, 0x3fa4b7fbUL, 0x8718d09eUL, 0x1acfe827UL, 0xa2738f42UL, 0xb0c620acUL, 0x087a47c9UL, 0xa032af3eUL, 0x188ec85bUL, 0x0a3b67b5UL, 0xb28700d0UL, 0x2f503869UL, 0x97ec5f0cUL, 0x8559f0e2UL, 0x3de59787UL, 0x658687d1UL, 0xdd3ae0b4UL, 0xcf8f4f5aUL, 0x7733283fUL, 0xeae41086UL, 0x525877e3UL, 0x40edd80dUL, 0xf851bf68UL, 0xf02bf8a1UL, 0x48979fc4UL, 0x5a22302aUL, 0xe29e574fUL, 0x7f496ff6UL, 0xc7f50893UL, 0xd540a77dUL, 0x6dfcc018UL, 0x359fd04eUL, 0x8d23b72bUL, 0x9f9618c5UL, 0x272a7fa0UL, 0xbafd4719UL, 0x0241207cUL, 0x10f48f92UL, 0xa848e8f7UL, 0x9b14583dUL, 0x23a83f58UL, 0x311d90b6UL, 0x89a1f7d3UL, 0x1476cf6aUL, 0xaccaa80fUL, 0xbe7f07e1UL, 0x06c36084UL, 0x5ea070d2UL, 0xe61c17b7UL, 0xf4a9b859UL, 0x4c15df3cUL, 0xd1c2e785UL, 0x697e80e0UL, 0x7bcb2f0eUL, 0xc377486bUL, 0xcb0d0fa2UL, 0x73b168c7UL, 0x6104c729UL, 0xd9b8a04cUL, 0x446f98f5UL, 0xfcd3ff90UL, 0xee66507eUL, 0x56da371bUL, 0x0eb9274dUL, 0xb6054028UL, 0xa4b0efc6UL, 0x1c0c88a3UL, 0x81dbb01aUL, 0x3967d77fUL, 0x2bd27891UL, 0x936e1ff4UL, 0x3b26f703UL, 0x839a9066UL, 0x912f3f88UL, 0x299358edUL, 0xb4446054UL, 0x0cf80731UL, 0x1e4da8dfUL, 0xa6f1cfbaUL, 0xfe92dfecUL, 0x462eb889UL, 0x549b1767UL, 0xec277002UL, 0x71f048bbUL, 0xc94c2fdeUL, 0xdbf98030UL, 0x6345e755UL, 0x6b3fa09cUL, 0xd383c7f9UL, 0xc1366817UL, 0x798a0f72UL, 0xe45d37cbUL, 0x5ce150aeUL, 0x4e54ff40UL, 0xf6e89825UL, 0xae8b8873UL, 0x1637ef16UL, 0x048240f8UL, 0xbc3e279dUL, 0x21e91f24UL, 0x99557841UL, 0x8be0d7afUL, 0x335cb0caUL, 0xed59b63bUL, 0x55e5d15eUL, 0x47507eb0UL, 0xffec19d5UL, 0x623b216cUL, 0xda874609UL, 0xc832e9e7UL, 0x708e8e82UL, 0x28ed9ed4UL, 0x9051f9b1UL, 0x82e4565fUL, 0x3a58313aUL, 0xa78f0983UL, 0x1f336ee6UL, 0x0d86c108UL, 0xb53aa66dUL, 0xbd40e1a4UL, 0x05fc86c1UL, 0x1749292fUL, 0xaff54e4aUL, 0x322276f3UL, 0x8a9e1196UL, 0x982bbe78UL, 0x2097d91dUL, 0x78f4c94bUL, 0xc048ae2eUL, 0xd2fd01c0UL, 0x6a4166a5UL, 0xf7965e1cUL, 0x4f2a3979UL, 0x5d9f9697UL, 0xe523f1f2UL, 0x4d6b1905UL, 
0xf5d77e60UL, 0xe762d18eUL, 0x5fdeb6ebUL, 0xc2098e52UL, 0x7ab5e937UL, 0x680046d9UL, 0xd0bc21bcUL, 0x88df31eaUL, 0x3063568fUL, 0x22d6f961UL, 0x9a6a9e04UL, 0x07bda6bdUL, 0xbf01c1d8UL, 0xadb46e36UL, 0x15080953UL, 0x1d724e9aUL, 0xa5ce29ffUL, 0xb77b8611UL, 0x0fc7e174UL, 0x9210d9cdUL, 0x2aacbea8UL, 0x38191146UL, 0x80a57623UL, 0xd8c66675UL, 0x607a0110UL, 0x72cfaefeUL, 0xca73c99bUL, 0x57a4f122UL, 0xef189647UL, 0xfdad39a9UL, 0x45115eccUL, 0x764dee06UL, 0xcef18963UL, 0xdc44268dUL, 0x64f841e8UL, 0xf92f7951UL, 0x41931e34UL, 0x5326b1daUL, 0xeb9ad6bfUL, 0xb3f9c6e9UL, 0x0b45a18cUL, 0x19f00e62UL, 0xa14c6907UL, 0x3c9b51beUL, 0x842736dbUL, 0x96929935UL, 0x2e2efe50UL, 0x2654b999UL, 0x9ee8defcUL, 0x8c5d7112UL, 0x34e11677UL, 0xa9362eceUL, 0x118a49abUL, 0x033fe645UL, 0xbb838120UL, 0xe3e09176UL, 0x5b5cf613UL, 0x49e959fdUL, 0xf1553e98UL, 0x6c820621UL, 0xd43e6144UL, 0xc68bceaaUL, 0x7e37a9cfUL, 0xd67f4138UL, 0x6ec3265dUL, 0x7c7689b3UL, 0xc4caeed6UL, 0x591dd66fUL, 0xe1a1b10aUL, 0xf3141ee4UL, 0x4ba87981UL, 0x13cb69d7UL, 0xab770eb2UL, 0xb9c2a15cUL, 0x017ec639UL, 0x9ca9fe80UL, 0x241599e5UL, 0x36a0360bUL, 0x8e1c516eUL, 0x866616a7UL, 0x3eda71c2UL, 0x2c6fde2cUL, 0x94d3b949UL, 0x090481f0UL, 0xb1b8e695UL, 0xa30d497bUL, 0x1bb12e1eUL, 0x43d23e48UL, 0xfb6e592dUL, 0xe9dbf6c3UL, 0x516791a6UL, 0xccb0a91fUL, 0x740cce7aUL, 0x66b96194UL, 0xde0506f1UL }, { 0x00000000UL, 0x96300777UL, 0x2c610eeeUL, 0xba510999UL, 0x19c46d07UL, 0x8ff46a70UL, 0x35a563e9UL, 0xa395649eUL, 0x3288db0eUL, 0xa4b8dc79UL, 0x1ee9d5e0UL, 0x88d9d297UL, 0x2b4cb609UL, 0xbd7cb17eUL, 0x072db8e7UL, 0x911dbf90UL, 0x6410b71dUL, 0xf220b06aUL, 0x4871b9f3UL, 0xde41be84UL, 0x7dd4da1aUL, 0xebe4dd6dUL, 0x51b5d4f4UL, 0xc785d383UL, 0x56986c13UL, 0xc0a86b64UL, 0x7af962fdUL, 0xecc9658aUL, 0x4f5c0114UL, 0xd96c0663UL, 0x633d0ffaUL, 0xf50d088dUL, 0xc8206e3bUL, 0x5e10694cUL, 0xe44160d5UL, 0x727167a2UL, 0xd1e4033cUL, 0x47d4044bUL, 0xfd850dd2UL, 0x6bb50aa5UL, 0xfaa8b535UL, 0x6c98b242UL, 0xd6c9bbdbUL, 0x40f9bcacUL, 0xe36cd832UL, 0x755cdf45UL, 0xcf0dd6dcUL, 0x593dd1abUL, 0xac30d926UL, 0x3a00de51UL, 0x8051d7c8UL, 0x1661d0bfUL, 0xb5f4b421UL, 0x23c4b356UL, 0x9995bacfUL, 0x0fa5bdb8UL, 0x9eb80228UL, 0x0888055fUL, 0xb2d90cc6UL, 0x24e90bb1UL, 0x877c6f2fUL, 0x114c6858UL, 0xab1d61c1UL, 0x3d2d66b6UL, 0x9041dc76UL, 0x0671db01UL, 0xbc20d298UL, 0x2a10d5efUL, 0x8985b171UL, 0x1fb5b606UL, 0xa5e4bf9fUL, 0x33d4b8e8UL, 0xa2c90778UL, 0x34f9000fUL, 0x8ea80996UL, 0x18980ee1UL, 0xbb0d6a7fUL, 0x2d3d6d08UL, 0x976c6491UL, 0x015c63e6UL, 0xf4516b6bUL, 0x62616c1cUL, 0xd8306585UL, 0x4e0062f2UL, 0xed95066cUL, 0x7ba5011bUL, 0xc1f40882UL, 0x57c40ff5UL, 0xc6d9b065UL, 0x50e9b712UL, 0xeab8be8bUL, 0x7c88b9fcUL, 0xdf1ddd62UL, 0x492dda15UL, 0xf37cd38cUL, 0x654cd4fbUL, 0x5861b24dUL, 0xce51b53aUL, 0x7400bca3UL, 0xe230bbd4UL, 0x41a5df4aUL, 0xd795d83dUL, 0x6dc4d1a4UL, 0xfbf4d6d3UL, 0x6ae96943UL, 0xfcd96e34UL, 0x468867adUL, 0xd0b860daUL, 0x732d0444UL, 0xe51d0333UL, 0x5f4c0aaaUL, 0xc97c0dddUL, 0x3c710550UL, 0xaa410227UL, 0x10100bbeUL, 0x86200cc9UL, 0x25b56857UL, 0xb3856f20UL, 0x09d466b9UL, 0x9fe461ceUL, 0x0ef9de5eUL, 0x98c9d929UL, 0x2298d0b0UL, 0xb4a8d7c7UL, 0x173db359UL, 0x810db42eUL, 0x3b5cbdb7UL, 0xad6cbac0UL, 0x2083b8edUL, 0xb6b3bf9aUL, 0x0ce2b603UL, 0x9ad2b174UL, 0x3947d5eaUL, 0xaf77d29dUL, 0x1526db04UL, 0x8316dc73UL, 0x120b63e3UL, 0x843b6494UL, 0x3e6a6d0dUL, 0xa85a6a7aUL, 0x0bcf0ee4UL, 0x9dff0993UL, 0x27ae000aUL, 0xb19e077dUL, 0x44930ff0UL, 0xd2a30887UL, 0x68f2011eUL, 0xfec20669UL, 0x5d5762f7UL, 0xcb676580UL, 0x71366c19UL, 0xe7066b6eUL, 0x761bd4feUL, 0xe02bd389UL, 0x5a7ada10UL, 0xcc4add67UL, 0x6fdfb9f9UL, 0xf9efbe8eUL, 
0x43beb717UL, 0xd58eb060UL, 0xe8a3d6d6UL, 0x7e93d1a1UL, 0xc4c2d838UL, 0x52f2df4fUL, 0xf167bbd1UL, 0x6757bca6UL, 0xdd06b53fUL, 0x4b36b248UL, 0xda2b0dd8UL, 0x4c1b0aafUL, 0xf64a0336UL, 0x607a0441UL, 0xc3ef60dfUL, 0x55df67a8UL, 0xef8e6e31UL, 0x79be6946UL, 0x8cb361cbUL, 0x1a8366bcUL, 0xa0d26f25UL, 0x36e26852UL, 0x95770cccUL, 0x03470bbbUL, 0xb9160222UL, 0x2f260555UL, 0xbe3bbac5UL, 0x280bbdb2UL, 0x925ab42bUL, 0x046ab35cUL, 0xa7ffd7c2UL, 0x31cfd0b5UL, 0x8b9ed92cUL, 0x1daede5bUL, 0xb0c2649bUL, 0x26f263ecUL, 0x9ca36a75UL, 0x0a936d02UL, 0xa906099cUL, 0x3f360eebUL, 0x85670772UL, 0x13570005UL, 0x824abf95UL, 0x147ab8e2UL, 0xae2bb17bUL, 0x381bb60cUL, 0x9b8ed292UL, 0x0dbed5e5UL, 0xb7efdc7cUL, 0x21dfdb0bUL, 0xd4d2d386UL, 0x42e2d4f1UL, 0xf8b3dd68UL, 0x6e83da1fUL, 0xcd16be81UL, 0x5b26b9f6UL, 0xe177b06fUL, 0x7747b718UL, 0xe65a0888UL, 0x706a0fffUL, 0xca3b0666UL, 0x5c0b0111UL, 0xff9e658fUL, 0x69ae62f8UL, 0xd3ff6b61UL, 0x45cf6c16UL, 0x78e20aa0UL, 0xeed20dd7UL, 0x5483044eUL, 0xc2b30339UL, 0x612667a7UL, 0xf71660d0UL, 0x4d476949UL, 0xdb776e3eUL, 0x4a6ad1aeUL, 0xdc5ad6d9UL, 0x660bdf40UL, 0xf03bd837UL, 0x53aebca9UL, 0xc59ebbdeUL, 0x7fcfb247UL, 0xe9ffb530UL, 0x1cf2bdbdUL, 0x8ac2bacaUL, 0x3093b353UL, 0xa6a3b424UL, 0x0536d0baUL, 0x9306d7cdUL, 0x2957de54UL, 0xbf67d923UL, 0x2e7a66b3UL, 0xb84a61c4UL, 0x021b685dUL, 0x942b6f2aUL, 0x37be0bb4UL, 0xa18e0cc3UL, 0x1bdf055aUL, 0x8def022dUL }, { 0x00000000UL, 0x41311b19UL, 0x82623632UL, 0xc3532d2bUL, 0x04c56c64UL, 0x45f4777dUL, 0x86a75a56UL, 0xc796414fUL, 0x088ad9c8UL, 0x49bbc2d1UL, 0x8ae8effaUL, 0xcbd9f4e3UL, 0x0c4fb5acUL, 0x4d7eaeb5UL, 0x8e2d839eUL, 0xcf1c9887UL, 0x5112c24aUL, 0x1023d953UL, 0xd370f478UL, 0x9241ef61UL, 0x55d7ae2eUL, 0x14e6b537UL, 0xd7b5981cUL, 0x96848305UL, 0x59981b82UL, 0x18a9009bUL, 0xdbfa2db0UL, 0x9acb36a9UL, 0x5d5d77e6UL, 0x1c6c6cffUL, 0xdf3f41d4UL, 0x9e0e5acdUL, 0xa2248495UL, 0xe3159f8cUL, 0x2046b2a7UL, 0x6177a9beUL, 0xa6e1e8f1UL, 0xe7d0f3e8UL, 0x2483dec3UL, 0x65b2c5daUL, 0xaaae5d5dUL, 0xeb9f4644UL, 0x28cc6b6fUL, 0x69fd7076UL, 0xae6b3139UL, 0xef5a2a20UL, 0x2c09070bUL, 0x6d381c12UL, 0xf33646dfUL, 0xb2075dc6UL, 0x715470edUL, 0x30656bf4UL, 0xf7f32abbUL, 0xb6c231a2UL, 0x75911c89UL, 0x34a00790UL, 0xfbbc9f17UL, 0xba8d840eUL, 0x79dea925UL, 0x38efb23cUL, 0xff79f373UL, 0xbe48e86aUL, 0x7d1bc541UL, 0x3c2ade58UL, 0x054f79f0UL, 0x447e62e9UL, 0x872d4fc2UL, 0xc61c54dbUL, 0x018a1594UL, 0x40bb0e8dUL, 0x83e823a6UL, 0xc2d938bfUL, 0x0dc5a038UL, 0x4cf4bb21UL, 0x8fa7960aUL, 0xce968d13UL, 0x0900cc5cUL, 0x4831d745UL, 0x8b62fa6eUL, 0xca53e177UL, 0x545dbbbaUL, 0x156ca0a3UL, 0xd63f8d88UL, 0x970e9691UL, 0x5098d7deUL, 0x11a9ccc7UL, 0xd2fae1ecUL, 0x93cbfaf5UL, 0x5cd76272UL, 0x1de6796bUL, 0xdeb55440UL, 0x9f844f59UL, 0x58120e16UL, 0x1923150fUL, 0xda703824UL, 0x9b41233dUL, 0xa76bfd65UL, 0xe65ae67cUL, 0x2509cb57UL, 0x6438d04eUL, 0xa3ae9101UL, 0xe29f8a18UL, 0x21cca733UL, 0x60fdbc2aUL, 0xafe124adUL, 0xeed03fb4UL, 0x2d83129fUL, 0x6cb20986UL, 0xab2448c9UL, 0xea1553d0UL, 0x29467efbUL, 0x687765e2UL, 0xf6793f2fUL, 0xb7482436UL, 0x741b091dUL, 0x352a1204UL, 0xf2bc534bUL, 0xb38d4852UL, 0x70de6579UL, 0x31ef7e60UL, 0xfef3e6e7UL, 0xbfc2fdfeUL, 0x7c91d0d5UL, 0x3da0cbccUL, 0xfa368a83UL, 0xbb07919aUL, 0x7854bcb1UL, 0x3965a7a8UL, 0x4b98833bUL, 0x0aa99822UL, 0xc9fab509UL, 0x88cbae10UL, 0x4f5def5fUL, 0x0e6cf446UL, 0xcd3fd96dUL, 0x8c0ec274UL, 0x43125af3UL, 0x022341eaUL, 0xc1706cc1UL, 0x804177d8UL, 0x47d73697UL, 0x06e62d8eUL, 0xc5b500a5UL, 0x84841bbcUL, 0x1a8a4171UL, 0x5bbb5a68UL, 0x98e87743UL, 0xd9d96c5aUL, 0x1e4f2d15UL, 0x5f7e360cUL, 0x9c2d1b27UL, 0xdd1c003eUL, 0x120098b9UL, 0x533183a0UL, 0x9062ae8bUL, 
0xd153b592UL, 0x16c5f4ddUL, 0x57f4efc4UL, 0x94a7c2efUL, 0xd596d9f6UL, 0xe9bc07aeUL, 0xa88d1cb7UL, 0x6bde319cUL, 0x2aef2a85UL, 0xed796bcaUL, 0xac4870d3UL, 0x6f1b5df8UL, 0x2e2a46e1UL, 0xe136de66UL, 0xa007c57fUL, 0x6354e854UL, 0x2265f34dUL, 0xe5f3b202UL, 0xa4c2a91bUL, 0x67918430UL, 0x26a09f29UL, 0xb8aec5e4UL, 0xf99fdefdUL, 0x3accf3d6UL, 0x7bfde8cfUL, 0xbc6ba980UL, 0xfd5ab299UL, 0x3e099fb2UL, 0x7f3884abUL, 0xb0241c2cUL, 0xf1150735UL, 0x32462a1eUL, 0x73773107UL, 0xb4e17048UL, 0xf5d06b51UL, 0x3683467aUL, 0x77b25d63UL, 0x4ed7facbUL, 0x0fe6e1d2UL, 0xccb5ccf9UL, 0x8d84d7e0UL, 0x4a1296afUL, 0x0b238db6UL, 0xc870a09dUL, 0x8941bb84UL, 0x465d2303UL, 0x076c381aUL, 0xc43f1531UL, 0x850e0e28UL, 0x42984f67UL, 0x03a9547eUL, 0xc0fa7955UL, 0x81cb624cUL, 0x1fc53881UL, 0x5ef42398UL, 0x9da70eb3UL, 0xdc9615aaUL, 0x1b0054e5UL, 0x5a314ffcUL, 0x996262d7UL, 0xd85379ceUL, 0x174fe149UL, 0x567efa50UL, 0x952dd77bUL, 0xd41ccc62UL, 0x138a8d2dUL, 0x52bb9634UL, 0x91e8bb1fUL, 0xd0d9a006UL, 0xecf37e5eUL, 0xadc26547UL, 0x6e91486cUL, 0x2fa05375UL, 0xe836123aUL, 0xa9070923UL, 0x6a542408UL, 0x2b653f11UL, 0xe479a796UL, 0xa548bc8fUL, 0x661b91a4UL, 0x272a8abdUL, 0xe0bccbf2UL, 0xa18dd0ebUL, 0x62defdc0UL, 0x23efe6d9UL, 0xbde1bc14UL, 0xfcd0a70dUL, 0x3f838a26UL, 0x7eb2913fUL, 0xb924d070UL, 0xf815cb69UL, 0x3b46e642UL, 0x7a77fd5bUL, 0xb56b65dcUL, 0xf45a7ec5UL, 0x370953eeUL, 0x763848f7UL, 0xb1ae09b8UL, 0xf09f12a1UL, 0x33cc3f8aUL, 0x72fd2493UL }, { 0x00000000UL, 0x376ac201UL, 0x6ed48403UL, 0x59be4602UL, 0xdca80907UL, 0xebc2cb06UL, 0xb27c8d04UL, 0x85164f05UL, 0xb851130eUL, 0x8f3bd10fUL, 0xd685970dUL, 0xe1ef550cUL, 0x64f91a09UL, 0x5393d808UL, 0x0a2d9e0aUL, 0x3d475c0bUL, 0x70a3261cUL, 0x47c9e41dUL, 0x1e77a21fUL, 0x291d601eUL, 0xac0b2f1bUL, 0x9b61ed1aUL, 0xc2dfab18UL, 0xf5b56919UL, 0xc8f23512UL, 0xff98f713UL, 0xa626b111UL, 0x914c7310UL, 0x145a3c15UL, 0x2330fe14UL, 0x7a8eb816UL, 0x4de47a17UL, 0xe0464d38UL, 0xd72c8f39UL, 0x8e92c93bUL, 0xb9f80b3aUL, 0x3cee443fUL, 0x0b84863eUL, 0x523ac03cUL, 0x6550023dUL, 0x58175e36UL, 0x6f7d9c37UL, 0x36c3da35UL, 0x01a91834UL, 0x84bf5731UL, 0xb3d59530UL, 0xea6bd332UL, 0xdd011133UL, 0x90e56b24UL, 0xa78fa925UL, 0xfe31ef27UL, 0xc95b2d26UL, 0x4c4d6223UL, 0x7b27a022UL, 0x2299e620UL, 0x15f32421UL, 0x28b4782aUL, 0x1fdeba2bUL, 0x4660fc29UL, 0x710a3e28UL, 0xf41c712dUL, 0xc376b32cUL, 0x9ac8f52eUL, 0xada2372fUL, 0xc08d9a70UL, 0xf7e75871UL, 0xae591e73UL, 0x9933dc72UL, 0x1c259377UL, 0x2b4f5176UL, 0x72f11774UL, 0x459bd575UL, 0x78dc897eUL, 0x4fb64b7fUL, 0x16080d7dUL, 0x2162cf7cUL, 0xa4748079UL, 0x931e4278UL, 0xcaa0047aUL, 0xfdcac67bUL, 0xb02ebc6cUL, 0x87447e6dUL, 0xdefa386fUL, 0xe990fa6eUL, 0x6c86b56bUL, 0x5bec776aUL, 0x02523168UL, 0x3538f369UL, 0x087faf62UL, 0x3f156d63UL, 0x66ab2b61UL, 0x51c1e960UL, 0xd4d7a665UL, 0xe3bd6464UL, 0xba032266UL, 0x8d69e067UL, 0x20cbd748UL, 0x17a11549UL, 0x4e1f534bUL, 0x7975914aUL, 0xfc63de4fUL, 0xcb091c4eUL, 0x92b75a4cUL, 0xa5dd984dUL, 0x989ac446UL, 0xaff00647UL, 0xf64e4045UL, 0xc1248244UL, 0x4432cd41UL, 0x73580f40UL, 0x2ae64942UL, 0x1d8c8b43UL, 0x5068f154UL, 0x67023355UL, 0x3ebc7557UL, 0x09d6b756UL, 0x8cc0f853UL, 0xbbaa3a52UL, 0xe2147c50UL, 0xd57ebe51UL, 0xe839e25aUL, 0xdf53205bUL, 0x86ed6659UL, 0xb187a458UL, 0x3491eb5dUL, 0x03fb295cUL, 0x5a456f5eUL, 0x6d2fad5fUL, 0x801b35e1UL, 0xb771f7e0UL, 0xeecfb1e2UL, 0xd9a573e3UL, 0x5cb33ce6UL, 0x6bd9fee7UL, 0x3267b8e5UL, 0x050d7ae4UL, 0x384a26efUL, 0x0f20e4eeUL, 0x569ea2ecUL, 0x61f460edUL, 0xe4e22fe8UL, 0xd388ede9UL, 0x8a36abebUL, 0xbd5c69eaUL, 0xf0b813fdUL, 0xc7d2d1fcUL, 0x9e6c97feUL, 0xa90655ffUL, 0x2c101afaUL, 0x1b7ad8fbUL, 0x42c49ef9UL, 0x75ae5cf8UL, 
0x48e900f3UL, 0x7f83c2f2UL, 0x263d84f0UL, 0x115746f1UL, 0x944109f4UL, 0xa32bcbf5UL, 0xfa958df7UL, 0xcdff4ff6UL, 0x605d78d9UL, 0x5737bad8UL, 0x0e89fcdaUL, 0x39e33edbUL, 0xbcf571deUL, 0x8b9fb3dfUL, 0xd221f5ddUL, 0xe54b37dcUL, 0xd80c6bd7UL, 0xef66a9d6UL, 0xb6d8efd4UL, 0x81b22dd5UL, 0x04a462d0UL, 0x33cea0d1UL, 0x6a70e6d3UL, 0x5d1a24d2UL, 0x10fe5ec5UL, 0x27949cc4UL, 0x7e2adac6UL, 0x494018c7UL, 0xcc5657c2UL, 0xfb3c95c3UL, 0xa282d3c1UL, 0x95e811c0UL, 0xa8af4dcbUL, 0x9fc58fcaUL, 0xc67bc9c8UL, 0xf1110bc9UL, 0x740744ccUL, 0x436d86cdUL, 0x1ad3c0cfUL, 0x2db902ceUL, 0x4096af91UL, 0x77fc6d90UL, 0x2e422b92UL, 0x1928e993UL, 0x9c3ea696UL, 0xab546497UL, 0xf2ea2295UL, 0xc580e094UL, 0xf8c7bc9fUL, 0xcfad7e9eUL, 0x9613389cUL, 0xa179fa9dUL, 0x246fb598UL, 0x13057799UL, 0x4abb319bUL, 0x7dd1f39aUL, 0x3035898dUL, 0x075f4b8cUL, 0x5ee10d8eUL, 0x698bcf8fUL, 0xec9d808aUL, 0xdbf7428bUL, 0x82490489UL, 0xb523c688UL, 0x88649a83UL, 0xbf0e5882UL, 0xe6b01e80UL, 0xd1dadc81UL, 0x54cc9384UL, 0x63a65185UL, 0x3a181787UL, 0x0d72d586UL, 0xa0d0e2a9UL, 0x97ba20a8UL, 0xce0466aaUL, 0xf96ea4abUL, 0x7c78ebaeUL, 0x4b1229afUL, 0x12ac6fadUL, 0x25c6adacUL, 0x1881f1a7UL, 0x2feb33a6UL, 0x765575a4UL, 0x413fb7a5UL, 0xc429f8a0UL, 0xf3433aa1UL, 0xaafd7ca3UL, 0x9d97bea2UL, 0xd073c4b5UL, 0xe71906b4UL, 0xbea740b6UL, 0x89cd82b7UL, 0x0cdbcdb2UL, 0x3bb10fb3UL, 0x620f49b1UL, 0x55658bb0UL, 0x6822d7bbUL, 0x5f4815baUL, 0x06f653b8UL, 0x319c91b9UL, 0xb48adebcUL, 0x83e01cbdUL, 0xda5e5abfUL, 0xed3498beUL }, { 0x00000000UL, 0x6567bcb8UL, 0x8bc809aaUL, 0xeeafb512UL, 0x5797628fUL, 0x32f0de37UL, 0xdc5f6b25UL, 0xb938d79dUL, 0xef28b4c5UL, 0x8a4f087dUL, 0x64e0bd6fUL, 0x018701d7UL, 0xb8bfd64aUL, 0xddd86af2UL, 0x3377dfe0UL, 0x56106358UL, 0x9f571950UL, 0xfa30a5e8UL, 0x149f10faUL, 0x71f8ac42UL, 0xc8c07bdfUL, 0xada7c767UL, 0x43087275UL, 0x266fcecdUL, 0x707fad95UL, 0x1518112dUL, 0xfbb7a43fUL, 0x9ed01887UL, 0x27e8cf1aUL, 0x428f73a2UL, 0xac20c6b0UL, 0xc9477a08UL, 0x3eaf32a0UL, 0x5bc88e18UL, 0xb5673b0aUL, 0xd00087b2UL, 0x6938502fUL, 0x0c5fec97UL, 0xe2f05985UL, 0x8797e53dUL, 0xd1878665UL, 0xb4e03addUL, 0x5a4f8fcfUL, 0x3f283377UL, 0x8610e4eaUL, 0xe3775852UL, 0x0dd8ed40UL, 0x68bf51f8UL, 0xa1f82bf0UL, 0xc49f9748UL, 0x2a30225aUL, 0x4f579ee2UL, 0xf66f497fUL, 0x9308f5c7UL, 0x7da740d5UL, 0x18c0fc6dUL, 0x4ed09f35UL, 0x2bb7238dUL, 0xc518969fUL, 0xa07f2a27UL, 0x1947fdbaUL, 0x7c204102UL, 0x928ff410UL, 0xf7e848a8UL, 0x3d58149bUL, 0x583fa823UL, 0xb6901d31UL, 0xd3f7a189UL, 0x6acf7614UL, 0x0fa8caacUL, 0xe1077fbeUL, 0x8460c306UL, 0xd270a05eUL, 0xb7171ce6UL, 0x59b8a9f4UL, 0x3cdf154cUL, 0x85e7c2d1UL, 0xe0807e69UL, 0x0e2fcb7bUL, 0x6b4877c3UL, 0xa20f0dcbUL, 0xc768b173UL, 0x29c70461UL, 0x4ca0b8d9UL, 0xf5986f44UL, 0x90ffd3fcUL, 0x7e5066eeUL, 0x1b37da56UL, 0x4d27b90eUL, 0x284005b6UL, 0xc6efb0a4UL, 0xa3880c1cUL, 0x1ab0db81UL, 0x7fd76739UL, 0x9178d22bUL, 0xf41f6e93UL, 0x03f7263bUL, 0x66909a83UL, 0x883f2f91UL, 0xed589329UL, 0x546044b4UL, 0x3107f80cUL, 0xdfa84d1eUL, 0xbacff1a6UL, 0xecdf92feUL, 0x89b82e46UL, 0x67179b54UL, 0x027027ecUL, 0xbb48f071UL, 0xde2f4cc9UL, 0x3080f9dbUL, 0x55e74563UL, 0x9ca03f6bUL, 0xf9c783d3UL, 0x176836c1UL, 0x720f8a79UL, 0xcb375de4UL, 0xae50e15cUL, 0x40ff544eUL, 0x2598e8f6UL, 0x73888baeUL, 0x16ef3716UL, 0xf8408204UL, 0x9d273ebcUL, 0x241fe921UL, 0x41785599UL, 0xafd7e08bUL, 0xcab05c33UL, 0x3bb659edUL, 0x5ed1e555UL, 0xb07e5047UL, 0xd519ecffUL, 0x6c213b62UL, 0x094687daUL, 0xe7e932c8UL, 0x828e8e70UL, 0xd49eed28UL, 0xb1f95190UL, 0x5f56e482UL, 0x3a31583aUL, 0x83098fa7UL, 0xe66e331fUL, 0x08c1860dUL, 0x6da63ab5UL, 0xa4e140bdUL, 0xc186fc05UL, 0x2f294917UL, 0x4a4ef5afUL, 0xf3762232UL, 
0x96119e8aUL, 0x78be2b98UL, 0x1dd99720UL, 0x4bc9f478UL, 0x2eae48c0UL, 0xc001fdd2UL, 0xa566416aUL, 0x1c5e96f7UL, 0x79392a4fUL, 0x97969f5dUL, 0xf2f123e5UL, 0x05196b4dUL, 0x607ed7f5UL, 0x8ed162e7UL, 0xebb6de5fUL, 0x528e09c2UL, 0x37e9b57aUL, 0xd9460068UL, 0xbc21bcd0UL, 0xea31df88UL, 0x8f566330UL, 0x61f9d622UL, 0x049e6a9aUL, 0xbda6bd07UL, 0xd8c101bfUL, 0x366eb4adUL, 0x53090815UL, 0x9a4e721dUL, 0xff29cea5UL, 0x11867bb7UL, 0x74e1c70fUL, 0xcdd91092UL, 0xa8beac2aUL, 0x46111938UL, 0x2376a580UL, 0x7566c6d8UL, 0x10017a60UL, 0xfeaecf72UL, 0x9bc973caUL, 0x22f1a457UL, 0x479618efUL, 0xa939adfdUL, 0xcc5e1145UL, 0x06ee4d76UL, 0x6389f1ceUL, 0x8d2644dcUL, 0xe841f864UL, 0x51792ff9UL, 0x341e9341UL, 0xdab12653UL, 0xbfd69aebUL, 0xe9c6f9b3UL, 0x8ca1450bUL, 0x620ef019UL, 0x07694ca1UL, 0xbe519b3cUL, 0xdb362784UL, 0x35999296UL, 0x50fe2e2eUL, 0x99b95426UL, 0xfcdee89eUL, 0x12715d8cUL, 0x7716e134UL, 0xce2e36a9UL, 0xab498a11UL, 0x45e63f03UL, 0x208183bbUL, 0x7691e0e3UL, 0x13f65c5bUL, 0xfd59e949UL, 0x983e55f1UL, 0x2106826cUL, 0x44613ed4UL, 0xaace8bc6UL, 0xcfa9377eUL, 0x38417fd6UL, 0x5d26c36eUL, 0xb389767cUL, 0xd6eecac4UL, 0x6fd61d59UL, 0x0ab1a1e1UL, 0xe41e14f3UL, 0x8179a84bUL, 0xd769cb13UL, 0xb20e77abUL, 0x5ca1c2b9UL, 0x39c67e01UL, 0x80fea99cUL, 0xe5991524UL, 0x0b36a036UL, 0x6e511c8eUL, 0xa7166686UL, 0xc271da3eUL, 0x2cde6f2cUL, 0x49b9d394UL, 0xf0810409UL, 0x95e6b8b1UL, 0x7b490da3UL, 0x1e2eb11bUL, 0x483ed243UL, 0x2d596efbUL, 0xc3f6dbe9UL, 0xa6916751UL, 0x1fa9b0ccUL, 0x7ace0c74UL, 0x9461b966UL, 0xf10605deUL #endif } }; mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/deflate.c0000644000076500000240000023205113572250760021233 0ustar alcaeusstaff/* deflate.c -- compress data using the deflation algorithm * Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process depends on being able to identify portions * of the input text which are identical to earlier input (within a * sliding window trailing behind the input currently being processed). * * The most straightforward technique turns out to be the fastest for * most input files: try all possible matches and select the longest. * The key feature of this algorithm is that insertions into the string * dictionary are very simple and thus fast, and deletions are avoided * completely. Insertions are performed at each input character, whereas * string matches are performed only when the previous match ends. So it * is preferable to spend more time in matches to allow very fast string * insertions and avoid deletions. The matching algorithm for small * strings is inspired from that of Rabin & Karp. A brute force approach * is used to find longer strings when a small match has been found. * A similar algorithm is used in comic (by Jan-Mark Wams) and freeze * (by Leonid Broukhis). * A previous version of this file used a more sophisticated algorithm * (by Fiala and Greene) which is guaranteed to run in linear amortized * time, but has a larger average cost, uses more memory and is patented. * However the F&G algorithm may be faster for some highly redundant * files if the parameter max_chain_length (described below) is too large. * * ACKNOWLEDGEMENTS * * The idea of lazy evaluation of matches is due to Jan-Mark Wams, and * I found it in 'freeze' written by Leonid Broukhis. * Thanks to many people for bug reports and testing. * * REFERENCES * * Deutsch, L.P.,"DEFLATE Compressed Data Format Specification". 
* Available in http://tools.ietf.org/html/rfc1951 * * A description of the Rabin and Karp algorithm is given in the book * "Algorithms" by R. Sedgewick, Addison-Wesley, p252. * * Fiala,E.R., and Greene,D.H. * Data Compression with Finite Windows, Comm.ACM, 32,4 (1989) 490-595 * */ /* @(#) $Id$ */ #include "deflate.h" const char deflate_copyright[] = " deflate 1.2.11 Copyright 1995-2017 Jean-loup Gailly and Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. If for some reason you cannot include such an acknowledgment, I would appreciate that you keep this copyright string in the executable of your product. */ /* =========================================================================== * Function prototypes. */ typedef enum { need_more, /* block not completed, need more input or more output */ block_done, /* block flush performed */ finish_started, /* finish started, need only more output at next deflate */ finish_done /* finish done, accept no more input or output */ } block_state; typedef block_state (*compress_func) OF((deflate_state *s, int flush)); /* Compression function. Returns the block state after the call. */ local int deflateStateCheck OF((z_streamp strm)); local void slide_hash OF((deflate_state *s)); local void fill_window OF((deflate_state *s)); local block_state deflate_stored OF((deflate_state *s, int flush)); local block_state deflate_fast OF((deflate_state *s, int flush)); #ifndef FASTEST local block_state deflate_slow OF((deflate_state *s, int flush)); #endif local block_state deflate_rle OF((deflate_state *s, int flush)); local block_state deflate_huff OF((deflate_state *s, int flush)); local void lm_init OF((deflate_state *s)); local void putShortMSB OF((deflate_state *s, uInt b)); local void flush_pending OF((z_streamp strm)); local unsigned read_buf OF((z_streamp strm, Bytef *buf, unsigned size)); #ifdef ASMV # pragma message("Assembler code may have bugs -- use at your own risk") void match_init OF((void)); /* asm code initialization */ uInt longest_match OF((deflate_state *s, IPos cur_match)); #else local uInt longest_match OF((deflate_state *s, IPos cur_match)); #endif #ifdef ZLIB_DEBUG local void check_match OF((deflate_state *s, IPos start, IPos match, int length)); #endif /* =========================================================================== * Local data */ #define NIL 0 /* Tail of hash chains */ #ifndef TOO_FAR # define TOO_FAR 4096 #endif /* Matches of length 3 are discarded if their distance exceeds TOO_FAR */ /* Values for max_lazy_match, good_match and max_chain_length, depending on * the desired pack level (0..9). The values given below have been tuned to * exclude worst case performance for pathological files. Better values may be * found for specific files. 
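*
* [Editor's note, not part of zlib] Applications that do find better values
* for a specific workload can install them at run time through deflateTune(),
* which appears later in this file. A hedged usage sketch, assuming a stream
* whose zalloc, zfree and opaque fields were set to Z_NULL before init; the
* numeric values are placeholders, not a recommendation:
*
*   deflateInit(&strm, 9);
*   deflateTune(&strm, 32, 258, 258, 8192);  (good_length, max_lazy, nice_length, max_chain)
*
* Otherwise the level passed to deflateInit() or deflateParams() simply
* selects one row of the configuration_table below; the default level 6, for
* example, maps to {good 8, lazy 16, nice 128, chain 128, deflate_slow}.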
*/ typedef struct config_s { ush good_length; /* reduce lazy search above this match length */ ush max_lazy; /* do not perform lazy search above this match length */ ush nice_length; /* quit search above this match length */ ush max_chain; compress_func func; } config; #ifdef FASTEST local const config configuration_table[2] = { /* good lazy nice chain */ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ /* 1 */ {4, 4, 8, 4, deflate_fast}}; /* max speed, no lazy matches */ #else local const config configuration_table[10] = { /* good lazy nice chain */ /* 0 */ {0, 0, 0, 0, deflate_stored}, /* store only */ /* 1 */ {4, 4, 8, 4, deflate_fast}, /* max speed, no lazy matches */ /* 2 */ {4, 5, 16, 8, deflate_fast}, /* 3 */ {4, 6, 32, 32, deflate_fast}, /* 4 */ {4, 4, 16, 16, deflate_slow}, /* lazy matches */ /* 5 */ {8, 16, 32, 32, deflate_slow}, /* 6 */ {8, 16, 128, 128, deflate_slow}, /* 7 */ {8, 32, 128, 256, deflate_slow}, /* 8 */ {32, 128, 258, 1024, deflate_slow}, /* 9 */ {32, 258, 258, 4096, deflate_slow}}; /* max compression */ #endif /* Note: the deflate() code requires max_lazy >= MIN_MATCH and max_chain >= 4 * For deflate_fast() (levels <= 3) good is ignored and lazy has a different * meaning. */ /* rank Z_BLOCK between Z_NO_FLUSH and Z_PARTIAL_FLUSH */ #define RANK(f) (((f) * 2) - ((f) > 4 ? 9 : 0)) /* =========================================================================== * Update a hash value with the given input byte * IN assertion: all calls to UPDATE_HASH are made with consecutive input * characters, so that a running hash key can be computed from the previous * key instead of complete recalculation each time. */ #define UPDATE_HASH(s,h,c) (h = (((h)<hash_shift) ^ (c)) & s->hash_mask) /* =========================================================================== * Insert string str in the dictionary and set match_head to the previous head * of the hash chain (the most recent string with same hash key). Return * the previous length of the hash chain. * If this file is compiled with -DFASTEST, the compression level is forced * to 1, and no hash chains are maintained. * IN assertion: all calls to INSERT_STRING are made with consecutive input * characters and the first MIN_MATCH bytes of str are valid (except for * the last MIN_MATCH-1 bytes of the input file). */ #ifdef FASTEST #define INSERT_STRING(s, str, match_head) \ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ match_head = s->head[s->ins_h], \ s->head[s->ins_h] = (Pos)(str)) #else #define INSERT_STRING(s, str, match_head) \ (UPDATE_HASH(s, s->ins_h, s->window[(str) + (MIN_MATCH-1)]), \ match_head = s->prev[(str) & s->w_mask] = s->head[s->ins_h], \ s->head[s->ins_h] = (Pos)(str)) #endif /* =========================================================================== * Initialize the hash table (avoiding 64K overflow for 16 bit systems). * prev[] will be initialized on the fly. */ #define CLEAR_HASH(s) \ s->head[s->hash_size-1] = NIL; \ zmemzero((Bytef *)s->head, (unsigned)(s->hash_size-1)*sizeof(*s->head)); /* =========================================================================== * Slide the hash table when sliding the window down (could be avoided with 32 * bit values at the expense of memory usage). We slide even when level == 0 to * keep the hash table consistent if we switch back to level > 0 later. */ local void slide_hash(s) deflate_state *s; { unsigned n, m; Posf *p; uInt wsize = s->w_size; n = s->hash_size; p = &s->head[n]; do { m = *--p; *p = (Pos)(m >= wsize ? 
m - wsize : NIL); } while (--n); n = wsize; #ifndef FASTEST p = &s->prev[n]; do { m = *--p; *p = (Pos)(m >= wsize ? m - wsize : NIL); /* If n is not on any hash chain, prev[n] is garbage but * its value will never be used. */ } while (--n); #endif } /* ========================================================================= */ int ZEXPORT deflateInit_(strm, level, version, stream_size) z_streamp strm; int level; const char *version; int stream_size; { return deflateInit2_(strm, level, Z_DEFLATED, MAX_WBITS, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY, version, stream_size); /* To do: ignore strm->next_in if we use it as window */ } /* ========================================================================= */ int ZEXPORT deflateInit2_(strm, level, method, windowBits, memLevel, strategy, version, stream_size) z_streamp strm; int level; int method; int windowBits; int memLevel; int strategy; const char *version; int stream_size; { deflate_state *s; int wrap = 1; static const char my_version[] = ZLIB_VERSION; ushf *overlay; /* We overlay pending_buf and d_buf+l_buf. This works since the average * output size for (length,distance) codes is <= 24 bits. */ if (version == Z_NULL || version[0] != my_version[0] || stream_size != sizeof(z_stream)) { return Z_VERSION_ERROR; } if (strm == Z_NULL) return Z_STREAM_ERROR; strm->msg = Z_NULL; if (strm->zalloc == (alloc_func)0) { #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; #endif } if (strm->zfree == (free_func)0) #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zfree = zcfree; #endif #ifdef FASTEST if (level != 0) level = 1; #else if (level == Z_DEFAULT_COMPRESSION) level = 6; #endif if (windowBits < 0) { /* suppress zlib wrapper */ wrap = 0; windowBits = -windowBits; } #ifdef GZIP else if (windowBits > 15) { wrap = 2; /* write gzip wrapper instead */ windowBits -= 16; } #endif if (memLevel < 1 || memLevel > MAX_MEM_LEVEL || method != Z_DEFLATED || windowBits < 8 || windowBits > 15 || level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED || (windowBits == 8 && wrap != 1)) { return Z_STREAM_ERROR; } if (windowBits == 8) windowBits = 9; /* until 256-byte window bug fixed */ s = (deflate_state *) ZALLOC(strm, 1, sizeof(deflate_state)); if (s == Z_NULL) return Z_MEM_ERROR; strm->state = (struct internal_state FAR *)s; s->strm = strm; s->status = INIT_STATE; /* to pass state test in deflateReset() */ s->wrap = wrap; s->gzhead = Z_NULL; s->w_bits = (uInt)windowBits; s->w_size = 1 << s->w_bits; s->w_mask = s->w_size - 1; s->hash_bits = (uInt)memLevel + 7; s->hash_size = 1 << s->hash_bits; s->hash_mask = s->hash_size - 1; s->hash_shift = ((s->hash_bits+MIN_MATCH-1)/MIN_MATCH); s->window = (Bytef *) ZALLOC(strm, s->w_size, 2*sizeof(Byte)); s->prev = (Posf *) ZALLOC(strm, s->w_size, sizeof(Pos)); s->head = (Posf *) ZALLOC(strm, s->hash_size, sizeof(Pos)); s->high_water = 0; /* nothing written to s->window yet */ s->lit_bufsize = 1 << (memLevel + 6); /* 16K elements by default */ overlay = (ushf *) ZALLOC(strm, s->lit_bufsize, sizeof(ush)+2); s->pending_buf = (uchf *) overlay; s->pending_buf_size = (ulg)s->lit_bufsize * (sizeof(ush)+2L); if (s->window == Z_NULL || s->prev == Z_NULL || s->head == Z_NULL || s->pending_buf == Z_NULL) { s->status = FINISH_STATE; strm->msg = ERR_MSG(Z_MEM_ERROR); deflateEnd (strm); return Z_MEM_ERROR; } s->d_buf = overlay + s->lit_bufsize/sizeof(ush); s->l_buf = s->pending_buf + (1+sizeof(ush))*s->lit_bufsize; s->level = level; s->strategy = strategy; s->method = (Byte)method; return 
deflateReset(strm); } /* ========================================================================= * Check for a valid deflate stream state. Return 0 if ok, 1 if not. */ local int deflateStateCheck (strm) z_streamp strm; { deflate_state *s; if (strm == Z_NULL || strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) return 1; s = strm->state; if (s == Z_NULL || s->strm != strm || (s->status != INIT_STATE && #ifdef GZIP s->status != GZIP_STATE && #endif s->status != EXTRA_STATE && s->status != NAME_STATE && s->status != COMMENT_STATE && s->status != HCRC_STATE && s->status != BUSY_STATE && s->status != FINISH_STATE)) return 1; return 0; } /* ========================================================================= */ int ZEXPORT deflateSetDictionary (strm, dictionary, dictLength) z_streamp strm; const Bytef *dictionary; uInt dictLength; { deflate_state *s; uInt str, n; int wrap; unsigned avail; z_const unsigned char *next; if (deflateStateCheck(strm) || dictionary == Z_NULL) return Z_STREAM_ERROR; s = strm->state; wrap = s->wrap; if (wrap == 2 || (wrap == 1 && s->status != INIT_STATE) || s->lookahead) return Z_STREAM_ERROR; /* when using zlib wrappers, compute Adler-32 for provided dictionary */ if (wrap == 1) strm->adler = adler32(strm->adler, dictionary, dictLength); s->wrap = 0; /* avoid computing Adler-32 in read_buf */ /* if dictionary would fill window, just replace the history */ if (dictLength >= s->w_size) { if (wrap == 0) { /* already empty otherwise */ CLEAR_HASH(s); s->strstart = 0; s->block_start = 0L; s->insert = 0; } dictionary += dictLength - s->w_size; /* use the tail */ dictLength = s->w_size; } /* insert dictionary into window and hash */ avail = strm->avail_in; next = strm->next_in; strm->avail_in = dictLength; strm->next_in = (z_const Bytef *)dictionary; fill_window(s); while (s->lookahead >= MIN_MATCH) { str = s->strstart; n = s->lookahead - (MIN_MATCH-1); do { UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); #ifndef FASTEST s->prev[str & s->w_mask] = s->head[s->ins_h]; #endif s->head[s->ins_h] = (Pos)str; str++; } while (--n); s->strstart = str; s->lookahead = MIN_MATCH-1; fill_window(s); } s->strstart += s->lookahead; s->block_start = (long)s->strstart; s->insert = s->lookahead; s->lookahead = 0; s->match_length = s->prev_length = MIN_MATCH-1; s->match_available = 0; strm->next_in = next; strm->avail_in = avail; s->wrap = wrap; return Z_OK; } /* ========================================================================= */ int ZEXPORT deflateGetDictionary (strm, dictionary, dictLength) z_streamp strm; Bytef *dictionary; uInt *dictLength; { deflate_state *s; uInt len; if (deflateStateCheck(strm)) return Z_STREAM_ERROR; s = strm->state; len = s->strstart + s->lookahead; if (len > s->w_size) len = s->w_size; if (dictionary != Z_NULL && len) zmemcpy(dictionary, s->window + s->strstart + s->lookahead - len, len); if (dictLength != Z_NULL) *dictLength = len; return Z_OK; } /* ========================================================================= */ int ZEXPORT deflateResetKeep (strm) z_streamp strm; { deflate_state *s; if (deflateStateCheck(strm)) { return Z_STREAM_ERROR; } strm->total_in = strm->total_out = 0; strm->msg = Z_NULL; /* use zfree if we ever allocate msg dynamically */ strm->data_type = Z_UNKNOWN; s = (deflate_state *)strm->state; s->pending = 0; s->pending_out = s->pending_buf; if (s->wrap < 0) { s->wrap = -s->wrap; /* was made negative by deflate(..., Z_FINISH); */ } s->status = #ifdef GZIP s->wrap == 2 ? GZIP_STATE : #endif s->wrap ? 
INIT_STATE : BUSY_STATE; strm->adler = #ifdef GZIP s->wrap == 2 ? crc32(0L, Z_NULL, 0) : #endif adler32(0L, Z_NULL, 0); s->last_flush = Z_NO_FLUSH; _tr_init(s); return Z_OK; } /* ========================================================================= */ int ZEXPORT deflateReset (strm) z_streamp strm; { int ret; ret = deflateResetKeep(strm); if (ret == Z_OK) lm_init(strm->state); return ret; } /* ========================================================================= */ int ZEXPORT deflateSetHeader (strm, head) z_streamp strm; gz_headerp head; { if (deflateStateCheck(strm) || strm->state->wrap != 2) return Z_STREAM_ERROR; strm->state->gzhead = head; return Z_OK; } /* ========================================================================= */ int ZEXPORT deflatePending (strm, pending, bits) unsigned *pending; int *bits; z_streamp strm; { if (deflateStateCheck(strm)) return Z_STREAM_ERROR; if (pending != Z_NULL) *pending = strm->state->pending; if (bits != Z_NULL) *bits = strm->state->bi_valid; return Z_OK; } /* ========================================================================= */ int ZEXPORT deflatePrime (strm, bits, value) z_streamp strm; int bits; int value; { deflate_state *s; int put; if (deflateStateCheck(strm)) return Z_STREAM_ERROR; s = strm->state; if ((Bytef *)(s->d_buf) < s->pending_out + ((Buf_size + 7) >> 3)) return Z_BUF_ERROR; do { put = Buf_size - s->bi_valid; if (put > bits) put = bits; s->bi_buf |= (ush)((value & ((1 << put) - 1)) << s->bi_valid); s->bi_valid += put; _tr_flush_bits(s); value >>= put; bits -= put; } while (bits); return Z_OK; } /* ========================================================================= */ int ZEXPORT deflateParams(strm, level, strategy) z_streamp strm; int level; int strategy; { deflate_state *s; compress_func func; if (deflateStateCheck(strm)) return Z_STREAM_ERROR; s = strm->state; #ifdef FASTEST if (level != 0) level = 1; #else if (level == Z_DEFAULT_COMPRESSION) level = 6; #endif if (level < 0 || level > 9 || strategy < 0 || strategy > Z_FIXED) { return Z_STREAM_ERROR; } func = configuration_table[s->level].func; if ((strategy != s->strategy || func != configuration_table[level].func) && s->high_water) { /* Flush the last buffer: */ int err = deflate(strm, Z_BLOCK); if (err == Z_STREAM_ERROR) return err; if (strm->avail_out == 0) return Z_BUF_ERROR; } if (s->level != level) { if (s->level == 0 && s->matches != 0) { if (s->matches == 1) slide_hash(s); else CLEAR_HASH(s); s->matches = 0; } s->level = level; s->max_lazy_match = configuration_table[level].max_lazy; s->good_match = configuration_table[level].good_length; s->nice_match = configuration_table[level].nice_length; s->max_chain_length = configuration_table[level].max_chain; } s->strategy = strategy; return Z_OK; } /* ========================================================================= */ int ZEXPORT deflateTune(strm, good_length, max_lazy, nice_length, max_chain) z_streamp strm; int good_length; int max_lazy; int nice_length; int max_chain; { deflate_state *s; if (deflateStateCheck(strm)) return Z_STREAM_ERROR; s = strm->state; s->good_match = (uInt)good_length; s->max_lazy_match = (uInt)max_lazy; s->nice_match = nice_length; s->max_chain_length = (uInt)max_chain; return Z_OK; } /* ========================================================================= * For the default windowBits of 15 and memLevel of 8, this function returns * a close to exact, as well as small, upper bound on the compressed size. 
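*
* [Editor's note, not part of zlib] The usual application-side pattern is to
* size the output buffer with this function and then compress in one pass
* with Z_FINISH. A hedged sketch; src, srcLen and out are placeholder names
* and the stream is assumed to have been set up with deflateInit2() already:
*
*   uLong cap = deflateBound(&strm, srcLen);
*   Bytef *out = malloc(cap);
*   strm.next_in  = src;  strm.avail_in  = (uInt)srcLen;
*   strm.next_out = out;  strm.avail_out = (uInt)cap;
*   int rc = deflate(&strm, Z_FINISH);       (Z_STREAM_END expected here)
*   uLong compressed_len = strm.total_out;
*
* Per zlib.h, when the buffer is at least deflateBound() bytes and all input
* is presented in that single Z_FINISH call, deflate() returns Z_STREAM_END.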
* They are coded as constants here for a reason--if the #define's are * changed, then this function needs to be changed as well. The return * value for 15 and 8 only works for those exact settings. * * For any setting other than those defaults for windowBits and memLevel, * the value returned is a conservative worst case for the maximum expansion * resulting from using fixed blocks instead of stored blocks, which deflate * can emit on compressed data for some combinations of the parameters. * * This function could be more sophisticated to provide closer upper bounds for * every combination of windowBits and memLevel. But even the conservative * upper bound of about 14% expansion does not seem onerous for output buffer * allocation. */ uLong ZEXPORT deflateBound(strm, sourceLen) z_streamp strm; uLong sourceLen; { deflate_state *s; uLong complen, wraplen; /* conservative upper bound for compressed data */ complen = sourceLen + ((sourceLen + 7) >> 3) + ((sourceLen + 63) >> 6) + 5; /* if can't get parameters, return conservative bound plus zlib wrapper */ if (deflateStateCheck(strm)) return complen + 6; /* compute wrapper length */ s = strm->state; switch (s->wrap) { case 0: /* raw deflate */ wraplen = 0; break; case 1: /* zlib wrapper */ wraplen = 6 + (s->strstart ? 4 : 0); break; #ifdef GZIP case 2: /* gzip wrapper */ wraplen = 18; if (s->gzhead != Z_NULL) { /* user-supplied gzip header */ Bytef *str; if (s->gzhead->extra != Z_NULL) wraplen += 2 + s->gzhead->extra_len; str = s->gzhead->name; if (str != Z_NULL) do { wraplen++; } while (*str++); str = s->gzhead->comment; if (str != Z_NULL) do { wraplen++; } while (*str++); if (s->gzhead->hcrc) wraplen += 2; } break; #endif default: /* for compiler happiness */ wraplen = 6; } /* if not default parameters, return conservative bound */ if (s->w_bits != 15 || s->hash_bits != 8 + 7) return complen + wraplen; /* default settings: return tight bound for that case */ return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + (sourceLen >> 25) + 13 - 6 + wraplen; } /* ========================================================================= * Put a short in the pending buffer. The 16-bit value is put in MSB order. * IN assertion: the stream state is correct and there is enough room in * pending_buf. */ local void putShortMSB (s, b) deflate_state *s; uInt b; { put_byte(s, (Byte)(b >> 8)); put_byte(s, (Byte)(b & 0xff)); } /* ========================================================================= * Flush as much pending output as possible. All deflate() output, except for * some deflate_stored() output, goes through this function so some * applications may wish to modify it to avoid allocating a large * strm->next_out buffer and copying into it. (See also read_buf()). */ local void flush_pending(strm) z_streamp strm; { unsigned len; deflate_state *s = strm->state; _tr_flush_bits(s); len = s->pending; if (len > strm->avail_out) len = strm->avail_out; if (len == 0) return; zmemcpy(strm->next_out, s->pending_out, len); strm->next_out += len; s->pending_out += len; strm->total_out += len; strm->avail_out -= len; s->pending -= len; if (s->pending == 0) { s->pending_out = s->pending_buf; } } /* =========================================================================== * Update the header CRC with the bytes s->pending_buf[beg..s->pending - 1]. 
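*
* [Editor's note, not part of zlib] The header CRC updated by this macro is
* only emitted when the application asked for it through deflateSetHeader().
* A hedged sketch of requesting a gzip wrapper with a header CRC; the file
* name is made up, and <string.h> and <time.h> are assumed for memset() and
* time():
*
*   gz_header head;
*   memset(&head, 0, sizeof(head));
*   head.name = (Bytef *)"example.txt";
*   head.time = (uLong)time(NULL);
*   head.os   = 3;                        (3 = Unix, per RFC 1952)
*   head.hcrc = 1;                        (emit the 16-bit header CRC)
*   deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
*                15 + 16, 8, Z_DEFAULT_STRATEGY);   (windowBits 15+16 = gzip wrapper)
*   deflateSetHeader(&strm, &head);       (must come before the first deflate() call)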
*/ #define HCRC_UPDATE(beg) \ do { \ if (s->gzhead->hcrc && s->pending > (beg)) \ strm->adler = crc32(strm->adler, s->pending_buf + (beg), \ s->pending - (beg)); \ } while (0) /* ========================================================================= */ int ZEXPORT deflate (strm, flush) z_streamp strm; int flush; { int old_flush; /* value of flush param for previous deflate call */ deflate_state *s; if (deflateStateCheck(strm) || flush > Z_BLOCK || flush < 0) { return Z_STREAM_ERROR; } s = strm->state; if (strm->next_out == Z_NULL || (strm->avail_in != 0 && strm->next_in == Z_NULL) || (s->status == FINISH_STATE && flush != Z_FINISH)) { ERR_RETURN(strm, Z_STREAM_ERROR); } if (strm->avail_out == 0) ERR_RETURN(strm, Z_BUF_ERROR); old_flush = s->last_flush; s->last_flush = flush; /* Flush as much pending output as possible */ if (s->pending != 0) { flush_pending(strm); if (strm->avail_out == 0) { /* Since avail_out is 0, deflate will be called again with * more output space, but possibly with both pending and * avail_in equal to zero. There won't be anything to do, * but this is not an error situation so make sure we * return OK instead of BUF_ERROR at next call of deflate: */ s->last_flush = -1; return Z_OK; } /* Make sure there is something to do and avoid duplicate consecutive * flushes. For repeated and useless calls with Z_FINISH, we keep * returning Z_STREAM_END instead of Z_BUF_ERROR. */ } else if (strm->avail_in == 0 && RANK(flush) <= RANK(old_flush) && flush != Z_FINISH) { ERR_RETURN(strm, Z_BUF_ERROR); } /* User must not provide more input after the first FINISH: */ if (s->status == FINISH_STATE && strm->avail_in != 0) { ERR_RETURN(strm, Z_BUF_ERROR); } /* Write the header */ if (s->status == INIT_STATE) { /* zlib header */ uInt header = (Z_DEFLATED + ((s->w_bits-8)<<4)) << 8; uInt level_flags; if (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2) level_flags = 0; else if (s->level < 6) level_flags = 1; else if (s->level == 6) level_flags = 2; else level_flags = 3; header |= (level_flags << 6); if (s->strstart != 0) header |= PRESET_DICT; header += 31 - (header % 31); putShortMSB(s, header); /* Save the adler32 of the preset dictionary: */ if (s->strstart != 0) { putShortMSB(s, (uInt)(strm->adler >> 16)); putShortMSB(s, (uInt)(strm->adler & 0xffff)); } strm->adler = adler32(0L, Z_NULL, 0); s->status = BUSY_STATE; /* Compression must start with an empty pending buffer */ flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } #ifdef GZIP if (s->status == GZIP_STATE) { /* gzip header */ strm->adler = crc32(0L, Z_NULL, 0); put_byte(s, 31); put_byte(s, 139); put_byte(s, 8); if (s->gzhead == Z_NULL) { put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); put_byte(s, 0); put_byte(s, s->level == 9 ? 2 : (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 4 : 0)); put_byte(s, OS_CODE); s->status = BUSY_STATE; /* Compression must start with an empty pending buffer */ flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } else { put_byte(s, (s->gzhead->text ? 1 : 0) + (s->gzhead->hcrc ? 2 : 0) + (s->gzhead->extra == Z_NULL ? 0 : 4) + (s->gzhead->name == Z_NULL ? 0 : 8) + (s->gzhead->comment == Z_NULL ? 0 : 16) ); put_byte(s, (Byte)(s->gzhead->time & 0xff)); put_byte(s, (Byte)((s->gzhead->time >> 8) & 0xff)); put_byte(s, (Byte)((s->gzhead->time >> 16) & 0xff)); put_byte(s, (Byte)((s->gzhead->time >> 24) & 0xff)); put_byte(s, s->level == 9 ? 2 : (s->strategy >= Z_HUFFMAN_ONLY || s->level < 2 ? 
4 : 0)); put_byte(s, s->gzhead->os & 0xff); if (s->gzhead->extra != Z_NULL) { put_byte(s, s->gzhead->extra_len & 0xff); put_byte(s, (s->gzhead->extra_len >> 8) & 0xff); } if (s->gzhead->hcrc) strm->adler = crc32(strm->adler, s->pending_buf, s->pending); s->gzindex = 0; s->status = EXTRA_STATE; } } if (s->status == EXTRA_STATE) { if (s->gzhead->extra != Z_NULL) { ulg beg = s->pending; /* start of bytes to update crc */ uInt left = (s->gzhead->extra_len & 0xffff) - s->gzindex; while (s->pending + left > s->pending_buf_size) { uInt copy = s->pending_buf_size - s->pending; zmemcpy(s->pending_buf + s->pending, s->gzhead->extra + s->gzindex, copy); s->pending = s->pending_buf_size; HCRC_UPDATE(beg); s->gzindex += copy; flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } beg = 0; left -= copy; } zmemcpy(s->pending_buf + s->pending, s->gzhead->extra + s->gzindex, left); s->pending += left; HCRC_UPDATE(beg); s->gzindex = 0; } s->status = NAME_STATE; } if (s->status == NAME_STATE) { if (s->gzhead->name != Z_NULL) { ulg beg = s->pending; /* start of bytes to update crc */ int val; do { if (s->pending == s->pending_buf_size) { HCRC_UPDATE(beg); flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } beg = 0; } val = s->gzhead->name[s->gzindex++]; put_byte(s, val); } while (val != 0); HCRC_UPDATE(beg); s->gzindex = 0; } s->status = COMMENT_STATE; } if (s->status == COMMENT_STATE) { if (s->gzhead->comment != Z_NULL) { ulg beg = s->pending; /* start of bytes to update crc */ int val; do { if (s->pending == s->pending_buf_size) { HCRC_UPDATE(beg); flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } beg = 0; } val = s->gzhead->comment[s->gzindex++]; put_byte(s, val); } while (val != 0); HCRC_UPDATE(beg); } s->status = HCRC_STATE; } if (s->status == HCRC_STATE) { if (s->gzhead->hcrc) { if (s->pending + 2 > s->pending_buf_size) { flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } put_byte(s, (Byte)(strm->adler & 0xff)); put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); strm->adler = crc32(0L, Z_NULL, 0); } s->status = BUSY_STATE; /* Compression must start with an empty pending buffer */ flush_pending(strm); if (s->pending != 0) { s->last_flush = -1; return Z_OK; } } #endif /* Start a new block or continue the current one. */ if (strm->avail_in != 0 || s->lookahead != 0 || (flush != Z_NO_FLUSH && s->status != FINISH_STATE)) { block_state bstate; bstate = s->level == 0 ? deflate_stored(s, flush) : s->strategy == Z_HUFFMAN_ONLY ? deflate_huff(s, flush) : s->strategy == Z_RLE ? deflate_rle(s, flush) : (*(configuration_table[s->level].func))(s, flush); if (bstate == finish_started || bstate == finish_done) { s->status = FINISH_STATE; } if (bstate == need_more || bstate == finish_started) { if (strm->avail_out == 0) { s->last_flush = -1; /* avoid BUF_ERROR next call, see above */ } return Z_OK; /* If flush != Z_NO_FLUSH && avail_out == 0, the next call * of deflate should use the same flush parameter to make sure * that the flush is complete. So we don't have to output an * empty block here, this will be done at next call. This also * ensures that for a very small output buffer, we emit at most * one empty block. */ } if (bstate == block_done) { if (flush == Z_PARTIAL_FLUSH) { _tr_align(s); } else if (flush != Z_BLOCK) { /* FULL_FLUSH or SYNC_FLUSH */ _tr_stored_block(s, (char*)0, 0L, 0); /* For a full flush, this empty block will be recognized * as a special marker by inflate_sync(). 
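*
* [Editor's note, not part of zlib] Seen from the application, these flush
* modes follow the usual zlib calling pattern sketched below (hedged;
* write_output(), done and at_packet_boundary are placeholder names, next_in
* and avail_in are assumed to already point at the current input chunk, and
* error handling is elided):
*
*   int flush = done ? Z_FINISH
*                    : (at_packet_boundary ? Z_SYNC_FLUSH : Z_NO_FLUSH);
*   do {
*       strm.next_out  = outbuf;
*       strm.avail_out = sizeof(outbuf);
*       deflate(&strm, flush);
*       write_output(outbuf, sizeof(outbuf) - strm.avail_out);
*   } while (strm.avail_out == 0);
*
* Both Z_SYNC_FLUSH and Z_FULL_FLUSH emit the empty stored block handled
* here, giving the receiver a byte-aligned boundary; Z_FULL_FLUSH additionally
* forgets the history (CLEAR_HASH just below), trading compression for the
* ability to resynchronize after damage.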
*/ if (flush == Z_FULL_FLUSH) { CLEAR_HASH(s); /* forget history */ if (s->lookahead == 0) { s->strstart = 0; s->block_start = 0L; s->insert = 0; } } } flush_pending(strm); if (strm->avail_out == 0) { s->last_flush = -1; /* avoid BUF_ERROR at next call, see above */ return Z_OK; } } } if (flush != Z_FINISH) return Z_OK; if (s->wrap <= 0) return Z_STREAM_END; /* Write the trailer */ #ifdef GZIP if (s->wrap == 2) { put_byte(s, (Byte)(strm->adler & 0xff)); put_byte(s, (Byte)((strm->adler >> 8) & 0xff)); put_byte(s, (Byte)((strm->adler >> 16) & 0xff)); put_byte(s, (Byte)((strm->adler >> 24) & 0xff)); put_byte(s, (Byte)(strm->total_in & 0xff)); put_byte(s, (Byte)((strm->total_in >> 8) & 0xff)); put_byte(s, (Byte)((strm->total_in >> 16) & 0xff)); put_byte(s, (Byte)((strm->total_in >> 24) & 0xff)); } else #endif { putShortMSB(s, (uInt)(strm->adler >> 16)); putShortMSB(s, (uInt)(strm->adler & 0xffff)); } flush_pending(strm); /* If avail_out is zero, the application will call deflate again * to flush the rest. */ if (s->wrap > 0) s->wrap = -s->wrap; /* write the trailer only once! */ return s->pending != 0 ? Z_OK : Z_STREAM_END; } /* ========================================================================= */ int ZEXPORT deflateEnd (strm) z_streamp strm; { int status; if (deflateStateCheck(strm)) return Z_STREAM_ERROR; status = strm->state->status; /* Deallocate in reverse order of allocations: */ TRY_FREE(strm, strm->state->pending_buf); TRY_FREE(strm, strm->state->head); TRY_FREE(strm, strm->state->prev); TRY_FREE(strm, strm->state->window); ZFREE(strm, strm->state); strm->state = Z_NULL; return status == BUSY_STATE ? Z_DATA_ERROR : Z_OK; } /* ========================================================================= * Copy the source state to the destination state. * To simplify the source, this is not supported for 16-bit MSDOS (which * doesn't have enough memory anyway to duplicate compression states). 
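*
* [Editor's note, not part of zlib] A hedged sketch of the intended use of
* deflateCopy(): snapshot the stream, run an experiment (for example a
* different input filter or strategy) on the copy, and keep whichever result
* is better. All names other than the zlib.h calls are placeholders:
*
*   z_stream trial;
*   if (deflateCopy(&trial, &strm) == Z_OK) {
*       ... feed pre-filtered input to deflate(&trial, ...) and measure ...
*       deflateEnd(&trial);       (discard the experiment; strm is unaffected)
*   }
*
* Both streams own separately allocated windows and buffers, so whichever one
* is abandoned must still be released with deflateEnd().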
*/ int ZEXPORT deflateCopy (dest, source) z_streamp dest; z_streamp source; { #ifdef MAXSEG_64K return Z_STREAM_ERROR; #else deflate_state *ds; deflate_state *ss; ushf *overlay; if (deflateStateCheck(source) || dest == Z_NULL) { return Z_STREAM_ERROR; } ss = source->state; zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); ds = (deflate_state *) ZALLOC(dest, 1, sizeof(deflate_state)); if (ds == Z_NULL) return Z_MEM_ERROR; dest->state = (struct internal_state FAR *) ds; zmemcpy((voidpf)ds, (voidpf)ss, sizeof(deflate_state)); ds->strm = dest; ds->window = (Bytef *) ZALLOC(dest, ds->w_size, 2*sizeof(Byte)); ds->prev = (Posf *) ZALLOC(dest, ds->w_size, sizeof(Pos)); ds->head = (Posf *) ZALLOC(dest, ds->hash_size, sizeof(Pos)); overlay = (ushf *) ZALLOC(dest, ds->lit_bufsize, sizeof(ush)+2); ds->pending_buf = (uchf *) overlay; if (ds->window == Z_NULL || ds->prev == Z_NULL || ds->head == Z_NULL || ds->pending_buf == Z_NULL) { deflateEnd (dest); return Z_MEM_ERROR; } /* following zmemcpy do not work for 16-bit MSDOS */ zmemcpy(ds->window, ss->window, ds->w_size * 2 * sizeof(Byte)); zmemcpy((voidpf)ds->prev, (voidpf)ss->prev, ds->w_size * sizeof(Pos)); zmemcpy((voidpf)ds->head, (voidpf)ss->head, ds->hash_size * sizeof(Pos)); zmemcpy(ds->pending_buf, ss->pending_buf, (uInt)ds->pending_buf_size); ds->pending_out = ds->pending_buf + (ss->pending_out - ss->pending_buf); ds->d_buf = overlay + ds->lit_bufsize/sizeof(ush); ds->l_buf = ds->pending_buf + (1+sizeof(ush))*ds->lit_bufsize; ds->l_desc.dyn_tree = ds->dyn_ltree; ds->d_desc.dyn_tree = ds->dyn_dtree; ds->bl_desc.dyn_tree = ds->bl_tree; return Z_OK; #endif /* MAXSEG_64K */ } /* =========================================================================== * Read a new buffer from the current input stream, update the adler32 * and total number of bytes read. All deflate() input goes through * this function so some applications may wish to modify it to avoid * allocating a large strm->next_in buffer and copying from it. * (See also flush_pending()). */ local unsigned read_buf(strm, buf, size) z_streamp strm; Bytef *buf; unsigned size; { unsigned len = strm->avail_in; if (len > size) len = size; if (len == 0) return 0; strm->avail_in -= len; zmemcpy(buf, strm->next_in, len); if (strm->state->wrap == 1) { strm->adler = adler32(strm->adler, buf, len); } #ifdef GZIP else if (strm->state->wrap == 2) { strm->adler = crc32(strm->adler, buf, len); } #endif strm->next_in += len; strm->total_in += len; return len; } /* =========================================================================== * Initialize the "longest match" routines for a new zlib stream */ local void lm_init (s) deflate_state *s; { s->window_size = (ulg)2L*s->w_size; CLEAR_HASH(s); /* Set the default configuration parameters: */ s->max_lazy_match = configuration_table[s->level].max_lazy; s->good_match = configuration_table[s->level].good_length; s->nice_match = configuration_table[s->level].nice_length; s->max_chain_length = configuration_table[s->level].max_chain; s->strstart = 0; s->block_start = 0L; s->lookahead = 0; s->insert = 0; s->match_length = s->prev_length = MIN_MATCH-1; s->match_available = 0; s->ins_h = 0; #ifndef FASTEST #ifdef ASMV match_init(); /* initialize the asm code */ #endif #endif } #ifndef FASTEST /* =========================================================================== * Set match_start to the longest match starting at the given string and * return its length. 
Matches shorter or equal to prev_length are discarded, * in which case the result is equal to prev_length and match_start is * garbage. * IN assertions: cur_match is the head of the hash chain for the current * string (strstart) and its distance is <= MAX_DIST, and prev_length >= 1 * OUT assertion: the match length is not greater than s->lookahead. */ #ifndef ASMV /* For 80x86 and 680x0, an optimized version will be provided in match.asm or * match.S. The code will be functionally equivalent. */ local uInt longest_match(s, cur_match) deflate_state *s; IPos cur_match; /* current match */ { unsigned chain_length = s->max_chain_length;/* max hash chain length */ register Bytef *scan = s->window + s->strstart; /* current string */ register Bytef *match; /* matched string */ register int len; /* length of current match */ int best_len = (int)s->prev_length; /* best match length so far */ int nice_match = s->nice_match; /* stop if match long enough */ IPos limit = s->strstart > (IPos)MAX_DIST(s) ? s->strstart - (IPos)MAX_DIST(s) : NIL; /* Stop when cur_match becomes <= limit. To simplify the code, * we prevent matches with the string of window index 0. */ Posf *prev = s->prev; uInt wmask = s->w_mask; #ifdef UNALIGNED_OK /* Compare two bytes at a time. Note: this is not always beneficial. * Try with and without -DUNALIGNED_OK to check. */ register Bytef *strend = s->window + s->strstart + MAX_MATCH - 1; register ush scan_start = *(ushf*)scan; register ush scan_end = *(ushf*)(scan+best_len-1); #else register Bytef *strend = s->window + s->strstart + MAX_MATCH; register Byte scan_end1 = scan[best_len-1]; register Byte scan_end = scan[best_len]; #endif /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. * It is easy to get rid of this optimization if necessary. */ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); /* Do not waste too much time if we already have a good match: */ if (s->prev_length >= s->good_match) { chain_length >>= 2; } /* Do not look for matches beyond the end of the input. This is necessary * to make deflate deterministic. */ if ((uInt)nice_match > s->lookahead) nice_match = (int)s->lookahead; Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); do { Assert(cur_match < s->strstart, "no future"); match = s->window + cur_match; /* Skip to next match if the match length cannot increase * or if the match length is less than 2. Note that the checks below * for insufficient lookahead only occur occasionally for performance * reasons. Therefore uninitialized memory will be accessed, and * conditional jumps will be made that depend on those values. * However the length of the match is limited to the lookahead, so * the output of deflate is not affected by the uninitialized values. */ #if (defined(UNALIGNED_OK) && MAX_MATCH == 258) /* This code assumes sizeof(unsigned short) == 2. Do not use * UNALIGNED_OK if your compiler uses a different size. */ if (*(ushf*)(match+best_len-1) != scan_end || *(ushf*)match != scan_start) continue; /* It is not necessary to compare scan[2] and match[2] since they are * always equal when the other bytes match, given that the hash keys * are equal and that HASH_BITS >= 8. Compare 2 bytes at a time at * strstart+3, +5, ... up to strstart+257. We check for insufficient * lookahead only every 4th comparison; the 128th check will be made * at strstart+257. 
If MAX_MATCH-2 is not a multiple of 8, it is * necessary to put more guard bytes at the end of the window, or * to check more often for insufficient lookahead. */ Assert(scan[2] == match[2], "scan[2]?"); scan++, match++; do { } while (*(ushf*)(scan+=2) == *(ushf*)(match+=2) && *(ushf*)(scan+=2) == *(ushf*)(match+=2) && *(ushf*)(scan+=2) == *(ushf*)(match+=2) && *(ushf*)(scan+=2) == *(ushf*)(match+=2) && scan < strend); /* The funny "do {}" generates better code on most compilers */ /* Here, scan <= window+strstart+257 */ Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); if (*scan == *match) scan++; len = (MAX_MATCH - 1) - (int)(strend-scan); scan = strend - (MAX_MATCH-1); #else /* UNALIGNED_OK */ if (match[best_len] != scan_end || match[best_len-1] != scan_end1 || *match != *scan || *++match != scan[1]) continue; /* The check at best_len-1 can be removed because it will be made * again later. (This heuristic is not always a win.) * It is not necessary to compare scan[2] and match[2] since they * are always equal when the other bytes match, given that * the hash keys are equal and that HASH_BITS >= 8. */ scan += 2, match++; Assert(*scan == *match, "match[2]?"); /* We check for insufficient lookahead only every 8th comparison; * the 256th check will be made at strstart+258. */ do { } while (*++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && scan < strend); Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); len = MAX_MATCH - (int)(strend - scan); scan = strend - MAX_MATCH; #endif /* UNALIGNED_OK */ if (len > best_len) { s->match_start = cur_match; best_len = len; if (len >= nice_match) break; #ifdef UNALIGNED_OK scan_end = *(ushf*)(scan+best_len-1); #else scan_end1 = scan[best_len-1]; scan_end = scan[best_len]; #endif } } while ((cur_match = prev[cur_match & wmask]) > limit && --chain_length != 0); if ((uInt)best_len <= s->lookahead) return (uInt)best_len; return s->lookahead; } #endif /* ASMV */ #else /* FASTEST */ /* --------------------------------------------------------------------------- * Optimized version for FASTEST only */ local uInt longest_match(s, cur_match) deflate_state *s; IPos cur_match; /* current match */ { register Bytef *scan = s->window + s->strstart; /* current string */ register Bytef *match; /* matched string */ register int len; /* length of current match */ register Bytef *strend = s->window + s->strstart + MAX_MATCH; /* The code is optimized for HASH_BITS >= 8 and MAX_MATCH-2 multiple of 16. * It is easy to get rid of this optimization if necessary. */ Assert(s->hash_bits >= 8 && MAX_MATCH == 258, "Code too clever"); Assert((ulg)s->strstart <= s->window_size-MIN_LOOKAHEAD, "need lookahead"); Assert(cur_match < s->strstart, "no future"); match = s->window + cur_match; /* Return failure if the match length is less than 2: */ if (match[0] != scan[0] || match[1] != scan[1]) return MIN_MATCH-1; /* The check at best_len-1 can be removed because it will be made * again later. (This heuristic is not always a win.) * It is not necessary to compare scan[2] and match[2] since they * are always equal when the other bytes match, given that * the hash keys are equal and that HASH_BITS >= 8. */ scan += 2, match += 2; Assert(*scan == *match, "match[2]?"); /* We check for insufficient lookahead only every 8th comparison; * the 256th check will be made at strstart+258. 
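*
* [Editor's note, not part of zlib] Conceptually, the unrolled comparison
* below just measures how many bytes scan and match have in common, capped
* at MAX_MATCH (258). A hedged reference sketch of that idea, not the code
* zlib uses and much slower:
*
*   static uInt naive_match_len(const Bytef *scan, const Bytef *match)
*   {
*       uInt len = 0;
*       while (len < MAX_MATCH && scan[len] == match[len])
*           len++;
*       return len;
*   }
*
* The real loop compares eight bytes per iteration and relies on the window
* padding maintained by fill_window(), so it may look slightly past the valid
* data without changing the result.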
*/ do { } while (*++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && *++scan == *++match && scan < strend); Assert(scan <= s->window+(unsigned)(s->window_size-1), "wild scan"); len = MAX_MATCH - (int)(strend - scan); if (len < MIN_MATCH) return MIN_MATCH - 1; s->match_start = cur_match; return (uInt)len <= s->lookahead ? (uInt)len : s->lookahead; } #endif /* FASTEST */ #ifdef ZLIB_DEBUG #define EQUAL 0 /* result of memcmp for equal strings */ /* =========================================================================== * Check that the match at match_start is indeed a match. */ local void check_match(s, start, match, length) deflate_state *s; IPos start, match; int length; { /* check that the match is indeed a match */ if (zmemcmp(s->window + match, s->window + start, length) != EQUAL) { fprintf(stderr, " start %u, match %u, length %d\n", start, match, length); do { fprintf(stderr, "%c%c", s->window[match++], s->window[start++]); } while (--length != 0); z_error("invalid match"); } if (z_verbose > 1) { fprintf(stderr,"\\[%d,%d]", start-match, length); do { putc(s->window[start++], stderr); } while (--length != 0); } } #else # define check_match(s, start, match, length) #endif /* ZLIB_DEBUG */ /* =========================================================================== * Fill the window when the lookahead becomes insufficient. * Updates strstart and lookahead. * * IN assertion: lookahead < MIN_LOOKAHEAD * OUT assertions: strstart <= window_size-MIN_LOOKAHEAD * At least one byte has been read, or avail_in == 0; reads are * performed for at least two bytes (required for the zip translate_eol * option -- not supported here). */ local void fill_window(s) deflate_state *s; { unsigned n; unsigned more; /* Amount of free space at the end of the window. */ uInt wsize = s->w_size; Assert(s->lookahead < MIN_LOOKAHEAD, "already enough lookahead"); do { more = (unsigned)(s->window_size -(ulg)s->lookahead -(ulg)s->strstart); /* Deal with !@#$% 64K limit: */ if (sizeof(int) <= 2) { if (more == 0 && s->strstart == 0 && s->lookahead == 0) { more = wsize; } else if (more == (unsigned)(-1)) { /* Very unlikely, but possible on 16 bit machine if * strstart == 0 && lookahead == 1 (input done a byte at time) */ more--; } } /* If the window is almost full and there is insufficient lookahead, * move the upper half to the lower one to make room in the upper half. */ if (s->strstart >= wsize+MAX_DIST(s)) { zmemcpy(s->window, s->window+wsize, (unsigned)wsize - more); s->match_start -= wsize; s->strstart -= wsize; /* we now have strstart >= MAX_DIST */ s->block_start -= (long) wsize; slide_hash(s); more += wsize; } if (s->strm->avail_in == 0) break; /* If there was no sliding: * strstart <= WSIZE+MAX_DIST-1 && lookahead <= MIN_LOOKAHEAD - 1 && * more == window_size - lookahead - strstart * => more >= window_size - (MIN_LOOKAHEAD-1 + WSIZE + MAX_DIST-1) * => more >= window_size - 2*WSIZE + 2 * In the BIG_MEM or MMAP case (not yet supported), * window_size == input_size + MIN_LOOKAHEAD && * strstart + s->lookahead <= input_size => more >= MIN_LOOKAHEAD. * Otherwise, window_size == 2*WSIZE so more >= 2. * If there was sliding, more >= WSIZE. So in all cases, more >= 2. 
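*
* [Editor's note, not part of zlib] Concretely, with the default windowBits
* of 15: WSIZE is 32768 and window_size is 2 * WSIZE = 65536 (set in
* lm_init), so the bound above evaluates to more >= 65536 - 2 * 32768 + 2,
* i.e. more >= 2, which is exactly what the assertion that follows checks.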
*/ Assert(more >= 2, "more < 2"); n = read_buf(s->strm, s->window + s->strstart + s->lookahead, more); s->lookahead += n; /* Initialize the hash value now that we have some input: */ if (s->lookahead + s->insert >= MIN_MATCH) { uInt str = s->strstart - s->insert; s->ins_h = s->window[str]; UPDATE_HASH(s, s->ins_h, s->window[str + 1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif while (s->insert) { UPDATE_HASH(s, s->ins_h, s->window[str + MIN_MATCH-1]); #ifndef FASTEST s->prev[str & s->w_mask] = s->head[s->ins_h]; #endif s->head[s->ins_h] = (Pos)str; str++; s->insert--; if (s->lookahead + s->insert < MIN_MATCH) break; } } /* If the whole input has less than MIN_MATCH bytes, ins_h is garbage, * but this is not important since only literal bytes will be emitted. */ } while (s->lookahead < MIN_LOOKAHEAD && s->strm->avail_in != 0); /* If the WIN_INIT bytes after the end of the current data have never been * written, then zero those bytes in order to avoid memory check reports of * the use of uninitialized (or uninitialised as Julian writes) bytes by * the longest match routines. Update the high water mark for the next * time through here. WIN_INIT is set to MAX_MATCH since the longest match * routines allow scanning to strstart + MAX_MATCH, ignoring lookahead. */ if (s->high_water < s->window_size) { ulg curr = s->strstart + (ulg)(s->lookahead); ulg init; if (s->high_water < curr) { /* Previous high water mark below current data -- zero WIN_INIT * bytes or up to end of window, whichever is less. */ init = s->window_size - curr; if (init > WIN_INIT) init = WIN_INIT; zmemzero(s->window + curr, (unsigned)init); s->high_water = curr + init; } else if (s->high_water < (ulg)curr + WIN_INIT) { /* High water mark at or above current data, but below current data * plus WIN_INIT -- zero out to current data plus WIN_INIT, or up * to end of window, whichever is less. */ init = (ulg)curr + WIN_INIT - s->high_water; if (init > s->window_size - s->high_water) init = s->window_size - s->high_water; zmemzero(s->window + s->high_water, (unsigned)init); s->high_water += init; } } Assert((ulg)s->strstart <= s->window_size - MIN_LOOKAHEAD, "not enough room for search"); } /* =========================================================================== * Flush the current block, with given end-of-file flag. * IN assertion: strstart is set to the end of the current match. */ #define FLUSH_BLOCK_ONLY(s, last) { \ _tr_flush_block(s, (s->block_start >= 0L ? \ (charf *)&s->window[(unsigned)s->block_start] : \ (charf *)Z_NULL), \ (ulg)((long)s->strstart - s->block_start), \ (last)); \ s->block_start = s->strstart; \ flush_pending(s->strm); \ Tracev((stderr,"[FLUSH]")); \ } /* Same but force premature exit if necessary. */ #define FLUSH_BLOCK(s, last) { \ FLUSH_BLOCK_ONLY(s, last); \ if (s->strm->avail_out == 0) return (last) ? finish_started : need_more; \ } /* Maximum stored block length in deflate format (not including header). */ #define MAX_STORED 65535 /* Minimum of a and b. */ #define MIN(a, b) ((a) > (b) ? (b) : (a)) /* =========================================================================== * Copy without compression as much as possible from the input stream, return * the current block state. * * In case deflateParams() is used to later switch to a non-zero compression * level, s->matches (otherwise unused when storing) keeps track of the number * of hash table slides to perform. If s->matches is 1, then one hash table * slide will be done when switching. 
If s->matches is 2, the maximum value * allowed here, then the hash table will be cleared, since two or more slides * is the same as a clear. * * deflate_stored() is written to minimize the number of times an input byte is * copied. It is most efficient with large input and output buffers, which * maximizes the opportunites to have a single copy from next_in to next_out. */ local block_state deflate_stored(s, flush) deflate_state *s; int flush; { /* Smallest worthy block size when not flushing or finishing. By default * this is 32K. This can be as small as 507 bytes for memLevel == 1. For * large input and output buffers, the stored block size will be larger. */ unsigned min_block = MIN(s->pending_buf_size - 5, s->w_size); /* Copy as many min_block or larger stored blocks directly to next_out as * possible. If flushing, copy the remaining available input to next_out as * stored blocks, if there is enough space. */ unsigned len, left, have, last = 0; unsigned used = s->strm->avail_in; do { /* Set len to the maximum size block that we can copy directly with the * available input data and output space. Set left to how much of that * would be copied from what's left in the window. */ len = MAX_STORED; /* maximum deflate stored block length */ have = (s->bi_valid + 42) >> 3; /* number of header bytes */ if (s->strm->avail_out < have) /* need room for header */ break; /* maximum stored block length that will fit in avail_out: */ have = s->strm->avail_out - have; left = s->strstart - s->block_start; /* bytes left in window */ if (len > (ulg)left + s->strm->avail_in) len = left + s->strm->avail_in; /* limit len to the input */ if (len > have) len = have; /* limit len to the output */ /* If the stored block would be less than min_block in length, or if * unable to copy all of the available input when flushing, then try * copying to the window and the pending buffer instead. Also don't * write an empty block when flushing -- deflate() does that. */ if (len < min_block && ((len == 0 && flush != Z_FINISH) || flush == Z_NO_FLUSH || len != left + s->strm->avail_in)) break; /* Make a dummy stored block in pending to get the header bytes, * including any pending bits. This also updates the debugging counts. */ last = flush == Z_FINISH && len == left + s->strm->avail_in ? 1 : 0; _tr_stored_block(s, (char *)0, 0L, last); /* Replace the lengths in the dummy stored block with len. */ s->pending_buf[s->pending - 4] = len; s->pending_buf[s->pending - 3] = len >> 8; s->pending_buf[s->pending - 2] = ~len; s->pending_buf[s->pending - 1] = ~len >> 8; /* Write the stored block header bytes. */ flush_pending(s->strm); #ifdef ZLIB_DEBUG /* Update debugging counts for the data about to be copied. */ s->compressed_len += len << 3; s->bits_sent += len << 3; #endif /* Copy uncompressed bytes from the window to next_out. */ if (left) { if (left > len) left = len; zmemcpy(s->strm->next_out, s->window + s->block_start, left); s->strm->next_out += left; s->strm->avail_out -= left; s->strm->total_out += left; s->block_start += left; len -= left; } /* Copy uncompressed bytes directly from next_in to next_out, updating * the check value. */ if (len) { read_buf(s->strm, s->strm->next_out, len); s->strm->next_out += len; s->strm->avail_out -= len; s->strm->total_out += len; } } while (last == 0); /* Update the sliding window with the last s->w_size bytes of the copied * data, or append all of the copied data to the existing window if less * than s->w_size bytes were copied. 
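*
* [Editor's note, not part of zlib] From the application side, the level
* switch this bookkeeping prepares for looks like the hedged sketch below
* (buffer handling elided; the idea is to store an already-compressed prefix
* and then compress the rest):
*
*   deflateInit(&strm, 0);                         (level 0: stored blocks only)
*   ... deflate() the incompressible prefix ...
*   deflateParams(&strm, 6, Z_DEFAULT_STRATEGY);   (switch to level 6 mid-stream)
*   ... deflate() the remaining data, finishing with Z_FINISH ...
*
* deflateParams(), defined earlier in this file, then performs the hash-table
* slide or clear that s->matches recorded here.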
Also update the number of bytes to * insert in the hash tables, in the event that deflateParams() switches to * a non-zero compression level. */ used -= s->strm->avail_in; /* number of input bytes directly copied */ if (used) { /* If any input was used, then no unused input remains in the window, * therefore s->block_start == s->strstart. */ if (used >= s->w_size) { /* supplant the previous history */ s->matches = 2; /* clear hash */ zmemcpy(s->window, s->strm->next_in - s->w_size, s->w_size); s->strstart = s->w_size; } else { if (s->window_size - s->strstart <= used) { /* Slide the window down. */ s->strstart -= s->w_size; zmemcpy(s->window, s->window + s->w_size, s->strstart); if (s->matches < 2) s->matches++; /* add a pending slide_hash() */ } zmemcpy(s->window + s->strstart, s->strm->next_in - used, used); s->strstart += used; } s->block_start = s->strstart; s->insert += MIN(used, s->w_size - s->insert); } if (s->high_water < s->strstart) s->high_water = s->strstart; /* If the last block was written to next_out, then done. */ if (last) return finish_done; /* If flushing and all input has been consumed, then done. */ if (flush != Z_NO_FLUSH && flush != Z_FINISH && s->strm->avail_in == 0 && (long)s->strstart == s->block_start) return block_done; /* Fill the window with any remaining input. */ have = s->window_size - s->strstart - 1; if (s->strm->avail_in > have && s->block_start >= (long)s->w_size) { /* Slide the window down. */ s->block_start -= s->w_size; s->strstart -= s->w_size; zmemcpy(s->window, s->window + s->w_size, s->strstart); if (s->matches < 2) s->matches++; /* add a pending slide_hash() */ have += s->w_size; /* more space now */ } if (have > s->strm->avail_in) have = s->strm->avail_in; if (have) { read_buf(s->strm, s->window + s->strstart, have); s->strstart += have; } if (s->high_water < s->strstart) s->high_water = s->strstart; /* There was not enough avail_out to write a complete worthy or flushed * stored block to next_out. Write a stored block to pending instead, if we * have enough input for a worthy block, or if flushing and there is enough * room for the remaining input as a stored block in the pending buffer. */ have = (s->bi_valid + 42) >> 3; /* number of header bytes */ /* maximum stored block length that will fit in pending: */ have = MIN(s->pending_buf_size - have, MAX_STORED); min_block = MIN(have, s->w_size); left = s->strstart - s->block_start; if (left >= min_block || ((left || flush == Z_FINISH) && flush != Z_NO_FLUSH && s->strm->avail_in == 0 && left <= have)) { len = MIN(left, have); last = flush == Z_FINISH && s->strm->avail_in == 0 && len == left ? 1 : 0; _tr_stored_block(s, (charf *)s->window + s->block_start, len, last); s->block_start += len; flush_pending(s->strm); } /* We've done all we can with the available input and output. */ return last ? finish_started : need_more; } /* =========================================================================== * Compress as much as possible from the input stream, return the current * block state. * This function does not perform lazy evaluation of matches and inserts * new strings in the dictionary only for unmatched strings or for short * matches. It is used only for the fast compression options. */ local block_state deflate_fast(s, flush) deflate_state *s; int flush; { IPos hash_head; /* head of the hash chain */ int bflush; /* set if current block must be flushed */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. 
We need MAX_MATCH bytes * for the next match, plus MIN_MATCH bytes to insert the * string following the next match. */ if (s->lookahead < MIN_LOOKAHEAD) { fill_window(s); if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* Insert the string window[strstart .. strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ hash_head = NIL; if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } /* Find the longest match, discarding those <= prev_length. * At this point we have always match_length < MIN_MATCH */ if (hash_head != NIL && s->strstart - hash_head <= MAX_DIST(s)) { /* To simplify the code, we prevent matches with the string * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ s->match_length = longest_match (s, hash_head); /* longest_match() sets match_start */ } if (s->match_length >= MIN_MATCH) { check_match(s, s->strstart, s->match_start, s->match_length); _tr_tally_dist(s, s->strstart - s->match_start, s->match_length - MIN_MATCH, bflush); s->lookahead -= s->match_length; /* Insert new strings in the hash table only if the match length * is not too large. This saves time but degrades compression. */ #ifndef FASTEST if (s->match_length <= s->max_insert_length && s->lookahead >= MIN_MATCH) { s->match_length--; /* string at strstart already in table */ do { s->strstart++; INSERT_STRING(s, s->strstart, hash_head); /* strstart never exceeds WSIZE-MAX_MATCH, so there are * always MIN_MATCH bytes ahead. */ } while (--s->match_length != 0); s->strstart++; } else #endif { s->strstart += s->match_length; s->match_length = 0; s->ins_h = s->window[s->strstart]; UPDATE_HASH(s, s->ins_h, s->window[s->strstart+1]); #if MIN_MATCH != 3 Call UPDATE_HASH() MIN_MATCH-3 more times #endif /* If lookahead < MIN_MATCH, ins_h is garbage, but it does not * matter since it will be recomputed at next deflate call. */ } } else { /* No match, output a literal byte */ Tracevv((stderr,"%c", s->window[s->strstart])); _tr_tally_lit (s, s->window[s->strstart], bflush); s->lookahead--; s->strstart++; } if (bflush) FLUSH_BLOCK(s, 0); } s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->last_lit) FLUSH_BLOCK(s, 0); return block_done; } #ifndef FASTEST /* =========================================================================== * Same as above, but achieves better compression. We use a lazy * evaluation for matches: a match is finally adopted only if there is * no better match at the next window position. */ local block_state deflate_slow(s, flush) deflate_state *s; int flush; { IPos hash_head; /* head of hash chain */ int bflush; /* set if current block must be flushed */ /* Process the input block. */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes * for the next match, plus MIN_MATCH bytes to insert the * string following the next match. */ if (s->lookahead < MIN_LOOKAHEAD) { fill_window(s); if (s->lookahead < MIN_LOOKAHEAD && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* Insert the string window[strstart .. 
strstart+2] in the * dictionary, and set hash_head to the head of the hash chain: */ hash_head = NIL; if (s->lookahead >= MIN_MATCH) { INSERT_STRING(s, s->strstart, hash_head); } /* Find the longest match, discarding those <= prev_length. */ s->prev_length = s->match_length, s->prev_match = s->match_start; s->match_length = MIN_MATCH-1; if (hash_head != NIL && s->prev_length < s->max_lazy_match && s->strstart - hash_head <= MAX_DIST(s)) { /* To simplify the code, we prevent matches with the string * of window index 0 (in particular we have to avoid a match * of the string with itself at the start of the input file). */ s->match_length = longest_match (s, hash_head); /* longest_match() sets match_start */ if (s->match_length <= 5 && (s->strategy == Z_FILTERED #if TOO_FAR <= 32767 || (s->match_length == MIN_MATCH && s->strstart - s->match_start > TOO_FAR) #endif )) { /* If prev_match is also MIN_MATCH, match_start is garbage * but we will ignore the current match anyway. */ s->match_length = MIN_MATCH-1; } } /* If there was a match at the previous step and the current * match is not better, output the previous match: */ if (s->prev_length >= MIN_MATCH && s->match_length <= s->prev_length) { uInt max_insert = s->strstart + s->lookahead - MIN_MATCH; /* Do not insert strings in hash table beyond this. */ check_match(s, s->strstart-1, s->prev_match, s->prev_length); _tr_tally_dist(s, s->strstart -1 - s->prev_match, s->prev_length - MIN_MATCH, bflush); /* Insert in hash table all strings up to the end of the match. * strstart-1 and strstart are already inserted. If there is not * enough lookahead, the last two strings are not inserted in * the hash table. */ s->lookahead -= s->prev_length-1; s->prev_length -= 2; do { if (++s->strstart <= max_insert) { INSERT_STRING(s, s->strstart, hash_head); } } while (--s->prev_length != 0); s->match_available = 0; s->match_length = MIN_MATCH-1; s->strstart++; if (bflush) FLUSH_BLOCK(s, 0); } else if (s->match_available) { /* If there was no match at the previous position, output a * single literal. If there was a match but the current match * is longer, truncate the previous match to a single literal. */ Tracevv((stderr,"%c", s->window[s->strstart-1])); _tr_tally_lit(s, s->window[s->strstart-1], bflush); if (bflush) { FLUSH_BLOCK_ONLY(s, 0); } s->strstart++; s->lookahead--; if (s->strm->avail_out == 0) return need_more; } else { /* There is no previous match to compare with, wait for * the next step to decide. */ s->match_available = 1; s->strstart++; s->lookahead--; } } Assert (flush != Z_NO_FLUSH, "no flush?"); if (s->match_available) { Tracevv((stderr,"%c", s->window[s->strstart-1])); _tr_tally_lit(s, s->window[s->strstart-1], bflush); s->match_available = 0; } s->insert = s->strstart < MIN_MATCH-1 ? s->strstart : MIN_MATCH-1; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->last_lit) FLUSH_BLOCK(s, 0); return block_done; } #endif /* FASTEST */ /* =========================================================================== * For Z_RLE, simply look for runs of bytes, generate matches only of distance * one. Do not maintain a hash table. (It will be regenerated if this run of * deflate switches away from Z_RLE.) 
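 * Illustrative usage sketch, not part of this file: an application reaches
 * this strategy through the public API, for example
 *
 *     z_stream strm;
 *     memset(&strm, 0, sizeof(strm));
 *     deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
 *                  MAX_WBITS, 8, Z_RLE);
 *
 * or by switching an existing stream with deflateParams(&strm, level, Z_RLE),
 * after which deflate() dispatches to deflate_rle() below.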
*/ local block_state deflate_rle(s, flush) deflate_state *s; int flush; { int bflush; /* set if current block must be flushed */ uInt prev; /* byte at distance one to match */ Bytef *scan, *strend; /* scan goes up to strend for length of run */ for (;;) { /* Make sure that we always have enough lookahead, except * at the end of the input file. We need MAX_MATCH bytes * for the longest run, plus one for the unrolled loop. */ if (s->lookahead <= MAX_MATCH) { fill_window(s); if (s->lookahead <= MAX_MATCH && flush == Z_NO_FLUSH) { return need_more; } if (s->lookahead == 0) break; /* flush the current block */ } /* See how many times the previous byte repeats */ s->match_length = 0; if (s->lookahead >= MIN_MATCH && s->strstart > 0) { scan = s->window + s->strstart - 1; prev = *scan; if (prev == *++scan && prev == *++scan && prev == *++scan) { strend = s->window + s->strstart + MAX_MATCH; do { } while (prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && prev == *++scan && scan < strend); s->match_length = MAX_MATCH - (uInt)(strend - scan); if (s->match_length > s->lookahead) s->match_length = s->lookahead; } Assert(scan <= s->window+(uInt)(s->window_size-1), "wild scan"); } /* Emit match if have run of MIN_MATCH or longer, else emit literal */ if (s->match_length >= MIN_MATCH) { check_match(s, s->strstart, s->strstart - 1, s->match_length); _tr_tally_dist(s, 1, s->match_length - MIN_MATCH, bflush); s->lookahead -= s->match_length; s->strstart += s->match_length; s->match_length = 0; } else { /* No match, output a literal byte */ Tracevv((stderr,"%c", s->window[s->strstart])); _tr_tally_lit (s, s->window[s->strstart], bflush); s->lookahead--; s->strstart++; } if (bflush) FLUSH_BLOCK(s, 0); } s->insert = 0; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->last_lit) FLUSH_BLOCK(s, 0); return block_done; } /* =========================================================================== * For Z_HUFFMAN_ONLY, do not look for matches. Do not maintain a hash table. * (It will be regenerated if this run of deflate switches away from Huffman.) */ local block_state deflate_huff(s, flush) deflate_state *s; int flush; { int bflush; /* set if current block must be flushed */ for (;;) { /* Make sure that we have a literal to write. */ if (s->lookahead == 0) { fill_window(s); if (s->lookahead == 0) { if (flush == Z_NO_FLUSH) return need_more; break; /* flush the current block */ } } /* Output a literal byte */ s->match_length = 0; Tracevv((stderr,"%c", s->window[s->strstart])); _tr_tally_lit (s, s->window[s->strstart], bflush); s->lookahead--; s->strstart++; if (bflush) FLUSH_BLOCK(s, 0); } s->insert = 0; if (flush == Z_FINISH) { FLUSH_BLOCK(s, 1); return finish_done; } if (s->last_lit) FLUSH_BLOCK(s, 0); return block_done; } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/deflate.h0000644000076500000240000003153613572250760021245 0ustar alcaeusstaff/* deflate.h -- internal compression state * Copyright (C) 1995-2016 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* @(#) $Id$ */ #ifndef DEFLATE_H #define DEFLATE_H #include "zutil.h" /* define NO_GZIP when compiling if you want to disable gzip header and trailer creation by deflate(). 
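   Illustrative note, not part of the original header: whether deflate()
   produces a zlib, gzip, or raw stream is selected by the windowBits
   argument of deflateInit2(), for example

       deflateInit2(&strm, level, Z_DEFLATED, 15,      8, Z_DEFAULT_STRATEGY);   zlib wrapper
       deflateInit2(&strm, level, Z_DEFLATED, 15 + 16, 8, Z_DEFAULT_STRATEGY);   gzip wrapper
       deflateInit2(&strm, level, Z_DEFLATED, -15,     8, Z_DEFAULT_STRATEGY);   raw deflate

   and compiling with NO_GZIP removes support for the gzip form.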
NO_GZIP would be used to avoid linking in the crc code when it is not needed. For shared libraries, gzip encoding should be left enabled. */ #ifndef NO_GZIP # define GZIP #endif /* =========================================================================== * Internal compression state. */ #define LENGTH_CODES 29 /* number of length codes, not counting the special END_BLOCK code */ #define LITERALS 256 /* number of literal bytes 0..255 */ #define L_CODES (LITERALS+1+LENGTH_CODES) /* number of Literal or Length codes, including the END_BLOCK code */ #define D_CODES 30 /* number of distance codes */ #define BL_CODES 19 /* number of codes used to transfer the bit lengths */ #define HEAP_SIZE (2*L_CODES+1) /* maximum heap size */ #define MAX_BITS 15 /* All codes must not exceed MAX_BITS bits */ #define Buf_size 16 /* size of bit buffer in bi_buf */ #define INIT_STATE 42 /* zlib header -> BUSY_STATE */ #ifdef GZIP # define GZIP_STATE 57 /* gzip header -> BUSY_STATE | EXTRA_STATE */ #endif #define EXTRA_STATE 69 /* gzip extra block -> NAME_STATE */ #define NAME_STATE 73 /* gzip file name -> COMMENT_STATE */ #define COMMENT_STATE 91 /* gzip comment -> HCRC_STATE */ #define HCRC_STATE 103 /* gzip header CRC -> BUSY_STATE */ #define BUSY_STATE 113 /* deflate -> FINISH_STATE */ #define FINISH_STATE 666 /* stream complete */ /* Stream status */ /* Data structure describing a single value and its code string. */ typedef struct ct_data_s { union { ush freq; /* frequency count */ ush code; /* bit string */ } fc; union { ush dad; /* father node in Huffman tree */ ush len; /* length of bit string */ } dl; } FAR ct_data; #define Freq fc.freq #define Code fc.code #define Dad dl.dad #define Len dl.len typedef struct static_tree_desc_s static_tree_desc; typedef struct tree_desc_s { ct_data *dyn_tree; /* the dynamic tree */ int max_code; /* largest code with non zero frequency */ const static_tree_desc *stat_desc; /* the corresponding static tree */ } FAR tree_desc; typedef ush Pos; typedef Pos FAR Posf; typedef unsigned IPos; /* A Pos is an index in the character window. We use short instead of int to * save space in the various tables. IPos is used only for parameter passing. */ typedef struct internal_state { z_streamp strm; /* pointer back to this zlib stream */ int status; /* as the name implies */ Bytef *pending_buf; /* output still pending */ ulg pending_buf_size; /* size of pending_buf */ Bytef *pending_out; /* next pending byte to output to the stream */ ulg pending; /* nb of bytes in the pending buffer */ int wrap; /* bit 0 true for zlib, bit 1 true for gzip */ gz_headerp gzhead; /* gzip header information to write */ ulg gzindex; /* where in extra, name, or comment */ Byte method; /* can only be DEFLATED */ int last_flush; /* value of flush param for previous deflate call */ /* used by deflate.c: */ uInt w_size; /* LZ77 window size (32K by default) */ uInt w_bits; /* log2(w_size) (8..16) */ uInt w_mask; /* w_size - 1 */ Bytef *window; /* Sliding window. Input bytes are read into the second half of the window, * and move to the first half later to keep a dictionary of at least wSize * bytes. With this organization, matches are limited to a distance of * wSize-MAX_MATCH bytes, but this ensures that IO is always * performed with a length multiple of the block size. Also, it limits * the window size to 64K, which is quite useful on MSDOS. * To do: use the user input buffer as sliding window. 
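 * Illustrative numbers, not in the original comment: with the default
 * w_bits = 15 this gives w_size = 1 << 15 = 32768 and
 * window_size = 2 * 32768 = 65536, while the MAX_DIST() macro below further
 * limits match distances to
 *
 *     MAX_DIST(s) = w_size - MIN_LOOKAHEAD = 32768 - 262 = 32506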
*/ ulg window_size; /* Actual size of window: 2*wSize, except when the user input buffer * is directly used as sliding window. */ Posf *prev; /* Link to older string with same hash index. To limit the size of this * array to 64K, this link is maintained only for the last 32K strings. * An index in this array is thus a window index modulo 32K. */ Posf *head; /* Heads of the hash chains or NIL. */ uInt ins_h; /* hash index of string to be inserted */ uInt hash_size; /* number of elements in hash table */ uInt hash_bits; /* log2(hash_size) */ uInt hash_mask; /* hash_size-1 */ uInt hash_shift; /* Number of bits by which ins_h must be shifted at each input * step. It must be such that after MIN_MATCH steps, the oldest * byte no longer takes part in the hash key, that is: * hash_shift * MIN_MATCH >= hash_bits */ long block_start; /* Window position at the beginning of the current output block. Gets * negative when the window is moved backwards. */ uInt match_length; /* length of best match */ IPos prev_match; /* previous match */ int match_available; /* set if previous match exists */ uInt strstart; /* start of string to insert */ uInt match_start; /* start of matching string */ uInt lookahead; /* number of valid bytes ahead in window */ uInt prev_length; /* Length of the best match at previous step. Matches not greater than this * are discarded. This is used in the lazy match evaluation. */ uInt max_chain_length; /* To speed up deflation, hash chains are never searched beyond this * length. A higher limit improves compression ratio but degrades the * speed. */ uInt max_lazy_match; /* Attempt to find a better match only when the current match is strictly * smaller than this value. This mechanism is used only for compression * levels >= 4. */ # define max_insert_length max_lazy_match /* Insert new strings in the hash table only if the match length is not * greater than this length. This saves time but degrades compression. * max_insert_length is used only for compression levels <= 3. */ int level; /* compression level (1..9) */ int strategy; /* favor or force Huffman coding*/ uInt good_match; /* Use a faster search when the previous match is longer than this */ int nice_match; /* Stop searching when current match exceeds this */ /* used by trees.c: */ /* Didn't use ct_data typedef below to suppress compiler warning */ struct ct_data_s dyn_ltree[HEAP_SIZE]; /* literal and length tree */ struct ct_data_s dyn_dtree[2*D_CODES+1]; /* distance tree */ struct ct_data_s bl_tree[2*BL_CODES+1]; /* Huffman tree for bit lengths */ struct tree_desc_s l_desc; /* desc. for literal tree */ struct tree_desc_s d_desc; /* desc. for distance tree */ struct tree_desc_s bl_desc; /* desc. for bit length tree */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ int heap[2*L_CODES+1]; /* heap used to build the Huffman trees */ int heap_len; /* number of elements in the heap */ int heap_max; /* element of largest frequency */ /* The sons of heap[n] are heap[2*n] and heap[2*n+1]. heap[0] is not used. * The same heap array is used to build all trees. */ uch depth[2*L_CODES+1]; /* Depth of each subtree used as tie breaker for trees of equal frequency */ uchf *l_buf; /* buffer for literals or lengths */ uInt lit_bufsize; /* Size of match buffer for literals/lengths. 
There are 4 reasons for * limiting lit_bufsize to 64K: * - frequencies can be kept in 16 bit counters * - if compression is not successful for the first block, all input * data is still in the window so we can still emit a stored block even * when input comes from standard input. (This can also be done for * all blocks if lit_bufsize is not greater than 32K.) * - if compression is not successful for a file smaller than 64K, we can * even emit a stored file instead of a stored block (saving 5 bytes). * This is applicable only for zip (not gzip or zlib). * - creating new Huffman trees less frequently may not provide fast * adaptation to changes in the input data statistics. (Take for * example a binary file with poorly compressible code followed by * a highly compressible string table.) Smaller buffer sizes give * fast adaptation but have of course the overhead of transmitting * trees more frequently. * - I can't count above 4 */ uInt last_lit; /* running index in l_buf */ ushf *d_buf; /* Buffer for distances. To simplify the code, d_buf and l_buf have * the same number of elements. To use different lengths, an extra flag * array would be necessary. */ ulg opt_len; /* bit length of current block with optimal trees */ ulg static_len; /* bit length of current block with static trees */ uInt matches; /* number of string matches in current block */ uInt insert; /* bytes at end of window left to insert */ #ifdef ZLIB_DEBUG ulg compressed_len; /* total bit length of compressed file mod 2^32 */ ulg bits_sent; /* bit length of compressed data sent mod 2^32 */ #endif ush bi_buf; /* Output buffer. bits are inserted starting at the bottom (least * significant bits). */ int bi_valid; /* Number of valid bits in bi_buf. All bits above the last valid bit * are always zero. */ ulg high_water; /* High water mark offset in window for initialized bytes -- bytes above * this are set to zero in order to avoid memory check warnings when * longest match routines access bytes past the input. This is then * updated to the new high water mark. */ } FAR deflate_state; /* Output a byte on the stream. * IN assertion: there is enough room in pending_buf. */ #define put_byte(s, c) {s->pending_buf[s->pending++] = (Bytef)(c);} #define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1) /* Minimum amount of lookahead, except at the end of the input file. * See deflate.c for comments about the MIN_MATCH+1. */ #define MAX_DIST(s) ((s)->w_size-MIN_LOOKAHEAD) /* In order to simplify the code, particularly on 16 bit machines, match * distances are limited to MAX_DIST instead of WSIZE. */ #define WIN_INIT MAX_MATCH /* Number of bytes after end of data in window to initialize in order to avoid memory checker errors from longest match routines */ /* in trees.c */ void ZLIB_INTERNAL _tr_init OF((deflate_state *s)); int ZLIB_INTERNAL _tr_tally OF((deflate_state *s, unsigned dist, unsigned lc)); void ZLIB_INTERNAL _tr_flush_block OF((deflate_state *s, charf *buf, ulg stored_len, int last)); void ZLIB_INTERNAL _tr_flush_bits OF((deflate_state *s)); void ZLIB_INTERNAL _tr_align OF((deflate_state *s)); void ZLIB_INTERNAL _tr_stored_block OF((deflate_state *s, charf *buf, ulg stored_len, int last)); #define d_code(dist) \ ((dist) < 256 ? _dist_code[dist] : _dist_code[256+((dist)>>7)]) /* Mapping from a distance to a distance code. dist is the distance - 1 and * must not have side effects. _dist_code[256] and _dist_code[257] are never * used. 
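 * Illustrative example, not in the original comment: for an actual match
 * distance of 300, the argument here is dist = 299, so d_code(299) expands
 * to _dist_code[256 + (299 >> 7)], i.e. _dist_code[258]; for a distance of
 * 4, dist = 3 and the code is simply _dist_code[3].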
*/ #ifndef ZLIB_DEBUG /* Inline versions of _tr_tally for speed: */ #if defined(GEN_TREES_H) || !defined(STDC) extern uch ZLIB_INTERNAL _length_code[]; extern uch ZLIB_INTERNAL _dist_code[]; #else extern const uch ZLIB_INTERNAL _length_code[]; extern const uch ZLIB_INTERNAL _dist_code[]; #endif # define _tr_tally_lit(s, c, flush) \ { uch cc = (c); \ s->d_buf[s->last_lit] = 0; \ s->l_buf[s->last_lit++] = cc; \ s->dyn_ltree[cc].Freq++; \ flush = (s->last_lit == s->lit_bufsize-1); \ } # define _tr_tally_dist(s, distance, length, flush) \ { uch len = (uch)(length); \ ush dist = (ush)(distance); \ s->d_buf[s->last_lit] = dist; \ s->l_buf[s->last_lit++] = len; \ dist--; \ s->dyn_ltree[_length_code[len]+LITERALS+1].Freq++; \ s->dyn_dtree[d_code(dist)].Freq++; \ flush = (s->last_lit == s->lit_bufsize-1); \ } #else # define _tr_tally_lit(s, c, flush) flush = _tr_tally(s, 0, c) # define _tr_tally_dist(s, distance, length, flush) \ flush = _tr_tally(s, distance, length) #endif #endif /* DEFLATE_H */ mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/gzclose.c0000644000076500000240000000124613572250760021275 0ustar alcaeusstaff/* gzclose.c -- zlib gzclose() function * Copyright (C) 2004, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "gzguts.h" /* gzclose() is in a separate file so that it is linked in only if it is used. That way the other gzclose functions can be used instead to avoid linking in unneeded compression or decompression routines. */ int ZEXPORT gzclose(file) gzFile file; { #ifndef NO_GZCOMPRESS gz_statep state; if (file == NULL) return Z_STREAM_ERROR; state = (gz_statep)file; return state->mode == GZ_READ ? gzclose_r(file) : gzclose_w(file); #else return gzclose_r(file); #endif } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/gzguts.h0000644000076500000240000001524313572250760021161 0ustar alcaeusstaff/* gzguts.h -- zlib internal header definitions for gz* operations * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #ifdef _LARGEFILE64_SOURCE # ifndef _LARGEFILE_SOURCE # define _LARGEFILE_SOURCE 1 # endif # ifdef _FILE_OFFSET_BITS # undef _FILE_OFFSET_BITS # endif #endif #ifdef HAVE_HIDDEN # define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) #else # define ZLIB_INTERNAL #endif #include #include "zlib.h" #ifdef STDC # include # include # include #endif #ifndef _POSIX_SOURCE # define _POSIX_SOURCE #endif #include #ifdef _WIN32 # include #endif #if defined(__TURBOC__) || defined(_MSC_VER) || defined(_WIN32) # include #endif #if defined(_WIN32) || defined(__CYGWIN__) # define WIDECHAR #endif #ifdef WINAPI_FAMILY # define open _open # define read _read # define write _write # define close _close #endif #ifdef NO_DEFLATE /* for compatibility with old definition */ # define NO_GZCOMPRESS #endif #if defined(STDC99) || (defined(__TURBOC__) && __TURBOC__ >= 0x550) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(__CYGWIN__) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #if defined(MSDOS) && defined(__BORLANDC__) && (BORLANDC > 0x410) # ifndef HAVE_VSNPRINTF # define HAVE_VSNPRINTF # endif #endif #ifndef HAVE_VSNPRINTF # ifdef MSDOS /* vsnprintf may exist on some MS-DOS compilers (DJGPP?), but for now we just assume it doesn't. */ # define NO_vsnprintf # endif # ifdef __TURBOC__ # define NO_vsnprintf # endif # ifdef WIN32 /* In Win32, vsnprintf is available as the "non-ANSI" _vsnprintf. 
*/ # if !defined(vsnprintf) && !defined(NO_vsnprintf) # if !defined(_MSC_VER) || ( defined(_MSC_VER) && _MSC_VER < 1500 ) # define vsnprintf _vsnprintf # endif # endif # endif # ifdef __SASC # define NO_vsnprintf # endif # ifdef VMS # define NO_vsnprintf # endif # ifdef __OS400__ # define NO_vsnprintf # endif # ifdef __MVS__ # define NO_vsnprintf # endif #endif /* unlike snprintf (which is required in C99), _snprintf does not guarantee null termination of the result -- however this is only used in gzlib.c where the result is assured to fit in the space provided */ #if defined(_MSC_VER) && _MSC_VER < 1900 # define snprintf _snprintf #endif #ifndef local # define local static #endif /* since "static" is used to mean two completely different things in C, we define "local" for the non-static meaning of "static", for readability (compile with -Dlocal if your debugger can't find static symbols) */ /* gz* functions always use library allocation functions */ #ifndef STDC extern voidp malloc OF((uInt size)); extern void free OF((voidpf ptr)); #endif /* get errno and strerror definition */ #if defined UNDER_CE # include # define zstrerror() gz_strwinerror((DWORD)GetLastError()) #else # ifndef NO_STRERROR # include # define zstrerror() strerror(errno) # else # define zstrerror() "stdio error (consult errno)" # endif #endif /* provide prototypes for these when building zlib without LFS */ #if !defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0 ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); #endif /* default memLevel */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* default i/o buffer size -- double this for output when reading (this and twice this must be able to fit in an unsigned type) */ #define GZBUFSIZE 8192 /* gzip modes, also provide a little integrity check on the passed structure */ #define GZ_NONE 0 #define GZ_READ 7247 #define GZ_WRITE 31153 #define GZ_APPEND 1 /* mode set to GZ_WRITE after the file is opened */ /* values for gz_state how */ #define LOOK 0 /* look for a gzip header */ #define COPY 1 /* copy input directly */ #define GZIP 2 /* decompress a gzip stream */ /* internal gzip file state data structure */ typedef struct { /* exposed contents for gzgetc() macro */ struct gzFile_s x; /* "x" for exposed */ /* x.have: number of bytes available at x.next */ /* x.next: next output data to deliver or write */ /* x.pos: current position in uncompressed data */ /* used for both reading and writing */ int mode; /* see gzip modes above */ int fd; /* file descriptor */ char *path; /* path or fd for error messages */ unsigned size; /* buffer size, zero if not allocated yet */ unsigned want; /* requested buffer size, default is GZBUFSIZE */ unsigned char *in; /* input buffer (double-sized when writing) */ unsigned char *out; /* output buffer (double-sized when reading) */ int direct; /* 0 if processing gzip, 1 if transparent */ /* just for reading */ int how; /* 0: get header, 1: copy, 2: decompress */ z_off64_t start; /* where the gzip data started, for rewinding */ int eof; /* true if end of input file reached */ int past; /* true if read requested past end */ /* just for writing */ int level; /* compression level */ int strategy; /* compression strategy */ /* seek request */ z_off64_t skip; /* amount to skip (already rewound if backwards) */ int seek; 
/* true if seek request pending */ /* error information */ int err; /* error code */ char *msg; /* error message */ /* zlib inflate or deflate stream */ z_stream strm; /* stream structure in-place (not a pointer) */ } gz_state; typedef gz_state FAR *gz_statep; /* shared functions */ void ZLIB_INTERNAL gz_error OF((gz_statep, int, const char *)); #if defined UNDER_CE char ZLIB_INTERNAL *gz_strwinerror OF((DWORD error)); #endif /* GT_OFF(x), where x is an unsigned value, is true if x > maximum z_off64_t value -- needed when comparing unsigned to z_off64_t, which is signed (possible z_off64_t types off_t, off64_t, and long are all signed) */ #ifdef INT_MAX # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > INT_MAX) #else unsigned ZLIB_INTERNAL gz_intmax OF((void)); # define GT_OFF(x) (sizeof(int) == sizeof(z_off64_t) && (x) > gz_intmax()) #endif mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/gzlib.c0000644000076500000240000004032713572250760020741 0ustar alcaeusstaff/* gzlib.c -- zlib functions common to reading and writing gzip files * Copyright (C) 2004-2017 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "gzguts.h" #if defined(_WIN32) && !defined(__BORLANDC__) && !defined(__MINGW32__) # define LSEEK _lseeki64 #else #if defined(_LARGEFILE64_SOURCE) && _LFS64_LARGEFILE-0 # define LSEEK lseek64 #else # define LSEEK lseek #endif #endif /* Local functions */ local void gz_reset OF((gz_statep)); local gzFile gz_open OF((const void *, int, const char *)); #if defined UNDER_CE /* Map the Windows error number in ERROR to a locale-dependent error message string and return a pointer to it. Typically, the values for ERROR come from GetLastError. The string pointed to shall not be modified by the application, but may be overwritten by a subsequent call to gz_strwinerror The gz_strwinerror function does not change the current setting of GetLastError. */ char ZLIB_INTERNAL *gz_strwinerror (error) DWORD error; { static char buf[1024]; wchar_t *msgbuf; DWORD lasterr = GetLastError(); DWORD chars = FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER, NULL, error, 0, /* Default language */ (LPVOID)&msgbuf, 0, NULL); if (chars != 0) { /* If there is an \r\n appended, zap it. */ if (chars >= 2 && msgbuf[chars - 2] == '\r' && msgbuf[chars - 1] == '\n') { chars -= 2; msgbuf[chars] = 0; } if (chars > sizeof (buf) - 1) { chars = sizeof (buf) - 1; msgbuf[chars] = 0; } wcstombs(buf, msgbuf, chars + 1); LocalFree(msgbuf); } else { sprintf(buf, "unknown win32 error (%ld)", error); } SetLastError(lasterr); return buf; } #endif /* UNDER_CE */ /* Reset gzip file state */ local void gz_reset(state) gz_statep state; { state->x.have = 0; /* no output data available */ if (state->mode == GZ_READ) { /* for reading ... */ state->eof = 0; /* not at end of file */ state->past = 0; /* have not read past end yet */ state->how = LOOK; /* look for gzip header */ } state->seek = 0; /* no seek request pending */ gz_error(state, Z_OK, NULL); /* clear error */ state->x.pos = 0; /* no uncompressed data yet */ state->strm.avail_in = 0; /* no input data yet */ } /* Open a gzip file either by name or file descriptor. 
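   Illustrative usage, not part of this file: the mode string parsed below
   combines an access letter with optional level digits and strategy or
   transparency letters, for example

       gzFile out = gzopen("log.txt.gz", "wb9f");    write, level 9, Z_FILTERED
       gzFile in  = gzopen("log.txt.gz", "rb");      read
       gzFile raw = gzdopen(fd, "wT");               write to descriptor fd, no compression

   gzdopen() passes a real descriptor and gzopen_w() passes fd == -2 so that
   the wide-character path branch is taken.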
*/ local gzFile gz_open(path, fd, mode) const void *path; int fd; const char *mode; { gz_statep state; z_size_t len; int oflag; #ifdef O_CLOEXEC int cloexec = 0; #endif #ifdef O_EXCL int exclusive = 0; #endif /* check input */ if (path == NULL) return NULL; /* allocate gzFile structure to return */ state = (gz_statep)malloc(sizeof(gz_state)); if (state == NULL) return NULL; state->size = 0; /* no buffers allocated yet */ state->want = GZBUFSIZE; /* requested buffer size */ state->msg = NULL; /* no error message yet */ /* interpret mode */ state->mode = GZ_NONE; state->level = Z_DEFAULT_COMPRESSION; state->strategy = Z_DEFAULT_STRATEGY; state->direct = 0; while (*mode) { if (*mode >= '0' && *mode <= '9') state->level = *mode - '0'; else switch (*mode) { case 'r': state->mode = GZ_READ; break; #ifndef NO_GZCOMPRESS case 'w': state->mode = GZ_WRITE; break; case 'a': state->mode = GZ_APPEND; break; #endif case '+': /* can't read and write at the same time */ free(state); return NULL; case 'b': /* ignore -- will request binary anyway */ break; #ifdef O_CLOEXEC case 'e': cloexec = 1; break; #endif #ifdef O_EXCL case 'x': exclusive = 1; break; #endif case 'f': state->strategy = Z_FILTERED; break; case 'h': state->strategy = Z_HUFFMAN_ONLY; break; case 'R': state->strategy = Z_RLE; break; case 'F': state->strategy = Z_FIXED; break; case 'T': state->direct = 1; break; default: /* could consider as an error, but just ignore */ ; } mode++; } /* must provide an "r", "w", or "a" */ if (state->mode == GZ_NONE) { free(state); return NULL; } /* can't force transparent read */ if (state->mode == GZ_READ) { if (state->direct) { free(state); return NULL; } state->direct = 1; /* for empty file */ } /* save the path name for error messages */ #ifdef WIDECHAR if (fd == -2) { len = wcstombs(NULL, path, 0); if (len == (z_size_t)-1) len = 0; } else #endif len = strlen((const char *)path); state->path = (char *)malloc(len + 1); if (state->path == NULL) { free(state); return NULL; } #ifdef WIDECHAR if (fd == -2) if (len) wcstombs(state->path, path, len + 1); else *(state->path) = 0; else #endif #if !defined(NO_snprintf) && !defined(NO_vsnprintf) (void)snprintf(state->path, len + 1, "%s", (const char *)path); #else strcpy(state->path, path); #endif /* compute the flags for open() */ oflag = #ifdef O_LARGEFILE O_LARGEFILE | #endif #ifdef O_BINARY O_BINARY | #endif #ifdef O_CLOEXEC (cloexec ? O_CLOEXEC : 0) | #endif (state->mode == GZ_READ ? O_RDONLY : (O_WRONLY | O_CREAT | #ifdef O_EXCL (exclusive ? O_EXCL : 0) | #endif (state->mode == GZ_WRITE ? O_TRUNC : O_APPEND))); /* open the file with the appropriate flags (or just use fd) */ state->fd = fd > -1 ? fd : ( #ifdef WIDECHAR fd == -2 ? 
_wopen(path, oflag, 0666) : #endif open((const char *)path, oflag, 0666)); if (state->fd == -1) { free(state->path); free(state); return NULL; } if (state->mode == GZ_APPEND) { LSEEK(state->fd, 0, SEEK_END); /* so gzoffset() is correct */ state->mode = GZ_WRITE; /* simplify later checks */ } /* save the current position for rewinding (only if reading) */ if (state->mode == GZ_READ) { state->start = LSEEK(state->fd, 0, SEEK_CUR); if (state->start == -1) state->start = 0; } /* initialize stream */ gz_reset(state); /* return stream */ return (gzFile)state; } /* -- see zlib.h -- */ gzFile ZEXPORT gzopen(path, mode) const char *path; const char *mode; { return gz_open(path, -1, mode); } /* -- see zlib.h -- */ gzFile ZEXPORT gzopen64(path, mode) const char *path; const char *mode; { return gz_open(path, -1, mode); } /* -- see zlib.h -- */ gzFile ZEXPORT gzdopen(fd, mode) int fd; const char *mode; { char *path; /* identifier for error messages */ gzFile gz; if (fd == -1 || (path = (char *)malloc(7 + 3 * sizeof(int))) == NULL) return NULL; #if !defined(NO_snprintf) && !defined(NO_vsnprintf) (void)snprintf(path, 7 + 3 * sizeof(int), "", fd); #else sprintf(path, "", fd); /* for debugging */ #endif gz = gz_open(path, fd, mode); free(path); return gz; } /* -- see zlib.h -- */ #ifdef WIDECHAR gzFile ZEXPORT gzopen_w(path, mode) const wchar_t *path; const char *mode; { return gz_open(path, -2, mode); } #endif /* -- see zlib.h -- */ int ZEXPORT gzbuffer(file, size) gzFile file; unsigned size; { gz_statep state; /* get internal structure and check integrity */ if (file == NULL) return -1; state = (gz_statep)file; if (state->mode != GZ_READ && state->mode != GZ_WRITE) return -1; /* make sure we haven't already allocated memory */ if (state->size != 0) return -1; /* check and set requested size */ if ((size << 1) < size) return -1; /* need to be able to double it */ if (size < 2) size = 2; /* need two bytes to check magic header */ state->want = size; return 0; } /* -- see zlib.h -- */ int ZEXPORT gzrewind(file) gzFile file; { gz_statep state; /* get internal structure */ if (file == NULL) return -1; state = (gz_statep)file; /* check that we're reading and that there's no error */ if (state->mode != GZ_READ || (state->err != Z_OK && state->err != Z_BUF_ERROR)) return -1; /* back up and start over */ if (LSEEK(state->fd, state->start, SEEK_SET) == -1) return -1; gz_reset(state); return 0; } /* -- see zlib.h -- */ z_off64_t ZEXPORT gzseek64(file, offset, whence) gzFile file; z_off64_t offset; int whence; { unsigned n; z_off64_t ret; gz_statep state; /* get internal structure and check integrity */ if (file == NULL) return -1; state = (gz_statep)file; if (state->mode != GZ_READ && state->mode != GZ_WRITE) return -1; /* check that there's no error */ if (state->err != Z_OK && state->err != Z_BUF_ERROR) return -1; /* can only seek from start or relative to current position */ if (whence != SEEK_SET && whence != SEEK_CUR) return -1; /* normalize offset to a SEEK_CUR specification */ if (whence == SEEK_SET) offset -= state->x.pos; else if (state->seek) offset += state->skip; state->seek = 0; /* if within raw area while reading, just go there */ if (state->mode == GZ_READ && state->how == COPY && state->x.pos + offset >= 0) { ret = LSEEK(state->fd, offset - state->x.have, SEEK_CUR); if (ret == -1) return -1; state->x.have = 0; state->eof = 0; state->past = 0; state->seek = 0; gz_error(state, Z_OK, NULL); state->strm.avail_in = 0; state->x.pos += offset; return state->x.pos; } /* calculate skip amount, rewinding if 
needed for back seek when reading */ if (offset < 0) { if (state->mode != GZ_READ) /* writing -- can't go backwards */ return -1; offset += state->x.pos; if (offset < 0) /* before start of file! */ return -1; if (gzrewind(file) == -1) /* rewind, then skip to offset */ return -1; } /* if reading, skip what's in output buffer (one less gzgetc() check) */ if (state->mode == GZ_READ) { n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > offset ? (unsigned)offset : state->x.have; state->x.have -= n; state->x.next += n; state->x.pos += n; offset -= n; } /* request skip (if not zero) */ if (offset) { state->seek = 1; state->skip = offset; } return state->x.pos + offset; } /* -- see zlib.h -- */ z_off_t ZEXPORT gzseek(file, offset, whence) gzFile file; z_off_t offset; int whence; { z_off64_t ret; ret = gzseek64(file, (z_off64_t)offset, whence); return ret == (z_off_t)ret ? (z_off_t)ret : -1; } /* -- see zlib.h -- */ z_off64_t ZEXPORT gztell64(file) gzFile file; { gz_statep state; /* get internal structure and check integrity */ if (file == NULL) return -1; state = (gz_statep)file; if (state->mode != GZ_READ && state->mode != GZ_WRITE) return -1; /* return position */ return state->x.pos + (state->seek ? state->skip : 0); } /* -- see zlib.h -- */ z_off_t ZEXPORT gztell(file) gzFile file; { z_off64_t ret; ret = gztell64(file); return ret == (z_off_t)ret ? (z_off_t)ret : -1; } /* -- see zlib.h -- */ z_off64_t ZEXPORT gzoffset64(file) gzFile file; { z_off64_t offset; gz_statep state; /* get internal structure and check integrity */ if (file == NULL) return -1; state = (gz_statep)file; if (state->mode != GZ_READ && state->mode != GZ_WRITE) return -1; /* compute and return effective offset in file */ offset = LSEEK(state->fd, 0, SEEK_CUR); if (offset == -1) return -1; if (state->mode == GZ_READ) /* reading */ offset -= state->strm.avail_in; /* don't count buffered input */ return offset; } /* -- see zlib.h -- */ z_off_t ZEXPORT gzoffset(file) gzFile file; { z_off64_t ret; ret = gzoffset64(file); return ret == (z_off_t)ret ? (z_off_t)ret : -1; } /* -- see zlib.h -- */ int ZEXPORT gzeof(file) gzFile file; { gz_statep state; /* get internal structure and check integrity */ if (file == NULL) return 0; state = (gz_statep)file; if (state->mode != GZ_READ && state->mode != GZ_WRITE) return 0; /* return end-of-file state */ return state->mode == GZ_READ ? state->past : 0; } /* -- see zlib.h -- */ const char * ZEXPORT gzerror(file, errnum) gzFile file; int *errnum; { gz_statep state; /* get internal structure and check integrity */ if (file == NULL) return NULL; state = (gz_statep)file; if (state->mode != GZ_READ && state->mode != GZ_WRITE) return NULL; /* return error information */ if (errnum != NULL) *errnum = state->err; return state->err == Z_MEM_ERROR ? "out of memory" : (state->msg == NULL ? "" : state->msg); } /* -- see zlib.h -- */ void ZEXPORT gzclearerr(file) gzFile file; { gz_statep state; /* get internal structure and check integrity */ if (file == NULL) return; state = (gz_statep)file; if (state->mode != GZ_READ && state->mode != GZ_WRITE) return; /* clear error and end-of-file */ if (state->mode == GZ_READ) { state->eof = 0; state->past = 0; } gz_error(state, Z_OK, NULL); } /* Create an error message in allocated memory and set state->err and state->msg accordingly. Free any previous error message already there. Do not try to free or allocate space if the error is Z_MEM_ERROR (out of memory). Simply save the error message as a static string. 
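   Illustrative usage, not part of this file: callers normally observe the
   result of gz_error() through the public wrapper, for example

       int errnum;
       const char *msg = gzerror(file, &errnum);
       if (errnum != Z_OK && errnum != Z_BUF_ERROR)
           fprintf(stderr, "gzip error %d: %s\n", errnum, msg);

   where msg is the literal "out of memory" for Z_MEM_ERROR and the
   "path: message" string built below otherwise.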
If there is an allocation failure constructing the error message, then convert the error to out of memory. */ void ZLIB_INTERNAL gz_error(state, err, msg) gz_statep state; int err; const char *msg; { /* free previously allocated message and clear */ if (state->msg != NULL) { if (state->err != Z_MEM_ERROR) free(state->msg); state->msg = NULL; } /* if fatal, set state->x.have to 0 so that the gzgetc() macro fails */ if (err != Z_OK && err != Z_BUF_ERROR) state->x.have = 0; /* set error code, and if no message, then done */ state->err = err; if (msg == NULL) return; /* for an out of memory error, return literal string when requested */ if (err == Z_MEM_ERROR) return; /* construct error message with path */ if ((state->msg = (char *)malloc(strlen(state->path) + strlen(msg) + 3)) == NULL) { state->err = Z_MEM_ERROR; return; } #if !defined(NO_snprintf) && !defined(NO_vsnprintf) (void)snprintf(state->msg, strlen(state->path) + strlen(msg) + 3, "%s%s%s", state->path, ": ", msg); #else strcpy(state->msg, state->path); strcat(state->msg, ": "); strcat(state->msg, msg); #endif } #ifndef INT_MAX /* portably return maximum value for an int (when limits.h presumed not available) -- we need to do this to cover cases where 2's complement not used, since C standard permits 1's complement and sign-bit representations, otherwise we could just use ((unsigned)-1) >> 1 */ unsigned ZLIB_INTERNAL gz_intmax() { unsigned p, q; p = 1; do { q = p; p <<= 1; p++; } while (p > q); return q >> 1; } #endif mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/gzread.c0000644000076500000240000004775613572250760021123 0ustar alcaeusstaff/* gzread.c -- zlib functions for reading gzip files * Copyright (C) 2004, 2005, 2010, 2011, 2012, 2013, 2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "gzguts.h" #ifndef __clang_analyzer__ /* Local functions */ local int gz_load OF((gz_statep, unsigned char *, unsigned, unsigned *)); local int gz_avail OF((gz_statep)); local int gz_look OF((gz_statep)); local int gz_decomp OF((gz_statep)); local int gz_fetch OF((gz_statep)); local int gz_skip OF((gz_statep, z_off64_t)); local z_size_t gz_read OF((gz_statep, voidp, z_size_t)); /* Use read() to load a buffer -- return -1 on error, otherwise 0. Read from state->fd, and update state->eof, state->err, and state->msg as appropriate. This function needs to loop on read(), since read() is not guaranteed to read the number of bytes requested, depending on the type of descriptor. */ local int gz_load(state, buf, len, have) gz_statep state; unsigned char *buf; unsigned len; unsigned *have; { int ret; unsigned get, max = ((unsigned)-1 >> 2) + 1; *have = 0; do { get = len - *have; if (get > max) get = max; ret = read(state->fd, buf + *have, get); if (ret <= 0) break; *have += (unsigned)ret; } while (*have < len); if (ret < 0) { gz_error(state, Z_ERRNO, zstrerror()); return -1; } if (ret == 0) state->eof = 1; return 0; } /* Load up input buffer and set eof flag if last data loaded -- return -1 on error, 0 otherwise. Note that the eof flag is set when the end of the input file is reached, even though there may be unused data in the buffer. Once that data has been used, no more attempts will be made to read the file. If strm->avail_in != 0, then the current data is moved to the beginning of the input buffer, and then the remainder of the buffer is loaded with the available data from the input file. 
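   Illustrative note, not part of this file: the copy described above has the
   same effect as

       memmove(state->in, strm->next_in, strm->avail_in);

   written out below as a forward byte loop, which is safe here because the
   destination is at or before the source within the same buffer.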
*/ local int gz_avail(state) gz_statep state; { unsigned got; z_streamp strm = &(state->strm); if (state->err != Z_OK && state->err != Z_BUF_ERROR) return -1; if (state->eof == 0) { if (strm->avail_in) { /* copy what's there to the start */ unsigned char *p = state->in; unsigned const char *q = strm->next_in; unsigned n = strm->avail_in; do { *p++ = *q++; } while (--n); } if (gz_load(state, state->in + strm->avail_in, state->size - strm->avail_in, &got) == -1) return -1; strm->avail_in += got; strm->next_in = state->in; } return 0; } /* Look for gzip header, set up for inflate or copy. state->x.have must be 0. If this is the first time in, allocate required memory. state->how will be left unchanged if there is no more input data available, will be set to COPY if there is no gzip header and direct copying will be performed, or it will be set to GZIP for decompression. If direct copying, then leftover input data from the input buffer will be copied to the output buffer. In that case, all further file reads will be directly to either the output buffer or a user buffer. If decompressing, the inflate state will be initialized. gz_look() will return 0 on success or -1 on failure. */ local int gz_look(state) gz_statep state; { z_streamp strm = &(state->strm); /* allocate read buffers and inflate memory */ if (state->size == 0) { /* allocate buffers */ state->in = (unsigned char *)malloc(state->want); state->out = (unsigned char *)malloc(state->want << 1); if (state->in == NULL || state->out == NULL) { free(state->out); free(state->in); gz_error(state, Z_MEM_ERROR, "out of memory"); return -1; } state->size = state->want; /* allocate inflate memory */ state->strm.zalloc = Z_NULL; state->strm.zfree = Z_NULL; state->strm.opaque = Z_NULL; state->strm.avail_in = 0; state->strm.next_in = Z_NULL; if (inflateInit2(&(state->strm), 15 + 16) != Z_OK) { /* gunzip */ free(state->out); free(state->in); state->size = 0; gz_error(state, Z_MEM_ERROR, "out of memory"); return -1; } } /* get at least the magic bytes in the input buffer */ if (strm->avail_in < 2) { if (gz_avail(state) == -1) return -1; if (strm->avail_in == 0) return 0; } /* look for gzip magic bytes -- if there, do gzip decoding (note: there is a logical dilemma here when considering the case of a partially written gzip file, to wit, if a single 31 byte is written, then we cannot tell whether this is a single-byte file, or just a partially written gzip file -- for here we assume that if a gzip file is being written, then the header will be written in a single operation, so that reading a single byte is sufficient indication that it is not a gzip file) */ if (strm->avail_in > 1 && strm->next_in[0] == 31 && strm->next_in[1] == 139) { inflateReset(strm); state->how = GZIP; state->direct = 0; return 0; } /* no gzip header -- if we were decoding gzip before, then this is trailing garbage. Ignore the trailing garbage and finish. */ if (state->direct == 0) { strm->avail_in = 0; state->eof = 1; state->x.have = 0; return 0; } /* doing raw i/o, copy any leftover input to output -- this assumes that the output buffer is larger than the input buffer, which also assures space for gzungetc() */ state->x.next = state->out; if (strm->avail_in) { memcpy(state->x.next, strm->next_in, strm->avail_in); state->x.have = strm->avail_in; strm->avail_in = 0; } state->how = COPY; state->direct = 1; return 0; } /* Decompress from input to the provided next_out and avail_out in the state. On return, state->x.have and state->x.next point to the just decompressed data. 
If the gzip stream completes, state->how is reset to LOOK to look for the next gzip stream or raw data, once state->x.have is depleted. Returns 0 on success, -1 on failure. */ local int gz_decomp(state) gz_statep state; { int ret = Z_OK; unsigned had; z_streamp strm = &(state->strm); /* fill output buffer up to end of deflate stream */ had = strm->avail_out; do { /* get more input for inflate() */ if (strm->avail_in == 0 && gz_avail(state) == -1) return -1; if (strm->avail_in == 0) { gz_error(state, Z_BUF_ERROR, "unexpected end of file"); break; } /* decompress and handle errors */ ret = inflate(strm, Z_NO_FLUSH); if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT) { gz_error(state, Z_STREAM_ERROR, "internal error: inflate stream corrupt"); return -1; } if (ret == Z_MEM_ERROR) { gz_error(state, Z_MEM_ERROR, "out of memory"); return -1; } if (ret == Z_DATA_ERROR) { /* deflate stream invalid */ gz_error(state, Z_DATA_ERROR, strm->msg == NULL ? "compressed data error" : strm->msg); return -1; } } while (strm->avail_out && ret != Z_STREAM_END); /* update available output */ state->x.have = had - strm->avail_out; state->x.next = strm->next_out - state->x.have; /* if the gzip stream completed successfully, look for another */ if (ret == Z_STREAM_END) state->how = LOOK; /* good decompression */ return 0; } /* Fetch data and put it in the output buffer. Assumes state->x.have is 0. Data is either copied from the input file or decompressed from the input file depending on state->how. If state->how is LOOK, then a gzip header is looked for to determine whether to copy or decompress. Returns -1 on error, otherwise 0. gz_fetch() will leave state->how as COPY or GZIP unless the end of the input file has been reached and all data has been processed. */ local int gz_fetch(state) gz_statep state; { z_streamp strm = &(state->strm); do { switch(state->how) { case LOOK: /* -> LOOK, COPY (only if never GZIP), or GZIP */ if (gz_look(state) == -1) return -1; if (state->how == LOOK) return 0; break; case COPY: /* -> COPY */ if (gz_load(state, state->out, state->size << 1, &(state->x.have)) == -1) return -1; state->x.next = state->out; return 0; case GZIP: /* -> GZIP or LOOK (if end of gzip stream) */ strm->avail_out = state->size << 1; strm->next_out = state->out; if (gz_decomp(state) == -1) return -1; } } while (state->x.have == 0 && (!state->eof || strm->avail_in)); return 0; } /* Skip len uncompressed bytes of output. Return -1 on error, 0 on success. */ local int gz_skip(state, len) gz_statep state; z_off64_t len; { unsigned n; /* skip over len bytes or reach end-of-file, whichever comes first */ while (len) /* skip over whatever is in output buffer */ if (state->x.have) { n = GT_OFF(state->x.have) || (z_off64_t)state->x.have > len ? (unsigned)len : state->x.have; state->x.have -= n; state->x.next += n; state->x.pos += n; len -= n; } /* output buffer empty -- return if we're at the end of the input */ else if (state->eof && state->strm.avail_in == 0) break; /* need more data to skip -- load up output buffer */ else { /* get more output, looking for header if required */ if (gz_fetch(state) == -1) return -1; } return 0; } /* Read len bytes into buf from file, or less than len up to the end of the input. Return the number of bytes read. If zero is returned, either the end of file was reached, or there was an error. state->err must be consulted in that case to determine which. 
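   Illustrative usage, not part of this file: the exported wrappers feed this
   helper with a byte count, for example

       char buf[4096];
       int n = gzread(file, buf, (unsigned)sizeof(buf));      up to 4096 bytes
       z_size_t items = gzfread(buf, 512, 8, file);           8 records of 512 bytes

   and, when zero is returned, gzerror() distinguishes end of file from a
   real error, as described above.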
*/ local z_size_t gz_read(state, buf, len) gz_statep state; voidp buf; z_size_t len; { z_size_t got; unsigned n; /* if len is zero, avoid unnecessary operations */ if (len == 0) return 0; /* process a skip request */ if (state->seek) { state->seek = 0; if (gz_skip(state, state->skip) == -1) return 0; } /* get len bytes to buf, or less than len if at the end */ got = 0; do { /* set n to the maximum amount of len that fits in an unsigned int */ n = -1; if (n > len) n = len; /* first just try copying data from the output buffer */ if (state->x.have) { if (state->x.have < n) n = state->x.have; memcpy(buf, state->x.next, n); state->x.next += n; state->x.have -= n; } /* output buffer empty -- return if we're at the end of the input */ else if (state->eof && state->strm.avail_in == 0) { state->past = 1; /* tried to read past end */ break; } /* need output data -- for small len or new stream load up our output buffer */ else if (state->how == LOOK || n < (state->size << 1)) { /* get more output, looking for header if required */ if (gz_fetch(state) == -1) return 0; continue; /* no progress yet -- go back to copy above */ /* the copy above assures that we will leave with space in the output buffer, allowing at least one gzungetc() to succeed */ } /* large len -- read directly into user buffer */ else if (state->how == COPY) { /* read directly */ if (gz_load(state, (unsigned char *)buf, n, &n) == -1) return 0; } /* large len -- decompress directly into user buffer */ else { /* state->how == GZIP */ state->strm.avail_out = n; state->strm.next_out = (unsigned char *)buf; if (gz_decomp(state) == -1) return 0; n = state->x.have; state->x.have = 0; } /* update progress */ len -= n; buf = (char *)buf + n; got += n; state->x.pos += n; } while (len); /* return number of bytes read into user buffer */ return got; } /* -- see zlib.h -- */ int ZEXPORT gzread(file, buf, len) gzFile file; voidp buf; unsigned len; { gz_statep state; /* get internal structure */ if (file == NULL) return -1; state = (gz_statep)file; /* check that we're reading and that there's no (serious) error */ if (state->mode != GZ_READ || (state->err != Z_OK && state->err != Z_BUF_ERROR)) return -1; /* since an int is returned, make sure len fits in one, otherwise return with an error (this avoids a flaw in the interface) */ if ((int)len < 0) { gz_error(state, Z_STREAM_ERROR, "request does not fit in an int"); return -1; } /* read len or fewer bytes to buf */ len = gz_read(state, buf, len); /* check for an error */ if (len == 0 && state->err != Z_OK && state->err != Z_BUF_ERROR) return -1; /* return the number of bytes read (this is assured to fit in an int) */ return (int)len; } /* -- see zlib.h -- */ z_size_t ZEXPORT gzfread(buf, size, nitems, file) voidp buf; z_size_t size; z_size_t nitems; gzFile file; { z_size_t len; gz_statep state; /* get internal structure */ if (file == NULL) return 0; state = (gz_statep)file; /* check that we're reading and that there's no (serious) error */ if (state->mode != GZ_READ || (state->err != Z_OK && state->err != Z_BUF_ERROR)) return 0; /* compute bytes to read -- error on overflow */ len = nitems * size; if (size && len / size != nitems) { gz_error(state, Z_STREAM_ERROR, "request does not fit in a size_t"); return 0; } /* read len or fewer bytes to buf, return the number of full items read */ return len ? 
gz_read(state, buf, len) / size : 0; } /* -- see zlib.h -- */ #ifdef Z_PREFIX_SET # undef z_gzgetc #else # undef gzgetc #endif int ZEXPORT gzgetc(file) gzFile file; { int ret; unsigned char buf[1]; gz_statep state; /* get internal structure */ if (file == NULL) return -1; state = (gz_statep)file; /* check that we're reading and that there's no (serious) error */ if (state->mode != GZ_READ || (state->err != Z_OK && state->err != Z_BUF_ERROR)) return -1; /* try output buffer (no need to check for skip request) */ if (state->x.have) { state->x.have--; state->x.pos++; return *(state->x.next)++; } /* nothing there -- try gz_read() */ ret = gz_read(state, buf, 1); return ret < 1 ? -1 : buf[0]; } int ZEXPORT gzgetc_(file) gzFile file; { return gzgetc(file); } /* -- see zlib.h -- */ int ZEXPORT gzungetc(c, file) int c; gzFile file; { gz_statep state; /* get internal structure */ if (file == NULL) return -1; state = (gz_statep)file; /* check that we're reading and that there's no (serious) error */ if (state->mode != GZ_READ || (state->err != Z_OK && state->err != Z_BUF_ERROR)) return -1; /* process a skip request */ if (state->seek) { state->seek = 0; if (gz_skip(state, state->skip) == -1) return -1; } /* can't push EOF */ if (c < 0) return -1; /* if output buffer empty, put byte at end (allows more pushing) */ if (state->x.have == 0) { state->x.have = 1; state->x.next = state->out + (state->size << 1) - 1; state->x.next[0] = (unsigned char)c; state->x.pos--; state->past = 0; return c; } /* if no room, give up (must have already done a gzungetc()) */ if (state->x.have == (state->size << 1)) { gz_error(state, Z_DATA_ERROR, "out of room to push characters"); return -1; } /* slide output data if needed and insert byte before existing data */ if (state->x.next == state->out) { unsigned char *src = state->out + state->x.have; unsigned char *dest = state->out + (state->size << 1); while (src > state->out) *--dest = *--src; state->x.next = dest; } state->x.have++; state->x.next--; state->x.next[0] = (unsigned char)c; state->x.pos--; state->past = 0; return c; } /* -- see zlib.h -- */ char * ZEXPORT gzgets(file, buf, len) gzFile file; char *buf; int len; { unsigned left, n; char *str; unsigned char *eol; gz_statep state; /* check parameters and get internal structure */ if (file == NULL || buf == NULL || len < 1) return NULL; state = (gz_statep)file; /* check that we're reading and that there's no (serious) error */ if (state->mode != GZ_READ || (state->err != Z_OK && state->err != Z_BUF_ERROR)) return NULL; /* process a skip request */ if (state->seek) { state->seek = 0; if (gz_skip(state, state->skip) == -1) return NULL; } /* copy output bytes up to new line or len - 1, whichever comes first -- append a terminating zero to the string (we don't check for a zero in the contents, let the user worry about that) */ str = buf; left = (unsigned)len - 1; if (left) do { /* assure that something is in the output buffer */ if (state->x.have == 0 && gz_fetch(state) == -1) return NULL; /* error */ if (state->x.have == 0) { /* end of file */ state->past = 1; /* read past end */ break; /* return what we have */ } /* look for end-of-line in current output buffer */ n = state->x.have > left ? 
left : state->x.have; eol = (unsigned char *)memchr(state->x.next, '\n', n); if (eol != NULL) n = (unsigned)(eol - state->x.next) + 1; /* copy through end-of-line, or remainder if not found */ memcpy(buf, state->x.next, n); state->x.have -= n; state->x.next += n; state->x.pos += n; left -= n; buf += n; } while (left && eol == NULL); /* return terminated string, or if nothing, end of file */ if (buf == str) return NULL; buf[0] = 0; return str; } /* -- see zlib.h -- */ int ZEXPORT gzdirect(file) gzFile file; { gz_statep state; /* get internal structure */ if (file == NULL) return 0; state = (gz_statep)file; /* if the state is not known, but we can find out, then do so (this is mainly for right after a gzopen() or gzdopen()) */ if (state->mode == GZ_READ && state->how == LOOK && state->x.have == 0) (void)gz_look(state); /* return 1 if transparent, 0 if processing a gzip stream */ return state->direct; } /* -- see zlib.h -- */ int ZEXPORT gzclose_r(file) gzFile file; { int ret, err; gz_statep state; /* get internal structure */ if (file == NULL) return Z_STREAM_ERROR; state = (gz_statep)file; /* check that we're reading */ if (state->mode != GZ_READ) return Z_STREAM_ERROR; /* free memory and close file */ if (state->size) { inflateEnd(&(state->strm)); free(state->out); free(state->in); } err = state->err == Z_BUF_ERROR ? Z_BUF_ERROR : Z_OK; gz_error(state, Z_OK, NULL); free(state->path); ret = close(state->fd); free(state); return ret ? Z_ERRNO : err; } #endif mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/gzwrite.c0000644000076500000240000004552713572250760021334 0ustar alcaeusstaff/* gzwrite.c -- zlib functions for writing gzip files * Copyright (C) 2004-2017 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "gzguts.h" #ifndef __clang_analyzer__ /* Local functions */ local int gz_init OF((gz_statep)); local int gz_comp OF((gz_statep, int)); local int gz_zero OF((gz_statep, z_off64_t)); local z_size_t gz_write OF((gz_statep, voidpc, z_size_t)); /* Initialize state for writing a gzip file. Mark initialization by setting state->size to non-zero. Return -1 on a memory allocation failure, or 0 on success. */ local int gz_init(state) gz_statep state; { int ret; z_streamp strm = &(state->strm); /* allocate input buffer (double size for gzprintf) */ state->in = (unsigned char *)malloc(state->want << 1); if (state->in == NULL) { gz_error(state, Z_MEM_ERROR, "out of memory"); return -1; } /* only need output buffer and deflate state if compressing */ if (!state->direct) { /* allocate output buffer */ state->out = (unsigned char *)malloc(state->want); if (state->out == NULL) { free(state->in); gz_error(state, Z_MEM_ERROR, "out of memory"); return -1; } /* allocate deflate memory, set up for gzip compression */ strm->zalloc = Z_NULL; strm->zfree = Z_NULL; strm->opaque = Z_NULL; ret = deflateInit2(strm, state->level, Z_DEFLATED, MAX_WBITS + 16, DEF_MEM_LEVEL, state->strategy); if (ret != Z_OK) { free(state->out); free(state->in); gz_error(state, Z_MEM_ERROR, "out of memory"); return -1; } strm->next_in = NULL; } /* mark state as initialized */ state->size = state->want; /* initialize write buffer if compressing */ if (!state->direct) { strm->avail_out = state->size; strm->next_out = state->out; state->x.next = strm->next_out; } return 0; } /* Compress whatever is at avail_in and next_in and write to the output file. Return -1 if there is an error writing to the output file or if gz_init() fails to allocate memory, otherwise 0. 
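   As an aside for illustration only, and not code from this file: the public
   wrappers drive this routine with the flush values described next. gzwrite()
   and gzputs() feed data with Z_NO_FLUSH, gzflush() passes its flush argument
   straight through, and gzclose_w() finishes the stream with Z_FINISH. The
   file name in this sketch is invented:

       gzFile f = gzopen("example.gz", "wb");
       gzwrite(f, buf, len);            buffered, compressed with Z_NO_FLUSH
       gzflush(f, Z_SYNC_FLUSH);        ends up in gz_comp(state, Z_SYNC_FLUSH)
       gzclose(f);                      gz_comp(state, Z_FINISH), then close
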
flush is assumed to be a valid deflate() flush value. If flush is Z_FINISH, then the deflate() state is reset to start a new gzip stream. If gz->direct is true, then simply write to the output file without compressing, and ignore flush. */ local int gz_comp(state, flush) gz_statep state; int flush; { int ret, writ; unsigned have, put, max = ((unsigned)-1 >> 2) + 1; z_streamp strm = &(state->strm); /* allocate memory if this is the first time through */ if (state->size == 0 && gz_init(state) == -1) return -1; /* write directly if requested */ if (state->direct) { while (strm->avail_in) { put = strm->avail_in > max ? max : strm->avail_in; writ = write(state->fd, strm->next_in, put); if (writ < 0) { gz_error(state, Z_ERRNO, zstrerror()); return -1; } strm->avail_in -= (unsigned)writ; strm->next_in += writ; } return 0; } /* run deflate() on provided input until it produces no more output */ ret = Z_OK; do { /* write out current buffer contents if full, or if flushing, but if doing Z_FINISH then don't write until we get to Z_STREAM_END */ if (strm->avail_out == 0 || (flush != Z_NO_FLUSH && (flush != Z_FINISH || ret == Z_STREAM_END))) { while (strm->next_out > state->x.next) { put = strm->next_out - state->x.next > (int)max ? max : (unsigned)(strm->next_out - state->x.next); writ = write(state->fd, state->x.next, put); if (writ < 0) { gz_error(state, Z_ERRNO, zstrerror()); return -1; } state->x.next += writ; } if (strm->avail_out == 0) { strm->avail_out = state->size; strm->next_out = state->out; state->x.next = state->out; } } /* compress */ have = strm->avail_out; ret = deflate(strm, flush); if (ret == Z_STREAM_ERROR) { gz_error(state, Z_STREAM_ERROR, "internal error: deflate stream corrupt"); return -1; } have -= strm->avail_out; } while (have); /* if that completed a deflate stream, allow another to start */ if (flush == Z_FINISH) deflateReset(strm); /* all done, no errors */ return 0; } /* Compress len zeros to output. Return -1 on a write error or memory allocation failure by gz_comp(), or 0 on success. */ local int gz_zero(state, len) gz_statep state; z_off64_t len; { int first; unsigned n; z_streamp strm = &(state->strm); /* consume whatever's left in the input buffer */ if (strm->avail_in && gz_comp(state, Z_NO_FLUSH) == -1) return -1; /* compress len zeros (len guaranteed > 0) */ first = 1; while (len) { n = GT_OFF(state->size) || (z_off64_t)state->size > len ? (unsigned)len : state->size; if (first) { memset(state->in, 0, n); first = 0; } strm->avail_in = n; strm->next_in = state->in; state->x.pos += n; if (gz_comp(state, Z_NO_FLUSH) == -1) return -1; len -= n; } return 0; } /* Write len bytes from buf to file. Return the number of bytes written. If the returned value is less than len, then there was an error. 
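   As an illustration of how a caller typically checks for that, here is a
   hedged sketch using the public wrappers defined below rather than code from
   this file; buf and len stand for the caller's data and are assumptions of
   the sketch:

       int errnum;
       if (gzwrite(file, buf, (unsigned)len) != (int)len)
           fprintf(stderr, "write failed: %s\n", gzerror(file, &errnum));
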
*/ local z_size_t gz_write(state, buf, len) gz_statep state; voidpc buf; z_size_t len; { z_size_t put = len; /* if len is zero, avoid unnecessary operations */ if (len == 0) return 0; /* allocate memory if this is the first time through */ if (state->size == 0 && gz_init(state) == -1) return 0; /* check for seek request */ if (state->seek) { state->seek = 0; if (gz_zero(state, state->skip) == -1) return 0; } /* for small len, copy to input buffer, otherwise compress directly */ if (len < state->size) { /* copy to input buffer, compress when full */ do { unsigned have, copy; if (state->strm.avail_in == 0) state->strm.next_in = state->in; have = (unsigned)((state->strm.next_in + state->strm.avail_in) - state->in); copy = state->size - have; if (copy > len) copy = len; memcpy(state->in + have, buf, copy); state->strm.avail_in += copy; state->x.pos += copy; buf = (const char *)buf + copy; len -= copy; if (len && gz_comp(state, Z_NO_FLUSH) == -1) return 0; } while (len); } else { /* consume whatever's left in the input buffer */ if (state->strm.avail_in && gz_comp(state, Z_NO_FLUSH) == -1) return 0; /* directly compress user buffer to file */ state->strm.next_in = (z_const Bytef *)buf; do { unsigned n = (unsigned)-1; if (n > len) n = len; state->strm.avail_in = n; state->x.pos += n; if (gz_comp(state, Z_NO_FLUSH) == -1) return 0; len -= n; } while (len); } /* input was all buffered or compressed */ return put; } /* -- see zlib.h -- */ int ZEXPORT gzwrite(file, buf, len) gzFile file; voidpc buf; unsigned len; { gz_statep state; /* get internal structure */ if (file == NULL) return 0; state = (gz_statep)file; /* check that we're writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return 0; /* since an int is returned, make sure len fits in one, otherwise return with an error (this avoids a flaw in the interface) */ if ((int)len < 0) { gz_error(state, Z_DATA_ERROR, "requested length does not fit in int"); return 0; } /* write len bytes from buf (the return value will fit in an int) */ return (int)gz_write(state, buf, len); } /* -- see zlib.h -- */ z_size_t ZEXPORT gzfwrite(buf, size, nitems, file) voidpc buf; z_size_t size; z_size_t nitems; gzFile file; { z_size_t len; gz_statep state; /* get internal structure */ if (file == NULL) return 0; state = (gz_statep)file; /* check that we're writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return 0; /* compute bytes to read -- error on overflow */ len = nitems * size; if (size && len / size != nitems) { gz_error(state, Z_STREAM_ERROR, "request does not fit in a size_t"); return 0; } /* write len bytes to buf, return the number of full items written */ return len ? 
gz_write(state, buf, len) / size : 0; } /* -- see zlib.h -- */ int ZEXPORT gzputc(file, c) gzFile file; int c; { unsigned have; unsigned char buf[1]; gz_statep state; z_streamp strm; /* get internal structure */ if (file == NULL) return -1; state = (gz_statep)file; strm = &(state->strm); /* check that we're writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return -1; /* check for seek request */ if (state->seek) { state->seek = 0; if (gz_zero(state, state->skip) == -1) return -1; } /* try writing to input buffer for speed (state->size == 0 if buffer not initialized) */ if (state->size) { if (strm->avail_in == 0) strm->next_in = state->in; have = (unsigned)((strm->next_in + strm->avail_in) - state->in); if (have < state->size) { state->in[have] = (unsigned char)c; strm->avail_in++; state->x.pos++; return c & 0xff; } } /* no room in buffer or not initialized, use gz_write() */ buf[0] = (unsigned char)c; if (gz_write(state, buf, 1) != 1) return -1; return c & 0xff; } /* -- see zlib.h -- */ int ZEXPORT gzputs(file, str) gzFile file; const char *str; { int ret; z_size_t len; gz_statep state; /* get internal structure */ if (file == NULL) return -1; state = (gz_statep)file; /* check that we're writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return -1; /* write string */ len = strlen(str); ret = gz_write(state, str, len); return ret == 0 && len != 0 ? -1 : ret; } #if defined(STDC) || defined(Z_HAVE_STDARG_H) #include /* -- see zlib.h -- */ int ZEXPORTVA gzvprintf(gzFile file, const char *format, va_list va) { int len; unsigned left; char *next; gz_statep state; z_streamp strm; /* get internal structure */ if (file == NULL) return Z_STREAM_ERROR; state = (gz_statep)file; strm = &(state->strm); /* check that we're writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return Z_STREAM_ERROR; /* make sure we have some buffer space */ if (state->size == 0 && gz_init(state) == -1) return state->err; /* check for seek request */ if (state->seek) { state->seek = 0; if (gz_zero(state, state->skip) == -1) return state->err; } /* do the printf() into the input buffer, put length in len -- the input buffer is double-sized just for this function, so there is guaranteed to be state->size bytes available after the current contents */ if (strm->avail_in == 0) strm->next_in = state->in; next = (char *)(state->in + (strm->next_in - state->in) + strm->avail_in); next[state->size - 1] = 0; #ifdef NO_vsnprintf # ifdef HAS_vsprintf_void (void)vsprintf(next, format, va); for (len = 0; len < state->size; len++) if (next[len] == 0) break; # else len = vsprintf(next, format, va); # endif #else # ifdef HAS_vsnprintf_void (void)vsnprintf(next, state->size, format, va); len = strlen(next); # else len = vsnprintf(next, state->size, format, va); # endif #endif /* check that printf() results fit in buffer */ if (len == 0 || (unsigned)len >= state->size || next[state->size - 1] != 0) return 0; /* update buffer and position, compress first half if past that */ strm->avail_in += (unsigned)len; state->x.pos += len; if (strm->avail_in >= state->size) { left = strm->avail_in - state->size; strm->avail_in = state->size; if (gz_comp(state, Z_NO_FLUSH) == -1) return state->err; memcpy(state->in, state->in + state->size, left); strm->next_in = state->in; strm->avail_in = left; } return len; } int ZEXPORTVA gzprintf(gzFile file, const char *format, ...) 
{ va_list va; int ret; va_start(va, format); ret = gzvprintf(file, format, va); va_end(va); return ret; } #else /* !STDC && !Z_HAVE_STDARG_H */ /* -- see zlib.h -- */ int ZEXPORTVA gzprintf (file, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20) gzFile file; const char *format; int a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20; { unsigned len, left; char *next; gz_statep state; z_streamp strm; /* get internal structure */ if (file == NULL) return Z_STREAM_ERROR; state = (gz_statep)file; strm = &(state->strm); /* check that can really pass pointer in ints */ if (sizeof(int) != sizeof(void *)) return Z_STREAM_ERROR; /* check that we're writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return Z_STREAM_ERROR; /* make sure we have some buffer space */ if (state->size == 0 && gz_init(state) == -1) return state->error; /* check for seek request */ if (state->seek) { state->seek = 0; if (gz_zero(state, state->skip) == -1) return state->error; } /* do the printf() into the input buffer, put length in len -- the input buffer is double-sized just for this function, so there is guaranteed to be state->size bytes available after the current contents */ if (strm->avail_in == 0) strm->next_in = state->in; next = (char *)(strm->next_in + strm->avail_in); next[state->size - 1] = 0; #ifdef NO_snprintf # ifdef HAS_sprintf_void sprintf(next, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); for (len = 0; len < size; len++) if (next[len] == 0) break; # else len = sprintf(next, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); # endif #else # ifdef HAS_snprintf_void snprintf(next, state->size, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); len = strlen(next); # else len = snprintf(next, state->size, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18, a19, a20); # endif #endif /* check that printf() results fit in buffer */ if (len == 0 || len >= state->size || next[state->size - 1] != 0) return 0; /* update buffer and position, compress first half if past that */ strm->avail_in += len; state->x.pos += len; if (strm->avail_in >= state->size) { left = strm->avail_in - state->size; strm->avail_in = state->size; if (gz_comp(state, Z_NO_FLUSH) == -1) return state->err; memcpy(state->in, state->in + state->size, left); strm->next_in = state->in; strm->avail_in = left; } return (int)len; } #endif /* -- see zlib.h -- */ int ZEXPORT gzflush(file, flush) gzFile file; int flush; { gz_statep state; /* get internal structure */ if (file == NULL) return Z_STREAM_ERROR; state = (gz_statep)file; /* check that we're writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return Z_STREAM_ERROR; /* check flush parameter */ if (flush < 0 || flush > Z_FINISH) return Z_STREAM_ERROR; /* check for seek request */ if (state->seek) { state->seek = 0; if (gz_zero(state, state->skip) == -1) return state->err; } /* compress remaining data with requested flush */ (void)gz_comp(state, flush); return state->err; } /* -- see zlib.h -- */ int ZEXPORT gzsetparams(file, level, strategy) gzFile file; int level; int strategy; { gz_statep state; z_streamp strm; /* get internal structure */ if (file == NULL) return Z_STREAM_ERROR; state = (gz_statep)file; strm = &(state->strm); /* check that we're 
writing and that there's no error */ if (state->mode != GZ_WRITE || state->err != Z_OK) return Z_STREAM_ERROR; /* if no change is requested, then do nothing */ if (level == state->level && strategy == state->strategy) return Z_OK; /* check for seek request */ if (state->seek) { state->seek = 0; if (gz_zero(state, state->skip) == -1) return state->err; } /* change compression parameters for subsequent input */ if (state->size) { /* flush previous input with previous parameters before changing */ if (strm->avail_in && gz_comp(state, Z_BLOCK) == -1) return state->err; deflateParams(strm, level, strategy); } state->level = level; state->strategy = strategy; return Z_OK; } /* -- see zlib.h -- */ int ZEXPORT gzclose_w(file) gzFile file; { int ret = Z_OK; gz_statep state; /* get internal structure */ if (file == NULL) return Z_STREAM_ERROR; state = (gz_statep)file; /* check that we're writing */ if (state->mode != GZ_WRITE) return Z_STREAM_ERROR; /* check for seek request */ if (state->seek) { state->seek = 0; if (gz_zero(state, state->skip) == -1) ret = state->err; } /* flush, free memory, and close file */ if (gz_comp(state, Z_FINISH) == -1) ret = state->err; if (state->size) { if (!state->direct) { (void)deflateEnd(&(state->strm)); free(state->out); } free(state->in); } gz_error(state, Z_OK, NULL); free(state->path); if (close(state->fd) == -1) ret = Z_ERRNO; free(state); return ret; } #endif mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/infback.c0000644000076500000240000005427313572250760021234 0ustar alcaeusstaff/* infback.c -- inflate using a call-back interface * Copyright (C) 1995-2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* This code is largely copied from inflate.c. Normally either infback.o or inflate.o would be linked into an application--not both. The interface with inffast.c is retained so that optimized assembler-coded versions of inflate_fast() can be used with either inflate.c or infback.c. */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" /* function prototypes */ local void fixedtables OF((struct inflate_state FAR *state)); /* strm provides memory allocation functions in zalloc and zfree, or Z_NULL to use the library memory allocation functions. windowBits is in the range 8..15, and window is a user-supplied window and output buffer that is 2**windowBits bytes. 
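   For example, as an illustrative sketch only and not part of this file, a
   caller using the full 32K window would set up the stream like this, where
   the window array is supplied and owned by the application:

       z_stream strm;
       unsigned char window[32768];     2**15 bytes for windowBits == 15
       int ret;
       strm.zalloc = Z_NULL;
       strm.zfree = Z_NULL;
       strm.opaque = Z_NULL;
       ret = inflateBackInit(&strm, 15, window);
       if (ret != Z_OK)
           return ret;
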
*/ int ZEXPORT inflateBackInit_(strm, windowBits, window, version, stream_size) z_streamp strm; int windowBits; unsigned char FAR *window; const char *version; int stream_size; { struct inflate_state FAR *state; if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || stream_size != (int)(sizeof(z_stream))) return Z_VERSION_ERROR; if (strm == Z_NULL || window == Z_NULL || windowBits < 8 || windowBits > 15) return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; #endif } if (strm->zfree == (free_func)0) #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zfree = zcfree; #endif state = (struct inflate_state FAR *)ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; Tracev((stderr, "inflate: allocated\n")); strm->state = (struct internal_state FAR *)state; state->dmax = 32768U; state->wbits = (uInt)windowBits; state->wsize = 1U << windowBits; state->window = window; state->wnext = 0; state->whave = 0; return Z_OK; } /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. If BUILDFIXED is defined, then instead this routine builds the tables the first time it's called, and returns those tables the first time and thereafter. This reduces the size of the code by about 2K bytes, in exchange for a little execution time. However, BUILDFIXED should not be used for threaded applications, since the rewriting of the tables and virgin may not be thread-safe. */ local void fixedtables(state) struct inflate_state FAR *state; { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } /* Macros for inflateBack(): */ /* Load returned state from inflate_fast() */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Set state from registers for inflate_fast() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Assure that some input is available. If input is requested, but denied, then return a Z_BUF_ERROR from inflateBack(). 
*/ #define PULL() \ do { \ if (have == 0) { \ have = in(in_desc, &next); \ if (have == 0) { \ next = Z_NULL; \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflateBack() with an error if there is no input available. */ #define PULLBYTE() \ do { \ PULL(); \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflateBack() with an error. */ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* Assure that some output space is available, by writing out the window if it's full. If the write fails, return from inflateBack() with a Z_BUF_ERROR. */ #define ROOM() \ do { \ if (left == 0) { \ put = state->window; \ left = state->wsize; \ state->whave = left; \ if (out(out_desc, put, left)) { \ ret = Z_BUF_ERROR; \ goto inf_leave; \ } \ } \ } while (0) /* strm provides the memory allocation functions and window buffer on input, and provides information on the unused input on return. For Z_DATA_ERROR returns, strm will also provide an error message. in() and out() are the call-back input and output functions. When inflateBack() needs more input, it calls in(). When inflateBack() has filled the window with output, or when it completes with data in the window, it calls out() to write out the data. The application must not change the provided input until in() is called again or inflateBack() returns. The application must not change the window/output buffer until inflateBack() returns. in() and out() are called with a descriptor parameter provided in the inflateBack() call. This parameter can be a structure that provides the information required to do the read or write, as well as accumulated information on the input and output such as totals and check values. in() should return zero on failure. out() should return non-zero on failure. If either in() or out() fails, than inflateBack() returns a Z_BUF_ERROR. strm->next_in can be checked for Z_NULL to see whether it was in() or out() that caused in the error. Otherwise, inflateBack() returns Z_STREAM_END on success, Z_DATA_ERROR for an deflate format error, or Z_MEM_ERROR if it could not allocate memory for the state. inflateBack() can also return Z_STREAM_ERROR if the input parameters are not correct, i.e. strm is Z_NULL or the state was not initialized. 
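   As a concrete illustration -- a sketch under stated assumptions, not part of
   the original source; the callback names and the use of stdio FILE pointers
   as the in_desc and out_desc descriptors are invented for the example -- a
   caller inflating a raw deflate stream from one file to another could look
   like this:

       static unsigned pull(void *desc, z_const unsigned char **buf)
       {
           static unsigned char hold[16384];
           *buf = hold;
           return (unsigned)fread(hold, 1, sizeof(hold), (FILE *)desc);
       }

       static int push(void *desc, unsigned char *buf, unsigned len)
       {
           return fwrite(buf, 1, len, (FILE *)desc) != len;
       }

       int inflate_back_file(FILE *src, FILE *dst)
       {
           z_stream strm;
           unsigned char window[32768];
           int ret;
           strm.zalloc = Z_NULL;
           strm.zfree = Z_NULL;
           strm.opaque = Z_NULL;
           strm.next_in = Z_NULL;
           strm.avail_in = 0;
           ret = inflateBackInit(&strm, 15, window);
           if (ret != Z_OK)
               return ret;
           ret = inflateBack(&strm, pull, src, push, dst);
           (void)inflateBackEnd(&strm);
           return ret == Z_STREAM_END ? Z_OK : ret;
       }

   pull() returning zero at end of input and push() returning non-zero on a
   short write follow the failure conventions described above.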
*/ int ZEXPORT inflateBack(strm, in, in_desc, out, out_desc) z_streamp strm; in_func in; void FAR *in_desc; out_func out; void FAR *out_desc; { struct inflate_state FAR *state; z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; /* Check that the strm exists and that the state was initialized */ if (strm == Z_NULL || strm->state == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* Reset the state */ strm->msg = Z_NULL; state->mode = TYPE; state->last = 0; state->whave = 0; next = strm->next_in; have = next != Z_NULL ? strm->avail_in : 0; hold = 0; bits = 0; put = state->window; left = state->wsize; /* Inflate until end of block marked as last */ for (;;) switch (state->mode) { case TYPE: /* determine and dispatch block type */ if (state->last) { BYTEBITS(); state->mode = DONE; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch (BITS(2)) { case 0: /* stored block */ Tracev((stderr, "inflate: stored block%s\n", state->last ? " (last)" : "")); state->mode = STORED; break; case 1: /* fixed block */ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); state->mode = LEN; /* decode codes */ break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", state->last ? 
" (last)" : "")); state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: /* get and verify stored block length */ BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); /* copy stored block from input to output */ while (state->length != 0) { copy = state->length; PULL(); ROOM(); if (copy > have) copy = have; if (copy > left) copy = left; zmemcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; } Tracev((stderr, "inflate: stored end\n")); state->mode = TYPE; break; case TABLE: /* get dynamic table entries descriptor */ NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif Tracev((stderr, "inflate: table sizes ok\n")); /* get code length code lengths (not a typo) */ state->have = 0; while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } Tracev((stderr, "inflate: code lengths ok\n")); /* get length and distance code code lengths */ state->have = 0; while (state->have < state->nlen + state->ndist) { for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.val < 16) { DROPBITS(here.bits); state->lens[state->have++] = here.val; } else { if (here.val == 16) { NEEDBITS(here.bits + 2); DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = (unsigned)(state->lens[state->have - 1]); copy = 3 + BITS(2); DROPBITS(2); } else if (here.val == 17) { NEEDBITS(here.bits + 3); DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(here.bits + 7); DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* check for end-of-block code (better have one) */ if (state->lens[256] == 0) { strm->msg = (char *)"invalid code -- missing end-of-block"; state->mode = BAD; break; } /* build code tables -- note: do not change the lenbits or distbits values here (9 and 6) without reading the comments in inftrees.h concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; state->lencode = (code const FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = 
(code const FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN; case LEN: /* use inflate_fast() if we have enough input and output */ if (have >= 6 && left >= 258) { RESTORE(); if (state->whave < state->wsize) state->whave = state->wsize - left; inflate_fast(strm, state->wsize); LOAD(); break; } /* get a literal, length, or end-of-block code */ for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.op && (here.op & 0xf0) == 0) { last = here; for (;;) { here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(here.bits); state->length = (unsigned)here.val; /* process literal */ if (here.op == 0) { Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here.val)); ROOM(); *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; } /* process end of block */ if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } /* invalid code */ if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } /* length code -- get extra bits, if any */ state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); } Tracevv((stderr, "inflate: length %u\n", state->length)); /* get distance code */ for (;;) { here = state->distcode[BITS(state->distbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if ((here.op & 0xf0) == 0) { last = here; for (;;) { here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(here.bits); if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)here.val; /* get distance extra bits, if any */ state->extra = (unsigned)(here.op) & 15; if (state->extra != 0) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); } if (state->offset > state->wsize - (state->whave < state->wsize ? 
left : 0)) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } Tracevv((stderr, "inflate: distance %u\n", state->offset)); /* copy match from window to output */ do { ROOM(); copy = state->wsize - state->offset; if (copy < left) { from = put + copy; copy = left - copy; } else { from = put - state->offset; copy = left; } if (copy > state->length) copy = state->length; state->length -= copy; left -= copy; do { *put++ = *from++; } while (--copy); } while (state->length != 0); break; case DONE: /* inflate stream terminated properly -- write leftover output */ ret = Z_STREAM_END; if (left < state->wsize) { if (out(out_desc, state->window, state->wsize - left)) ret = Z_BUF_ERROR; } goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; default: /* can't happen, but makes compilers happy */ ret = Z_STREAM_ERROR; goto inf_leave; } /* Return unused input */ inf_leave: strm->next_in = next; strm->avail_in = have; return ret; } int ZEXPORT inflateBackEnd(strm) z_streamp strm; { if (strm == Z_NULL || strm->state == Z_NULL || strm->zfree == (free_func)0) return Z_STREAM_ERROR; ZFREE(strm, strm->state); strm->state = Z_NULL; Tracev((stderr, "inflate: end\n")); return Z_OK; } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/inffast.c0000644000076500000240000003126213572250760021262 0ustar alcaeusstaff/* inffast.c -- fast decoding * Copyright (C) 1995-2017 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifdef ASMINF # pragma message("Assembler code may have bugs -- use at your own risk") #else /* Decode literal, length, and distance codes and write out the resulting literal and match bytes until either not enough input or output is available, an end-of-block is encountered, or a data error is encountered. When large enough input and output buffers are supplied to inflate(), for example, a 16K input buffer and a 64K output buffer, more than 95% of the inflate execution time is spent in this routine. Entry assumptions: state->mode == LEN strm->avail_in >= 6 strm->avail_out >= 258 start >= strm->avail_out state->bits < 8 On return, state->mode is one of: LEN -- ran out of enough output space or enough available input TYPE -- reached end of block code, inflate() to interpret next block BAD -- error in block data Notes: - The maximum input bits used by a length/distance pair is 15 bits for the length code, 5 bits for the length extra, 15 bits for the distance code, and 13 bits for the distance extra. This totals 48 bits, or six bytes. Therefore if strm->avail_in >= 6, then there is enough input to avoid checking for available input while decoding. - The maximum bytes that a single length/distance pair can output is 258 bytes, which is the maximum length that can be coded. inflate_fast() requires strm->avail_out >= 258 for each loop to avoid checking for output space. 
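   As an illustration of the caller-side contract, sketched here only and
   mirroring the LEN case of inflateBack() and inflate() in this package: the
   caller checks both bounds before entering, and falls back to decoding one
   code at a time otherwise, where start is inflate()'s starting avail_out:

       if (have >= 6 && left >= 258) {
           RESTORE();
           inflate_fast(strm, start);
           LOAD();
           break;
       }
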
*/ void ZLIB_INTERNAL inflate_fast(strm, start) z_streamp strm; unsigned start; /* inflate()'s starting value for strm->avail_out */ { struct inflate_state FAR *state; z_const unsigned char FAR *in; /* local strm->next_in */ z_const unsigned char FAR *last; /* have enough input while in < last */ unsigned char FAR *out; /* local strm->next_out */ unsigned char FAR *beg; /* inflate()'s initial strm->next_out */ unsigned char FAR *end; /* while out < end, enough space available */ #ifdef INFLATE_STRICT unsigned dmax; /* maximum distance from zlib header */ #endif unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if wsize != 0 */ unsigned long hold; /* local strm->hold */ unsigned bits; /* local strm->bits */ code const FAR *lcode; /* local strm->lencode */ code const FAR *dcode; /* local strm->distcode */ unsigned lmask; /* mask for first level of length codes */ unsigned dmask; /* mask for first level of distance codes */ code here; /* retrieved table entry */ unsigned op; /* code bits, operation, extra bits, or */ /* window position, window bytes to copy */ unsigned len; /* match length, unused bytes */ unsigned dist; /* match distance */ unsigned char FAR *from; /* where to copy match from */ /* copy state to local variables */ state = (struct inflate_state FAR *)strm->state; in = strm->next_in; last = in + (strm->avail_in - 5); out = strm->next_out; beg = out - (start - strm->avail_out); end = out + (strm->avail_out - 257); #ifdef INFLATE_STRICT dmax = state->dmax; #endif wsize = state->wsize; whave = state->whave; wnext = state->wnext; window = state->window; hold = state->hold; bits = state->bits; lcode = state->lencode; dcode = state->distcode; lmask = (1U << state->lenbits) - 1; dmask = (1U << state->distbits) - 1; /* decode literals and length/distances until end-of-block or not enough input data or output space */ do { if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } here = lcode[hold & lmask]; dolen: op = (unsigned)(here.bits); hold >>= op; bits -= op; op = (unsigned)(here.op); if (op == 0) { /* literal */ Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? 
"inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here.val)); *out++ = (unsigned char)(here.val); } else if (op & 16) { /* length base */ len = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (op) { if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } len += (unsigned)hold & ((1U << op) - 1); hold >>= op; bits -= op; } Tracevv((stderr, "inflate: length %u\n", len)); if (bits < 15) { hold += (unsigned long)(*in++) << bits; bits += 8; hold += (unsigned long)(*in++) << bits; bits += 8; } here = dcode[hold & dmask]; dodist: op = (unsigned)(here.bits); hold >>= op; bits -= op; op = (unsigned)(here.op); if (op & 16) { /* distance base */ dist = (unsigned)(here.val); op &= 15; /* number of extra bits */ if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; if (bits < op) { hold += (unsigned long)(*in++) << bits; bits += 8; } } dist += (unsigned)hold & ((1U << op) - 1); #ifdef INFLATE_STRICT if (dist > dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif hold >>= op; bits -= op; Tracevv((stderr, "inflate: distance %u\n", dist)); op = (unsigned)(out - beg); /* max distance in output */ if (dist > op) { /* see if copy from window */ op = dist - op; /* distance back in window */ if (op > whave) { if (state->sane) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR if (len <= op - whave) { do { *out++ = 0; } while (--len); continue; } len -= op - whave; do { *out++ = 0; } while (--op > whave); if (op == 0) { from = out - dist; do { *out++ = *from++; } while (--len); continue; } #endif } from = window; if (wnext == 0) { /* very common case */ from += wsize - op; if (op < len) { /* some from window */ len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } else if (wnext < op) { /* wrap around window */ from += wsize + wnext - op; op -= wnext; if (op < len) { /* some from end of window */ len -= op; do { *out++ = *from++; } while (--op); from = window; if (wnext < len) { /* some from start of window */ op = wnext; len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } } else { /* contiguous in window */ from += wnext - op; if (op < len) { /* some from window */ len -= op; do { *out++ = *from++; } while (--op); from = out - dist; /* rest from output */ } } while (len > 2) { *out++ = *from++; *out++ = *from++; *out++ = *from++; len -= 3; } if (len) { *out++ = *from++; if (len > 1) *out++ = *from++; } } else { from = out - dist; /* copy direct from output */ do { /* minimum length is three */ *out++ = *from++; *out++ = *from++; *out++ = *from++; len -= 3; } while (len > 2); if (len) { *out++ = *from++; if (len > 1) *out++ = *from++; } } } else if ((op & 64) == 0) { /* 2nd level distance code */ here = dcode[here.val + (hold & ((1U << op) - 1))]; goto dodist; } else { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } } else if ((op & 64) == 0) { /* 2nd level length code */ here = lcode[here.val + (hold & ((1U << op) - 1))]; goto dolen; } else if (op & 32) { /* end-of-block */ Tracevv((stderr, "inflate: end of block\n")); state->mode = TYPE; break; } else { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } } while (in < last && out < end); /* return unused bytes (on entry, bits < 8, so in won't go too far back) */ len = bits >> 3; in -= len; bits -= len << 3; hold &= (1U << bits) - 1; /* update state and 
return */ strm->next_in = in; strm->next_out = out; strm->avail_in = (unsigned)(in < last ? 5 + (last - in) : 5 - (in - last)); strm->avail_out = (unsigned)(out < end ? 257 + (end - out) : 257 - (out - end)); state->hold = hold; state->bits = bits; return; } /* inflate_fast() speedups that turned out slower (on a PowerPC G3 750CXe): - Using bit fields for code structure - Different op definition to avoid & for extra bits (do & for table bits) - Three separate decoding do-loops for direct, window, and wnext == 0 - Special case for distance > 1 copies to do overlapped load and store copy - Explicit branch predictions (based on measured branch probabilities) - Deferring match copy and interspersed it with decoding subsequent codes - Swapping literal/length else - Swapping window/direct else - Larger unrolled copy loops (three is about right) - Moving len -= 3 statement into middle of loop */ #endif /* !ASMINF */ mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/inffast.h0000644000076500000240000000065313572250760021267 0ustar alcaeusstaff/* inffast.h -- header to use inffast.c * Copyright (C) 1995-2003, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ void ZLIB_INTERNAL inflate_fast OF((z_streamp strm, unsigned start)); mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/inffixed.h0000644000076500000240000001427413572250760021435 0ustar alcaeusstaff /* inffixed.h -- table for decoding fixed codes * Generated automatically by makefixed(). */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of this library and is subject to change. Applications should only use zlib.h. 
*/ static const code lenfix[512] = { {96,7,0},{0,8,80},{0,8,16},{20,8,115},{18,7,31},{0,8,112},{0,8,48}, {0,9,192},{16,7,10},{0,8,96},{0,8,32},{0,9,160},{0,8,0},{0,8,128}, {0,8,64},{0,9,224},{16,7,6},{0,8,88},{0,8,24},{0,9,144},{19,7,59}, {0,8,120},{0,8,56},{0,9,208},{17,7,17},{0,8,104},{0,8,40},{0,9,176}, {0,8,8},{0,8,136},{0,8,72},{0,9,240},{16,7,4},{0,8,84},{0,8,20}, {21,8,227},{19,7,43},{0,8,116},{0,8,52},{0,9,200},{17,7,13},{0,8,100}, {0,8,36},{0,9,168},{0,8,4},{0,8,132},{0,8,68},{0,9,232},{16,7,8}, {0,8,92},{0,8,28},{0,9,152},{20,7,83},{0,8,124},{0,8,60},{0,9,216}, {18,7,23},{0,8,108},{0,8,44},{0,9,184},{0,8,12},{0,8,140},{0,8,76}, {0,9,248},{16,7,3},{0,8,82},{0,8,18},{21,8,163},{19,7,35},{0,8,114}, {0,8,50},{0,9,196},{17,7,11},{0,8,98},{0,8,34},{0,9,164},{0,8,2}, {0,8,130},{0,8,66},{0,9,228},{16,7,7},{0,8,90},{0,8,26},{0,9,148}, {20,7,67},{0,8,122},{0,8,58},{0,9,212},{18,7,19},{0,8,106},{0,8,42}, {0,9,180},{0,8,10},{0,8,138},{0,8,74},{0,9,244},{16,7,5},{0,8,86}, {0,8,22},{64,8,0},{19,7,51},{0,8,118},{0,8,54},{0,9,204},{17,7,15}, {0,8,102},{0,8,38},{0,9,172},{0,8,6},{0,8,134},{0,8,70},{0,9,236}, {16,7,9},{0,8,94},{0,8,30},{0,9,156},{20,7,99},{0,8,126},{0,8,62}, {0,9,220},{18,7,27},{0,8,110},{0,8,46},{0,9,188},{0,8,14},{0,8,142}, {0,8,78},{0,9,252},{96,7,0},{0,8,81},{0,8,17},{21,8,131},{18,7,31}, {0,8,113},{0,8,49},{0,9,194},{16,7,10},{0,8,97},{0,8,33},{0,9,162}, {0,8,1},{0,8,129},{0,8,65},{0,9,226},{16,7,6},{0,8,89},{0,8,25}, {0,9,146},{19,7,59},{0,8,121},{0,8,57},{0,9,210},{17,7,17},{0,8,105}, {0,8,41},{0,9,178},{0,8,9},{0,8,137},{0,8,73},{0,9,242},{16,7,4}, {0,8,85},{0,8,21},{16,8,258},{19,7,43},{0,8,117},{0,8,53},{0,9,202}, {17,7,13},{0,8,101},{0,8,37},{0,9,170},{0,8,5},{0,8,133},{0,8,69}, {0,9,234},{16,7,8},{0,8,93},{0,8,29},{0,9,154},{20,7,83},{0,8,125}, {0,8,61},{0,9,218},{18,7,23},{0,8,109},{0,8,45},{0,9,186},{0,8,13}, {0,8,141},{0,8,77},{0,9,250},{16,7,3},{0,8,83},{0,8,19},{21,8,195}, {19,7,35},{0,8,115},{0,8,51},{0,9,198},{17,7,11},{0,8,99},{0,8,35}, {0,9,166},{0,8,3},{0,8,131},{0,8,67},{0,9,230},{16,7,7},{0,8,91}, {0,8,27},{0,9,150},{20,7,67},{0,8,123},{0,8,59},{0,9,214},{18,7,19}, {0,8,107},{0,8,43},{0,9,182},{0,8,11},{0,8,139},{0,8,75},{0,9,246}, {16,7,5},{0,8,87},{0,8,23},{64,8,0},{19,7,51},{0,8,119},{0,8,55}, {0,9,206},{17,7,15},{0,8,103},{0,8,39},{0,9,174},{0,8,7},{0,8,135}, {0,8,71},{0,9,238},{16,7,9},{0,8,95},{0,8,31},{0,9,158},{20,7,99}, {0,8,127},{0,8,63},{0,9,222},{18,7,27},{0,8,111},{0,8,47},{0,9,190}, {0,8,15},{0,8,143},{0,8,79},{0,9,254},{96,7,0},{0,8,80},{0,8,16}, {20,8,115},{18,7,31},{0,8,112},{0,8,48},{0,9,193},{16,7,10},{0,8,96}, {0,8,32},{0,9,161},{0,8,0},{0,8,128},{0,8,64},{0,9,225},{16,7,6}, {0,8,88},{0,8,24},{0,9,145},{19,7,59},{0,8,120},{0,8,56},{0,9,209}, {17,7,17},{0,8,104},{0,8,40},{0,9,177},{0,8,8},{0,8,136},{0,8,72}, {0,9,241},{16,7,4},{0,8,84},{0,8,20},{21,8,227},{19,7,43},{0,8,116}, {0,8,52},{0,9,201},{17,7,13},{0,8,100},{0,8,36},{0,9,169},{0,8,4}, {0,8,132},{0,8,68},{0,9,233},{16,7,8},{0,8,92},{0,8,28},{0,9,153}, {20,7,83},{0,8,124},{0,8,60},{0,9,217},{18,7,23},{0,8,108},{0,8,44}, {0,9,185},{0,8,12},{0,8,140},{0,8,76},{0,9,249},{16,7,3},{0,8,82}, {0,8,18},{21,8,163},{19,7,35},{0,8,114},{0,8,50},{0,9,197},{17,7,11}, {0,8,98},{0,8,34},{0,9,165},{0,8,2},{0,8,130},{0,8,66},{0,9,229}, {16,7,7},{0,8,90},{0,8,26},{0,9,149},{20,7,67},{0,8,122},{0,8,58}, {0,9,213},{18,7,19},{0,8,106},{0,8,42},{0,9,181},{0,8,10},{0,8,138}, {0,8,74},{0,9,245},{16,7,5},{0,8,86},{0,8,22},{64,8,0},{19,7,51}, 
{0,8,118},{0,8,54},{0,9,205},{17,7,15},{0,8,102},{0,8,38},{0,9,173}, {0,8,6},{0,8,134},{0,8,70},{0,9,237},{16,7,9},{0,8,94},{0,8,30}, {0,9,157},{20,7,99},{0,8,126},{0,8,62},{0,9,221},{18,7,27},{0,8,110}, {0,8,46},{0,9,189},{0,8,14},{0,8,142},{0,8,78},{0,9,253},{96,7,0}, {0,8,81},{0,8,17},{21,8,131},{18,7,31},{0,8,113},{0,8,49},{0,9,195}, {16,7,10},{0,8,97},{0,8,33},{0,9,163},{0,8,1},{0,8,129},{0,8,65}, {0,9,227},{16,7,6},{0,8,89},{0,8,25},{0,9,147},{19,7,59},{0,8,121}, {0,8,57},{0,9,211},{17,7,17},{0,8,105},{0,8,41},{0,9,179},{0,8,9}, {0,8,137},{0,8,73},{0,9,243},{16,7,4},{0,8,85},{0,8,21},{16,8,258}, {19,7,43},{0,8,117},{0,8,53},{0,9,203},{17,7,13},{0,8,101},{0,8,37}, {0,9,171},{0,8,5},{0,8,133},{0,8,69},{0,9,235},{16,7,8},{0,8,93}, {0,8,29},{0,9,155},{20,7,83},{0,8,125},{0,8,61},{0,9,219},{18,7,23}, {0,8,109},{0,8,45},{0,9,187},{0,8,13},{0,8,141},{0,8,77},{0,9,251}, {16,7,3},{0,8,83},{0,8,19},{21,8,195},{19,7,35},{0,8,115},{0,8,51}, {0,9,199},{17,7,11},{0,8,99},{0,8,35},{0,9,167},{0,8,3},{0,8,131}, {0,8,67},{0,9,231},{16,7,7},{0,8,91},{0,8,27},{0,9,151},{20,7,67}, {0,8,123},{0,8,59},{0,9,215},{18,7,19},{0,8,107},{0,8,43},{0,9,183}, {0,8,11},{0,8,139},{0,8,75},{0,9,247},{16,7,5},{0,8,87},{0,8,23}, {64,8,0},{19,7,51},{0,8,119},{0,8,55},{0,9,207},{17,7,15},{0,8,103}, {0,8,39},{0,9,175},{0,8,7},{0,8,135},{0,8,71},{0,9,239},{16,7,9}, {0,8,95},{0,8,31},{0,9,159},{20,7,99},{0,8,127},{0,8,63},{0,9,223}, {18,7,27},{0,8,111},{0,8,47},{0,9,191},{0,8,15},{0,8,143},{0,8,79}, {0,9,255} }; static const code distfix[32] = { {16,5,1},{23,5,257},{19,5,17},{27,5,4097},{17,5,5},{25,5,1025}, {21,5,65},{29,5,16385},{16,5,3},{24,5,513},{20,5,33},{28,5,8193}, {18,5,9},{26,5,2049},{22,5,129},{64,5,0},{16,5,2},{23,5,385}, {19,5,25},{27,5,6145},{17,5,7},{25,5,1537},{21,5,97},{29,5,24577}, {16,5,4},{24,5,769},{20,5,49},{28,5,12289},{18,5,13},{26,5,3073}, {22,5,193},{64,5,0} }; mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/inflate.c0000644000076500000240000015302013572250760021247 0ustar alcaeusstaff/* inflate.c -- zlib decompression * Copyright (C) 1995-2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* * Change history: * * 1.2.beta0 24 Nov 2002 * - First version -- complete rewrite of inflate to simplify code, avoid * creation of window when not needed, minimize use of window when it is * needed, make inffast.c even faster, implement gzip decoding, and to * improve code readability and style over the previous zlib inflate code * * 1.2.beta1 25 Nov 2002 * - Use pointers for available input and output checking in inffast.c * - Remove input and output counters in inffast.c * - Change inffast.c entry and loop from avail_in >= 7 to >= 6 * - Remove unnecessary second byte pull from length extra in inffast.c * - Unroll direct copy to three copies per loop in inffast.c * * 1.2.beta2 4 Dec 2002 * - Change external routine names to reduce potential conflicts * - Correct filename to inffixed.h for fixed tables in inflate.c * - Make hbuf[] unsigned char to match parameter type in inflate.c * - Change strm->next_out[-state->offset] to *(strm->next_out - state->offset) * to avoid negation problem on Alphas (64 bit) in inflate.c * * 1.2.beta3 22 Dec 2002 * - Add comments on state->bits assertion in inffast.c * - Add comments on op field in inftrees.h * - Fix bug in reuse of allocated window after inflateReset() * - Remove bit fields--back to byte structure for speed * - Remove distance extra == 0 check in inflate_fast()--only helps for lengths * - Change post-increments to 
pre-increments in inflate_fast(), PPC biased? * - Add compile time option, POSTINC, to use post-increments instead (Intel?) * - Make MATCH copy in inflate() much faster for when inflate_fast() not used * - Use local copies of stream next and avail values, as well as local bit * buffer and bit count in inflate()--for speed when inflate_fast() not used * * 1.2.beta4 1 Jan 2003 * - Split ptr - 257 statements in inflate_table() to avoid compiler warnings * - Move a comment on output buffer sizes from inffast.c to inflate.c * - Add comments in inffast.c to introduce the inflate_fast() routine * - Rearrange window copies in inflate_fast() for speed and simplification * - Unroll last copy for window match in inflate_fast() * - Use local copies of window variables in inflate_fast() for speed * - Pull out common wnext == 0 case for speed in inflate_fast() * - Make op and len in inflate_fast() unsigned for consistency * - Add FAR to lcode and dcode declarations in inflate_fast() * - Simplified bad distance check in inflate_fast() * - Added inflateBackInit(), inflateBack(), and inflateBackEnd() in new * source file infback.c to provide a call-back interface to inflate for * programs like gzip and unzip -- uses window as output buffer to avoid * window copying * * 1.2.beta5 1 Jan 2003 * - Improved inflateBack() interface to allow the caller to provide initial * input in strm. * - Fixed stored blocks bug in inflateBack() * * 1.2.beta6 4 Jan 2003 * - Added comments in inffast.c on effectiveness of POSTINC * - Typecasting all around to reduce compiler warnings * - Changed loops from while (1) or do {} while (1) to for (;;), again to * make compilers happy * - Changed type of window in inflateBackInit() to unsigned char * * * 1.2.beta7 27 Jan 2003 * - Changed many types to unsigned or unsigned short to avoid warnings * - Added inflateCopy() function * * 1.2.0 9 Mar 2003 * - Changed inflateBack() interface to provide separate opaque descriptors * for the in() and out() functions * - Changed inflateBack() argument and in_func typedef to swap the length * and buffer address return values for the input function * - Check next_in and next_out for Z_NULL on entry to inflate() * * The history for versions after 1.2.0 are in ChangeLog in zlib distribution. 
*/ #include "zutil.h" #include "inftrees.h" #include "inflate.h" #include "inffast.h" #ifdef MAKEFIXED # ifndef BUILDFIXED # define BUILDFIXED # endif #endif /* function prototypes */ local int inflateStateCheck OF((z_streamp strm)); local void fixedtables OF((struct inflate_state FAR *state)); local int updatewindow OF((z_streamp strm, const unsigned char FAR *end, unsigned copy)); #ifdef BUILDFIXED void makefixed OF((void)); #endif local unsigned syncsearch OF((unsigned FAR *have, const unsigned char FAR *buf, unsigned len)); local int inflateStateCheck(strm) z_streamp strm; { struct inflate_state FAR *state; if (strm == Z_NULL || strm->zalloc == (alloc_func)0 || strm->zfree == (free_func)0) return 1; state = (struct inflate_state FAR *)strm->state; if (state == Z_NULL || state->strm != strm || state->mode < HEAD || state->mode > SYNC) return 1; return 0; } int ZEXPORT inflateResetKeep(strm) z_streamp strm; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; strm->total_in = strm->total_out = state->total = 0; strm->msg = Z_NULL; if (state->wrap) /* to support ill-conceived Java test suite */ strm->adler = state->wrap & 1; state->mode = HEAD; state->last = 0; state->havedict = 0; state->dmax = 32768U; state->head = Z_NULL; state->hold = 0; state->bits = 0; state->lencode = state->distcode = state->next = state->codes; state->sane = 1; state->back = -1; Tracev((stderr, "inflate: reset\n")); return Z_OK; } int ZEXPORT inflateReset(strm) z_streamp strm; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; state->wsize = 0; state->whave = 0; state->wnext = 0; return inflateResetKeep(strm); } int ZEXPORT inflateReset2(strm, windowBits) z_streamp strm; int windowBits; { int wrap; struct inflate_state FAR *state; /* get the state */ if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* extract wrap request from windowBits parameter */ if (windowBits < 0) { wrap = 0; windowBits = -windowBits; } else { wrap = (windowBits >> 4) + 5; #ifdef GUNZIP if (windowBits < 48) windowBits &= 15; #endif } /* set number of window bits, free window if different */ if (windowBits && (windowBits < 8 || windowBits > 15)) return Z_STREAM_ERROR; if (state->window != Z_NULL && state->wbits != (unsigned)windowBits) { ZFREE(strm, state->window); state->window = Z_NULL; } /* update state and reset the rest of it */ state->wrap = wrap; state->wbits = (unsigned)windowBits; return inflateReset(strm); } int ZEXPORT inflateInit2_(strm, windowBits, version, stream_size) z_streamp strm; int windowBits; const char *version; int stream_size; { int ret; struct inflate_state FAR *state; if (version == Z_NULL || version[0] != ZLIB_VERSION[0] || stream_size != (int)(sizeof(z_stream))) return Z_VERSION_ERROR; if (strm == Z_NULL) return Z_STREAM_ERROR; strm->msg = Z_NULL; /* in case we return an error */ if (strm->zalloc == (alloc_func)0) { #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zalloc = zcalloc; strm->opaque = (voidpf)0; #endif } if (strm->zfree == (free_func)0) #ifdef Z_SOLO return Z_STREAM_ERROR; #else strm->zfree = zcfree; #endif state = (struct inflate_state FAR *) ZALLOC(strm, 1, sizeof(struct inflate_state)); if (state == Z_NULL) return Z_MEM_ERROR; Tracev((stderr, "inflate: allocated\n")); strm->state = (struct internal_state FAR *)state; state->strm = strm; state->window = Z_NULL; state->mode = HEAD; /* 
to pass state test in inflateReset2() */ ret = inflateReset2(strm, windowBits); if (ret != Z_OK) { ZFREE(strm, state); strm->state = Z_NULL; } return ret; } int ZEXPORT inflateInit_(strm, version, stream_size) z_streamp strm; const char *version; int stream_size; { return inflateInit2_(strm, DEF_WBITS, version, stream_size); } int ZEXPORT inflatePrime(strm, bits, value) z_streamp strm; int bits; int value; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (bits < 0) { state->hold = 0; state->bits = 0; return Z_OK; } if (bits > 16 || state->bits + (uInt)bits > 32) return Z_STREAM_ERROR; value &= (1L << bits) - 1; state->hold += (unsigned)value << state->bits; state->bits += (uInt)bits; return Z_OK; } /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. Normally this returns fixed tables from inffixed.h. If BUILDFIXED is defined, then instead this routine builds the tables the first time it's called, and returns those tables the first time and thereafter. This reduces the size of the code by about 2K bytes, in exchange for a little execution time. However, BUILDFIXED should not be used for threaded applications, since the rewriting of the tables and virgin may not be thread-safe. */ local void fixedtables(state) struct inflate_state FAR *state; { #ifdef BUILDFIXED static int virgin = 1; static code *lenfix, *distfix; static code fixed[544]; /* build fixed huffman tables if first call (may not be thread safe) */ if (virgin) { unsigned sym, bits; static code *next; /* literal/length table */ sym = 0; while (sym < 144) state->lens[sym++] = 8; while (sym < 256) state->lens[sym++] = 9; while (sym < 280) state->lens[sym++] = 7; while (sym < 288) state->lens[sym++] = 8; next = fixed; lenfix = next; bits = 9; inflate_table(LENS, state->lens, 288, &(next), &(bits), state->work); /* distance table */ sym = 0; while (sym < 32) state->lens[sym++] = 5; distfix = next; bits = 5; inflate_table(DISTS, state->lens, 32, &(next), &(bits), state->work); /* do this just once */ virgin = 0; } #else /* !BUILDFIXED */ # include "inffixed.h" #endif /* BUILDFIXED */ state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } #ifdef MAKEFIXED #include /* Write out the inffixed.h that is #include'd above. Defining MAKEFIXED also defines BUILDFIXED, so the tables are built on the fly. makefixed() writes those tables to stdout, which would be piped to inffixed.h. A small program can simply call makefixed to do this: void makefixed(void); int main(void) { makefixed(); return 0; } Then that can be linked with zlib built with MAKEFIXED defined and run: a.out > inffixed.h */ void makefixed() { unsigned low, size; struct inflate_state state; fixedtables(&state); puts(" /* inffixed.h -- table for decoding fixed codes"); puts(" * Generated automatically by makefixed()."); puts(" */"); puts(""); puts(" /* WARNING: this file should *not* be used by applications."); puts(" It is part of the implementation of this library and is"); puts(" subject to change. Applications should only use zlib.h."); puts(" */"); puts(""); size = 1U << 9; printf(" static const code lenfix[%u] = {", size); low = 0; for (;;) { if ((low % 7) == 0) printf("\n "); printf("{%u,%u,%d}", (low & 127) == 99 ? 
64 : state.lencode[low].op, state.lencode[low].bits, state.lencode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); size = 1U << 5; printf("\n static const code distfix[%u] = {", size); low = 0; for (;;) { if ((low % 6) == 0) printf("\n "); printf("{%u,%u,%d}", state.distcode[low].op, state.distcode[low].bits, state.distcode[low].val); if (++low == size) break; putchar(','); } puts("\n };"); } #endif /* MAKEFIXED */ /* Update the window with the last wsize (normally 32K) bytes written before returning. If window does not exist yet, create it. This is only called when a window is already in use, or when output has been written during this inflate call, but the end of the deflate stream has not been reached yet. It is also called to create a window for dictionary data when a dictionary is loaded. Providing output buffers larger than 32K to inflate() should provide a speed advantage, since only the last 32K of output is copied to the sliding window upon return from inflate(), and since all distances after the first 32K of output will fall in the output data, making match copies simpler and faster. The advantage may be dependent on the size of the processor's data caches. */ local int updatewindow(strm, end, copy) z_streamp strm; const Bytef *end; unsigned copy; { struct inflate_state FAR *state; unsigned dist; state = (struct inflate_state FAR *)strm->state; /* if it hasn't been done already, allocate space for the window */ if (state->window == Z_NULL) { state->window = (unsigned char FAR *) ZALLOC(strm, 1U << state->wbits, sizeof(unsigned char)); if (state->window == Z_NULL) return 1; } /* if window not in use yet, initialize */ if (state->wsize == 0) { state->wsize = 1U << state->wbits; state->wnext = 0; state->whave = 0; } /* copy state->wsize or less output bytes into the circular window */ if (copy >= state->wsize) { zmemcpy(state->window, end - state->wsize, state->wsize); state->wnext = 0; state->whave = state->wsize; } else { dist = state->wsize - state->wnext; if (dist > copy) dist = copy; zmemcpy(state->window + state->wnext, end - copy, dist); copy -= dist; if (copy) { zmemcpy(state->window, end - copy, copy); state->wnext = copy; state->whave = state->wsize; } else { state->wnext += dist; if (state->wnext == state->wsize) state->wnext = 0; if (state->whave < state->wsize) state->whave += dist; } } return 0; } /* Macros for inflate(): */ /* check function to use adler32() for zlib or crc32() for gzip */ #ifdef GUNZIP # define UPDATE(check, buf, len) \ (state->flags ? 
crc32(check, buf, len) : adler32(check, buf, len)) #else # define UPDATE(check, buf, len) adler32(check, buf, len) #endif /* check macros for header crc */ #ifdef GUNZIP # define CRC2(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ check = crc32(check, hbuf, 2); \ } while (0) # define CRC4(check, word) \ do { \ hbuf[0] = (unsigned char)(word); \ hbuf[1] = (unsigned char)((word) >> 8); \ hbuf[2] = (unsigned char)((word) >> 16); \ hbuf[3] = (unsigned char)((word) >> 24); \ check = crc32(check, hbuf, 4); \ } while (0) #endif /* Load registers with state in inflate() for speed */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Restore state from registers in inflate() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflate() if there is no input available. */ #define PULLBYTE() \ do { \ if (have == 0) goto inf_leave; \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflate(). */ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* inflate() uses a state machine to process as much input data and generate as much output data as possible before returning. The state machine is structured roughly as follows: for (;;) switch (state) { ... case STATEn: if (not enough input data or output space to make progress) return; ... make progress ... state = STATEm; break; ... } so when inflate() is called again, the same case is attempted again, and if the appropriate resources are provided, the machine proceeds to the next state. The NEEDBITS() macro is usually the way the state evaluates whether it can proceed or should return. NEEDBITS() does the return if the requested bits are not available. The typical use of the BITS macros is: NEEDBITS(n); ... do something with BITS(n) ... DROPBITS(n); where NEEDBITS(n) either returns from inflate() if there isn't enough input left to load n bits into the accumulator, or it continues. BITS(n) gives the low n bits in the accumulator. When done, DROPBITS(n) drops the low n bits off the accumulator. INITBITS() clears the accumulator and sets the number of available bits to zero. BYTEBITS() discards just enough bits to put the accumulator on a byte boundary. After BYTEBITS() and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return if there is no input available. The decoding of variable length codes uses PULLBYTE() directly in order to pull just enough bytes to decode the next code, and no more. 
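   For illustration only (this mirrors the loop used in the LEN state further
   below; it is not an additional code path): one length/literal code is
   decoded with

        for (;;) {
            here = state->lencode[BITS(state->lenbits)];
            if ((unsigned)(here.bits) <= bits) break;
            PULLBYTE();
        }
        DROPBITS(here.bits);

   so PULLBYTE() is invoked only while the candidate table entry needs more
   bits than the accumulator currently holds.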
Some states loop until they get enough input, making sure that enough state information is maintained to continue the loop where it left off if NEEDBITS() returns in the loop. For example, want, need, and keep would all have to actually be part of the saved state in case NEEDBITS() returns: case STATEw: while (want < need) { NEEDBITS(n); keep[want++] = BITS(n); DROPBITS(n); } state = STATEx; case STATEx: As shown above, if the next state is also the next case, then the break is omitted. A state may also return if there is not enough output space available to complete that state. Those states are copying stored data, writing a literal byte, and copying a matching string. When returning, a "goto inf_leave" is used to update the total counters, update the check value, and determine whether any progress has been made during that inflate() call in order to return the proper return code. Progress is defined as a change in either strm->avail_in or strm->avail_out. When there is a window, goto inf_leave will update the window with the last output written. If a goto inf_leave occurs in the middle of decompression and there is no window currently, goto inf_leave will create one and copy output to the window for the next call of inflate(). In this implementation, the flush parameter of inflate() only affects the return code (per zlib.h). inflate() always writes as much as possible to strm->next_out, given the space available and the provided input--the effect documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers the allocation of and copying into a sliding window until necessary, which provides the effect documented in zlib.h for Z_FINISH when the entire input stream available. So the only thing the flush parameter actually does is: when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it will return Z_BUF_ERROR if it has not reached the end of the stream. 
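   As an illustrative caller-side sketch (the names strm, in, out, have_in,
   and CHUNK are hypothetical and not part of this file), output is normally
   drained like this for each chunk of input:

        strm.avail_in = have_in;
        strm.next_in = in;
        do {
            strm.avail_out = CHUNK;
            strm.next_out = out;
            ret = inflate(&strm, Z_NO_FLUSH);
            ... use the CHUNK - strm.avail_out bytes written to out ...
        } while (strm.avail_out == 0);

   with the surrounding code feeding more input and stopping once inflate()
   returns Z_STREAM_END; passing Z_FINISH instead only changes the return
   code as described above.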
*/ int ZEXPORT inflate(strm, flush) z_streamp strm; int flush; { struct inflate_state FAR *state; z_const unsigned char FAR *next; /* next input */ unsigned char FAR *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned in, out; /* save starting available input and output */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char FAR *from; /* where to copy match bytes from */ code here; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ #ifdef GUNZIP unsigned char hbuf[4]; /* buffer for gzip header crc calculation */ #endif static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; if (inflateStateCheck(strm) || strm->next_out == Z_NULL || (strm->next_in == Z_NULL && strm->avail_in != 0)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */ LOAD(); in = have; out = left; ret = Z_OK; for (;;) switch (state->mode) { case HEAD: if (state->wrap == 0) { state->mode = TYPEDO; break; } NEEDBITS(16); #ifdef GUNZIP if ((state->wrap & 2) && hold == 0x8b1f) { /* gzip header */ if (state->wbits == 0) state->wbits = 15; state->check = crc32(0L, Z_NULL, 0); CRC2(state->check, hold); INITBITS(); state->mode = FLAGS; break; } state->flags = 0; /* expect zlib header */ if (state->head != Z_NULL) state->head->done = -1; if (!(state->wrap & 1) || /* check if zlib header allowed */ #else if ( #endif ((BITS(8) << 8) + (hold >> 8)) % 31) { strm->msg = (char *)"incorrect header check"; state->mode = BAD; break; } if (BITS(4) != Z_DEFLATED) { strm->msg = (char *)"unknown compression method"; state->mode = BAD; break; } DROPBITS(4); len = BITS(4) + 8; if (state->wbits == 0) state->wbits = len; if (len > 15 || len > state->wbits) { strm->msg = (char *)"invalid window size"; state->mode = BAD; break; } state->dmax = 1U << len; Tracev((stderr, "inflate: zlib header ok\n")); strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = hold & 0x200 ? 
DICTID : TYPE; INITBITS(); break; #ifdef GUNZIP case FLAGS: NEEDBITS(16); state->flags = (int)(hold); if ((state->flags & 0xff) != Z_DEFLATED) { strm->msg = (char *)"unknown compression method"; state->mode = BAD; break; } if (state->flags & 0xe000) { strm->msg = (char *)"unknown header flags set"; state->mode = BAD; break; } if (state->head != Z_NULL) state->head->text = (int)((hold >> 8) & 1); if ((state->flags & 0x0200) && (state->wrap & 4)) CRC2(state->check, hold); INITBITS(); state->mode = TIME; case TIME: NEEDBITS(32); if (state->head != Z_NULL) state->head->time = hold; if ((state->flags & 0x0200) && (state->wrap & 4)) CRC4(state->check, hold); INITBITS(); state->mode = OS; case OS: NEEDBITS(16); if (state->head != Z_NULL) { state->head->xflags = (int)(hold & 0xff); state->head->os = (int)(hold >> 8); } if ((state->flags & 0x0200) && (state->wrap & 4)) CRC2(state->check, hold); INITBITS(); state->mode = EXLEN; case EXLEN: if (state->flags & 0x0400) { NEEDBITS(16); state->length = (unsigned)(hold); if (state->head != Z_NULL) state->head->extra_len = (unsigned)hold; if ((state->flags & 0x0200) && (state->wrap & 4)) CRC2(state->check, hold); INITBITS(); } else if (state->head != Z_NULL) state->head->extra = Z_NULL; state->mode = EXTRA; case EXTRA: if (state->flags & 0x0400) { copy = state->length; if (copy > have) copy = have; if (copy) { if (state->head != Z_NULL && state->head->extra != Z_NULL) { len = state->head->extra_len - state->length; zmemcpy(state->head->extra + len, next, len + copy > state->head->extra_max ? state->head->extra_max - len : copy); } if ((state->flags & 0x0200) && (state->wrap & 4)) state->check = crc32(state->check, next, copy); have -= copy; next += copy; state->length -= copy; } if (state->length) goto inf_leave; } state->length = 0; state->mode = NAME; case NAME: if (state->flags & 0x0800) { if (have == 0) goto inf_leave; copy = 0; do { len = (unsigned)(next[copy++]); if (state->head != Z_NULL && state->head->name != Z_NULL && state->length < state->head->name_max) state->head->name[state->length++] = (Bytef)len; } while (len && copy < have); if ((state->flags & 0x0200) && (state->wrap & 4)) state->check = crc32(state->check, next, copy); have -= copy; next += copy; if (len) goto inf_leave; } else if (state->head != Z_NULL) state->head->name = Z_NULL; state->length = 0; state->mode = COMMENT; case COMMENT: if (state->flags & 0x1000) { if (have == 0) goto inf_leave; copy = 0; do { len = (unsigned)(next[copy++]); if (state->head != Z_NULL && state->head->comment != Z_NULL && state->length < state->head->comm_max) state->head->comment[state->length++] = (Bytef)len; } while (len && copy < have); if ((state->flags & 0x0200) && (state->wrap & 4)) state->check = crc32(state->check, next, copy); have -= copy; next += copy; if (len) goto inf_leave; } else if (state->head != Z_NULL) state->head->comment = Z_NULL; state->mode = HCRC; case HCRC: if (state->flags & 0x0200) { NEEDBITS(16); if ((state->wrap & 4) && hold != (state->check & 0xffff)) { strm->msg = (char *)"header crc mismatch"; state->mode = BAD; break; } INITBITS(); } if (state->head != Z_NULL) { state->head->hcrc = (int)((state->flags >> 9) & 1); state->head->done = 1; } strm->adler = state->check = crc32(0L, Z_NULL, 0); state->mode = TYPE; break; #endif case DICTID: NEEDBITS(32); strm->adler = state->check = ZSWAP32(hold); INITBITS(); state->mode = DICT; case DICT: if (state->havedict == 0) { RESTORE(); return Z_NEED_DICT; } strm->adler = state->check = adler32(0L, Z_NULL, 0); state->mode = TYPE; case 
TYPE: if (flush == Z_BLOCK || flush == Z_TREES) goto inf_leave; case TYPEDO: if (state->last) { BYTEBITS(); state->mode = CHECK; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch (BITS(2)) { case 0: /* stored block */ Tracev((stderr, "inflate: stored block%s\n", state->last ? " (last)" : "")); state->mode = STORED; break; case 1: /* fixed block */ fixedtables(state); Tracev((stderr, "inflate: fixed codes block%s\n", state->last ? " (last)" : "")); state->mode = LEN_; /* decode codes */ if (flush == Z_TREES) { DROPBITS(2); goto inf_leave; } break; case 2: /* dynamic block */ Tracev((stderr, "inflate: dynamic codes block%s\n", state->last ? " (last)" : "")); state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; Tracev((stderr, "inflate: stored length %u\n", state->length)); INITBITS(); state->mode = COPY_; if (flush == Z_TREES) goto inf_leave; case COPY_: state->mode = COPY; case COPY: copy = state->length; if (copy) { if (copy > have) copy = have; if (copy > left) copy = left; if (copy == 0) goto inf_leave; zmemcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; break; } Tracev((stderr, "inflate: stored end\n")); state->mode = TYPE; break; case TABLE: NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif Tracev((stderr, "inflate: table sizes ok\n")); state->have = 0; state->mode = LENLENS; case LENLENS: while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (const code FAR *)(state->next); state->lenbits = 7; ret = inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } Tracev((stderr, "inflate: code lengths ok\n")); state->have = 0; state->mode = CODELENS; case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.val < 16) { DROPBITS(here.bits); state->lens[state->have++] = here.val; } else { if (here.val == 16) { NEEDBITS(here.bits + 2); DROPBITS(here.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = state->lens[state->have - 1]; copy = 3 + BITS(2); DROPBITS(2); } else if (here.val == 17) { NEEDBITS(here.bits + 3); DROPBITS(here.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(here.bits + 7); DROPBITS(here.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* check for end-of-block code (better have one) */ if 
(state->lens[256] == 0) { strm->msg = (char *)"invalid code -- missing end-of-block"; state->mode = BAD; break; } /* build code tables -- note: do not change the lenbits or distbits values here (9 and 6) without reading the comments in inftrees.h concerning the ENOUGH constants, which depend on those values */ state->next = state->codes; state->lencode = (const code FAR *)(state->next); state->lenbits = 9; ret = inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = (const code FAR *)(state->next); state->distbits = 6; ret = inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } Tracev((stderr, "inflate: codes ok\n")); state->mode = LEN_; if (flush == Z_TREES) goto inf_leave; case LEN_: state->mode = LEN; case LEN: if (have >= 6 && left >= 258) { RESTORE(); inflate_fast(strm, out); LOAD(); if (state->mode == TYPE) state->back = -1; break; } state->back = 0; for (;;) { here = state->lencode[BITS(state->lenbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if (here.op && (here.op & 0xf0) == 0) { last = here; for (;;) { here = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); state->back += last.bits; } DROPBITS(here.bits); state->back += here.bits; state->length = (unsigned)here.val; if ((int)(here.op) == 0) { Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ? "inflate: literal '%c'\n" : "inflate: literal 0x%02x\n", here.val)); state->mode = LIT; break; } if (here.op & 32) { Tracevv((stderr, "inflate: end of block\n")); state->back = -1; state->mode = TYPE; break; } if (here.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } state->extra = (unsigned)(here.op) & 15; state->mode = LENEXT; case LENEXT: if (state->extra) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); state->back += state->extra; } Tracevv((stderr, "inflate: length %u\n", state->length)); state->was = state->length; state->mode = DIST; case DIST: for (;;) { here = state->distcode[BITS(state->distbits)]; if ((unsigned)(here.bits) <= bits) break; PULLBYTE(); } if ((here.op & 0xf0) == 0) { last = here; for (;;) { here = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + here.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); state->back += last.bits; } DROPBITS(here.bits); state->back += here.bits; if (here.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)here.val; state->extra = (unsigned)(here.op) & 15; state->mode = DISTEXT; case DISTEXT: if (state->extra) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); state->back += state->extra; } #ifdef INFLATE_STRICT if (state->offset > state->dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif Tracevv((stderr, "inflate: distance %u\n", state->offset)); state->mode = MATCH; case MATCH: if (left == 0) goto inf_leave; copy = out - left; if (state->offset > copy) { /* copy from window */ copy = state->offset - copy; if (copy > state->whave) { if (state->sane) { strm->msg = (char *)"invalid distance too far back"; 
state->mode = BAD; break; } #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR Trace((stderr, "inflate.c too far\n")); copy -= state->whave; if (copy > state->length) copy = state->length; if (copy > left) copy = left; left -= copy; state->length -= copy; do { *put++ = 0; } while (--copy); if (state->length == 0) state->mode = LEN; break; #endif } if (copy > state->wnext) { copy -= state->wnext; from = state->window + (state->wsize - copy); } else from = state->window + (state->wnext - copy); if (copy > state->length) copy = state->length; } else { /* copy from output */ from = put - state->offset; copy = state->length; } if (copy > left) copy = left; left -= copy; state->length -= copy; do { *put++ = *from++; } while (--copy); if (state->length == 0) state->mode = LEN; break; case LIT: if (left == 0) goto inf_leave; *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; case CHECK: if (state->wrap) { NEEDBITS(32); out -= left; strm->total_out += out; state->total += out; if ((state->wrap & 4) && out) strm->adler = state->check = UPDATE(state->check, put - out, out); out = left; if ((state->wrap & 4) && ( #ifdef GUNZIP state->flags ? hold : #endif ZSWAP32(hold)) != state->check) { strm->msg = (char *)"incorrect data check"; state->mode = BAD; break; } INITBITS(); Tracev((stderr, "inflate: check matches trailer\n")); } #ifdef GUNZIP state->mode = LENGTH; case LENGTH: if (state->wrap && state->flags) { NEEDBITS(32); if (hold != (state->total & 0xffffffffUL)) { strm->msg = (char *)"incorrect length check"; state->mode = BAD; break; } INITBITS(); Tracev((stderr, "inflate: length matches trailer\n")); } #endif state->mode = DONE; case DONE: ret = Z_STREAM_END; goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; case MEM: return Z_MEM_ERROR; case SYNC: default: return Z_STREAM_ERROR; } /* Return from inflate(), updating the total counts and the check value. If there was no progress during the inflate() call, return a buffer error. Call updatewindow() to create and/or update the window state. Note: a memory error from inflate() is non-recoverable. */ inf_leave: RESTORE(); if (state->wsize || (out != strm->avail_out && state->mode < BAD && (state->mode < CHECK || flush != Z_FINISH))) if (updatewindow(strm, strm->next_out, out - strm->avail_out)) { state->mode = MEM; return Z_MEM_ERROR; } in -= strm->avail_in; out -= strm->avail_out; strm->total_in += in; strm->total_out += out; state->total += out; if ((state->wrap & 4) && out) strm->adler = state->check = UPDATE(state->check, strm->next_out - out, out); strm->data_type = (int)state->bits + (state->last ? 64 : 0) + (state->mode == TYPE ? 128 : 0) + (state->mode == LEN_ || state->mode == COPY_ ? 
256 : 0); if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) ret = Z_BUF_ERROR; return ret; } int ZEXPORT inflateEnd(strm) z_streamp strm; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->window != Z_NULL) ZFREE(strm, state->window); ZFREE(strm, strm->state); strm->state = Z_NULL; Tracev((stderr, "inflate: end\n")); return Z_OK; } int ZEXPORT inflateGetDictionary(strm, dictionary, dictLength) z_streamp strm; Bytef *dictionary; uInt *dictLength; { struct inflate_state FAR *state; /* check state */ if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; /* copy dictionary */ if (state->whave && dictionary != Z_NULL) { zmemcpy(dictionary, state->window + state->wnext, state->whave - state->wnext); zmemcpy(dictionary + state->whave - state->wnext, state->window, state->wnext); } if (dictLength != Z_NULL) *dictLength = state->whave; return Z_OK; } int ZEXPORT inflateSetDictionary(strm, dictionary, dictLength) z_streamp strm; const Bytef *dictionary; uInt dictLength; { struct inflate_state FAR *state; unsigned long dictid; int ret; /* check state */ if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (state->wrap != 0 && state->mode != DICT) return Z_STREAM_ERROR; /* check for correct dictionary identifier */ if (state->mode == DICT) { dictid = adler32(0L, Z_NULL, 0); dictid = adler32(dictid, dictionary, dictLength); if (dictid != state->check) return Z_DATA_ERROR; } /* copy dictionary to window using updatewindow(), which will amend the existing dictionary if appropriate */ ret = updatewindow(strm, dictionary + dictLength, dictLength); if (ret) { state->mode = MEM; return Z_MEM_ERROR; } state->havedict = 1; Tracev((stderr, "inflate: dictionary set\n")); return Z_OK; } int ZEXPORT inflateGetHeader(strm, head) z_streamp strm; gz_headerp head; { struct inflate_state FAR *state; /* check state */ if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if ((state->wrap & 2) == 0) return Z_STREAM_ERROR; /* save header structure */ state->head = head; head->done = 0; return Z_OK; } /* Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found or when out of input. When called, *have is the number of pattern bytes found in order so far, in 0..3. On return *have is updated to the new state. If on return *have equals four, then the pattern was found and the return value is how many bytes were read including the last byte of the pattern. If *have is less than four, then the pattern has not been found yet and the return value is len. In the latter case, syncsearch() can be called again with more data and the *have state. *have is initialized to zero for the first call. */ local unsigned syncsearch(have, buf, len) unsigned FAR *have; const unsigned char FAR *buf; unsigned len; { unsigned got; unsigned next; got = *have; next = 0; while (next < len && got < 4) { if ((int)(buf[next]) == (got < 2 ? 
0 : 0xff)) got++; else if (buf[next]) got = 0; else got = 4 - got; next++; } *have = got; return next; } int ZEXPORT inflateSync(strm) z_streamp strm; { unsigned len; /* number of bytes to look at or looked at */ unsigned long in, out; /* temporary to save total_in and total_out */ unsigned char buf[4]; /* to restore bit buffer to byte string */ struct inflate_state FAR *state; /* check parameters */ if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; /* if first time, start search in bit buffer */ if (state->mode != SYNC) { state->mode = SYNC; state->hold <<= state->bits & 7; state->bits -= state->bits & 7; len = 0; while (state->bits >= 8) { buf[len++] = (unsigned char)(state->hold); state->hold >>= 8; state->bits -= 8; } state->have = 0; syncsearch(&(state->have), buf, len); } /* search available input */ len = syncsearch(&(state->have), strm->next_in, strm->avail_in); strm->avail_in -= len; strm->next_in += len; strm->total_in += len; /* return no joy or set up to restart inflate() on a new block */ if (state->have != 4) return Z_DATA_ERROR; in = strm->total_in; out = strm->total_out; inflateReset(strm); strm->total_in = in; strm->total_out = out; state->mode = TYPE; return Z_OK; } /* Returns true if inflate is currently at the end of a block generated by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored block. When decompressing, PPP checks that at the end of input packet, inflate is waiting for these length bytes. */ int ZEXPORT inflateSyncPoint(strm) z_streamp strm; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; return state->mode == STORED && state->bits == 0; } int ZEXPORT inflateCopy(dest, source) z_streamp dest; z_streamp source; { struct inflate_state FAR *state; struct inflate_state FAR *copy; unsigned char FAR *window; unsigned wsize; /* check input */ if (inflateStateCheck(source) || dest == Z_NULL) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)source->state; /* allocate space */ copy = (struct inflate_state FAR *) ZALLOC(source, 1, sizeof(struct inflate_state)); if (copy == Z_NULL) return Z_MEM_ERROR; window = Z_NULL; if (state->window != Z_NULL) { window = (unsigned char FAR *) ZALLOC(source, 1U << state->wbits, sizeof(unsigned char)); if (window == Z_NULL) { ZFREE(source, copy); return Z_MEM_ERROR; } } /* copy state */ zmemcpy((voidpf)dest, (voidpf)source, sizeof(z_stream)); zmemcpy((voidpf)copy, (voidpf)state, sizeof(struct inflate_state)); copy->strm = dest; if (state->lencode >= state->codes && state->lencode <= state->codes + ENOUGH - 1) { copy->lencode = copy->codes + (state->lencode - state->codes); copy->distcode = copy->codes + (state->distcode - state->codes); } copy->next = copy->codes + (state->next - state->codes); if (window != Z_NULL) { wsize = 1U << state->wbits; zmemcpy(window, state->window, wsize); } copy->window = window; dest->state = (struct internal_state FAR *)copy; return Z_OK; } int ZEXPORT inflateUndermine(strm, subvert) z_streamp strm; int subvert; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; #ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR state->sane = !subvert; return Z_OK; #else (void)subvert; 
state->sane = 1; return Z_DATA_ERROR; #endif } int ZEXPORT inflateValidate(strm, check) z_streamp strm; int check; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return Z_STREAM_ERROR; state = (struct inflate_state FAR *)strm->state; if (check) state->wrap |= 4; else state->wrap &= ~4; return Z_OK; } long ZEXPORT inflateMark(strm) z_streamp strm; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return -(1L << 16); state = (struct inflate_state FAR *)strm->state; return (long)(((unsigned long)((long)state->back)) << 16) + (state->mode == COPY ? state->length : (state->mode == MATCH ? state->was - state->length : 0)); } unsigned long ZEXPORT inflateCodesUsed(strm) z_streamp strm; { struct inflate_state FAR *state; if (inflateStateCheck(strm)) return (unsigned long)-1; state = (struct inflate_state FAR *)strm->state; return (unsigned long)(state->next - state->codes); } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/inflate.h0000644000076500000240000001473213572250760021262 0ustar alcaeusstaff/* inflate.h -- internal inflate state definition * Copyright (C) 1995-2016 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* define NO_GZIP when compiling if you want to disable gzip header and trailer decoding by inflate(). NO_GZIP would be used to avoid linking in the crc code when it is not needed. For shared libraries, gzip decoding should be left enabled. */ #ifndef NO_GZIP # define GUNZIP #endif /* Possible inflate modes between inflate() calls */ typedef enum { HEAD = 16180, /* i: waiting for magic header */ FLAGS, /* i: waiting for method and flags (gzip) */ TIME, /* i: waiting for modification time (gzip) */ OS, /* i: waiting for extra flags and operating system (gzip) */ EXLEN, /* i: waiting for extra length (gzip) */ EXTRA, /* i: waiting for extra bytes (gzip) */ NAME, /* i: waiting for end of file name (gzip) */ COMMENT, /* i: waiting for end of comment (gzip) */ HCRC, /* i: waiting for header crc (gzip) */ DICTID, /* i: waiting for dictionary check value */ DICT, /* waiting for inflateSetDictionary() call */ TYPE, /* i: waiting for type bits, including last-flag bit */ TYPEDO, /* i: same, but skip check to exit inflate on new block */ STORED, /* i: waiting for stored size (length and complement) */ COPY_, /* i/o: same as COPY below, but only first time in */ COPY, /* i/o: waiting for input or output to copy stored block */ TABLE, /* i: waiting for dynamic block table lengths */ LENLENS, /* i: waiting for code length code lengths */ CODELENS, /* i: waiting for length/lit and distance code lengths */ LEN_, /* i: same as LEN below, but only first time in */ LEN, /* i: waiting for length/lit/eob code */ LENEXT, /* i: waiting for length extra bits */ DIST, /* i: waiting for distance code */ DISTEXT, /* i: waiting for distance extra bits */ MATCH, /* o: waiting for output space to copy string */ LIT, /* o: waiting for output space to write literal */ CHECK, /* i: waiting for 32-bit check value */ LENGTH, /* i: waiting for 32-bit length (gzip) */ DONE, /* finished check, done -- remain here until reset */ BAD, /* got a data error -- remain here until reset */ MEM, /* got an inflate() memory error -- remain here until reset */ SYNC /* looking for synchronization bytes to restart inflate() */ } inflate_mode; /* State transitions between above modes 
- (most modes can go to BAD or MEM on error -- not shown for clarity) Process header: HEAD -> (gzip) or (zlib) or (raw) (gzip) -> FLAGS -> TIME -> OS -> EXLEN -> EXTRA -> NAME -> COMMENT -> HCRC -> TYPE (zlib) -> DICTID or TYPE DICTID -> DICT -> TYPE (raw) -> TYPEDO Read deflate blocks: TYPE -> TYPEDO -> STORED or TABLE or LEN_ or CHECK STORED -> COPY_ -> COPY -> TYPE TABLE -> LENLENS -> CODELENS -> LEN_ LEN_ -> LEN Read deflate codes in fixed or dynamic block: LEN -> LENEXT or LIT or TYPE LENEXT -> DIST -> DISTEXT -> MATCH -> LEN LIT -> LEN Process trailer: CHECK -> LENGTH -> DONE */ /* State maintained between inflate() calls -- approximately 7K bytes, not including the allocated sliding window, which is up to 32K bytes. */ struct inflate_state { z_streamp strm; /* pointer back to this zlib stream */ inflate_mode mode; /* current inflate mode */ int last; /* true if processing last block */ int wrap; /* bit 0 true for zlib, bit 1 true for gzip, bit 2 true to validate check value */ int havedict; /* true if dictionary provided */ int flags; /* gzip header method and flags (0 if zlib) */ unsigned dmax; /* zlib header max distance (INFLATE_STRICT) */ unsigned long check; /* protected copy of check value */ unsigned long total; /* protected copy of output count */ gz_headerp head; /* where to save gzip header information */ /* sliding window */ unsigned wbits; /* log base 2 of requested window size */ unsigned wsize; /* window size or zero if not using window */ unsigned whave; /* valid bytes in the window */ unsigned wnext; /* window write index */ unsigned char FAR *window; /* allocated sliding window, if needed */ /* bit accumulator */ unsigned long hold; /* input bit accumulator */ unsigned bits; /* number of bits in "in" */ /* for string and stored block copying */ unsigned length; /* literal or length of data to copy */ unsigned offset; /* distance back to copy string from */ /* for table and code decoding */ unsigned extra; /* extra bits needed */ /* fixed and dynamic code tables */ code const FAR *lencode; /* starting table for length/literal codes */ code const FAR *distcode; /* starting table for distance codes */ unsigned lenbits; /* index bits for lencode */ unsigned distbits; /* index bits for distcode */ /* dynamic table building */ unsigned ncode; /* number of code length code lengths */ unsigned nlen; /* number of length code lengths */ unsigned ndist; /* number of distance code lengths */ unsigned have; /* number of code lengths in lens[] */ code FAR *next; /* next available space in codes[] */ unsigned short lens[320]; /* temporary storage for code lengths */ unsigned short work[288]; /* work area for code table building */ code codes[ENOUGH]; /* space for code tables */ int sane; /* if false, allow invalid distance too far */ int back; /* bits back of last unprocessed length/lit */ unsigned was; /* initial length of match */ }; mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/inftrees.c0000644000076500000240000003130713572250760021447 0ustar alcaeusstaff/* inftrees.c -- generate Huffman trees for efficient decoding * Copyright (C) 1995-2017 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ #include "zutil.h" #include "inftrees.h" #define MAXBITS 15 const char inflate_copyright[] = " inflate 1.2.11 Copyright 1995-2017 Mark Adler "; /* If you use the zlib library in a product, an acknowledgment is welcome in the documentation of your product. 
If for some reason you cannot include such an acknowledgment, I would appreciate that you keep this copyright string in the executable of your product. */ /* Build a set of tables to decode the provided canonical Huffman code. The code lengths are lens[0..codes-1]. The result starts at *table, whose indices are 0..2^bits-1. work is a writable array of at least lens shorts, which is used as a work area. type is the type of code to be generated, CODES, LENS, or DISTS. On return, zero is success, -1 is an invalid code, and +1 means that ENOUGH isn't enough. table on return points to the next available entry's address. bits is the requested root table index bits, and on return it is the actual root table index bits. It will differ if the request is greater than the longest code or if it is less than the shortest code. */ int ZLIB_INTERNAL inflate_table(type, lens, codes, table, bits, work) codetype type; unsigned short FAR *lens; unsigned codes; code FAR * FAR *table; unsigned FAR *bits; unsigned short FAR *work; { unsigned len; /* a code's length in bits */ unsigned sym; /* index of code symbols */ unsigned min, max; /* minimum and maximum code lengths */ unsigned root; /* number of index bits for root table */ unsigned curr; /* number of index bits for current table */ unsigned drop; /* code bits to drop for sub-table */ int left; /* number of prefix codes available */ unsigned used; /* code entries in table used */ unsigned huff; /* Huffman code */ unsigned incr; /* for incrementing code, index */ unsigned fill; /* index for replicating entries */ unsigned low; /* low bits for current root entry */ unsigned mask; /* mask for low root bits */ code here; /* table entry for duplication */ code FAR *next; /* next available space in table */ const unsigned short FAR *base; /* base value table to use */ const unsigned short FAR *extra; /* extra bits table to use */ unsigned match; /* use base and extra for symbol >= match */ unsigned short count[MAXBITS+1]; /* number of codes of each length */ unsigned short offs[MAXBITS+1]; /* offsets in table for each length */ static const unsigned short lbase[31] = { /* Length codes 257..285 base */ 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0}; static const unsigned short lext[31] = { /* Length codes 257..285 extra */ 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 77, 202}; static const unsigned short dbase[32] = { /* Distance codes 0..29 base */ 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0}; static const unsigned short dext[32] = { /* Distance codes 0..29 extra */ 16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28, 28, 29, 29, 64, 64}; /* Process a set of code lengths to create a canonical Huffman code. The code lengths are lens[0..codes-1]. Each length corresponds to the symbols 0..codes-1. The Huffman code is generated by first sorting the symbols by length from short to long, and retaining the symbol order for codes with equal lengths. Then the code starts with all zero bits for the first code of the shortest length, and the codes are integer increments for the same length, and zeros are appended as the length increases. 
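   For a small illustrative example (not drawn from a real deflate stream),
   four symbols with code lengths 1, 2, 3 and 3 receive the canonical codes

        symbol 0 (length 1): 0
        symbol 1 (length 2): 10
        symbol 2 (length 3): 110
        symbol 3 (length 3): 111

   where each step either increments the previous code or appends zeros as
   the length grows, exactly as described above.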
For the deflate format, these bits are stored backwards from their more natural integer increment ordering, and so when the decoding tables are built in the large loop below, the integer codes are incremented backwards. This routine assumes, but does not check, that all of the entries in lens[] are in the range 0..MAXBITS. The caller must assure this. 1..MAXBITS is interpreted as that code length. zero means that that symbol does not occur in this code. The codes are sorted by computing a count of codes for each length, creating from that a table of starting indices for each length in the sorted table, and then entering the symbols in order in the sorted table. The sorted table is work[], with that space being provided by the caller. The length counts are used for other purposes as well, i.e. finding the minimum and maximum length codes, determining if there are any codes at all, checking for a valid set of lengths, and looking ahead at length counts to determine sub-table sizes when building the decoding tables. */ /* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */ for (len = 0; len <= MAXBITS; len++) count[len] = 0; for (sym = 0; sym < codes; sym++) count[lens[sym]]++; /* bound code lengths, force root to be within code lengths */ root = *bits; for (max = MAXBITS; max >= 1; max--) if (count[max] != 0) break; if (root > max) root = max; if (max == 0) { /* no symbols to code at all */ here.op = (unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)1; here.val = (unsigned short)0; *(*table)++ = here; /* make a table to force an error */ *(*table)++ = here; *bits = 1; return 0; /* no symbols, but wait for decoding to report error */ } for (min = 1; min < max; min++) if (count[min] != 0) break; if (root < min) root = min; /* check for an over-subscribed or incomplete set of lengths */ left = 1; for (len = 1; len <= MAXBITS; len++) { left <<= 1; left -= count[len]; if (left < 0) return -1; /* over-subscribed */ } if (left > 0 && (type == CODES || max != 1)) return -1; /* incomplete set */ /* generate offsets into symbol table for each length for sorting */ offs[1] = 0; for (len = 1; len < MAXBITS; len++) offs[len + 1] = offs[len] + count[len]; /* sort symbols by length, by symbol order within each length */ for (sym = 0; sym < codes; sym++) if (lens[sym] != 0) work[offs[lens[sym]]++] = (unsigned short)sym; /* Create and fill in decoding tables. In this loop, the table being filled is at next and has curr index bits. The code being used is huff with length len. That code is converted to an index by dropping drop bits off of the bottom. For codes where len is less than drop + curr, those top drop + curr - len bits are incremented through all values to fill the table with replicated entries. root is the number of index bits for the root table. When len exceeds root, sub-tables are created pointed to by the root entry with an index of the low root bits of huff. This is saved in low to check for when a new sub-table should be started. drop is zero when the root table is being filled, and drop is root when sub-tables are being filled. When a new sub-table is needed, it is necessary to look ahead in the code lengths to determine what size sub-table is needed. The length counts are used for this, and so count[] is decremented as codes are entered in the tables. used keeps track of how many table entries have been allocated from the provided *table space. 
It is checked for LENS and DIST tables against the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in the initial root table size constants. See the comments in inftrees.h for more information. sym increments through all symbols, and the loop terminates when all codes of length max, i.e. all codes, have been processed. This routine permits incomplete codes, so another loop after this one fills in the rest of the decoding tables with invalid code markers. */ /* set up for code type */ switch (type) { case CODES: base = extra = work; /* dummy value--not used */ match = 20; break; case LENS: base = lbase; extra = lext; match = 257; break; default: /* DISTS */ base = dbase; extra = dext; match = 0; } /* initialize state for loop */ huff = 0; /* starting code */ sym = 0; /* starting code symbol */ len = min; /* starting code length */ next = *table; /* current table to fill in */ curr = root; /* current table index bits */ drop = 0; /* current bits to drop from code for index */ low = (unsigned)(-1); /* trigger new sub-table when len > root */ used = 1U << root; /* use root table entries */ mask = used - 1; /* mask for comparing low */ /* check available table space */ if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* process all codes and make table entries */ for (;;) { /* create table entry */ here.bits = (unsigned char)(len - drop); if (work[sym] + 1U < match) { here.op = (unsigned char)0; here.val = work[sym]; } else if (work[sym] >= match) { here.op = (unsigned char)(extra[work[sym] - match]); here.val = base[work[sym] - match]; } else { here.op = (unsigned char)(32 + 64); /* end of block */ here.val = 0; } /* replicate for those indices with low len bits equal to huff */ incr = 1U << (len - drop); fill = 1U << curr; min = fill; /* save offset to next table */ do { fill -= incr; next[(huff >> drop) + fill] = here; } while (fill != 0); /* backwards increment the len-bit code huff */ incr = 1U << (len - 1); while (huff & incr) incr >>= 1; if (incr != 0) { huff &= incr - 1; huff += incr; } else huff = 0; /* go to next symbol, update count, len */ sym++; if (--(count[len]) == 0) { if (len == max) break; len = lens[work[sym]]; } /* create new sub-table if needed */ if (len > root && (huff & mask) != low) { /* if first time, transition to sub-tables */ if (drop == 0) drop = root; /* increment past last table */ next += min; /* here min is 1 << curr */ /* determine length of next table */ curr = len - drop; left = (int)(1 << curr); while (curr + drop < max) { left -= count[curr + drop]; if (left <= 0) break; curr++; left <<= 1; } /* check for enough space */ used += 1U << curr; if ((type == LENS && used > ENOUGH_LENS) || (type == DISTS && used > ENOUGH_DISTS)) return 1; /* point entry in root table to sub-table */ low = huff & mask; (*table)[low].op = (unsigned char)curr; (*table)[low].bits = (unsigned char)root; (*table)[low].val = (unsigned short)(next - *table); } } /* fill in remaining table entry if code is incomplete (guaranteed to have at most one remaining entry, since if the code is incomplete, the maximum code length that was allowed to get this far is one bit) */ if (huff != 0) { here.op = (unsigned char)64; /* invalid code marker */ here.bits = (unsigned char)(len - drop); here.val = (unsigned short)0; next[huff] = here; } /* set return parameters */ *table += used; *bits = root; return 0; } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/inftrees.h0000644000076500000240000000556013572250760021456 0ustar alcaeusstaff/* 
inftrees.h -- header to use inftrees.c * Copyright (C) 1995-2005, 2010 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. */ /* Structure for decoding tables. Each entry provides either the information needed to do the operation requested by the code that indexed that table entry, or it provides a pointer to another table that indexes more bits of the code. op indicates whether the entry is a pointer to another table, a literal, a length or distance, an end-of-block, or an invalid code. For a table pointer, the low four bits of op is the number of index bits of that table. For a length or distance, the low four bits of op is the number of extra bits to get after the code. bits is the number of bits in this code or part of the code to drop off of the bit buffer. val is the actual byte to output in the case of a literal, the base length or distance, or the offset from the current table to the next table. Each entry is four bytes. */ typedef struct { unsigned char op; /* operation, extra bits, table bits */ unsigned char bits; /* bits in this part of the code */ unsigned short val; /* offset in table or code value */ } code; /* op values as set by inflate_table(): 00000000 - literal 0000tttt - table link, tttt != 0 is the number of table index bits 0001eeee - length or distance, eeee is the number of extra bits 01100000 - end of block 01000000 - invalid code */ /* Maximum size of the dynamic table. The maximum number of code structures is 1444, which is the sum of 852 for literal/length codes and 592 for distance codes. These values were found by exhaustive searches using the program examples/enough.c found in the zlib distribtution. The arguments to that program are the number of symbols, the initial root table size, and the maximum bit length of a code. "enough 286 9 15" for literal/length codes returns returns 852, and "enough 30 6 15" for distance codes returns 592. The initial root table size (9 or 6) is found in the fifth argument of the inflate_table() calls in inflate.c and infback.c. If the root table size is changed, then these maximum sizes would be need to be recalculated and updated. */ #define ENOUGH_LENS 852 #define ENOUGH_DISTS 592 #define ENOUGH (ENOUGH_LENS+ENOUGH_DISTS) /* Type of code to build for inflate_table() */ typedef enum { CODES, LENS, DISTS } codetype; int ZLIB_INTERNAL inflate_table OF((codetype type, unsigned short FAR *lens, unsigned codes, code FAR * FAR *table, unsigned FAR *bits, unsigned short FAR *work)); mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/trees.c0000644000076500000240000012536113572250760020756 0ustar alcaeusstaff/* trees.c -- output deflated data using Huffman coding * Copyright (C) 1995-2017 Jean-loup Gailly * detect_data_type() function provided freely by Cosmin Truta, 2006 * For conditions of distribution and use, see copyright notice in zlib.h */ /* * ALGORITHM * * The "deflation" process uses several Huffman trees. The more * common source values are represented by shorter bit sequences. * * Each code tree is stored in a compressed form which is itself * a Huffman encoding of the lengths of all the code strings (in * ascending order by source values). The actual code strings are * reconstructed from the lengths in the inflate process, as described * in the deflate specification. 
* * REFERENCES * * Deutsch, L.P.,"'Deflate' Compressed Data Format Specification". * Available in ftp.uu.net:/pub/archiving/zip/doc/deflate-1.1.doc * * Storer, James A. * Data Compression: Methods and Theory, pp. 49-50. * Computer Science Press, 1988. ISBN 0-7167-8156-5. * * Sedgewick, R. * Algorithms, p290. * Addison-Wesley, 1983. ISBN 0-201-06672-6. */ /* @(#) $Id$ */ /* #define GEN_TREES_H */ #include "deflate.h" #ifdef ZLIB_DEBUG # include #endif /* =========================================================================== * Constants */ #define MAX_BL_BITS 7 /* Bit length codes must not exceed MAX_BL_BITS bits */ #define END_BLOCK 256 /* end of block literal code */ #define REP_3_6 16 /* repeat previous bit length 3-6 times (2 bits of repeat count) */ #define REPZ_3_10 17 /* repeat a zero length 3-10 times (3 bits of repeat count) */ #define REPZ_11_138 18 /* repeat a zero length 11-138 times (7 bits of repeat count) */ local const int extra_lbits[LENGTH_CODES] /* extra bits for each length code */ = {0,0,0,0,0,0,0,0,1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4,5,5,5,5,0}; local const int extra_dbits[D_CODES] /* extra bits for each distance code */ = {0,0,0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,10,10,11,11,12,12,13,13}; local const int extra_blbits[BL_CODES]/* extra bits for each bit length code */ = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,3,7}; local const uch bl_order[BL_CODES] = {16,17,18,0,8,7,9,6,10,5,11,4,12,3,13,2,14,1,15}; /* The lengths of the bit length codes are sent in order of decreasing * probability, to avoid transmitting the lengths for unused bit length codes. */ /* =========================================================================== * Local data. These are initialized only once. */ #define DIST_CODE_LEN 512 /* see definition of array dist_code below */ #if defined(GEN_TREES_H) || !defined(STDC) /* non ANSI compilers may not accept trees.h */ local ct_data static_ltree[L_CODES+2]; /* The static literal tree. Since the bit lengths are imposed, there is no * need for the L_CODES extra codes used during heap construction. However * The codes 286 and 287 are needed to build a canonical tree (see _tr_init * below). */ local ct_data static_dtree[D_CODES]; /* The static distance tree. (Actually a trivial tree since all codes use * 5 bits.) */ uch _dist_code[DIST_CODE_LEN]; /* Distance codes. The first 256 values correspond to the distances * 3 .. 258, the last 256 values correspond to the top 8 bits of * the 15 bit distances. 
*/ uch _length_code[MAX_MATCH-MIN_MATCH+1]; /* length code for each normalized match length (0 == MIN_MATCH) */ local int base_length[LENGTH_CODES]; /* First normalized length for each code (0 = MIN_MATCH) */ local int base_dist[D_CODES]; /* First normalized distance for each code (0 = distance of 1) */ #else # include "trees.h" #endif /* GEN_TREES_H */ struct static_tree_desc_s { const ct_data *static_tree; /* static tree or NULL */ const intf *extra_bits; /* extra bits for each code or NULL */ int extra_base; /* base index for extra_bits */ int elems; /* max number of elements in the tree */ int max_length; /* max bit length for the codes */ }; local const static_tree_desc static_l_desc = {static_ltree, extra_lbits, LITERALS+1, L_CODES, MAX_BITS}; local const static_tree_desc static_d_desc = {static_dtree, extra_dbits, 0, D_CODES, MAX_BITS}; local const static_tree_desc static_bl_desc = {(const ct_data *)0, extra_blbits, 0, BL_CODES, MAX_BL_BITS}; /* =========================================================================== * Local (static) routines in this file. */ local void tr_static_init OF((void)); local void init_block OF((deflate_state *s)); local void pqdownheap OF((deflate_state *s, ct_data *tree, int k)); local void gen_bitlen OF((deflate_state *s, tree_desc *desc)); local void gen_codes OF((ct_data *tree, int max_code, ushf *bl_count)); local void build_tree OF((deflate_state *s, tree_desc *desc)); local void scan_tree OF((deflate_state *s, ct_data *tree, int max_code)); local void send_tree OF((deflate_state *s, ct_data *tree, int max_code)); local int build_bl_tree OF((deflate_state *s)); local void send_all_trees OF((deflate_state *s, int lcodes, int dcodes, int blcodes)); local void compress_block OF((deflate_state *s, const ct_data *ltree, const ct_data *dtree)); local int detect_data_type OF((deflate_state *s)); local unsigned bi_reverse OF((unsigned value, int length)); local void bi_windup OF((deflate_state *s)); local void bi_flush OF((deflate_state *s)); #ifdef GEN_TREES_H local void gen_trees_header OF((void)); #endif #ifndef ZLIB_DEBUG # define send_code(s, c, tree) send_bits(s, tree[c].Code, tree[c].Len) /* Send a code of the given tree. c and tree must not have side effects */ #else /* !ZLIB_DEBUG */ # define send_code(s, c, tree) \ { if (z_verbose>2) fprintf(stderr,"\ncd %3d ",(c)); \ send_bits(s, tree[c].Code, tree[c].Len); } #endif /* =========================================================================== * Output a short LSB first on the stream. * IN assertion: there is enough room in pendingBuf. */ #define put_short(s, w) { \ put_byte(s, (uch)((w) & 0xff)); \ put_byte(s, (uch)((ush)(w) >> 8)); \ } /* =========================================================================== * Send a value on a given number of bits. * IN assertion: length <= 16 and value fits in length bits. */ #ifdef ZLIB_DEBUG local void send_bits OF((deflate_state *s, int value, int length)); local void send_bits(s, value, length) deflate_state *s; int value; /* value to send */ int length; /* number of bits */ { Tracevv((stderr," l %2d v %4x ", length, value)); Assert(length > 0 && length <= 15, "invalid length"); s->bits_sent += (ulg)length; /* If not enough room in bi_buf, use (valid) bits from bi_buf and * (16 - bi_valid) bits from value, leaving (width - (16-bi_valid)) * unused bits in value. 
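 * As a worked illustration (assuming Buf_size is 16 bits, which this comment
 * does not itself state): with bi_valid == 10 and length == 9, only six bits
 * fit, so the low 6 bits of value top off bi_buf, the filled 16-bit buffer is
 * flushed with put_short(), and the remaining 3 high bits of value become the
 * new bi_buf with bi_valid == 3.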
*/ if (s->bi_valid > (int)Buf_size - length) { s->bi_buf |= (ush)value << s->bi_valid; put_short(s, s->bi_buf); s->bi_buf = (ush)value >> (Buf_size - s->bi_valid); s->bi_valid += length - Buf_size; } else { s->bi_buf |= (ush)value << s->bi_valid; s->bi_valid += length; } } #else /* !ZLIB_DEBUG */ #define send_bits(s, value, length) \ { int len = length;\ if (s->bi_valid > (int)Buf_size - len) {\ int val = (int)value;\ s->bi_buf |= (ush)val << s->bi_valid;\ put_short(s, s->bi_buf);\ s->bi_buf = (ush)val >> (Buf_size - s->bi_valid);\ s->bi_valid += len - Buf_size;\ } else {\ s->bi_buf |= (ush)(value) << s->bi_valid;\ s->bi_valid += len;\ }\ } #endif /* ZLIB_DEBUG */ /* the arguments must not have side effects */ /* =========================================================================== * Initialize the various 'constant' tables. */ local void tr_static_init() { #if defined(GEN_TREES_H) || !defined(STDC) static int static_init_done = 0; int n; /* iterates over tree elements */ int bits; /* bit counter */ int length; /* length value */ int code; /* code value */ int dist; /* distance index */ ush bl_count[MAX_BITS+1]; /* number of codes at each bit length for an optimal tree */ if (static_init_done) return; /* For some embedded targets, global variables are not initialized: */ #ifdef NO_INIT_GLOBAL_POINTERS static_l_desc.static_tree = static_ltree; static_l_desc.extra_bits = extra_lbits; static_d_desc.static_tree = static_dtree; static_d_desc.extra_bits = extra_dbits; static_bl_desc.extra_bits = extra_blbits; #endif /* Initialize the mapping length (0..255) -> length code (0..28) */ length = 0; for (code = 0; code < LENGTH_CODES-1; code++) { base_length[code] = length; for (n = 0; n < (1<<extra_lbits[code]); n++) { _length_code[length++] = (uch)code; } } Assert (length == 256, "tr_static_init: length != 256"); /* Note that the length 255 (match length 258) can be represented * in two different ways: code 284 + 5 bits or code 285, so we * overwrite length_code[255] to use the best encoding: */ _length_code[length-1] = (uch)code; /* Initialize the mapping dist (0..32K) -> dist code (0..29) */ dist = 0; for (code = 0 ; code < 16; code++) { base_dist[code] = dist; for (n = 0; n < (1<<extra_dbits[code]); n++) { _dist_code[dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: dist != 256"); dist >>= 7; /* from now on, all distances are divided by 128 */ for ( ; code < D_CODES; code++) { base_dist[code] = dist << 7; for (n = 0; n < (1<<(extra_dbits[code]-7)); n++) { _dist_code[256 + dist++] = (uch)code; } } Assert (dist == 256, "tr_static_init: 256+dist != 512"); /* Construct the codes of the static literal tree */ for (bits = 0; bits <= MAX_BITS; bits++) bl_count[bits] = 0; n = 0; while (n <= 143) static_ltree[n++].Len = 8, bl_count[8]++; while (n <= 255) static_ltree[n++].Len = 9, bl_count[9]++; while (n <= 279) static_ltree[n++].Len = 7, bl_count[7]++; while (n <= 287) static_ltree[n++].Len = 8, bl_count[8]++; /* Codes 286 and 287 do not exist, but we must include them in the * tree construction to get a canonical Huffman tree (longest code * all ones) */ gen_codes((ct_data *)static_ltree, L_CODES+1, bl_count); /* The static distance tree is trivial: */ for (n = 0; n < D_CODES; n++) { static_dtree[n].Len = 5; static_dtree[n].Code = bi_reverse((unsigned)n, 5); } static_init_done = 1; # ifdef GEN_TREES_H gen_trees_header(); # endif #endif /* defined(GEN_TREES_H) || !defined(STDC) */ } /* =========================================================================== * Generate the file trees.h describing the static trees. */ #ifdef GEN_TREES_H # ifndef ZLIB_DEBUG # include <stdio.h> # endif # define SEPARATOR(i, last, width) \ ((i) == (last)? "\n};\n\n" : \ ((i) % (width) == (width)-1 ?
",\n" : ", ")) void gen_trees_header() { FILE *header = fopen("trees.h", "w"); int i; Assert (header != NULL, "Can't open trees.h"); fprintf(header, "/* header created automatically with -DGEN_TREES_H */\n\n"); fprintf(header, "local const ct_data static_ltree[L_CODES+2] = {\n"); for (i = 0; i < L_CODES+2; i++) { fprintf(header, "{{%3u},{%3u}}%s", static_ltree[i].Code, static_ltree[i].Len, SEPARATOR(i, L_CODES+1, 5)); } fprintf(header, "local const ct_data static_dtree[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "{{%2u},{%2u}}%s", static_dtree[i].Code, static_dtree[i].Len, SEPARATOR(i, D_CODES-1, 5)); } fprintf(header, "const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = {\n"); for (i = 0; i < DIST_CODE_LEN; i++) { fprintf(header, "%2u%s", _dist_code[i], SEPARATOR(i, DIST_CODE_LEN-1, 20)); } fprintf(header, "const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= {\n"); for (i = 0; i < MAX_MATCH-MIN_MATCH+1; i++) { fprintf(header, "%2u%s", _length_code[i], SEPARATOR(i, MAX_MATCH-MIN_MATCH, 20)); } fprintf(header, "local const int base_length[LENGTH_CODES] = {\n"); for (i = 0; i < LENGTH_CODES; i++) { fprintf(header, "%1u%s", base_length[i], SEPARATOR(i, LENGTH_CODES-1, 20)); } fprintf(header, "local const int base_dist[D_CODES] = {\n"); for (i = 0; i < D_CODES; i++) { fprintf(header, "%5u%s", base_dist[i], SEPARATOR(i, D_CODES-1, 10)); } fclose(header); } #endif /* GEN_TREES_H */ /* =========================================================================== * Initialize the tree data structures for a new zlib stream. */ void ZLIB_INTERNAL _tr_init(s) deflate_state *s; { tr_static_init(); s->l_desc.dyn_tree = s->dyn_ltree; s->l_desc.stat_desc = &static_l_desc; s->d_desc.dyn_tree = s->dyn_dtree; s->d_desc.stat_desc = &static_d_desc; s->bl_desc.dyn_tree = s->bl_tree; s->bl_desc.stat_desc = &static_bl_desc; s->bi_buf = 0; s->bi_valid = 0; #ifdef ZLIB_DEBUG s->compressed_len = 0L; s->bits_sent = 0L; #endif /* Initialize the first block of the first file: */ init_block(s); } /* =========================================================================== * Initialize a new block. */ local void init_block(s) deflate_state *s; { int n; /* iterates over tree elements */ /* Initialize the trees. */ for (n = 0; n < L_CODES; n++) s->dyn_ltree[n].Freq = 0; for (n = 0; n < D_CODES; n++) s->dyn_dtree[n].Freq = 0; for (n = 0; n < BL_CODES; n++) s->bl_tree[n].Freq = 0; s->dyn_ltree[END_BLOCK].Freq = 1; s->opt_len = s->static_len = 0L; s->last_lit = s->matches = 0; } #define SMALLEST 1 /* Index within the heap array of least frequent node in the Huffman tree */ /* =========================================================================== * Remove the smallest element from the heap and recreate the heap with * one less element. Updates heap and heap_len. */ #define pqremove(s, tree, top) \ {\ top = s->heap[SMALLEST]; \ s->heap[SMALLEST] = s->heap[s->heap_len--]; \ pqdownheap(s, tree, SMALLEST); \ } /* =========================================================================== * Compares to subtrees, using the tree depth as tie breaker when * the subtrees have equal frequency. This minimizes the worst case length. 
*/ #define smaller(tree, n, m, depth) \ (tree[n].Freq < tree[m].Freq || \ (tree[n].Freq == tree[m].Freq && depth[n] <= depth[m])) /* =========================================================================== * Restore the heap property by moving down the tree starting at node k, * exchanging a node with the smallest of its two sons if necessary, stopping * when the heap property is re-established (each father smaller than its * two sons). */ local void pqdownheap(s, tree, k) deflate_state *s; ct_data *tree; /* the tree to restore */ int k; /* node to move down */ { int v = s->heap[k]; int j = k << 1; /* left son of k */ while (j <= s->heap_len) { /* Set j to the smallest of the two sons: */ if (j < s->heap_len && smaller(tree, s->heap[j+1], s->heap[j], s->depth)) { j++; } /* Exit if v is smaller than both sons */ if (smaller(tree, v, s->heap[j], s->depth)) break; /* Exchange v with the smallest son */ s->heap[k] = s->heap[j]; k = j; /* And continue down the tree, setting j to the left son of k */ j <<= 1; } s->heap[k] = v; } /* =========================================================================== * Compute the optimal bit lengths for a tree and update the total bit length * for the current block. * IN assertion: the fields freq and dad are set, heap[heap_max] and * above are the tree nodes sorted by increasing frequency. * OUT assertions: the field len is set to the optimal bit length, the * array bl_count contains the frequencies for each bit length. * The length opt_len is updated; static_len is also updated if stree is * not null. */ local void gen_bitlen(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; int max_code = desc->max_code; const ct_data *stree = desc->stat_desc->static_tree; const intf *extra = desc->stat_desc->extra_bits; int base = desc->stat_desc->extra_base; int max_length = desc->stat_desc->max_length; int h; /* heap index */ int n, m; /* iterate over the tree elements */ int bits; /* bit length */ int xbits; /* extra bits */ ush f; /* frequency */ int overflow = 0; /* number of elements with bit length too large */ for (bits = 0; bits <= MAX_BITS; bits++) s->bl_count[bits] = 0; /* In a first pass, compute the optimal bit lengths (which may * overflow in the case of the bit length tree). */ tree[s->heap[s->heap_max]].Len = 0; /* root of the heap */ for (h = s->heap_max+1; h < HEAP_SIZE; h++) { n = s->heap[h]; bits = tree[tree[n].Dad].Len + 1; if (bits > max_length) bits = max_length, overflow++; tree[n].Len = (ush)bits; /* We overwrite tree[n].Dad which is no longer needed */ if (n > max_code) continue; /* not a leaf node */ s->bl_count[bits]++; xbits = 0; if (n >= base) xbits = extra[n-base]; f = tree[n].Freq; s->opt_len += (ulg)f * (unsigned)(bits + xbits); if (stree) s->static_len += (ulg)f * (unsigned)(stree[n].Len + xbits); } if (overflow == 0) return; Tracev((stderr,"\nbit length overflow\n")); /* This happens for example on obj2 and pic of the Calgary corpus */ /* Find the first bit length which could increase: */ do { bits = max_length-1; while (s->bl_count[bits] == 0) bits--; s->bl_count[bits]--; /* move one leaf down the tree */ s->bl_count[bits+1] += 2; /* move one overflow item as its brother */ s->bl_count[max_length]--; /* The brother of the overflow item also moves one step up, * but this does not affect bl_count[max_length] */ overflow -= 2; } while (overflow > 0); /* Now recompute all bit lengths, scanning in increasing frequency. * h is still equal to HEAP_SIZE. 
(It is simpler to reconstruct all * lengths instead of fixing only the wrong ones. This idea is taken * from 'ar' written by Haruhiko Okumura.) */ for (bits = max_length; bits != 0; bits--) { n = s->bl_count[bits]; while (n != 0) { m = s->heap[--h]; if (m > max_code) continue; if ((unsigned) tree[m].Len != (unsigned) bits) { Tracev((stderr,"code %d bits %d->%d\n", m, tree[m].Len, bits)); s->opt_len += ((ulg)bits - tree[m].Len) * tree[m].Freq; tree[m].Len = (ush)bits; } n--; } } } /* =========================================================================== * Generate the codes for a given tree and bit counts (which need not be * optimal). * IN assertion: the array bl_count contains the bit length statistics for * the given tree and the field len is set for all tree elements. * OUT assertion: the field code is set for all tree elements of non * zero code length. */ local void gen_codes (tree, max_code, bl_count) ct_data *tree; /* the tree to decorate */ int max_code; /* largest code with non zero frequency */ ushf *bl_count; /* number of codes at each bit length */ { ush next_code[MAX_BITS+1]; /* next code value for each bit length */ unsigned code = 0; /* running code value */ int bits; /* bit index */ int n; /* code index */ /* The distribution counts are first used to generate the code values * without bit reversal. */ for (bits = 1; bits <= MAX_BITS; bits++) { code = (code + bl_count[bits-1]) << 1; next_code[bits] = (ush)code; } /* Check that the bit counts in bl_count are consistent. The last code * must be all ones. */ Assert (code + bl_count[MAX_BITS]-1 == (1<<MAX_BITS)-1, "inconsistent bit counts"); Tracev((stderr,"\ngen_codes: max_code %d ", max_code)); for (n = 0; n <= max_code; n++) { int len = tree[n].Len; if (len == 0) continue; /* Now reverse the bits */ tree[n].Code = (ush)bi_reverse(next_code[len]++, len); Tracecv(tree != static_ltree, (stderr,"\nn %3d %c l %2d c %4x (%x) ", n, (isgraph(n) ? n : ' '), len, tree[n].Code, next_code[len]-1)); } } /* =========================================================================== * Construct one Huffman tree and assign the code bit strings and lengths. * Update the total bit length for the current block. * IN assertion: the field freq is set for all tree elements. * OUT assertions: the fields len and code are set to the optimal bit length * and corresponding code. The length opt_len is updated; static_len is * also updated if stree is not null. The field max_code is set. */ local void build_tree(s, desc) deflate_state *s; tree_desc *desc; /* the tree descriptor */ { ct_data *tree = desc->dyn_tree; const ct_data *stree = desc->stat_desc->static_tree; int elems = desc->stat_desc->elems; int n, m; /* iterate over heap elements */ int max_code = -1; /* largest code with non zero frequency */ int node; /* new node being created */ /* Construct the initial heap, with least frequent element in * heap[SMALLEST]. The sons of heap[n] are heap[2*n] and heap[2*n+1]. * heap[0] is not used. */ s->heap_len = 0, s->heap_max = HEAP_SIZE; for (n = 0; n < elems; n++) { if (tree[n].Freq != 0) { s->heap[++(s->heap_len)] = max_code = n; s->depth[n] = 0; } else { tree[n].Len = 0; } } /* The pkzip format requires that at least one distance code exists, * and that at least one bit should be sent even if there is only one * possible code. So to avoid special checks later on we force at least * two codes of non zero frequency. */ while (s->heap_len < 2) { node = s->heap[++(s->heap_len)] = (max_code < 2 ? ++max_code : 0); tree[node].Freq = 1; s->depth[node] = 0; s->opt_len--; if (stree) s->static_len -= stree[node].Len; /* node is 0 or 1 so it does not have extra bits */ } desc->max_code = max_code; /* The elements heap[heap_len/2+1 .. heap_len] are leaves of the tree, * establish sub-heaps of increasing lengths: */ for (n = s->heap_len/2; n >= 1; n--) pqdownheap(s, tree, n); /* Construct the Huffman tree by repeatedly combining the least two * frequent nodes. */ node = elems; /* next internal node of the tree */ do { pqremove(s, tree, n); /* n = node of least frequency */ m = s->heap[SMALLEST]; /* m = node of next least frequency */ s->heap[--(s->heap_max)] = n; /* keep the nodes sorted by frequency */ s->heap[--(s->heap_max)] = m; /* Create a new node father of n and m */ tree[node].Freq = tree[n].Freq + tree[m].Freq; s->depth[node] = (uch)((s->depth[n] >= s->depth[m] ?
s->depth[n] : s->depth[m]) + 1); tree[n].Dad = tree[m].Dad = (ush)node; #ifdef DUMP_BL_TREE if (tree == s->bl_tree) { fprintf(stderr,"\nnode %d(%d), sons %d(%d) %d(%d)", node, tree[node].Freq, n, tree[n].Freq, m, tree[m].Freq); } #endif /* and insert the new node in the heap */ s->heap[SMALLEST] = node++; pqdownheap(s, tree, SMALLEST); } while (s->heap_len >= 2); s->heap[--(s->heap_max)] = s->heap[SMALLEST]; /* At this point, the fields freq and dad are set. We can now * generate the bit lengths. */ gen_bitlen(s, (tree_desc *)desc); /* The field len is now set, we can generate the bit codes */ gen_codes ((ct_data *)tree, max_code, s->bl_count); } /* =========================================================================== * Scan a literal or distance tree to determine the frequencies of the codes * in the bit length tree. */ local void scan_tree (s, tree, max_code) deflate_state *s; ct_data *tree; /* the tree to be scanned */ int max_code; /* and its largest code of non zero frequency */ { int n; /* iterates over all tree elements */ int prevlen = -1; /* last emitted length */ int curlen; /* length of current code */ int nextlen = tree[0].Len; /* length of next code */ int count = 0; /* repeat count of the current code */ int max_count = 7; /* max repeat count */ int min_count = 4; /* min repeat count */ if (nextlen == 0) max_count = 138, min_count = 3; tree[max_code+1].Len = (ush)0xffff; /* guard */ for (n = 0; n <= max_code; n++) { curlen = nextlen; nextlen = tree[n+1].Len; if (++count < max_count && curlen == nextlen) { continue; } else if (count < min_count) { s->bl_tree[curlen].Freq += count; } else if (curlen != 0) { if (curlen != prevlen) s->bl_tree[curlen].Freq++; s->bl_tree[REP_3_6].Freq++; } else if (count <= 10) { s->bl_tree[REPZ_3_10].Freq++; } else { s->bl_tree[REPZ_11_138].Freq++; } count = 0; prevlen = curlen; if (nextlen == 0) { max_count = 138, min_count = 3; } else if (curlen == nextlen) { max_count = 6, min_count = 3; } else { max_count = 7, min_count = 4; } } } /* =========================================================================== * Send a literal or distance tree in compressed form, using the codes in * bl_tree. 
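* Worked example (illustrative only): a run of five length-8 codes following a different length is sent as the code 8 once, then REP_3_6 with 2-bit repeat value 1 (i.e. four more copies); a run of ten zero lengths is sent as REPZ_3_10 with 3-bit value 7 (10-3).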
*/ local void send_tree (s, tree, max_code) deflate_state *s; ct_data *tree; /* the tree to be scanned */ int max_code; /* and its largest code of non zero frequency */ { int n; /* iterates over all tree elements */ int prevlen = -1; /* last emitted length */ int curlen; /* length of current code */ int nextlen = tree[0].Len; /* length of next code */ int count = 0; /* repeat count of the current code */ int max_count = 7; /* max repeat count */ int min_count = 4; /* min repeat count */ /* tree[max_code+1].Len = -1; */ /* guard already set */ if (nextlen == 0) max_count = 138, min_count = 3; for (n = 0; n <= max_code; n++) { curlen = nextlen; nextlen = tree[n+1].Len; if (++count < max_count && curlen == nextlen) { continue; } else if (count < min_count) { do { send_code(s, curlen, s->bl_tree); } while (--count != 0); } else if (curlen != 0) { if (curlen != prevlen) { send_code(s, curlen, s->bl_tree); count--; } Assert(count >= 3 && count <= 6, " 3_6?"); send_code(s, REP_3_6, s->bl_tree); send_bits(s, count-3, 2); } else if (count <= 10) { send_code(s, REPZ_3_10, s->bl_tree); send_bits(s, count-3, 3); } else { send_code(s, REPZ_11_138, s->bl_tree); send_bits(s, count-11, 7); } count = 0; prevlen = curlen; if (nextlen == 0) { max_count = 138, min_count = 3; } else if (curlen == nextlen) { max_count = 6, min_count = 3; } else { max_count = 7, min_count = 4; } } } /* =========================================================================== * Construct the Huffman tree for the bit lengths and return the index in * bl_order of the last bit length code to send. */ local int build_bl_tree(s) deflate_state *s; { int max_blindex; /* index of last bit length code of non zero freq */ /* Determine the bit length frequencies for literal and distance trees */ scan_tree(s, (ct_data *)s->dyn_ltree, s->l_desc.max_code); scan_tree(s, (ct_data *)s->dyn_dtree, s->d_desc.max_code); /* Build the bit length tree: */ build_tree(s, (tree_desc *)(&(s->bl_desc))); /* opt_len now includes the length of the tree representations, except * the lengths of the bit lengths codes and the 5+5+4 bits for the counts. */ /* Determine the number of bit length codes to send. The pkzip format * requires that at least 4 bit length codes be sent. (appnote.txt says * 3 but the actual value used is 4.) */ for (max_blindex = BL_CODES-1; max_blindex >= 3; max_blindex--) { if (s->bl_tree[bl_order[max_blindex]].Len != 0) break; } /* Update opt_len to include the bit length tree and counts */ s->opt_len += 3*((ulg)max_blindex+1) + 5+5+4; Tracev((stderr, "\ndyn trees: dyn %ld, stat %ld", s->opt_len, s->static_len)); return max_blindex; } /* =========================================================================== * Send the header for a block using dynamic Huffman trees: the counts, the * lengths of the bit length codes, the literal tree and the distance tree. * IN assertion: lcodes >= 257, dcodes >= 1, blcodes >= 4. 
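* Worked example (illustrative numbers only): with lcodes == 280, dcodes == 20 and blcodes == 15, the header carries the 5-bit value 23 (280-257), the 5-bit value 19 (20-1) and the 4-bit value 11 (15-4), followed by fifteen 3-bit lengths taken from bl_tree in bl_order.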
*/ local void send_all_trees(s, lcodes, dcodes, blcodes) deflate_state *s; int lcodes, dcodes, blcodes; /* number of codes for each tree */ { int rank; /* index in bl_order */ Assert (lcodes >= 257 && dcodes >= 1 && blcodes >= 4, "not enough codes"); Assert (lcodes <= L_CODES && dcodes <= D_CODES && blcodes <= BL_CODES, "too many codes"); Tracev((stderr, "\nbl counts: ")); send_bits(s, lcodes-257, 5); /* not +255 as stated in appnote.txt */ send_bits(s, dcodes-1, 5); send_bits(s, blcodes-4, 4); /* not -3 as stated in appnote.txt */ for (rank = 0; rank < blcodes; rank++) { Tracev((stderr, "\nbl code %2d ", bl_order[rank])); send_bits(s, s->bl_tree[bl_order[rank]].Len, 3); } Tracev((stderr, "\nbl tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_ltree, lcodes-1); /* literal tree */ Tracev((stderr, "\nlit tree: sent %ld", s->bits_sent)); send_tree(s, (ct_data *)s->dyn_dtree, dcodes-1); /* distance tree */ Tracev((stderr, "\ndist tree: sent %ld", s->bits_sent)); } /* =========================================================================== * Send a stored block */ void ZLIB_INTERNAL _tr_stored_block(s, buf, stored_len, last) deflate_state *s; charf *buf; /* input block */ ulg stored_len; /* length of input block */ int last; /* one if this is the last block for a file */ { send_bits(s, (STORED_BLOCK<<1)+last, 3); /* send block type */ bi_windup(s); /* align on byte boundary */ put_short(s, (ush)stored_len); put_short(s, (ush)~stored_len); zmemcpy(s->pending_buf + s->pending, (Bytef *)buf, stored_len); s->pending += stored_len; #ifdef ZLIB_DEBUG s->compressed_len = (s->compressed_len + 3 + 7) & (ulg)~7L; s->compressed_len += (stored_len + 4) << 3; s->bits_sent += 2*16; s->bits_sent += stored_len<<3; #endif } /* =========================================================================== * Flush the bits in the bit buffer to pending output (leaves at most 7 bits) */ void ZLIB_INTERNAL _tr_flush_bits(s) deflate_state *s; { bi_flush(s); } /* =========================================================================== * Send one empty static block to give enough lookahead for inflate. * This takes 10 bits, of which 7 may remain in the bit buffer. */ void ZLIB_INTERNAL _tr_align(s) deflate_state *s; { send_bits(s, STATIC_TREES<<1, 3); send_code(s, END_BLOCK, static_ltree); #ifdef ZLIB_DEBUG s->compressed_len += 10L; /* 3 for block type, 7 for EOB */ #endif bi_flush(s); } /* =========================================================================== * Determine the best encoding for the current block: dynamic trees, static * trees or store, and write out the encoded block. 
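* For illustration (made-up sizes): with opt_lenb == 1200, static_lenb == 1400 and stored_len == 1500 the dynamically coded block is chosen; a stored block is used instead when stored_len+4 <= opt_lenb (nearly incompressible data), and the static trees are used when static_lenb == opt_lenb or the strategy is Z_FIXED.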
*/ void ZLIB_INTERNAL _tr_flush_block(s, buf, stored_len, last) deflate_state *s; charf *buf; /* input block, or NULL if too old */ ulg stored_len; /* length of input block */ int last; /* one if this is the last block for a file */ { ulg opt_lenb, static_lenb; /* opt_len and static_len in bytes */ int max_blindex = 0; /* index of last bit length code of non zero freq */ /* Build the Huffman trees unless a stored block is forced */ if (s->level > 0) { /* Check if the file is binary or text */ if (s->strm->data_type == Z_UNKNOWN) s->strm->data_type = detect_data_type(s); /* Construct the literal and distance trees */ build_tree(s, (tree_desc *)(&(s->l_desc))); Tracev((stderr, "\nlit data: dyn %ld, stat %ld", s->opt_len, s->static_len)); build_tree(s, (tree_desc *)(&(s->d_desc))); Tracev((stderr, "\ndist data: dyn %ld, stat %ld", s->opt_len, s->static_len)); /* At this point, opt_len and static_len are the total bit lengths of * the compressed block data, excluding the tree representations. */ /* Build the bit length tree for the above two trees, and get the index * in bl_order of the last bit length code to send. */ max_blindex = build_bl_tree(s); /* Determine the best encoding. Compute the block lengths in bytes. */ opt_lenb = (s->opt_len+3+7)>>3; static_lenb = (s->static_len+3+7)>>3; Tracev((stderr, "\nopt %lu(%lu) stat %lu(%lu) stored %lu lit %u ", opt_lenb, s->opt_len, static_lenb, s->static_len, stored_len, s->last_lit)); if (static_lenb <= opt_lenb) opt_lenb = static_lenb; } else { Assert(buf != (char*)0, "lost buf"); opt_lenb = static_lenb = stored_len + 5; /* force a stored block */ } #ifdef FORCE_STORED if (buf != (char*)0) { /* force stored block */ #else if (stored_len+4 <= opt_lenb && buf != (char*)0) { /* 4: two words for the lengths */ #endif /* The test buf != NULL is only necessary if LIT_BUFSIZE > WSIZE. * Otherwise we can't have processed more than WSIZE input bytes since * the last block flush, because compression would have been * successful. If LIT_BUFSIZE <= WSIZE, it is never too late to * transform a block into a stored block. */ _tr_stored_block(s, buf, stored_len, last); #ifdef FORCE_STATIC } else if (static_lenb >= 0) { /* force static trees */ #else } else if (s->strategy == Z_FIXED || static_lenb == opt_lenb) { #endif send_bits(s, (STATIC_TREES<<1)+last, 3); compress_block(s, (const ct_data *)static_ltree, (const ct_data *)static_dtree); #ifdef ZLIB_DEBUG s->compressed_len += 3 + s->static_len; #endif } else { send_bits(s, (DYN_TREES<<1)+last, 3); send_all_trees(s, s->l_desc.max_code+1, s->d_desc.max_code+1, max_blindex+1); compress_block(s, (const ct_data *)s->dyn_ltree, (const ct_data *)s->dyn_dtree); #ifdef ZLIB_DEBUG s->compressed_len += 3 + s->opt_len; #endif } Assert (s->compressed_len == s->bits_sent, "bad compressed size"); /* The above check is made mod 2^32, for files larger than 512 MB * and uLong implemented on 32 bits. */ init_block(s); if (last) { bi_windup(s); #ifdef ZLIB_DEBUG s->compressed_len += 7; /* align on byte boundary */ #endif } Tracev((stderr,"\ncomprlen %lu(%lu) ", s->compressed_len>>3, s->compressed_len-7*last)); } /* =========================================================================== * Save the match info and tally the frequency counts. Return true if * the current block must be flushed. 
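* For illustration: a literal byte 'A' is tallied with dist == 0 and lc == 65, bumping dyn_ltree[65].Freq; a match of length 7 at distance 300 is tallied with dist == 300 and lc == 7 - MIN_MATCH == 4, bumping dyn_ltree[_length_code[4]+LITERALS+1].Freq and dyn_dtree[d_code(299)].Freq.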
*/ int ZLIB_INTERNAL _tr_tally (s, dist, lc) deflate_state *s; unsigned dist; /* distance of matched string */ unsigned lc; /* match length-MIN_MATCH or unmatched char (if dist==0) */ { s->d_buf[s->last_lit] = (ush)dist; s->l_buf[s->last_lit++] = (uch)lc; if (dist == 0) { /* lc is the unmatched char */ s->dyn_ltree[lc].Freq++; } else { s->matches++; /* Here, lc is the match length - MIN_MATCH */ dist--; /* dist = match distance - 1 */ Assert((ush)dist < (ush)MAX_DIST(s) && (ush)lc <= (ush)(MAX_MATCH-MIN_MATCH) && (ush)d_code(dist) < (ush)D_CODES, "_tr_tally: bad match"); s->dyn_ltree[_length_code[lc]+LITERALS+1].Freq++; s->dyn_dtree[d_code(dist)].Freq++; } #ifdef TRUNCATE_BLOCK /* Try to guess if it is profitable to stop the current block here */ if ((s->last_lit & 0x1fff) == 0 && s->level > 2) { /* Compute an upper bound for the compressed length */ ulg out_length = (ulg)s->last_lit*8L; ulg in_length = (ulg)((long)s->strstart - s->block_start); int dcode; for (dcode = 0; dcode < D_CODES; dcode++) { out_length += (ulg)s->dyn_dtree[dcode].Freq * (5L+extra_dbits[dcode]); } out_length >>= 3; Tracev((stderr,"\nlast_lit %u, in %ld, out ~%ld(%ld%%) ", s->last_lit, in_length, out_length, 100L - out_length*100L/in_length)); if (s->matches < s->last_lit/2 && out_length < in_length/2) return 1; } #endif return (s->last_lit == s->lit_bufsize-1); /* We avoid equality with lit_bufsize because of wraparound at 64K * on 16 bit machines and because stored blocks are restricted to * 64K-1 bytes. */ } /* =========================================================================== * Send the block data compressed using the given Huffman trees */ local void compress_block(s, ltree, dtree) deflate_state *s; const ct_data *ltree; /* literal tree */ const ct_data *dtree; /* distance tree */ { unsigned dist; /* distance of matched string */ int lc; /* match length or unmatched char (if dist == 0) */ unsigned lx = 0; /* running index in l_buf */ unsigned code; /* the code to send */ int extra; /* number of extra bits to send */ if (s->last_lit != 0) do { dist = s->d_buf[lx]; lc = s->l_buf[lx++]; if (dist == 0) { send_code(s, lc, ltree); /* send a literal byte */ Tracecv(isgraph(lc), (stderr," '%c' ", lc)); } else { /* Here, lc is the match length - MIN_MATCH */ code = _length_code[lc]; send_code(s, code+LITERALS+1, ltree); /* send the length code */ extra = extra_lbits[code]; if (extra != 0) { lc -= base_length[code]; send_bits(s, lc, extra); /* send the extra length bits */ } dist--; /* dist is now the match distance - 1 */ code = d_code(dist); Assert (code < D_CODES, "bad d_code"); send_code(s, code, dtree); /* send the distance code */ extra = extra_dbits[code]; if (extra != 0) { dist -= (unsigned)base_dist[code]; send_bits(s, dist, extra); /* send the extra distance bits */ } } /* literal or match pair ? */ /* Check that the overlay between pending_buf and d_buf+l_buf is ok: */ Assert((uInt)(s->pending) < s->lit_bufsize + 2*lx, "pendingBuf overflow"); } while (lx < s->last_lit); send_code(s, END_BLOCK, ltree); } /* =========================================================================== * Check if the data type is TEXT or BINARY, using the following algorithm: * - TEXT if the two conditions below are satisfied: * a) There are no non-portable control characters belonging to the * "black list" (0..6, 14..25, 28..31). * b) There is at least one printable character belonging to the * "white list" (9 {TAB}, 10 {LF}, 13 {CR}, 32..255). * - BINARY otherwise. 
* - The following partially-portable control characters form a * "gray list" that is ignored in this detection algorithm: * (7 {BEL}, 8 {BS}, 11 {VT}, 12 {FF}, 26 {SUB}, 27 {ESC}). * IN assertion: the fields Freq of dyn_ltree are set. */ local int detect_data_type(s) deflate_state *s; { /* black_mask is the bit mask of black-listed bytes * set bits 0..6, 14..25, and 28..31 * 0xf3ffc07f = binary 11110011111111111100000001111111 */ unsigned long black_mask = 0xf3ffc07fUL; int n; /* Check for non-textual ("black-listed") bytes. */ for (n = 0; n <= 31; n++, black_mask >>= 1) if ((black_mask & 1) && (s->dyn_ltree[n].Freq != 0)) return Z_BINARY; /* Check for textual ("white-listed") bytes. */ if (s->dyn_ltree[9].Freq != 0 || s->dyn_ltree[10].Freq != 0 || s->dyn_ltree[13].Freq != 0) return Z_TEXT; for (n = 32; n < LITERALS; n++) if (s->dyn_ltree[n].Freq != 0) return Z_TEXT; /* There are no "black-listed" or "white-listed" bytes: * this stream either is empty or has tolerated ("gray-listed") bytes only. */ return Z_BINARY; } /* =========================================================================== * Reverse the first len bits of a code, using straightforward code (a faster * method would use a table) * IN assertion: 1 <= len <= 15 */ local unsigned bi_reverse(code, len) unsigned code; /* the value to invert */ int len; /* its bit length */ { register unsigned res = 0; do { res |= code & 1; code >>= 1, res <<= 1; } while (--len > 0); return res >> 1; } /* =========================================================================== * Flush the bit buffer, keeping at most 7 bits in it. */ local void bi_flush(s) deflate_state *s; { if (s->bi_valid == 16) { put_short(s, s->bi_buf); s->bi_buf = 0; s->bi_valid = 0; } else if (s->bi_valid >= 8) { put_byte(s, (Byte)s->bi_buf); s->bi_buf >>= 8; s->bi_valid -= 8; } } /* =========================================================================== * Flush the bit buffer and align the output on a byte boundary */ local void bi_windup(s) deflate_state *s; { if (s->bi_valid > 8) { put_short(s, s->bi_buf); } else if (s->bi_valid > 0) { put_byte(s, (Byte)s->bi_buf); } s->bi_buf = 0; s->bi_valid = 0; #ifdef ZLIB_DEBUG s->bits_sent = (s->bits_sent+7) & ~7; #endif } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/trees.h0000644000076500000240000002043013572250760020752 0ustar alcaeusstaff/* header created automatically with -DGEN_TREES_H */ local const ct_data static_ltree[L_CODES+2] = { {{ 12},{ 8}}, {{140},{ 8}}, {{ 76},{ 8}}, {{204},{ 8}}, {{ 44},{ 8}}, {{172},{ 8}}, {{108},{ 8}}, {{236},{ 8}}, {{ 28},{ 8}}, {{156},{ 8}}, {{ 92},{ 8}}, {{220},{ 8}}, {{ 60},{ 8}}, {{188},{ 8}}, {{124},{ 8}}, {{252},{ 8}}, {{ 2},{ 8}}, {{130},{ 8}}, {{ 66},{ 8}}, {{194},{ 8}}, {{ 34},{ 8}}, {{162},{ 8}}, {{ 98},{ 8}}, {{226},{ 8}}, {{ 18},{ 8}}, {{146},{ 8}}, {{ 82},{ 8}}, {{210},{ 8}}, {{ 50},{ 8}}, {{178},{ 8}}, {{114},{ 8}}, {{242},{ 8}}, {{ 10},{ 8}}, {{138},{ 8}}, {{ 74},{ 8}}, {{202},{ 8}}, {{ 42},{ 8}}, {{170},{ 8}}, {{106},{ 8}}, {{234},{ 8}}, {{ 26},{ 8}}, {{154},{ 8}}, {{ 90},{ 8}}, {{218},{ 8}}, {{ 58},{ 8}}, {{186},{ 8}}, {{122},{ 8}}, {{250},{ 8}}, {{ 6},{ 8}}, {{134},{ 8}}, {{ 70},{ 8}}, {{198},{ 8}}, {{ 38},{ 8}}, {{166},{ 8}}, {{102},{ 8}}, {{230},{ 8}}, {{ 22},{ 8}}, {{150},{ 8}}, {{ 86},{ 8}}, {{214},{ 8}}, {{ 54},{ 8}}, {{182},{ 8}}, {{118},{ 8}}, {{246},{ 8}}, {{ 14},{ 8}}, {{142},{ 8}}, {{ 78},{ 8}}, {{206},{ 8}}, {{ 46},{ 8}}, {{174},{ 8}}, {{110},{ 8}}, {{238},{ 8}}, {{ 30},{ 8}}, {{158},{ 8}}, {{ 94},{ 8}}, {{222},{ 8}}, {{ 62},{ 8}}, {{190},{ 8}}, {{126},{ 
8}}, {{254},{ 8}}, {{ 1},{ 8}}, {{129},{ 8}}, {{ 65},{ 8}}, {{193},{ 8}}, {{ 33},{ 8}}, {{161},{ 8}}, {{ 97},{ 8}}, {{225},{ 8}}, {{ 17},{ 8}}, {{145},{ 8}}, {{ 81},{ 8}}, {{209},{ 8}}, {{ 49},{ 8}}, {{177},{ 8}}, {{113},{ 8}}, {{241},{ 8}}, {{ 9},{ 8}}, {{137},{ 8}}, {{ 73},{ 8}}, {{201},{ 8}}, {{ 41},{ 8}}, {{169},{ 8}}, {{105},{ 8}}, {{233},{ 8}}, {{ 25},{ 8}}, {{153},{ 8}}, {{ 89},{ 8}}, {{217},{ 8}}, {{ 57},{ 8}}, {{185},{ 8}}, {{121},{ 8}}, {{249},{ 8}}, {{ 5},{ 8}}, {{133},{ 8}}, {{ 69},{ 8}}, {{197},{ 8}}, {{ 37},{ 8}}, {{165},{ 8}}, {{101},{ 8}}, {{229},{ 8}}, {{ 21},{ 8}}, {{149},{ 8}}, {{ 85},{ 8}}, {{213},{ 8}}, {{ 53},{ 8}}, {{181},{ 8}}, {{117},{ 8}}, {{245},{ 8}}, {{ 13},{ 8}}, {{141},{ 8}}, {{ 77},{ 8}}, {{205},{ 8}}, {{ 45},{ 8}}, {{173},{ 8}}, {{109},{ 8}}, {{237},{ 8}}, {{ 29},{ 8}}, {{157},{ 8}}, {{ 93},{ 8}}, {{221},{ 8}}, {{ 61},{ 8}}, {{189},{ 8}}, {{125},{ 8}}, {{253},{ 8}}, {{ 19},{ 9}}, {{275},{ 9}}, {{147},{ 9}}, {{403},{ 9}}, {{ 83},{ 9}}, {{339},{ 9}}, {{211},{ 9}}, {{467},{ 9}}, {{ 51},{ 9}}, {{307},{ 9}}, {{179},{ 9}}, {{435},{ 9}}, {{115},{ 9}}, {{371},{ 9}}, {{243},{ 9}}, {{499},{ 9}}, {{ 11},{ 9}}, {{267},{ 9}}, {{139},{ 9}}, {{395},{ 9}}, {{ 75},{ 9}}, {{331},{ 9}}, {{203},{ 9}}, {{459},{ 9}}, {{ 43},{ 9}}, {{299},{ 9}}, {{171},{ 9}}, {{427},{ 9}}, {{107},{ 9}}, {{363},{ 9}}, {{235},{ 9}}, {{491},{ 9}}, {{ 27},{ 9}}, {{283},{ 9}}, {{155},{ 9}}, {{411},{ 9}}, {{ 91},{ 9}}, {{347},{ 9}}, {{219},{ 9}}, {{475},{ 9}}, {{ 59},{ 9}}, {{315},{ 9}}, {{187},{ 9}}, {{443},{ 9}}, {{123},{ 9}}, {{379},{ 9}}, {{251},{ 9}}, {{507},{ 9}}, {{ 7},{ 9}}, {{263},{ 9}}, {{135},{ 9}}, {{391},{ 9}}, {{ 71},{ 9}}, {{327},{ 9}}, {{199},{ 9}}, {{455},{ 9}}, {{ 39},{ 9}}, {{295},{ 9}}, {{167},{ 9}}, {{423},{ 9}}, {{103},{ 9}}, {{359},{ 9}}, {{231},{ 9}}, {{487},{ 9}}, {{ 23},{ 9}}, {{279},{ 9}}, {{151},{ 9}}, {{407},{ 9}}, {{ 87},{ 9}}, {{343},{ 9}}, {{215},{ 9}}, {{471},{ 9}}, {{ 55},{ 9}}, {{311},{ 9}}, {{183},{ 9}}, {{439},{ 9}}, {{119},{ 9}}, {{375},{ 9}}, {{247},{ 9}}, {{503},{ 9}}, {{ 15},{ 9}}, {{271},{ 9}}, {{143},{ 9}}, {{399},{ 9}}, {{ 79},{ 9}}, {{335},{ 9}}, {{207},{ 9}}, {{463},{ 9}}, {{ 47},{ 9}}, {{303},{ 9}}, {{175},{ 9}}, {{431},{ 9}}, {{111},{ 9}}, {{367},{ 9}}, {{239},{ 9}}, {{495},{ 9}}, {{ 31},{ 9}}, {{287},{ 9}}, {{159},{ 9}}, {{415},{ 9}}, {{ 95},{ 9}}, {{351},{ 9}}, {{223},{ 9}}, {{479},{ 9}}, {{ 63},{ 9}}, {{319},{ 9}}, {{191},{ 9}}, {{447},{ 9}}, {{127},{ 9}}, {{383},{ 9}}, {{255},{ 9}}, {{511},{ 9}}, {{ 0},{ 7}}, {{ 64},{ 7}}, {{ 32},{ 7}}, {{ 96},{ 7}}, {{ 16},{ 7}}, {{ 80},{ 7}}, {{ 48},{ 7}}, {{112},{ 7}}, {{ 8},{ 7}}, {{ 72},{ 7}}, {{ 40},{ 7}}, {{104},{ 7}}, {{ 24},{ 7}}, {{ 88},{ 7}}, {{ 56},{ 7}}, {{120},{ 7}}, {{ 4},{ 7}}, {{ 68},{ 7}}, {{ 36},{ 7}}, {{100},{ 7}}, {{ 20},{ 7}}, {{ 84},{ 7}}, {{ 52},{ 7}}, {{116},{ 7}}, {{ 3},{ 8}}, {{131},{ 8}}, {{ 67},{ 8}}, {{195},{ 8}}, {{ 35},{ 8}}, {{163},{ 8}}, {{ 99},{ 8}}, {{227},{ 8}} }; local const ct_data static_dtree[D_CODES] = { {{ 0},{ 5}}, {{16},{ 5}}, {{ 8},{ 5}}, {{24},{ 5}}, {{ 4},{ 5}}, {{20},{ 5}}, {{12},{ 5}}, {{28},{ 5}}, {{ 2},{ 5}}, {{18},{ 5}}, {{10},{ 5}}, {{26},{ 5}}, {{ 6},{ 5}}, {{22},{ 5}}, {{14},{ 5}}, {{30},{ 5}}, {{ 1},{ 5}}, {{17},{ 5}}, {{ 9},{ 5}}, {{25},{ 5}}, {{ 5},{ 5}}, {{21},{ 5}}, {{13},{ 5}}, {{29},{ 5}}, {{ 3},{ 5}}, {{19},{ 5}}, {{11},{ 5}}, {{27},{ 5}}, {{ 7},{ 5}}, {{23},{ 5}} }; const uch ZLIB_INTERNAL _dist_code[DIST_CODE_LEN] = { 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 
10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 0, 0, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29 }; const uch ZLIB_INTERNAL _length_code[MAX_MATCH-MIN_MATCH+1]= { 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28 }; local const int base_length[LENGTH_CODES] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 0 }; local const int base_dist[D_CODES] = { 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192, 256, 384, 512, 768, 1024, 1536, 2048, 3072, 4096, 6144, 8192, 12288, 16384, 24576 }; mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/uncompr.c0000644000076500000240000000562613572250760021320 0ustar alcaeusstaff/* uncompr.c -- decompress a memory buffer * Copyright (C) 1995-2003, 2010, 2014, 2016 Jean-loup Gailly, Mark Adler * For conditions 
of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #define ZLIB_INTERNAL #include "zlib.h" /* =========================================================================== Decompresses the source buffer into the destination buffer. *sourceLen is the byte length of the source buffer. Upon entry, *destLen is the total size of the destination buffer, which must be large enough to hold the entire uncompressed data. (The size of the uncompressed data must have been saved previously by the compressor and transmitted to the decompressor by some mechanism outside the scope of this compression library.) Upon exit, *destLen is the size of the decompressed data and *sourceLen is the number of source bytes consumed. Upon return, source + *sourceLen points to the first unused input byte. uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer, or Z_DATA_ERROR if the input data was corrupted, including if the input data is an incomplete zlib stream. */ int ZEXPORT uncompress2 (dest, destLen, source, sourceLen) Bytef *dest; uLongf *destLen; const Bytef *source; uLong *sourceLen; { z_stream stream; int err; const uInt max = (uInt)-1; uLong len, left; Byte buf[1]; /* for detection of incomplete stream when *destLen == 0 */ len = *sourceLen; if (*destLen) { left = *destLen; *destLen = 0; } else { left = 1; dest = buf; } stream.next_in = (z_const Bytef *)source; stream.avail_in = 0; stream.zalloc = (alloc_func)0; stream.zfree = (free_func)0; stream.opaque = (voidpf)0; err = inflateInit(&stream); if (err != Z_OK) return err; stream.next_out = dest; stream.avail_out = 0; do { if (stream.avail_out == 0) { stream.avail_out = left > (uLong)max ? max : (uInt)left; left -= stream.avail_out; } if (stream.avail_in == 0) { stream.avail_in = len > (uLong)max ? max : (uInt)len; len -= stream.avail_in; } err = inflate(&stream, Z_NO_FLUSH); } while (err == Z_OK); *sourceLen -= len + stream.avail_in; if (dest != buf) *destLen = stream.total_out; else if (stream.total_out && err == Z_BUF_ERROR) left = 1; inflateEnd(&stream); return err == Z_STREAM_END ? Z_OK : err == Z_NEED_DICT ? Z_DATA_ERROR : err == Z_BUF_ERROR && left + stream.avail_out ? Z_DATA_ERROR : err; } int ZEXPORT uncompress (dest, destLen, source, sourceLen) Bytef *dest; uLongf *destLen; const Bytef *source; uLong sourceLen; { return uncompress2(dest, destLen, source, &sourceLen); } mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/zconf.h.in0000644000076500000240000003765213572250760021372 0ustar alcaeusstaff/* zconf.h -- configuration of the zlib compression library * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #ifndef ZCONF_H #define ZCONF_H /* * If you *really* need a unique prefix for all types and library functions, * compile with -DZ_PREFIX. The "standard" zlib should be compiled without it. * Even better than compiling with -DZ_PREFIX would be to use configure to set * this permanently in zconf.h using "./configure --zprefix". 
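* For illustration: building with -DZ_PREFIX renames the public entry points through the defines below, e.g. deflate() is linked as z_deflate() and uncompress() as z_uncompress(), so a private copy of zlib can coexist with an unprefixed system copy.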
*/ #ifdef Z_PREFIX /* may be set to #if 1 by ./configure */ # define Z_PREFIX_SET /* all linked symbols and init macros */ # define _dist_code z__dist_code # define _length_code z__length_code # define _tr_align z__tr_align # define _tr_flush_bits z__tr_flush_bits # define _tr_flush_block z__tr_flush_block # define _tr_init z__tr_init # define _tr_stored_block z__tr_stored_block # define _tr_tally z__tr_tally # define adler32 z_adler32 # define adler32_combine z_adler32_combine # define adler32_combine64 z_adler32_combine64 # define adler32_z z_adler32_z # ifndef Z_SOLO # define compress z_compress # define compress2 z_compress2 # define compressBound z_compressBound # endif # define crc32 z_crc32 # define crc32_combine z_crc32_combine # define crc32_combine64 z_crc32_combine64 # define crc32_z z_crc32_z # define deflate z_deflate # define deflateBound z_deflateBound # define deflateCopy z_deflateCopy # define deflateEnd z_deflateEnd # define deflateGetDictionary z_deflateGetDictionary # define deflateInit z_deflateInit # define deflateInit2 z_deflateInit2 # define deflateInit2_ z_deflateInit2_ # define deflateInit_ z_deflateInit_ # define deflateParams z_deflateParams # define deflatePending z_deflatePending # define deflatePrime z_deflatePrime # define deflateReset z_deflateReset # define deflateResetKeep z_deflateResetKeep # define deflateSetDictionary z_deflateSetDictionary # define deflateSetHeader z_deflateSetHeader # define deflateTune z_deflateTune # define deflate_copyright z_deflate_copyright # define get_crc_table z_get_crc_table # ifndef Z_SOLO # define gz_error z_gz_error # define gz_intmax z_gz_intmax # define gz_strwinerror z_gz_strwinerror # define gzbuffer z_gzbuffer # define gzclearerr z_gzclearerr # define gzclose z_gzclose # define gzclose_r z_gzclose_r # define gzclose_w z_gzclose_w # define gzdirect z_gzdirect # define gzdopen z_gzdopen # define gzeof z_gzeof # define gzerror z_gzerror # define gzflush z_gzflush # define gzfread z_gzfread # define gzfwrite z_gzfwrite # define gzgetc z_gzgetc # define gzgetc_ z_gzgetc_ # define gzgets z_gzgets # define gzoffset z_gzoffset # define gzoffset64 z_gzoffset64 # define gzopen z_gzopen # define gzopen64 z_gzopen64 # ifdef _WIN32 # define gzopen_w z_gzopen_w # endif # define gzprintf z_gzprintf # define gzputc z_gzputc # define gzputs z_gzputs # define gzread z_gzread # define gzrewind z_gzrewind # define gzseek z_gzseek # define gzseek64 z_gzseek64 # define gzsetparams z_gzsetparams # define gztell z_gztell # define gztell64 z_gztell64 # define gzungetc z_gzungetc # define gzvprintf z_gzvprintf # define gzwrite z_gzwrite # endif # define inflate z_inflate # define inflateBack z_inflateBack # define inflateBackEnd z_inflateBackEnd # define inflateBackInit z_inflateBackInit # define inflateBackInit_ z_inflateBackInit_ # define inflateCodesUsed z_inflateCodesUsed # define inflateCopy z_inflateCopy # define inflateEnd z_inflateEnd # define inflateGetDictionary z_inflateGetDictionary # define inflateGetHeader z_inflateGetHeader # define inflateInit z_inflateInit # define inflateInit2 z_inflateInit2 # define inflateInit2_ z_inflateInit2_ # define inflateInit_ z_inflateInit_ # define inflateMark z_inflateMark # define inflatePrime z_inflatePrime # define inflateReset z_inflateReset # define inflateReset2 z_inflateReset2 # define inflateResetKeep z_inflateResetKeep # define inflateSetDictionary z_inflateSetDictionary # define inflateSync z_inflateSync # define inflateSyncPoint z_inflateSyncPoint # define inflateUndermine 
z_inflateUndermine # define inflateValidate z_inflateValidate # define inflate_copyright z_inflate_copyright # define inflate_fast z_inflate_fast # define inflate_table z_inflate_table # ifndef Z_SOLO # define uncompress z_uncompress # define uncompress2 z_uncompress2 # endif # define zError z_zError # ifndef Z_SOLO # define zcalloc z_zcalloc # define zcfree z_zcfree # endif # define zlibCompileFlags z_zlibCompileFlags # define zlibVersion z_zlibVersion /* all zlib typedefs in zlib.h and zconf.h */ # define Byte z_Byte # define Bytef z_Bytef # define alloc_func z_alloc_func # define charf z_charf # define free_func z_free_func # ifndef Z_SOLO # define gzFile z_gzFile # endif # define gz_header z_gz_header # define gz_headerp z_gz_headerp # define in_func z_in_func # define intf z_intf # define out_func z_out_func # define uInt z_uInt # define uIntf z_uIntf # define uLong z_uLong # define uLongf z_uLongf # define voidp z_voidp # define voidpc z_voidpc # define voidpf z_voidpf /* all zlib structs in zlib.h and zconf.h */ # define gz_header_s z_gz_header_s # define internal_state z_internal_state #endif #if defined(__MSDOS__) && !defined(MSDOS) # define MSDOS #endif #if (defined(OS_2) || defined(__OS2__)) && !defined(OS2) # define OS2 #endif #if defined(_WINDOWS) && !defined(WINDOWS) # define WINDOWS #endif #if defined(_WIN32) || defined(_WIN32_WCE) || defined(__WIN32__) # ifndef WIN32 # define WIN32 # endif #endif #if (defined(MSDOS) || defined(OS2) || defined(WINDOWS)) && !defined(WIN32) # if !defined(__GNUC__) && !defined(__FLAT__) && !defined(__386__) # ifndef SYS16BIT # define SYS16BIT # endif # endif #endif /* * Compile with -DMAXSEG_64K if the alloc function cannot allocate more * than 64k bytes at a time (needed on systems with 16-bit int). */ #ifdef SYS16BIT # define MAXSEG_64K #endif #ifdef MSDOS # define UNALIGNED_OK #endif #ifdef __STDC_VERSION__ # ifndef STDC # define STDC # endif # if __STDC_VERSION__ >= 199901L # ifndef STDC99 # define STDC99 # endif # endif #endif #if !defined(STDC) && (defined(__STDC__) || defined(__cplusplus)) # define STDC #endif #if !defined(STDC) && (defined(__GNUC__) || defined(__BORLANDC__)) # define STDC #endif #if !defined(STDC) && (defined(MSDOS) || defined(WINDOWS) || defined(WIN32)) # define STDC #endif #if !defined(STDC) && (defined(OS2) || defined(__HOS_AIX__)) # define STDC #endif #if defined(__OS400__) && !defined(STDC) /* iSeries (formerly AS/400). */ # define STDC #endif #ifndef STDC # ifndef const /* cannot use !defined(STDC) && !defined(const) on Mac */ # define const /* note: need a more gentle solution here */ # endif #endif #if defined(ZLIB_CONST) && !defined(z_const) # define z_const const #else # define z_const #endif #ifdef Z_SOLO typedef unsigned long z_size_t; #else # define z_longlong long long # if defined(NO_SIZE_T) typedef unsigned NO_SIZE_T z_size_t; # elif defined(STDC) # include <stddef.h> typedef size_t z_size_t; # else typedef unsigned long z_size_t; # endif # undef z_longlong #endif /* Maximum value for memLevel in deflateInit2 */ #ifndef MAX_MEM_LEVEL # ifdef MAXSEG_64K # define MAX_MEM_LEVEL 8 # else # define MAX_MEM_LEVEL 9 # endif #endif /* Maximum value for windowBits in deflateInit2 and inflateInit2. * WARNING: reducing MAX_WBITS makes minigzip unable to extract .gz files * created by gzip. (Files created by minigzip can still be extracted by * gzip.)
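* For illustration: the default MAX_WBITS of 15 selects the full 32K LZ77 window; building with -DMAX_WBITS=14 limits the window to 16K and reduces the memory figures given below accordingly, at some cost in compression ratio.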
*/ #ifndef MAX_WBITS # define MAX_WBITS 15 /* 32K LZ77 window */ #endif /* The memory requirements for deflate are (in bytes): (1 << (windowBits+2)) + (1 << (memLevel+9)) that is: 128K for windowBits=15 + 128K for memLevel = 8 (default values) plus a few kilobytes for small objects. For example, if you want to reduce the default memory requirements from 256K to 128K, compile with make CFLAGS="-O -DMAX_WBITS=14 -DMAX_MEM_LEVEL=7" Of course this will generally degrade compression (there's no free lunch). The memory requirements for inflate are (in bytes) 1 << windowBits that is, 32K for windowBits=15 (default value) plus about 7 kilobytes for small objects. */ /* Type declarations */ #ifndef OF /* function prototypes */ # ifdef STDC # define OF(args) args # else # define OF(args) () # endif #endif #ifndef Z_ARG /* function prototypes for stdarg */ # if defined(STDC) || defined(Z_HAVE_STDARG_H) # define Z_ARG(args) args # else # define Z_ARG(args) () # endif #endif /* The following definitions for FAR are needed only for MSDOS mixed * model programming (small or medium model with some far allocations). * This was tested only with MSC; for other MSDOS compilers you may have * to define NO_MEMCPY in zutil.h. If you don't need the mixed model, * just define FAR to be empty. */ #ifdef SYS16BIT # if defined(M_I86SM) || defined(M_I86MM) /* MSC small or medium model */ # define SMALL_MEDIUM # ifdef _MSC_VER # define FAR _far # else # define FAR far # endif # endif # if (defined(__SMALL__) || defined(__MEDIUM__)) /* Turbo C small or medium model */ # define SMALL_MEDIUM # ifdef __BORLANDC__ # define FAR _far # else # define FAR far # endif # endif #endif #if defined(WINDOWS) || defined(WIN32) /* If building or using zlib as a DLL, define ZLIB_DLL. * This is not mandatory, but it offers a little performance increase. */ # ifdef ZLIB_DLL # if defined(WIN32) && (!defined(__BORLANDC__) || (__BORLANDC__ >= 0x500)) # ifdef ZLIB_INTERNAL # define ZEXTERN extern __declspec(dllexport) # else # define ZEXTERN extern __declspec(dllimport) # endif # endif # endif /* ZLIB_DLL */ /* If building or using zlib with the WINAPI/WINAPIV calling convention, * define ZLIB_WINAPI. * Caution: the standard ZLIB1.DLL is NOT compiled using ZLIB_WINAPI. */ # ifdef ZLIB_WINAPI # ifdef FAR # undef FAR # endif # include <windows.h> /* No need for _export, use ZLIB.DEF instead. */ /* For complete Windows compatibility, use WINAPI, not __stdcall.
*/ # define ZEXPORT WINAPI # ifdef WIN32 # define ZEXPORTVA WINAPIV # else # define ZEXPORTVA FAR CDECL # endif # endif #endif #if defined (__BEOS__) # ifdef ZLIB_DLL # ifdef ZLIB_INTERNAL # define ZEXPORT __declspec(dllexport) # define ZEXPORTVA __declspec(dllexport) # else # define ZEXPORT __declspec(dllimport) # define ZEXPORTVA __declspec(dllimport) # endif # endif #endif #ifndef ZEXTERN # define ZEXTERN extern #endif #ifndef ZEXPORT # define ZEXPORT #endif #ifndef ZEXPORTVA # define ZEXPORTVA #endif #ifndef FAR # define FAR #endif #if !defined(__MACTYPES__) typedef unsigned char Byte; /* 8 bits */ #endif typedef unsigned int uInt; /* 16 bits or more */ typedef unsigned long uLong; /* 32 bits or more */ #ifdef SMALL_MEDIUM /* Borland C/C++ and some old MSC versions ignore FAR inside typedef */ # define Bytef Byte FAR #else typedef Byte FAR Bytef; #endif typedef char FAR charf; typedef int FAR intf; typedef uInt FAR uIntf; typedef uLong FAR uLongf; #ifdef STDC typedef void const *voidpc; typedef void FAR *voidpf; typedef void *voidp; #else typedef Byte const *voidpc; typedef Byte FAR *voidpf; typedef Byte *voidp; #endif #if !defined(Z_U4) && !defined(Z_SOLO) && defined(STDC) # include <limits.h> # if (UINT_MAX == 0xffffffffUL) # define Z_U4 unsigned # elif (ULONG_MAX == 0xffffffffUL) # define Z_U4 unsigned long # elif (USHRT_MAX == 0xffffffffUL) # define Z_U4 unsigned short # endif #endif #ifdef Z_U4 typedef Z_U4 z_crc_t; #else typedef unsigned long z_crc_t; #endif #ifdef HAVE_UNISTD_H /* may be set to #if 1 by ./configure */ # define Z_HAVE_UNISTD_H #endif #ifdef HAVE_STDARG_H /* may be set to #if 1 by ./configure */ # define Z_HAVE_STDARG_H #endif #ifdef STDC # ifndef Z_SOLO # include <sys/types.h> /* for off_t */ # endif #endif #if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifndef Z_SOLO # include <stdarg.h> /* for va_list */ # endif #endif #ifdef _WIN32 # ifndef Z_SOLO # include <stddef.h> /* for wchar_t */ # endif #endif /* a little trick to accommodate both "#define _LARGEFILE64_SOURCE" and * "#define _LARGEFILE64_SOURCE 1" as requesting 64-bit operations, (even * though the former does not conform to the LFS document), but considering * both "#undef _LARGEFILE64_SOURCE" and "#define _LARGEFILE64_SOURCE 0" as * equivalently requesting no 64-bit operations */ #if defined(_LARGEFILE64_SOURCE) && -_LARGEFILE64_SOURCE - -1 == 1 # undef _LARGEFILE64_SOURCE #endif #if defined(__WATCOMC__) && !defined(Z_HAVE_UNISTD_H) # define Z_HAVE_UNISTD_H #endif #ifndef Z_SOLO # if defined(Z_HAVE_UNISTD_H) || defined(_LARGEFILE64_SOURCE) # include <unistd.h> /* for SEEK_*, off_t, and _LFS64_LARGEFILE */ # ifdef VMS # include <unixio.h> /* for off_t */ # endif # ifndef z_off_t # define z_off_t off_t # endif # endif #endif #if defined(_LFS64_LARGEFILE) && _LFS64_LARGEFILE-0 # define Z_LFS64 #endif #if defined(_LARGEFILE64_SOURCE) && defined(Z_LFS64) # define Z_LARGE64 #endif #if defined(_FILE_OFFSET_BITS) && _FILE_OFFSET_BITS-0 == 64 && defined(Z_LFS64) # define Z_WANT64 #endif #if !defined(SEEK_SET) && !defined(Z_SOLO) # define SEEK_SET 0 /* Seek from beginning of file. */ # define SEEK_CUR 1 /* Seek from current position.
*/ # define SEEK_END 2 /* Set file pointer to EOF plus "offset" */ #endif #ifndef z_off_t # define z_off_t long #endif #if !defined(_WIN32) && defined(Z_LARGE64) # define z_off64_t off64_t #else # if defined(_WIN32) && !defined(__GNUC__) && !defined(Z_SOLO) # define z_off64_t __int64 # else # define z_off64_t z_off_t # endif #endif /* MVS linker does not support external names larger than 8 bytes */ #if defined(__MVS__) #pragma map(deflateInit_,"DEIN") #pragma map(deflateInit2_,"DEIN2") #pragma map(deflateEnd,"DEEND") #pragma map(deflateBound,"DEBND") #pragma map(inflateInit_,"ININ") #pragma map(inflateInit2_,"ININ2") #pragma map(inflateEnd,"INEND") #pragma map(inflateSync,"INSY") #pragma map(inflateSetDictionary,"INSEDI") #pragma map(compressBound,"CMBND") #pragma map(inflate_table,"INTABL") #pragma map(inflate_fast,"INFA") #pragma map(inflate_copyright,"INCOPY") #endif #endif /* ZCONF_H */ mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/zlib.h0000644000076500000240000027375713572250760020616 0ustar alcaeusstaff/* zlib.h -- interface of the 'zlib' general purpose compression library version 1.2.11, January 15th, 2017 Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. Jean-loup Gailly Mark Adler jloup@gzip.org madler@alumni.caltech.edu The data format used by the zlib library is described by RFCs (Request for Comments) 1950 to 1952 in the files http://tools.ietf.org/html/rfc1950 (zlib format), rfc1951 (deflate format) and rfc1952 (gzip format). */ #ifndef ZLIB_H #define ZLIB_H #include "zconf.h" #ifdef __cplusplus extern "C" { #endif #define ZLIB_VERSION "1.2.11" #define ZLIB_VERNUM 0x12b0 #define ZLIB_VER_MAJOR 1 #define ZLIB_VER_MINOR 2 #define ZLIB_VER_REVISION 11 #define ZLIB_VER_SUBREVISION 0 /* The 'zlib' compression library provides in-memory compression and decompression functions, including integrity checks of the uncompressed data. This version of the library supports only one compression method (deflation) but other algorithms will be added later and will have the same stream interface. Compression can be done in a single step if the buffers are large enough, or can be done by repeated calls of the compression function. In the latter case, the application must provide more input and/or consume the output (providing more output space) before each call. The compressed data format used by default by the in-memory functions is the zlib format, which is a zlib wrapper documented in RFC 1950, wrapped around a deflate stream, which is itself documented in RFC 1951. The library also supports reading and writing files in gzip (.gz) format with an interface similar to that of stdio using the functions that start with "gz". The gzip format is different from the zlib format. 
gzip is a gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. This library can optionally read and write gzip and raw deflate streams in memory as well. The zlib format was designed to be compact and fast for use in memory and on communications channels. The gzip format was designed for single- file compression on file systems, has a larger header than zlib to maintain directory information, and uses a different, slower check method than zlib. The library does not install any signal handler. The decoder checks the consistency of the compressed data, so the library should never crash even in the case of corrupted input. */ typedef voidpf (*alloc_func) OF((voidpf opaque, uInt items, uInt size)); typedef void (*free_func) OF((voidpf opaque, voidpf address)); struct internal_state; typedef struct z_stream_s { z_const Bytef *next_in; /* next input byte */ uInt avail_in; /* number of bytes available at next_in */ uLong total_in; /* total number of input bytes read so far */ Bytef *next_out; /* next output byte will go here */ uInt avail_out; /* remaining free space at next_out */ uLong total_out; /* total number of bytes output so far */ z_const char *msg; /* last error message, NULL if no error */ struct internal_state FAR *state; /* not visible by applications */ alloc_func zalloc; /* used to allocate the internal state */ free_func zfree; /* used to free the internal state */ voidpf opaque; /* private data object passed to zalloc and zfree */ int data_type; /* best guess about the data type: binary or text for deflate, or the decoding state for inflate */ uLong adler; /* Adler-32 or CRC-32 value of the uncompressed data */ uLong reserved; /* reserved for future use */ } z_stream; typedef z_stream FAR *z_streamp; /* gzip header information passed to and from zlib routines. See RFC 1952 for more details on the meanings of these fields. */ typedef struct gz_header_s { int text; /* true if compressed data believed to be text */ uLong time; /* modification time */ int xflags; /* extra flags (not used when writing a gzip file) */ int os; /* operating system */ Bytef *extra; /* pointer to extra field or Z_NULL if none */ uInt extra_len; /* extra field length (valid if extra != Z_NULL) */ uInt extra_max; /* space at extra (only when reading header) */ Bytef *name; /* pointer to zero-terminated file name or Z_NULL */ uInt name_max; /* space at name (only when reading header) */ Bytef *comment; /* pointer to zero-terminated comment or Z_NULL */ uInt comm_max; /* space at comment (only when reading header) */ int hcrc; /* true if there was or will be a header crc */ int done; /* true when done reading gzip header (not used when writing a gzip file) */ } gz_header; typedef gz_header FAR *gz_headerp; /* The application must update next_in and avail_in when avail_in has dropped to zero. It must update next_out and avail_out when avail_out has dropped to zero. The application must initialize zalloc, zfree and opaque before calling the init function. All other fields are set by the compression library and must not be updated by the application. The opaque value provided by the application will be passed as the first parameter for calls of zalloc and zfree. This can be useful for custom memory management. The compression library attaches no meaning to the opaque value. zalloc must return Z_NULL if there is not enough memory for the object. If zlib is used in a multi-threaded application, zalloc and zfree must be thread safe. In that case, zlib is thread-safe. 
When zalloc and zfree are Z_NULL on entry to the initialization function, they are set to internal routines that use the standard library functions malloc() and free(). On 16-bit systems, the functions zalloc and zfree must be able to allocate exactly 65536 bytes, but will not be required to allocate more than this if the symbol MAXSEG_64K is defined (see zconf.h). WARNING: On MSDOS, pointers returned by zalloc for objects of exactly 65536 bytes *must* have their offset normalized to zero. The default allocation function provided by this library ensures this (see zutil.c). To reduce memory requirements and avoid any allocation of 64K objects, at the expense of compression ratio, compile the library with -DMAX_WBITS=14 (see zconf.h). The fields total_in and total_out can be used for statistics or progress reports. After compression, total_in holds the total size of the uncompressed data and may be saved for use by the decompressor (particularly if the decompressor wants to decompress everything in a single step). */ /* constants */ #define Z_NO_FLUSH 0 #define Z_PARTIAL_FLUSH 1 #define Z_SYNC_FLUSH 2 #define Z_FULL_FLUSH 3 #define Z_FINISH 4 #define Z_BLOCK 5 #define Z_TREES 6 /* Allowed flush values; see deflate() and inflate() below for details */ #define Z_OK 0 #define Z_STREAM_END 1 #define Z_NEED_DICT 2 #define Z_ERRNO (-1) #define Z_STREAM_ERROR (-2) #define Z_DATA_ERROR (-3) #define Z_MEM_ERROR (-4) #define Z_BUF_ERROR (-5) #define Z_VERSION_ERROR (-6) /* Return codes for the compression/decompression functions. Negative values * are errors, positive values are used for special but normal events. */ #define Z_NO_COMPRESSION 0 #define Z_BEST_SPEED 1 #define Z_BEST_COMPRESSION 9 #define Z_DEFAULT_COMPRESSION (-1) /* compression levels */ #define Z_FILTERED 1 #define Z_HUFFMAN_ONLY 2 #define Z_RLE 3 #define Z_FIXED 4 #define Z_DEFAULT_STRATEGY 0 /* compression strategy; see deflateInit2() below for details */ #define Z_BINARY 0 #define Z_TEXT 1 #define Z_ASCII Z_TEXT /* for compatibility with 1.2.2 and earlier */ #define Z_UNKNOWN 2 /* Possible values of the data_type field for deflate() */ #define Z_DEFLATED 8 /* The deflate compression method (the only one supported in this version) */ #define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ #define zlib_version zlibVersion() /* for compatibility with versions < 1.0.2 */ /* basic functions */ ZEXTERN const char * ZEXPORT zlibVersion OF((void)); /* The application can compare zlibVersion and ZLIB_VERSION for consistency. If the first character differs, the library code actually used is not compatible with the zlib.h header file used by the application. This check is automatically made by deflateInit and inflateInit. */ /* ZEXTERN int ZEXPORT deflateInit OF((z_streamp strm, int level)); Initializes the internal stream state for compression. The fields zalloc, zfree and opaque must be initialized before by the caller. If zalloc and zfree are set to Z_NULL, deflateInit updates them to use default allocation functions. The compression level must be Z_DEFAULT_COMPRESSION, or between 0 and 9: 1 gives best speed, 9 gives best compression, 0 gives no compression at all (the input data is simply copied a block at a time). Z_DEFAULT_COMPRESSION requests a default compromise between speed and compression (currently equivalent to level 6). 
deflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if level is not a valid compression level, or Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible with the version assumed by the caller (ZLIB_VERSION). msg is set to null if there is no error message. deflateInit does not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflate OF((z_streamp strm, int flush)); /* deflate compresses as much data as possible, and stops when the input buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. The detailed semantics are as follows. deflate performs one or both of the following actions: - Compress more input starting at next_in and update next_in and avail_in accordingly. If not all input can be processed (because there is not enough room in the output buffer), next_in and avail_in are updated and processing will resume at this point for the next call of deflate(). - Generate more output starting at next_out and update next_out and avail_out accordingly. This action is forced if the parameter flush is non zero. Forcing flush frequently degrades the compression ratio, so this parameter should be set only when necessary. Some output may be provided even if flush is zero. Before the call of deflate(), the application should ensure that at least one of the actions is possible, by providing more input and/or consuming more output, and updating avail_in or avail_out accordingly; avail_out should never be zero before the call. The application can consume the compressed output when it wants, for example when the output buffer is full (avail_out == 0), or after each call of deflate(). If deflate returns Z_OK and with zero avail_out, it must be called again after making room in the output buffer because there might be more output pending. See deflatePending(), which can be used if desired to determine whether or not there is more ouput in that case. Normally the parameter flush is set to Z_NO_FLUSH, which allows deflate to decide how much data to accumulate before producing output, in order to maximize compression. If the parameter flush is set to Z_SYNC_FLUSH, all pending output is flushed to the output buffer and the output is aligned on a byte boundary, so that the decompressor can get all input data available so far. (In particular avail_in is zero after the call if enough output space has been provided before the call.) Flushing may degrade compression for some compression algorithms and so it should be used only when necessary. This completes the current deflate block and follows it with an empty stored block that is three bits plus filler bits to the next byte, followed by four bytes (00 00 ff ff). If flush is set to Z_PARTIAL_FLUSH, all pending output is flushed to the output buffer, but the output is not aligned to a byte boundary. All of the input data so far will be available to the decompressor, as for Z_SYNC_FLUSH. This completes the current deflate block and follows it with an empty fixed codes block that is 10 bits long. This assures that enough bytes are output in order for the decompressor to finish the block before the empty fixed codes block. 
If flush is set to Z_BLOCK, a deflate block is completed and emitted, as for Z_SYNC_FLUSH, but the output is not aligned on a byte boundary, and up to seven bits of the current block are held to be written as the next byte after the next deflate block is completed. In this case, the decompressor may not be provided enough bits at this point in order to complete decompression of the data provided so far to the compressor. It may need to wait for the next block to be emitted. This is for advanced applications that need to control the emission of deflate blocks. If flush is set to Z_FULL_FLUSH, all output is flushed as with Z_SYNC_FLUSH, and the compression state is reset so that decompression can restart from this point if previous compressed data has been damaged or if random access is desired. Using Z_FULL_FLUSH too often can seriously degrade compression. If deflate returns with avail_out == 0, this function must be called again with the same value of the flush parameter and more output space (updated avail_out), until the flush is complete (deflate returns with non-zero avail_out). In the case of a Z_FULL_FLUSH or Z_SYNC_FLUSH, make sure that avail_out is greater than six to avoid repeated flush markers due to avail_out == 0 on return. If the parameter flush is set to Z_FINISH, pending input is processed, pending output is flushed and deflate returns with Z_STREAM_END if there was enough output space. If deflate returns with Z_OK or Z_BUF_ERROR, this function must be called again with Z_FINISH and more output space (updated avail_out) but no more input data, until it returns with Z_STREAM_END or an error. After deflate has returned Z_STREAM_END, the only possible operations on the stream are deflateReset or deflateEnd. Z_FINISH can be used in the first deflate call after deflateInit if all the compression is to be done in a single step. In order to complete in one call, avail_out must be at least the value returned by deflateBound (see below). Then deflate is guaranteed to return Z_STREAM_END. If not enough output space is provided, deflate will not return Z_STREAM_END, and it must be called again as described above. deflate() sets strm->adler to the Adler-32 checksum of all input read so far (that is, total_in bytes). If a gzip stream is being generated, then strm->adler will be the CRC-32 checksum of the input read so far. (See deflateInit2 below.) deflate() may update strm->data_type if it can make a good guess about the input data type (Z_BINARY or Z_TEXT). If in doubt, the data is considered binary. This field is only for information purposes and does not affect the compression algorithm in any manner. deflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if all input has been consumed and all output has been produced (only when flush is set to Z_FINISH), Z_STREAM_ERROR if the stream state was inconsistent (for example if next_in or next_out was Z_NULL or the state was inadvertently written over by the application), or Z_BUF_ERROR if no progress is possible (for example avail_in or avail_out was zero). Note that Z_BUF_ERROR is not fatal, and deflate() can be called again with more input and more output space to continue compressing. */ ZEXTERN int ZEXPORT deflateEnd OF((z_streamp strm)); /* All dynamically allocated data structures for this stream are freed. This function discards any unprocessed input and does not flush any pending output. 
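A minimal sketch of the deflate() loop described above, compressing one in-memory buffer to a FILE * in fixed-size chunks; CHUNK, the function name, and the error handling are illustrative, not part of zlib.h:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK 16384

/* Compress src[0..srclen-1] to out; returns Z_OK or a zlib error code. */
static int compress_to_file(FILE *out, const unsigned char *src, uLong srclen)
{
    z_stream strm;
    unsigned char buf[CHUNK];
    int ret;

    memset(&strm, 0, sizeof(strm));
    strm.zalloc = Z_NULL;            /* use the default allocation functions */
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    ret = deflateInit(&strm, Z_DEFAULT_COMPRESSION);
    if (ret != Z_OK)
        return ret;

    strm.next_in  = (Bytef *)src;
    strm.avail_in = (uInt)srclen;
    do {                                 /* run deflate() until all output is flushed */
        strm.next_out  = buf;
        strm.avail_out = CHUNK;
        ret = deflate(&strm, Z_FINISH);  /* all input is present, so finish in one pass */
        if (ret == Z_STREAM_ERROR)
            break;
        fwrite(buf, 1, CHUNK - strm.avail_out, out);
    } while (strm.avail_out == 0);       /* avail_out == 0 means more output is pending */

    deflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : ret;
}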
deflateEnd returns Z_OK if success, Z_STREAM_ERROR if the stream state was inconsistent, Z_DATA_ERROR if the stream was freed prematurely (some input or output was discarded). In the error case, msg may be set but then points to a static string (which must not be deallocated). */ /* ZEXTERN int ZEXPORT inflateInit OF((z_streamp strm)); Initializes the internal stream state for decompression. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by the caller. In the current version of inflate, the provided input is not read or consumed. The allocation of a sliding window will be deferred to the first call of inflate (if the decompression does not complete on the first call). If zalloc and zfree are set to Z_NULL, inflateInit updates them to use default allocation functions. inflateInit returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_VERSION_ERROR if the zlib library version is incompatible with the version assumed by the caller, or Z_STREAM_ERROR if the parameters are invalid, such as a null pointer to the structure. msg is set to null if there is no error message. inflateInit does not perform any decompression. Actual decompression will be done by inflate(). So next_in, and avail_in, next_out, and avail_out are unused and unchanged. The current implementation of inflateInit() does not process any header information -- that is deferred until inflate() is called. */ ZEXTERN int ZEXPORT inflate OF((z_streamp strm, int flush)); /* inflate decompresses as much data as possible, and stops when the input buffer becomes empty or the output buffer becomes full. It may introduce some output latency (reading input without producing any output) except when forced to flush. The detailed semantics are as follows. inflate performs one or both of the following actions: - Decompress more input starting at next_in and update next_in and avail_in accordingly. If not all input can be processed (because there is not enough room in the output buffer), then next_in and avail_in are updated accordingly, and processing will resume at this point for the next call of inflate(). - Generate more output starting at next_out and update next_out and avail_out accordingly. inflate() provides as much output as possible, until there is no more input data or no more space in the output buffer (see below about the flush parameter). Before the call of inflate(), the application should ensure that at least one of the actions is possible, by providing more input and/or consuming more output, and updating the next_* and avail_* values accordingly. If the caller of inflate() does not provide both available input and available output space, it is possible that there will be no progress made. The application can consume the uncompressed output when it wants, for example when the output buffer is full (avail_out == 0), or after each call of inflate(). If inflate returns Z_OK and with zero avail_out, it must be called again after making room in the output buffer because there might be more output pending. The flush parameter of inflate() can be Z_NO_FLUSH, Z_SYNC_FLUSH, Z_FINISH, Z_BLOCK, or Z_TREES. Z_SYNC_FLUSH requests that inflate() flush as much output as possible to the output buffer. Z_BLOCK requests that inflate() stop if and when it gets to the next deflate block boundary. When decoding the zlib or gzip format, this will cause inflate() to return immediately after the header and before the first block. 
When doing a raw inflate, inflate() will go ahead and process the first block, and will return when it gets to the end of that block, or when it runs out of data. The Z_BLOCK option assists in appending to or combining deflate streams. To assist in this, on return inflate() always sets strm->data_type to the number of unused bits in the last byte taken from strm->next_in, plus 64 if inflate() is currently decoding the last block in the deflate stream, plus 128 if inflate() returned immediately after decoding an end-of-block code or decoding the complete header up to just before the first byte of the deflate stream. The end-of-block will not be indicated until all of the uncompressed data from that block has been written to strm->next_out. The number of unused bits may in general be greater than seven, except when bit 7 of data_type is set, in which case the number of unused bits will be less than eight. data_type is set as noted here every time inflate() returns for all flush options, and so can be used to determine the amount of currently consumed input in bits. The Z_TREES option behaves as Z_BLOCK does, but it also returns when the end of each deflate block header is reached, before any actual data in that block is decoded. This allows the caller to determine the length of the deflate block header for later use in random access within a deflate block. 256 is added to the value of strm->data_type when inflate() returns immediately after reaching the end of the deflate block header. inflate() should normally be called until it returns Z_STREAM_END or an error. However if all decompression is to be performed in a single step (a single call of inflate), the parameter flush should be set to Z_FINISH. In this case all pending input is processed and all pending output is flushed; avail_out must be large enough to hold all of the uncompressed data for the operation to complete. (The size of the uncompressed data may have been saved by the compressor for this purpose.) The use of Z_FINISH is not required to perform an inflation in one step. However it may be used to inform inflate that a faster approach can be used for the single inflate() call. Z_FINISH also informs inflate to not maintain a sliding window if the stream completes, which reduces inflate's memory footprint. If the stream does not complete, either because not all of the stream is provided or not enough output space is provided, then a sliding window will be allocated and inflate() can be called again to continue the operation as if Z_NO_FLUSH had been used. In this implementation, inflate() always flushes as much output as possible to the output buffer, and always uses the faster approach on the first call. So the effects of the flush parameter in this implementation are on the return value of inflate() as noted below, when inflate() returns early when Z_BLOCK or Z_TREES is used, and when inflate() avoids the allocation of memory for a sliding window when Z_FINISH is used. If a preset dictionary is needed after this call (see inflateSetDictionary below), inflate sets strm->adler to the Adler-32 checksum of the dictionary chosen by the compressor and returns Z_NEED_DICT; otherwise it sets strm->adler to the Adler-32 checksum of all output produced so far (that is, total_out bytes) and returns Z_OK, Z_STREAM_END or an error code as described below. At the end of the stream, inflate() checks that its computed Adler-32 checksum is equal to that saved by the compressor and returns Z_STREAM_END only if the checksum is correct. 
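A corresponding sketch of the inflate() loop, decompressing a complete zlib stream held in memory and writing the result to a FILE *; the names and the buffer size are illustrative:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK 16384

static int decompress_to_file(FILE *out, const unsigned char *src, uLong srclen)
{
    z_stream strm;
    unsigned char buf[CHUNK];
    int ret;

    memset(&strm, 0, sizeof(strm));
    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    strm.next_in  = (Bytef *)src;     /* next_in/avail_in must be set before inflateInit */
    strm.avail_in = (uInt)srclen;
    ret = inflateInit(&strm);
    if (ret != Z_OK)
        return ret;

    do {                              /* run inflate() until the stream ends or errors */
        strm.next_out  = buf;
        strm.avail_out = CHUNK;
        ret = inflate(&strm, Z_NO_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END)
            break;                    /* Z_NEED_DICT, Z_DATA_ERROR, Z_MEM_ERROR, ... */
        fwrite(buf, 1, CHUNK - strm.avail_out, out);
    } while (ret != Z_STREAM_END);

    inflateEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : ret;
}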
inflate() can decompress and check either zlib-wrapped or gzip-wrapped deflate data. The header type is detected automatically, if requested when initializing with inflateInit2(). Any information contained in the gzip header is not retained unless inflateGetHeader() is used. When processing gzip-wrapped deflate data, strm->adler32 is set to the CRC-32 of the output produced so far. The CRC-32 is checked against the gzip trailer, as is the uncompressed length, modulo 2^32. inflate() returns Z_OK if some progress has been made (more input processed or more output produced), Z_STREAM_END if the end of the compressed data has been reached and all uncompressed output has been produced, Z_NEED_DICT if a preset dictionary is needed at this point, Z_DATA_ERROR if the input data was corrupted (input stream not conforming to the zlib format or incorrect check value, in which case strm->msg points to a string with a more specific error), Z_STREAM_ERROR if the stream structure was inconsistent (for example next_in or next_out was Z_NULL, or the state was inadvertently written over by the application), Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if no progress was possible or if there was not enough room in the output buffer when Z_FINISH is used. Note that Z_BUF_ERROR is not fatal, and inflate() can be called again with more input and more output space to continue decompressing. If Z_DATA_ERROR is returned, the application may then call inflateSync() to look for a good compression block if a partial recovery of the data is to be attempted. */ ZEXTERN int ZEXPORT inflateEnd OF((z_streamp strm)); /* All dynamically allocated data structures for this stream are freed. This function discards any unprocessed input and does not flush any pending output. inflateEnd returns Z_OK if success, or Z_STREAM_ERROR if the stream state was inconsistent. */ /* Advanced functions */ /* The following functions are needed only in some special applications. */ /* ZEXTERN int ZEXPORT deflateInit2 OF((z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy)); This is another version of deflateInit with more compression options. The fields next_in, zalloc, zfree and opaque must be initialized before by the caller. The method parameter is the compression method. It must be Z_DEFLATED in this version of the library. The windowBits parameter is the base two logarithm of the window size (the size of the history buffer). It should be in the range 8..15 for this version of the library. Larger values of this parameter result in better compression at the expense of memory usage. The default value is 15 if deflateInit is used instead. For the current implementation of deflate(), a windowBits value of 8 (a window size of 256 bytes) is not supported. As a result, a request for 8 will result in 9 (a 512-byte window). In that case, providing 8 to inflateInit2() will result in an error when the zlib header with 9 is checked against the initialization of inflate(). The remedy is to not use 8 with deflateInit2() with this initialization, or at least in that case use 9 with inflateInit2(). windowBits can also be -8..-15 for raw deflate. In this case, -windowBits determines the window size. deflate() will then generate raw deflate data with no zlib header or trailer, and will not compute a check value. windowBits can also be greater than 15 for optional gzip encoding. Add 16 to windowBits to write a simple gzip header and trailer around the compressed data instead of a zlib wrapper. 
The gzip header will have no file name, no extra data, no comment, no modification time (set to zero), no header crc, and the operating system will be set to the appropriate value, if the operating system was determined at compile time. If a gzip stream is being written, strm->adler is a CRC-32 instead of an Adler-32. For raw deflate or gzip encoding, a request for a 256-byte window is rejected as invalid, since only the zlib header provides a means of transmitting the window size to the decompressor. The memLevel parameter specifies how much memory should be allocated for the internal compression state. memLevel=1 uses minimum memory but is slow and reduces compression ratio; memLevel=9 uses maximum memory for optimal speed. The default value is 8. See zconf.h for total memory usage as a function of windowBits and memLevel. The strategy parameter is used to tune the compression algorithm. Use the value Z_DEFAULT_STRATEGY for normal data, Z_FILTERED for data produced by a filter (or predictor), Z_HUFFMAN_ONLY to force Huffman encoding only (no string match), or Z_RLE to limit match distances to one (run-length encoding). Filtered data consists mostly of small values with a somewhat random distribution. In this case, the compression algorithm is tuned to compress them better. The effect of Z_FILTERED is to force more Huffman coding and less string matching; it is somewhat intermediate between Z_DEFAULT_STRATEGY and Z_HUFFMAN_ONLY. Z_RLE is designed to be almost as fast as Z_HUFFMAN_ONLY, but give better compression for PNG image data. The strategy parameter only affects the compression ratio but not the correctness of the compressed output even if it is not set appropriately. Z_FIXED prevents the use of dynamic Huffman codes, allowing for a simpler decoder for special applications. deflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if any parameter is invalid (such as an invalid method), or Z_VERSION_ERROR if the zlib library version (zlib_version) is incompatible with the version assumed by the caller (ZLIB_VERSION). msg is set to null if there is no error message. deflateInit2 does not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateSetDictionary OF((z_streamp strm, const Bytef *dictionary, uInt dictLength)); /* Initializes the compression dictionary from the given byte sequence without producing any compressed output. When using the zlib format, this function must be called immediately after deflateInit, deflateInit2 or deflateReset, and before any call of deflate. When doing raw deflate, this function must be called either before any call of deflate, or immediately after the completion of a deflate block, i.e. after all input has been consumed and all output has been delivered when using any of the flush options Z_BLOCK, Z_PARTIAL_FLUSH, Z_SYNC_FLUSH, or Z_FULL_FLUSH. The compressor and decompressor must use exactly the same dictionary (see inflateSetDictionary). The dictionary should consist of strings (byte sequences) that are likely to be encountered later in the data to be compressed, with the most commonly used strings preferably put towards the end of the dictionary. Using a dictionary is most useful when the data to be compressed is short and can be predicted with good accuracy; the data can then be compressed better than with the default empty dictionary. 
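A sketch of deflateInit2() set up for gzip output as described above: 15 window bits plus 16 selects the gzip wrapper, with the documented default memLevel of 8 and the default strategy. The helper name is illustrative:

#include <string.h>
#include <zlib.h>

static int start_gzip_deflate(z_stream *strm)
{
    memset(strm, 0, sizeof(*strm));
    strm->zalloc = Z_NULL;
    strm->zfree  = Z_NULL;
    strm->opaque = Z_NULL;
    /* 15 + 16: 32K window, gzip header/trailer instead of a zlib wrapper */
    return deflateInit2(strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                        15 + 16, 8, Z_DEFAULT_STRATEGY);
}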
Depending on the size of the compression data structures selected by deflateInit or deflateInit2, a part of the dictionary may in effect be discarded, for example if the dictionary is larger than the window size provided in deflateInit or deflateInit2. Thus the strings most likely to be useful should be put at the end of the dictionary, not at the front. In addition, the current implementation of deflate will use at most the window size minus 262 bytes of the provided dictionary. Upon return of this function, strm->adler is set to the Adler-32 value of the dictionary; the decompressor may later use this value to determine which dictionary has been used by the compressor. (The Adler-32 value applies to the whole dictionary even if only a subset of the dictionary is actually used by the compressor.) If a raw deflate was requested, then the Adler-32 value is not computed and strm->adler is not set. deflateSetDictionary returns Z_OK if success, or Z_STREAM_ERROR if a parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent (for example if deflate has already been called for this stream or if not at a block boundary for raw deflate). deflateSetDictionary does not perform any compression: this will be done by deflate(). */ ZEXTERN int ZEXPORT deflateGetDictionary OF((z_streamp strm, Bytef *dictionary, uInt *dictLength)); /* Returns the sliding dictionary being maintained by deflate. dictLength is set to the number of bytes in the dictionary, and that many bytes are copied to dictionary. dictionary must have enough space, where 32768 bytes is always enough. If deflateGetDictionary() is called with dictionary equal to Z_NULL, then only the dictionary length is returned, and nothing is copied. Similary, if dictLength is Z_NULL, then it is not set. deflateGetDictionary() may return a length less than the window size, even when more than the window size in input has been provided. It may return up to 258 bytes less in that case, due to how zlib's implementation of deflate manages the sliding window and lookahead for matches, where matches can be up to 258 bytes long. If the application needs the last window-size bytes of input, then that would need to be saved by the application outside of zlib. deflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the stream state is inconsistent. */ ZEXTERN int ZEXPORT deflateCopy OF((z_streamp dest, z_streamp source)); /* Sets the destination stream as a complete copy of the source stream. This function can be useful when several compression strategies will be tried, for example when there are several ways of pre-processing the input data with a filter. The streams that will be discarded should then be freed by calling deflateEnd. Note that deflateCopy duplicates the internal compression state which can be quite large, so this strategy is slow and can consume lots of memory. deflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc being Z_NULL). msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT deflateReset OF((z_streamp strm)); /* This function is equivalent to deflateEnd followed by deflateInit, but does not free and reallocate the internal compression state. The stream will leave the compression level and any other attributes that may have been set unchanged. 
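A sketch of querying the sliding dictionary with deflateGetDictionary() as described above, first asking only for the length by passing Z_NULL for the buffer; the allocation strategy is illustrative:

#include <stdlib.h>
#include <zlib.h>

static Bytef *copy_deflate_dictionary(z_stream *strm, uInt *dictlen)
{
    Bytef *dict;

    /* With dictionary == Z_NULL only the length is returned. */
    if (deflateGetDictionary(strm, Z_NULL, dictlen) != Z_OK)
        return NULL;
    dict = malloc(*dictlen ? *dictlen : 1);
    if (dict != NULL && deflateGetDictionary(strm, dict, dictlen) != Z_OK) {
        free(dict);
        dict = NULL;
    }
    return dict;              /* caller frees; at most 32768 bytes per the text above */
}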
deflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being Z_NULL). */ ZEXTERN int ZEXPORT deflateParams OF((z_streamp strm, int level, int strategy)); /* Dynamically update the compression level and compression strategy. The interpretation of level and strategy is as in deflateInit2(). This can be used to switch between compression and straight copy of the input data, or to switch to a different kind of input data requiring a different strategy. If the compression approach (which is a function of the level) or the strategy is changed, and if any input has been consumed in a previous deflate() call, then the input available so far is compressed with the old level and strategy using deflate(strm, Z_BLOCK). There are three approaches for the compression levels 0, 1..3, and 4..9 respectively. The new level and strategy will take effect at the next call of deflate(). If a deflate(strm, Z_BLOCK) is performed by deflateParams(), and it does not have enough output space to complete, then the parameter change will not take effect. In this case, deflateParams() can be called again with the same parameters and more output space to try again. In order to assure a change in the parameters on the first try, the deflate stream should be flushed using deflate() with Z_BLOCK or other flush request until strm.avail_out is not zero, before calling deflateParams(). Then no more input data should be provided before the deflateParams() call. If this is done, the old level and strategy will be applied to the data compressed before deflateParams(), and the new level and strategy will be applied to the the data compressed after deflateParams(). deflateParams returns Z_OK on success, Z_STREAM_ERROR if the source stream state was inconsistent or if a parameter was invalid, or Z_BUF_ERROR if there was not enough output space to complete the compression of the available input data before a change in the strategy or approach. Note that in the case of a Z_BUF_ERROR, the parameters are not changed. A return value of Z_BUF_ERROR is not fatal, in which case deflateParams() can be retried with more output space. */ ZEXTERN int ZEXPORT deflateTune OF((z_streamp strm, int good_length, int max_lazy, int nice_length, int max_chain)); /* Fine tune deflate's internal compression parameters. This should only be used by someone who understands the algorithm used by zlib's deflate for searching for the best matching string, and even then only by the most fanatic optimizer trying to squeeze out the last compressed bit for their specific input data. Read the deflate.c source code for the meaning of the max_lazy, good_length, nice_length, and max_chain parameters. deflateTune() can be called after deflateInit() or deflateInit2(), and returns Z_OK on success, or Z_STREAM_ERROR for an invalid deflate stream. */ ZEXTERN uLong ZEXPORT deflateBound OF((z_streamp strm, uLong sourceLen)); /* deflateBound() returns an upper bound on the compressed size after deflation of sourceLen bytes. It must be called after deflateInit() or deflateInit2(), and after deflateSetHeader(), if used. This would be used to allocate an output buffer for deflation in a single pass, and so would be called before deflate(). If that first deflate() call is provided the sourceLen input bytes, an output buffer allocated to the size returned by deflateBound(), and the flush value Z_FINISH, then deflate() is guaranteed to return Z_STREAM_END. 
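A sketch of single-call compression sized with deflateBound(), relying on the guarantee described above that a deflateBound()-sized output buffer plus Z_FINISH yields Z_STREAM_END; the allocation and names are illustrative:

#include <stdlib.h>
#include <string.h>
#include <zlib.h>

static unsigned char *deflate_one_shot(const unsigned char *src, uLong srclen,
                                       uLong *outlen)
{
    z_stream strm;
    unsigned char *out;
    uLong bound;

    memset(&strm, 0, sizeof(strm));
    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    if (deflateInit(&strm, Z_BEST_COMPRESSION) != Z_OK)
        return NULL;

    bound = deflateBound(&strm, srclen);   /* worst-case compressed size */
    out = malloc(bound);
    if (out != NULL) {
        strm.next_in   = (Bytef *)src;
        strm.avail_in  = (uInt)srclen;
        strm.next_out  = out;
        strm.avail_out = (uInt)bound;
        if (deflate(&strm, Z_FINISH) == Z_STREAM_END) {
            *outlen = strm.total_out;      /* actual compressed size */
        } else {
            free(out);
            out = NULL;
        }
    }
    deflateEnd(&strm);
    return out;
}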
Note that it is possible for the compressed size to be larger than the value returned by deflateBound() if flush options other than Z_FINISH or Z_NO_FLUSH are used. */ ZEXTERN int ZEXPORT deflatePending OF((z_streamp strm, unsigned *pending, int *bits)); /* deflatePending() returns the number of bytes and bits of output that have been generated, but not yet provided in the available output. The bytes not provided would be due to the available output space having being consumed. The number of bits of output not provided are between 0 and 7, where they await more bits to join them in order to fill out a full byte. If pending or bits are Z_NULL, then those values are not set. deflatePending returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ ZEXTERN int ZEXPORT deflatePrime OF((z_streamp strm, int bits, int value)); /* deflatePrime() inserts bits in the deflate output stream. The intent is that this function is used to start off the deflate output with the bits leftover from a previous deflate stream when appending to it. As such, this function can only be used for raw deflate, and must be used before the first deflate() call after a deflateInit2() or deflateReset(). bits must be less than or equal to 16, and that many of the least significant bits of value will be inserted in the output. deflatePrime returns Z_OK if success, Z_BUF_ERROR if there was not enough room in the internal buffer to insert the bits, or Z_STREAM_ERROR if the source stream state was inconsistent. */ ZEXTERN int ZEXPORT deflateSetHeader OF((z_streamp strm, gz_headerp head)); /* deflateSetHeader() provides gzip header information for when a gzip stream is requested by deflateInit2(). deflateSetHeader() may be called after deflateInit2() or deflateReset() and before the first call of deflate(). The text, time, os, extra field, name, and comment information in the provided gz_header structure are written to the gzip header (xflag is ignored -- the extra flags are set according to the compression level). The caller must assure that, if not Z_NULL, name and comment are terminated with a zero byte, and that if extra is not Z_NULL, that extra_len bytes are available there. If hcrc is true, a gzip header crc is included. Note that the current versions of the command-line version of gzip (up through version 1.3.x) do not support header crc's, and will report that it is a "multi-part gzip file" and give up. If deflateSetHeader is not used, the default gzip header has text false, the time set to zero, and os set to 255, with no extra, name, or comment fields. The gzip header is returned to the default state by deflateReset(). deflateSetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ /* ZEXTERN int ZEXPORT inflateInit2 OF((z_streamp strm, int windowBits)); This is another version of inflateInit with an extra parameter. The fields next_in, avail_in, zalloc, zfree and opaque must be initialized before by the caller. The windowBits parameter is the base two logarithm of the maximum window size (the size of the history buffer). It should be in the range 8..15 for this version of the library. The default value is 15 if inflateInit is used instead. windowBits must be greater than or equal to the windowBits value provided to deflateInit2() while compressing, or it must be equal to 15 if deflateInit2() was not used. 
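A sketch of supplying gzip header fields with deflateSetHeader(), described above; it assumes the stream was opened for gzip output with deflateInit2() and that deflate() has not yet been called. The field values and the static lifetime are illustrative (the structure and its buffers must stay valid until the header has been written):

#include <string.h>
#include <time.h>
#include <zlib.h>

static int set_gzip_header(z_stream *strm)
{
    static gz_header head;              /* must remain valid while deflate() writes the header */

    memset(&head, 0, sizeof(head));
    head.text = 1;                      /* hint that the payload is text */
    head.time = (uLong)time(NULL);      /* modification time stored in the header */
    head.os   = 3;                      /* 3 = Unix, per RFC 1952 */
    head.name = (Bytef *)"example.txt"; /* zero-terminated name, or Z_NULL for none */
    head.hcrc = 0;                      /* no header CRC; old gzip tools reject it */
    return deflateSetHeader(strm, &head);
}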
If a compressed stream with a larger window size is given as input, inflate() will return with the error code Z_DATA_ERROR instead of trying to allocate a larger window. windowBits can also be zero to request that inflate use the window size in the zlib header of the compressed stream. windowBits can also be -8..-15 for raw inflate. In this case, -windowBits determines the window size. inflate() will then process raw deflate data, not looking for a zlib or gzip header, not generating a check value, and not looking for any check values for comparison at the end of the stream. This is for use with other formats that use the deflate compressed data format such as zip. Those formats provide their own check values. If a custom format is developed using the raw deflate format for compressed data, it is recommended that a check value such as an Adler-32 or a CRC-32 be applied to the uncompressed data as is done in the zlib, gzip, and zip formats. For most applications, the zlib format should be used as is. Note that comments above on the use in deflateInit2() applies to the magnitude of windowBits. windowBits can also be greater than 15 for optional gzip decoding. Add 32 to windowBits to enable zlib and gzip decoding with automatic header detection, or add 16 to decode only the gzip format (the zlib format will return a Z_DATA_ERROR). If a gzip stream is being decoded, strm->adler is a CRC-32 instead of an Adler-32. Unlike the gunzip utility and gzread() (see below), inflate() will not automatically decode concatenated gzip streams. inflate() will return Z_STREAM_END at the end of the gzip stream. The state would need to be reset to continue decoding a subsequent gzip stream. inflateInit2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_VERSION_ERROR if the zlib library version is incompatible with the version assumed by the caller, or Z_STREAM_ERROR if the parameters are invalid, such as a null pointer to the structure. msg is set to null if there is no error message. inflateInit2 does not perform any decompression apart from possibly reading the zlib header if present: actual decompression will be done by inflate(). (So next_in and avail_in may be modified, but next_out and avail_out are unused and unchanged.) The current implementation of inflateInit2() does not process any header information -- that is deferred until inflate() is called. */ ZEXTERN int ZEXPORT inflateSetDictionary OF((z_streamp strm, const Bytef *dictionary, uInt dictLength)); /* Initializes the decompression dictionary from the given uncompressed byte sequence. This function must be called immediately after a call of inflate, if that call returned Z_NEED_DICT. The dictionary chosen by the compressor can be determined from the Adler-32 value returned by that call of inflate. The compressor and decompressor must use exactly the same dictionary (see deflateSetDictionary). For raw inflate, this function can be called at any time to set the dictionary. If the provided dictionary is smaller than the window and there is already data in the window, then the provided dictionary will amend what's there. The application must insure that the dictionary that was used for compression is provided. inflateSetDictionary returns Z_OK if success, Z_STREAM_ERROR if a parameter is invalid (e.g. dictionary being Z_NULL) or the stream state is inconsistent, Z_DATA_ERROR if the given dictionary doesn't match the expected one (incorrect Adler-32 value). 
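A sketch combining inflateInit2() with automatic header detection (15 + 32, as described above) and the Z_NEED_DICT handling documented for inflateSetDictionary(); the dictionary buffer is assumed to be the same one the compressor used:

#include <string.h>
#include <zlib.h>

/* Initialization with automatic zlib/gzip header detection. */
static int start_auto_inflate(z_stream *strm, const Bytef *in, uInt inlen)
{
    memset(strm, 0, sizeof(*strm));
    strm->zalloc = Z_NULL;
    strm->zfree  = Z_NULL;
    strm->opaque = Z_NULL;
    strm->next_in  = (z_const Bytef *)in;
    strm->avail_in = inlen;
    return inflateInit2(strm, 15 + 32);   /* accept either zlib or gzip wrapping */
}

static int inflate_with_dictionary(z_stream *strm,
                                   const Bytef *dict, uInt dictlen,
                                   Bytef *out, uInt outlen)
{
    int ret;

    strm->next_out  = out;
    strm->avail_out = outlen;
    ret = inflate(strm, Z_NO_FLUSH);
    if (ret == Z_NEED_DICT) {
        /* strm->adler now holds the Adler-32 of the dictionary the compressor
           chose; a real caller could verify it before proceeding. */
        ret = inflateSetDictionary(strm, dict, dictlen);
        if (ret == Z_OK)
            ret = inflate(strm, Z_NO_FLUSH);
    }
    return ret;
}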
inflateSetDictionary does not perform any decompression: this will be done by subsequent calls of inflate(). */ ZEXTERN int ZEXPORT inflateGetDictionary OF((z_streamp strm, Bytef *dictionary, uInt *dictLength)); /* Returns the sliding dictionary being maintained by inflate. dictLength is set to the number of bytes in the dictionary, and that many bytes are copied to dictionary. dictionary must have enough space, where 32768 bytes is always enough. If inflateGetDictionary() is called with dictionary equal to Z_NULL, then only the dictionary length is returned, and nothing is copied. Similary, if dictLength is Z_NULL, then it is not set. inflateGetDictionary returns Z_OK on success, or Z_STREAM_ERROR if the stream state is inconsistent. */ ZEXTERN int ZEXPORT inflateSync OF((z_streamp strm)); /* Skips invalid compressed data until a possible full flush point (see above for the description of deflate with Z_FULL_FLUSH) can be found, or until all available input is skipped. No output is provided. inflateSync searches for a 00 00 FF FF pattern in the compressed data. All full flush points have this pattern, but not all occurrences of this pattern are full flush points. inflateSync returns Z_OK if a possible full flush point has been found, Z_BUF_ERROR if no more input was provided, Z_DATA_ERROR if no flush point has been found, or Z_STREAM_ERROR if the stream structure was inconsistent. In the success case, the application may save the current current value of total_in which indicates where valid compressed data was found. In the error case, the application may repeatedly call inflateSync, providing more input each time, until success or end of the input data. */ ZEXTERN int ZEXPORT inflateCopy OF((z_streamp dest, z_streamp source)); /* Sets the destination stream as a complete copy of the source stream. This function can be useful when randomly accessing a large stream. The first pass through the stream can periodically record the inflate state, allowing restarting inflate at those points when randomly accessing the stream. inflateCopy returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc being Z_NULL). msg is left unchanged in both source and destination. */ ZEXTERN int ZEXPORT inflateReset OF((z_streamp strm)); /* This function is equivalent to inflateEnd followed by inflateInit, but does not free and reallocate the internal decompression state. The stream will keep attributes that may have been set by inflateInit2. inflateReset returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being Z_NULL). */ ZEXTERN int ZEXPORT inflateReset2 OF((z_streamp strm, int windowBits)); /* This function is the same as inflateReset, but it also permits changing the wrap and window size requests. The windowBits parameter is interpreted the same as it is for inflateInit2. If the window size is changed, then the memory allocated for the window is freed, and the window will be reallocated by inflate() if needed. inflateReset2 returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent (such as zalloc or state being Z_NULL), or if the windowBits parameter is invalid. */ ZEXTERN int ZEXPORT inflatePrime OF((z_streamp strm, int bits, int value)); /* This function inserts bits in the inflate input stream. The intent is that this function is used to start inflating at a bit position in the middle of a byte. 
The provided bits will be used before any bytes are used from next_in. This function should only be used with raw inflate, and should be used before the first inflate() call after inflateInit2() or inflateReset(). bits must be less than or equal to 16, and that many of the least significant bits of value will be inserted in the input. If bits is negative, then the input stream bit buffer is emptied. Then inflatePrime() can be called again to put bits in the buffer. This is used to clear out bits leftover after feeding inflate a block description prior to feeding inflate codes. inflatePrime returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ ZEXTERN long ZEXPORT inflateMark OF((z_streamp strm)); /* This function returns two values, one in the lower 16 bits of the return value, and the other in the remaining upper bits, obtained by shifting the return value down 16 bits. If the upper value is -1 and the lower value is zero, then inflate() is currently decoding information outside of a block. If the upper value is -1 and the lower value is non-zero, then inflate is in the middle of a stored block, with the lower value equaling the number of bytes from the input remaining to copy. If the upper value is not -1, then it is the number of bits back from the current bit position in the input of the code (literal or length/distance pair) currently being processed. In that case the lower value is the number of bytes already emitted for that code. A code is being processed if inflate is waiting for more input to complete decoding of the code, or if it has completed decoding but is waiting for more output space to write the literal or match data. inflateMark() is used to mark locations in the input data for random access, which may be at bit positions, and to note those cases where the output of a code may span boundaries of random access blocks. The current location in the input stream can be determined from avail_in and data_type as noted in the description for the Z_BLOCK flush parameter for inflate. inflateMark returns the value noted above, or -65536 if the provided source stream state was inconsistent. */ ZEXTERN int ZEXPORT inflateGetHeader OF((z_streamp strm, gz_headerp head)); /* inflateGetHeader() requests that gzip header information be stored in the provided gz_header structure. inflateGetHeader() may be called after inflateInit2() or inflateReset(), and before the first call of inflate(). As inflate() processes the gzip stream, head->done is zero until the header is completed, at which time head->done is set to one. If a zlib stream is being decoded, then head->done is set to -1 to indicate that there will be no gzip header information forthcoming. Note that Z_BLOCK or Z_TREES can be used to force inflate() to return immediately after header processing is complete and before any actual data is decompressed. The text, time, xflags, and os fields are filled in with the gzip header contents. hcrc is set to true if there is a header CRC. (The header CRC was valid if done is set to one.) If extra is not Z_NULL, then extra_max contains the maximum number of bytes to write to extra. Once done is true, extra_len contains the actual extra field length, and extra contains the extra field, or that field truncated if extra_max is less than extra_len. If name is not Z_NULL, then up to name_max characters are written there, terminated with a zero unless the length is greater than name_max. 
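A sketch of retrieving gzip header fields with inflateGetHeader() as described above; it assumes the stream was initialized with inflateInit2() and gzip (or automatic) header detection, and the buffer sizes are illustrative:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

static gz_header head;
static Bytef name_buf[256];
static Bytef comment_buf[256];

/* Call once after inflateInit2() and before the first inflate(). */
static int request_gzip_header(z_stream *strm)
{
    memset(&head, 0, sizeof(head));
    head.name     = name_buf;
    head.name_max = sizeof(name_buf);
    head.comment  = comment_buf;
    head.comm_max = sizeof(comment_buf);
    head.extra    = Z_NULL;          /* not interested in the extra field */
    return inflateGetHeader(strm, &head);
}

/* After inflate() calls: head.done becomes 1 once the header is complete,
   or -1 if the stream turned out to be zlib-wrapped with no gzip header. */
static void report_gzip_header(void)
{
    if (head.done == 1 && head.name != Z_NULL)
        printf("original name: %s\n", (char *)head.name);
}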
If comment is not Z_NULL, then up to comm_max characters are written there, terminated with a zero unless the length is greater than comm_max. When any of extra, name, or comment are not Z_NULL and the respective field is not present in the header, then that field is set to Z_NULL to signal its absence. This allows the use of deflateSetHeader() with the returned structure to duplicate the header. However if those fields are set to allocated memory, then the application will need to save those pointers elsewhere so that they can be eventually freed. If inflateGetHeader is not used, then the header information is simply discarded. The header is always checked for validity, including the header CRC if present. inflateReset() will reset the process to discard the header information. The application would need to call inflateGetHeader() again to retrieve the header from the next gzip stream. inflateGetHeader returns Z_OK if success, or Z_STREAM_ERROR if the source stream state was inconsistent. */ /* ZEXTERN int ZEXPORT inflateBackInit OF((z_streamp strm, int windowBits, unsigned char FAR *window)); Initialize the internal stream state for decompression using inflateBack() calls. The fields zalloc, zfree and opaque in strm must be initialized before the call. If zalloc and zfree are Z_NULL, then the default library- derived memory allocation routines are used. windowBits is the base two logarithm of the window size, in the range 8..15. window is a caller supplied buffer of that size. Except for special applications where it is assured that deflate was used with small window sizes, windowBits must be 15 and a 32K byte window must be supplied to be able to decompress general deflate streams. See inflateBack() for the usage of these routines. inflateBackInit will return Z_OK on success, Z_STREAM_ERROR if any of the parameters are invalid, Z_MEM_ERROR if the internal state could not be allocated, or Z_VERSION_ERROR if the version of the library does not match the version of the header file. */ typedef unsigned (*in_func) OF((void FAR *, z_const unsigned char FAR * FAR *)); typedef int (*out_func) OF((void FAR *, unsigned char FAR *, unsigned)); ZEXTERN int ZEXPORT inflateBack OF((z_streamp strm, in_func in, void FAR *in_desc, out_func out, void FAR *out_desc)); /* inflateBack() does a raw inflate with a single call using a call-back interface for input and output. This is potentially more efficient than inflate() for file i/o applications, in that it avoids copying between the output and the sliding window by simply making the window itself the output buffer. inflate() can be faster on modern CPUs when used with large buffers. inflateBack() trusts the application to not change the output buffer passed by the output function, at least until inflateBack() returns. inflateBackInit() must be called first to allocate the internal state and to initialize the state with the user-provided window buffer. inflateBack() may then be used multiple times to inflate a complete, raw deflate stream with each call. inflateBackEnd() is then called to free the allocated state. A raw deflate stream is one with no zlib or gzip header or trailer. This routine would normally be used in a utility that reads zip or gzip files and writes out uncompressed files. The utility would decode the header and process the trailer on its own, hence this routine expects only the raw deflate stream to decompress. This is different from the default behavior of inflate(), which expects a zlib header and trailer around the deflate stream. 
inflateBack() uses two subroutines supplied by the caller that are then called by inflateBack() for input and output. inflateBack() calls those routines until it reads a complete deflate stream and writes out all of the uncompressed data, or until it encounters an error. The function's parameters and return types are defined above in the in_func and out_func typedefs. inflateBack() will call in(in_desc, &buf) which should return the number of bytes of provided input, and a pointer to that input in buf. If there is no input available, in() must return zero -- buf is ignored in that case -- and inflateBack() will return a buffer error. inflateBack() will call out(out_desc, buf, len) to write the uncompressed data buf[0..len-1]. out() should return zero on success, or non-zero on failure. If out() returns non-zero, inflateBack() will return with an error. Neither in() nor out() are permitted to change the contents of the window provided to inflateBackInit(), which is also the buffer that out() uses to write from. The length written by out() will be at most the window size. Any non-zero amount of input may be provided by in(). For convenience, inflateBack() can be provided input on the first call by setting strm->next_in and strm->avail_in. If that input is exhausted, then in() will be called. Therefore strm->next_in must be initialized before calling inflateBack(). If strm->next_in is Z_NULL, then in() will be called immediately for input. If strm->next_in is not Z_NULL, then strm->avail_in must also be initialized, and then if strm->avail_in is not zero, input will initially be taken from strm->next_in[0 .. strm->avail_in - 1]. The in_desc and out_desc parameters of inflateBack() is passed as the first parameter of in() and out() respectively when they are called. These descriptors can be optionally used to pass any information that the caller- supplied in() and out() functions need to do their job. On return, inflateBack() will set strm->next_in and strm->avail_in to pass back any unused input that was provided by the last in() call. The return values of inflateBack() can be Z_STREAM_END on success, Z_BUF_ERROR if in() or out() returned an error, Z_DATA_ERROR if there was a format error in the deflate stream (in which case strm->msg is set to indicate the nature of the error), or Z_STREAM_ERROR if the stream was not properly initialized. In the case of Z_BUF_ERROR, an input or output error can be distinguished using strm->next_in which will be Z_NULL only if in() returned an error. If strm->next_in is not Z_NULL, then the Z_BUF_ERROR was due to out() returning non-zero. (in() will always be called before out(), so strm->next_in is assured to be defined if out() returns non-zero.) Note that inflateBack() cannot return Z_OK. */ ZEXTERN int ZEXPORT inflateBackEnd OF((z_streamp strm)); /* All memory allocated by inflateBackInit() is freed. inflateBackEnd() returns Z_OK on success, or Z_STREAM_ERROR if the stream state was inconsistent. */ ZEXTERN uLong ZEXPORT zlibCompileFlags OF((void)); /* Return flags indicating compile-time options. 
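A sketch of the inflateBack() callback interface described above, decompressing a raw deflate stream from one FILE * to another; the buffer sizes and helper names are illustrative:

#include <stdio.h>
#include <string.h>
#include <zlib.h>

static unsigned char in_buf[16384];
static unsigned char window[32768];      /* 32K window needed for general streams */

static unsigned pull(void *desc, z_const unsigned char **buf)
{
    *buf = in_buf;                       /* hand inflateBack() the next chunk of input */
    return (unsigned)fread(in_buf, 1, sizeof(in_buf), (FILE *)desc);
}

static int push(void *desc, unsigned char *buf, unsigned len)
{
    /* out() must return zero on success, non-zero on failure */
    return fwrite(buf, 1, len, (FILE *)desc) != len;
}

static int inflate_raw_stream(FILE *src, FILE *dst)
{
    z_stream strm;
    int ret;

    memset(&strm, 0, sizeof(strm));
    strm.zalloc = Z_NULL;
    strm.zfree  = Z_NULL;
    strm.opaque = Z_NULL;
    strm.next_in = Z_NULL;               /* let in() be called for the first input */
    ret = inflateBackInit(&strm, 15, window);
    if (ret != Z_OK)
        return ret;
    ret = inflateBack(&strm, pull, src, push, dst);
    inflateBackEnd(&strm);
    return ret == Z_STREAM_END ? Z_OK : ret;
}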
Type sizes, two bits each, 00 = 16 bits, 01 = 32, 10 = 64, 11 = other: 1.0: size of uInt 3.2: size of uLong 5.4: size of voidpf (pointer) 7.6: size of z_off_t Compiler, assembler, and debug options: 8: ZLIB_DEBUG 9: ASMV or ASMINF -- use ASM code 10: ZLIB_WINAPI -- exported functions use the WINAPI calling convention 11: 0 (reserved) One-time table building (smaller code, but not thread-safe if true): 12: BUILDFIXED -- build static block decoding tables when needed 13: DYNAMIC_CRC_TABLE -- build CRC calculation tables when needed 14,15: 0 (reserved) Library content (indicates missing functionality): 16: NO_GZCOMPRESS -- gz* functions cannot compress (to avoid linking deflate code when not needed) 17: NO_GZIP -- deflate can't write gzip streams, and inflate can't detect and decode gzip streams (to avoid linking crc code) 18-19: 0 (reserved) Operation variations (changes in library functionality): 20: PKZIP_BUG_WORKAROUND -- slightly more permissive inflate 21: FASTEST -- deflate algorithm with only one, lowest compression level 22,23: 0 (reserved) The sprintf variant used by gzprintf (zero is best): 24: 0 = vs*, 1 = s* -- 1 means limited to 20 arguments after the format 25: 0 = *nprintf, 1 = *printf -- 1 means gzprintf() not secure! 26: 0 = returns value, 1 = void -- 1 means inferred string length returned Remainder: 27-31: 0 (reserved) */ #ifndef Z_SOLO /* utility functions */ /* The following utility functions are implemented on top of the basic stream-oriented functions. To simplify the interface, some default options are assumed (compression level and memory usage, standard memory allocation functions). The source code of these utility functions can be modified if you need special options. */ ZEXTERN int ZEXPORT compress OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)); /* Compresses the source buffer into the destination buffer. sourceLen is the byte length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least the value returned by compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed data. compress() is equivalent to compress2() with a level parameter of Z_DEFAULT_COMPRESSION. compress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer. */ ZEXTERN int ZEXPORT compress2 OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen, int level)); /* Compresses the source buffer into the destination buffer. The level parameter has the same meaning as in deflateInit. sourceLen is the byte length of the source buffer. Upon entry, destLen is the total size of the destination buffer, which must be at least the value returned by compressBound(sourceLen). Upon exit, destLen is the actual size of the compressed data. compress2 returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer, Z_STREAM_ERROR if the level parameter is invalid. */ ZEXTERN uLong ZEXPORT compressBound OF((uLong sourceLen)); /* compressBound() returns an upper bound on the compressed size after compress() or compress2() on sourceLen bytes. It would be used before a compress() or compress2() call to allocate the destination buffer. */ ZEXTERN int ZEXPORT uncompress OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong sourceLen)); /* Decompresses the source buffer into the destination buffer. sourceLen is the byte length of the source buffer. 
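A sketch of the one-shot utility functions described above, using compressBound() to size the destination and uncompress() to reverse the operation; the round-trip check and buffer handling are illustrative:

#include <stdlib.h>
#include <string.h>
#include <zlib.h>

static int round_trip(const Bytef *src, uLong srclen)
{
    uLong clen = compressBound(srclen);       /* worst-case compressed size */
    uLong ulen = srclen;                      /* expected uncompressed size */
    Bytef *cbuf = malloc(clen);
    Bytef *ubuf = malloc(srclen ? srclen : 1);
    int ret = Z_MEM_ERROR;

    if (cbuf != NULL && ubuf != NULL) {
        ret = compress2(cbuf, &clen, src, srclen, Z_BEST_SPEED);
        if (ret == Z_OK)
            ret = uncompress(ubuf, &ulen, cbuf, clen);
        if (ret == Z_OK && (ulen != srclen || memcmp(src, ubuf, srclen) != 0))
            ret = Z_DATA_ERROR;
    }
    free(cbuf);
    free(ubuf);
    return ret;
}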
Upon entry, destLen is the total size of the destination buffer, which must be large enough to hold the entire uncompressed data. (The size of the uncompressed data must have been saved previously by the compressor and transmitted to the decompressor by some mechanism outside the scope of this compression library.) Upon exit, destLen is the actual size of the uncompressed data. uncompress returns Z_OK if success, Z_MEM_ERROR if there was not enough memory, Z_BUF_ERROR if there was not enough room in the output buffer, or Z_DATA_ERROR if the input data was corrupted or incomplete. In the case where there is not enough room, uncompress() will fill the output buffer with the uncompressed data up to that point. */ ZEXTERN int ZEXPORT uncompress2 OF((Bytef *dest, uLongf *destLen, const Bytef *source, uLong *sourceLen)); /* Same as uncompress, except that sourceLen is a pointer, where the length of the source is *sourceLen. On return, *sourceLen is the number of source bytes consumed. */ /* gzip file access functions */ /* This library supports reading and writing files in gzip (.gz) format with an interface similar to that of stdio, using the functions that start with "gz". The gzip format is different from the zlib format. gzip is a gzip wrapper, documented in RFC 1952, wrapped around a deflate stream. */ typedef struct gzFile_s *gzFile; /* semi-opaque gzip file descriptor */ /* ZEXTERN gzFile ZEXPORT gzopen OF((const char *path, const char *mode)); Opens a gzip (.gz) file for reading or writing. The mode parameter is as in fopen ("rb" or "wb") but can also include a compression level ("wb9") or a strategy: 'f' for filtered data as in "wb6f", 'h' for Huffman-only compression as in "wb1h", 'R' for run-length encoding as in "wb1R", or 'F' for fixed code compression as in "wb9F". (See the description of deflateInit2 for more information about the strategy parameter.) 'T' will request transparent writing or appending with no compression and not using the gzip format. "a" can be used instead of "w" to request that the gzip stream that will be written be appended to the file. "+" will result in an error, since reading and writing to the same gzip file is not supported. The addition of "x" when writing will create the file exclusively, which fails if the file already exists. On systems that support it, the addition of "e" when reading or writing will set the flag to close the file on an execve() call. These functions, as well as gzip, will read and decode a sequence of gzip streams in a file. The append function of gzopen() can be used to create such a file. (Also see gzflush() for another way to do this.) When appending, gzopen does not test whether the file begins with a gzip stream, nor does it look for the end of the gzip streams to begin appending. gzopen will simply append a gzip stream to the existing file. gzopen can be used to read a file which is not in gzip format; in this case gzread will directly read from the file without decompression. When reading, this will be detected automatically by looking for the magic two- byte gzip header. gzopen returns NULL if the file could not be opened, if there was insufficient memory to allocate the gzFile state, or if an invalid mode was specified (an 'r', 'w', or 'a' was not provided, or '+' was provided). errno can be checked to determine if the reason gzopen failed was that the file could not be opened. */ ZEXTERN gzFile ZEXPORT gzdopen OF((int fd, const char *mode)); /* gzdopen associates a gzFile with the file descriptor fd. 
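     (For example, to read gzip data from a descriptor that is already open --
      sketch only, with sock_fd and handle_error() as placeholders:

        gzFile gz = gzdopen(sock_fd, "rb");
        if (gz == NULL)
            handle_error();
     )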
File descriptors are obtained from calls like open, dup, creat, pipe or fileno (if the file has been previously opened with fopen). The mode parameter is as in gzopen. The next call of gzclose on the returned gzFile will also close the file descriptor fd, just like fclose(fdopen(fd, mode)) closes the file descriptor fd. If you want to keep fd open, use fd = dup(fd_keep); gz = gzdopen(fd, mode);. The duplicated descriptor should be saved to avoid a leak, since gzdopen does not close fd if it fails. If you are using fileno() to get the file descriptor from a FILE *, then you will have to use dup() to avoid double-close()ing the file descriptor. Both gzclose() and fclose() will close the associated file descriptor, so they need to have different file descriptors. gzdopen returns NULL if there was insufficient memory to allocate the gzFile state, if an invalid mode was specified (an 'r', 'w', or 'a' was not provided, or '+' was provided), or if fd is -1. The file descriptor is not used until the next gz* read, write, seek, or close operation, so gzdopen will not detect if fd is invalid (unless fd is -1). */ ZEXTERN int ZEXPORT gzbuffer OF((gzFile file, unsigned size)); /* Set the internal buffer size used by this library's functions. The default buffer size is 8192 bytes. This function must be called after gzopen() or gzdopen(), and before any other calls that read or write the file. The buffer memory allocation is always deferred to the first read or write. Three times that size in buffer space is allocated. A larger buffer size of, for example, 64K or 128K bytes will noticeably increase the speed of decompression (reading). The new buffer size also affects the maximum length for gzprintf(). gzbuffer() returns 0 on success, or -1 on failure, such as being called too late. */ ZEXTERN int ZEXPORT gzsetparams OF((gzFile file, int level, int strategy)); /* Dynamically update the compression level or strategy. See the description of deflateInit2 for the meaning of these parameters. Previously provided data is flushed before the parameter change. gzsetparams returns Z_OK if success, Z_STREAM_ERROR if the file was not opened for writing, Z_ERRNO if there is an error writing the flushed data, or Z_MEM_ERROR if there is a memory allocation error. */ ZEXTERN int ZEXPORT gzread OF((gzFile file, voidp buf, unsigned len)); /* Reads the given number of uncompressed bytes from the compressed file. If the input file is not in gzip format, gzread copies the given number of bytes into the buffer directly from the file. After reaching the end of a gzip stream in the input, gzread will continue to read, looking for another gzip stream. Any number of gzip streams may be concatenated in the input file, and will all be decompressed by gzread(). If something other than a gzip stream is encountered after a gzip stream, that remaining trailing garbage is ignored (and no error is returned). gzread can be used to read a gzip file that is being concurrently written. Upon reaching the end of the input, gzread will return with the available data. If the error code returned by gzerror is Z_OK or Z_BUF_ERROR, then gzclearerr can be used to clear the end of file indicator in order to permit gzread to be tried again. Z_OK indicates that a gzip stream was completed on the last gzread. Z_BUF_ERROR indicates that the input file ended in the middle of a gzip stream. Note that gzread does not return -1 in the event of an incomplete gzip stream. 
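     (A minimal read loop, sketched with placeholders: buf and its size buflen
      are supplied by the caller, and process() and report() stand in for
      application code:

        int n, err;
        while ((n = gzread(file, buf, buflen)) > 0)
            process(buf, n);
        if (n < 0)
            report(gzerror(file, &err));

      As noted above, an incomplete gzip stream does not by itself make gzread
      return -1.)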
This error is deferred until gzclose(), which will return Z_BUF_ERROR if the last gzread ended in the middle of a gzip stream. Alternatively, gzerror can be used before gzclose to detect this case. gzread returns the number of uncompressed bytes actually read, less than len for end of file, or -1 for error. If len is too large to fit in an int, then nothing is read, -1 is returned, and the error state is set to Z_STREAM_ERROR. */ ZEXTERN z_size_t ZEXPORT gzfread OF((voidp buf, z_size_t size, z_size_t nitems, gzFile file)); /* Read up to nitems items of size size from file to buf, otherwise operating as gzread() does. This duplicates the interface of stdio's fread(), with size_t request and return types. If the library defines size_t, then z_size_t is identical to size_t. If not, then z_size_t is an unsigned integer type that can contain a pointer. gzfread() returns the number of full items read of size size, or zero if the end of the file was reached and a full item could not be read, or if there was an error. gzerror() must be consulted if zero is returned in order to determine if there was an error. If the multiplication of size and nitems overflows, i.e. the product does not fit in a z_size_t, then nothing is read, zero is returned, and the error state is set to Z_STREAM_ERROR. In the event that the end of file is reached and only a partial item is available at the end, i.e. the remaining uncompressed data length is not a multiple of size, then the final partial item is nevetheless read into buf and the end-of-file flag is set. The length of the partial item read is not provided, but could be inferred from the result of gztell(). This behavior is the same as the behavior of fread() implementations in common libraries, but it prevents the direct use of gzfread() to read a concurrently written file, reseting and retrying on end-of-file, when size is not 1. */ ZEXTERN int ZEXPORT gzwrite OF((gzFile file, voidpc buf, unsigned len)); /* Writes the given number of uncompressed bytes into the compressed file. gzwrite returns the number of uncompressed bytes written or 0 in case of error. */ ZEXTERN z_size_t ZEXPORT gzfwrite OF((voidpc buf, z_size_t size, z_size_t nitems, gzFile file)); /* gzfwrite() writes nitems items of size size from buf to file, duplicating the interface of stdio's fwrite(), with size_t request and return types. If the library defines size_t, then z_size_t is identical to size_t. If not, then z_size_t is an unsigned integer type that can contain a pointer. gzfwrite() returns the number of full items written of size size, or zero if there was an error. If the multiplication of size and nitems overflows, i.e. the product does not fit in a z_size_t, then nothing is written, zero is returned, and the error state is set to Z_STREAM_ERROR. */ ZEXTERN int ZEXPORTVA gzprintf Z_ARG((gzFile file, const char *format, ...)); /* Converts, formats, and writes the arguments to the compressed file under control of the format string, as in fprintf. gzprintf returns the number of uncompressed bytes actually written, or a negative zlib error code in case of error. The number of uncompressed bytes written is limited to 8191, or one less than the buffer size given to gzbuffer(). The caller should assure that this limit is not exceeded. If it is exceeded, then gzprintf() will return an error (0) with nothing written. 
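     (Typical usage, as a sketch with id, name and handle_write_error() as
      placeholders:

        if (gzprintf(file, "%d %s\n", id, name) <= 0)
            handle_write_error();

      Each formatted string should stay comfortably below that limit;
      exceeding it produces the zero return described above.)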
In this case, there may also be a buffer overflow with unpredictable consequences, which is possible only if zlib was compiled with the insecure functions sprintf() or vsprintf() because the secure snprintf() or vsnprintf() functions were not available. This can be determined using zlibCompileFlags(). */ ZEXTERN int ZEXPORT gzputs OF((gzFile file, const char *s)); /* Writes the given null-terminated string to the compressed file, excluding the terminating null character. gzputs returns the number of characters written, or -1 in case of error. */ ZEXTERN char * ZEXPORT gzgets OF((gzFile file, char *buf, int len)); /* Reads bytes from the compressed file until len-1 characters are read, or a newline character is read and transferred to buf, or an end-of-file condition is encountered. If any characters are read or if len == 1, the string is terminated with a null character. If no characters are read due to an end-of-file or len < 1, then the buffer is left untouched. gzgets returns buf which is a null-terminated string, or it returns NULL for end-of-file or in case of error. If there was an error, the contents at buf are indeterminate. */ ZEXTERN int ZEXPORT gzputc OF((gzFile file, int c)); /* Writes c, converted to an unsigned char, into the compressed file. gzputc returns the value that was written, or -1 in case of error. */ ZEXTERN int ZEXPORT gzgetc OF((gzFile file)); /* Reads one byte from the compressed file. gzgetc returns this byte or -1 in case of end of file or error. This is implemented as a macro for speed. As such, it does not do all of the checking the other functions do. I.e. it does not check to see if file is NULL, nor whether the structure file points to has been clobbered or not. */ ZEXTERN int ZEXPORT gzungetc OF((int c, gzFile file)); /* Push one character back onto the stream to be read as the first character on the next read. At least one character of push-back is allowed. gzungetc() returns the character pushed, or -1 on failure. gzungetc() will fail if c is -1, and may fail if a character has been pushed but not read yet. If gzungetc is used immediately after gzopen or gzdopen, at least the output buffer size of pushed characters is allowed. (See gzbuffer above.) The pushed character will be discarded if the stream is repositioned with gzseek() or gzrewind(). */ ZEXTERN int ZEXPORT gzflush OF((gzFile file, int flush)); /* Flushes all pending output into the compressed file. The parameter flush is as in the deflate() function. The return value is the zlib error number (see function gzerror below). gzflush is only permitted when writing. If the flush parameter is Z_FINISH, the remaining data is written and the gzip stream is completed in the output. If gzwrite() is called again, a new gzip stream will be started in the output. gzread() is able to read such concatenated gzip streams. gzflush should be called only when strictly necessary because it will degrade compression if called too often. */ /* ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile file, z_off_t offset, int whence)); Sets the starting position for the next gzread or gzwrite on the given compressed file. The offset represents a number of bytes in the uncompressed data stream. The whence parameter is defined as in lseek(2); the value SEEK_END is not supported. If the file is opened for reading, this function is emulated but can be extremely slow. If the file is opened for writing, only forward seeks are supported; gzseek then compresses a sequence of zeroes up to the new starting position. 
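     (For example, to skip the next 1000 uncompressed bytes while reading --
      a sketch only:

        z_off_t pos = gzseek(file, (z_off_t)1000, SEEK_CUR);

      The meaning of pos, or of -1 on error, is described just below.)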
gzseek returns the resulting offset location as measured in bytes from the beginning of the uncompressed stream, or -1 in case of error, in particular if the file is opened for writing and the new starting position would be before the current position. */ ZEXTERN int ZEXPORT gzrewind OF((gzFile file)); /* Rewinds the given file. This function is supported only for reading. gzrewind(file) is equivalent to (int)gzseek(file, 0L, SEEK_SET) */ /* ZEXTERN z_off_t ZEXPORT gztell OF((gzFile file)); Returns the starting position for the next gzread or gzwrite on the given compressed file. This position represents a number of bytes in the uncompressed data stream, and is zero when starting, even if appending or reading a gzip stream from the middle of a file using gzdopen(). gztell(file) is equivalent to gzseek(file, 0L, SEEK_CUR) */ /* ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile file)); Returns the current offset in the file being read or written. This offset includes the count of bytes that precede the gzip stream, for example when appending or when using gzdopen() for reading. When reading, the offset does not include as yet unused buffered input. This information can be used for a progress indicator. On error, gzoffset() returns -1. */ ZEXTERN int ZEXPORT gzeof OF((gzFile file)); /* Returns true (1) if the end-of-file indicator has been set while reading, false (0) otherwise. Note that the end-of-file indicator is set only if the read tried to go past the end of the input, but came up short. Therefore, just like feof(), gzeof() may return false even if there is no more data to read, in the event that the last read request was for the exact number of bytes remaining in the input file. This will happen if the input file size is an exact multiple of the buffer size. If gzeof() returns true, then the read functions will return no more data, unless the end-of-file indicator is reset by gzclearerr() and the input file has grown since the previous end of file was detected. */ ZEXTERN int ZEXPORT gzdirect OF((gzFile file)); /* Returns true (1) if file is being copied directly while reading, or false (0) if file is a gzip stream being decompressed. If the input file is empty, gzdirect() will return true, since the input does not contain a gzip stream. If gzdirect() is used immediately after gzopen() or gzdopen() it will cause buffers to be allocated to allow reading the file to determine if it is a gzip file. Therefore if gzbuffer() is used, it should be called before gzdirect(). When writing, gzdirect() returns true (1) if transparent writing was requested ("wT" for the gzopen() mode), or false (0) otherwise. (Note: gzdirect() is not needed when writing. Transparent writing must be explicitly requested, so the application already knows the answer. When linking statically, using gzdirect() will include all of the zlib code for gzip file reading and decompression, which may not be desired.) */ ZEXTERN int ZEXPORT gzclose OF((gzFile file)); /* Flushes all pending output if necessary, closes the compressed file and deallocates the (de)compression state. Note that once file is closed, you cannot call gzerror with file, since its structures have been deallocated. gzclose must not be called more than once on the same file, just as free must not be called more than once on the same allocation. gzclose will return Z_STREAM_ERROR if file is not valid, Z_ERRNO on a file operation error, Z_MEM_ERROR if out of memory, Z_BUF_ERROR if the last read ended in the middle of a gzip stream, or Z_OK on success. 
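     A minimal open/read/close sequence, sketched with "data.gz" as a
     placeholder file name and the read loop elided:

        gzFile in = gzopen("data.gz", "rb");
        if (in != NULL) {
            /* ... gzread() / gzgets() calls ... */
            int ret = gzclose(in);
            /* compare ret against the return values listed above */
        }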
*/ ZEXTERN int ZEXPORT gzclose_r OF((gzFile file)); ZEXTERN int ZEXPORT gzclose_w OF((gzFile file)); /* Same as gzclose(), but gzclose_r() is only for use when reading, and gzclose_w() is only for use when writing or appending. The advantage to using these instead of gzclose() is that they avoid linking in zlib compression or decompression code that is not used when only reading or only writing respectively. If gzclose() is used, then both compression and decompression code will be included the application when linking to a static zlib library. */ ZEXTERN const char * ZEXPORT gzerror OF((gzFile file, int *errnum)); /* Returns the error message for the last error which occurred on the given compressed file. errnum is set to zlib error number. If an error occurred in the file system and not in the compression library, errnum is set to Z_ERRNO and the application may consult errno to get the exact error code. The application must not modify the returned string. Future calls to this function may invalidate the previously returned string. If file is closed, then the string previously returned by gzerror will no longer be available. gzerror() should be used to distinguish errors from end-of-file for those functions above that do not distinguish those cases in their return values. */ ZEXTERN void ZEXPORT gzclearerr OF((gzFile file)); /* Clears the error and end-of-file flags for file. This is analogous to the clearerr() function in stdio. This is useful for continuing to read a gzip file that is being written concurrently. */ #endif /* !Z_SOLO */ /* checksum functions */ /* These functions are not related to compression but are exported anyway because they might be useful in applications using the compression library. */ ZEXTERN uLong ZEXPORT adler32 OF((uLong adler, const Bytef *buf, uInt len)); /* Update a running Adler-32 checksum with the bytes buf[0..len-1] and return the updated checksum. If buf is Z_NULL, this function returns the required initial value for the checksum. An Adler-32 checksum is almost as reliable as a CRC-32 but can be computed much faster. Usage example: uLong adler = adler32(0L, Z_NULL, 0); while (read_buffer(buffer, length) != EOF) { adler = adler32(adler, buffer, length); } if (adler != original_adler) error(); */ ZEXTERN uLong ZEXPORT adler32_z OF((uLong adler, const Bytef *buf, z_size_t len)); /* Same as adler32(), but with a size_t length. */ /* ZEXTERN uLong ZEXPORT adler32_combine OF((uLong adler1, uLong adler2, z_off_t len2)); Combine two Adler-32 checksums into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, Adler-32 checksums were calculated for each, adler1 and adler2. adler32_combine() returns the Adler-32 checksum of seq1 and seq2 concatenated, requiring only adler1, adler2, and len2. Note that the z_off_t type (like off_t) is a signed integer. If len2 is negative, the result has no meaning or utility. */ ZEXTERN uLong ZEXPORT crc32 OF((uLong crc, const Bytef *buf, uInt len)); /* Update a running CRC-32 with the bytes buf[0..len-1] and return the updated CRC-32. If buf is Z_NULL, this function returns the required initial value for the crc. Pre- and post-conditioning (one's complement) is performed within this function so it shouldn't be done by the application. 
Usage example: uLong crc = crc32(0L, Z_NULL, 0); while (read_buffer(buffer, length) != EOF) { crc = crc32(crc, buffer, length); } if (crc != original_crc) error(); */ ZEXTERN uLong ZEXPORT crc32_z OF((uLong adler, const Bytef *buf, z_size_t len)); /* Same as crc32(), but with a size_t length. */ /* ZEXTERN uLong ZEXPORT crc32_combine OF((uLong crc1, uLong crc2, z_off_t len2)); Combine two CRC-32 check values into one. For two sequences of bytes, seq1 and seq2 with lengths len1 and len2, CRC-32 check values were calculated for each, crc1 and crc2. crc32_combine() returns the CRC-32 check value of seq1 and seq2 concatenated, requiring only crc1, crc2, and len2. */ /* various hacks, don't look :) */ /* deflateInit and inflateInit are macros to allow checking the zlib version * and the compiler's view of z_stream: */ ZEXTERN int ZEXPORT deflateInit_ OF((z_streamp strm, int level, const char *version, int stream_size)); ZEXTERN int ZEXPORT inflateInit_ OF((z_streamp strm, const char *version, int stream_size)); ZEXTERN int ZEXPORT deflateInit2_ OF((z_streamp strm, int level, int method, int windowBits, int memLevel, int strategy, const char *version, int stream_size)); ZEXTERN int ZEXPORT inflateInit2_ OF((z_streamp strm, int windowBits, const char *version, int stream_size)); ZEXTERN int ZEXPORT inflateBackInit_ OF((z_streamp strm, int windowBits, unsigned char FAR *window, const char *version, int stream_size)); #ifdef Z_PREFIX_SET # define z_deflateInit(strm, level) \ deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) # define z_inflateInit(strm) \ inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) # define z_deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) # define z_inflateInit2(strm, windowBits) \ inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ (int)sizeof(z_stream)) # define z_inflateBackInit(strm, windowBits, window) \ inflateBackInit_((strm), (windowBits), (window), \ ZLIB_VERSION, (int)sizeof(z_stream)) #else # define deflateInit(strm, level) \ deflateInit_((strm), (level), ZLIB_VERSION, (int)sizeof(z_stream)) # define inflateInit(strm) \ inflateInit_((strm), ZLIB_VERSION, (int)sizeof(z_stream)) # define deflateInit2(strm, level, method, windowBits, memLevel, strategy) \ deflateInit2_((strm),(level),(method),(windowBits),(memLevel),\ (strategy), ZLIB_VERSION, (int)sizeof(z_stream)) # define inflateInit2(strm, windowBits) \ inflateInit2_((strm), (windowBits), ZLIB_VERSION, \ (int)sizeof(z_stream)) # define inflateBackInit(strm, windowBits, window) \ inflateBackInit_((strm), (windowBits), (window), \ ZLIB_VERSION, (int)sizeof(z_stream)) #endif #ifndef Z_SOLO /* gzgetc() macro and its supporting function and exposed data structure. Note * that the real internal state is much larger than the exposed structure. * This abbreviated structure exposes just enough for the gzgetc() macro. The * user should not mess with these exposed elements, since their names or * behavior could change in the future, perhaps even capriciously. They can * only be used by the gzgetc() macro. You have been warned. */ struct gzFile_s { unsigned have; unsigned char *next; z_off64_t pos; }; ZEXTERN int ZEXPORT gzgetc_ OF((gzFile file)); /* backward compatibility */ #ifdef Z_PREFIX_SET # undef z_gzgetc # define z_gzgetc(g) \ ((g)->have ? ((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) #else # define gzgetc(g) \ ((g)->have ? 
((g)->have--, (g)->pos++, *((g)->next)++) : (gzgetc)(g)) #endif /* provide 64-bit offset functions if _LARGEFILE64_SOURCE defined, and/or * change the regular functions to 64 bits if _FILE_OFFSET_BITS is 64 (if * both are true, the application gets the *64 functions, and the regular * functions are changed to 64 bits) -- in case these are set on systems * without large file support, _LFS64_LARGEFILE must also be true */ #ifdef Z_LARGE64 ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); ZEXTERN z_off64_t ZEXPORT gzseek64 OF((gzFile, z_off64_t, int)); ZEXTERN z_off64_t ZEXPORT gztell64 OF((gzFile)); ZEXTERN z_off64_t ZEXPORT gzoffset64 OF((gzFile)); ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off64_t)); ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off64_t)); #endif #if !defined(ZLIB_INTERNAL) && defined(Z_WANT64) # ifdef Z_PREFIX_SET # define z_gzopen z_gzopen64 # define z_gzseek z_gzseek64 # define z_gztell z_gztell64 # define z_gzoffset z_gzoffset64 # define z_adler32_combine z_adler32_combine64 # define z_crc32_combine z_crc32_combine64 # else # define gzopen gzopen64 # define gzseek gzseek64 # define gztell gztell64 # define gzoffset gzoffset64 # define adler32_combine adler32_combine64 # define crc32_combine crc32_combine64 # endif # ifndef Z_LARGE64 ZEXTERN gzFile ZEXPORT gzopen64 OF((const char *, const char *)); ZEXTERN z_off_t ZEXPORT gzseek64 OF((gzFile, z_off_t, int)); ZEXTERN z_off_t ZEXPORT gztell64 OF((gzFile)); ZEXTERN z_off_t ZEXPORT gzoffset64 OF((gzFile)); ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); # endif #else ZEXTERN gzFile ZEXPORT gzopen OF((const char *, const char *)); ZEXTERN z_off_t ZEXPORT gzseek OF((gzFile, z_off_t, int)); ZEXTERN z_off_t ZEXPORT gztell OF((gzFile)); ZEXTERN z_off_t ZEXPORT gzoffset OF((gzFile)); ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); #endif #else /* Z_SOLO */ ZEXTERN uLong ZEXPORT adler32_combine OF((uLong, uLong, z_off_t)); ZEXTERN uLong ZEXPORT crc32_combine OF((uLong, uLong, z_off_t)); #endif /* !Z_SOLO */ /* undocumented functions */ ZEXTERN const char * ZEXPORT zError OF((int)); ZEXTERN int ZEXPORT inflateSyncPoint OF((z_streamp)); ZEXTERN const z_crc_t FAR * ZEXPORT get_crc_table OF((void)); ZEXTERN int ZEXPORT inflateUndermine OF((z_streamp, int)); ZEXTERN int ZEXPORT inflateValidate OF((z_streamp, int)); ZEXTERN unsigned long ZEXPORT inflateCodesUsed OF ((z_streamp)); ZEXTERN int ZEXPORT inflateResetKeep OF((z_streamp)); ZEXTERN int ZEXPORT deflateResetKeep OF((z_streamp)); #if (defined(_WIN32) || defined(__CYGWIN__)) && !defined(Z_SOLO) ZEXTERN gzFile ZEXPORT gzopen_w OF((const wchar_t *path, const char *mode)); #endif #if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifndef Z_SOLO ZEXTERN int ZEXPORTVA gzvprintf Z_ARG((gzFile file, const char *format, va_list va)); # endif #endif #ifdef __cplusplus } #endif #endif /* ZLIB_H */ mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/zutil.c0000644000076500000240000001621013572250760020773 0ustar alcaeusstaff/* zutil.c -- target dependent utility functions for the compression library * Copyright (C) 1995-2017 Jean-loup Gailly * For conditions of distribution and use, see copyright notice in zlib.h */ /* @(#) $Id$ */ #include "zutil.h" #ifndef Z_SOLO # include "gzguts.h" #endif z_const char * const z_errmsg[10] = { (z_const char *)"need dictionary", /* Z_NEED_DICT 2 */ 
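    /* This table is indexed by Z_NEED_DICT - (error code), i.e. 2 - err (see
       ERR_MSG() in zutil.h): entries run from Z_NEED_DICT (2) down to
       Z_VERSION_ERROR (-6), with a spare empty string at the end. */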
(z_const char *)"stream end", /* Z_STREAM_END 1 */ (z_const char *)"", /* Z_OK 0 */ (z_const char *)"file error", /* Z_ERRNO (-1) */ (z_const char *)"stream error", /* Z_STREAM_ERROR (-2) */ (z_const char *)"data error", /* Z_DATA_ERROR (-3) */ (z_const char *)"insufficient memory", /* Z_MEM_ERROR (-4) */ (z_const char *)"buffer error", /* Z_BUF_ERROR (-5) */ (z_const char *)"incompatible version",/* Z_VERSION_ERROR (-6) */ (z_const char *)"" }; const char * ZEXPORT zlibVersion() { return ZLIB_VERSION; } uLong ZEXPORT zlibCompileFlags() { uLong flags; flags = 0; switch ((int)(sizeof(uInt))) { case 2: break; case 4: flags += 1; break; case 8: flags += 2; break; default: flags += 3; } switch ((int)(sizeof(uLong))) { case 2: break; case 4: flags += 1 << 2; break; case 8: flags += 2 << 2; break; default: flags += 3 << 2; } switch ((int)(sizeof(voidpf))) { case 2: break; case 4: flags += 1 << 4; break; case 8: flags += 2 << 4; break; default: flags += 3 << 4; } switch ((int)(sizeof(z_off_t))) { case 2: break; case 4: flags += 1 << 6; break; case 8: flags += 2 << 6; break; default: flags += 3 << 6; } #ifdef ZLIB_DEBUG flags += 1 << 8; #endif #if defined(ASMV) || defined(ASMINF) flags += 1 << 9; #endif #ifdef ZLIB_WINAPI flags += 1 << 10; #endif #ifdef BUILDFIXED flags += 1 << 12; #endif #ifdef DYNAMIC_CRC_TABLE flags += 1 << 13; #endif #ifdef NO_GZCOMPRESS flags += 1L << 16; #endif #ifdef NO_GZIP flags += 1L << 17; #endif #ifdef PKZIP_BUG_WORKAROUND flags += 1L << 20; #endif #ifdef FASTEST flags += 1L << 21; #endif #if defined(STDC) || defined(Z_HAVE_STDARG_H) # ifdef NO_vsnprintf flags += 1L << 25; # ifdef HAS_vsprintf_void flags += 1L << 26; # endif # else # ifdef HAS_vsnprintf_void flags += 1L << 26; # endif # endif #else flags += 1L << 24; # ifdef NO_snprintf flags += 1L << 25; # ifdef HAS_sprintf_void flags += 1L << 26; # endif # else # ifdef HAS_snprintf_void flags += 1L << 26; # endif # endif #endif return flags; } #ifdef ZLIB_DEBUG #include # ifndef verbose # define verbose 0 # endif int ZLIB_INTERNAL z_verbose = verbose; void ZLIB_INTERNAL z_error (m) char *m; { fprintf(stderr, "%s\n", m); exit(1); } #endif /* exported to allow conversion of error code to string for compress() and * uncompress() */ const char * ZEXPORT zError(err) int err; { return ERR_MSG(err); } #if defined(_WIN32_WCE) /* The Microsoft C Run-Time Library for Windows CE doesn't have * errno. We define it as a global variable to simplify porting. * Its value is always 0 and should not be used. */ int errno = 0; #endif #ifndef HAVE_MEMCPY void ZLIB_INTERNAL zmemcpy(dest, source, len) Bytef* dest; const Bytef* source; uInt len; { if (len == 0) return; do { *dest++ = *source++; /* ??? to be unrolled */ } while (--len != 0); } int ZLIB_INTERNAL zmemcmp(s1, s2, len) const Bytef* s1; const Bytef* s2; uInt len; { uInt j; for (j = 0; j < len; j++) { if (s1[j] != s2[j]) return 2*(s1[j] > s2[j])-1; } return 0; } void ZLIB_INTERNAL zmemzero(dest, len) Bytef* dest; uInt len; { if (len == 0) return; do { *dest++ = 0; /* ??? to be unrolled */ } while (--len != 0); } #endif #ifndef Z_SOLO #ifdef SYS16BIT #ifdef __TURBOC__ /* Turbo C in 16-bit mode */ # define MY_ZCALLOC /* Turbo C malloc() does not allow dynamic allocation of 64K bytes * and farmalloc(64K) returns a pointer with an offset of 8, so we * must fix the pointer. Warning: the pointer must be put back to its * original form in order to free it, use zcfree(). 
*/ #define MAX_PTR 10 /* 10*64K = 640K */ local int next_ptr = 0; typedef struct ptr_table_s { voidpf org_ptr; voidpf new_ptr; } ptr_table; local ptr_table table[MAX_PTR]; /* This table is used to remember the original form of pointers * to large buffers (64K). Such pointers are normalized with a zero offset. * Since MSDOS is not a preemptive multitasking OS, this table is not * protected from concurrent access. This hack doesn't work anyway on * a protected system like OS/2. Use Microsoft C instead. */ voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, unsigned items, unsigned size) { voidpf buf; ulg bsize = (ulg)items*size; (void)opaque; /* If we allocate less than 65520 bytes, we assume that farmalloc * will return a usable pointer which doesn't have to be normalized. */ if (bsize < 65520L) { buf = farmalloc(bsize); if (*(ush*)&buf != 0) return buf; } else { buf = farmalloc(bsize + 16L); } if (buf == NULL || next_ptr >= MAX_PTR) return NULL; table[next_ptr].org_ptr = buf; /* Normalize the pointer to seg:0 */ *((ush*)&buf+1) += ((ush)((uch*)buf-0) + 15) >> 4; *(ush*)&buf = 0; table[next_ptr++].new_ptr = buf; return buf; } void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { int n; (void)opaque; if (*(ush*)&ptr != 0) { /* object < 64K */ farfree(ptr); return; } /* Find the original pointer */ for (n = 0; n < next_ptr; n++) { if (ptr != table[n].new_ptr) continue; farfree(table[n].org_ptr); while (++n < next_ptr) { table[n-1] = table[n]; } next_ptr--; return; } Assert(0, "zcfree: ptr not found"); } #endif /* __TURBOC__ */ #ifdef M_I86 /* Microsoft C in 16-bit mode */ # define MY_ZCALLOC #if (!defined(_MSC_VER) || (_MSC_VER <= 600)) # define _halloc halloc # define _hfree hfree #endif voidpf ZLIB_INTERNAL zcalloc (voidpf opaque, uInt items, uInt size) { (void)opaque; return _halloc((long)items, size); } void ZLIB_INTERNAL zcfree (voidpf opaque, voidpf ptr) { (void)opaque; _hfree(ptr); } #endif /* M_I86 */ #endif /* SYS16BIT */ #ifndef MY_ZCALLOC /* Any system without a special alloc function */ #ifndef STDC extern voidp malloc OF((uInt size)); extern voidp calloc OF((uInt items, uInt size)); extern void free OF((voidpf ptr)); #endif voidpf ZLIB_INTERNAL zcalloc (opaque, items, size) voidpf opaque; unsigned items; unsigned size; { (void)opaque; return sizeof(uInt) > 2 ? (voidpf)malloc(items * size) : (voidpf)calloc(items, size); } void ZLIB_INTERNAL zcfree (opaque, ptr) voidpf opaque; voidpf ptr; { (void)opaque; free(ptr); } #endif /* MY_ZCALLOC */ #endif /* !Z_SOLO */ mongodb-1.6.1/src/libmongoc/src/zlib-1.2.11/zutil.h0000644000076500000240000001572713572250760021014 0ustar alcaeusstaff/* zutil.h -- internal interface and configuration of the compression library * Copyright (C) 1995-2016 Jean-loup Gailly, Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h */ /* WARNING: this file should *not* be used by applications. It is part of the implementation of the compression library and is subject to change. Applications should only use zlib.h. 
*/ /* @(#) $Id$ */ #ifndef ZUTIL_H #define ZUTIL_H #ifdef HAVE_HIDDEN # define ZLIB_INTERNAL __attribute__((visibility ("hidden"))) #else # define ZLIB_INTERNAL #endif #include "zlib.h" #if defined(STDC) && !defined(Z_SOLO) # if !(defined(_WIN32_WCE) && defined(_MSC_VER)) # include # endif # include # include #endif #ifdef Z_SOLO typedef long ptrdiff_t; /* guess -- will be caught if guess is wrong */ #endif #ifndef local # define local static #endif /* since "static" is used to mean two completely different things in C, we define "local" for the non-static meaning of "static", for readability (compile with -Dlocal if your debugger can't find static symbols) */ typedef unsigned char uch; typedef uch FAR uchf; typedef unsigned short ush; typedef ush FAR ushf; typedef unsigned long ulg; extern z_const char * const z_errmsg[10]; /* indexed by 2-zlib_error */ /* (size given to avoid silly warnings with Visual C++) */ #define ERR_MSG(err) z_errmsg[Z_NEED_DICT-(err)] #define ERR_RETURN(strm,err) \ return (strm->msg = ERR_MSG(err), (err)) /* To be used only when the state is known to be valid */ /* common constants */ #ifndef DEF_WBITS # define DEF_WBITS MAX_WBITS #endif /* default windowBits for decompression. MAX_WBITS is for compression only */ #if MAX_MEM_LEVEL >= 8 # define DEF_MEM_LEVEL 8 #else # define DEF_MEM_LEVEL MAX_MEM_LEVEL #endif /* default memLevel */ #define STORED_BLOCK 0 #define STATIC_TREES 1 #define DYN_TREES 2 /* The three kinds of block type */ #define MIN_MATCH 3 #define MAX_MATCH 258 /* The minimum and maximum match lengths */ #define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */ /* target dependencies */ #if defined(MSDOS) || (defined(WINDOWS) && !defined(WIN32)) # define OS_CODE 0x00 # ifndef Z_SOLO # if defined(__TURBOC__) || defined(__BORLANDC__) # if (__STDC__ == 1) && (defined(__LARGE__) || defined(__COMPACT__)) /* Allow compilation with ANSI keywords only enabled */ void _Cdecl farfree( void *block ); void *_Cdecl farmalloc( unsigned long nbytes ); # else # include # endif # else /* MSC or DJGPP */ # include # endif # endif #endif #ifdef AMIGA # define OS_CODE 1 #endif #if defined(VAXC) || defined(VMS) # define OS_CODE 2 # define F_OPEN(name, mode) \ fopen((name), (mode), "mbc=60", "ctx=stm", "rfm=fix", "mrs=512") #endif #ifdef __370__ # if __TARGET_LIB__ < 0x20000000 # define OS_CODE 4 # elif __TARGET_LIB__ < 0x40000000 # define OS_CODE 11 # else # define OS_CODE 8 # endif #endif #if defined(ATARI) || defined(atarist) # define OS_CODE 5 #endif #ifdef OS2 # define OS_CODE 6 # if defined(M_I86) && !defined(Z_SOLO) # include # endif #endif #if defined(MACOS) || defined(TARGET_OS_MAC) # define OS_CODE 7 # ifndef Z_SOLO # if defined(__MWERKS__) && __dest_os != __be_os && __dest_os != __win32_os # include /* for fdopen */ # else # ifndef fdopen # define fdopen(fd,mode) NULL /* No fdopen() */ # endif # endif # endif #endif #ifdef __acorn # define OS_CODE 13 #endif #if defined(WIN32) && !defined(__CYGWIN__) # define OS_CODE 10 #endif #ifdef _BEOS_ # define OS_CODE 16 #endif #ifdef __TOS_OS400__ # define OS_CODE 18 #endif #ifdef __APPLE__ # define OS_CODE 19 #endif #if defined(_BEOS_) || defined(RISCOS) # define fdopen(fd,mode) NULL /* No fdopen() */ #endif #if (defined(_MSC_VER) && (_MSC_VER > 600)) && !defined __INTERIX # if defined(_WIN32_WCE) # define fdopen(fd,mode) NULL /* No fdopen() */ # ifndef _PTRDIFF_T_DEFINED typedef int ptrdiff_t; # define _PTRDIFF_T_DEFINED # endif # else # define fdopen(fd,type) _fdopen(fd,type) # endif #endif #if 
defined(__BORLANDC__) && !defined(MSDOS) #pragma warn -8004 #pragma warn -8008 #pragma warn -8066 #endif /* provide prototypes for these when building zlib without LFS */ #if !defined(_WIN32) && \ (!defined(_LARGEFILE64_SOURCE) || _LFS64_LARGEFILE-0 == 0) ZEXTERN uLong ZEXPORT adler32_combine64 OF((uLong, uLong, z_off_t)); ZEXTERN uLong ZEXPORT crc32_combine64 OF((uLong, uLong, z_off_t)); #endif /* common defaults */ #ifndef OS_CODE # define OS_CODE 3 /* assume Unix */ #endif #ifndef F_OPEN # define F_OPEN(name, mode) fopen((name), (mode)) #endif /* functions */ #if defined(pyr) || defined(Z_SOLO) # define NO_MEMCPY #endif #if defined(SMALL_MEDIUM) && !defined(_MSC_VER) && !defined(__SC__) /* Use our own functions for small and medium model with MSC <= 5.0. * You may have to use the same strategy for Borland C (untested). * The __SC__ check is for Symantec. */ # define NO_MEMCPY #endif #if defined(STDC) && !defined(HAVE_MEMCPY) && !defined(NO_MEMCPY) # define HAVE_MEMCPY #endif #ifdef HAVE_MEMCPY # ifdef SMALL_MEDIUM /* MSDOS small or medium model */ # define zmemcpy _fmemcpy # define zmemcmp _fmemcmp # define zmemzero(dest, len) _fmemset(dest, 0, len) # else # define zmemcpy memcpy # define zmemcmp memcmp # define zmemzero(dest, len) memset(dest, 0, len) # endif #else void ZLIB_INTERNAL zmemcpy OF((Bytef* dest, const Bytef* source, uInt len)); int ZLIB_INTERNAL zmemcmp OF((const Bytef* s1, const Bytef* s2, uInt len)); void ZLIB_INTERNAL zmemzero OF((Bytef* dest, uInt len)); #endif /* Diagnostic functions */ #ifdef ZLIB_DEBUG # include extern int ZLIB_INTERNAL z_verbose; extern void ZLIB_INTERNAL z_error OF((char *m)); # define Assert(cond,msg) {if(!(cond)) z_error(msg);} # define Trace(x) {if (z_verbose>=0) fprintf x ;} # define Tracev(x) {if (z_verbose>0) fprintf x ;} # define Tracevv(x) {if (z_verbose>1) fprintf x ;} # define Tracec(c,x) {if (z_verbose>0 && (c)) fprintf x ;} # define Tracecv(c,x) {if (z_verbose>1 && (c)) fprintf x ;} #else # define Assert(cond,msg) # define Trace(x) # define Tracev(x) # define Tracevv(x) # define Tracec(c,x) # define Tracecv(c,x) #endif #ifndef Z_SOLO voidpf ZLIB_INTERNAL zcalloc OF((voidpf opaque, unsigned items, unsigned size)); void ZLIB_INTERNAL zcfree OF((voidpf opaque, voidpf ptr)); #endif #define ZALLOC(strm, items, size) \ (*((strm)->zalloc))((strm)->opaque, (items), (size)) #define ZFREE(strm, addr) (*((strm)->zfree))((strm)->opaque, (voidpf)(addr)) #define TRY_FREE(s, p) {if (p) ZFREE(s, p);} /* Reverse the bytes in a 32-bit value */ #define ZSWAP32(q) ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) #endif /* ZUTIL_H */ mongodb-1.6.1/src/LIBMONGOC_VERSION_CURRENT0000644000076500000240000000000713572250760017102 0ustar alcaeusstaff1.15.2 mongodb-1.6.1/src/bson-encode.c0000644000076500000240000005415213572250760015611 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #include #include #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_phongo.h" #include "php_bson.h" #include "phongo_compat.h" #if SIZEOF_PHONGO_LONG == 8 #define BSON_APPEND_INT(b, key, keylen, val) \ if (val > INT32_MAX || val < INT32_MIN) { \ bson_append_int64(b, key, keylen, val); \ } else { \ bson_append_int32(b, key, keylen, val); \ } #elif SIZEOF_PHONGO_LONG == 4 #define BSON_APPEND_INT(b, key, keylen, val) \ bson_append_int32(b, key, keylen, val) #else #error Unsupported architecture (integers are neither 32-bit nor 64-bit) #endif #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "PHONGO-BSON" /* Forwards declarations */ static void php_phongo_zval_to_bson_internal(zval* data, php_phongo_field_path* field_path, php_phongo_bson_flags_t flags, bson_t* bson, bson_t** bson_out TSRMLS_DC); /* Determines whether the argument should be serialized as a BSON array or * document. IS_ARRAY is returned if the argument's keys are a sequence of * integers starting at zero; otherwise, IS_OBJECT is returned. */ static int php_phongo_is_array_or_document(zval* val TSRMLS_DC) /* {{{ */ { HashTable* ht_data = HASH_OF(val); int count; if (Z_TYPE_P(val) != IS_ARRAY) { return IS_OBJECT; } count = ht_data ? zend_hash_num_elements(ht_data) : 0; if (count > 0) { #if PHP_VERSION_ID >= 70000 zend_string* key; zend_ulong index, idx; idx = 0; ZEND_HASH_FOREACH_KEY(ht_data, index, key) { if (key) { return IS_OBJECT; } else { if (index != idx) { return IS_OBJECT; } } idx++; } ZEND_HASH_FOREACH_END(); #else char* key; unsigned int key_len; unsigned long index = 0; unsigned long idx = 0; int hash_type = 0; HashPosition pos; zend_hash_internal_pointer_reset_ex(ht_data, &pos); for (;; zend_hash_move_forward_ex(ht_data, &pos)) { hash_type = zend_hash_get_current_key_ex(ht_data, &key, &key_len, &index, 0, &pos); if (hash_type == HASH_KEY_NON_EXISTENT) { break; } if (hash_type == HASH_KEY_IS_STRING) { return IS_OBJECT; } else { if (index != idx) { return IS_OBJECT; } } idx++; } #endif } else { return Z_TYPE_P(val); } return IS_ARRAY; } /* }}} */ /* Appends the array or object argument to the BSON document. If the object is * an instance of MongoDB\BSON\Serializable, the return value of bsonSerialize() * will be appended as an embedded document. Other MongoDB\BSON\Type instances * will be appended as the appropriate BSON type. Other array or object values * will be appended as an embedded document. */ static void php_phongo_bson_append_object(bson_t* bson, php_phongo_field_path* field_path, php_phongo_bson_flags_t flags, const char* key, long key_len, zval* object TSRMLS_DC) /* {{{ */ { if (Z_TYPE_P(object) == IS_OBJECT && instanceof_function(Z_OBJCE_P(object), php_phongo_cursorid_ce TSRMLS_CC)) { bson_append_int64(bson, key, key_len, Z_CURSORID_OBJ_P(object)->id); return; } if (Z_TYPE_P(object) == IS_OBJECT && instanceof_function(Z_OBJCE_P(object), php_phongo_type_ce TSRMLS_CC)) { if (instanceof_function(Z_OBJCE_P(object), php_phongo_serializable_ce TSRMLS_CC)) { #if PHP_VERSION_ID >= 70000 zval obj_data; #else zval* obj_data = NULL; #endif bson_t child; #if PHP_VERSION_ID >= 70000 zend_call_method_with_0_params(object, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data); #else zend_call_method_with_0_params(&object, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data); #endif if (Z_ISUNDEF(obj_data)) { /* zend_call_method() failed or bsonSerialize() threw an * exception. Either way, there is nothing else to do. 
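 * (Any exception raised by bsonSerialize() remains pending and will surface
 * once control returns to the PHP engine.)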
*/ return; } #if PHP_VERSION_ID >= 70000 if (Z_TYPE(obj_data) != IS_ARRAY && !(Z_TYPE(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE(obj_data), zend_standard_class_def TSRMLS_CC))) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Expected %s::%s() to return an array or stdClass, %s given", ZSTR_VAL(Z_OBJCE_P(object)->name), BSON_SERIALIZE_FUNC_NAME, PHONGO_ZVAL_CLASS_OR_TYPE_NAME(obj_data)); zval_ptr_dtor(&obj_data); #else if (Z_TYPE_P(obj_data) != IS_ARRAY && !(Z_TYPE_P(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE_P(obj_data), zend_standard_class_def TSRMLS_CC))) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Expected %s::%s() to return an array or stdClass, %s given", ZSTR_VAL(Z_OBJCE_P(object)->name), BSON_SERIALIZE_FUNC_NAME, PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(obj_data)); zval_ptr_dtor(&obj_data); #endif return; } /* Persistable objects must always be serialized as BSON documents; * otherwise, infer based on bsonSerialize()'s return value. */ #if PHP_VERSION_ID >= 70000 if (instanceof_function(Z_OBJCE_P(object), php_phongo_persistable_ce TSRMLS_CC) || php_phongo_is_array_or_document(&obj_data TSRMLS_CC) == IS_OBJECT) { #else if (instanceof_function(Z_OBJCE_P(object), php_phongo_persistable_ce TSRMLS_CC) || php_phongo_is_array_or_document(obj_data TSRMLS_CC) == IS_OBJECT) { #endif bson_append_document_begin(bson, key, key_len, &child); if (instanceof_function(Z_OBJCE_P(object), php_phongo_persistable_ce TSRMLS_CC)) { #if PHP_VERSION_ID >= 70000 bson_append_binary(&child, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t*) Z_OBJCE_P(object)->name->val, Z_OBJCE_P(object)->name->len); #else bson_append_binary(&child, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t*) Z_OBJCE_P(object)->name, strlen(Z_OBJCE_P(object)->name)); #endif } #if PHP_VERSION_ID >= 70000 php_phongo_zval_to_bson_internal(&obj_data, field_path, flags, &child, NULL TSRMLS_CC); #else php_phongo_zval_to_bson_internal(obj_data, field_path, flags, &child, NULL TSRMLS_CC); #endif bson_append_document_end(bson, &child); } else { bson_append_array_begin(bson, key, key_len, &child); #if PHP_VERSION_ID >= 70000 php_phongo_zval_to_bson_internal(&obj_data, field_path, flags, &child, NULL TSRMLS_CC); #else php_phongo_zval_to_bson_internal(obj_data, field_path, flags, &child, NULL TSRMLS_CC); #endif bson_append_array_end(bson, &child); } zval_ptr_dtor(&obj_data); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_objectid_ce TSRMLS_CC)) { bson_oid_t oid; php_phongo_objectid_t* intern = Z_OBJECTID_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding ObjectId"); bson_oid_init_from_string(&oid, intern->oid); bson_append_oid(bson, key, key_len, &oid); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_utcdatetime_ce TSRMLS_CC)) { php_phongo_utcdatetime_t* intern = Z_UTCDATETIME_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding UTCDateTime"); bson_append_date_time(bson, key, key_len, intern->milliseconds); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_binary_ce TSRMLS_CC)) { php_phongo_binary_t* intern = Z_BINARY_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Binary"); bson_append_binary(bson, key, key_len, intern->type, (const uint8_t*) intern->data, (uint32_t) intern->data_len); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_decimal128_ce TSRMLS_CC)) { php_phongo_decimal128_t* intern = Z_DECIMAL128_OBJ_P(object); 
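            /* intern->decimal is the bson_decimal128_t value populated when the
             * PHP Decimal128 object was created; it is appended as-is below. */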
mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Decimal128"); bson_append_decimal128(bson, key, key_len, &intern->decimal); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_int64_ce TSRMLS_CC)) { php_phongo_int64_t* intern = Z_INT64_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Int64"); bson_append_int64(bson, key, key_len, intern->integer); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_regex_ce TSRMLS_CC)) { php_phongo_regex_t* intern = Z_REGEX_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Regex"); bson_append_regex(bson, key, key_len, intern->pattern, intern->flags); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_javascript_ce TSRMLS_CC)) { php_phongo_javascript_t* intern = Z_JAVASCRIPT_OBJ_P(object); if (intern->scope) { mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Javascript with scope"); bson_append_code_with_scope(bson, key, key_len, intern->code, intern->scope); } else { mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Javascript without scope"); bson_append_code(bson, key, key_len, intern->code); } return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_timestamp_ce TSRMLS_CC)) { php_phongo_timestamp_t* intern = Z_TIMESTAMP_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Timestamp"); bson_append_timestamp(bson, key, key_len, intern->timestamp, intern->increment); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_maxkey_ce TSRMLS_CC)) { mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding MaxKey"); bson_append_maxkey(bson, key, key_len); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_minkey_ce TSRMLS_CC)) { mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding MinKey"); bson_append_minkey(bson, key, key_len); return; } /* Deprecated types */ if (instanceof_function(Z_OBJCE_P(object), php_phongo_dbpointer_ce TSRMLS_CC)) { bson_oid_t oid; php_phongo_dbpointer_t* intern = Z_DBPOINTER_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding DBPointer"); bson_oid_init_from_string(&oid, intern->id); bson_append_dbpointer(bson, key, key_len, intern->ref, &oid); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_symbol_ce TSRMLS_CC)) { php_phongo_symbol_t* intern = Z_SYMBOL_OBJ_P(object); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Symbol"); bson_append_symbol(bson, key, key_len, intern->symbol, intern->symbol_len); return; } if (instanceof_function(Z_OBJCE_P(object), php_phongo_undefined_ce TSRMLS_CC)) { mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding Undefined"); bson_append_undefined(bson, key, key_len); return; } phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Unexpected %s instance: %s", ZSTR_VAL(php_phongo_type_ce->name), ZSTR_VAL(Z_OBJCE_P(object)->name)); return; } else { bson_t child; mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "encoding document"); bson_append_document_begin(bson, key, key_len, &child); php_phongo_zval_to_bson_internal(object, field_path, flags, &child, NULL TSRMLS_CC); bson_append_document_end(bson, &child); } } /* }}} */ /* Appends the zval argument to the BSON document. If the argument is an object, * or an array that should be serialized as an embedded document, this function * will defer to php_phongo_bson_append_object(). 
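 * For example, a PHP value of array(1, 2, 3) (sequential integer keys starting
 * at zero) is appended as a BSON array, while array('x' => 1), or any array
 * with string keys or non-sequential indexes, is appended as an embedded
 * document, per php_phongo_is_array_or_document() above.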
*/ static void php_phongo_bson_append(bson_t* bson, php_phongo_field_path* field_path, php_phongo_bson_flags_t flags, const char* key, long key_len, zval* entry TSRMLS_DC) /* {{{ */ { php_phongo_field_path_write_item_at_current_level(field_path, key); #if PHP_VERSION_ID >= 70000 try_again: #endif switch (Z_TYPE_P(entry)) { case IS_NULL: bson_append_null(bson, key, key_len); break; #if PHP_VERSION_ID >= 70000 case IS_TRUE: bson_append_bool(bson, key, key_len, true); break; case IS_FALSE: bson_append_bool(bson, key, key_len, false); break; #else case IS_BOOL: bson_append_bool(bson, key, key_len, Z_BVAL_P(entry)); break; #endif case IS_LONG: BSON_APPEND_INT(bson, key, key_len, Z_LVAL_P(entry)); break; case IS_DOUBLE: bson_append_double(bson, key, key_len, Z_DVAL_P(entry)); break; case IS_STRING: if (bson_utf8_validate(Z_STRVAL_P(entry), Z_STRLEN_P(entry), true)) { bson_append_utf8(bson, key, key_len, Z_STRVAL_P(entry), Z_STRLEN_P(entry)); } else { char* path_string = php_phongo_field_path_as_string(field_path); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected invalid UTF-8 for field path \"%s\": %s", path_string, Z_STRVAL_P(entry)); efree(path_string); } break; case IS_ARRAY: if (php_phongo_is_array_or_document(entry TSRMLS_CC) == IS_ARRAY) { bson_t child; HashTable* tmp_ht = HASH_OF(entry); if (!php_phongo_zend_hash_apply_protection_begin(tmp_ht)) { char* path_string = php_phongo_field_path_as_string(field_path); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected recursion for field path \"%s\"", path_string); efree(path_string); break; } bson_append_array_begin(bson, key, key_len, &child); php_phongo_field_path_write_type_at_current_level(field_path, PHONGO_FIELD_PATH_ITEM_ARRAY); field_path->size++; php_phongo_zval_to_bson_internal(entry, field_path, flags, &child, NULL TSRMLS_CC); field_path->size--; bson_append_array_end(bson, &child); php_phongo_zend_hash_apply_protection_end(tmp_ht); break; } PHONGO_BREAK_INTENTIONALLY_MISSING case IS_OBJECT: { HashTable* tmp_ht = HASH_OF(entry); if (!php_phongo_zend_hash_apply_protection_begin(tmp_ht)) { char* path_string = php_phongo_field_path_as_string(field_path); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected recursion for field path \"%s\"", path_string); efree(path_string); break; } php_phongo_field_path_write_type_at_current_level(field_path, PHONGO_FIELD_PATH_ITEM_DOCUMENT); field_path->size++; php_phongo_bson_append_object(bson, field_path, flags, key, key_len, entry TSRMLS_CC); field_path->size--; php_phongo_zend_hash_apply_protection_end(tmp_ht); break; } #if PHP_VERSION_ID >= 70000 case IS_INDIRECT: php_phongo_bson_append(bson, field_path, flags, key, key_len, Z_INDIRECT_P(entry) TSRMLS_DC); break; case IS_REFERENCE: ZVAL_DEREF(entry); goto try_again; #endif default: { char* path_string = php_phongo_field_path_as_string(field_path); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected unsupported PHP type for field path \"%s\": %d (%s)", path_string, Z_TYPE_P(entry), zend_get_type_by_const(Z_TYPE_P(entry))); efree(path_string); } } } /* }}} */ static void php_phongo_zval_to_bson_internal(zval* data, php_phongo_field_path* field_path, php_phongo_bson_flags_t flags, bson_t* bson, bson_t** bson_out TSRMLS_DC) /* {{{ */ { HashTable* ht_data = NULL; #if PHP_VERSION_ID >= 70000 zval obj_data; #else HashPosition pos; zval* obj_data = NULL; #endif /* If we will be encoding a class that may contain protected and private * properties, we'll need to 
filter them out later. */ bool ht_data_from_properties = false; /* If the object is an instance of MongoDB\BSON\Persistable, we will need to * inject the PHP class name as a BSON key and ignore any existing key in * the return value of bsonSerialize(). */ bool skip_odm_field = false; ZVAL_UNDEF(&obj_data); switch (Z_TYPE_P(data)) { case IS_OBJECT: if (instanceof_function(Z_OBJCE_P(data), php_phongo_serializable_ce TSRMLS_CC)) { #if PHP_VERSION_ID >= 70000 zend_call_method_with_0_params(data, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data); #else zend_call_method_with_0_params(&data, NULL, NULL, BSON_SERIALIZE_FUNC_NAME, &obj_data); #endif if (Z_ISUNDEF(obj_data)) { /* zend_call_method() failed or bsonSerialize() threw an * exception. Either way, there is nothing else to do. */ return; } #if PHP_VERSION_ID >= 70000 if (Z_TYPE(obj_data) != IS_ARRAY && !(Z_TYPE(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE(obj_data), zend_standard_class_def TSRMLS_CC))) { #else if (Z_TYPE_P(obj_data) != IS_ARRAY && !(Z_TYPE_P(obj_data) == IS_OBJECT && instanceof_function(Z_OBJCE_P(obj_data), zend_standard_class_def TSRMLS_CC))) { #endif phongo_throw_exception( PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Expected %s::%s() to return an array or stdClass, %s given", ZSTR_VAL(Z_OBJCE_P(data)->name), BSON_SERIALIZE_FUNC_NAME, #if PHP_VERSION_ID >= 70000 PHONGO_ZVAL_CLASS_OR_TYPE_NAME(obj_data) #else PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(obj_data) #endif ); goto cleanup; } #if PHP_VERSION_ID >= 70000 ht_data = HASH_OF(&obj_data); #else ht_data = HASH_OF(obj_data); #endif if (instanceof_function(Z_OBJCE_P(data), php_phongo_persistable_ce TSRMLS_CC)) { #if PHP_VERSION_ID >= 70000 bson_append_binary(bson, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t*) Z_OBJCE_P(data)->name->val, Z_OBJCE_P(data)->name->len); #else bson_append_binary(bson, PHONGO_ODM_FIELD_NAME, -1, 0x80, (const uint8_t*) Z_OBJCE_P(data)->name, strlen(Z_OBJCE_P(data)->name)); #endif /* Ensure that we ignore an existing key with the same name * if one exists in the bsonSerialize() return value. */ skip_odm_field = true; } break; } if (instanceof_function(Z_OBJCE_P(data), php_phongo_type_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "%s instance %s cannot be serialized as a root element", ZSTR_VAL(php_phongo_type_ce->name), ZSTR_VAL(Z_OBJCE_P(data)->name)); return; } ht_data = Z_OBJ_HT_P(data)->get_properties(data TSRMLS_CC); ht_data_from_properties = true; break; case IS_ARRAY: ht_data = HASH_OF(data); break; default: return; } #if PHP_VERSION_ID >= 70000 { zend_string* string_key = NULL; zend_ulong num_key = 0; zval* value; ZEND_HASH_FOREACH_KEY_VAL(ht_data, num_key, string_key, value) { if (string_key) { if (ht_data_from_properties) { /* Skip protected and private properties */ if (ZSTR_VAL(string_key)[0] == '\0' && ZSTR_LEN(string_key) > 0) { continue; } } if (strlen(ZSTR_VAL(string_key)) != ZSTR_LEN(string_key)) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "BSON keys cannot contain null bytes. 
Unexpected null byte after \"%s\".", ZSTR_VAL(string_key)); goto cleanup; } if (skip_odm_field && !strcmp(ZSTR_VAL(string_key), PHONGO_ODM_FIELD_NAME)) { continue; } if (flags & PHONGO_BSON_ADD_ID) { if (!strcmp(ZSTR_VAL(string_key), "_id")) { flags &= ~PHONGO_BSON_ADD_ID; } } } /* Ensure we're working with a string key */ if (!string_key) { string_key = zend_long_to_str(num_key); } else { zend_string_addref(string_key); } php_phongo_bson_append(bson, field_path, flags & ~PHONGO_BSON_ADD_ID, ZSTR_VAL(string_key), strlen(ZSTR_VAL(string_key)), value TSRMLS_CC); zend_string_release(string_key); } ZEND_HASH_FOREACH_END(); } #else zend_hash_internal_pointer_reset_ex(ht_data, &pos); for (;; zend_hash_move_forward_ex(ht_data, &pos)) { char* string_key = NULL; uint string_key_len = 0; ulong num_key = 0; zval** value; int hash_type; hash_type = zend_hash_get_current_key_ex(ht_data, &string_key, &string_key_len, &num_key, 0, &pos); if (hash_type == HASH_KEY_NON_EXISTENT) { break; } if (zend_hash_get_current_data_ex(ht_data, (void**) &value, &pos) == FAILURE) { break; } if (hash_type == HASH_KEY_IS_STRING) { if (ht_data_from_properties) { /* Skip protected and private properties */ if (string_key[0] == '\0' && string_key_len > 1) { continue; } } if (strlen(string_key) != string_key_len - 1) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "BSON keys cannot contain null bytes. Unexpected null byte after \"%s\".", ZSTR_VAL(string_key)); goto cleanup; } if (skip_odm_field && !strcmp(string_key, PHONGO_ODM_FIELD_NAME)) { continue; } if (flags & PHONGO_BSON_ADD_ID) { if (!strcmp(string_key, "_id")) { flags &= ~PHONGO_BSON_ADD_ID; } } } /* Ensure we're working with a string key */ if (hash_type == HASH_KEY_IS_LONG) { spprintf(&string_key, 0, "%ld", num_key); } php_phongo_bson_append(bson, field_path, flags & ~PHONGO_BSON_ADD_ID, string_key, strlen(string_key), *value TSRMLS_CC); if (hash_type == HASH_KEY_IS_LONG) { efree(string_key); } } #endif if (flags & PHONGO_BSON_ADD_ID) { bson_oid_t oid; bson_oid_init(&oid, NULL); bson_append_oid(bson, "_id", strlen("_id"), &oid); mongoc_log(MONGOC_LOG_LEVEL_TRACE, MONGOC_LOG_DOMAIN, "Added new _id"); } if (flags & PHONGO_BSON_RETURN_ID && bson_out) { bson_iter_t iter; *bson_out = bson_new(); if (bson_iter_init_find(&iter, bson, "_id") && !bson_append_iter(*bson_out, NULL, 0, &iter)) { /* This should not be able to happen since we are copying from * within a valid bson_t. */ phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Error copying \"_id\" field from encoded document"); goto cleanup; } } cleanup: if (!Z_ISUNDEF(obj_data)) { zval_ptr_dtor(&obj_data); } } /* }}} */ /* Converts the array or object argument to a BSON document. If the object is an * instance of MongoDB\BSON\Serializable, the return value of bsonSerialize() * will be used. */ void php_phongo_zval_to_bson(zval* data, php_phongo_bson_flags_t flags, bson_t* bson, bson_t** bson_out TSRMLS_DC) /* {{{ */ { php_phongo_field_path* field_path = php_phongo_field_path_alloc(false); php_phongo_zval_to_bson_internal(data, field_path, flags, bson, bson_out TSRMLS_CC); php_phongo_field_path_free(field_path); } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/src/bson.c0000644000076500000240000013560313572250760014357 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #include "php_phongo.h" #include "php_bson.h" #include "phongo_compat.h" #include "php_array_api.h" #define DEBUG 0 #undef MONGOC_LOG_DOMAIN #define MONGOC_LOG_DOMAIN "PHONGO-BSON" #define PHONGO_IS_CLASS_INSTANTIATABLE(ce) \ (!(ce->ce_flags & (ZEND_ACC_INTERFACE | ZEND_ACC_IMPLICIT_ABSTRACT_CLASS | ZEND_ACC_EXPLICIT_ABSTRACT_CLASS))) #if PHP_VERSION_ID >= 70000 #define PHONGO_BSON_STATE_ZCHILD(state) (&((php_phongo_bson_state*) (state))->zchild) #else #define PHONGO_BSON_STATE_ZCHILD(state) (((php_phongo_bson_state*) (state))->zchild) #endif #define PHONGO_FIELD_PATH_EXPANSION 8 /* Forward declarations */ static bool php_phongo_bson_visit_document(const bson_iter_t* iter ARG_UNUSED, const char* key, const bson_t* v_document, void* data); static bool php_phongo_bson_visit_array(const bson_iter_t* iter ARG_UNUSED, const char* key, const bson_t* v_document, void* data); /* Path builder */ char* php_phongo_field_path_as_string(php_phongo_field_path* field_path) { size_t length = 1; /* NULL character */ size_t i; char* path; char* ptr; if (!field_path) { return estrdup(""); } if (!field_path->elements) { return estrdup(""); } for (i = 0; i <= field_path->size; i++) { if (!field_path->elements[i]) { continue; } length += (1 + strlen(field_path->elements[i])); } path = emalloc(length); ptr = path; for (i = 0; i <= field_path->size; i++) { if (!field_path->elements[i]) { continue; } strcpy(ptr, field_path->elements[i]); ptr += strlen(field_path->elements[i]); ptr[0] = '.'; ptr++; } ptr[-1] = '\0'; return path; } php_phongo_field_path* php_phongo_field_path_alloc(bool owns_elements) { php_phongo_field_path* tmp = ecalloc(1, sizeof(php_phongo_field_path)); tmp->ref_count = 1; tmp->owns_elements = owns_elements; return tmp; } void php_phongo_field_path_free(php_phongo_field_path* field_path) { if (field_path->owns_elements) { size_t i; for (i = 0; i < field_path->size; i++) { efree(field_path->elements[i]); } } if (field_path->elements) { efree(field_path->elements); } if (field_path->element_types) { efree(field_path->element_types); } efree(field_path); } static void php_phongo_field_path_ensure_allocation(php_phongo_field_path* field_path, size_t level) { if (level >= field_path->allocated_size) { size_t i; field_path->allocated_size = field_path->size + PHONGO_FIELD_PATH_EXPANSION; field_path->elements = erealloc(field_path->elements, sizeof(char*) * field_path->allocated_size); field_path->element_types = erealloc(field_path->element_types, sizeof(php_phongo_bson_field_path_item_types) * field_path->allocated_size); for (i = level; i < field_path->allocated_size; i++) { field_path->elements[i] = NULL; field_path->element_types[i] = PHONGO_FIELD_PATH_ITEM_NONE; } } } void php_phongo_field_path_write_item_at_current_level(php_phongo_field_path* field_path, const char* element) { php_phongo_field_path_ensure_allocation(field_path, field_path->size); if (field_path->owns_elements) 
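/* Clarifying note (added): owns_elements is true only for field paths parsed from a
 * "fieldPaths" type map (see field_path_map_element_alloc()), in which case each
 * segment is copied; otherwise the caller's key pointer is simply borrowed. */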
{ field_path->elements[field_path->size] = estrdup(element); } else { field_path->elements[field_path->size] = (char*) element; } } void php_phongo_field_path_write_type_at_current_level(php_phongo_field_path* field_path, php_phongo_bson_field_path_item_types element_type) { php_phongo_field_path_ensure_allocation(field_path, field_path->size); field_path->element_types[field_path->size] = element_type; } bool php_phongo_field_path_push(php_phongo_field_path* field_path, const char* element, php_phongo_bson_field_path_item_types element_type) { php_phongo_field_path_write_item_at_current_level(field_path, element); php_phongo_field_path_write_type_at_current_level(field_path, element_type); field_path->size++; return true; } bool php_phongo_field_path_pop(php_phongo_field_path* field_path) { php_phongo_field_path_ensure_allocation(field_path, field_path->size); field_path->elements[field_path->size] = NULL; field_path->element_types[field_path->size] = PHONGO_FIELD_PATH_ITEM_NONE; field_path->size--; field_path->elements[field_path->size] = NULL; field_path->element_types[field_path->size] = PHONGO_FIELD_PATH_ITEM_NONE; return true; } inline static bool php_phongo_bson_state_is_initialized(php_phongo_bson_state* state) { return state->field_path != NULL; } void php_phongo_bson_state_ctor(php_phongo_bson_state* state) { state->field_path = php_phongo_field_path_alloc(false); } void php_phongo_bson_state_copy_ctor(php_phongo_bson_state* dst, php_phongo_bson_state* src) { dst->map = src->map; if (src->field_path) { src->field_path->ref_count++; } dst->field_path = src->field_path; } void php_phongo_bson_state_dtor(php_phongo_bson_state* state) { if (state->field_path) { state->field_path->ref_count--; if (state->field_path->ref_count < 1) { php_phongo_field_path_free(state->field_path); } state->field_path = NULL; } } /* }}} */ static void php_phongo_bson_visit_corrupt(const bson_iter_t* iter ARG_UNUSED, void* data ARG_UNUSED) /* {{{ */ { mongoc_log(MONGOC_LOG_LEVEL_WARNING, MONGOC_LOG_DOMAIN, "Corrupt BSON data detected!"); } /* }}} */ static void php_phongo_bson_visit_unsupported_type(const bson_iter_t* iter ARG_UNUSED, const char* key, uint32_t v_type_code, void* data ARG_UNUSED) /* {{{ */ { php_phongo_bson_state* state = (php_phongo_bson_state*) data; char* path_string; TSRMLS_FETCH(); php_phongo_field_path_write_item_at_current_level(state->field_path, key); path_string = php_phongo_field_path_as_string(state->field_path); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected unknown BSON type 0x%02hhx for field path \"%s\". 
Are you using the latest driver?", (unsigned char) v_type_code, path_string); efree(path_string); } /* }}} */ static bool php_phongo_bson_visit_double(const bson_iter_t* iter ARG_UNUSED, const char* key, double v_double, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; if (state->is_visiting_array) { add_next_index_double(retval, v_double); } else { add_assoc_double(retval, key, v_double); } php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_utf8(const bson_iter_t* iter ARG_UNUSED, const char* key, size_t v_utf8_len, const char* v_utf8, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; if (state->is_visiting_array) { ADD_NEXT_INDEX_STRINGL(retval, v_utf8, v_utf8_len); } else { ADD_ASSOC_STRING_EX(retval, key, strlen(key), v_utf8, v_utf8_len); } php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_binary(const bson_iter_t* iter ARG_UNUSED, const char* key, bson_subtype_t v_subtype, size_t v_binary_len, const uint8_t* v_binary, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; TSRMLS_FETCH(); if (v_subtype == 0x80 && strcmp(key, PHONGO_ODM_FIELD_NAME) == 0) { #if PHP_VERSION_ID >= 70000 zend_string* zs_classname = zend_string_init((const char*) v_binary, v_binary_len, 0); zend_class_entry* found_ce = zend_fetch_class(zs_classname, ZEND_FETCH_CLASS_AUTO | ZEND_FETCH_CLASS_SILENT TSRMLS_CC); zend_string_release(zs_classname); #else zend_class_entry* found_ce = zend_fetch_class((const char*) v_binary, v_binary_len, ZEND_FETCH_CLASS_AUTO | ZEND_FETCH_CLASS_SILENT TSRMLS_CC); #endif if (found_ce && PHONGO_IS_CLASS_INSTANTIATABLE(found_ce) && instanceof_function(found_ce, php_phongo_persistable_ce TSRMLS_CC)) { ((php_phongo_bson_state*) data)->odm = found_ce; } } { #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_binary_from_binary_and_type(&zchild, (const char*) v_binary, v_binary_len, v_subtype TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; MAKE_STD_ZVAL(zchild); php_phongo_new_binary_from_binary_and_type(zchild, (const char*) v_binary, v_binary_len, v_subtype TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ } php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_undefined(const bson_iter_t* iter, const char* key, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 /* PHP_VERSION_ID >= 70000 */ zval zchild; object_init_ex(&zchild, php_phongo_undefined_ce); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); object_init_ex(zchild, php_phongo_undefined_ce); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* 
PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_oid(const bson_iter_t* iter ARG_UNUSED, const char* key, const bson_oid_t* v_oid, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_objectid_new_from_oid(&zchild, v_oid TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_objectid_new_from_oid(zchild, v_oid TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_bool(const bson_iter_t* iter ARG_UNUSED, const char* key, bool v_bool, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; if (state->is_visiting_array) { add_next_index_bool(retval, v_bool); } else { add_assoc_bool(retval, key, v_bool); } php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_date_time(const bson_iter_t* iter ARG_UNUSED, const char* key, int64_t msec_since_epoch, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_utcdatetime_from_epoch(&zchild, msec_since_epoch TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_new_utcdatetime_from_epoch(zchild, msec_since_epoch TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_decimal128(const bson_iter_t* iter ARG_UNUSED, const char* key, const bson_decimal128_t* decimal, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_decimal128(&zchild, decimal TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_new_decimal128(zchild, decimal TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_null(const bson_iter_t* iter ARG_UNUSED, const char* key, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; if (state->is_visiting_array) { add_next_index_null(retval); } 
else { add_assoc_null(retval, key); } php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_regex(const bson_iter_t* iter ARG_UNUSED, const char* key, const char* v_regex, const char* v_options, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_regex_from_regex_and_options(&zchild, v_regex, v_options TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_new_regex_from_regex_and_options(zchild, v_regex, v_options TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_symbol(const bson_iter_t* iter, const char* key, size_t v_symbol_len, const char* v_symbol, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_symbol(&zchild, v_symbol, v_symbol_len TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_new_symbol(zchild, v_symbol, v_symbol_len TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_code(const bson_iter_t* iter ARG_UNUSED, const char* key, size_t v_code_len, const char* v_code, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_javascript_from_javascript(1, &zchild, v_code, v_code_len TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_new_javascript_from_javascript(1, zchild, v_code, v_code_len TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_dbpointer(const bson_iter_t* iter, const char* key, size_t namespace_len, const char* namespace, const bson_oid_t* oid, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_dbpointer(&zchild, namespace, namespace_len, oid TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); 
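/* Wrap the deprecated BSON DBPointer value in a MongoDB\BSON\DBPointer instance
 * (PHP 5 branch: the zval was heap-allocated above with MAKE_STD_ZVAL). */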
php_phongo_new_dbpointer(zchild, namespace, namespace_len, oid TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_codewscope(const bson_iter_t* iter ARG_UNUSED, const char* key, size_t v_code_len, const char* v_code, const bson_t* v_scope, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_javascript_from_javascript_and_scope(1, &zchild, v_code, v_code_len, v_scope TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_new_javascript_from_javascript_and_scope(1, zchild, v_code, v_code_len, v_scope TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_int32(const bson_iter_t* iter ARG_UNUSED, const char* key, int32_t v_int32, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; if (state->is_visiting_array) { add_next_index_long(retval, v_int32); } else { add_assoc_long(retval, key, v_int32); } php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_timestamp(const bson_iter_t* iter ARG_UNUSED, const char* key, uint32_t v_timestamp, uint32_t v_increment, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; php_phongo_new_timestamp_from_increment_and_timestamp(&zchild, v_increment, v_timestamp TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); php_phongo_new_timestamp_from_increment_and_timestamp(zchild, v_increment, v_timestamp TSRMLS_CC); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_int64(const bson_iter_t* iter ARG_UNUSED, const char* key, int64_t v_int64, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if SIZEOF_PHONGO_LONG == 4 TSRMLS_FETCH(); #endif php_phongo_field_path_write_item_at_current_level(state->field_path, key); if (state->is_visiting_array) { ADD_NEXT_INDEX_INT64(retval, v_int64); } else { ADD_ASSOC_INT64(retval, key, v_int64); } return false; } /* }}} */ static bool php_phongo_bson_visit_maxkey(const bson_iter_t* iter ARG_UNUSED, const char* key, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 
70000 zval zchild; object_init_ex(&zchild, php_phongo_maxkey_ce); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); object_init_ex(zchild, php_phongo_maxkey_ce); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static bool php_phongo_bson_visit_minkey(const bson_iter_t* iter ARG_UNUSED, const char* key, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); php_phongo_bson_state* state = (php_phongo_bson_state*) data; #if PHP_VERSION_ID >= 70000 zval zchild; object_init_ex(&zchild, php_phongo_minkey_ce); if (state->is_visiting_array) { add_next_index_zval(retval, &zchild); } else { ADD_ASSOC_ZVAL(retval, key, &zchild); } #else /* PHP_VERSION_ID >= 70000 */ zval* zchild = NULL; TSRMLS_FETCH(); MAKE_STD_ZVAL(zchild); object_init_ex(zchild, php_phongo_minkey_ce); if (state->is_visiting_array) { add_next_index_zval(retval, zchild); } else { ADD_ASSOC_ZVAL(retval, key, zchild); } #endif /* PHP_VERSION_ID >= 70000 */ php_phongo_field_path_write_item_at_current_level(state->field_path, key); return false; } /* }}} */ static const bson_visitor_t php_bson_visitors = { NULL /* php_phongo_bson_visit_before*/, NULL /*php_phongo_bson_visit_after*/, php_phongo_bson_visit_corrupt, php_phongo_bson_visit_double, php_phongo_bson_visit_utf8, php_phongo_bson_visit_document, php_phongo_bson_visit_array, php_phongo_bson_visit_binary, php_phongo_bson_visit_undefined, php_phongo_bson_visit_oid, php_phongo_bson_visit_bool, php_phongo_bson_visit_date_time, php_phongo_bson_visit_null, php_phongo_bson_visit_regex, php_phongo_bson_visit_dbpointer, php_phongo_bson_visit_code, php_phongo_bson_visit_symbol, php_phongo_bson_visit_codewscope, php_phongo_bson_visit_int32, php_phongo_bson_visit_timestamp, php_phongo_bson_visit_int64, php_phongo_bson_visit_maxkey, php_phongo_bson_visit_minkey, php_phongo_bson_visit_unsupported_type, php_phongo_bson_visit_decimal128, { NULL } }; static inline bool map_element_matches_field_path(php_phongo_field_path_map_element* map_element, php_phongo_field_path* current) { size_t i; if (map_element->entry->size != current->size) { return false; } for (i = 0; i < current->size; i++) { if (strcmp(map_element->entry->elements[i], "$") == 0) { continue; } if (strcmp(map_element->entry->elements[i], current->elements[i]) != 0) { return false; } } return true; } static php_phongo_field_path_map_element* map_find_field_path_entry(php_phongo_bson_state* state) { size_t i; /* Loop over all field path mappings, and for each, try to see whether it matches the current path */ for (i = 0; i < state->map.field_paths.size; i++) { if (map_element_matches_field_path(state->map.field_paths.map[i], state->field_path)) { return state->map.field_paths.map[i]; } } return NULL; } static void php_phongo_handle_field_path_entry_for_compound_type(php_phongo_bson_state* state, php_phongo_bson_typemap_types* type, zend_class_entry** ce) { php_phongo_field_path_map_element* entry = map_find_field_path_entry(state); if (entry) { switch (entry->node_type) { case PHONGO_TYPEMAP_NATIVE_ARRAY: case PHONGO_TYPEMAP_NATIVE_OBJECT: *type = entry->node_type; break; case PHONGO_TYPEMAP_CLASS: *type = entry->node_type; *ce = entry->node_ce; break; 
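/* Illustrative only (MyCity is a hypothetical class implementing
 * MongoDB\BSON\Unserializable): a type map such as
 * ['fieldPaths' => ['addresses.$.city' => MyCity::class]] reaches the
 * PHONGO_TYPEMAP_CLASS case above, with "$" matching any single path segment
 * (see map_element_matches_field_path()). */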
default: /* Do nothing - pacify compiler */ break; } } } static bool php_phongo_bson_visit_document(const bson_iter_t* iter ARG_UNUSED, const char* key, const bson_t* v_document, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); bson_iter_t child; php_phongo_bson_state* parent_state = (php_phongo_bson_state*) data; TSRMLS_FETCH(); php_phongo_field_path_push(parent_state->field_path, key, PHONGO_FIELD_PATH_ITEM_DOCUMENT); if (bson_iter_init(&child, v_document)) { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; php_phongo_bson_state_copy_ctor(&state, parent_state); #if PHP_VERSION_ID >= 70000 array_init(&state.zchild); #else MAKE_STD_ZVAL(state.zchild); array_init(state.zchild); #endif if (!bson_iter_visit_all(&child, &php_bson_visitors, &state) && !child.err_off) { /* Check for entries in the fieldPath type map key, and use them to * override the default ones for this type */ php_phongo_handle_field_path_entry_for_compound_type(&state, &state.map.document_type, &state.map.document); /* If php_phongo_bson_visit_binary() finds an ODM class, it should * supersede a default type map and named document class. */ if (state.odm && state.map.document_type == PHONGO_TYPEMAP_NONE) { state.map.document_type = PHONGO_TYPEMAP_CLASS; } switch (state.map.document_type) { case PHONGO_TYPEMAP_NATIVE_ARRAY: #if PHP_VERSION_ID >= 70000 if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, &state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, &state.zchild); } #else /* PHP_VERSION_ID >= 70000 */ if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, state.zchild); } #endif /* PHP_VERSION_ID >= 70000 */ break; case PHONGO_TYPEMAP_CLASS: { #if PHP_VERSION_ID >= 70000 zval obj; object_init_ex(&obj, state.odm ? state.odm : state.map.document); zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, &state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, &obj); } else { ADD_ASSOC_ZVAL(retval, key, &obj); } zval_ptr_dtor(&state.zchild); #else /* PHP_VERSION_ID >= 70000 */ zval* obj = NULL; MAKE_STD_ZVAL(obj); object_init_ex(obj, state.odm ? state.odm : state.map.document); zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, obj); } else { ADD_ASSOC_ZVAL(retval, key, obj); } zval_ptr_dtor(&state.zchild); #endif /* PHP_VERSION_ID >= 70000 */ break; } case PHONGO_TYPEMAP_NATIVE_OBJECT: default: #if PHP_VERSION_ID >= 70000 convert_to_object(&state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, &state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, &state.zchild); } #else /* PHP_VERSION_ID >= 70000 */ convert_to_object(state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, state.zchild); } #endif /* PHP_VERSION_ID >= 70000 */ } } else { /* Iteration stopped prematurely due to corruption or a failed * visitor. Free state.zchild, which we just initialized, and return * true to stop iteration for our parent context. 
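* Any exception already thrown by a nested visitor is left pending so the top-level caller can report it instead of a generic corrupt-data error.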
*/ zval_ptr_dtor(&state.zchild); php_phongo_bson_state_dtor(&state); return true; } php_phongo_bson_state_dtor(&state); php_phongo_field_path_pop(parent_state->field_path); } return false; } /* }}} */ static bool php_phongo_bson_visit_array(const bson_iter_t* iter ARG_UNUSED, const char* key, const bson_t* v_array, void* data) /* {{{ */ { zval* retval = PHONGO_BSON_STATE_ZCHILD(data); bson_iter_t child; php_phongo_bson_state* parent_state = (php_phongo_bson_state*) data; TSRMLS_FETCH(); php_phongo_field_path_push(parent_state->field_path, key, PHONGO_FIELD_PATH_ITEM_ARRAY); if (bson_iter_init(&child, v_array)) { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; php_phongo_bson_state_copy_ctor(&state, parent_state); /* Note that we are visiting an array, so element visitors know to use * add_next_index() (i.e. disregard BSON keys) instead of add_assoc() * when building the PHP array. */ state.is_visiting_array = true; #if PHP_VERSION_ID >= 70000 array_init(&state.zchild); #else MAKE_STD_ZVAL(state.zchild); array_init(state.zchild); #endif if (!bson_iter_visit_all(&child, &php_bson_visitors, &state) && !child.err_off) { /* Check for entries in the fieldPath type map key, and use them to * override the default ones for this type */ php_phongo_handle_field_path_entry_for_compound_type(&state, &state.map.array_type, &state.map.array); switch (state.map.array_type) { case PHONGO_TYPEMAP_CLASS: { #if PHP_VERSION_ID >= 70000 zval obj; object_init_ex(&obj, state.map.array); zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, &state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, &obj); } else { ADD_ASSOC_ZVAL(retval, key, &obj); } zval_ptr_dtor(&state.zchild); #else /* PHP_VERSION_ID >= 70000 */ zval* obj = NULL; MAKE_STD_ZVAL(obj); object_init_ex(obj, state.map.array); zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, obj); } else { ADD_ASSOC_ZVAL(retval, key, obj); } zval_ptr_dtor(&state.zchild); #endif /* PHP_VERSION_ID >= 70000 */ break; } case PHONGO_TYPEMAP_NATIVE_OBJECT: #if PHP_VERSION_ID >= 70000 convert_to_object(&state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, &state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, &state.zchild); } #else /* PHP_VERSION_ID >= 70000 */ convert_to_object(state.zchild); if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, state.zchild); } #endif /* PHP_VERSION_ID >= 70000 */ break; case PHONGO_TYPEMAP_NATIVE_ARRAY: default: #if PHP_VERSION_ID >= 70000 if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, &state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, &state.zchild); } #else /* PHP_VERSION_ID >= 70000 */ if (((php_phongo_bson_state*) data)->is_visiting_array) { add_next_index_zval(retval, state.zchild); } else { ADD_ASSOC_ZVAL(retval, key, state.zchild); } #endif /* PHP_VERSION_ID >= 70000 */ break; } } else { /* Iteration stopped prematurely due to corruption or a failed * visitor. Free state.zchild, which we just initialized, and return * true to stop iteration for our parent context. 
*/ zval_ptr_dtor(&state.zchild); php_phongo_bson_state_dtor(&state); return true; } php_phongo_bson_state_dtor(&state); php_phongo_field_path_pop(parent_state->field_path); } return false; } /* }}} */ /* Converts a BSON document to a PHP value using the default typemap. */ #if PHP_VERSION_ID >= 70000 bool php_phongo_bson_to_zval(const unsigned char* data, int data_len, zval* zv) /* {{{ */ #else bool php_phongo_bson_to_zval(const unsigned char* data, int data_len, zval** zv) #endif { bool retval; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; retval = php_phongo_bson_to_zval_ex(data, data_len, &state); #if PHP_VERSION_ID >= 70000 ZVAL_ZVAL(zv, &state.zchild, 1, 1); #else *zv = state.zchild; #endif return retval; } /* }}} */ /* Converts a BSON document to a PHP value according to the typemap specified in * the state argument. * * On success, the result will be set on the state argument and true will be * returned. On error, an exception will have been thrown and false will be * returned. * * Note: the result zval in the state argument will always be initialized for * PHP 5.x so that the caller may always zval_ptr_dtor() it. The zval is left * as-is on PHP 7; however, it should have the type undefined if the state * was initialized to zero. */ bool php_phongo_bson_to_zval_ex(const unsigned char* data, int data_len, php_phongo_bson_state* state) /* {{{ */ { bson_reader_t* reader = NULL; bson_iter_t iter; const bson_t* b; bool eof = false; bool retval = false; bool must_dtor_state = false; TSRMLS_FETCH(); #if PHP_VERSION_ID < 70000 MAKE_STD_ZVAL(state->zchild); /* Ensure that state->zchild has a type, since the calling code may want to * zval_ptr_dtor() it if we throw an exception. */ ZVAL_NULL(state->zchild); #endif if (!php_phongo_bson_state_is_initialized(state)) { php_phongo_bson_state_ctor(state); must_dtor_state = true; } reader = bson_reader_new_from_data(data, data_len); if (!(b = bson_reader_read(reader, NULL))) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Could not read document from BSON reader"); goto cleanup; } if (!bson_iter_init(&iter, b)) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Could not initialize BSON iterator"); goto cleanup; } /* We initialize an array because it will either be returned as-is (native * array in type map), passed to bsonUnserialize() (ODM class), or used to * initialize a stdClass object (native object in type map). */ #if PHP_VERSION_ID >= 70000 array_init(&state->zchild); #else array_init(state->zchild); #endif if (bson_iter_visit_all(&iter, &php_bson_visitors, state) || iter.err_off) { /* Iteration stopped prematurely due to corruption or a failed visitor. * While we free the reader, state->zchild should be left as-is, since * the calling code may want to zval_ptr_dtor() it. If an exception has * been thrown already (due to an unsupported BSON type for example, * don't overwrite with a generic exception message. */ if (!EG(exception)) { char* path = php_phongo_field_path_as_string(state->field_path); phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Detected corrupt BSON data for field path '%s' at offset %d", path, iter.err_off); efree(path); } goto cleanup; } /* If php_phongo_bson_visit_binary() finds an ODM class, it should supersede * a default type map and named root class. 
*/ if (state->odm && state->map.root_type == PHONGO_TYPEMAP_NONE) { state->map.root_type = PHONGO_TYPEMAP_CLASS; } switch (state->map.root_type) { case PHONGO_TYPEMAP_NATIVE_ARRAY: /* Nothing to do here */ break; case PHONGO_TYPEMAP_CLASS: { #if PHP_VERSION_ID >= 70000 zval obj; object_init_ex(&obj, state->odm ? state->odm : state->map.root); zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, &state->zchild); zval_ptr_dtor(&state->zchild); ZVAL_COPY_VALUE(&state->zchild, &obj); #else /* PHP_VERSION_ID >= 70000 */ zval* obj = NULL; MAKE_STD_ZVAL(obj); object_init_ex(obj, state->odm ? state->odm : state->map.root); zend_call_method_with_1_params(&obj, NULL, NULL, BSON_UNSERIALIZE_FUNC_NAME, NULL, state->zchild); zval_ptr_dtor(&state->zchild); state->zchild = obj; #endif /* PHP_VERSION_ID >= 70000 */ break; } case PHONGO_TYPEMAP_NATIVE_OBJECT: default: #if PHP_VERSION_ID >= 70000 convert_to_object(&state->zchild); #else convert_to_object(state->zchild); #endif } if (bson_reader_read(reader, &eof) || !eof) { phongo_throw_exception(PHONGO_ERROR_UNEXPECTED_VALUE TSRMLS_CC, "Reading document did not exhaust input buffer"); goto cleanup; } retval = true; cleanup: if (reader) { bson_reader_destroy(reader); } if (must_dtor_state) { php_phongo_bson_state_dtor(state); } return retval; } /* }}} */ /* Fetches a zend_class_entry for the given class name and checks that it is * also instantiatable and implements a specified interface. Returns the class * on success; otherwise, NULL is returned and an exception is thrown. */ static zend_class_entry* php_phongo_bson_state_fetch_class(const char* classname, int classname_len, zend_class_entry* interface_ce TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zend_string* zs_classname = zend_string_init(classname, classname_len, 0); zend_class_entry* found_ce = zend_fetch_class(zs_classname, ZEND_FETCH_CLASS_AUTO | ZEND_FETCH_CLASS_SILENT TSRMLS_CC); zend_string_release(zs_classname); #else zend_class_entry* found_ce = zend_fetch_class(classname, classname_len, ZEND_FETCH_CLASS_AUTO | ZEND_FETCH_CLASS_SILENT TSRMLS_CC); #endif if (!found_ce) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Class %s does not exist", classname); } else if (!PHONGO_IS_CLASS_INSTANTIATABLE(found_ce)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Class %s is not instantiatable", classname); } else if (!instanceof_function(found_ce, interface_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Class %s does not implement %s", classname, ZSTR_VAL(interface_ce->name)); } else { return found_ce; } return NULL; } /* }}} */ /* Parses a BSON type (i.e. array, document, or root). On success, the type and * type_ce output arguments will be assigned and true will be returned; * otherwise, false is returned and an exception is thrown. 
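* Recognized values are "array", "object"/"stdclass", or the name of an instantiatable class implementing MongoDB\BSON\Unserializable.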
*/ static bool php_phongo_bson_state_parse_type(zval* options, const char* name, php_phongo_bson_typemap_types* type, zend_class_entry** type_ce TSRMLS_DC) /* {{{ */ { char* classname; int classname_len; zend_bool classname_free = 0; bool retval = true; classname = php_array_fetch_string(options, name, &classname_len, &classname_free); if (!classname_len) { goto cleanup; } if (!strcasecmp(classname, "array")) { *type = PHONGO_TYPEMAP_NATIVE_ARRAY; *type_ce = NULL; } else if (!strcasecmp(classname, "stdclass") || !strcasecmp(classname, "object")) { *type = PHONGO_TYPEMAP_NATIVE_OBJECT; *type_ce = NULL; } else { if ((*type_ce = php_phongo_bson_state_fetch_class(classname, classname_len, php_phongo_unserializable_ce TSRMLS_CC))) { *type = PHONGO_TYPEMAP_CLASS; } else { retval = false; } } cleanup: if (classname_free) { str_efree(classname); } return retval; } /* }}} */ static void field_path_map_element_set_info(php_phongo_field_path_map_element* element, php_phongo_bson_typemap_types type, zend_class_entry* ce) { element->node_type = type; element->node_ce = ce; } static void map_add_field_path_element(php_phongo_bson_typemap* map, php_phongo_field_path_map_element* element) { /* Make sure we have allocated enough */ if (map->field_paths.allocated_size < map->field_paths.size + 1) { map->field_paths.allocated_size += PHONGO_FIELD_PATH_EXPANSION; map->field_paths.map = erealloc(map->field_paths.map, sizeof(php_phongo_field_path_map_element) * map->field_paths.allocated_size); } map->field_paths.map[map->field_paths.size] = element; map->field_paths.size++; } static php_phongo_field_path_map_element* field_path_map_element_alloc(void) { php_phongo_field_path_map_element* tmp = ecalloc(1, sizeof(php_phongo_field_path_map_element)); tmp->entry = php_phongo_field_path_alloc(true); return tmp; } static void field_path_map_element_dtor(php_phongo_field_path_map_element* element) { php_phongo_field_path_free(element->entry); efree(element); } bool php_phongo_bson_state_add_field_path(php_phongo_bson_typemap* map, char* field_path_original, php_phongo_bson_typemap_types type, zend_class_entry* ce TSRMLS_DC) { char* ptr = NULL; char* segment_end = NULL; php_phongo_field_path_map_element* field_path_map_element; if (field_path_original[0] == '.') { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "A 'fieldPaths' key may not start with a '.'"); return false; } if (field_path_original[strlen(field_path_original) - 1] == '.') { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "A 'fieldPaths' key may not end with a '.'"); return false; } field_path_map_element = field_path_map_element_alloc(); ptr = field_path_original; /* Loop over all the segments. A segment is delimited by a "." 
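(so a key such as "addresses.0.city" yields the segments "addresses", "0" and "city").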
*/ while ((segment_end = strchr(ptr, '.')) != NULL) { char* tmp = NULL; /* Bail out if we have an empty segment */ if (ptr == segment_end) { field_path_map_element_dtor(field_path_map_element); phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "A 'fieldPaths' key may not have an empty segment"); return false; } tmp = calloc(1, segment_end - ptr + 1); memcpy(tmp, ptr, segment_end - ptr); php_phongo_field_path_push(field_path_map_element->entry, tmp, PHONGO_FIELD_PATH_ITEM_NONE); free(tmp); ptr = segment_end + 1; } /* Add the last (or single) element */ php_phongo_field_path_push(field_path_map_element->entry, ptr, PHONGO_FIELD_PATH_ITEM_NONE); field_path_map_element_set_info(field_path_map_element, type, ce); map_add_field_path_element(map, field_path_map_element); return true; } void php_phongo_bson_typemap_dtor(php_phongo_bson_typemap* map) { size_t i; if (map->field_paths.map) { for (i = 0; i < map->field_paths.size; i++) { field_path_map_element_dtor(map->field_paths.map[i]); } efree(map->field_paths.map); } map->field_paths.map = NULL; } /* Loops over each element in the fieldPaths array (if exists, and is an * array), and then checks whether each element is a valid type mapping */ bool php_phongo_bson_state_parse_fieldpaths(zval* typemap, php_phongo_bson_typemap* map TSRMLS_DC) /* {{{ */ { zval* fieldpaths = NULL; HashTable* ht_data; if (!php_array_existsc(typemap, "fieldPaths")) { return true; } fieldpaths = php_array_fetchc_array(typemap, "fieldPaths"); if (!fieldpaths) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "The 'fieldPaths' element is not an array"); return false; } ht_data = HASH_OF(fieldpaths); #if PHP_VERSION_ID >= 70000 { zend_string* string_key = NULL; zend_ulong num_key = 0; zval* property; ZEND_HASH_FOREACH_KEY_VAL(ht_data, num_key, string_key, property) { zend_class_entry* map_ce = NULL; php_phongo_bson_typemap_types map_type; if (!string_key) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "The 'fieldPaths' element is not an associative array"); return false; } if (strcmp(ZSTR_VAL(string_key), "") == 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "The 'fieldPaths' element may not be an empty string"); return false; } if (!php_phongo_bson_state_parse_type(fieldpaths, ZSTR_VAL(string_key), &map_type, &map_ce TSRMLS_CC)) { return false; } if (!php_phongo_bson_state_add_field_path(map, ZSTR_VAL(string_key), map_type, map_ce TSRMLS_CC)) { return false; } } ZEND_HASH_FOREACH_END(); } #else { HashPosition pos; zval** property; for ( zend_hash_internal_pointer_reset_ex(ht_data, &pos); zend_hash_get_current_data_ex(ht_data, (void**) &property, &pos) == SUCCESS; zend_hash_move_forward_ex(ht_data, &pos)) { char* string_key = NULL; uint string_key_len = 0; ulong num_key = 0; zend_class_entry* map_ce = NULL; php_phongo_bson_typemap_types map_type; if (HASH_KEY_IS_STRING != zend_hash_get_current_key_ex(ht_data, &string_key, &string_key_len, &num_key, 0, &pos)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "The 'fieldPaths' element is not an associative array"); return false; } if (strcmp(string_key, "") == 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "The 'fieldPaths' element may not be an empty string"); return false; } if (!php_phongo_bson_state_parse_type(fieldpaths, string_key, &map_type, &map_ce TSRMLS_CC)) { return false; } if (!php_phongo_bson_state_add_field_path(map, string_key, map_type, map_ce TSRMLS_CC)) { return false; } } } #endif /* PHP_VERSION_ID 
>= 70000 */ return true; } /* }}} */ #if DEBUG static void print_node_info(php_phongo_field_path_node* ptr, int level) { printf("%*sNAME: %s\n", level * 4, "", ptr->name); printf("%*s- type:", level * 4, ""); switch (ptr->node_type) { case PHONGO_TYPEMAP_NONE: printf(" none (unset)\n"); break; case PHONGO_TYPEMAP_CLASS: printf(" class (%s)\n", ZSTR_VAL(ptr->node_ce->name)); break; case PHONGO_TYPEMAP_NATIVE_ARRAY: printf(" array\n"); break; case PHONGO_TYPEMAP_NATIVE_OBJECT: printf(" stdClass\n"); break; } } static void print_map_list(php_phongo_field_path_node* node, int level) { php_phongo_field_path_node* ptr = node->children; if (!ptr) { return; } do { print_node_info(ptr, level); if (ptr->children) { printf("%*s- children:\n", level * 4, ""); print_map_list(ptr, level + 1); } ptr = ptr->next; } while (ptr); } #endif /* Applies the array argument to a typemap struct. Returns true on success; * otherwise, false is returned an an exception is thrown. */ bool php_phongo_bson_typemap_to_state(zval* typemap, php_phongo_bson_typemap* map TSRMLS_DC) /* {{{ */ { if (!typemap) { return true; } if (!php_phongo_bson_state_parse_type(typemap, "array", &map->array_type, &map->array TSRMLS_CC) || !php_phongo_bson_state_parse_type(typemap, "document", &map->document_type, &map->document TSRMLS_CC) || !php_phongo_bson_state_parse_type(typemap, "root", &map->root_type, &map->root TSRMLS_CC) || !php_phongo_bson_state_parse_fieldpaths(typemap, map TSRMLS_CC)) { /* Exception should already have been thrown */ return false; } #if DEBUG print_map_list(&map->field_path_map, 0); #endif return true; } /* }}} */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/tests/apm/bug0950-001.phpt0000644000076500000240000000172013572250760016660 0ustar alcaeusstaff--TEST-- PHPC-950: Segfault killing cursor after subscriber HashTable is destroyed (no subscribers) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); /* Exiting during iteration on a live cursor will result in * php_phongo_command_started() being invoked for the killCursor command after * RSHUTDOWN has already destroyed the subscriber HashTable */ foreach ($cursor as $data) { echo "Exiting during first iteration on cursor\n"; exit(0); } ?> ===DONE=== --EXPECT-- Exiting during first iteration on cursor mongodb-1.6.1/tests/apm/bug0950-002.phpt0000644000076500000240000000315013572250760016660 0ustar alcaeusstaff--TEST-- PHPC-950: Segfault killing cursor after subscriber HashTable is destroyed (one subscriber) --SKIPIF-- --FILE-- getCommandName()); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { printf("- succeeded: %s\n", $event->getCommandName()); } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { printf("- failed: %s\n", $event->getCommandName()); } } $manager = new MongoDB\Driver\Manager(URI); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); MongoDB\Driver\Monitoring\addSubscriber(new MySubscriber); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); /* Exiting during iteration on a live cursor will result in * php_phongo_command_started() being invoked for the 
killCursor command after * RSHUTDOWN has already destroyed the subscriber HashTable */ foreach ($cursor as $data) { echo "Exiting during first iteration on cursor\n"; exit(0); } ?> ===DONE=== --EXPECT-- - started: find - succeeded: find Exiting during first iteration on cursor mongodb-1.6.1/tests/apm/monitoring-addSubscriber-001.phpt0000644000076500000240000000214313572250760022524 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\addSubscriber(): Adding one subscriber --SKIPIF-- --FILE-- getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber = new MySubscriber; echo "Before addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); echo "After addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- Before addSubscriber After addSubscriber - started: find mongodb-1.6.1/tests/apm/monitoring-addSubscriber-002.phpt0000644000076500000240000000305213572250760022525 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\addSubscriber(): Adding two subscribers --SKIPIF-- --FILE-- instanceName = $instanceName; } public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event ) { echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber1 = new MySubscriber( "ONE" ); $subscriber2 = new MySubscriber( "TWO" ); echo "Before addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber1 ); echo "After addSubscriber (ONE)\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber2 ); echo "After addSubscriber (TWO)\n"; $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- Before addSubscriber After addSubscriber (ONE) - (ONE) - started: find After addSubscriber (TWO) - (ONE) - started: find - (TWO) - started: find mongodb-1.6.1/tests/apm/monitoring-addSubscriber-003.phpt0000644000076500000240000000244313572250760022531 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\addSubscriber(): Adding one subscriber multiple times --SKIPIF-- --FILE-- getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber = new MySubscriber(); echo "Before addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); echo "After addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); echo "After addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- Before addSubscriber After addSubscriber - started: find After addSubscriber - started: find mongodb-1.6.1/tests/apm/monitoring-addSubscriber-004.phpt0000644000076500000240000000351213572250760022530 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\addSubscriber(): Adding three subscribers --SKIPIF-- --FILE-- 
instanceName = $instanceName; } public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event ) { echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber1 = new MySubscriber( "ONE" ); $subscriber2 = new MySubscriber( "TWO" ); $subscriber3 = new MySubscriber( "THR" ); echo "Before addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber1 ); echo "After addSubscriber (ONE)\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber2 ); echo "After addSubscriber (TWO)\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber3 ); echo "After addSubscriber (THR)\n"; $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- Before addSubscriber After addSubscriber (ONE) - (ONE) - started: find After addSubscriber (TWO) - (ONE) - started: find - (TWO) - started: find After addSubscriber (THR) - (ONE) - started: find - (TWO) - started: find - (THR) - started: find mongodb-1.6.1/tests/apm/monitoring-commandFailed-001.phpt0000644000076500000240000000534113572250760022476 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\CommandFailedEvent --SKIPIF-- --FILE-- getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { echo "failed: ", $event->getCommandName(), "\n"; echo "- getError() returns an object: ", is_object( $event->getError() ) ? 'yes' : 'no', "\n"; echo "- getError() returns an MongoDB\Driver\Exception\Exception object: ", $event->getError() instanceof MongoDB\Driver\Exception\Exception ? 'yes' : 'no', "\n"; echo "- getDurationMicros() returns an integer: ", is_integer( $event->getDurationMicros() ) ? 'yes' : 'no', "\n"; echo "- getDurationMicros() returns > 0: ", $event->getDurationMicros() > 0 ? 'yes' : 'no', "\n"; echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n"; echo "- getCommandName() returns '", $event->getCommandName(), "'\n"; echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n"; echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n"; echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n"; echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 
'yes' : 'no', "\n"; } } $subscriber = new MySubscriber; MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); $primary = get_primary_server(URI); $command = new \MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$unsupported' => 1]] ]); try { $primary->executeCommand(DATABASE_NAME, $command); } catch (Exception $e) { /* Swallow */ } ?> --EXPECT-- started: aggregate failed: aggregate - getError() returns an object: yes - getError() returns an MongoDB\Driver\Exception\Exception object: yes - getDurationMicros() returns an integer: yes - getDurationMicros() returns > 0: yes - getCommandName() returns a string: yes - getCommandName() returns 'aggregate' - getServer() returns an object: yes - getServer() returns a Server object: yes - getOperationId() returns a string: yes - getRequestId() returns a string: yes mongodb-1.6.1/tests/apm/monitoring-commandFailed-002.phpt0000644000076500000240000000320113572250760022470 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\CommandFailedEvent: requestId and operationId match --SKIPIF-- --FILE-- getCommandName(), "\n"; $this->startRequestId = $event->getRequestId(); $this->startOperationId = $event->getOperationId(); } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { echo "failed: ", $event->getCommandName(), "\n"; echo "- requestId matches: ", $this->startRequestId == $event->getRequestId() ? 'yes' : 'no', " \n"; echo "- operationId matches: ", $this->startOperationId == $event->getOperationId() ? 'yes' : 'no', " \n"; } } $query = new MongoDB\Driver\Query( [] ); $subscriber = new MySubscriber; MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); $primary = get_primary_server(URI); $command = new \MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$unsupported' => 1]] ]); try { $primary->executeCommand(DATABASE_NAME, $command); } catch (Exception $e) { /* Swallow */ } ?> --EXPECT-- started: aggregate failed: aggregate - requestId matches: yes - operationId matches: yes mongodb-1.6.1/tests/apm/monitoring-commandFailed-003.phpt0000644000076500000240000000274013572250760022500 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\CommandFailedEvent --SKIPIF-- --FILE-- getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { var_dump($event); } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { echo "failed: ", $event->getCommandName(), "\n"; var_dump($event->getReply()); } } $subscriber = new MySubscriber; MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['_id' => 'foo'], 'upsert' => true, 'new' => true, ]); try { $manager->executeWriteCommand(DATABASE_NAME, $command); } catch (MongoDB\Driver\Exception\CommandException $e) {} ?> --EXPECTF-- started: findAndModify failed: findAndModify object(stdClass)#%d (%d) {%A ["ok"]=> float(0) ["errmsg"]=> string(49) "Either an update or remove=true must be specified" ["code"]=> int(9) ["codeName"]=> string(13) "FailedToParse"%A } mongodb-1.6.1/tests/apm/monitoring-commandStarted-001.phpt0000644000076500000240000000456313572250760022725 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\CommandStartedEvent --SKIPIF-- --FILE-- getCommandName(), "\n"; echo "- getCommand() returns an object: ", is_object( $event->getCommand() ) ? 
'yes' : 'no', "\n"; echo "- getCommand() returns a stdClass object: ", $event->getCommand() instanceof stdClass ? 'yes' : 'no', "\n"; echo "- getDatabaseName() returns a string: ", is_string( $event->getDatabaseName() ) ? 'yes' : 'no', "\n"; echo "- getDatabaseName() returns '", $event->getDatabaseName(), "'\n"; echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n"; echo "- getCommandName() returns '", $event->getCommandName(), "'\n"; echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n"; echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n"; echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n"; echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 'yes' : 'no', "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber = new MySubscriber; MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- started: find - getCommand() returns an object: yes - getCommand() returns a stdClass object: yes - getDatabaseName() returns a string: yes - getDatabaseName() returns 'demo' - getCommandName() returns a string: yes - getCommandName() returns 'find' - getServer() returns an object: yes - getServer() returns a Server object: yes - getOperationId() returns a string: yes - getRequestId() returns a string: yes mongodb-1.6.1/tests/apm/monitoring-commandSucceeded-001.phpt0000644000076500000240000000473613572250760023205 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\CommandSucceededEvent --SKIPIF-- --FILE-- getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { echo "succeeded: ", $event->getCommandName(), "\n"; echo "- getReply() returns an object: ", is_object( $event->getReply() ) ? 'yes' : 'no', "\n"; echo "- getReply() returns a stdClass object: ", $event->getReply() instanceof stdClass ? 'yes' : 'no', "\n"; echo "- getDurationMicros() returns an integer: ", is_integer( $event->getDurationMicros() ) ? 'yes' : 'no', "\n"; echo "- getDurationMicros() returns > 0: ", $event->getDurationMicros() > 0 ? 'yes' : 'no', "\n"; echo "- getCommandName() returns a string: ", is_string( $event->getCommandName() ) ? 'yes' : 'no', "\n"; echo "- getCommandName() returns '", $event->getCommandName(), "'\n"; echo "- getServer() returns an object: ", is_object( $event->getServer() ) ? 'yes' : 'no', "\n"; echo "- getServer() returns a Server object: ", $event->getServer() instanceof MongoDB\Driver\Server ? 'yes' : 'no', "\n"; echo "- getOperationId() returns a string: ", is_string( $event->getOperationId() ) ? 'yes' : 'no', "\n"; echo "- getRequestId() returns a string: ", is_string( $event->getRequestId() ) ? 
'yes' : 'no', "\n"; } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber = new MySubscriber; MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- started: find succeeded: find - getReply() returns an object: yes - getReply() returns a stdClass object: yes - getDurationMicros() returns an integer: yes - getDurationMicros() returns > 0: yes - getCommandName() returns a string: yes - getCommandName() returns 'find' - getServer() returns an object: yes - getServer() returns a Server object: yes - getOperationId() returns a string: yes - getRequestId() returns a string: yes mongodb-1.6.1/tests/apm/monitoring-commandSucceeded-002.phpt0000644000076500000240000000265213572250760023201 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\CommandSucceededEvent: requestId and operationId match --SKIPIF-- --FILE-- getCommandName(), "\n"; $this->startRequestId = $event->getRequestId(); $this->startOperationId = $event->getOperationId(); } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { echo "succeeded: ", $event->getCommandName(), "\n"; echo "- requestId matches: ", $this->startRequestId == $event->getRequestId() ? 'yes' : 'no', " \n"; echo "- operationId matches: ", $this->startOperationId == $event->getOperationId() ? 'yes' : 'no', " \n"; } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber = new MySubscriber; MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- started: find succeeded: find - requestId matches: yes - operationId matches: yes mongodb-1.6.1/tests/apm/monitoring-removeSubscriber-001.phpt0000644000076500000240000000242513572250760023274 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\removeSubscriber(): Removing the only subscriber --SKIPIF-- --FILE-- getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber = new MySubscriber; echo "Before addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber ); echo "After addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\removeSubscriber( $subscriber ); echo "After removeSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- Before addSubscriber After addSubscriber - started: find After removeSubscriber mongodb-1.6.1/tests/apm/monitoring-removeSubscriber-002.phpt0000644000076500000240000000341013572250760023270 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Monitoring\removeSubscriber(): Removing one of multiple subscribers --SKIPIF-- --FILE-- instanceName = $instanceName; } public function commandStarted( \MongoDB\Driver\Monitoring\CommandStartedEvent $event ) { echo "- ({$this->instanceName}) - started: ", $event->getCommandName(), "\n"; } public function commandSucceeded( \MongoDB\Driver\Monitoring\CommandSucceededEvent $event ) { } public function commandFailed( \MongoDB\Driver\Monitoring\CommandFailedEvent $event ) { } } $query = new MongoDB\Driver\Query( [] ); $subscriber1 = new MySubscriber( "ONE" ); $subscriber2 = new MySubscriber( 
"TWO" ); echo "Before addSubscriber\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber1 ); echo "After addSubscriber (ONE)\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\addSubscriber( $subscriber2 ); echo "After addSubscriber (TWO)\n"; $cursor = $m->executeQuery( "demo.test", $query ); MongoDB\Driver\Monitoring\removeSubscriber( $subscriber2 ); echo "After removeSubscriber (TWO)\n"; $cursor = $m->executeQuery( "demo.test", $query ); ?> --EXPECT-- Before addSubscriber After addSubscriber (ONE) - (ONE) - started: find After addSubscriber (TWO) - (ONE) - started: find - (TWO) - started: find After removeSubscriber (TWO) - (ONE) - started: find mongodb-1.6.1/tests/bson-corpus/array-decodeError-001.phpt0000644000076500000240000000077113572250760022640 0ustar alcaeusstaff--TEST-- Array: Array length too long: eats outer terminator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/array-decodeError-002.phpt0000644000076500000240000000076513572250760022644 0ustar alcaeusstaff--TEST-- Array: Array length too short: leaks terminator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/array-decodeError-003.phpt0000644000076500000240000000100213572250760022626 0ustar alcaeusstaff--TEST-- Array: Invalid Array: bad string length in field --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/array-valid-001.phpt0000644000076500000240000000125413572250760021477 0ustar alcaeusstaff--TEST-- Array: Empty --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d000000046100050000000000 {"a":[]} 0d000000046100050000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/array-valid-002.phpt0000644000076500000240000000141413572250760021476 0ustar alcaeusstaff--TEST-- Array: Single Element Array --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 140000000461000c0000001030000a0000000000 {"a":[{"$numberInt":"10"}]} 140000000461000c0000001030000a0000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/array-valid-003.phpt0000644000076500000240000000221113572250760021473 0ustar alcaeusstaff--TEST-- Array: Single Element Array with index set incorrectly --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON 
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate BSON -> Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($degenerateBson))), "\n"; // Degenerate BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($degenerateBson)), "\n"; ?> ===DONE=== --EXPECT-- 140000000461000c0000001030000a0000000000 {"a":[{"$numberInt":"10"}]} 140000000461000c0000001030000a0000000000 140000000461000c0000001030000a0000000000 {"a":[{"$numberInt":"10"}]} ===DONE===mongodb-1.6.1/tests/bson-corpus/array-valid-004.phpt0000644000076500000240000000221513572250760021500 0ustar alcaeusstaff--TEST-- Array: Single Element Array with index set incorrectly --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate BSON -> Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($degenerateBson))), "\n"; // Degenerate BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($degenerateBson)), "\n"; ?> ===DONE=== --EXPECT-- 140000000461000c0000001030000a0000000000 {"a":[{"$numberInt":"10"}]} 140000000461000c0000001030000a0000000000 140000000461000c0000001030000a0000000000 {"a":[{"$numberInt":"10"}]} ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-decodeError-001.phpt0000644000076500000240000000100013572250760022770 0ustar alcaeusstaff--TEST-- Binary type: Length longer than document --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-decodeError-002.phpt0000644000076500000240000000072413572250760023005 0ustar alcaeusstaff--TEST-- Binary type: Negative length --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-decodeError-003.phpt0000644000076500000240000000075513572250760023012 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x02 length too long --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-decodeError-004.phpt0000644000076500000240000000075613572250760023014 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x02 length too short --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-decodeError-005.phpt0000644000076500000240000000076113572250760023011 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x02 length negative one --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-001.phpt0000644000076500000240000000143313572250760021644 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x00 
(Zero-length) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d000000057800000000000000 {"x":{"$binary":{"base64":"","subType":"00"}}} 0d000000057800000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-002.phpt0000644000076500000240000000176213572250760021652 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x00 (Zero-length, keys reversed) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d000000057800000000000000 {"x":{"$binary":{"base64":"","subType":"00"}}} 0d000000057800000000000000 0d000000057800000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-003.phpt0000644000076500000240000000144313572250760021647 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x00 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0f0000000578000200000000ffff00 {"x":{"$binary":{"base64":"\/\/8=","subType":"00"}}} 0f0000000578000200000000ffff00 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-004.phpt0000644000076500000240000000144313572250760021650 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x01 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0f0000000578000200000001ffff00 {"x":{"$binary":{"base64":"\/\/8=","subType":"01"}}} 0f0000000578000200000001ffff00 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-005.phpt0000644000076500000240000000147313572250760021654 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x02 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 13000000057800060000000202000000ffff00 {"x":{"$binary":{"base64":"\/\/8=","subType":"02"}}} 13000000057800060000000202000000ffff00 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-006.phpt0000644000076500000240000000163713572250760021657 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x03 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1d000000057800100000000373ffd26444b34c6990e8e7d1dfc035d400 {"x":{"$binary":{"base64":"c\/\/SZESzTGmQ6OfR38A11A==","subType":"03"}}} 1d000000057800100000000373ffd26444b34c6990e8e7d1dfc035d400 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-007.phpt0000644000076500000240000000163713572250760021660 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x04 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1d000000057800100000000473ffd26444b34c6990e8e7d1dfc035d400 {"x":{"$binary":{"base64":"c\/\/SZESzTGmQ6OfR38A11A==","subType":"04"}}} 1d000000057800100000000473ffd26444b34c6990e8e7d1dfc035d400 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-008.phpt0000644000076500000240000000163713572250760021661 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x05 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1d000000057800100000000573ffd26444b34c6990e8e7d1dfc035d400 {"x":{"$binary":{"base64":"c\/\/SZESzTGmQ6OfR38A11A==","subType":"05"}}} 1d000000057800100000000573ffd26444b34c6990e8e7d1dfc035d400 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-009.phpt0000644000076500000240000000144313572250760021655 0ustar alcaeusstaff--TEST-- Binary type: subtype 0x80 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0f0000000578000200000080ffff00 {"x":{"$binary":{"base64":"\/\/8=","subType":"80"}}} 0f0000000578000200000080ffff00 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-010.phpt0000644000076500000240000000160613572250760021646 0ustar alcaeusstaff--TEST-- Binary type: $type query operator (conflicts with legacy $binary form with $type field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1f000000037800170000000224747970650007000000737472696e67000000 {"x":{"$type":"string"}} 
1f000000037800170000000224747970650007000000737472696e67000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/binary-valid-011.phpt0000644000076500000240000000156113572250760021647 0ustar alcaeusstaff--TEST-- Binary type: $type query operator (conflicts with legacy $binary form with $type field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000000378001000000010247479706500020000000000 {"x":{"$type":{"$numberInt":"2"}}} 180000000378001000000010247479706500020000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/boolean-decodeError-001.phpt0000644000076500000240000000072313572250760023136 0ustar alcaeusstaff--TEST-- Boolean: Invalid boolean value of 2 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/boolean-decodeError-002.phpt0000644000076500000240000000072413572250760023140 0ustar alcaeusstaff--TEST-- Boolean: Invalid boolean value of -1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/boolean-valid-001.phpt0000644000076500000240000000123113572250760021773 0ustar alcaeusstaff--TEST-- Boolean: True --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 090000000862000100 {"b":true} 090000000862000100 ===DONE===mongodb-1.6.1/tests/bson-corpus/boolean-valid-002.phpt0000644000076500000240000000123413572250760021777 0ustar alcaeusstaff--TEST-- Boolean: False --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 090000000862000000 {"b":false} 090000000862000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code-decodeError-001.phpt0000644000076500000240000000076513572250760022437 0ustar alcaeusstaff--TEST-- Javascript Code: bad code string length: 0 (but no 0x00 either) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code-decodeError-002.phpt0000644000076500000240000000074113572250760022432 0ustar alcaeusstaff--TEST-- Javascript Code: bad code string length: -1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException 
===DONE===mongodb-1.6.1/tests/bson-corpus/code-decodeError-003.phpt0000644000076500000240000000076613572250760022442 0ustar alcaeusstaff--TEST-- Javascript Code: bad code string length: eats terminator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code-decodeError-004.phpt0000644000076500000240000000100713572250760022430 0ustar alcaeusstaff--TEST-- Javascript Code: bad code string length: longer than rest of document --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code-decodeError-005.phpt0000644000076500000240000000076113572250760022437 0ustar alcaeusstaff--TEST-- Javascript Code: code string is not null-terminated --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code-decodeError-006.phpt0000644000076500000240000000075413572250760022442 0ustar alcaeusstaff--TEST-- Javascript Code: empty code string, but extra null --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code-decodeError-007.phpt0000644000076500000240000000073013572250760022435 0ustar alcaeusstaff--TEST-- Javascript Code: invalid UTF-8 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code-valid-001.phpt0000644000076500000240000000132313572250760021270 0ustar alcaeusstaff--TEST-- Javascript Code: Empty string --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d0000000d6100010000000000 {"a":{"$code":""}} 0d0000000d6100010000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code-valid-002.phpt0000644000076500000240000000133713572250760021276 0ustar alcaeusstaff--TEST-- Javascript Code: Single character --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0e0000000d610002000000620000 {"a":{"$code":"b"}} 0e0000000d610002000000620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code-valid-003.phpt0000644000076500000240000000146613572250760021302 0ustar alcaeusstaff--TEST-- Javascript Code: Multi-character --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON 
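// These generated corpus tests all exercise the same conversions: decode the
// canonical BSON to native PHP and re-encode it (the bin2hex() output above must
// match the canonical hex in --EXPECT--), render the document as canonical
// extended JSON via json_canonicalize(toCanonicalExtendedJSON(...)), and parse
// $canonicalExtJson back into BSON with fromJSON(); each echoed line is compared
// verbatim against the corresponding line of the --EXPECT-- block.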
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000d61000d0000006162616261626162616261620000 {"a":{"$code":"abababababab"}} 190000000d61000d0000006162616261626162616261620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code-valid-004.phpt0000644000076500000240000000153213572250760021275 0ustar alcaeusstaff--TEST-- Javascript Code: two-byte UTF-8 (é) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000261000d000000c3a9c3a9c3a9c3a9c3a9c3a90000 {"a":"\u00e9\u00e9\u00e9\u00e9\u00e9\u00e9"} 190000000261000d000000c3a9c3a9c3a9c3a9c3a9c3a90000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code-valid-005.phpt0000644000076500000240000000150313572250760021274 0ustar alcaeusstaff--TEST-- Javascript Code: three-byte UTF-8 (☆) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000261000d000000e29886e29886e29886e298860000 {"a":"\u2606\u2606\u2606\u2606"} 190000000261000d000000e29886e29886e29886e298860000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code-valid-006.phpt0000644000076500000240000000146513572250760021304 0ustar alcaeusstaff--TEST-- Javascript Code: Embedded nulls --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000261000d0000006162006261620062616261620000 {"a":"ab\u0000bab\u0000babab"} 190000000261000d0000006162006261620062616261620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-001.phpt0000644000076500000240000000103313572250760024143 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: field length zero --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-002.phpt0000644000076500000240000000103713572250760024150 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: field length negative --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-003.phpt0000644000076500000240000000102513572250760024146 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: field length too short (less than minimum size) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== 
--EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-004.phpt0000644000076500000240000000106213572250760024150 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: field length too short (truncates scope) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-005.phpt0000644000076500000240000000106113572250760024150 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: field length too long (clips outer doc) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-006.phpt0000644000076500000240000000106713572250760024157 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: field length too long (longer than outer doc) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-007.phpt0000644000076500000240000000105313572250760024153 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: bad code string: length too short --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-008.phpt0000644000076500000240000000107013572250760024153 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: bad code string: length too long (clips scope) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-009.phpt0000644000076500000240000000105213572250760024154 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: bad code string: negative length --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-010.phpt0000644000076500000240000000106313572250760024146 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: bad code string: length longer than field --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-decodeError-011.phpt0000644000076500000240000000107313572250760024150 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: bad scope doc (field has bad string length) --XFAIL-- Depends on PHPC-889 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-valid-001.phpt0000644000076500000240000000150113572250760023005 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: Empty code string, empty scope --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 160000000f61000e0000000100000000050000000000 {"a":{"$code":"","$scope":{}}} 160000000f61000e0000000100000000050000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-valid-002.phpt0000644000076500000240000000154513572250760023016 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: Non-empty code string, empty scope --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1a0000000f610012000000050000006162636400050000000000 {"a":{"$code":"abcd","$scope":{}}} 1a0000000f610012000000050000006162636400050000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-valid-003.phpt0000644000076500000240000000163613572250760023020 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: Empty code string, non-empty scope --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1d0000000f61001500000001000000000c000000107800010000000000 {"a":{"$code":"","$scope":{"x":{"$numberInt":"1"}}}} 1d0000000f61001500000001000000000c000000107800010000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-valid-004.phpt0000644000076500000240000000170513572250760023016 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: Non-empty code string and non-empty scope --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 210000000f6100190000000500000061626364000c000000107800010000000000 {"a":{"$code":"abcd","$scope":{"x":{"$numberInt":"1"}}}} 210000000f6100190000000500000061626364000c000000107800010000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/code_w_scope-valid-005.phpt0000644000076500000240000000173613572250760023023 0ustar alcaeusstaff--TEST-- Javascript Code with Scope: Unicode and embedded null in code string, empty scope --XFAIL-- Embedded null in code string is not supported in libbson (CDRIVER-1879) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 
1a0000000f61001200000005000000c3a9006400050000000000 {"a":{"$code":"\u00e9\u0000d","$scope":{}}} 1a0000000f61001200000005000000c3a9006400050000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/datetime-decodeError-001.phpt0000644000076500000240000000073013572250760023311 0ustar alcaeusstaff--TEST-- DateTime: datetime field truncated --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/datetime-valid-001.phpt0000644000076500000240000000216513572250760022157 0ustar alcaeusstaff--TEST-- DateTime: epoch --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000096100000000000000000000 {"a":{"$date":{"$numberLong":"0"}}} {"a":{"$date":"1970-01-01T00:00:00Z"}} 10000000096100000000000000000000 {"a":{"$date":"1970-01-01T00:00:00Z"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/datetime-valid-002.phpt0000644000076500000240000000223713572250760022160 0ustar alcaeusstaff--TEST-- DateTime: positive ms --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000096100c5d8d6cc3b01000000 {"a":{"$date":{"$numberLong":"1356351330501"}}} {"a":{"$date":"2012-12-24T12:15:30.501Z"}} 10000000096100c5d8d6cc3b01000000 {"a":{"$date":"2012-12-24T12:15:30.501Z"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/datetime-valid-003.phpt0000644000076500000240000000225513572250760022161 0ustar alcaeusstaff--TEST-- DateTime: negative --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000096100c33ce7b9bdffffff00 {"a":{"$date":{"$numberLong":"-284643869501"}}} {"a":{"$date":{"$numberLong":"-284643869501"}}} 10000000096100c33ce7b9bdffffff00 {"a":{"$date":{"$numberLong":"-284643869501"}}} 
===DONE===mongodb-1.6.1/tests/bson-corpus/datetime-valid-004.phpt0000644000076500000240000000142013572250760022153 0ustar alcaeusstaff--TEST-- DateTime: Y10K --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1000000009610000dc1fd277e6000000 {"a":{"$date":{"$numberLong":"253402300800000"}}} 1000000009610000dc1fd277e6000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-decodeError-001.phpt0000644000076500000240000000101213572250760023475 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): String with negative length --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-decodeError-002.phpt0000644000076500000240000000100613572250760023501 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): String with zero length --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-decodeError-003.phpt0000644000076500000240000000101113572250760023476 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): String not null terminated --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-decodeError-004.phpt0000644000076500000240000000102513572250760023504 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): short OID (less than minimum length for field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-decodeError-005.phpt0000644000076500000240000000103613572250760023507 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): short OID (greater than minimum, but truncated) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-decodeError-006.phpt0000644000076500000240000000100413572250760023503 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): String with bad UTF-8 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-valid-001.phpt0000644000076500000240000000215113572250760022344 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): DBpointer --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1a0000000c610002000000620056e1fc72e0c917e9c471416100 {"a":{"$dbPointer":{"$ref":"b","$id":{"$oid":"56e1fc72e0c917e9c4714161"}}}} 1a0000000c610002000000620056e1fc72e0c917e9c471416100 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-valid-002.phpt0000644000076500000240000000257413572250760022356 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): DBpointer with opposite key order --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1a0000000c610002000000620056e1fc72e0c917e9c471416100 {"a":{"$dbPointer":{"$ref":"b","$id":{"$oid":"56e1fc72e0c917e9c4714161"}}}} 1a0000000c610002000000620056e1fc72e0c917e9c471416100 1a0000000c610002000000620056e1fc72e0c917e9c471416100 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbpointer-valid-003.phpt0000644000076500000240000000220213572250760022343 0ustar alcaeusstaff--TEST-- DBPointer type (deprecated): With two-byte UTF-8 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1b0000000c610003000000c3a90056e1fc72e0c917e9c471416100 {"a":{"$dbPointer":{"$ref":"\u00e9","$id":{"$oid":"56e1fc72e0c917e9c4714161"}}}} 1b0000000c610003000000c3a90056e1fc72e0c917e9c471416100 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbref-valid-001.phpt0000644000076500000240000000205513572250760021443 0ustar alcaeusstaff--TEST-- DBRef: DBRef --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 37000000036462726566002b0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0000 {"dbref":{"$ref":"collection","$id":{"$oid":"58921b3e6e32ab156a22b59e"}}} 37000000036462726566002b0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0000 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbref-valid-002.phpt0000644000076500000240000000223313572250760021442 0ustar alcaeusstaff--TEST-- DBRef: DBRef with database --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 4300000003646272656600370000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0224646200030000006462000000 
{"dbref":{"$ref":"collection","$id":{"$oid":"58921b3e6e32ab156a22b59e"},"$db":"db"}} 4300000003646272656600370000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0224646200030000006462000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbref-valid-003.phpt0000644000076500000240000000231113572250760021440 0ustar alcaeusstaff--TEST-- DBRef: DBRef with database and additional fields --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e0010246964002a00000002246462000300000064620002666f6f0004000000626172000000 {"dbref":{"$ref":"collection","$id":{"$numberInt":"42"},"$db":"db","foo":"bar"}} 48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e0010246964002a00000002246462000300000064620002666f6f0004000000626172000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbref-valid-004.phpt0000644000076500000240000000225413572250760021447 0ustar alcaeusstaff--TEST-- DBRef: DBRef with additional fields --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 4400000003646272656600380000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e02666f6f0004000000626172000000 {"dbref":{"$ref":"collection","$id":{"$oid":"58921b3e6e32ab156a22b59e"},"foo":"bar"}} 4400000003646272656600380000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e02666f6f0004000000626172000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/dbref-valid-005.phpt0000644000076500000240000000222613572250760021447 0ustar alcaeusstaff--TEST-- DBRef: Document with key names similar to those of a DBRef --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 3e0000000224726566000c0000006e6f742d612d646272656600072469640058921b3e6e32ab156a22b59e022462616e616e6100050000007065656c0000 {"$ref":"not-a-dbref","$id":{"$oid":"58921b3e6e32ab156a22b59e"},"$banana":"peel"} 3e0000000224726566000c0000006e6f742d612d646272656600072469640058921b3e6e32ab156a22b59e022462616e616e6100050000007065656c0000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-001.phpt0000644000076500000240000000146313572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: Special - Canonical NaN --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007c00 {"d":{"$numberDecimal":"NaN"}} 180000001364000000000000000000000000000000007c00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-002.phpt0000644000076500000240000000124713572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Special - Negative NaN --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000fc00 {"d":{"$numberDecimal":"NaN"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-003.phpt0000644000076500000240000000134313572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: Special - Negative NaN --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000fc00 {"d":{"$numberDecimal":"NaN"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-004.phpt0000644000076500000240000000125113572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: Special - Canonical SNaN --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007e00 {"d":{"$numberDecimal":"NaN"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-005.phpt0000644000076500000240000000125013572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: Special - Negative SNaN --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000fe00 {"d":{"$numberDecimal":"NaN"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-006.phpt0000644000076500000240000000125513572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: Special - NaN with a payload --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001200000000000000000000000000007e00 {"d":{"$numberDecimal":"NaN"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-007.phpt0000644000076500000240000000151313572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: Special - Canonical Positive Infinity --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // 
Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007800 {"d":{"$numberDecimal":"Infinity"}} 180000001364000000000000000000000000000000007800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-008.phpt0000644000076500000240000000151513572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: Special - Canonical Negative Infinity --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000f800 {"d":{"$numberDecimal":"-Infinity"}} 18000000136400000000000000000000000000000000f800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-009.phpt0000644000076500000240000000127213572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: Special - Invalid representation treated as 0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000106c00 {"d":{"$numberDecimal":"0"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-010.phpt0000644000076500000240000000127513572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Special - Invalid representation treated as -0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400dcba9876543210deadbeef00000010ec00 {"d":{"$numberDecimal":"-0"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-011.phpt0000644000076500000240000000130213572250760022343 0ustar alcaeusstaff--TEST-- Decimal128: Special - Invalid representation treated as 0E3 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffffffffffffffffffffffff116c00 {"d":{"$numberDecimal":"0E+3"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-012.phpt0000644000076500000240000000161113572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: Regular - Adjusted Exponent Limit --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3cf22f00 {"d":{"$numberDecimal":"0.000001234567890123456789012345678901234"}} 18000000136400f2af967ed05c82de3297ff6fde3cf22f00 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-013.phpt0000644000076500000240000000147013572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Regular - Smallest --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400d204000000000000000000000000343000 {"d":{"$numberDecimal":"0.001234"}} 18000000136400d204000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-014.phpt0000644000076500000240000000152613572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: Regular - Smallest with Trailing Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640040ef5a07000000000000000000002a3000 {"d":{"$numberDecimal":"0.00123400000"}} 1800000013640040ef5a07000000000000000000002a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-015.phpt0000644000076500000240000000145113572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: Regular - 0.1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640001000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.1"}} 1800000013640001000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-016.phpt0000644000076500000240000000161413572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: Regular - 0.1234567890123456789012345678901234 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3cfc2f00 {"d":{"$numberDecimal":"0.1234567890123456789012345678901234"}} 18000000136400f2af967ed05c82de3297ff6fde3cfc2f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-017.phpt0000644000076500000240000000144313572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: Regular - 0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 
{"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-018.phpt0000644000076500000240000000144613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: Regular - -0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000040b000 {"d":{"$numberDecimal":"-0"}} 18000000136400000000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-019.phpt0000644000076500000240000000145413572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: Regular - -0.0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003eb000 {"d":{"$numberDecimal":"-0.0"}} 1800000013640000000000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-020.phpt0000644000076500000240000000144313572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: Regular - 2 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000200000000000000000000000000403000 {"d":{"$numberDecimal":"2"}} 180000001364000200000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-021.phpt0000644000076500000240000000145713572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: Regular - 2.000 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400d0070000000000000000000000003a3000 {"d":{"$numberDecimal":"2.000"}} 18000000136400d0070000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-022.phpt0000644000076500000240000000155313572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: Regular - Largest --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3c403000 
{"d":{"$numberDecimal":"1234567890123456789012345678901234"}} 18000000136400f2af967ed05c82de3297ff6fde3c403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-023.phpt0000644000076500000240000000157413572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Tiniest --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff638e8d37c087adbe09ed010000 {"d":{"$numberDecimal":"9.999999999999999999999999999999999E-6143"}} 18000000136400ffffffff638e8d37c087adbe09ed010000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-024.phpt0000644000076500000240000000146513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Tiny --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000000000 {"d":{"$numberDecimal":"1E-6176"}} 180000001364000100000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-025.phpt0000644000076500000240000000150013572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Negative Tiny --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000008000 {"d":{"$numberDecimal":"-1E-6176"}} 180000001364000100000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-026.phpt0000644000076500000240000000160613572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Adjusted Exponent Limit --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3cf02f00 {"d":{"$numberDecimal":"1.234567890123456789012345678901234E-7"}} 18000000136400f2af967ed05c82de3297ff6fde3cf02f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-027.phpt0000644000076500000240000000147513572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Fractional --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640064000000000000000000000000002cb000 {"d":{"$numberDecimal":"-1.00E-8"}} 1800000013640064000000000000000000000000002cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-028.phpt0000644000076500000240000000150013572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - 0 with Exponent --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000205f00 {"d":{"$numberDecimal":"0E+6000"}} 180000001364000000000000000000000000000000205f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-029.phpt0000644000076500000240000000150713572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - 0 with Negative Exponent --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000007a2b00 {"d":{"$numberDecimal":"0E-611"}} 1800000013640000000000000000000000000000007a2b00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-030.phpt0000644000076500000240000000151213572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - No Decimal with Signed Exponent --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000463000 {"d":{"$numberDecimal":"1E+3"}} 180000001364000100000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-031.phpt0000644000076500000240000000150013572250760022345 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Trailing Zero --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001a04000000000000000000000000423000 {"d":{"$numberDecimal":"1.050E+4"}} 180000001364001a04000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-032.phpt0000644000076500000240000000147513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - With Decimal --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006900000000000000000000000000423000 {"d":{"$numberDecimal":"1.05E+3"}} 180000001364006900000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-033.phpt0000644000076500000240000000155313572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Full --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffffffffffffffffffffffff403000 {"d":{"$numberDecimal":"5192296858534827628530496329220095"}} 18000000136400ffffffffffffffffffffffffffff403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-034.phpt0000644000076500000240000000157213572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Large --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000000000E+6144"}} 18000000136400000000000a5bc138938d44c64d31fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-035.phpt0000644000076500000240000000157413572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: Scientific - Largest --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff638e8d37c087adbe09edff5f00 {"d":{"$numberDecimal":"9.999999999999999999999999999999999E+6144"}} 18000000136400ffffffff638e8d37c087adbe09edff5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-036.phpt0000644000076500000240000000204113572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - Exponent Normalization --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640064000000000000000000000000002cb000 {"d":{"$numberDecimal":"-1.00E-8"}} 1800000013640064000000000000000000000000002cb000 1800000013640064000000000000000000000000002cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-037.phpt0000644000076500000240000000203013572250760022352 0ustar 
alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - Unsigned Positive Exponent --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000463000 {"d":{"$numberDecimal":"1E+3"}} 180000001364000100000000000000000000000000463000 180000001364000100000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-038.phpt0000644000076500000240000000203413572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - Lowercase Exponent Identifier --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000463000 {"d":{"$numberDecimal":"1E+3"}} 180000001364000100000000000000000000000000463000 180000001364000100000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-039.phpt0000644000076500000240000000214413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - Long Significand with Exponent --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640079d9e0f9763ada429d0200000000583000 {"d":{"$numberDecimal":"1.2345689012345789012345E+34"}} 1800000013640079d9e0f9763ada429d0200000000583000 1800000013640079d9e0f9763ada429d0200000000583000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-040.phpt0000644000076500000240000000214713572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - Positive Sign --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3c403000 {"d":{"$numberDecimal":"1234567890123456789012345678901234"}} 18000000136400f2af967ed05c82de3297ff6fde3c403000 18000000136400f2af967ed05c82de3297ff6fde3c403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-041.phpt0000644000076500000240000000377213572250760022363 
0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - Long Decimal String --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000722800 {"d":{"$numberDecimal":"1E-999"}} 180000001364000100000000000000000000000000722800 180000001364000100000000000000000000000000722800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-042.phpt0000644000076500000240000000177713572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - nan --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007c00 {"d":{"$numberDecimal":"NaN"}} 180000001364000000000000000000000000000000007c00 180000001364000000000000000000000000000000007c00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-043.phpt0000644000076500000240000000177713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - nAn --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007c00 {"d":{"$numberDecimal":"NaN"}} 180000001364000000000000000000000000000000007c00 180000001364000000000000000000000000000000007c00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-044.phpt0000644000076500000240000000202513572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - +infinity --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007800 {"d":{"$numberDecimal":"Infinity"}} 180000001364000000000000000000000000000000007800 180000001364000000000000000000000000000000007800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-045.phpt0000644000076500000240000000202313572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - infinity --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007800 {"d":{"$numberDecimal":"Infinity"}} 180000001364000000000000000000000000000000007800 180000001364000000000000000000000000000000007800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-046.phpt0000644000076500000240000000202313572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - infiniTY --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007800 {"d":{"$numberDecimal":"Infinity"}} 180000001364000000000000000000000000000000007800 180000001364000000000000000000000000000000007800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-047.phpt0000644000076500000240000000201113572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - inf --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007800 {"d":{"$numberDecimal":"Infinity"}} 180000001364000000000000000000000000000000007800 180000001364000000000000000000000000000000007800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-048.phpt0000644000076500000240000000201113572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - inF --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007800 {"d":{"$numberDecimal":"Infinity"}} 180000001364000000000000000000000000000000007800 180000001364000000000000000000000000000000007800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-049.phpt0000644000076500000240000000202713572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - -infinity --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000f800 {"d":{"$numberDecimal":"-Infinity"}} 18000000136400000000000000000000000000000000f800 18000000136400000000000000000000000000000000f800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-050.phpt0000644000076500000240000000202713572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - -infiniTy --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000f800 {"d":{"$numberDecimal":"-Infinity"}} 18000000136400000000000000000000000000000000f800 18000000136400000000000000000000000000000000f800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-051.phpt0000644000076500000240000000202213572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - -Inf --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000f800 {"d":{"$numberDecimal":"-Infinity"}} 18000000136400000000000000000000000000000000f800 18000000136400000000000000000000000000000000f800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-052.phpt0000644000076500000240000000201513572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - -inf --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000f800 {"d":{"$numberDecimal":"-Infinity"}} 18000000136400000000000000000000000000000000f800 18000000136400000000000000000000000000000000f800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-053.phpt0000644000076500000240000000201513572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: Non-Canonical Parsing - -inF --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000f800 {"d":{"$numberDecimal":"-Infinity"}} 18000000136400000000000000000000000000000000f800 18000000136400000000000000000000000000000000f800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-054.phpt0000644000076500000240000000201113572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: Rounded Subnormal number --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000000000 {"d":{"$numberDecimal":"1E-6176"}} 180000001364000100000000000000000000000000000000 180000001364000100000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-055.phpt0000644000076500000240000000177213572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: Clamped --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0E+6112"}} 180000001364000a00000000000000000000000000fe5f00 180000001364000a00000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-1-valid-056.phpt0000644000076500000240000000404113572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: Exact rounding --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31cc3700 {"d":{"$numberDecimal":"1.000000000000000000000000000000000E+999"}} 18000000136400000000000a5bc138938d44c64d31cc3700 18000000136400000000000a5bc138938d44c64d31cc3700 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-001.phpt0000644000076500000240000000155713572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq021] Normality --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3c40b000 {"d":{"$numberDecimal":"-1234567890123456789012345678901234"}} 18000000136400f2af967ed05c82de3297ff6fde3c40b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-002.phpt0000644000076500000240000000154713572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq823] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400010000800000000000000000000040b000 {"d":{"$numberDecimal":"-2147483649"}} 18000000136400010000800000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-003.phpt0000644000076500000240000000154713572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq822] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000800000000000000000000040b000 {"d":{"$numberDecimal":"-2147483648"}} 18000000136400000000800000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-004.phpt0000644000076500000240000000154713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq821] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffff7f0000000000000000000040b000 {"d":{"$numberDecimal":"-2147483647"}} 18000000136400ffffff7f0000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-005.phpt0000644000076500000240000000154713572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq820] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400feffff7f0000000000000000000040b000 {"d":{"$numberDecimal":"-2147483646"}} 18000000136400feffff7f0000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-006.phpt0000644000076500000240000000150313572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq152] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> 
Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400393000000000000000000000000040b000 {"d":{"$numberDecimal":"-12345"}} 18000000136400393000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-007.phpt0000644000076500000240000000150113572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [decq154] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400d20400000000000000000000000040b000 {"d":{"$numberDecimal":"-1234"}} 18000000136400d20400000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-008.phpt0000644000076500000240000000151213572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq006] derivative canonical plain strings --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee0200000000000000000000000040b000 {"d":{"$numberDecimal":"-750"}} 18000000136400ee0200000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-009.phpt0000644000076500000240000000150513572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq164] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640039300000000000000000000000003cb000 {"d":{"$numberDecimal":"-123.45"}} 1800000013640039300000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-010.phpt0000644000076500000240000000147713572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq156] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364007b0000000000000000000000000040b000 {"d":{"$numberDecimal":"-123"}} 180000001364007b0000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-011.phpt0000644000076500000240000000151413572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [decq008] derivative canonical plain strings --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee020000000000000000000000003eb000 {"d":{"$numberDecimal":"-75.0"}} 18000000136400ee020000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-012.phpt0000644000076500000240000000147513572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq158] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000c0000000000000000000000000040b000 {"d":{"$numberDecimal":"-12"}} 180000001364000c0000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-013.phpt0000644000076500000240000000160413572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq122] Nmax and similar --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff638e8d37c087adbe09edffdf00 {"d":{"$numberDecimal":"-9.999999999999999999999999999999999E+6144"}} 18000000136400ffffffff638e8d37c087adbe09edffdf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-014.phpt0000644000076500000240000000154413572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq002] (mostly derived from the Strawman 4 document and examples) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee020000000000000000000000003cb000 {"d":{"$numberDecimal":"-7.50"}} 18000000136400ee020000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-015.phpt0000644000076500000240000000152213572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq004] derivative canonical plain strings --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee0200000000000000000000000042b000 {"d":{"$numberDecimal":"-7.50E+3"}} 18000000136400ee0200000000000000000000000042b000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-016.phpt0000644000076500000240000000152213572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq018] derivative canonical plain strings --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee020000000000000000000000002eb000 {"d":{"$numberDecimal":"-7.50E-7"}} 18000000136400ee020000000000000000000000002eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-017.phpt0000644000076500000240000000160413572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq125] Nmax and similar --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3cfedf00 {"d":{"$numberDecimal":"-1.234567890123456789012345678901234E+6144"}} 18000000136400f2af967ed05c82de3297ff6fde3cfedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-018.phpt0000644000076500000240000000161313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq131] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000807f1bcf85b27059c8a43cfedf00 {"d":{"$numberDecimal":"-1.230000000000000000000000000000000E+6144"}} 18000000136400000000807f1bcf85b27059c8a43cfedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-019.phpt0000644000076500000240000000150113572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq162] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364007b000000000000000000000000003cb000 {"d":{"$numberDecimal":"-1.23"}} 180000001364007b000000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-020.phpt0000644000076500000240000000160213572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [decq176] Nmin and below --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 
18000000136400010000000a5bc138938d44c64d31008000 {"d":{"$numberDecimal":"-1.000000000000000000000000000000001E-6143"}} 18000000136400010000000a5bc138938d44c64d31008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-021.phpt0000644000076500000240000000160213572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: [decq174] Nmin and below --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31008000 {"d":{"$numberDecimal":"-1.000000000000000000000000000000000E-6143"}} 18000000136400000000000a5bc138938d44c64d31008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-022.phpt0000644000076500000240000000161313572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq133] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31fedf00 {"d":{"$numberDecimal":"-1.000000000000000000000000000000000E+6144"}} 18000000136400000000000a5bc138938d44c64d31fedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-023.phpt0000644000076500000240000000147313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq160] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400010000000000000000000000000040b000 {"d":{"$numberDecimal":"-1"}} 18000000136400010000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-024.phpt0000644000076500000240000000147613572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq172] Nmin and below --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000428000 {"d":{"$numberDecimal":"-1E-6143"}} 180000001364000100000000000000000000000000428000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-025.phpt0000644000076500000240000000151613572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq010] derivative canonical plain strings --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee020000000000000000000000003ab000 {"d":{"$numberDecimal":"-0.750"}} 18000000136400ee020000000000000000000000003ab000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-026.phpt0000644000076500000240000000152013572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq012] derivative canonical plain strings --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee0200000000000000000000000038b000 {"d":{"$numberDecimal":"-0.0750"}} 18000000136400ee0200000000000000000000000038b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-027.phpt0000644000076500000240000000152413572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq014] derivative canonical plain strings --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee0200000000000000000000000034b000 {"d":{"$numberDecimal":"-0.000750"}} 18000000136400ee0200000000000000000000000034b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-028.phpt0000644000076500000240000000153013572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq016] derivative canonical plain strings --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ee0200000000000000000000000030b000 {"d":{"$numberDecimal":"-0.00000750"}} 18000000136400ee0200000000000000000000000030b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-029.phpt0000644000076500000240000000146313572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq404] zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000000000 {"d":{"$numberDecimal":"0E-6176"}} 180000001364000000000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-030.phpt0000644000076500000240000000147613572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq424] negative zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000008000 {"d":{"$numberDecimal":"-0E-6176"}} 180000001364000000000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-031.phpt0000644000076500000240000000145513572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq407] zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.00"}} 1800000013640000000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-032.phpt0000644000076500000240000000147013572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq427] negative zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003cb000 {"d":{"$numberDecimal":"-0.00"}} 1800000013640000000000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-033.phpt0000644000076500000240000000144713572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq409] zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-034.phpt0000644000076500000240000000146213572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq428] negative zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000040b000 {"d":{"$numberDecimal":"-0"}} 18000000136400000000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-035.phpt0000644000076500000240000000146413572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq700] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-036.phpt0000644000076500000240000000145513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq406] zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.00"}} 1800000013640000000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-037.phpt0000644000076500000240000000147013572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq426] negative zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003cb000 {"d":{"$numberDecimal":"-0.00"}} 1800000013640000000000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-038.phpt0000644000076500000240000000145513572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq410] zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000463000 {"d":{"$numberDecimal":"0E+3"}} 180000001364000000000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-039.phpt0000644000076500000240000000147013572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq431] negative zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000046b000 {"d":{"$numberDecimal":"-0E+3"}} 18000000136400000000000000000000000000000046b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-040.phpt0000644000076500000240000000147613572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq419] clamped zeros... 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fe5f00 {"d":{"$numberDecimal":"0E+6111"}} 180000001364000000000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-041.phpt0000644000076500000240000000147613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq432] negative zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fedf00 {"d":{"$numberDecimal":"-0E+6111"}} 180000001364000000000000000000000000000000fedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-042.phpt0000644000076500000240000000146313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq405] zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000000000 {"d":{"$numberDecimal":"0E-6176"}} 180000001364000000000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-043.phpt0000644000076500000240000000147613572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq425] negative zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000008000 {"d":{"$numberDecimal":"-0E-6176"}} 180000001364000000000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-044.phpt0000644000076500000240000000147013572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq508] Specials --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007800 {"d":{"$numberDecimal":"Infinity"}} 180000001364000000000000000000000000000000007800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-045.phpt0000644000076500000240000000147213572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq528] 
Specials --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000000f800 {"d":{"$numberDecimal":"-Infinity"}} 18000000136400000000000000000000000000000000f800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-046.phpt0000644000076500000240000000145613572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq541] Specials --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000007c00 {"d":{"$numberDecimal":"NaN"}} 180000001364000000000000000000000000000000007c00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-047.phpt0000644000076500000240000000160013572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq074] Nmin and below --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31000000 {"d":{"$numberDecimal":"1.000000000000000000000000000000000E-6143"}} 18000000136400000000000a5bc138938d44c64d31000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-048.phpt0000644000076500000240000000161113572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq602] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000000000E+6144"}} 18000000136400000000000a5bc138938d44c64d31fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-049.phpt0000644000076500000240000000160713572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq604] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000081efac855b416d2dee04fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000000000000E+6143"}} 180000001364000000000081efac855b416d2dee04fe5f00 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-050.phpt0000644000076500000240000000160513572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq606] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000080264b91c02220be377e00fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000000000000E+6142"}} 1800000013640000000080264b91c02220be377e00fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-051.phpt0000644000076500000240000000160313572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq608] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000040eaed7446d09c2c9f0c00fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000000E+6141"}} 1800000013640000000040eaed7446d09c2c9f0c00fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-052.phpt0000644000076500000240000000160113572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq610] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000a0ca17726dae0f1e430100fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000000000E+6140"}} 18000000136400000000a0ca17726dae0f1e430100fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-053.phpt0000644000076500000240000000157713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq612] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000106102253e5ece4f200000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000000000E+6139"}} 18000000136400000000106102253e5ece4f200000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-054.phpt0000644000076500000240000000157513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq614] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000e83c80d09f3c2e3b030000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000E+6138"}} 18000000136400000000e83c80d09f3c2e3b030000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-055.phpt0000644000076500000240000000157313572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq616] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000e4d20cc8dcd2b752000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000000E+6137"}} 18000000136400000000e4d20cc8dcd2b752000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-056.phpt0000644000076500000240000000157113572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq618] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000004a48011416954508000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000000E+6136"}} 180000001364000000004a48011416954508000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-057.phpt0000644000076500000240000000156713572250760022373 0ustar alcaeusstaff--TEST-- Decimal128: [decq620] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000a1edccce1bc2d300000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000E+6135"}} 18000000136400000000a1edccce1bc2d300000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-058.phpt0000644000076500000240000000156513572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [decq622] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000080f64ae1c7022d1500000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000E+6134"}} 18000000136400000080f64ae1c7022d1500000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-059.phpt0000644000076500000240000000156313572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [decq624] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000040b2bac9e0191e0200000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000E+6133"}} 18000000136400000040b2bac9e0191e0200000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-060.phpt0000644000076500000240000000156113572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq626] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000a0dec5adc935360000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000E+6132"}} 180000001364000000a0dec5adc935360000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-061.phpt0000644000076500000240000000155713572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq628] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000010632d5ec76b050000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000E+6131"}} 18000000136400000010632d5ec76b050000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-062.phpt0000644000076500000240000000155513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq630] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000e8890423c78a000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000E+6130"}} 180000001364000000e8890423c78a000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-063.phpt0000644000076500000240000000155313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq632] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000064a7b3b6e00d000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000E+6129"}} 18000000136400000064a7b3b6e00d000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-064.phpt0000644000076500000240000000155113572250760022362 0ustar alcaeusstaff--TEST-- 
Decimal128: [decq634] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000008a5d78456301000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000E+6128"}} 1800000013640000008a5d78456301000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-065.phpt0000644000076500000240000000154713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq636] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000c16ff2862300000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000E+6127"}} 180000001364000000c16ff2862300000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-066.phpt0000644000076500000240000000154513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq638] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000080c6a47e8d0300000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000E+6126"}} 180000001364000080c6a47e8d0300000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-067.phpt0000644000076500000240000000154313572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq640] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000407a10f35a0000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000E+6125"}} 1800000013640000407a10f35a0000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-068.phpt0000644000076500000240000000154113572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq642] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000a0724e18090000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000E+6124"}} 1800000013640000a0724e18090000000000000000fe5f00 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-069.phpt0000644000076500000240000000153713572250760022373 0ustar alcaeusstaff--TEST-- Decimal128: [decq644] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000010a5d4e8000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000E+6123"}} 180000001364000010a5d4e8000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-070.phpt0000644000076500000240000000153513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq646] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000e8764817000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000E+6122"}} 1800000013640000e8764817000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-071.phpt0000644000076500000240000000153313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq648] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000e40b5402000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000E+6121"}} 1800000013640000e40b5402000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-072.phpt0000644000076500000240000000153113572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq650] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000ca9a3b00000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000E+6120"}} 1800000013640000ca9a3b00000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-073.phpt0000644000076500000240000000152713572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq652] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 
1800000013640000e1f50500000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000E+6119"}} 1800000013640000e1f50500000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-074.phpt0000644000076500000240000000152513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq654] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364008096980000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000E+6118"}} 180000001364008096980000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-075.phpt0000644000076500000240000000152313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq656] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640040420f0000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000000E+6117"}} 1800000013640040420f0000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-076.phpt0000644000076500000240000000152113572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq658] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400a086010000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000E+6116"}} 18000000136400a086010000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-077.phpt0000644000076500000240000000151713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq660] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001027000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000E+6115"}} 180000001364001027000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-078.phpt0000644000076500000240000000151513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq662] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical 
extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e803000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000E+6114"}} 18000000136400e803000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-079.phpt0000644000076500000240000000151313572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq664] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006400000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00E+6113"}} 180000001364006400000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-080.phpt0000644000076500000240000000151113572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq666] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0E+6112"}} 180000001364000a00000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-081.phpt0000644000076500000240000000147113572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq060] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000403000 {"d":{"$numberDecimal":"1"}} 180000001364000100000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-082.phpt0000644000076500000240000000150513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq670] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000fc5f00 {"d":{"$numberDecimal":"1E+6110"}} 180000001364000100000000000000000000000000fc5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-083.phpt0000644000076500000240000000150513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq668] fold-down full sequence --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1E+6111"}} 180000001364000100000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-084.phpt0000644000076500000240000000147413572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq072] Nmin and below --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000420000 {"d":{"$numberDecimal":"1E-6143"}} 180000001364000100000000000000000000000000420000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-085.phpt0000644000076500000240000000160013572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq076] Nmin and below --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400010000000a5bc138938d44c64d31000000 {"d":{"$numberDecimal":"1.000000000000000000000000000000001E-6143"}} 18000000136400010000000a5bc138938d44c64d31000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-086.phpt0000644000076500000240000000161113572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq036] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000807f1bcf85b27059c8a43cfe5f00 {"d":{"$numberDecimal":"1.230000000000000000000000000000000E+6144"}} 18000000136400000000807f1bcf85b27059c8a43cfe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-087.phpt0000644000076500000240000000147713572250760022376 0ustar alcaeusstaff--TEST-- Decimal128: [decq062] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364007b000000000000000000000000003c3000 {"d":{"$numberDecimal":"1.23"}} 180000001364007b000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-088.phpt0000644000076500000240000000160213572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq034] Nmax and similar --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> 
Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3cfe5f00 {"d":{"$numberDecimal":"1.234567890123456789012345678901234E+6144"}} 18000000136400f2af967ed05c82de3297ff6fde3cfe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-089.phpt0000644000076500000240000000146213572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [decq441] exponent lengths --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000403000 {"d":{"$numberDecimal":"7"}} 180000001364000700000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-090.phpt0000644000076500000240000000147613572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq449] exponent lengths --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000001e5f00 {"d":{"$numberDecimal":"7E+5999"}} 1800000013640007000000000000000000000000001e5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-091.phpt0000644000076500000240000000147413572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq447] exponent lengths --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000000e3800 {"d":{"$numberDecimal":"7E+999"}} 1800000013640007000000000000000000000000000e3800 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-092.phpt0000644000076500000240000000147213572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq445] exponent lengths --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000063100 {"d":{"$numberDecimal":"7E+99"}} 180000001364000700000000000000000000000000063100 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-093.phpt0000644000076500000240000000147013572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq443] exponent lengths --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000523000 {"d":{"$numberDecimal":"7E+9"}} 180000001364000700000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-094.phpt0000644000076500000240000000157513572250760022373 0ustar alcaeusstaff--TEST-- Decimal128: [decq842] VG testcase --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000fed83f4e7c9fe4e269e38a5bcd1700 {"d":{"$numberDecimal":"7.049000000000010795488000000000000E-3097"}} 180000001364000000fed83f4e7c9fe4e269e38a5bcd1700 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-095.phpt0000644000076500000240000000153713572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [decq841] VG testcase --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000203b9db5056f000000000000002400 {"d":{"$numberDecimal":"8.000000000000000000E-1550"}} 180000001364000000203b9db5056f000000000000002400 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-096.phpt0000644000076500000240000000154313572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq840] VG testcase --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003c17258419d710c42f0000000000002400 {"d":{"$numberDecimal":"8.81125000000001349436E-1548"}} 180000001364003c17258419d710c42f0000000000002400 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-097.phpt0000644000076500000240000000146413572250760022373 0ustar alcaeusstaff--TEST-- Decimal128: [decq701] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000900000000000000000000000000403000 {"d":{"$numberDecimal":"9"}} 180000001364000900000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-098.phpt0000644000076500000240000000160213572250760022366 0ustar 
alcaeusstaff--TEST-- Decimal128: [decq032] Nmax and similar --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff638e8d37c087adbe09edff5f00 {"d":{"$numberDecimal":"9.999999999999999999999999999999999E+6144"}} 18000000136400ffffffff638e8d37c087adbe09edff5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-099.phpt0000644000076500000240000000146613572250760022377 0ustar alcaeusstaff--TEST-- Decimal128: [decq702] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000403000 {"d":{"$numberDecimal":"10"}} 180000001364000a00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-100.phpt0000644000076500000240000000147313572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq057] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000c00000000000000000000000000403000 {"d":{"$numberDecimal":"12"}} 180000001364000c00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-101.phpt0000644000076500000240000000146613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq703] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001300000000000000000000000000403000 {"d":{"$numberDecimal":"19"}} 180000001364001300000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-102.phpt0000644000076500000240000000146613572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq704] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001400000000000000000000000000403000 {"d":{"$numberDecimal":"20"}} 180000001364001400000000000000000000000000403000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-103.phpt0000644000076500000240000000146613572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq705] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001d00000000000000000000000000403000 {"d":{"$numberDecimal":"29"}} 180000001364001d00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-104.phpt0000644000076500000240000000146613572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq706] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001e00000000000000000000000000403000 {"d":{"$numberDecimal":"30"}} 180000001364001e00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-105.phpt0000644000076500000240000000146613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq707] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364002700000000000000000000000000403000 {"d":{"$numberDecimal":"39"}} 180000001364002700000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-106.phpt0000644000076500000240000000146613572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq708] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364002800000000000000000000000000403000 {"d":{"$numberDecimal":"40"}} 180000001364002800000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-107.phpt0000644000076500000240000000146613572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq709] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003100000000000000000000000000403000 {"d":{"$numberDecimal":"49"}} 
180000001364003100000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-108.phpt0000644000076500000240000000146613572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq710] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003200000000000000000000000000403000 {"d":{"$numberDecimal":"50"}} 180000001364003200000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-109.phpt0000644000076500000240000000146613572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq711] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003b00000000000000000000000000403000 {"d":{"$numberDecimal":"59"}} 180000001364003b00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-110.phpt0000644000076500000240000000146613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq712] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003c00000000000000000000000000403000 {"d":{"$numberDecimal":"60"}} 180000001364003c00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-111.phpt0000644000076500000240000000146613572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq713] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004500000000000000000000000000403000 {"d":{"$numberDecimal":"69"}} 180000001364004500000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-112.phpt0000644000076500000240000000146613572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq714] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004600000000000000000000000000403000 
{"d":{"$numberDecimal":"70"}} 180000001364004600000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-113.phpt0000644000076500000240000000146613572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq715] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004700000000000000000000000000403000 {"d":{"$numberDecimal":"71"}} 180000001364004700000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-114.phpt0000644000076500000240000000146613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq716] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004800000000000000000000000000403000 {"d":{"$numberDecimal":"72"}} 180000001364004800000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-115.phpt0000644000076500000240000000146613572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq717] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004900000000000000000000000000403000 {"d":{"$numberDecimal":"73"}} 180000001364004900000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-116.phpt0000644000076500000240000000146613572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq718] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004a00000000000000000000000000403000 {"d":{"$numberDecimal":"74"}} 180000001364004a00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-117.phpt0000644000076500000240000000146613572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq719] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 
180000001364004b00000000000000000000000000403000 {"d":{"$numberDecimal":"75"}} 180000001364004b00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-118.phpt0000644000076500000240000000146613572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq720] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004c00000000000000000000000000403000 {"d":{"$numberDecimal":"76"}} 180000001364004c00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-119.phpt0000644000076500000240000000146613572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq721] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004d00000000000000000000000000403000 {"d":{"$numberDecimal":"77"}} 180000001364004d00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-120.phpt0000644000076500000240000000146613572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq722] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004e00000000000000000000000000403000 {"d":{"$numberDecimal":"78"}} 180000001364004e00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-121.phpt0000644000076500000240000000146613572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq723] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004f00000000000000000000000000403000 {"d":{"$numberDecimal":"79"}} 180000001364004f00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-122.phpt0000644000076500000240000000147513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq056] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> 
===DONE=== --EXPECT-- 180000001364007b00000000000000000000000000403000 {"d":{"$numberDecimal":"123"}} 180000001364007b00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-123.phpt0000644000076500000240000000150313572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq064] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640039300000000000000000000000003c3000 {"d":{"$numberDecimal":"123.45"}} 1800000013640039300000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-124.phpt0000644000076500000240000000147013572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq732] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000802000000000000000000000000403000 {"d":{"$numberDecimal":"520"}} 180000001364000802000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-125.phpt0000644000076500000240000000147013572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq733] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000902000000000000000000000000403000 {"d":{"$numberDecimal":"521"}} 180000001364000902000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-126.phpt0000644000076500000240000000151413572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq740] DPD: one of each of the huffman groups --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000903000000000000000000000000403000 {"d":{"$numberDecimal":"777"}} 180000001364000903000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-127.phpt0000644000076500000240000000151413572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq741] DPD: one of each of the huffman groups --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON 
-> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a03000000000000000000000000403000 {"d":{"$numberDecimal":"778"}} 180000001364000a03000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-128.phpt0000644000076500000240000000151413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq742] DPD: one of each of the huffman groups --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001303000000000000000000000000403000 {"d":{"$numberDecimal":"787"}} 180000001364001303000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-129.phpt0000644000076500000240000000151413572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq746] DPD: one of each of the huffman groups --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001f03000000000000000000000000403000 {"d":{"$numberDecimal":"799"}} 180000001364001f03000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-130.phpt0000644000076500000240000000151413572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq743] DPD: one of each of the huffman groups --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006d03000000000000000000000000403000 {"d":{"$numberDecimal":"877"}} 180000001364006d03000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-131.phpt0000644000076500000240000000153313572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq753] DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364007803000000000000000000000000403000 {"d":{"$numberDecimal":"888"}} 180000001364007803000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-132.phpt0000644000076500000240000000153313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq754] DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364007903000000000000000000000000403000 {"d":{"$numberDecimal":"889"}} 180000001364007903000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-133.phpt0000644000076500000240000000153313572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq760] DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364008203000000000000000000000000403000 {"d":{"$numberDecimal":"898"}} 180000001364008203000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-134.phpt0000644000076500000240000000153313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq764] DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364008303000000000000000000000000403000 {"d":{"$numberDecimal":"899"}} 180000001364008303000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-135.phpt0000644000076500000240000000151413572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq745] DPD: one of each of the huffman groups --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400d303000000000000000000000000403000 {"d":{"$numberDecimal":"979"}} 18000000136400d303000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-136.phpt0000644000076500000240000000153313572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq770] DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400dc03000000000000000000000000403000 {"d":{"$numberDecimal":"988"}} 18000000136400dc03000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-137.phpt0000644000076500000240000000153313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq774] 
DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400dd03000000000000000000000000403000 {"d":{"$numberDecimal":"989"}} 18000000136400dd03000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-138.phpt0000644000076500000240000000147013572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq730] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e203000000000000000000000000403000 {"d":{"$numberDecimal":"994"}} 18000000136400e203000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-139.phpt0000644000076500000240000000147013572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq731] Selected DPD codes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e303000000000000000000000000403000 {"d":{"$numberDecimal":"995"}} 18000000136400e303000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-140.phpt0000644000076500000240000000151413572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq744] DPD: one of each of the huffman groups --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e503000000000000000000000000403000 {"d":{"$numberDecimal":"997"}} 18000000136400e503000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-141.phpt0000644000076500000240000000153313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq780] DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e603000000000000000000000000403000 {"d":{"$numberDecimal":"998"}} 18000000136400e603000000000000000000000000403000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-142.phpt0000644000076500000240000000153313572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq787] DPD all-highs cases (includes the 24 redundant codes) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e703000000000000000000000000403000 {"d":{"$numberDecimal":"999"}} 18000000136400e703000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-143.phpt0000644000076500000240000000147713572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq053] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400d204000000000000000000000000403000 {"d":{"$numberDecimal":"1234"}} 18000000136400d204000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-144.phpt0000644000076500000240000000150113572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq052] fold-downs (more below) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003930000000000000000000000000403000 {"d":{"$numberDecimal":"12345"}} 180000001364003930000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-145.phpt0000644000076500000240000000152013572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq792] Miscellaneous (testers' queries, etc.) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003075000000000000000000000000403000 {"d":{"$numberDecimal":"30000"}} 180000001364003075000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-146.phpt0000644000076500000240000000152213572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq793] Miscellaneous (testers' queries, etc.) 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640090940d0000000000000000000000403000 {"d":{"$numberDecimal":"890000"}} 1800000013640090940d0000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-147.phpt0000644000076500000240000000154513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq824] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400feffff7f00000000000000000000403000 {"d":{"$numberDecimal":"2147483646"}} 18000000136400feffff7f00000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-148.phpt0000644000076500000240000000154513572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq825] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffff7f00000000000000000000403000 {"d":{"$numberDecimal":"2147483647"}} 18000000136400ffffff7f00000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-149.phpt0000644000076500000240000000154513572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [decq826] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000008000000000000000000000403000 {"d":{"$numberDecimal":"2147483648"}} 180000001364000000008000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-150.phpt0000644000076500000240000000154513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq827] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100008000000000000000000000403000 {"d":{"$numberDecimal":"2147483649"}} 180000001364000100008000000000000000000000403000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-151.phpt0000644000076500000240000000154513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq828] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400feffffff00000000000000000000403000 {"d":{"$numberDecimal":"4294967294"}} 18000000136400feffffff00000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-152.phpt0000644000076500000240000000154513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq829] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff00000000000000000000403000 {"d":{"$numberDecimal":"4294967295"}} 18000000136400ffffffff00000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-153.phpt0000644000076500000240000000154513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq830] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000001000000000000000000403000 {"d":{"$numberDecimal":"4294967296"}} 180000001364000000000001000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-154.phpt0000644000076500000240000000154513572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq831] values around [u]int32 edges (zeros done earlier) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000001000000000000000000403000 {"d":{"$numberDecimal":"4294967297"}} 180000001364000100000001000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-155.phpt0000644000076500000240000000155513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq022] Normality --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400c7711cc7b548f377dc80a131c836403000 {"d":{"$numberDecimal":"1111111111111111111111111111111111"}} 18000000136400c7711cc7b548f377dc80a131c836403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-156.phpt0000644000076500000240000000155513572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq020] Normality --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f2af967ed05c82de3297ff6fde3c403000 {"d":{"$numberDecimal":"1234567890123456789012345678901234"}} 18000000136400f2af967ed05c82de3297ff6fde3c403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-2-valid-157.phpt0000644000076500000240000000155413572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq550] Specials --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff638e8d37c087adbe09ed413000 {"d":{"$numberDecimal":"9999999999999999999999999999999999"}} 18000000136400ffffffff638e8d37c087adbe09ed413000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-001.phpt0000644000076500000240000000207013572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [basx066] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400185c0ace0000000000000000000038b000 {"d":{"$numberDecimal":"-345678.5432"}} 18000000136400185c0ace0000000000000000000038b000 18000000136400185c0ace0000000000000000000038b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-002.phpt0000644000076500000240000000206713572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx065] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400185c0ace0000000000000000000038b000 {"d":{"$numberDecimal":"-345678.5432"}} 18000000136400185c0ace0000000000000000000038b000 18000000136400185c0ace0000000000000000000038b000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-003.phpt0000644000076500000240000000154513572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx064] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400185c0ace0000000000000000000038b000 {"d":{"$numberDecimal":"-345678.5432"}} 18000000136400185c0ace0000000000000000000038b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-004.phpt0000644000076500000240000000152313572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx041] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364004c0000000000000000000000000040b000 {"d":{"$numberDecimal":"-76"}} 180000001364004c0000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-005.phpt0000644000076500000240000000154613572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx027] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000f270000000000000000000000003ab000 {"d":{"$numberDecimal":"-9.999"}} 180000001364000f270000000000000000000000003ab000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-006.phpt0000644000076500000240000000154613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx026] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364009f230000000000000000000000003ab000 {"d":{"$numberDecimal":"-9.119"}} 180000001364009f230000000000000000000000003ab000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-007.phpt0000644000076500000240000000154413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx025] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364008f030000000000000000000000003cb000 {"d":{"$numberDecimal":"-9.11"}} 180000001364008f030000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-008.phpt0000644000076500000240000000154213572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx024] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364005b000000000000000000000000003eb000 {"d":{"$numberDecimal":"-9.1"}} 180000001364005b000000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-009.phpt0000644000076500000240000000214613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr531] negatives (Rounded) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640099761cc7b548f377dc80a131c836feaf00 {"d":{"$numberDecimal":"-1.111111111111111111111111111112345"}} 1800000013640099761cc7b548f377dc80a131c836feaf00 1800000013640099761cc7b548f377dc80a131c836feaf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-010.phpt0000644000076500000240000000154213572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx022] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000003eb000 {"d":{"$numberDecimal":"-1.0"}} 180000001364000a000000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-011.phpt0000644000076500000240000000153613572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx021] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400010000000000000000000000000040b000 {"d":{"$numberDecimal":"-1"}} 18000000136400010000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-012.phpt0000644000076500000240000000177513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx601] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002e3000 {"d":{"$numberDecimal":"0E-9"}} 1800000013640000000000000000000000000000002e3000 1800000013640000000000000000000000000000002e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-013.phpt0000644000076500000240000000200013572250760022343 0ustar alcaeusstaff--TEST-- Decimal128: [basx622] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002eb000 {"d":{"$numberDecimal":"-0E-9"}} 1800000013640000000000000000000000000000002eb000 1800000013640000000000000000000000000000002eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-014.phpt0000644000076500000240000000177413572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx602] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000303000 {"d":{"$numberDecimal":"0E-8"}} 180000001364000000000000000000000000000000303000 180000001364000000000000000000000000000000303000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-015.phpt0000644000076500000240000000177713572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [basx621] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON 
echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000030b000 {"d":{"$numberDecimal":"-0E-8"}} 18000000136400000000000000000000000000000030b000 18000000136400000000000000000000000000000030b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-016.phpt0000644000076500000240000000177313572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx603] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000323000 {"d":{"$numberDecimal":"0E-7"}} 180000001364000000000000000000000000000000323000 180000001364000000000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-017.phpt0000644000076500000240000000177613572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [basx620] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000032b000 {"d":{"$numberDecimal":"-0E-7"}} 18000000136400000000000000000000000000000032b000 18000000136400000000000000000000000000000032b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-018.phpt0000644000076500000240000000146513572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx604] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000343000 {"d":{"$numberDecimal":"0.000000"}} 180000001364000000000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-019.phpt0000644000076500000240000000146713572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [basx619] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000034b000 {"d":{"$numberDecimal":"-0.000000"}} 18000000136400000000000000000000000000000034b000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-020.phpt0000644000076500000240000000146313572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx605] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000363000 {"d":{"$numberDecimal":"0.00000"}} 180000001364000000000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-021.phpt0000644000076500000240000000146513572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx618] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000036b000 {"d":{"$numberDecimal":"-0.00000"}} 18000000136400000000000000000000000000000036b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-022.phpt0000644000076500000240000000176313572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx680] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-023.phpt0000644000076500000240000000146113572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx606] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000383000 {"d":{"$numberDecimal":"0.0000"}} 180000001364000000000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-024.phpt0000644000076500000240000000146313572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx617] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 
18000000136400000000000000000000000000000038b000 {"d":{"$numberDecimal":"-0.0000"}} 18000000136400000000000000000000000000000038b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-025.phpt0000644000076500000240000000176213572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx681] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-026.phpt0000644000076500000240000000176313572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx686] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-027.phpt0000644000076500000240000000176513572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [basx687] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000040b000 {"d":{"$numberDecimal":"-0"}} 18000000136400000000000000000000000000000040b000 18000000136400000000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-028.phpt0000644000076500000240000000205713572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx019] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003cb000 {"d":{"$numberDecimal":"-0.00"}} 1800000013640000000000000000000000000000003cb000 1800000013640000000000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-029.phpt0000644000076500000240000000145713572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [basx607] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.000"}} 1800000013640000000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-030.phpt0000644000076500000240000000146113572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx616] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003ab000 {"d":{"$numberDecimal":"-0.000"}} 1800000013640000000000000000000000000000003ab000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-031.phpt0000644000076500000240000000176113572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx682] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-032.phpt0000644000076500000240000000200513572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx155] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo 
bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.000"}} 1800000013640000000000000000000000000000003a3000 1800000013640000000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-033.phpt0000644000076500000240000000200713572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx130] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000383000 {"d":{"$numberDecimal":"0.0000"}} 180000001364000000000000000000000000000000383000 180000001364000000000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-034.phpt0000644000076500000240000000205513572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx290] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000038b000 {"d":{"$numberDecimal":"-0.0000"}} 18000000136400000000000000000000000000000038b000 18000000136400000000000000000000000000000038b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-035.phpt0000644000076500000240000000201113572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx131] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000363000 {"d":{"$numberDecimal":"0.00000"}} 180000001364000000000000000000000000000000363000 180000001364000000000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-036.phpt0000644000076500000240000000205713572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx291] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 
18000000136400000000000000000000000000000036b000 {"d":{"$numberDecimal":"-0.00000"}} 18000000136400000000000000000000000000000036b000 18000000136400000000000000000000000000000036b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-037.phpt0000644000076500000240000000201313572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx132] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000343000 {"d":{"$numberDecimal":"0.000000"}} 180000001364000000000000000000000000000000343000 180000001364000000000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-038.phpt0000644000076500000240000000206113572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx292] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000034b000 {"d":{"$numberDecimal":"-0.000000"}} 18000000136400000000000000000000000000000034b000 18000000136400000000000000000000000000000034b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-039.phpt0000644000076500000240000000200313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx133] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000323000 {"d":{"$numberDecimal":"0E-7"}} 180000001364000000000000000000000000000000323000 180000001364000000000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-040.phpt0000644000076500000240000000205113572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx293] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000032b000 
{"d":{"$numberDecimal":"-0E-7"}} 18000000136400000000000000000000000000000032b000 18000000136400000000000000000000000000000032b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-041.phpt0000644000076500000240000000145513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx608] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.00"}} 1800000013640000000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-042.phpt0000644000076500000240000000145713572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx615] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003cb000 {"d":{"$numberDecimal":"-0.00"}} 1800000013640000000000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-043.phpt0000644000076500000240000000176013572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx683] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-044.phpt0000644000076500000240000000177113572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx630] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.00"}} 1800000013640000000000000000000000000000003c3000 1800000013640000000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-045.phpt0000644000076500000240000000177113572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx670] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.00"}} 1800000013640000000000000000000000000000003c3000 1800000013640000000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-046.phpt0000644000076500000240000000176713572250760022374 0ustar alcaeusstaff--TEST-- Decimal128: [basx631] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.0"}} 1800000013640000000000000000000000000000003e3000 1800000013640000000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-047.phpt0000644000076500000240000000177313572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [basx671] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.000"}} 1800000013640000000000000000000000000000003a3000 1800000013640000000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-048.phpt0000644000076500000240000000200613572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx134] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000383000 {"d":{"$numberDecimal":"0.0000"}} 180000001364000000000000000000000000000000383000 180000001364000000000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-049.phpt0000644000076500000240000000205413572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx294] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000038b000 {"d":{"$numberDecimal":"-0.0000"}} 18000000136400000000000000000000000000000038b000 18000000136400000000000000000000000000000038b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-050.phpt0000644000076500000240000000176313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx632] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-051.phpt0000644000076500000240000000177513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx672] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000383000 {"d":{"$numberDecimal":"0.0000"}} 180000001364000000000000000000000000000000383000 180000001364000000000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-052.phpt0000644000076500000240000000201013572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [basx135] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000363000 {"d":{"$numberDecimal":"0.00000"}} 180000001364000000000000000000000000000000363000 180000001364000000000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-053.phpt0000644000076500000240000000205613572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx295] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000036b000 {"d":{"$numberDecimal":"-0.00000"}} 18000000136400000000000000000000000000000036b000 18000000136400000000000000000000000000000036b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-054.phpt0000644000076500000240000000177113572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx633] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000423000 {"d":{"$numberDecimal":"0E+1"}} 180000001364000000000000000000000000000000423000 180000001364000000000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-055.phpt0000644000076500000240000000177713572250760022375 0ustar alcaeusstaff--TEST-- Decimal128: [basx673] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000363000 {"d":{"$numberDecimal":"0.00000"}} 180000001364000000000000000000000000000000363000 180000001364000000000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-056.phpt0000644000076500000240000000201213572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx136] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000343000 {"d":{"$numberDecimal":"0.000000"}} 180000001364000000000000000000000000000000343000 180000001364000000000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-057.phpt0000644000076500000240000000200113572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx674] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 
180000001364000000000000000000000000000000343000 {"d":{"$numberDecimal":"0.000000"}} 180000001364000000000000000000000000000000343000 180000001364000000000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-058.phpt0000644000076500000240000000177113572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [basx634] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000443000 {"d":{"$numberDecimal":"0E+2"}} 180000001364000000000000000000000000000000443000 180000001364000000000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-059.phpt0000644000076500000240000000200213572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx137] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000323000 {"d":{"$numberDecimal":"0E-7"}} 180000001364000000000000000000000000000000323000 180000001364000000000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-060.phpt0000644000076500000240000000177113572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx635] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000463000 {"d":{"$numberDecimal":"0E+3"}} 180000001364000000000000000000000000000000463000 180000001364000000000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-061.phpt0000644000076500000240000000177113572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx675] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000323000 {"d":{"$numberDecimal":"0E-7"}} 180000001364000000000000000000000000000000323000 
180000001364000000000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-062.phpt0000644000076500000240000000177113572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx636] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000483000 {"d":{"$numberDecimal":"0E+4"}} 180000001364000000000000000000000000000000483000 180000001364000000000000000000000000000000483000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-063.phpt0000644000076500000240000000177113572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx676] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000303000 {"d":{"$numberDecimal":"0E-8"}} 180000001364000000000000000000000000000000303000 180000001364000000000000000000000000000000303000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-064.phpt0000644000076500000240000000177113572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx637] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004a3000 {"d":{"$numberDecimal":"0E+5"}} 1800000013640000000000000000000000000000004a3000 1800000013640000000000000000000000000000004a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-065.phpt0000644000076500000240000000177113572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx677] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002e3000 {"d":{"$numberDecimal":"0E-9"}} 1800000013640000000000000000000000000000002e3000 1800000013640000000000000000000000000000002e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-066.phpt0000644000076500000240000000177113572250760022371 0ustar 
alcaeusstaff--TEST-- Decimal128: [basx638] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004c3000 {"d":{"$numberDecimal":"0E+6"}} 1800000013640000000000000000000000000000004c3000 1800000013640000000000000000000000000000004c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-067.phpt0000644000076500000240000000177313572250760022374 0ustar alcaeusstaff--TEST-- Decimal128: [basx678] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002c3000 {"d":{"$numberDecimal":"0E-10"}} 1800000013640000000000000000000000000000002c3000 1800000013640000000000000000000000000000002c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-068.phpt0000644000076500000240000000200113572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx149] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000523000 {"d":{"$numberDecimal":"0E+9"}} 180000001364000000000000000000000000000000523000 180000001364000000000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-069.phpt0000644000076500000240000000177113572250760022374 0ustar alcaeusstaff--TEST-- Decimal128: [basx639] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004e3000 {"d":{"$numberDecimal":"0E+7"}} 1800000013640000000000000000000000000000004e3000 1800000013640000000000000000000000000000004e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-070.phpt0000644000076500000240000000177313572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx679] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002a3000 {"d":{"$numberDecimal":"0E-11"}} 1800000013640000000000000000000000000000002a3000 1800000013640000000000000000000000000000002a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-071.phpt0000644000076500000240000000206613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx063] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400185c0ace00000000000000000000383000 {"d":{"$numberDecimal":"345678.5432"}} 18000000136400185c0ace00000000000000000000383000 18000000136400185c0ace00000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-072.phpt0000644000076500000240000000154213572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx018] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003eb000 {"d":{"$numberDecimal":"-0.0"}} 1800000013640000000000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-073.phpt0000644000076500000240000000145313572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx609] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.0"}} 1800000013640000000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-074.phpt0000644000076500000240000000145513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx614] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003eb000 {"d":{"$numberDecimal":"-0.0"}} 
1800000013640000000000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-075.phpt0000644000076500000240000000175713572250760022375 0ustar alcaeusstaff--TEST-- Decimal128: [basx684] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-076.phpt0000644000076500000240000000176613572250760022376 0ustar alcaeusstaff--TEST-- Decimal128: [basx640] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.0"}} 1800000013640000000000000000000000000000003e3000 1800000013640000000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-077.phpt0000644000076500000240000000176613572250760022377 0ustar alcaeusstaff--TEST-- Decimal128: [basx660] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.0"}} 1800000013640000000000000000000000000000003e3000 1800000013640000000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-078.phpt0000644000076500000240000000176213572250760022374 0ustar alcaeusstaff--TEST-- Decimal128: [basx641] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-079.phpt0000644000076500000240000000177013572250760022374 0ustar 
alcaeusstaff--TEST-- Decimal128: [basx661] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.00"}} 1800000013640000000000000000000000000000003c3000 1800000013640000000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-080.phpt0000644000076500000240000000205113572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx296] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003ab000 {"d":{"$numberDecimal":"-0.000"}} 1800000013640000000000000000000000000000003ab000 1800000013640000000000000000000000000000003ab000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-081.phpt0000644000076500000240000000177013572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx642] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000423000 {"d":{"$numberDecimal":"0E+1"}} 180000001364000000000000000000000000000000423000 180000001364000000000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-082.phpt0000644000076500000240000000177213572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx662] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.000"}} 1800000013640000000000000000000000000000003a3000 1800000013640000000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-083.phpt0000644000076500000240000000205313572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx297] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000038b000 {"d":{"$numberDecimal":"-0.0000"}} 18000000136400000000000000000000000000000038b000 18000000136400000000000000000000000000000038b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-084.phpt0000644000076500000240000000177013572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx643] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000443000 {"d":{"$numberDecimal":"0E+2"}} 180000001364000000000000000000000000000000443000 180000001364000000000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-085.phpt0000644000076500000240000000177413572250760022375 0ustar alcaeusstaff--TEST-- Decimal128: [basx663] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000383000 {"d":{"$numberDecimal":"0.0000"}} 180000001364000000000000000000000000000000383000 180000001364000000000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-086.phpt0000644000076500000240000000177013572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [basx644] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000463000 {"d":{"$numberDecimal":"0E+3"}} 180000001364000000000000000000000000000000463000 180000001364000000000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-087.phpt0000644000076500000240000000177613572250760022401 0ustar alcaeusstaff--TEST-- Decimal128: [basx664] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical 
extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000363000 {"d":{"$numberDecimal":"0.00000"}} 180000001364000000000000000000000000000000363000 180000001364000000000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-088.phpt0000644000076500000240000000177013572250760022374 0ustar alcaeusstaff--TEST-- Decimal128: [basx645] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000483000 {"d":{"$numberDecimal":"0E+4"}} 180000001364000000000000000000000000000000483000 180000001364000000000000000000000000000000483000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-089.phpt0000644000076500000240000000200013572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx665] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000343000 {"d":{"$numberDecimal":"0.000000"}} 180000001364000000000000000000000000000000343000 180000001364000000000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-090.phpt0000644000076500000240000000177013572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx646] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004a3000 {"d":{"$numberDecimal":"0E+5"}} 1800000013640000000000000000000000000000004a3000 1800000013640000000000000000000000000000004a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-091.phpt0000644000076500000240000000177013572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx666] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // 
Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000323000 {"d":{"$numberDecimal":"0E-7"}} 180000001364000000000000000000000000000000323000 180000001364000000000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-092.phpt0000644000076500000240000000177013572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx647] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004c3000 {"d":{"$numberDecimal":"0E+6"}} 1800000013640000000000000000000000000000004c3000 1800000013640000000000000000000000000000004c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-093.phpt0000644000076500000240000000177013572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx667] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000303000 {"d":{"$numberDecimal":"0E-8"}} 180000001364000000000000000000000000000000303000 180000001364000000000000000000000000000000303000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-094.phpt0000644000076500000240000000177013572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [basx648] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004e3000 {"d":{"$numberDecimal":"0E+7"}} 1800000013640000000000000000000000000000004e3000 1800000013640000000000000000000000000000004e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-095.phpt0000644000076500000240000000177013572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [basx668] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002e3000 
{"d":{"$numberDecimal":"0E-9"}} 1800000013640000000000000000000000000000002e3000 1800000013640000000000000000000000000000002e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-096.phpt0000644000076500000240000000200013572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx160] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000523000 {"d":{"$numberDecimal":"0E+9"}} 180000001364000000000000000000000000000000523000 180000001364000000000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-097.phpt0000644000076500000240000000200013572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx161] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002e3000 {"d":{"$numberDecimal":"0E-9"}} 1800000013640000000000000000000000000000002e3000 1800000013640000000000000000000000000000002e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-098.phpt0000644000076500000240000000177013572250760022375 0ustar alcaeusstaff--TEST-- Decimal128: [basx649] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000503000 {"d":{"$numberDecimal":"0E+8"}} 180000001364000000000000000000000000000000503000 180000001364000000000000000000000000000000503000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-099.phpt0000644000076500000240000000177213572250760022400 0ustar alcaeusstaff--TEST-- Decimal128: [basx669] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000002c3000 {"d":{"$numberDecimal":"0E-10"}} 1800000013640000000000000000000000000000002c3000 1800000013640000000000000000000000000000002c3000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-100.phpt0000644000076500000240000000206513572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx062] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400185c0ace00000000000000000000383000 {"d":{"$numberDecimal":"345678.5432"}} 18000000136400185c0ace00000000000000000000383000 18000000136400185c0ace00000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-101.phpt0000644000076500000240000000153413572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx001] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-102.phpt0000644000076500000240000000153613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx017] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000040b000 {"d":{"$numberDecimal":"-0"}} 18000000136400000000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-103.phpt0000644000076500000240000000175613572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx611] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-104.phpt0000644000076500000240000000176113572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx613] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000040b000 {"d":{"$numberDecimal":"-0"}} 18000000136400000000000000000000000000000040b000 18000000136400000000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-105.phpt0000644000076500000240000000175613572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx685] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-106.phpt0000644000076500000240000000175713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx688] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-107.phpt0000644000076500000240000000176113572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx689] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000040b000 {"d":{"$numberDecimal":"-0"}} 18000000136400000000000000000000000000000040b000 18000000136400000000000000000000000000000040b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-108.phpt0000644000076500000240000000176013572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx650] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000403000 {"d":{"$numberDecimal":"0"}} 180000001364000000000000000000000000000000403000 180000001364000000000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-109.phpt0000644000076500000240000000145513572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx651] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000423000 {"d":{"$numberDecimal":"0E+1"}} 180000001364000000000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-110.phpt0000644000076500000240000000204513572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx298] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003cb000 {"d":{"$numberDecimal":"-0.00"}} 1800000013640000000000000000000000000000003cb000 
1800000013640000000000000000000000000000003cb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-111.phpt0000644000076500000240000000145513572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx652] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000443000 {"d":{"$numberDecimal":"0E+2"}} 180000001364000000000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-112.phpt0000644000076500000240000000204713572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx299] some more negative zeros [systematic tests below] --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003ab000 {"d":{"$numberDecimal":"-0.000"}} 1800000013640000000000000000000000000000003ab000 1800000013640000000000000000000000000000003ab000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-113.phpt0000644000076500000240000000145513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx653] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000463000 {"d":{"$numberDecimal":"0E+3"}} 180000001364000000000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-114.phpt0000644000076500000240000000145513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx654] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000483000 {"d":{"$numberDecimal":"0E+4"}} 180000001364000000000000000000000000000000483000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-115.phpt0000644000076500000240000000145513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx655] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004a3000 {"d":{"$numberDecimal":"0E+5"}} 1800000013640000000000000000000000000000004a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-116.phpt0000644000076500000240000000145513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx656] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004c3000 {"d":{"$numberDecimal":"0E+6"}} 1800000013640000000000000000000000000000004c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-117.phpt0000644000076500000240000000145513572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx657] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000004e3000 {"d":{"$numberDecimal":"0E+7"}} 1800000013640000000000000000000000000000004e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-118.phpt0000644000076500000240000000145513572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx658] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000503000 {"d":{"$numberDecimal":"0E+8"}} 180000001364000000000000000000000000000000503000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-119.phpt0000644000076500000240000000200013572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx138] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000523000 {"d":{"$numberDecimal":"0E+9"}} 180000001364000000000000000000000000000000523000 180000001364000000000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-120.phpt0000644000076500000240000000147013572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx139] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical 
extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000000000000000000000052b000 {"d":{"$numberDecimal":"-0E+9"}} 18000000136400000000000000000000000000000052b000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-121.phpt0000644000076500000240000000146613572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx144] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000523000 {"d":{"$numberDecimal":"0E+9"}} 180000001364000000000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-122.phpt0000644000076500000240000000177613572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx154] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000523000 {"d":{"$numberDecimal":"0E+9"}} 180000001364000000000000000000000000000000523000 180000001364000000000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-123.phpt0000644000076500000240000000145513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx659] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000523000 {"d":{"$numberDecimal":"0E+9"}} 180000001364000000000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-124.phpt0000644000076500000240000000204213572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx042] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400fc040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.76"}} 18000000136400fc040000000000000000000000003c3000 18000000136400fc040000000000000000000000003c3000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-125.phpt0000644000076500000240000000200213572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx143] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-126.phpt0000644000076500000240000000206413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx061] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400185c0ace00000000000000000000383000 {"d":{"$numberDecimal":"345678.5432"}} 18000000136400185c0ace00000000000000000000383000 18000000136400185c0ace00000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-127.phpt0000644000076500000240000000211313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx036] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640015cd5b0700000000000000000000203000 {"d":{"$numberDecimal":"1.23456789E-8"}} 1800000013640015cd5b0700000000000000000000203000 1800000013640015cd5b0700000000000000000000203000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-128.phpt0000644000076500000240000000211213572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx035] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640015cd5b0700000000000000000000223000 {"d":{"$numberDecimal":"1.23456789E-7"}} 1800000013640015cd5b0700000000000000000000223000 1800000013640015cd5b0700000000000000000000223000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-129.phpt0000644000076500000240000000157213572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx034] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640015cd5b0700000000000000000000243000 {"d":{"$numberDecimal":"0.00000123456789"}} 1800000013640015cd5b0700000000000000000000243000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-130.phpt0000644000076500000240000000153713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx053] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003200000000000000000000000000323000 {"d":{"$numberDecimal":"0.0000050"}} 180000001364003200000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-131.phpt0000644000076500000240000000157013572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx033] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640015cd5b0700000000000000000000263000 {"d":{"$numberDecimal":"0.0000123456789"}} 1800000013640015cd5b0700000000000000000000263000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-132.phpt0000644000076500000240000000154413572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx016] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000c000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.012"}} 180000001364000c000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-133.phpt0000644000076500000240000000154413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx015] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364007b000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.123"}} 180000001364007b000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-134.phpt0000644000076500000240000000157413572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx037] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640078df0d8648700000000000000000223000 {"d":{"$numberDecimal":"0.123456789012344"}} 1800000013640078df0d8648700000000000000000223000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-135.phpt0000644000076500000240000000157413572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx038] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640079df0d8648700000000000000000223000 {"d":{"$numberDecimal":"0.123456789012345"}} 1800000013640079df0d8648700000000000000000223000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-136.phpt0000644000076500000240000000147213572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx250] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000383000 {"d":{"$numberDecimal":"0.1265"}} 18000000136400f104000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-137.phpt0000644000076500000240000000201013572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx257] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000383000 {"d":{"$numberDecimal":"0.1265"}} 18000000136400f104000000000000000000000000383000 18000000136400f104000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-138.phpt0000644000076500000240000000201213572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx256] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000363000 {"d":{"$numberDecimal":"0.01265"}} 18000000136400f104000000000000000000000000363000 18000000136400f104000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-139.phpt0000644000076500000240000000200613572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx258] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical 
BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003a3000 {"d":{"$numberDecimal":"1.265"}} 18000000136400f1040000000000000000000000003a3000 18000000136400f1040000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-140.phpt0000644000076500000240000000201713572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx251] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000103000 {"d":{"$numberDecimal":"1.265E-21"}} 18000000136400f104000000000000000000000000103000 18000000136400f104000000000000000000000000103000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-141.phpt0000644000076500000240000000201713572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx263] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000603000 {"d":{"$numberDecimal":"1.265E+19"}} 18000000136400f104000000000000000000000000603000 18000000136400f104000000000000000000000000603000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-142.phpt0000644000076500000240000000201413572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx255] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000343000 {"d":{"$numberDecimal":"0.001265"}} 18000000136400f104000000000000000000000000343000 18000000136400f104000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-143.phpt0000644000076500000240000000200613572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx259] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003c3000 
{"d":{"$numberDecimal":"12.65"}} 18000000136400f1040000000000000000000000003c3000 18000000136400f1040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-144.phpt0000644000076500000240000000201613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx254] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000323000 {"d":{"$numberDecimal":"0.0001265"}} 18000000136400f104000000000000000000000000323000 18000000136400f104000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-145.phpt0000644000076500000240000000200613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx260] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003e3000 {"d":{"$numberDecimal":"126.5"}} 18000000136400f1040000000000000000000000003e3000 18000000136400f1040000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-146.phpt0000644000076500000240000000202013572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx253] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000303000 {"d":{"$numberDecimal":"0.00001265"}} 18000000136400f104000000000000000000000000303000 18000000136400f104000000000000000000000000303000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-147.phpt0000644000076500000240000000200413572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx261] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000403000 {"d":{"$numberDecimal":"1265"}} 18000000136400f104000000000000000000000000403000 18000000136400f104000000000000000000000000403000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-148.phpt0000644000076500000240000000201413572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx252] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000283000 {"d":{"$numberDecimal":"1.265E-9"}} 18000000136400f104000000000000000000000000283000 18000000136400f104000000000000000000000000283000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-149.phpt0000644000076500000240000000201413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx262] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000483000 {"d":{"$numberDecimal":"1.265E+7"}} 18000000136400f104000000000000000000000000483000 18000000136400f104000000000000000000000000483000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-150.phpt0000644000076500000240000000200613572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx159] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640049000000000000000000000000002e3000 {"d":{"$numberDecimal":"7.3E-8"}} 1800000013640049000000000000000000000000002e3000 1800000013640049000000000000000000000000002e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-151.phpt0000644000076500000240000000154213572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx004] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640064000000000000000000000000003c3000 {"d":{"$numberDecimal":"1.00"}} 1800000013640064000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-152.phpt0000644000076500000240000000154013572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx003] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000003e3000 {"d":{"$numberDecimal":"1.0"}} 180000001364000a000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-153.phpt0000644000076500000240000000153413572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx002] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000403000 {"d":{"$numberDecimal":"1"}} 180000001364000100000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-154.phpt0000644000076500000240000000200113572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx148] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-155.phpt0000644000076500000240000000200013572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx153] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-156.phpt0000644000076500000240000000200013572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx141] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate 
extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-157.phpt0000644000076500000240000000200013572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx146] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-158.phpt0000644000076500000240000000177713572250760022401 0ustar alcaeusstaff--TEST-- Decimal128: [basx151] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-159.phpt0000644000076500000240000000147013572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx142] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000f43000 {"d":{"$numberDecimal":"1E+90"}} 180000001364000100000000000000000000000000f43000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-160.phpt0000644000076500000240000000200213572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: [basx147] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000f43000 {"d":{"$numberDecimal":"1E+90"}} 180000001364000100000000000000000000000000f43000 180000001364000100000000000000000000000000f43000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-161.phpt0000644000076500000240000000200113572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: [basx152] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000f43000 {"d":{"$numberDecimal":"1E+90"}} 180000001364000100000000000000000000000000f43000 180000001364000100000000000000000000000000f43000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-162.phpt0000644000076500000240000000146613572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx140] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-163.phpt0000644000076500000240000000177613572250760022374 0ustar alcaeusstaff--TEST-- Decimal128: [basx150] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000523000 {"d":{"$numberDecimal":"1E+9"}} 180000001364000100000000000000000000000000523000 180000001364000100000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-164.phpt0000644000076500000240000000154413572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx014] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400d2040000000000000000000000003a3000 {"d":{"$numberDecimal":"1.234"}} 18000000136400d2040000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-165.phpt0000644000076500000240000000147013572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx170] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003a3000 {"d":{"$numberDecimal":"1.265"}} 18000000136400f1040000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-166.phpt0000644000076500000240000000200513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx177] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003a3000 {"d":{"$numberDecimal":"1.265"}} 18000000136400f1040000000000000000000000003a3000 18000000136400f1040000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-167.phpt0000644000076500000240000000200713572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx176] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000383000 {"d":{"$numberDecimal":"0.1265"}} 18000000136400f104000000000000000000000000383000 18000000136400f104000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-168.phpt0000644000076500000240000000200513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx178] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo 
bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.65"}} 18000000136400f1040000000000000000000000003c3000 18000000136400f1040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-169.phpt0000644000076500000240000000150013572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx171] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000123000 {"d":{"$numberDecimal":"1.265E-20"}} 18000000136400f104000000000000000000000000123000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-170.phpt0000644000076500000240000000150013572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx183] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000623000 {"d":{"$numberDecimal":"1.265E+20"}} 18000000136400f104000000000000000000000000623000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-171.phpt0000644000076500000240000000201113572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx175] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000363000 {"d":{"$numberDecimal":"0.01265"}} 18000000136400f104000000000000000000000000363000 18000000136400f104000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-172.phpt0000644000076500000240000000200513572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx179] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003e3000 {"d":{"$numberDecimal":"126.5"}} 18000000136400f1040000000000000000000000003e3000 18000000136400f1040000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-173.phpt0000644000076500000240000000201313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: 
[basx174] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000343000 {"d":{"$numberDecimal":"0.001265"}} 18000000136400f104000000000000000000000000343000 18000000136400f104000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-174.phpt0000644000076500000240000000200313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx180] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000403000 {"d":{"$numberDecimal":"1265"}} 18000000136400f104000000000000000000000000403000 18000000136400f104000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-175.phpt0000644000076500000240000000201513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx173] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000323000 {"d":{"$numberDecimal":"0.0001265"}} 18000000136400f104000000000000000000000000323000 18000000136400f104000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-176.phpt0000644000076500000240000000147613572250760022375 0ustar alcaeusstaff--TEST-- Decimal128: [basx181] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000423000 {"d":{"$numberDecimal":"1.265E+4"}} 18000000136400f104000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-177.phpt0000644000076500000240000000147613572250760022376 0ustar alcaeusstaff--TEST-- Decimal128: [basx172] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000002a3000 {"d":{"$numberDecimal":"1.265E-8"}} 18000000136400f1040000000000000000000000002a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-178.phpt0000644000076500000240000000147613572250760022377 0ustar alcaeusstaff--TEST-- Decimal128: [basx182] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000004a3000 {"d":{"$numberDecimal":"1.265E+8"}} 18000000136400f1040000000000000000000000004a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-179.phpt0000644000076500000240000000146613572250760022377 0ustar alcaeusstaff--TEST-- Decimal128: [basx157] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000400000000000000000000000000523000 {"d":{"$numberDecimal":"4E+9"}} 180000001364000400000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-180.phpt0000644000076500000240000000200113572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx067] examples --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000500000000000000000000000000343000 {"d":{"$numberDecimal":"0.000005"}} 180000001364000500000000000000000000000000343000 180000001364000500000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-181.phpt0000644000076500000240000000146013572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx069] examples --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000500000000000000000000000000323000 {"d":{"$numberDecimal":"5E-7"}} 180000001364000500000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-182.phpt0000644000076500000240000000200413572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx385] Engineering notation tests --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000403000 {"d":{"$numberDecimal":"7"}} 180000001364000700000000000000000000000000403000 180000001364000700000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-183.phpt0000644000076500000240000000201513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx365] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000543000 {"d":{"$numberDecimal":"7E+10"}} 180000001364000700000000000000000000000000543000 180000001364000700000000000000000000000000543000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-184.phpt0000644000076500000240000000150413572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx405] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000002c3000 {"d":{"$numberDecimal":"7E-10"}} 1800000013640007000000000000000000000000002c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-185.phpt0000644000076500000240000000201513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx363] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000563000 {"d":{"$numberDecimal":"7E+11"}} 180000001364000700000000000000000000000000563000 180000001364000700000000000000000000000000563000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-186.phpt0000644000076500000240000000150413572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx407] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000002a3000 {"d":{"$numberDecimal":"7E-11"}} 1800000013640007000000000000000000000000002a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-187.phpt0000644000076500000240000000201513572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx361] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000583000 {"d":{"$numberDecimal":"7E+12"}} 180000001364000700000000000000000000000000583000 180000001364000700000000000000000000000000583000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-188.phpt0000644000076500000240000000150413572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx409] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000283000 {"d":{"$numberDecimal":"7E-12"}} 180000001364000700000000000000000000000000283000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-189.phpt0000644000076500000240000000150413572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [basx411] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000263000 {"d":{"$numberDecimal":"7E-13"}} 180000001364000700000000000000000000000000263000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-190.phpt0000644000076500000240000000201213572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx383] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000423000 {"d":{"$numberDecimal":"7E+1"}} 180000001364000700000000000000000000000000423000 180000001364000700000000000000000000000000423000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-191.phpt0000644000076500000240000000201113572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx387] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.7"}} 1800000013640007000000000000000000000000003e3000 1800000013640007000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-192.phpt0000644000076500000240000000201213572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx381] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000443000 {"d":{"$numberDecimal":"7E+2"}} 180000001364000700000000000000000000000000443000 180000001364000700000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-193.phpt0000644000076500000240000000201313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx389] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.07"}} 1800000013640007000000000000000000000000003c3000 1800000013640007000000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-194.phpt0000644000076500000240000000201213572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx379] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000463000 {"d":{"$numberDecimal":"7E+3"}} 180000001364000700000000000000000000000000463000 180000001364000700000000000000000000000000463000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-195.phpt0000644000076500000240000000201513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx391] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.007"}} 1800000013640007000000000000000000000000003a3000 1800000013640007000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-196.phpt0000644000076500000240000000201213572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx377] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000483000 {"d":{"$numberDecimal":"7E+4"}} 180000001364000700000000000000000000000000483000 180000001364000700000000000000000000000000483000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-197.phpt0000644000076500000240000000201713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [basx393] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000383000 {"d":{"$numberDecimal":"0.0007"}} 180000001364000700000000000000000000000000383000 180000001364000700000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-198.phpt0000644000076500000240000000201213572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx375] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000004a3000 {"d":{"$numberDecimal":"7E+5"}} 1800000013640007000000000000000000000000004a3000 1800000013640007000000000000000000000000004a3000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-199.phpt0000644000076500000240000000202113572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx395] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000363000 {"d":{"$numberDecimal":"0.00007"}} 180000001364000700000000000000000000000000363000 180000001364000700000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-200.phpt0000644000076500000240000000201213572250760022344 0ustar alcaeusstaff--TEST-- Decimal128: [basx373] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000004c3000 {"d":{"$numberDecimal":"7E+6"}} 1800000013640007000000000000000000000000004c3000 1800000013640007000000000000000000000000004c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-201.phpt0000644000076500000240000000202313572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [basx397] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000343000 {"d":{"$numberDecimal":"0.000007"}} 180000001364000700000000000000000000000000343000 180000001364000700000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-202.phpt0000644000076500000240000000201213572250760022346 0ustar alcaeusstaff--TEST-- Decimal128: [basx371] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000004e3000 {"d":{"$numberDecimal":"7E+7"}} 1800000013640007000000000000000000000000004e3000 1800000013640007000000000000000000000000004e3000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-203.phpt0000644000076500000240000000150213572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx399] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000323000 {"d":{"$numberDecimal":"7E-7"}} 180000001364000700000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-204.phpt0000644000076500000240000000201213572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: [basx369] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000503000 {"d":{"$numberDecimal":"7E+8"}} 180000001364000700000000000000000000000000503000 180000001364000700000000000000000000000000503000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-205.phpt0000644000076500000240000000150213572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx401] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000303000 {"d":{"$numberDecimal":"7E-8"}} 180000001364000700000000000000000000000000303000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-206.phpt0000644000076500000240000000201213572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx367] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000700000000000000000000000000523000 {"d":{"$numberDecimal":"7E+9"}} 180000001364000700000000000000000000000000523000 180000001364000700000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-207.phpt0000644000076500000240000000150213572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx403] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // 
Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640007000000000000000000000000002e3000 {"d":{"$numberDecimal":"7E-9"}} 1800000013640007000000000000000000000000002e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-208.phpt0000644000076500000240000000154213572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx007] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640064000000000000000000000000003e3000 {"d":{"$numberDecimal":"10.0"}} 1800000013640064000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-209.phpt0000644000076500000240000000153613572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx005] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000403000 {"d":{"$numberDecimal":"10"}} 180000001364000a00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-210.phpt0000644000076500000240000000201013572250760022343 0ustar alcaeusstaff--TEST-- Decimal128: [basx165] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000523000 {"d":{"$numberDecimal":"1.0E+10"}} 180000001364000a00000000000000000000000000523000 180000001364000a00000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-211.phpt0000644000076500000240000000200713572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [basx163] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000523000 {"d":{"$numberDecimal":"1.0E+10"}} 180000001364000a00000000000000000000000000523000 
180000001364000a00000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-212.phpt0000644000076500000240000000200713572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx325] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000403000 {"d":{"$numberDecimal":"10"}} 180000001364000a00000000000000000000000000403000 180000001364000a00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-213.phpt0000644000076500000240000000202213572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx305] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000543000 {"d":{"$numberDecimal":"1.0E+11"}} 180000001364000a00000000000000000000000000543000 180000001364000a00000000000000000000000000543000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-214.phpt0000644000076500000240000000202113572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx345] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000002c3000 {"d":{"$numberDecimal":"1.0E-9"}} 180000001364000a000000000000000000000000002c3000 180000001364000a000000000000000000000000002c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-215.phpt0000644000076500000240000000202213572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx303] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000563000 {"d":{"$numberDecimal":"1.0E+12"}} 180000001364000a00000000000000000000000000563000 180000001364000a00000000000000000000000000563000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-216.phpt0000644000076500000240000000202313572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx347] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000002a3000 {"d":{"$numberDecimal":"1.0E-10"}} 180000001364000a000000000000000000000000002a3000 180000001364000a000000000000000000000000002a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-217.phpt0000644000076500000240000000202213572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx301] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000583000 {"d":{"$numberDecimal":"1.0E+13"}} 180000001364000a00000000000000000000000000583000 180000001364000a00000000000000000000000000583000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-218.phpt0000644000076500000240000000202313572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx349] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000283000 {"d":{"$numberDecimal":"1.0E-11"}} 180000001364000a00000000000000000000000000283000 180000001364000a00000000000000000000000000283000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-219.phpt0000644000076500000240000000202313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx351] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000263000 {"d":{"$numberDecimal":"1.0E-12"}} 180000001364000a00000000000000000000000000263000 180000001364000a00000000000000000000000000263000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-220.phpt0000644000076500000240000000201713572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx323] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000423000 {"d":{"$numberDecimal":"1.0E+2"}} 180000001364000a00000000000000000000000000423000 180000001364000a00000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-221.phpt0000644000076500000240000000201213572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [basx327] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000003e3000 {"d":{"$numberDecimal":"1.0"}} 180000001364000a000000000000000000000000003e3000 180000001364000a000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-222.phpt0000644000076500000240000000201713572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx321] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000443000 {"d":{"$numberDecimal":"1.0E+3"}} 180000001364000a00000000000000000000000000443000 180000001364000a00000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-223.phpt0000644000076500000240000000201413572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx329] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000003c3000 {"d":{"$numberDecimal":"0.10"}} 180000001364000a000000000000000000000000003c3000 180000001364000a000000000000000000000000003c3000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-224.phpt0000644000076500000240000000201713572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx319] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000463000 {"d":{"$numberDecimal":"1.0E+4"}} 180000001364000a00000000000000000000000000463000 180000001364000a00000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-225.phpt0000644000076500000240000000201613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx331] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.010"}} 180000001364000a000000000000000000000000003a3000 180000001364000a000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-226.phpt0000644000076500000240000000201713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx317] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000483000 {"d":{"$numberDecimal":"1.0E+5"}} 180000001364000a00000000000000000000000000483000 180000001364000a00000000000000000000000000483000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-227.phpt0000644000076500000240000000202013572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx333] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000383000 {"d":{"$numberDecimal":"0.0010"}} 180000001364000a00000000000000000000000000383000 180000001364000a00000000000000000000000000383000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-228.phpt0000644000076500000240000000201713572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx315] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000004a3000 {"d":{"$numberDecimal":"1.0E+6"}} 180000001364000a000000000000000000000000004a3000 180000001364000a000000000000000000000000004a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-229.phpt0000644000076500000240000000202213572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx335] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000363000 {"d":{"$numberDecimal":"0.00010"}} 180000001364000a00000000000000000000000000363000 180000001364000a00000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-230.phpt0000644000076500000240000000201713572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [basx313] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000004c3000 {"d":{"$numberDecimal":"1.0E+7"}} 180000001364000a000000000000000000000000004c3000 180000001364000a000000000000000000000000004c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-231.phpt0000644000076500000240000000202413572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx337] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000343000 {"d":{"$numberDecimal":"0.000010"}} 180000001364000a00000000000000000000000000343000 180000001364000a00000000000000000000000000343000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-232.phpt0000644000076500000240000000201713572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx311] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000004e3000 {"d":{"$numberDecimal":"1.0E+8"}} 180000001364000a000000000000000000000000004e3000 180000001364000a000000000000000000000000004e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-233.phpt0000644000076500000240000000202613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx339] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000323000 {"d":{"$numberDecimal":"0.0000010"}} 180000001364000a00000000000000000000000000323000 180000001364000a00000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-234.phpt0000644000076500000240000000201713572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx309] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000503000 {"d":{"$numberDecimal":"1.0E+9"}} 180000001364000a00000000000000000000000000503000 180000001364000a00000000000000000000000000503000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-235.phpt0000644000076500000240000000202013572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx341] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000303000 {"d":{"$numberDecimal":"1.0E-7"}} 180000001364000a00000000000000000000000000303000 180000001364000a00000000000000000000000000303000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-236.phpt0000644000076500000240000000200713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx164] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000f43000 {"d":{"$numberDecimal":"1.0E+91"}} 180000001364000a00000000000000000000000000f43000 180000001364000a00000000000000000000000000f43000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-237.phpt0000644000076500000240000000200613572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx162] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000523000 {"d":{"$numberDecimal":"1.0E+10"}} 180000001364000a00000000000000000000000000523000 180000001364000a00000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-238.phpt0000644000076500000240000000202113572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx307] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000523000 {"d":{"$numberDecimal":"1.0E+10"}} 180000001364000a00000000000000000000000000523000 180000001364000a00000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-239.phpt0000644000076500000240000000202013572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx343] Engineering notation tests --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a000000000000000000000000002e3000 {"d":{"$numberDecimal":"1.0E-8"}} 180000001364000a000000000000000000000000002e3000 180000001364000a000000000000000000000000002e3000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-240.phpt0000644000076500000240000000154213572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx008] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640065000000000000000000000000003e3000 {"d":{"$numberDecimal":"10.1"}} 1800000013640065000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-241.phpt0000644000076500000240000000154213572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx009] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640068000000000000000000000000003e3000 {"d":{"$numberDecimal":"10.4"}} 1800000013640068000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-242.phpt0000644000076500000240000000154213572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx010] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640069000000000000000000000000003e3000 {"d":{"$numberDecimal":"10.5"}} 1800000013640069000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-243.phpt0000644000076500000240000000154213572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx011] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006a000000000000000000000000003e3000 {"d":{"$numberDecimal":"10.6"}} 180000001364006a000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-244.phpt0000644000076500000240000000154213572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx012] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006d000000000000000000000000003e3000 {"d":{"$numberDecimal":"10.9"}} 180000001364006d000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-245.phpt0000644000076500000240000000154213572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx013] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006e000000000000000000000000003e3000 {"d":{"$numberDecimal":"11.0"}} 180000001364006e000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-246.phpt0000644000076500000240000000152113572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx040] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000c00000000000000000000000000403000 {"d":{"$numberDecimal":"12"}} 180000001364000c00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-247.phpt0000644000076500000240000000147013572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx190] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.65"}} 18000000136400f1040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-248.phpt0000644000076500000240000000200513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx197] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.65"}} 18000000136400f1040000000000000000000000003c3000 
18000000136400f1040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-249.phpt0000644000076500000240000000200513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx196] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003a3000 {"d":{"$numberDecimal":"1.265"}} 18000000136400f1040000000000000000000000003a3000 18000000136400f1040000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-250.phpt0000644000076500000240000000200513572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx198] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003e3000 {"d":{"$numberDecimal":"126.5"}} 18000000136400f1040000000000000000000000003e3000 18000000136400f1040000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-251.phpt0000644000076500000240000000201613572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx191] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000143000 {"d":{"$numberDecimal":"1.265E-19"}} 18000000136400f104000000000000000000000000143000 18000000136400f104000000000000000000000000143000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-252.phpt0000644000076500000240000000201613572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [basx203] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000643000 {"d":{"$numberDecimal":"1.265E+21"}} 18000000136400f104000000000000000000000000643000 18000000136400f104000000000000000000000000643000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-253.phpt0000644000076500000240000000200713572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx195] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000383000 {"d":{"$numberDecimal":"0.1265"}} 18000000136400f104000000000000000000000000383000 18000000136400f104000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-254.phpt0000644000076500000240000000200313572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx199] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000403000 {"d":{"$numberDecimal":"1265"}} 18000000136400f104000000000000000000000000403000 18000000136400f104000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-255.phpt0000644000076500000240000000201113572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx194] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000363000 {"d":{"$numberDecimal":"0.01265"}} 18000000136400f104000000000000000000000000363000 18000000136400f104000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-256.phpt0000644000076500000240000000201313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx200] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000423000 {"d":{"$numberDecimal":"1.265E+4"}} 18000000136400f104000000000000000000000000423000 18000000136400f104000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-257.phpt0000644000076500000240000000201313572250760022361 0ustar 
alcaeusstaff--TEST-- Decimal128: [basx193] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000343000 {"d":{"$numberDecimal":"0.001265"}} 18000000136400f104000000000000000000000000343000 18000000136400f104000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-258.phpt0000644000076500000240000000201313572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx201] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000443000 {"d":{"$numberDecimal":"1.265E+5"}} 18000000136400f104000000000000000000000000443000 18000000136400f104000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-259.phpt0000644000076500000240000000201313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx192] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000002c3000 {"d":{"$numberDecimal":"1.265E-7"}} 18000000136400f1040000000000000000000000002c3000 18000000136400f1040000000000000000000000002c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-260.phpt0000644000076500000240000000201313572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx202] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000004c3000 {"d":{"$numberDecimal":"1.265E+9"}} 18000000136400f1040000000000000000000000004c3000 18000000136400f1040000000000000000000000004c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-261.phpt0000644000076500000240000000204213572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx044] strings without E cannot generate E in result --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400fc040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.76"}} 18000000136400fc040000000000000000000000003c3000 18000000136400fc040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-262.phpt0000644000076500000240000000152713572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx042] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400fc040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.76"}} 18000000136400fc040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-263.phpt0000644000076500000240000000203113572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx046] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001100000000000000000000000000403000 {"d":{"$numberDecimal":"17"}} 180000001364001100000000000000000000000000403000 180000001364001100000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-264.phpt0000644000076500000240000000203213572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx049] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364002c00000000000000000000000000403000 {"d":{"$numberDecimal":"44"}} 180000001364002c00000000000000000000000000403000 180000001364002c00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-265.phpt0000644000076500000240000000203113572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx048] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> 
Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364002c00000000000000000000000000403000 {"d":{"$numberDecimal":"44"}} 180000001364002c00000000000000000000000000403000 180000001364002c00000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-266.phpt0000644000076500000240000000200613572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx158] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364002c00000000000000000000000000523000 {"d":{"$numberDecimal":"4.4E+10"}} 180000001364002c00000000000000000000000000523000 180000001364002c00000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-267.phpt0000644000076500000240000000200413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx068] examples --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364003200000000000000000000000000323000 {"d":{"$numberDecimal":"0.0000050"}} 180000001364003200000000000000000000000000323000 180000001364003200000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-268.phpt0000644000076500000240000000201313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx169] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006400000000000000000000000000523000 {"d":{"$numberDecimal":"1.00E+11"}} 180000001364006400000000000000000000000000523000 180000001364006400000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-269.phpt0000644000076500000240000000201213572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx167] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006400000000000000000000000000523000 {"d":{"$numberDecimal":"1.00E+11"}} 180000001364006400000000000000000000000000523000 180000001364006400000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-270.phpt0000644000076500000240000000201213572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx168] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006400000000000000000000000000f43000 {"d":{"$numberDecimal":"1.00E+92"}} 180000001364006400000000000000000000000000f43000 180000001364006400000000000000000000000000f43000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-271.phpt0000644000076500000240000000201113572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx166] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006400000000000000000000000000523000 {"d":{"$numberDecimal":"1.00E+11"}} 180000001364006400000000000000000000000000523000 180000001364006400000000000000000000000000523000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-272.phpt0000644000076500000240000000147013572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx210] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003e3000 {"d":{"$numberDecimal":"126.5"}} 18000000136400f1040000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-273.phpt0000644000076500000240000000200513572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx217] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003e3000 {"d":{"$numberDecimal":"126.5"}} 
18000000136400f1040000000000000000000000003e3000 18000000136400f1040000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-274.phpt0000644000076500000240000000200513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx216] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.65"}} 18000000136400f1040000000000000000000000003c3000 18000000136400f1040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-275.phpt0000644000076500000240000000200313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx218] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000403000 {"d":{"$numberDecimal":"1265"}} 18000000136400f104000000000000000000000000403000 18000000136400f104000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-276.phpt0000644000076500000240000000201613572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx211] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000163000 {"d":{"$numberDecimal":"1.265E-18"}} 18000000136400f104000000000000000000000000163000 18000000136400f104000000000000000000000000163000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-277.phpt0000644000076500000240000000201613572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx223] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000663000 {"d":{"$numberDecimal":"1.265E+22"}} 18000000136400f104000000000000000000000000663000 18000000136400f104000000000000000000000000663000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-278.phpt0000644000076500000240000000200513572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx215] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003a3000 {"d":{"$numberDecimal":"1.265"}} 18000000136400f1040000000000000000000000003a3000 18000000136400f1040000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-279.phpt0000644000076500000240000000201313572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx219] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000423000 {"d":{"$numberDecimal":"1.265E+4"}} 18000000136400f104000000000000000000000000423000 18000000136400f104000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-280.phpt0000644000076500000240000000200713572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx214] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000383000 {"d":{"$numberDecimal":"0.1265"}} 18000000136400f104000000000000000000000000383000 18000000136400f104000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-281.phpt0000644000076500000240000000201313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx220] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000443000 {"d":{"$numberDecimal":"1.265E+5"}} 18000000136400f104000000000000000000000000443000 18000000136400f104000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-282.phpt0000644000076500000240000000201113572250760022355 0ustar 
alcaeusstaff--TEST-- Decimal128: [basx213] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000363000 {"d":{"$numberDecimal":"0.01265"}} 18000000136400f104000000000000000000000000363000 18000000136400f104000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-283.phpt0000644000076500000240000000201313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx221] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000463000 {"d":{"$numberDecimal":"1.265E+6"}} 18000000136400f104000000000000000000000000463000 18000000136400f104000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-284.phpt0000644000076500000240000000202113572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx212] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000002e3000 {"d":{"$numberDecimal":"0.000001265"}} 18000000136400f1040000000000000000000000002e3000 18000000136400f1040000000000000000000000002e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-285.phpt0000644000076500000240000000201513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx222] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000004e3000 {"d":{"$numberDecimal":"1.265E+10"}} 18000000136400f1040000000000000000000000004e3000 18000000136400f1040000000000000000000000004e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-286.phpt0000644000076500000240000000154213572250760022371 0ustar alcaeusstaff--TEST-- Decimal128: [basx006] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e803000000000000000000000000403000 {"d":{"$numberDecimal":"1000"}} 18000000136400e803000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-287.phpt0000644000076500000240000000146613572250760022377 0ustar alcaeusstaff--TEST-- Decimal128: [basx230] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000403000 {"d":{"$numberDecimal":"1265"}} 18000000136400f104000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-288.phpt0000644000076500000240000000200213572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx237] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000403000 {"d":{"$numberDecimal":"1265"}} 18000000136400f104000000000000000000000000403000 18000000136400f104000000000000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-289.phpt0000644000076500000240000000200413572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx236] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003e3000 {"d":{"$numberDecimal":"126.5"}} 18000000136400f1040000000000000000000000003e3000 18000000136400f1040000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-290.phpt0000644000076500000240000000201213572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [basx238] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo 
bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000423000 {"d":{"$numberDecimal":"1.265E+4"}} 18000000136400f104000000000000000000000000423000 18000000136400f104000000000000000000000000423000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-291.phpt0000644000076500000240000000201513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx231] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000183000 {"d":{"$numberDecimal":"1.265E-17"}} 18000000136400f104000000000000000000000000183000 18000000136400f104000000000000000000000000183000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-292.phpt0000644000076500000240000000201513572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [basx243] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000683000 {"d":{"$numberDecimal":"1.265E+23"}} 18000000136400f104000000000000000000000000683000 18000000136400f104000000000000000000000000683000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-293.phpt0000644000076500000240000000200413572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx235] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.65"}} 18000000136400f1040000000000000000000000003c3000 18000000136400f1040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-294.phpt0000644000076500000240000000201213572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx239] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000443000 
{"d":{"$numberDecimal":"1.265E+5"}} 18000000136400f104000000000000000000000000443000 18000000136400f104000000000000000000000000443000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-295.phpt0000644000076500000240000000200413572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx234] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f1040000000000000000000000003a3000 {"d":{"$numberDecimal":"1.265"}} 18000000136400f1040000000000000000000000003a3000 18000000136400f1040000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-296.phpt0000644000076500000240000000201213572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx240] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000463000 {"d":{"$numberDecimal":"1.265E+6"}} 18000000136400f104000000000000000000000000463000 18000000136400f104000000000000000000000000463000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-297.phpt0000644000076500000240000000200613572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [basx233] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000383000 {"d":{"$numberDecimal":"0.1265"}} 18000000136400f104000000000000000000000000383000 18000000136400f104000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-298.phpt0000644000076500000240000000201213572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx241] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000483000 {"d":{"$numberDecimal":"1.265E+7"}} 18000000136400f104000000000000000000000000483000 18000000136400f104000000000000000000000000483000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-299.phpt0000644000076500000240000000201613572250760022372 0ustar alcaeusstaff--TEST-- Decimal128: [basx232] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000303000 {"d":{"$numberDecimal":"0.00001265"}} 18000000136400f104000000000000000000000000303000 18000000136400f104000000000000000000000000303000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-300.phpt0000644000076500000240000000201413572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [basx242] Numbers with E --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f104000000000000000000000000503000 {"d":{"$numberDecimal":"1.265E+11"}} 18000000136400f104000000000000000000000000503000 18000000136400f104000000000000000000000000503000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-301.phpt0000644000076500000240000000154313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx060] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400185c0ace00000000000000000000383000 {"d":{"$numberDecimal":"345678.5432"}} 18000000136400185c0ace00000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-302.phpt0000644000076500000240000000206713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx059] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400f198670c08000000000000000000363000 {"d":{"$numberDecimal":"345678.54321"}} 18000000136400f198670c08000000000000000000363000 18000000136400f198670c08000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-303.phpt0000644000076500000240000000154713572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx058] strings without E cannot 
generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006af90b7c50000000000000000000343000 {"d":{"$numberDecimal":"345678.543210"}} 180000001364006af90b7c50000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-304.phpt0000644000076500000240000000155113572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [basx057] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006a19562522020000000000000000343000 {"d":{"$numberDecimal":"2345678.543210"}} 180000001364006a19562522020000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-305.phpt0000644000076500000240000000155313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [basx056] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006ab9c8733a0b0000000000000000343000 {"d":{"$numberDecimal":"12345678.543210"}} 180000001364006ab9c8733a0b0000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-306.phpt0000644000076500000240000000157213572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx031] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640040af0d8648700000000000000000343000 {"d":{"$numberDecimal":"123456789.000000"}} 1800000013640040af0d8648700000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-307.phpt0000644000076500000240000000157213572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [basx030] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640080910f8648700000000000000000343000 {"d":{"$numberDecimal":"123456789.123456"}} 1800000013640080910f8648700000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-3-valid-308.phpt0000644000076500000240000000157013572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx032] conform to rules and exponent will be in permitted range). --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640080910f8648700000000000000000403000 {"d":{"$numberDecimal":"123456789123456"}} 1800000013640080910f8648700000000000000000403000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-001.phpt0000644000076500000240000000070013572250760023373 0ustar alcaeusstaff--TEST-- Decimal128: [basx564] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-002.phpt0000644000076500000240000000070113572250760023375 0ustar alcaeusstaff--TEST-- Decimal128: [basx565] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-003.phpt0000644000076500000240000000070213572250760023377 0ustar alcaeusstaff--TEST-- Decimal128: [basx566] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-004.phpt0000644000076500000240000000070313572250760023401 0ustar alcaeusstaff--TEST-- Decimal128: [basx567] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-005.phpt0000644000076500000240000000070413572250760023403 0ustar alcaeusstaff--TEST-- Decimal128: [basx568] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-006.phpt0000644000076500000240000000075313572250760023410 0ustar alcaeusstaff--TEST-- Decimal128: [basx590] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO 
NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-007.phpt0000644000076500000240000000070013572250760023401 0ustar alcaeusstaff--TEST-- Decimal128: [basx562] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-008.phpt0000644000076500000240000000070013572250760023402 0ustar alcaeusstaff--TEST-- Decimal128: [basx563] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-009.phpt0000644000076500000240000000075713572250760023417 0ustar alcaeusstaff--TEST-- Decimal128: [dqbas939] overflow results at different rounding modes (Overflow & Inexact & Rounded) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-010.phpt0000644000076500000240000000073713572250760023405 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr534] negatives (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-011.phpt0000644000076500000240000000073713572250760023406 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr535] negatives (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-012.phpt0000644000076500000240000000073713572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr533] negatives (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-013.phpt0000644000076500000240000000073713572250760023410 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr532] negatives (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-014.phpt0000644000076500000240000000076013572250760023405 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr432] check rounding modes heeded (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-015.phpt0000644000076500000240000000076013572250760023406 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr433] check rounding modes heeded (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE 
--FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-016.phpt0000644000076500000240000000076013572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr435] check rounding modes heeded (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-017.phpt0000644000076500000240000000076013572250760023410 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr434] check rounding modes heeded (Rounded & Inexact) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-018.phpt0000644000076500000240000000075613572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: [dqbas938] overflow results at different rounding modes (Overflow & Inexact & Rounded) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-019.phpt0000644000076500000240000000073713572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: Inexact rounding#1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-parseError-020.phpt0000644000076500000240000000065213572250760023402 0ustar alcaeusstaff--TEST-- Decimal128: Inexact rounding#2 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-001.phpt0000644000076500000240000000154213572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx023] conform to rules and exponent will be in permitted range). 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640001000000000000000000000000003eb000 {"d":{"$numberDecimal":"-0.1"}} 1800000013640001000000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-002.phpt0000644000076500000240000000204213572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: [basx045] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640003000000000000000000000000003a3000 {"d":{"$numberDecimal":"0.003"}} 1800000013640003000000000000000000000000003a3000 1800000013640003000000000000000000000000003a3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-003.phpt0000644000076500000240000000176213572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [basx610] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.0"}} 1800000013640000000000000000000000000000003e3000 1800000013640000000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-004.phpt0000644000076500000240000000176513572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [basx612] Zeros --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000000000000000000000000003eb000 {"d":{"$numberDecimal":"-0.0"}} 1800000013640000000000000000000000000000003eb000 1800000013640000000000000000000000000000003eb000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-005.phpt0000644000076500000240000000204213572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [basx043] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400fc040000000000000000000000003c3000 {"d":{"$numberDecimal":"12.76"}} 18000000136400fc040000000000000000000000003c3000 18000000136400fc040000000000000000000000003c3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-006.phpt0000644000076500000240000000204413572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx055] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000500000000000000000000000000303000 {"d":{"$numberDecimal":"5E-8"}} 180000001364000500000000000000000000000000303000 180000001364000500000000000000000000000000303000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-007.phpt0000644000076500000240000000204313572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx054] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000500000000000000000000000000323000 {"d":{"$numberDecimal":"5E-7"}} 180000001364000500000000000000000000000000323000 180000001364000500000000000000000000000000323000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-008.phpt0000644000076500000240000000153513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [basx052] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000500000000000000000000000000343000 {"d":{"$numberDecimal":"0.000005"}} 180000001364000500000000000000000000000000343000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-009.phpt0000644000076500000240000000205013572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [basx051] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // 
Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000500000000000000000000000000363000 {"d":{"$numberDecimal":"0.00005"}} 180000001364000500000000000000000000000000363000 180000001364000500000000000000000000000000363000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-010.phpt0000644000076500000240000000153113572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [basx050] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000500000000000000000000000000383000 {"d":{"$numberDecimal":"0.0005"}} 180000001364000500000000000000000000000000383000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-011.phpt0000644000076500000240000000203213572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [basx047] strings without E cannot generate E in result --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640005000000000000000000000000003e3000 {"d":{"$numberDecimal":"0.5"}} 1800000013640005000000000000000000000000003e3000 1800000013640005000000000000000000000000003e3000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-012.phpt0000644000076500000240000000216513572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [dqbsr431] check rounding modes heeded (Rounded) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640099761cc7b548f377dc80a131c836fe2f00 {"d":{"$numberDecimal":"1.111111111111111111111111111112345"}} 1800000013640099761cc7b548f377dc80a131c836fe2f00 1800000013640099761cc7b548f377dc80a131c836fe2f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-4-valid-013.phpt0000644000076500000240000000214313572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: OK2 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31fc2f00 
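The valid-corpus tests above all drive the same round trip: canonical BSON -> native PHP -> canonical BSON, canonical BSON -> canonical extended JSON, canonical extJSON -> canonical BSON, and (where present) degenerate extJSON -> canonical BSON. Only the echo statements survive in this listing, so here is a hedged reconstruction of one complete --FILE-- body, using decimal128-4-valid-001 and the values shown in its --EXPECT-- block; the require_once line and the hex2bin()/string assignments are assumptions, while the unqualified fromPHP(), toPHP(), fromJSON(), toCanonicalExtendedJSON() and json_canonicalize() calls appear verbatim above and presumably come from the shared test helpers:

<?php

// Hedged reconstruction of the --FILE-- body of
// tests/bson-corpus/decimal128-4-valid-001.phpt. The assignments are
// assumptions derived from the surviving variable names and the
// --EXPECT-- output; only the echo statements are taken from the listing.
require_once __DIR__ . '/../utils/tools.php';

$canonicalBson = hex2bin('1800000013640001000000000000000000000000003eb000');
$canonicalExtJson = '{"d":{"$numberDecimal":"-0.1"}}';

// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n";

// Canonical BSON -> Canonical extJSON
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n";

// Canonical extJSON -> Canonical BSON
echo bin2hex(fromJSON($canonicalExtJson)), "\n";

?>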
{"d":{"$numberDecimal":"0.1000000000000000000000000000000000"}} 18000000136400000000000a5bc138938d44c64d31fc2f00 18000000136400000000000a5bc138938d44c64d31fc2f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-001.phpt0000644000076500000240000000214213572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [decq035] fold-downs (more below) (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000807f1bcf85b27059c8a43cfe5f00 {"d":{"$numberDecimal":"1.230000000000000000000000000000000E+6144"}} 18000000136400000000807f1bcf85b27059c8a43cfe5f00 18000000136400000000807f1bcf85b27059c8a43cfe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-002.phpt0000644000076500000240000000213713572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq037] fold-downs (more below) (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000000000E+6144"}} 18000000136400000000000a5bc138938d44c64d31fe5f00 18000000136400000000000a5bc138938d44c64d31fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-003.phpt0000644000076500000240000000217013572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq077] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000081efac855b416d2dee04000000 {"d":{"$numberDecimal":"1.00000000000000000000000000000000E-6144"}} 180000001364000000000081efac855b416d2dee04000000 180000001364000000000081efac855b416d2dee04000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-004.phpt0000644000076500000240000000161213572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq078] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000081efac855b416d2dee04000000 
{"d":{"$numberDecimal":"1.00000000000000000000000000000000E-6144"}} 180000001364000000000081efac855b416d2dee04000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-005.phpt0000644000076500000240000000207213572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq079] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000000000 {"d":{"$numberDecimal":"1.0E-6175"}} 180000001364000a00000000000000000000000000000000 180000001364000a00000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-006.phpt0000644000076500000240000000151413572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq080] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000000000 {"d":{"$numberDecimal":"1.0E-6175"}} 180000001364000a00000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-007.phpt0000644000076500000240000000206513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq081] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000020000 {"d":{"$numberDecimal":"1E-6175"}} 180000001364000100000000000000000000000000020000 180000001364000100000000000000000000000000020000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-008.phpt0000644000076500000240000000151013572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq082] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000020000 {"d":{"$numberDecimal":"1E-6175"}} 180000001364000100000000000000000000000000020000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-009.phpt0000644000076500000240000000206613572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq083] Nmin and below (Subnormal) --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000000000 {"d":{"$numberDecimal":"1E-6176"}} 180000001364000100000000000000000000000000000000 180000001364000100000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-010.phpt0000644000076500000240000000151013572250760022347 0ustar alcaeusstaff--TEST-- Decimal128: [decq084] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000000000 {"d":{"$numberDecimal":"1E-6176"}} 180000001364000100000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-011.phpt0000644000076500000240000000210513572250760022351 0ustar alcaeusstaff--TEST-- Decimal128: [decq090] underflows cannot be tested for simple copies, check edge cases (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000000000 {"d":{"$numberDecimal":"1E-6176"}} 180000001364000100000000000000000000000000000000 180000001364000100000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-012.phpt0000644000076500000240000000224713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq100] underflows cannot be tested for simple copies, check edge cases (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff095bc138938d44c64d31000000 {"d":{"$numberDecimal":"9.99999999999999999999999999999999E-6144"}} 18000000136400ffffffff095bc138938d44c64d31000000 18000000136400ffffffff095bc138938d44c64d31000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-013.phpt0000644000076500000240000000214513572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq130] fold-downs (more below) (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical 
BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000807f1bcf85b27059c8a43cfedf00 {"d":{"$numberDecimal":"-1.230000000000000000000000000000000E+6144"}} 18000000136400000000807f1bcf85b27059c8a43cfedf00 18000000136400000000807f1bcf85b27059c8a43cfedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-014.phpt0000644000076500000240000000214213572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq132] fold-downs (more below) (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31fedf00 {"d":{"$numberDecimal":"-1.000000000000000000000000000000000E+6144"}} 18000000136400000000000a5bc138938d44c64d31fedf00 18000000136400000000000a5bc138938d44c64d31fedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-015.phpt0000644000076500000240000000217313572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq177] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000081efac855b416d2dee04008000 {"d":{"$numberDecimal":"-1.00000000000000000000000000000000E-6144"}} 180000001364000000000081efac855b416d2dee04008000 180000001364000000000081efac855b416d2dee04008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-016.phpt0000644000076500000240000000161413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq178] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000081efac855b416d2dee04008000 {"d":{"$numberDecimal":"-1.00000000000000000000000000000000E-6144"}} 180000001364000000000081efac855b416d2dee04008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-017.phpt0000644000076500000240000000207513572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq179] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> 
Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000008000 {"d":{"$numberDecimal":"-1.0E-6175"}} 180000001364000a00000000000000000000000000008000 180000001364000a00000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-018.phpt0000644000076500000240000000151613572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq180] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000008000 {"d":{"$numberDecimal":"-1.0E-6175"}} 180000001364000a00000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-019.phpt0000644000076500000240000000207013572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq181] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000028000 {"d":{"$numberDecimal":"-1E-6175"}} 180000001364000100000000000000000000000000028000 180000001364000100000000000000000000000000028000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-020.phpt0000644000076500000240000000151213572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [decq182] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000028000 {"d":{"$numberDecimal":"-1E-6175"}} 180000001364000100000000000000000000000000028000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-021.phpt0000644000076500000240000000207113572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq183] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000008000 
{"d":{"$numberDecimal":"-1E-6176"}} 180000001364000100000000000000000000000000008000 180000001364000100000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-022.phpt0000644000076500000240000000151213572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq184] Nmin and below (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000008000 {"d":{"$numberDecimal":"-1E-6176"}} 180000001364000100000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-023.phpt0000644000076500000240000000203513572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq190] underflow edge cases (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000100000000000000000000000000008000 {"d":{"$numberDecimal":"-1E-6176"}} 180000001364000100000000000000000000000000008000 180000001364000100000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-024.phpt0000644000076500000240000000217713572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq200] underflow edge cases (Subnormal) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400ffffffff095bc138938d44c64d31008000 {"d":{"$numberDecimal":"-9.99999999999999999999999999999999E-6144"}} 18000000136400ffffffff095bc138938d44c64d31008000 18000000136400ffffffff095bc138938d44c64d31008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-025.phpt0000644000076500000240000000201113572250760022352 0ustar alcaeusstaff--TEST-- Decimal128: [decq400] zeros (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000000000 {"d":{"$numberDecimal":"0E-6176"}} 180000001364000000000000000000000000000000000000 180000001364000000000000000000000000000000000000 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-026.phpt0000644000076500000240000000201113572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq401] zeros (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000000000 {"d":{"$numberDecimal":"0E-6176"}} 180000001364000000000000000000000000000000000000 180000001364000000000000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-027.phpt0000644000076500000240000000202413572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq414] clamped zeros... (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fe5f00 {"d":{"$numberDecimal":"0E+6111"}} 180000001364000000000000000000000000000000fe5f00 180000001364000000000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-028.phpt0000644000076500000240000000202413572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq416] clamped zeros... (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fe5f00 {"d":{"$numberDecimal":"0E+6111"}} 180000001364000000000000000000000000000000fe5f00 180000001364000000000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-029.phpt0000644000076500000240000000202413572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq418] clamped zeros... 
(Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fe5f00 {"d":{"$numberDecimal":"0E+6111"}} 180000001364000000000000000000000000000000fe5f00 180000001364000000000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-030.phpt0000644000076500000240000000202513572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq420] negative zeros (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000008000 {"d":{"$numberDecimal":"-0E-6176"}} 180000001364000000000000000000000000000000008000 180000001364000000000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-031.phpt0000644000076500000240000000202513572250760022354 0ustar alcaeusstaff--TEST-- Decimal128: [decq421] negative zeros (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000008000 {"d":{"$numberDecimal":"-0E-6176"}} 180000001364000000000000000000000000000000008000 180000001364000000000000000000000000000000008000 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-032.phpt0000644000076500000240000000202713572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq434] clamped zeros... (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fedf00 {"d":{"$numberDecimal":"-0E+6111"}} 180000001364000000000000000000000000000000fedf00 180000001364000000000000000000000000000000fedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-033.phpt0000644000076500000240000000202713572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq436] clamped zeros... 
(Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fedf00 {"d":{"$numberDecimal":"-0E+6111"}} 180000001364000000000000000000000000000000fedf00 180000001364000000000000000000000000000000fedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-034.phpt0000644000076500000240000000202713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq438] clamped zeros... (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000000000000000000000000fedf00 {"d":{"$numberDecimal":"-0E+6111"}} 180000001364000000000000000000000000000000fedf00 180000001364000000000000000000000000000000fedf00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-035.phpt0000644000076500000240000000213713572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq601] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000000a5bc138938d44c64d31fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000000000E+6144"}} 18000000136400000000000a5bc138938d44c64d31fe5f00 18000000136400000000000a5bc138938d44c64d31fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-036.phpt0000644000076500000240000000213513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq603] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000000081efac855b416d2dee04fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000000000000E+6143"}} 180000001364000000000081efac855b416d2dee04fe5f00 180000001364000000000081efac855b416d2dee04fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-037.phpt0000644000076500000240000000213313572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq605] fold-down full sequence 
(Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000080264b91c02220be377e00fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000000000000E+6142"}} 1800000013640000000080264b91c02220be377e00fe5f00 1800000013640000000080264b91c02220be377e00fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-038.phpt0000644000076500000240000000213113572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq607] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000000040eaed7446d09c2c9f0c00fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000000E+6141"}} 1800000013640000000040eaed7446d09c2c9f0c00fe5f00 1800000013640000000040eaed7446d09c2c9f0c00fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-039.phpt0000644000076500000240000000212713572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq609] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000a0ca17726dae0f1e430100fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000000000E+6140"}} 18000000136400000000a0ca17726dae0f1e430100fe5f00 18000000136400000000a0ca17726dae0f1e430100fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-040.phpt0000644000076500000240000000212513572250760022355 0ustar alcaeusstaff--TEST-- Decimal128: [decq611] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000106102253e5ece4f200000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000000000E+6139"}} 18000000136400000000106102253e5ece4f200000fe5f00 18000000136400000000106102253e5ece4f200000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-041.phpt0000644000076500000240000000212313572250760022354 0ustar 
alcaeusstaff--TEST-- Decimal128: [decq613] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000e83c80d09f3c2e3b030000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000000E+6138"}} 18000000136400000000e83c80d09f3c2e3b030000fe5f00 18000000136400000000e83c80d09f3c2e3b030000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-042.phpt0000644000076500000240000000212113572250760022353 0ustar alcaeusstaff--TEST-- Decimal128: [decq615] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000e4d20cc8dcd2b752000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000000E+6137"}} 18000000136400000000e4d20cc8dcd2b752000000fe5f00 18000000136400000000e4d20cc8dcd2b752000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-043.phpt0000644000076500000240000000211713572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq617] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000004a48011416954508000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000000E+6136"}} 180000001364000000004a48011416954508000000fe5f00 180000001364000000004a48011416954508000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-044.phpt0000644000076500000240000000211513572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq619] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000000a1edccce1bc2d300000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000000E+6135"}} 18000000136400000000a1edccce1bc2d300000000fe5f00 18000000136400000000a1edccce1bc2d300000000fe5f00 
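The fold-down (Clamped) run from decq601 onward walks the coefficient down one digit at a time while the encoded exponent stays pinned at the top of the range; every --EXPECT-- hex in this run shares the trailing bytes fe5f00. Any of these blocks can be checked against the extension's public API directly. A hedged sketch using the values copied from the --EXPECT-- block of decimal128-5-valid-044 above, with the fully qualified MongoDB\BSON\* functions that the tests' unqualified helpers presumably wrap, and a json_encode(json_decode()) normalization standing in for the suite's json_canonicalize() helper:

<?php

// Hedged sketch (not part of the generated suite): verify one Clamped
// fold-down case against the driver's public BSON API. The hex and extJSON
// values are copied from the --EXPECT-- block of decimal128-5-valid-044.
$expectedHex  = '18000000136400000000a1edccce1bc2d300000000fe5f00';
$expectedJson = '{"d":{"$numberDecimal":"1.000000000000000000000000E+6135"}}';

$bson = hex2bin($expectedHex);

// Canonical BSON -> native PHP -> canonical BSON reproduces the same bytes.
var_dump(bin2hex(MongoDB\BSON\fromPHP(MongoDB\BSON\toPHP($bson))) === $expectedHex);

// Whitespace-normalize the extended JSON before comparing; presumably what
// the suite's json_canonicalize() helper does.
$canonicalize = static function (string $json): string {
    return json_encode(json_decode($json));
};
var_dump($canonicalize(MongoDB\BSON\toCanonicalExtendedJSON($bson)) === $expectedJson);

?>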
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-045.phpt0000644000076500000240000000211313572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq621] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000080f64ae1c7022d1500000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000000E+6134"}} 18000000136400000080f64ae1c7022d1500000000fe5f00 18000000136400000080f64ae1c7022d1500000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-046.phpt0000644000076500000240000000211113572250760022356 0ustar alcaeusstaff--TEST-- Decimal128: [decq623] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000040b2bac9e0191e0200000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000000E+6133"}} 18000000136400000040b2bac9e0191e0200000000fe5f00 18000000136400000040b2bac9e0191e0200000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-047.phpt0000644000076500000240000000210713572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq625] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000a0dec5adc935360000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000000E+6132"}} 180000001364000000a0dec5adc935360000000000fe5f00 180000001364000000a0dec5adc935360000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-048.phpt0000644000076500000240000000210513572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq627] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000010632d5ec76b050000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000000E+6131"}} 18000000136400000010632d5ec76b050000000000fe5f00 
18000000136400000010632d5ec76b050000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-049.phpt0000644000076500000240000000210313572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq629] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000e8890423c78a000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000000E+6130"}} 180000001364000000e8890423c78a000000000000fe5f00 180000001364000000e8890423c78a000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-050.phpt0000644000076500000240000000210113572250760022350 0ustar alcaeusstaff--TEST-- Decimal128: [decq631] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400000064a7b3b6e00d000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000000E+6129"}} 18000000136400000064a7b3b6e00d000000000000fe5f00 18000000136400000064a7b3b6e00d000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-051.phpt0000644000076500000240000000207713572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq633] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000008a5d78456301000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000000E+6128"}} 1800000013640000008a5d78456301000000000000fe5f00 1800000013640000008a5d78456301000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-052.phpt0000644000076500000240000000207513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq635] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000000c16ff2862300000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000000E+6127"}} 
180000001364000000c16ff2862300000000000000fe5f00 180000001364000000c16ff2862300000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-053.phpt0000644000076500000240000000207313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq637] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000080c6a47e8d0300000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000000E+6126"}} 180000001364000080c6a47e8d0300000000000000fe5f00 180000001364000080c6a47e8d0300000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-054.phpt0000644000076500000240000000207113572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq639] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000407a10f35a0000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000000E+6125"}} 1800000013640000407a10f35a0000000000000000fe5f00 1800000013640000407a10f35a0000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-055.phpt0000644000076500000240000000206713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq641] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000a0724e18090000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000000E+6124"}} 1800000013640000a0724e18090000000000000000fe5f00 1800000013640000a0724e18090000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-056.phpt0000644000076500000240000000206513572250760022367 0ustar alcaeusstaff--TEST-- Decimal128: [decq643] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000010a5d4e8000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000000E+6123"}} 
180000001364000010a5d4e8000000000000000000fe5f00 180000001364000010a5d4e8000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-057.phpt0000644000076500000240000000206313572250760022366 0ustar alcaeusstaff--TEST-- Decimal128: [decq645] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000e8764817000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000000E+6122"}} 1800000013640000e8764817000000000000000000fe5f00 1800000013640000e8764817000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-058.phpt0000644000076500000240000000206113572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq647] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000e40b5402000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000000E+6121"}} 1800000013640000e40b5402000000000000000000fe5f00 1800000013640000e40b5402000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-059.phpt0000644000076500000240000000205713572250760022373 0ustar alcaeusstaff--TEST-- Decimal128: [decq649] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000ca9a3b00000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000000000E+6120"}} 1800000013640000ca9a3b00000000000000000000fe5f00 1800000013640000ca9a3b00000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-060.phpt0000644000076500000240000000205513572250760022361 0ustar alcaeusstaff--TEST-- Decimal128: [decq651] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640000e1f50500000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000000E+6119"}} 
1800000013640000e1f50500000000000000000000fe5f00 1800000013640000e1f50500000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-061.phpt0000644000076500000240000000205313572250760022360 0ustar alcaeusstaff--TEST-- Decimal128: [decq653] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364008096980000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000000E+6118"}} 180000001364008096980000000000000000000000fe5f00 180000001364008096980000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-062.phpt0000644000076500000240000000205113572250760022357 0ustar alcaeusstaff--TEST-- Decimal128: [decq655] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1800000013640040420f0000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000000E+6117"}} 1800000013640040420f0000000000000000000000fe5f00 1800000013640040420f0000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-063.phpt0000644000076500000240000000204713572250760022365 0ustar alcaeusstaff--TEST-- Decimal128: [decq657] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400a086010000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00000E+6116"}} 18000000136400a086010000000000000000000000fe5f00 18000000136400a086010000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-064.phpt0000644000076500000240000000204513572250760022364 0ustar alcaeusstaff--TEST-- Decimal128: [decq659] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364001027000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0000E+6115"}} 
180000001364001027000000000000000000000000fe5f00 180000001364001027000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-065.phpt0000644000076500000240000000204313572250760022363 0ustar alcaeusstaff--TEST-- Decimal128: [decq661] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 18000000136400e803000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.000E+6114"}} 18000000136400e803000000000000000000000000fe5f00 18000000136400e803000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-066.phpt0000644000076500000240000000204113572250760022362 0ustar alcaeusstaff--TEST-- Decimal128: [decq663] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364006400000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.00E+6113"}} 180000001364006400000000000000000000000000fe5f00 180000001364006400000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-5-valid-067.phpt0000644000076500000240000000203713572250760022370 0ustar alcaeusstaff--TEST-- Decimal128: [decq665] fold-down full sequence (Clamped) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000001364000a00000000000000000000000000fe5f00 {"d":{"$numberDecimal":"1.0E+6112"}} 180000001364000a00000000000000000000000000fe5f00 180000001364000a00000000000000000000000000fe5f00 ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-001.phpt0000644000076500000240000000064613572250760023406 0ustar alcaeusstaff--TEST-- Decimal128: Incomplete Exponent --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-002.phpt0000644000076500000240000000065513572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: Exponent at the beginning --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-003.phpt0000644000076500000240000000064613572250760023410 0ustar alcaeusstaff--TEST-- Decimal128: Just a decimal place --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-004.phpt0000644000076500000240000000064413572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: 2 decimal places --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-005.phpt0000644000076500000240000000064613572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: 2 decimal places --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-006.phpt0000644000076500000240000000064513572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: 2 decimal places --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-007.phpt0000644000076500000240000000064613572250760023414 0ustar alcaeusstaff--TEST-- Decimal128: 2 decimal places --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-008.phpt0000644000076500000240000000064613572250760023415 0ustar alcaeusstaff--TEST-- Decimal128: 2 decimal places --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-009.phpt0000644000076500000240000000065113572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: Decimal with no digits --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-010.phpt0000644000076500000240000000063613572250760023405 0ustar alcaeusstaff--TEST-- Decimal128: 2 signs --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-011.phpt0000644000076500000240000000063613572250760023406 0ustar alcaeusstaff--TEST-- Decimal128: 2 signs --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-012.phpt0000644000076500000240000000064713572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: 2 negative signs --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got 
MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-013.phpt0000644000076500000240000000064713572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: 2 negative signs --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-014.phpt0000644000076500000240000000065213572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: End in negative sign --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-015.phpt0000644000076500000240000000065213572250760023410 0ustar alcaeusstaff--TEST-- Decimal128: 2 negative signs --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-016.phpt0000644000076500000240000000065213572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: 2 negative signs --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-017.phpt0000644000076500000240000000064113572250760023410 0ustar alcaeusstaff--TEST-- Decimal128: 2 signs --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-018.phpt0000644000076500000240000000063513572250760023414 0ustar alcaeusstaff--TEST-- Decimal128: Empty string --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-019.phpt0000644000076500000240000000066613572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: leading white space positive number --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-020.phpt0000644000076500000240000000066713572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: leading white space negative number --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-021.phpt0000644000076500000240000000064713572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: trailing white space --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-022.phpt0000644000076500000240000000063113572250760023403 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php 
DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-023.phpt0000644000076500000240000000063713572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-024.phpt0000644000076500000240000000063113572250760023405 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-025.phpt0000644000076500000240000000063213572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-026.phpt0000644000076500000240000000063313572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-027.phpt0000644000076500000240000000063213572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-028.phpt0000644000076500000240000000063313572250760023413 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-029.phpt0000644000076500000240000000063713572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-030.phpt0000644000076500000240000000064313572250760023405 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-6-parseError-031.phpt0000644000076500000240000000064413572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: Invalid --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-001.phpt0000644000076500000240000000070113572250760023377 0ustar alcaeusstaff--TEST-- Decimal128: [basx572] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS 
FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-002.phpt0000644000076500000240000000075513572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: [basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-003.phpt0000644000076500000240000000076013572250760023406 0ustar alcaeusstaff--TEST-- Decimal128: [basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-004.phpt0000644000076500000240000000076013572250760023407 0ustar alcaeusstaff--TEST-- Decimal128: [basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-005.phpt0000644000076500000240000000076013572250760023410 0ustar alcaeusstaff--TEST-- Decimal128: [basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-006.phpt0000644000076500000240000000070013572250760023403 0ustar alcaeusstaff--TEST-- Decimal128: [basx569] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-007.phpt0000644000076500000240000000070113572250760023405 0ustar alcaeusstaff--TEST-- Decimal128: [basx571] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-008.phpt0000644000076500000240000000070113572250760023406 0ustar alcaeusstaff--TEST-- Decimal128: [basx575] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-009.phpt0000644000076500000240000000075513572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: [basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-010.phpt0000644000076500000240000000075513572250760023410 0ustar 
alcaeusstaff--TEST-- Decimal128: [basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-011.phpt0000644000076500000240000000075513572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: [basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-012.phpt0000644000076500000240000000075513572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: [basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-013.phpt0000644000076500000240000000075513572250760023413 0ustar alcaeusstaff--TEST-- Decimal128: [basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-014.phpt0000644000076500000240000000075613572250760023415 0ustar alcaeusstaff--TEST-- Decimal128: [basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-015.phpt0000644000076500000240000000075613572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: [basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-016.phpt0000644000076500000240000000075313572250760023414 0ustar alcaeusstaff--TEST-- Decimal128: [basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-017.phpt0000644000076500000240000000075413572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: [basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-018.phpt0000644000076500000240000000075213572250760023415 0ustar alcaeusstaff--TEST-- Decimal128: [basx519] The 'baddies' tests from DiagBigDecimal, plus some new 
ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-019.phpt0000644000076500000240000000075613572250760023422 0ustar alcaeusstaff--TEST-- Decimal128: [basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-020.phpt0000644000076500000240000000075513572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: [basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-021.phpt0000644000076500000240000000074613572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: [basx577] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-022.phpt0000644000076500000240000000074713572250760023414 0ustar alcaeusstaff--TEST-- Decimal128: [basx578] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-023.phpt0000644000076500000240000000074513572250760023413 0ustar alcaeusstaff--TEST-- Decimal128: [basx581] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-024.phpt0000644000076500000240000000074613572250760023415 0ustar alcaeusstaff--TEST-- Decimal128: [basx582] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-025.phpt0000644000076500000240000000074713572250760023417 0ustar alcaeusstaff--TEST-- Decimal128: [basx583] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-026.phpt0000644000076500000240000000074613572250760023417 0ustar alcaeusstaff--TEST-- Decimal128: [basx579] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- 
OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-027.phpt0000644000076500000240000000074513572250760023417 0ustar alcaeusstaff--TEST-- Decimal128: [basx580] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-028.phpt0000644000076500000240000000074613572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: [basx584] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-029.phpt0000644000076500000240000000074513572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: [basx585] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-030.phpt0000644000076500000240000000074713572250760023413 0ustar alcaeusstaff--TEST-- Decimal128: [basx589] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-031.phpt0000644000076500000240000000074613572250760023413 0ustar alcaeusstaff--TEST-- Decimal128: [basx586] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-032.phpt0000644000076500000240000000074713572250760023415 0ustar alcaeusstaff--TEST-- Decimal128: [basx587] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-033.phpt0000644000076500000240000000075513572250760023415 0ustar alcaeusstaff--TEST-- Decimal128: [basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-034.phpt0000644000076500000240000000070013572250760023404 0ustar alcaeusstaff--TEST-- Decimal128: [basx561] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-035.phpt0000644000076500000240000000070013572250760023405 0ustar 
alcaeusstaff--TEST-- Decimal128: [basx573] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-036.phpt0000644000076500000240000000075013572250760023413 0ustar alcaeusstaff--TEST-- Decimal128: [basx588] some baddies with dots and Es and dots and specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-037.phpt0000644000076500000240000000075513572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: [basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-038.phpt0000644000076500000240000000075713572250760023424 0ustar alcaeusstaff--TEST-- Decimal128: [basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-039.phpt0000644000076500000240000000075713572250760023425 0ustar alcaeusstaff--TEST-- Decimal128: [basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-040.phpt0000644000076500000240000000075313572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: [basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-041.phpt0000644000076500000240000000070013572250760023402 0ustar alcaeusstaff--TEST-- Decimal128: [basx574] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-042.phpt0000644000076500000240000000076013572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: [basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-043.phpt0000644000076500000240000000075613572250760023417 0ustar alcaeusstaff--TEST-- Decimal128: [basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT 
EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-044.phpt0000644000076500000240000000075713572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: [basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-045.phpt0000644000076500000240000000076213572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: [basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-046.phpt0000644000076500000240000000076013572250760023415 0ustar alcaeusstaff--TEST-- Decimal128: [basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-047.phpt0000644000076500000240000000076013572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: [basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-048.phpt0000644000076500000240000000075613572250760023424 0ustar alcaeusstaff--TEST-- Decimal128: [basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-049.phpt0000644000076500000240000000075613572250760023425 0ustar alcaeusstaff--TEST-- Decimal128: [basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-050.phpt0000644000076500000240000000075713572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: [basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-051.phpt0000644000076500000240000000075713572250760023417 0ustar alcaeusstaff--TEST-- Decimal128: [basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got 
MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-052.phpt0000644000076500000240000000075713572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: [basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-053.phpt0000644000076500000240000000075713572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: [basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-054.phpt0000644000076500000240000000075713572250760023422 0ustar alcaeusstaff--TEST-- Decimal128: [basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-055.phpt0000644000076500000240000000075613572250760023422 0ustar alcaeusstaff--TEST-- Decimal128: [basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-056.phpt0000644000076500000240000000075513572250760023422 0ustar alcaeusstaff--TEST-- Decimal128: [basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-057.phpt0000644000076500000240000000075413572250760023422 0ustar alcaeusstaff--TEST-- Decimal128: [basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-058.phpt0000644000076500000240000000075513572250760023424 0ustar alcaeusstaff--TEST-- Decimal128: [basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-059.phpt0000644000076500000240000000075713572250760023427 0ustar alcaeusstaff--TEST-- Decimal128: [basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException 
===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-060.phpt0000644000076500000240000000076013572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: [basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-061.phpt0000644000076500000240000000076013572250760023412 0ustar alcaeusstaff--TEST-- Decimal128: [basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-062.phpt0000644000076500000240000000076013572250760023413 0ustar alcaeusstaff--TEST-- Decimal128: [basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-063.phpt0000644000076500000240000000076013572250760023414 0ustar alcaeusstaff--TEST-- Decimal128: [basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-064.phpt0000644000076500000240000000075413572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: [basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-065.phpt0000644000076500000240000000076213572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: [basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-066.phpt0000644000076500000240000000070013572250760023411 0ustar alcaeusstaff--TEST-- Decimal128: [basx570] Near-specials (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-067.phpt0000644000076500000240000000075513572250760023424 0ustar alcaeusstaff--TEST-- Decimal128: [basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-068.phpt0000644000076500000240000000075513572250760023425 0ustar 
alcaeusstaff--TEST-- Decimal128: [basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-069.phpt0000644000076500000240000000075513572250760023426 0ustar alcaeusstaff--TEST-- Decimal128: [basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-070.phpt0000644000076500000240000000075713572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: [basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-071.phpt0000644000076500000240000000075613572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: [basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-072.phpt0000644000076500000240000000076213572250760023416 0ustar alcaeusstaff--TEST-- Decimal128: [basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-073.phpt0000644000076500000240000000076213572250760023417 0ustar alcaeusstaff--TEST-- Decimal128: [basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-074.phpt0000644000076500000240000000076213572250760023420 0ustar alcaeusstaff--TEST-- Decimal128: [basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-075.phpt0000644000076500000240000000076213572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: [basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-076.phpt0000644000076500000240000000076213572250760023422 0ustar alcaeusstaff--TEST-- Decimal128: [basx539] The 'baddies' tests from DiagBigDecimal, plus some new 
ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-077.phpt0000644000076500000240000000076213572250760023423 0ustar alcaeusstaff--TEST-- Decimal128: [basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-078.phpt0000644000076500000240000000076013572250760023422 0ustar alcaeusstaff--TEST-- Decimal128: [basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-079.phpt0000644000076500000240000000077013572250760023424 0ustar alcaeusstaff--TEST-- Decimal128: [basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/decimal128-7-parseError-080.phpt0000644000076500000240000000076613572250760023421 0ustar alcaeusstaff--TEST-- Decimal128: [basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE===mongodb-1.6.1/tests/bson-corpus/document-decodeError-001.phpt0000644000076500000240000000103713572250760023334 0ustar alcaeusstaff--TEST-- Document type (sub-documents): Subdocument length too long: eats outer terminator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/document-decodeError-002.phpt0000644000076500000240000000102513572250760023332 0ustar alcaeusstaff--TEST-- Document type (sub-documents): Subdocument length too short: leaks terminator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/document-decodeError-003.phpt0000644000076500000240000000104413572250760023334 0ustar alcaeusstaff--TEST-- Document type (sub-documents): Invalid subdocument: bad string length in field --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/document-valid-001.phpt0000644000076500000240000000131313572250760022173 0ustar alcaeusstaff--TEST-- Document type (sub-documents): Empty subdoc --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON 
echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d000000037800050000000000 {"x":{}} 0d000000037800050000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/document-valid-002.phpt0000644000076500000240000000142413572250760022177 0ustar alcaeusstaff--TEST-- Document type (sub-documents): Empty-string key subdoc --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 150000000378000d00000002000200000062000000 {"x":{"":"b"}} 150000000378000d00000002000200000062000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/document-valid-003.phpt0000644000076500000240000000144013572250760022176 0ustar alcaeusstaff--TEST-- Document type (sub-documents): Single-character key subdoc --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 160000000378000e0000000261000200000062000000 {"x":{"a":"b"}} 160000000378000e0000000261000200000062000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/double-decodeError-001.phpt0000644000076500000240000000072313572250760022771 0ustar alcaeusstaff--TEST-- Double type: double truncated --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-001.phpt0000644000076500000240000000201313572250760021625 0ustar alcaeusstaff--TEST-- Double type: +1.0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000000000f03f00 {"d":{"$numberDouble":"1.0"}} {"d":1} 10000000016400000000000000f03f00 {"d":1} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-002.phpt0000644000076500000240000000202013572250760021624 0ustar alcaeusstaff--TEST-- Double type: -1.0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed 
extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000000000f0bf00 {"d":{"$numberDouble":"-1.0"}} {"d":-1} 10000000016400000000000000f0bf00 {"d":-1} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-003.phpt0000644000076500000240000000212713572250760021635 0ustar alcaeusstaff--TEST-- Double type: +1.0001220703125 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000008000f03f00 {"d":{"$numberDouble":"1.0001220703125"}} {"d":1.0001220703125} 10000000016400000000008000f03f00 {"d":1.0001220703125} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-004.phpt0000644000076500000240000000213413572250760021634 0ustar alcaeusstaff--TEST-- Double type: -1.0001220703125 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000008000f0bf00 {"d":{"$numberDouble":"-1.0001220703125"}} {"d":-1.0001220703125} 10000000016400000000008000f0bf00 {"d":-1.0001220703125} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-005.phpt0000644000076500000240000000230513572250760021635 0ustar alcaeusstaff--TEST-- Double type: 1.23456789012345677E+18 --XFAIL-- Variation in double's string representation (SPEC-850) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 1000000001640081e97df41022b14300 {"d":{"$numberDouble":"1.23456789012345677E+18"}} {"d":1.2345678901234568e+18} 1000000001640081e97df41022b14300 {"d":1.2345678901234568e+18} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-006.phpt0000644000076500000240000000231313572250760021635 0ustar alcaeusstaff--TEST-- Double type: -1.23456789012345677E+18 --XFAIL-- Variation in double's string representation (SPEC-850) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- 
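<?php
/*
 * Sketch of the preamble these generated corpus tests share, filling the gap
 * before the round-trip assertions that follow. The helper include path and
 * the exact fixture literals are assumptions; the hex and JSON values below
 * are taken from this test's --EXPECT-- block ("Double type:
 * -1.23456789012345677E+18").
 */
require_once __DIR__ . '/../utils/tools.php';

$canonicalBson = hex2bin('1000000001640081e97df41022b1c300');
$canonicalExtJson = '{"d" : {"$numberDouble": "-1.23456789012345677E+18"}}';
$relaxedExtJson = '{"d" : -1.2345678901234568e+18}';

// Canonical BSON ->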
Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 1000000001640081e97df41022b1c300 {"d":{"$numberDouble":"-1.23456789012345677E+18"}} {"d":-1.2345678901234568e+18} 1000000001640081e97df41022b1c300 {"d":-1.2345678901234568e+18} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-007.phpt0000644000076500000240000000201213572250760021632 0ustar alcaeusstaff--TEST-- Double type: 0.0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000000000000000 {"d":{"$numberDouble":"0.0"}} {"d":0} 10000000016400000000000000000000 {"d":0} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-008.phpt0000644000076500000240000000202013572250760021632 0ustar alcaeusstaff--TEST-- Double type: -0.0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000000000008000 {"d":{"$numberDouble":"-0.0"}} {"d":-0} 10000000016400000000000000008000 {"d":-0} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-009.phpt0000644000076500000240000000171613572250760021646 0ustar alcaeusstaff--TEST-- Double type: NaN --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000000000f87f00 {"d":{"$numberDouble":"NaN"}} {"d":{"$numberDouble":"NaN"}} {"d":{"$numberDouble":"NaN"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-010.phpt0000644000076500000240000000173313572250760021635 0ustar alcaeusstaff--TEST-- Double type: NaN with payload --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400120000000000f87f00 {"d":{"$numberDouble":"NaN"}} {"d":{"$numberDouble":"NaN"}} {"d":{"$numberDouble":"NaN"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-011.phpt0000644000076500000240000000214213572250760021631 0ustar alcaeusstaff--TEST-- Double type: Inf --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000000000f07f00 {"d":{"$numberDouble":"Infinity"}} {"d":{"$numberDouble":"Infinity"}} 10000000016400000000000000f07f00 {"d":{"$numberDouble":"Infinity"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/double-valid-012.phpt0000644000076500000240000000215013572250760021631 0ustar alcaeusstaff--TEST-- Double type: -Inf --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000016400000000000000f0ff00 {"d":{"$numberDouble":"-Infinity"}} {"d":{"$numberDouble":"-Infinity"}} 10000000016400000000000000f0ff00 {"d":{"$numberDouble":"-Infinity"}} ===DONE===mongodb-1.6.1/tests/bson-corpus/int32-decodeError-001.phpt0000644000076500000240000000072213572250760022455 0ustar alcaeusstaff--TEST-- Int32 type: Bad int32 field length --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/int32-valid-001.phpt0000644000076500000240000000203413572250760021315 0ustar alcaeusstaff--TEST-- Int32 type: MinValue --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), 
"\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 0c0000001069000000008000 {"i":{"$numberInt":"-2147483648"}} {"i":-2147483648} 0c0000001069000000008000 {"i":-2147483648} ===DONE===mongodb-1.6.1/tests/bson-corpus/int32-valid-002.phpt0000644000076500000240000000202713572250760021320 0ustar alcaeusstaff--TEST-- Int32 type: MaxValue --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 0c000000106900ffffff7f00 {"i":{"$numberInt":"2147483647"}} {"i":2147483647} 0c000000106900ffffff7f00 {"i":2147483647} ===DONE===mongodb-1.6.1/tests/bson-corpus/int32-valid-003.phpt0000644000076500000240000000175113572250760021324 0ustar alcaeusstaff--TEST-- Int32 type: -1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 0c000000106900ffffffff00 {"i":{"$numberInt":"-1"}} {"i":-1} 0c000000106900ffffffff00 {"i":-1} ===DONE===mongodb-1.6.1/tests/bson-corpus/int32-valid-004.phpt0000644000076500000240000000174313572250760021326 0ustar alcaeusstaff--TEST-- Int32 type: 0 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 0c0000001069000000000000 {"i":{"$numberInt":"0"}} {"i":0} 0c0000001069000000000000 {"i":0} ===DONE===mongodb-1.6.1/tests/bson-corpus/int32-valid-005.phpt0000644000076500000240000000174313572250760021327 0ustar alcaeusstaff--TEST-- Int32 type: 1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 0c0000001069000100000000 {"i":{"$numberInt":"1"}} {"i":1} 0c0000001069000100000000 {"i":1} ===DONE===mongodb-1.6.1/tests/bson-corpus/int64-decodeError-001.phpt0000644000076500000240000000072713572250760022467 0ustar alcaeusstaff--TEST-- Int64 type: int64 field truncated --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/int64-valid-001.phpt0000644000076500000240000000232213572250760021322 0ustar alcaeusstaff--TEST-- Int64 type: MinValue --SKIPIF-- --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000126100000000000000008000 {"a":{"$numberLong":"-9223372036854775808"}} {"a":-9223372036854775808} 10000000126100000000000000008000 {"a":-9223372036854775808} ===DONE===mongodb-1.6.1/tests/bson-corpus/int64-valid-002.phpt0000644000076500000240000000231513572250760021325 0ustar alcaeusstaff--TEST-- Int64 type: MaxValue --SKIPIF-- --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000126100ffffffffffffff7f00 {"a":{"$numberLong":"9223372036854775807"}} {"a":9223372036854775807} 10000000126100ffffffffffffff7f00 {"a":9223372036854775807} ===DONE===mongodb-1.6.1/tests/bson-corpus/int64-valid-003.phpt0000644000076500000240000000207513572250760021331 0ustar alcaeusstaff--TEST-- Int64 type: -1 --XFAIL-- PHP encodes integers as 32-bit if range allows --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000126100ffffffffffffffff00 {"a":{"$numberLong":"-1"}} {"a":-1} 10000000126100ffffffffffffffff00 {"a":-1} 
===DONE===mongodb-1.6.1/tests/bson-corpus/int64-valid-004.phpt0000644000076500000240000000206713572250760021333 0ustar alcaeusstaff--TEST-- Int64 type: 0 --XFAIL-- PHP encodes integers as 32-bit if range allows --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000126100000000000000000000 {"a":{"$numberLong":"0"}} {"a":0} 10000000126100000000000000000000 {"a":0} ===DONE===mongodb-1.6.1/tests/bson-corpus/int64-valid-005.phpt0000644000076500000240000000206713572250760021334 0ustar alcaeusstaff--TEST-- Int64 type: 1 --XFAIL-- PHP encodes integers as 32-bit if range allows --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Relaxed extJSON -> BSON -> Relaxed extJSON echo json_canonicalize(toRelaxedExtendedJSON(fromJSON($relaxedExtJson))), "\n"; ?> ===DONE=== --EXPECT-- 10000000126100010000000000000000 {"a":{"$numberLong":"1"}} {"a":1} 10000000126100010000000000000000 {"a":1} ===DONE===mongodb-1.6.1/tests/bson-corpus/maxkey-valid-001.phpt0000644000076500000240000000125513572250760021660 0ustar alcaeusstaff--TEST-- Maxkey type: Maxkey --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 080000007f610000 {"a":{"$maxKey":1}} 080000007f610000 ===DONE===mongodb-1.6.1/tests/bson-corpus/minkey-valid-001.phpt0000644000076500000240000000125513572250760021656 0ustar alcaeusstaff--TEST-- Minkey type: Minkey --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 08000000ff610000 {"a":{"$minKey":1}} 08000000ff610000 ===DONE===mongodb-1.6.1/tests/bson-corpus/multi-type-deprecated-valid-001.phpt0000644000076500000240000002117113572250760024570 0ustar alcaeusstaff--TEST-- Multiple types within the same document: All BSON types --XFAIL-- PHP encodes integers as 32-bit if range allows --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo 
bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 38020000075f69640057e193d7a9cc81b4027498b50e53796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c736500000c4442506f696e746572000b000000636f6c6c656374696f6e0057e193d7a9cc81b4027498b1034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c0006556e646566696e65640000 {"_id":{"$oid":"57e193d7a9cc81b4027498b5"},"Symbol":{"$symbol":"symbol"},"String":"string","Int32":{"$numberInt":"42"},"Int64":{"$numberLong":"42"},"Double":{"$numberDouble":"-1.0"},"Binary":{"$binary":{"base64":"o0w498Or7cijeBSpkquNtg==","subType":"03"}},"BinaryUserDefined":{"$binary":{"base64":"AQIDBAU=","subType":"80"}},"Code":{"$code":"function() {}"},"CodeWithScope":{"$code":"function() {}","$scope":{}},"Subdocument":{"foo":"bar"},"Array":[{"$numberInt":"1"},{"$numberInt":"2"},{"$numberInt":"3"},{"$numberInt":"4"},{"$numberInt":"5"}],"Timestamp":{"$timestamp":{"t":42,"i":1}},"Regex":{"$regularExpression":{"pattern":"pattern","options":""}},"DatetimeEpoch":{"$date":{"$numberLong":"0"}},"DatetimePositive":{"$date":{"$numberLong":"2147483647"}},"DatetimeNegative":{"$date":{"$numberLong":"-2147483648"}},"True":true,"False":false,"DBPointer":{"$dbPointer":{"$ref":"collection","$id":{"$oid":"57e193d7a9cc81b4027498b1"}}},"DBRef":{"$ref":"collection","$id":{"$oid":"57fd71e96e32ab4225b723fb"},"$db":"database"},"Minkey":{"$minKey":1},"Maxkey":{"$maxKey":1},"Null":null,"Undefined":{"$undefined":true}} 
38020000075f69640057e193d7a9cc81b4027498b50e53796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c736500000c4442506f696e746572000b000000636f6c6c656374696f6e0057e193d7a9cc81b4027498b1034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c0006556e646566696e65640000 ===DONE===mongodb-1.6.1/tests/bson-corpus/multi-type-valid-001.phpt0000644000076500000240000001310513572250760022470 0ustar alcaeusstaff--TEST-- Multiple types within the same document: All BSON types --XFAIL-- PHP encodes integers as 32-bit if range allows --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- f4010000075f69640057e193d7a9cc81b4027498b502537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c0000 {"_id":{"$oid":"57e193d7a9cc81b4027498b5"},"String":"string","Int32":{"$numberInt":"42"},"Int64":{"$numberLong":"42"},"Double":{"$numberDouble":"-1.0"},"Binary":{"$binary":{"base64":"o0w498Or7cijeBSpkquNtg==","subType":"03"}},"BinaryUserDefined":{"$binary":{"base64":"AQIDBAU=","subType":"80"}},"Code":{"$code":"function() {}"},"CodeWithScope":{"$code":"function() 
{}","$scope":{}},"Subdocument":{"foo":"bar"},"Array":[{"$numberInt":"1"},{"$numberInt":"2"},{"$numberInt":"3"},{"$numberInt":"4"},{"$numberInt":"5"}],"Timestamp":{"$timestamp":{"t":42,"i":1}},"Regex":{"$regularExpression":{"pattern":"pattern","options":""}},"DatetimeEpoch":{"$date":{"$numberLong":"0"}},"DatetimePositive":{"$date":{"$numberLong":"2147483647"}},"DatetimeNegative":{"$date":{"$numberLong":"-2147483648"}},"True":true,"False":false,"DBRef":{"$ref":"collection","$id":{"$oid":"57fd71e96e32ab4225b723fb"},"$db":"database"},"Minkey":{"$minKey":1},"Maxkey":{"$maxKey":1},"Null":null} f4010000075f69640057e193d7a9cc81b4027498b502537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c0000 ===DONE===mongodb-1.6.1/tests/bson-corpus/null-valid-001.phpt0000644000076500000240000000122513572250760021331 0ustar alcaeusstaff--TEST-- Null type: Null --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 080000000a610000 {"a":null} 080000000a610000 ===DONE===mongodb-1.6.1/tests/bson-corpus/oid-decodeError-001.phpt0000644000076500000240000000072713572250760022276 0ustar alcaeusstaff--TEST-- ObjectId: OID truncated --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/oid-valid-001.phpt0000644000076500000240000000144213572250760021133 0ustar alcaeusstaff--TEST-- ObjectId: All zeroes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1400000007610000000000000000000000000000 {"a":{"$oid":"000000000000000000000000"}} 1400000007610000000000000000000000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/oid-valid-002.phpt0000644000076500000240000000144013572250760021132 0ustar alcaeusstaff--TEST-- ObjectId: All ones --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; 
// Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 14000000076100ffffffffffffffffffffffff00 {"a":{"$oid":"ffffffffffffffffffffffff"}} 14000000076100ffffffffffffffffffffffff00 ===DONE===mongodb-1.6.1/tests/bson-corpus/oid-valid-003.phpt0000644000076500000240000000143613572250760021140 0ustar alcaeusstaff--TEST-- ObjectId: Random --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 1400000007610056e1fc72e0c917e9c471416100 {"a":{"$oid":"56e1fc72e0c917e9c4714161"}} 1400000007610056e1fc72e0c917e9c471416100 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-decodeError-001.phpt0000644000076500000240000000075513572250760022636 0ustar alcaeusstaff--TEST-- Regular Expression type: embedded null in pattern --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-decodeError-002.phpt0000644000076500000240000000075513572250760022637 0ustar alcaeusstaff--TEST-- Regular Expression type: embedded null in flags --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-001.phpt0000644000076500000240000000145113572250760021472 0ustar alcaeusstaff--TEST-- Regular Expression type: empty regex with no options --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0a0000000b6100000000 {"a":{"$regularExpression":{"pattern":"","options":""}}} 0a0000000b6100000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-002.phpt0000644000076500000240000000147313572250760021477 0ustar alcaeusstaff--TEST-- Regular Expression type: regex without options --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d0000000b6100616263000000 {"a":{"$regularExpression":{"pattern":"abc","options":""}}} 0d0000000b6100616263000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-003.phpt0000644000076500000240000000151013572250760021470 0ustar alcaeusstaff--TEST-- Regular Expression type: regex with options --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical 
extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0f0000000b610061626300696d0000 {"a":{"$regularExpression":{"pattern":"abc","options":"im"}}} 0f0000000b610061626300696d0000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-004.phpt0000644000076500000240000000206113572250760021473 0ustar alcaeusstaff--TEST-- Regular Expression type: regex with options (keys reversed) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0f0000000b610061626300696d0000 {"a":{"$regularExpression":{"pattern":"abc","options":"im"}}} 0f0000000b610061626300696d0000 0f0000000b610061626300696d0000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-005.phpt0000644000076500000240000000152713572250760021502 0ustar alcaeusstaff--TEST-- Regular Expression type: regex with slash --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 110000000b610061622f636400696d0000 {"a":{"$regularExpression":{"pattern":"ab\/cd","options":"im"}}} 110000000b610061622f636400696d0000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-006.phpt0000644000076500000240000000265013572250760021501 0ustar alcaeusstaff--TEST-- Regular Expression type: flags not alphabetized --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate BSON -> Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($degenerateBson))), "\n"; // Degenerate BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($degenerateBson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 100000000b610061626300696d780000 {"a":{"$regularExpression":{"pattern":"abc","options":"imx"}}} 100000000b610061626300696d780000 100000000b610061626300696d780000 {"a":{"$regularExpression":{"pattern":"abc","options":"imx"}}} 100000000b610061626300696d780000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-007.phpt0000644000076500000240000000152513572250760021502 0ustar alcaeusstaff--TEST-- Regular Expression type: Required escapes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo 
bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 100000000b610061625c226162000000 {"a":{"$regularExpression":{"pattern":"ab\\\"ab","options":""}}} 100000000b610061625c226162000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-008.phpt0000644000076500000240000000166213572250760021505 0ustar alcaeusstaff--TEST-- Regular Expression type: Regular expression as value of $regex query operator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 180000000b247265676578007061747465726e0069780000 {"$regex":{"$regularExpression":{"pattern":"pattern","options":"ix"}}} 180000000b247265676578007061747465726e0069780000 ===DONE===mongodb-1.6.1/tests/bson-corpus/regex-valid-009.phpt0000644000076500000240000000207113572250760021501 0ustar alcaeusstaff--TEST-- Regular Expression type: Regular expression as value of $regex query operator with $options --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 270000000b247265676578007061747465726e000002246f7074696f6e73000300000069780000 {"$regex":{"$regularExpression":{"pattern":"pattern","options":""}},"$options":"ix"} 270000000b247265676578007061747465726e000002246f7074696f6e73000300000069780000 ===DONE===mongodb-1.6.1/tests/bson-corpus/string-decodeError-001.phpt0000644000076500000240000000074713572250760023033 0ustar alcaeusstaff--TEST-- String: bad string length: 0 (but no 0x00 either) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/string-decodeError-002.phpt0000644000076500000240000000072313572250760023026 0ustar alcaeusstaff--TEST-- String: bad string length: -1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/string-decodeError-003.phpt0000644000076500000240000000075013572250760023027 0ustar alcaeusstaff--TEST-- String: bad string length: eats terminator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/string-decodeError-004.phpt0000644000076500000240000000077113572250760023033 0ustar alcaeusstaff--TEST-- String: bad string length: longer than rest of document --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/string-decodeError-005.phpt0000644000076500000240000000074313572250760023033 0ustar alcaeusstaff--TEST-- String: string is not null-terminated --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/string-decodeError-006.phpt0000644000076500000240000000073613572250760023036 0ustar alcaeusstaff--TEST-- String: empty string, but extra null --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/string-decodeError-007.phpt0000644000076500000240000000071713572250760023036 0ustar alcaeusstaff--TEST-- String: invalid UTF-8 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/string-valid-001.phpt0000644000076500000240000000126413572250760021670 0ustar alcaeusstaff--TEST-- String: Empty string --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d000000026100010000000000 {"a":""} 0d000000026100010000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/string-valid-002.phpt0000644000076500000240000000130013572250760021660 0ustar alcaeusstaff--TEST-- String: Single character --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0e00000002610002000000620000 {"a":"b"} 0e00000002610002000000620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/string-valid-003.phpt0000644000076500000240000000142713572250760021673 0ustar alcaeusstaff--TEST-- String: Multi-character --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000261000d0000006162616261626162616261620000 {"a":"abababababab"} 190000000261000d0000006162616261626162616261620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/string-valid-004.phpt0000644000076500000240000000152113572250760021667 0ustar alcaeusstaff--TEST-- String: two-byte UTF-8 (é) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000261000d000000c3a9c3a9c3a9c3a9c3a9c3a90000 {"a":"\u00e9\u00e9\u00e9\u00e9\u00e9\u00e9"} 190000000261000d000000c3a9c3a9c3a9c3a9c3a9c3a90000 
===DONE===mongodb-1.6.1/tests/bson-corpus/string-valid-005.phpt0000644000076500000240000000147213572250760021675 0ustar alcaeusstaff--TEST-- String: three-byte UTF-8 (☆) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000261000d000000e29886e29886e29886e298860000 {"a":"\u2606\u2606\u2606\u2606"} 190000000261000d000000e29886e29886e29886e298860000 ===DONE===mongodb-1.6.1/tests/bson-corpus/string-valid-006.phpt0000644000076500000240000000145413572250760021676 0ustar alcaeusstaff--TEST-- String: Embedded nulls --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000261000d0000006162006261620062616261620000 {"a":"ab\u0000bab\u0000babab"} 190000000261000d0000006162006261620062616261620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/string-valid-007.phpt0000644000076500000240000000242213572250760021673 0ustar alcaeusstaff--TEST-- String: Required escapes --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 320000000261002600000061625c220102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f61620000 {"a":"ab\\\"\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001fab"} 320000000261002600000061625c220102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f61620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-decodeError-001.phpt0000644000076500000240000000074713572250760023032 0ustar alcaeusstaff--TEST-- Symbol: bad symbol length: 0 (but no 0x00 either) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-decodeError-002.phpt0000644000076500000240000000072313572250760023025 0ustar alcaeusstaff--TEST-- Symbol: bad symbol length: -1 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-decodeError-003.phpt0000644000076500000240000000075013572250760023026 0ustar alcaeusstaff--TEST-- Symbol: bad symbol length: eats terminator --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException 
===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-decodeError-004.phpt0000644000076500000240000000077113572250760023032 0ustar alcaeusstaff--TEST-- Symbol: bad symbol length: longer than rest of document --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-decodeError-005.phpt0000644000076500000240000000074313572250760023032 0ustar alcaeusstaff--TEST-- Symbol: symbol is not null-terminated --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-decodeError-006.phpt0000644000076500000240000000073613572250760023035 0ustar alcaeusstaff--TEST-- Symbol: empty symbol, but extra null --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-decodeError-007.phpt0000644000076500000240000000071713572250760023035 0ustar alcaeusstaff--TEST-- Symbol: invalid UTF-8 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-valid-001.phpt0000644000076500000240000000144513572250760021670 0ustar alcaeusstaff--TEST-- Symbol: Empty string --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0d0000000e6100010000000000 {"a":{"$symbol":""}} 0d0000000e6100010000000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-valid-002.phpt0000644000076500000240000000146413572250760021672 0ustar alcaeusstaff--TEST-- Symbol: Single character --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0e0000000e610002000000620000 {"a":{"$symbol":"b"}} 0e0000000e610002000000620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-valid-003.phpt0000644000076500000240000000165413572250760021674 0ustar alcaeusstaff--TEST-- Symbol: Multi-character --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000e61000d0000006162616261626162616261620000 {"a":{"$symbol":"abababababab"}} 190000000e61000d0000006162616261626162616261620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-valid-004.phpt0000644000076500000240000000171013572250760021666 0ustar 
alcaeusstaff--TEST-- Symbol: two-byte UTF-8 (é) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000e61000d000000c3a9c3a9c3a9c3a9c3a9c3a90000 {"a":{"$symbol":"\u00e9\u00e9\u00e9\u00e9\u00e9\u00e9"}} 190000000e61000d000000c3a9c3a9c3a9c3a9c3a9c3a90000 ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-valid-005.phpt0000644000076500000240000000167713572250760021703 0ustar alcaeusstaff--TEST-- Symbol: three-byte UTF-8 (☆) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000e61000d000000e29886e29886e29886e298860000 {"a":{"$symbol":"\u2606\u2606\u2606\u2606"}} 190000000e61000d000000e29886e29886e29886e298860000 ===DONE===mongodb-1.6.1/tests/bson-corpus/symbol-valid-006.phpt0000644000076500000240000000171513572250760021675 0ustar alcaeusstaff--TEST-- Symbol: Embedded nulls --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 190000000e61000d0000006162006261620062616261620000 {"a":{"$symbol":"ab\u0000bab\u0000babab"}} 190000000e61000d0000006162006261620062616261620000 ===DONE===mongodb-1.6.1/tests/bson-corpus/timestamp-decodeError-001.phpt0000644000076500000240000000074513572250760023526 0ustar alcaeusstaff--TEST-- Timestamp type: Truncated timestamp field --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/timestamp-valid-001.phpt0000644000076500000240000000145313572250760022365 0ustar alcaeusstaff--TEST-- Timestamp type: Timestamp: (123456789, 42) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 100000001161002a00000015cd5b0700 {"a":{"$timestamp":{"t":123456789,"i":42}}} 100000001161002a00000015cd5b0700 ===DONE===mongodb-1.6.1/tests/bson-corpus/timestamp-valid-002.phpt0000644000076500000240000000200713572250760022362 0ustar alcaeusstaff--TEST-- Timestamp type: Timestamp: (123456789, 42) (keys reversed) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo 
json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; // Degenerate extJSON -> Canonical BSON echo bin2hex(fromJSON($degenerateExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 100000001161002a00000015cd5b0700 {"a":{"$timestamp":{"t":123456789,"i":42}}} 100000001161002a00000015cd5b0700 100000001161002a00000015cd5b0700 ===DONE===mongodb-1.6.1/tests/bson-corpus/timestamp-valid-003.phpt0000644000076500000240000000154313572250760022367 0ustar alcaeusstaff--TEST-- Timestamp type: Timestamp with high-order bit set on both seconds and increment --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 10000000116100ffffffffffffffff00 {"a":{"$timestamp":{"t":4294967295,"i":4294967295}}} 10000000116100ffffffffffffffff00 ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-001.phpt0000644000076500000240000000105013572250760022313 0ustar alcaeusstaff--TEST-- Top-level document validity: An object size that's too small to even include the object size, but is a well-formed, empty object --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-002.phpt0000644000076500000240000000103613572250760022320 0ustar alcaeusstaff--TEST-- Top-level document validity: An object size that's only enough for the object size, but is a well-formed, empty object --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-003.phpt0000644000076500000240000000077213572250760022327 0ustar alcaeusstaff--TEST-- Top-level document validity: One object, with length shorter than size (missing EOO) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-004.phpt0000644000076500000240000000101513572250760022317 0ustar alcaeusstaff--TEST-- Top-level document validity: One object, sized correctly, with a spot for an EOO, but the EOO is 0x01 --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-005.phpt0000644000076500000240000000101513572250760022320 0ustar alcaeusstaff--TEST-- Top-level document validity: One object, sized correctly, with a spot for an EOO, but the EOO is 0xff --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-006.phpt0000644000076500000240000000101513572250760022321 0ustar alcaeusstaff--TEST-- Top-level document validity: One object, sized correctly, with a spot for an EOO, but the EOO is 0x70 
--DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-007.phpt0000644000076500000240000000077613572250760022337 0ustar alcaeusstaff--TEST-- Top-level document validity: Byte count is zero (with non-zero input length) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-008.phpt0000644000076500000240000000102413572250760022323 0ustar alcaeusstaff--TEST-- Top-level document validity: Stated length exceeds byte count, with truncated document --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-009.phpt0000644000076500000240000000104613572250760022330 0ustar alcaeusstaff--TEST-- Top-level document validity: Stated length less than byte count, with garbage after envelope --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-010.phpt0000644000076500000240000000102413572250760022314 0ustar alcaeusstaff--TEST-- Top-level document validity: Stated length exceeds byte count, with valid envelope --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-011.phpt0000644000076500000240000000102613572250760022317 0ustar alcaeusstaff--TEST-- Top-level document validity: Stated length less than byte count, with valid envelope --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-012.phpt0000644000076500000240000000074413572250760022326 0ustar alcaeusstaff--TEST-- Top-level document validity: Invalid BSON type low range --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-013.phpt0000644000076500000240000000074513572250760022330 0ustar alcaeusstaff--TEST-- Top-level document validity: Invalid BSON type high range --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-decodeError-014.phpt0000644000076500000240000000074313572250760022327 0ustar alcaeusstaff--TEST-- Top-level document validity: Document truncated mid-key --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-001.phpt0000644000076500000240000000100613572250760022203 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $regularExpression (extra 
field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-002.phpt0000644000076500000240000000075613572250760022217 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $regularExpression (missing options field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-003.phpt0000644000076500000240000000100713572250760022206 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $regularExpression (pattern is number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-004.phpt0000644000076500000240000000101013572250760022201 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $regularExpression (options are number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-005.phpt0000644000076500000240000000075413572250760022220 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $regularExpression (missing pattern field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-006.phpt0000644000076500000240000000070113572250760022211 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $oid (number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-007.phpt0000644000076500000240000000074513572250760022222 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $oid (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-008.phpt0000644000076500000240000000071513572250760022220 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberInt (number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-009.phpt0000644000076500000240000000073313572250760022221 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberInt (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-010.phpt0000644000076500000240000000071713572250760022213 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberLong (number, not string) --DESCRIPTION-- Generated by 
scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-011.phpt0000644000076500000240000000073513572250760022214 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberLong (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-012.phpt0000644000076500000240000000072313572250760022212 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberDouble (number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-013.phpt0000644000076500000240000000074113572250760022213 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberDouble (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-014.phpt0000644000076500000240000000072513572250760022216 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberDecimal (number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-015.phpt0000644000076500000240000000074313572250760022217 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $numberDecimal (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-016.phpt0000644000076500000240000000075713572250760022225 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $binary (binary is number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-017.phpt0000644000076500000240000000075313572250760022222 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $binary (type is number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-018.phpt0000644000076500000240000000072313572250760022220 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $binary (missing $type) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-019.phpt0000644000076500000240000000072413572250760022222 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $binary (missing $binary) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== 
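For reference, a minimal sketch of the round trip these generated corpus tests perform, written against the public MongoDB\BSON\* functions rather than the tests' own helpers (json_canonicalize, throws, etc., which live in tests/utils/tools.php and are omitted here). The timestamp document and its canonical extended JSON are taken from timestamp-valid-001 above; everything else is illustrative only, not one of the packaged tests.

<?php
// Sketch only: assumes ext-mongodb 1.6 is loaded; not a generated corpus test.
$canonicalBson    = hex2bin('100000001161002a00000015cd5b0700');
$canonicalExtJson = '{"a":{"$timestamp":{"t":123456789,"i":42}}}';

// Canonical BSON -> Native -> Canonical BSON
echo bin2hex(MongoDB\BSON\fromPHP(MongoDB\BSON\toPHP($canonicalBson))), "\n";

// Canonical BSON -> Canonical extJSON (the tests normalize this with json_canonicalize())
echo MongoDB\BSON\toCanonicalExtendedJSON($canonicalBson), "\n";

// Canonical extJSON -> Canonical BSON
echo bin2hex(MongoDB\BSON\fromJSON($canonicalExtJson)), "\n";

// The decodeError/parseError tests above feed truncated BSON or malformed
// extended JSON into these same functions and expect an
// UnexpectedValueException, e.g. a timestamp document cut short:
try {
    MongoDB\BSON\toPHP(hex2bin('100000001161002a000000'));
} catch (MongoDB\Driver\Exception\UnexpectedValueException $e) {
    echo 'OK: Got ', get_class($e), "\n";
}
?>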
--EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-020.phpt0000644000076500000240000000076313572250760022215 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $binary (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-021.phpt0000644000076500000240000000071313572250760022211 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $code (type is number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-022.phpt0000644000076500000240000000071713572250760022216 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $code (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-023.phpt0000644000076500000240000000074413572250760022217 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $code with $scope (scope is number, not doc) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-024.phpt0000644000076500000240000000072313572250760022215 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $timestamp (type is number, not doc) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-025.phpt0000644000076500000240000000076613572250760022225 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $timestamp ('t' type is string, not number) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-026.phpt0000644000076500000240000000076613572250760022226 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $timestamp ('i' type is string, not number) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-027.phpt0000644000076500000240000000102413572250760022213 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $timestamp (extra field at same level as $timestamp) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-028.phpt0000644000076500000240000000102113572250760022211 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $timestamp (extra field at same level as t and i) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got 
MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-029.phpt0000644000076500000240000000072013572250760022217 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $timestamp (missing t) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-030.phpt0000644000076500000240000000072713572250760022216 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $timestamp (missing i) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-031.phpt0000644000076500000240000000102313572250760022205 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $date (number, not string or hash) --XFAIL-- Legacy extended JSON $date syntax uses numbers (CDRIVER-2223) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-032.phpt0000644000076500000240000000075613572250760022222 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $date (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-033.phpt0000644000076500000240000000073013572250760022213 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad DBRef (ref is number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-034.phpt0000644000076500000240000000074413572250760022221 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad DBRef (db is number, not string) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-035.phpt0000644000076500000240000000071313572250760022216 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $minKey (boolean, not integer) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-036.phpt0000644000076500000240000000070113572250760022214 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $minKey (wrong integer) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-037.phpt0000644000076500000240000000072213572250760022220 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $minKey (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException 
===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-038.phpt0000644000076500000240000000071313572250760022221 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $maxKey (boolean, not integer) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-039.phpt0000644000076500000240000000070113572250760022217 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $maxKey (wrong integer) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-040.phpt0000644000076500000240000000072213572250760022212 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad $maxKey (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-parseError-041.phpt0000644000076500000240000000105713572250760022215 0ustar alcaeusstaff--TEST-- Top-level document validity: Bad DBpointer (extra field) --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE===mongodb-1.6.1/tests/bson-corpus/top-valid-001.phpt0000644000076500000240000000142513572250760021163 0ustar alcaeusstaff--TEST-- Top-level document validity: Document with keys that start with $ --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0f00000010246b6579002a00000000 {"$key":{"$numberInt":"42"}} 0f00000010246b6579002a00000000 ===DONE===mongodb-1.6.1/tests/bson-corpus/undefined-valid-001.phpt0000644000076500000240000000143613572250760022324 0ustar alcaeusstaff--TEST-- Undefined type (deprecated): Undefined --DESCRIPTION-- Generated by scripts/convert-bson-corpus-tests.php DO NOT EDIT THIS FILE --FILE-- Native -> Canonical BSON echo bin2hex(fromPHP(toPHP($canonicalBson))), "\n"; // Canonical BSON -> Canonical extJSON echo json_canonicalize(toCanonicalExtendedJSON($canonicalBson)), "\n"; // Canonical extJSON -> Canonical BSON echo bin2hex(fromJSON($canonicalExtJson)), "\n"; ?> ===DONE=== --EXPECT-- 0800000006610000 {"a":{"$undefined":true}} 0800000006610000 ===DONE===mongodb-1.6.1/tests/bson/bson-binary-001.phpt0000644000076500000240000000650113572250760020176 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary #001 --FILE-- getData() === 'randomBinaryData'); var_dump($binary->getType() == $type); $tests[] = array("binary" => $binary); } foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) 
bool(true) bool(true) bool(true) bool(true) Test#0 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "00" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "00" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "00" } }" bool(true) Test#1 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "01" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "01" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "01" } }" bool(true) Test#2 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "02" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "02" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "02" } }" bool(true) Test#3 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "03" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "03" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "03" } }" bool(true) Test#4 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "04" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "04" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "04" } }" bool(true) Test#5 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "05" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "05" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "05" } }" bool(true) Test#6 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "80" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "80" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "80" } }" bool(true) Test#7 { "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "85" } } string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "85" } }" string(73) "{ "binary" : { "$binary" : "cmFuZG9tQmluYXJ5RGF0YQ==", "$type" : "85" } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-compare-001.phpt0000644000076500000240000000205313572250760021620 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary comparisons --FILE-- new MongoDB\BSON\Binary('foobar', 1)); // Data length is compared first var_dump(new MongoDB\BSON\Binary('c', 1) < new MongoDB\BSON\Binary('aa', 0)); var_dump(new MongoDB\BSON\Binary('bb', 0) > new MongoDB\BSON\Binary('a', 1)); // Type is compared second var_dump(new MongoDB\BSON\Binary('foobar', 1) < new MongoDB\BSON\Binary('foobar', 2)); var_dump(new MongoDB\BSON\Binary('foobar', 1) > new MongoDB\BSON\Binary('foobar', 0)); // Data is compared last var_dump(new MongoDB\BSON\Binary('foobar', 1) < new MongoDB\BSON\Binary('foobat', 1)); var_dump(new MongoDB\BSON\Binary('foobar', 1) > new MongoDB\BSON\Binary('foobap', 1)); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-compare-002.phpt0000644000076500000240000000220313572250760021616 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary comparisons with null bytes --FILE-- new MongoDB\BSON\Binary("foo\x00bar", 1)); // Data length is compared first var_dump(new MongoDB\BSON\Binary("c\x00", 1) < new MongoDB\BSON\Binary("a\x00a", 0)); var_dump(new MongoDB\BSON\Binary("b\x00b", 0) > new MongoDB\BSON\Binary("a\x00", 
1)); // Type is compared second var_dump(new MongoDB\BSON\Binary("foo\x00bar", 1) < new MongoDB\BSON\Binary("foo\x00bar", 2)); var_dump(new MongoDB\BSON\Binary("foo\x00bar", 1) > new MongoDB\BSON\Binary("foo\x00bar", 0)); // Data is compared last var_dump(new MongoDB\BSON\Binary("foo\x00bar", 1) < new MongoDB\BSON\Binary("foo\x00bat", 1)); var_dump(new MongoDB\BSON\Binary("foo\x00bar", 1) > new MongoDB\BSON\Binary("foo\x00bap", 1)); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-get_properties-001.phpt0000644000076500000240000000050713572250760023227 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary get_properties handler (get_object_vars) --FILE-- ===DONE=== --EXPECT-- array(2) { ["data"]=> string(6) "foobar" ["type"]=> int(0) } ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-get_properties-002.phpt0000644000076500000240000000054413572250760023231 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary get_properties handler (foreach) --FILE-- $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(4) "data" string(6) "foobar" string(4) "type" int(0) ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-jsonserialize-001.phpt0000644000076500000240000000047613572250760023062 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary::jsonSerialize() return value --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(2) { ["$binary"]=> string(20) "Z2FyZ2xlYmxhc3Rlcg==" ["$type"]=> string(2) "18" } ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-jsonserialize-002.phpt0000644000076500000240000000120313572250760023050 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary::jsonSerialize() with json_encode() --FILE-- new MongoDB\BSON\Binary('gargleblaster', 24)]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$binary" : "Z2FyZ2xlYmxhc3Rlcg==", "$type" : "18" } } {"foo":{"$binary":"Z2FyZ2xlYmxhc3Rlcg==","$type":"18"}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(13) "gargleblaster" ["type"]=> int(24) } } ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-serialization-001.phpt0000644000076500000240000000373113572250760023053 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(6) "foobar" ["type"]=> int(0) } string(77) "C:19:"MongoDB\BSON\Binary":45:{a:2:{s:4:"data";s:6:"foobar";s:4:"type";i:0;}}" object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(6) "foobar" ["type"]=> int(0) } object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(0) "" ["type"]=> int(0) } string(71) "C:19:"MongoDB\BSON\Binary":39:{a:2:{s:4:"data";s:0:"";s:4:"type";i:0;}}" object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(0) "" ["type"]=> int(0) } object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(4) "%sfoo" ["type"]=> int(0) } string(75) "C:19:"MongoDB\BSON\Binary":43:{a:2:{s:4:"data";s:4:"%sfoo";s:4:"type";i:0;}}" object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(4) "%sfoo" ["type"]=> int(0) } object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%s" ["type"]=> int(4) } string(88) "C:19:"MongoDB\BSON\Binary":56:{a:2:{s:4:"data";s:16:"%s";s:4:"type";i:4;}}" object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%s" ["type"]=> int(4) } object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%s" ["type"]=> int(5) } string(88) 
"C:19:"MongoDB\BSON\Binary":56:{a:2:{s:4:"data";s:16:"%s";s:4:"type";i:5;}}" object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%s" ["type"]=> int(5) } ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-serialization_error-001.phpt0000644000076500000240000000220713572250760024261 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary unserialization requires "data" string and "type" integer fields --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Binary initialization requires "data" string and "type" integer fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Binary initialization requires "data" string and "type" integer fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Binary initialization requires "data" string and "type" integer fields ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-serialization_error-002.phpt0000644000076500000240000000145113572250760024262 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary unserialization requires unsigned 8-bit integer for type --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected type to be an unsigned 8-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected type to be an unsigned 8-bit integer, 256 given ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-serialization_error-003.phpt0000644000076500000240000000260613572250760024266 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary unserialization requires 16-byte data length for UUID types --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 15 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 17 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 15 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 17 given ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-set_state-001.phpt0000644000076500000240000000204113572250760022162 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary::__set_state() --FILE-- $data, 'type' => $type, ])); echo "\n\n"; } ?> ===DONE=== --EXPECTF-- MongoDB\BSON\Binary::__set_state(array( %w'data' => 'foobar', %w'type' => 0, )) MongoDB\BSON\Binary::__set_state(array( %w'data' => '', %w'type' => 0, )) MongoDB\BSON\Binary::__set_state(array( %w'data' => '' . "\0" . 'foo', %w'type' => 0, )) MongoDB\BSON\Binary::__set_state(array( %w'data' => '>Egè›Ó¤VBfUD' . "\0" . '' . "\0" . 
'', %w'type' => 4, )) MongoDB\BSON\Binary::__set_state(array( %w'data' => '8Xö"0¬<‘_0 fCÆ?', %w'type' => 5, )) ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-set_state_error-001.phpt0000644000076500000240000000210113572250760023370 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary::__set_state() requires "data" string and "type" integer fields --FILE-- 'foobar']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Binary::__set_state(['type' => 0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Binary::__set_state(['data' => 0, 'type' => 'foobar']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Binary initialization requires "data" string and "type" integer fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Binary initialization requires "data" string and "type" integer fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Binary initialization requires "data" string and "type" integer fields ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-set_state_error-002.phpt0000644000076500000240000000136613572250760023405 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary::__set_state() requires unsigned 8-bit integer for type --FILE-- 'foobar', 'type' => -1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Binary::__set_state(['data' => 'foobar', 'type' => 256]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected type to be an unsigned 8-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected type to be an unsigned 8-bit integer, 256 given ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-set_state_error-003.phpt0000644000076500000240000000263113572250760023402 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary::__set_state() requires 16-byte data length for UUID types --FILE-- '0123456789abcde', 'type' => MongoDB\BSON\Binary::TYPE_OLD_UUID]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Binary::__set_state(['data' => '0123456789abcdefg', 'type' => MongoDB\BSON\Binary::TYPE_OLD_UUID]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Binary::__set_state(['data' => '0123456789abcde', 'type' => MongoDB\BSON\Binary::TYPE_UUID]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Binary::__set_state(['data' => '0123456789abcdefg', 'type' => MongoDB\BSON\Binary::TYPE_UUID]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 15 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 17 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 15 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 17 given ===DONE=== mongodb-1.6.1/tests/bson/bson-binary-tostring-001.phpt0000644000076500000240000000036113572250760022043 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary::__toString() --FILE-- ===DONE=== --EXPECT-- string(6) "foobar" ===DONE=== 
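The bson-binary-*.phpt tests above exercise MongoDB\BSON\Binary construction, the getData()/getType() accessors, __toString(), jsonSerialize(), PHP serialization, and the 16-byte length check for UUID subtypes. A short illustrative sketch of that API follows; it is not part of the package, and the values in the comments simply mirror the test expectations above.

<?php
// Sketch only: assumes ext-mongodb 1.6 is loaded.
use MongoDB\BSON\Binary;

$binary = new Binary('foobar', Binary::TYPE_GENERIC);

var_dump($binary->getData());       // string(6) "foobar"
var_dump($binary->getType());       // int(0)
var_dump((string) $binary);         // __toString() returns the raw data
var_dump($binary->jsonSerialize()); // ['$binary' => 'Zm9vYmFy', '$type' => '00']

// UUID subtypes (TYPE_OLD_UUID = 3, TYPE_UUID = 4) must wrap exactly 16 bytes;
// anything else raises MongoDB\Driver\Exception\InvalidArgumentException.
try {
    new Binary('0123456789abcde', Binary::TYPE_UUID); // 15 bytes
} catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {
    echo $e->getMessage(), "\n"; // Expected UUID length to be 16 bytes, 15 given
}
?>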
mongodb-1.6.1/tests/bson/bson-binary_error-001.phpt0000644000076500000240000000126013572250760021404 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary #001 error --FILE-- getData(2); $binary->getType(2); throws(function() { new MongoDB\BSON\Binary("random binary data without type"); }, "MongoDB\\Driver\\Exception\\InvalidArgumentException"); ?> ===DONE=== --EXPECTF-- Warning: MongoDB\BSON\Binary::getData() expects exactly 0 parameters, 1 given in %s on line %d Warning: MongoDB\BSON\Binary::getType() expects exactly 0 parameters, 1 given in %s on line %d OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE=== mongodb-1.6.1/tests/bson/bson-binary_error-002.phpt0000644000076500000240000000037713572250760021415 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyBinary may not inherit from final class (MongoDB\BSON\Binary) in %s on line %d mongodb-1.6.1/tests/bson/bson-binary_error-003.phpt0000644000076500000240000000125713572250760021414 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary constructor requires unsigned 8-bit integer for type --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected type to be an unsigned 8-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected type to be an unsigned 8-bit integer, 256 given ===DONE=== mongodb-1.6.1/tests/bson/bson-binary_error-004.phpt0000644000076500000240000000243213572250760021411 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Binary constructor requires 16-byte data length for UUID types --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 15 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 17 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 15 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected UUID length to be 16 bytes, 17 given ===DONE=== mongodb-1.6.1/tests/bson/bson-binaryinterface-001.phpt0000644000076500000240000000044613572250760022061 0ustar alcaeusstaff--TEST-- MongoDB\BSON\BinaryInterface is implemented by MongoDB\BSON\Binary --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-001.phpt0000644000076500000240000000162013572250760020675 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer #001 --FILE-- $test) { echo "Test#{$n}", "\n"; $s = fromPHP($test); $testagain = toPHP($s); var_dump($test->dbref instanceof MongoDB\BSON\DBPointer); var_dump($testagain->dbref instanceof MongoDB\BSON\DBPointer); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Test#0 bool(true) bool(true) string(76) "{ "dbref" : { "$ref" : "phongo.test", "$id" : "5a2e78accd485d55b405ac12" } }" string(76) "{ "dbref" : { "$ref" : "phongo.test", "$id" : "5a2e78accd485d55b405ac12" } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-002.phpt0000644000076500000240000000066113572250760020702 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer debug handler --FILE-- dbref); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\DBPointer)#1 (2) { ["ref"]=> string(11) "phongo.test" ["id"]=> string(24) "5a2e78accd485d55b405ac12" } ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-compare-001.phpt0000644000076500000240000000246613572250760022332 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer comparisons 
--FILE-- $jsonTest1b); var_dump($jsonAAAA < $jsonTest1b); var_dump($jsonZZZZ > $jsonTest1b); ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-get_properties-001.phpt0000644000076500000240000000071213572250760023727 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer get_properties handler (get_object_vars) --FILE-- dbptr; var_dump(get_object_vars($dbptr)); ?> ===DONE=== --EXPECT-- array(2) { ["ref"]=> string(11) "phongo.test" ["id"]=> string(24) "5a2e78accd485d55b405ac12" } ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-get_properties-002.phpt0000644000076500000240000000074713572250760023740 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer get_properties handler (foreach) --FILE-- dbptr; foreach ($dbptr as $key => $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(3) "ref" string(11) "phongo.test" string(2) "id" string(24) "5a2e78accd485d55b405ac12" ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-jsonserialize-001.phpt0000644000076500000240000000101313572250760023550 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer::jsonSerialize() return value --FILE-- dbref; var_dump($dbref->jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$dbPointer"]=> array(2) { ["$ref"]=> string(11) "phongo.test" ["$id"]=> array(1) { ["$oid"]=> string(24) "5a2e78accd485d55b4050000" } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-jsonserialize-003.phpt0000644000076500000240000000144513572250760023563 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer::jsonSerialize() with json_encode() --FILE-- ===DONE=== --EXPECTF-- { "foo" : { "$ref" : "phongo.test", "$id" : "5a2e78accd485d55b4050000" } } {"foo":{"$dbPointer":{"$ref":"phongo.test","$id":{"$oid":"5a2e78accd485d55b4050000"}}}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\DBPointer)#%d (%d) { ["ref"]=> string(11) "phongo.test" ["id"]=> string(24) "5a2e78accd485d55b4050000" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-serialization-001.phpt0000644000076500000240000000137113572250760023553 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer serialization --FILE-- dbref; var_dump($test); var_dump($s = serialize($test)); var_dump(unserialize($s)); echo "\n"; ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\DBPointer)#1 (2) { ["ref"]=> string(11) "phongo.test" ["id"]=> string(24) "5a2e78accd485d55b4050000" } string(111) "C:22:"MongoDB\BSON\DBPointer":76:{a:2:{s:3:"ref";s:11:"phongo.test";s:2:"id";s:24:"5a2e78accd485d55b4050000";}}" object(MongoDB\BSON\DBPointer)#2 (2) { ["ref"]=> string(11) "phongo.test" ["id"]=> string(24) "5a2e78accd485d55b4050000" } ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-serialization_error-001.phpt0000644000076500000240000000147113572250760024765 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer unserialization requires "ref" and "id" string fields --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\DBPointer initialization requires "ref" and "id" string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\DBPointer initialization requires "ref" and "id" string fields ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-serialization_error-002.phpt0000644000076500000240000000154313572250760024766 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer unserialization requires "id" string field to be valid --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: 
QQQQ78accd485d55b4050000 OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: 52e78accd485d55b4050000 ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer-tostring-001.phpt0000644000076500000240000000055213572250760022547 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer::__toString() --FILE-- dbref; var_dump((string) $dbref); ?> ===DONE=== --EXPECT-- string(38) "[phongo.test/5a2e78accd485d55b4050000]" ===DONE=== mongodb-1.6.1/tests/bson/bson-dbpointer_error-002.phpt0000644000076500000240000000041613572250760022111 0ustar alcaeusstaff--TEST-- MongoDB\BSON\DBPointer cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyDBPointer may not inherit from final class (MongoDB\BSON\DBPointer) in %s on line %d mongodb-1.6.1/tests/bson/bson-decimal128-001.phpt0000644000076500000240000000123013572250760020535 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- 1234.5678 -1234.5678 1.234E+8 1.234E+8 1.23456E-75 -234.567 2.345E+9 0.002345 1234.5678 -1234.5678 -234.567 123400000 1.23456E-75 ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-002.phpt0000644000076500000240000000065613572250760020551 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 NaN values --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- NaN NaN NaN NaN NaN NaN ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-003.phpt0000644000076500000240000000106013572250760020540 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 Infinity values --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- Infinity Infinity Infinity Infinity Infinity Infinity Infinity Infinity ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-004.phpt0000644000076500000240000000113413572250760020543 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 debug handler --SKIPIF-- --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(9) "1234.5678" } object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(3) "NaN" } object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(8) "Infinity" } ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-get_properties-001.phpt0000644000076500000240000000063113572250760023572 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 get_properties handler (get_object_vars) --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- array(1) { ["dec"]=> string(9) "1234.5678" } ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-get_properties-002.phpt0000644000076500000240000000066413572250760023601 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 get_properties handler (foreach) --SKIPIF-- --FILE-- $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(3) "dec" string(9) "1234.5678" ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-jsonserialize-001.phpt0000644000076500000240000000064313572250760023423 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128::jsonSerialize() return value --SKIPIF-- --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$numberDecimal"]=> string(14) "12389719287312" } ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-jsonserialize-002.phpt0000644000076500000240000000132213572250760023417 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128::jsonSerialize() with json_encode() --SKIPIF-- --FILE-- new MongoDB\BSON\Decimal128('12389719287312')]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$numberDecimal" : "12389719287312" } } {"foo":{"$numberDecimal":"12389719287312"}} 
object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(14) "12389719287312" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-serialization-001.phpt0000644000076500000240000000326413572250760023421 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 serialization --SKIPIF-- --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(9) "1234.5678" } string(68) "C:23:"MongoDB\BSON\Decimal128":32:{a:1:{s:3:"dec";s:9:"1234.5678";}}" object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(9) "1234.5678" } object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(10) "-1234.5678" } string(70) "C:23:"MongoDB\BSON\Decimal128":34:{a:1:{s:3:"dec";s:10:"-1234.5678";}}" object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(10) "-1234.5678" } object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(11) "1.23456E-75" } string(71) "C:23:"MongoDB\BSON\Decimal128":35:{a:1:{s:3:"dec";s:11:"1.23456E-75";}}" object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(11) "1.23456E-75" } object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(8) "Infinity" } string(67) "C:23:"MongoDB\BSON\Decimal128":31:{a:1:{s:3:"dec";s:8:"Infinity";}}" object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(8) "Infinity" } object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(3) "NaN" } string(62) "C:23:"MongoDB\BSON\Decimal128":26:{a:1:{s:3:"dec";s:3:"NaN";}}" object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(3) "NaN" } ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-serialization_error-001.phpt0000644000076500000240000000114113572250760024622 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 unserialization requires "dec" string field --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Decimal128 initialization requires "dec" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-serialization_error-002.phpt0000644000076500000240000000112313572250760024623 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 unserialization requires valid decimal string --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing Decimal128 string: INVALID ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-set_state-001.phpt0000644000076500000240000000160213572250760022531 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128::__set_state() --SKIPIF-- --FILE-- $value, ])); echo "\n\n"; } ?> ===DONE=== --EXPECTF-- MongoDB\BSON\Decimal128::__set_state(array( %w'dec' => '1234.5678', )) MongoDB\BSON\Decimal128::__set_state(array( %w'dec' => '-1234.5678', )) MongoDB\BSON\Decimal128::__set_state(array( %w'dec' => 'Infinity', )) MongoDB\BSON\Decimal128::__set_state(array( %w'dec' => 'Infinity', )) MongoDB\BSON\Decimal128::__set_state(array( %w'dec' => 'NaN', )) MongoDB\BSON\Decimal128::__set_state(array( %w'dec' => 'NaN', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-set_state_error-001.phpt0000644000076500000240000000111313572250760023737 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128::__set_state() requires "dec" string field --SKIPIF-- --FILE-- 0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Decimal128 initialization requires "dec" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128-set_state_error-002.phpt0000644000076500000240000000107313572250760023745 0ustar 
alcaeusstaff--TEST-- MongoDB\BSON\Decimal128::__set_state() requires valid decimal string --SKIPIF-- --FILE-- 'INVALID']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing Decimal128 string: INVALID ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128_error-001.phpt0000644000076500000240000000107513572250760021755 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 requires valid decimal string --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Decimal128::__construct() expects parameter 1 to be string, array given ===DONE=== mongodb-1.6.1/tests/bson/bson-decimal128_error-002.phpt0000644000076500000240000000042313572250760021752 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128 cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyDecimal128 may not inherit from final class (MongoDB\BSON\Decimal128) in %s on line %d mongodb-1.6.1/tests/bson/bson-decimal128interface-001.phpt0000644000076500000240000000043313572250760022422 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Decimal128Interface is implemented by MongoDB\BSON\Decimal128 --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-decode-001.phpt0000644000076500000240000000372513572250760020142 0ustar alcaeusstaff--TEST-- BSON encoding: Encoding data into BSON representation, and BSON into Extended JSON --FILE-- "world"), array((object)array("hello" => "world")), array(null), array(123), array(4.125), array(true), array(false), array("string"), array("string", true), array('test', 'foo', 'bar'), array('test' => 'test', 'foo' => 'foo', 'bar' => 'bar'), array('foo' => 'test', 'foo', 'bar'), /* (object)array("hello" => "world"), array(array("hello" => "world")), array(array(1, 2, 3, 4, 5, 6, 7, 8, 9)), array((object)array(1, 2, 3, 4, 5, 6, 7, 8, 9)), array(array("0" => 1, "1" => 2, "2" => 3, "3" => 4, "4" => 5, "5" => 6, "6" => 7, "7" => 8, "8" => 9)), array("int" => 3, "boolean" => true, "array" => array("foo", "bar"), "object" => new stdclass, "string" => "test", 3 => "test"), array(array("string", true)), array(array('test', 'foo', 'bar')), array(array('test' => 'test', 'foo' => 'foo', 'bar' => 'bar')), array(array('foo' => 'test', 'foo', 'bar')), array(array("int" => 3, "boolean" => true, "array" => array("foo", "bar"), "object" => new stdclass, "string" => "test", 3 => "test")), */ ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", toJSON($s), "\n"; $val = toPHP($s); if ($val == (object) $test) { echo "OK\n"; } else { var_dump($val, $test); } } ?> ===DONE=== --EXPECT-- Test#0 { "hello" : "world" } OK Test#1 { "0" : { "hello" : "world" } } OK Test#2 { "0" : null } OK Test#3 { "0" : 123 } OK Test#4 { "0" : 4.125 } OK Test#5 { "0" : true } OK Test#6 { "0" : false } OK Test#7 { "0" : "string" } OK Test#8 { "0" : "string", "1" : true } OK Test#9 { "0" : "test", "1" : "foo", "2" : "bar" } OK Test#10 { "test" : "test", "foo" : "foo", "bar" : "bar" } OK Test#11 { "foo" : "test", "0" : "foo", "1" : "bar" } OK ===DONE=== mongodb-1.6.1/tests/bson/bson-decode-002.phpt0000644000076500000240000000516313572250760020141 0ustar alcaeusstaff--TEST-- BSON encoding: Encoding object/arrays data into user specificied classes --FILE-- "world")), array((object)array("hello" => "world")), array("my" => array("hello" => "world")), array("my" => (object)array("hello" => "world")), array("my" => array(array("hello", "world"))), 
array("my" => (object)array(array("hello", "world"))), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", toJSON($s), "\n"; $val = toPHP($s, array("root"=> "MyArrayObject", "document"=> "MyArrayObject", "array" => "MyArrayObject")); var_dump($val); } ?> ===DONE=== --EXPECTF-- Test#%d { "0" : { "hello" : "world" } } object(MyArrayObject)#%d (1) { [%s]=> array(1) { [0]=> object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["hello"]=> string(5) "world" } } } } Test#%d { "0" : { "hello" : "world" } } object(MyArrayObject)#%d (1) { [%s]=> array(1) { [0]=> object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["hello"]=> string(5) "world" } } } } Test#%d { "my" : { "hello" : "world" } } object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["my"]=> object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["hello"]=> string(5) "world" } } } } Test#%d { "my" : { "hello" : "world" } } object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["my"]=> object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["hello"]=> string(5) "world" } } } } Test#%d { "my" : [ [ "hello", "world" ] ] } object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["my"]=> object(MyArrayObject)#%d (1) { [%s]=> array(1) { [0]=> object(MyArrayObject)#%d (1) { [%s]=> array(2) { [0]=> string(5) "hello" [1]=> string(5) "world" } } } } } } Test#%d { "my" : { "0" : [ "hello", "world" ] } } object(MyArrayObject)#%d (1) { [%s]=> array(1) { ["my"]=> object(MyArrayObject)#%d (1) { [%s]=> array(1) { [0]=> object(MyArrayObject)#%d (1) { [%s]=> array(2) { [0]=> string(5) "hello" [1]=> string(5) "world" } } } } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-encode-001.phpt0000644000076500000240000001675613572250760020164 0ustar alcaeusstaff--TEST-- BSON encoding: Encoding data into BSON representation, and BSON into Extended JSON --FILE-- "world"), (object)array("hello" => "world"), array(array("hello" => "world")), array((object)array("hello" => "world")), array(array(1, 2, 3, 4, 5, 6, 7, 8, 9)), array((object)array(1, 2, 3, 4, 5, 6, 7, 8, 9)), array(array("0" => 1, "1" => 2, "2" => 3, "3" => 4, "4" => 5, "5" => 6, "6" => 7, "7" => 8, "8" => 9)), array(null), array(123), array(4.125), array(true), array(false), array("string"), array("string", true), array('test', 'foo', 'bar'), array('test' => 'test', 'foo' => 'foo', 'bar' => 'bar'), array('foo' => 'test', 'foo', 'bar'), array("int" => 3, "boolean" => true, "array" => array("foo", "bar"), "object" => new stdclass, "string" => "test", 3 => "test"), array(array("string", true)), array(array('test', 'foo', 'bar')), array(array('test' => 'test', 'foo' => 'foo', 'bar' => 'bar')), array(array('foo' => 'test', 'foo', 'bar')), array(array("int" => 3, "boolean" => true, "array" => array("foo", "bar"), "object" => new stdclass, "string" => "test", 3 => "test")), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", toJSON($s), "\n"; hex_dump($s); } ?> ===DONE=== --EXPECT-- Test#0 { "hello" : "world" } 0 : 16 00 00 00 02 68 65 6c 6c 6f 00 06 00 00 00 77 [.....hello.....w] 10 : 6f 72 6c 64 00 00 [orld..] Test#1 { "hello" : "world" } 0 : 16 00 00 00 02 68 65 6c 6c 6f 00 06 00 00 00 77 [.....hello.....w] 10 : 6f 72 6c 64 00 00 [orld..] Test#2 { "0" : { "hello" : "world" } } 0 : 1e 00 00 00 03 30 00 16 00 00 00 02 68 65 6c 6c [.....0......hell] 10 : 6f 00 06 00 00 00 77 6f 72 6c 64 00 00 00 [o.....world...] Test#3 { "0" : { "hello" : "world" } } 0 : 1e 00 00 00 03 30 00 16 00 00 00 02 68 65 6c 6c [.....0......hell] 10 : 6f 00 06 00 00 00 77 6f 72 6c 64 00 00 00 [o.....world...] 
Test#4 { "0" : [ 1, 2, 3, 4, 5, 6, 7, 8, 9 ] } 0 : 4c 00 00 00 04 30 00 44 00 00 00 10 30 00 01 00 [L....0.D....0...] 10 : 00 00 10 31 00 02 00 00 00 10 32 00 03 00 00 00 [...1......2.....] 20 : 10 33 00 04 00 00 00 10 34 00 05 00 00 00 10 35 [.3......4......5] 30 : 00 06 00 00 00 10 36 00 07 00 00 00 10 37 00 08 [......6......7..] 40 : 00 00 00 10 38 00 09 00 00 00 00 00 [....8.......] Test#5 { "0" : { "0" : 1, "1" : 2, "2" : 3, "3" : 4, "4" : 5, "5" : 6, "6" : 7, "7" : 8, "8" : 9 } } 0 : 4c 00 00 00 03 30 00 44 00 00 00 10 30 00 01 00 [L....0.D....0...] 10 : 00 00 10 31 00 02 00 00 00 10 32 00 03 00 00 00 [...1......2.....] 20 : 10 33 00 04 00 00 00 10 34 00 05 00 00 00 10 35 [.3......4......5] 30 : 00 06 00 00 00 10 36 00 07 00 00 00 10 37 00 08 [......6......7..] 40 : 00 00 00 10 38 00 09 00 00 00 00 00 [....8.......] Test#6 { "0" : [ 1, 2, 3, 4, 5, 6, 7, 8, 9 ] } 0 : 4c 00 00 00 04 30 00 44 00 00 00 10 30 00 01 00 [L....0.D....0...] 10 : 00 00 10 31 00 02 00 00 00 10 32 00 03 00 00 00 [...1......2.....] 20 : 10 33 00 04 00 00 00 10 34 00 05 00 00 00 10 35 [.3......4......5] 30 : 00 06 00 00 00 10 36 00 07 00 00 00 10 37 00 08 [......6......7..] 40 : 00 00 00 10 38 00 09 00 00 00 00 00 [....8.......] Test#7 { "0" : null } 0 : 08 00 00 00 0a 30 00 00 [.....0..] Test#8 { "0" : 123 } 0 : 0c 00 00 00 10 30 00 7b 00 00 00 00 [.....0.{....] Test#9 { "0" : 4.125 } 0 : 10 00 00 00 01 30 00 00 00 00 00 00 80 10 40 00 [.....0........@.] Test#10 { "0" : true } 0 : 09 00 00 00 08 30 00 01 00 [.....0...] Test#11 { "0" : false } 0 : 09 00 00 00 08 30 00 00 00 [.....0...] Test#12 { "0" : "string" } 0 : 13 00 00 00 02 30 00 07 00 00 00 73 74 72 69 6e [.....0.....strin] 10 : 67 00 00 [g..] Test#13 { "0" : "string", "1" : true } 0 : 17 00 00 00 02 30 00 07 00 00 00 73 74 72 69 6e [.....0.....strin] 10 : 67 00 08 31 00 01 00 [g..1...] Test#14 { "0" : "test", "1" : "foo", "2" : "bar" } 0 : 27 00 00 00 02 30 00 05 00 00 00 74 65 73 74 00 ['....0.....test.] 10 : 02 31 00 04 00 00 00 66 6f 6f 00 02 32 00 04 00 [.1.....foo..2...] 20 : 00 00 62 61 72 00 00 [..bar..] Test#15 { "test" : "test", "foo" : "foo", "bar" : "bar" } 0 : 2e 00 00 00 02 74 65 73 74 00 05 00 00 00 74 65 [.....test.....te] 10 : 73 74 00 02 66 6f 6f 00 04 00 00 00 66 6f 6f 00 [st..foo.....foo.] 20 : 02 62 61 72 00 04 00 00 00 62 61 72 00 00 [.bar.....bar..] Test#16 { "foo" : "test", "0" : "foo", "1" : "bar" } 0 : 29 00 00 00 02 66 6f 6f 00 05 00 00 00 74 65 73 [)....foo.....tes] 10 : 74 00 02 30 00 04 00 00 00 66 6f 6f 00 02 31 00 [t..0.....foo..1.] 20 : 04 00 00 00 62 61 72 00 00 [....bar..] Test#17 { "int" : 3, "boolean" : true, "array" : [ "foo", "bar" ], "object" : { }, "string" : "test", "3" : "test" } 0 : 64 00 00 00 10 69 6e 74 00 03 00 00 00 08 62 6f [d....int......bo] 10 : 6f 6c 65 61 6e 00 01 04 61 72 72 61 79 00 1b 00 [olean...array...] 20 : 00 00 02 30 00 04 00 00 00 66 6f 6f 00 02 31 00 [...0.....foo..1.] 30 : 04 00 00 00 62 61 72 00 00 03 6f 62 6a 65 63 74 [....bar...object] 40 : 00 05 00 00 00 00 02 73 74 72 69 6e 67 00 05 00 [.......string...] 50 : 00 00 74 65 73 74 00 02 33 00 05 00 00 00 74 65 [..test..3.....te] 60 : 73 74 00 00 [st..] Test#18 { "0" : [ "string", true ] } 0 : 1f 00 00 00 04 30 00 17 00 00 00 02 30 00 07 00 [.....0......0...] 10 : 00 00 73 74 72 69 6e 67 00 08 31 00 01 00 00 [..string..1....] Test#19 { "0" : [ "test", "foo", "bar" ] } 0 : 2f 00 00 00 04 30 00 27 00 00 00 02 30 00 05 00 [/....0.'....0...] 
10 : 00 00 74 65 73 74 00 02 31 00 04 00 00 00 66 6f [..test..1.....fo] 20 : 6f 00 02 32 00 04 00 00 00 62 61 72 00 00 00 [o..2.....bar...] Test#20 { "0" : { "test" : "test", "foo" : "foo", "bar" : "bar" } } 0 : 36 00 00 00 03 30 00 2e 00 00 00 02 74 65 73 74 [6....0......test] 10 : 00 05 00 00 00 74 65 73 74 00 02 66 6f 6f 00 04 [.....test..foo..] 20 : 00 00 00 66 6f 6f 00 02 62 61 72 00 04 00 00 00 [...foo..bar.....] 30 : 62 61 72 00 00 00 [bar...] Test#21 { "0" : { "foo" : "test", "0" : "foo", "1" : "bar" } } 0 : 31 00 00 00 03 30 00 29 00 00 00 02 66 6f 6f 00 [1....0.)....foo.] 10 : 05 00 00 00 74 65 73 74 00 02 30 00 04 00 00 00 [....test..0.....] 20 : 66 6f 6f 00 02 31 00 04 00 00 00 62 61 72 00 00 [foo..1.....bar..] 30 : 00 [.] Test#22 { "0" : { "int" : 3, "boolean" : true, "array" : [ "foo", "bar" ], "object" : { }, "string" : "test", "3" : "test" } } 0 : 6c 00 00 00 03 30 00 64 00 00 00 10 69 6e 74 00 [l....0.d....int.] 10 : 03 00 00 00 08 62 6f 6f 6c 65 61 6e 00 01 04 61 [.....boolean...a] 20 : 72 72 61 79 00 1b 00 00 00 02 30 00 04 00 00 00 [rray......0.....] 30 : 66 6f 6f 00 02 31 00 04 00 00 00 62 61 72 00 00 [foo..1.....bar..] 40 : 03 6f 62 6a 65 63 74 00 05 00 00 00 00 02 73 74 [.object.......st] 50 : 72 69 6e 67 00 05 00 00 00 74 65 73 74 00 02 33 [ring.....test..3] 60 : 00 05 00 00 00 74 65 73 74 00 00 00 [.....test...] ===DONE=== mongodb-1.6.1/tests/bson/bson-encode-002.phpt0000644000076500000240000000734313572250760020155 0ustar alcaeusstaff--TEST-- BSON encoding: Encoding objects into BSON representation --FILE-- "class", "data"); } public function bsonUnserialize(array $data) { echo __METHOD__, "() was called with data:\n"; var_dump($data); } } class NumericArray implements MongoDB\BSON\Serializable, MongoDB\BSON\Unserializable { public function bsonSerialize() { return array(1, 2, 3); } public function bsonUnserialize(array $data) { echo __METHOD__, "() was called with data:\n"; var_dump($data); } } echo "Testing top-level AssociativeArray:\n"; $bson = fromPHP(new AssociativeArray); echo toJSON($bson), "\n"; echo "Encoded BSON:\n"; hex_dump($bson); $value = toPHP($bson, array("root" => 'AssociativeArray')); echo "Decoded BSON:\n"; var_dump($value); echo "\nTesting embedded AssociativeArray:\n"; $bson = fromPHP(array('embed' => new AssociativeArray)); echo toJSON($bson), "\n"; echo "Encoded BSON:\n"; hex_dump($bson); $value = toPHP($bson, array("document" => 'AssociativeArray')); echo "Decoded BSON:\n"; var_dump($value); echo "\nTesting top-level NumericArray:\n"; $bson = fromPHP(new NumericArray); echo toJSON($bson), "\n"; echo "Encoded BSON:\n"; hex_dump($bson); $value = toPHP($bson, array("root" => 'NumericArray')); echo "Decoded BSON:\n"; var_dump($value); echo "\nTesting embedded NumericArray:\n"; $bson = fromPHP(array('embed' => new NumericArray)); echo toJSON($bson), "\n"; echo "Encoded BSON:\n"; hex_dump($bson); $value = toPHP($bson, array("array" => 'NumericArray')); echo "Decoded BSON:\n"; var_dump($value); ?> ===DONE=== --EXPECTF-- Testing top-level AssociativeArray: { "random" : "class", "0" : "data" } Encoded BSON: 0 : 23 00 00 00 02 72 61 6e 64 6f 6d 00 06 00 00 00 [#....random.....] 10 : 63 6c 61 73 73 00 02 30 00 05 00 00 00 64 61 74 [class..0.....dat] 20 : 61 00 00 [a..] 
AssociativeArray::bsonUnserialize() was called with data: array(2) { ["random"]=> string(5) "class" [0]=> string(4) "data" } Decoded BSON: object(AssociativeArray)#%d (0) { } Testing embedded AssociativeArray: { "embed" : { "random" : "class", "0" : "data" } } Encoded BSON: 0 : 2f 00 00 00 03 65 6d 62 65 64 00 23 00 00 00 02 [/....embed.#....] 10 : 72 61 6e 64 6f 6d 00 06 00 00 00 63 6c 61 73 73 [random.....class] 20 : 00 02 30 00 05 00 00 00 64 61 74 61 00 00 00 [..0.....data...] AssociativeArray::bsonUnserialize() was called with data: array(2) { ["random"]=> string(5) "class" [0]=> string(4) "data" } Decoded BSON: object(stdClass)#%d (1) { ["embed"]=> object(AssociativeArray)#%d (0) { } } Testing top-level NumericArray: { "0" : 1, "1" : 2, "2" : 3 } Encoded BSON: 0 : 1a 00 00 00 10 30 00 01 00 00 00 10 31 00 02 00 [.....0......1...] 10 : 00 00 10 32 00 03 00 00 00 00 [...2......] NumericArray::bsonUnserialize() was called with data: array(3) { [0]=> int(1) [1]=> int(2) [2]=> int(3) } Decoded BSON: object(NumericArray)#%d (0) { } Testing embedded NumericArray: { "embed" : [ 1, 2, 3 ] } Encoded BSON: 0 : 26 00 00 00 04 65 6d 62 65 64 00 1a 00 00 00 10 [&....embed......] 10 : 30 00 01 00 00 00 10 31 00 02 00 00 00 10 32 00 [0......1......2.] 20 : 03 00 00 00 00 00 [......] NumericArray::bsonUnserialize() was called with data: array(3) { [0]=> int(1) [1]=> int(2) [2]=> int(3) } Decoded BSON: object(stdClass)#%d (1) { ["embed"]=> object(NumericArray)#%d (0) { } } ===DONE=== mongodb-1.6.1/tests/bson/bson-encode-003.phpt0000644000076500000240000001016313572250760020150 0ustar alcaeusstaff--TEST-- BSON encoding: Encoding objects into BSON representation --FILE-- "class", "data" ); } function bsonUnserialize(array $data) { $this->props = $data; } } class MyClass2 implements MongoDB\BSON\Persistable { function bsonSerialize() { return array( 1, 2, 3, ); } function bsonUnserialize(array $data) { $this->props = $data; } } $tests = array( array("stuff" => new MyClass), array("stuff" => new MyClass2), array("stuff" => array(new MyClass, new MyClass2)), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", toJSON($s), "\n"; hex_dump($s); $ret = toPHP($s); var_dump($ret); } ?> ===DONE=== --EXPECTF-- Test#0 { "stuff" : { "__pclass" : { "$binary" : "TXlDbGFzcw==", "$type" : "80" }, "random" : "class", "0" : "data" } } 0 : 45 00 00 00 03 73 74 75 66 66 00 39 00 00 00 05 [E....stuff.9....] 10 : 5f 5f 70 63 6c 61 73 73 00 07 00 00 00 80 4d 79 [__pclass......My] 20 : 43 6c 61 73 73 02 72 61 6e 64 6f 6d 00 06 00 00 [Class.random....] 30 : 00 63 6c 61 73 73 00 02 30 00 05 00 00 00 64 61 [.class..0.....da] 40 : 74 61 00 00 00 [ta...] object(stdClass)#%d (1) { ["stuff"]=> object(MyClass)#%d (1) { ["props"]=> array(3) { ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(7) "MyClass" ["type"]=> int(128) } ["random"]=> string(5) "class" [0]=> string(4) "data" } } } Test#1 { "stuff" : { "__pclass" : { "$binary" : "TXlDbGFzczI=", "$type" : "80" }, "0" : 1, "1" : 2, "2" : 3 } } 0 : 3d 00 00 00 03 73 74 75 66 66 00 31 00 00 00 05 [=....stuff.1....] 10 : 5f 5f 70 63 6c 61 73 73 00 08 00 00 00 80 4d 79 [__pclass......My] 20 : 43 6c 61 73 73 32 10 30 00 01 00 00 00 10 31 00 [Class2.0......1.] 30 : 02 00 00 00 10 32 00 03 00 00 00 00 00 [.....2.......] 
object(stdClass)#%d (1) { ["stuff"]=> object(MyClass2)#%d (1) { ["props"]=> array(4) { ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(8) "MyClass2" ["type"]=> int(128) } [0]=> int(1) [1]=> int(2) [2]=> int(3) } } } Test#2 { "stuff" : [ { "__pclass" : { "$binary" : "TXlDbGFzcw==", "$type" : "80" }, "random" : "class", "0" : "data" }, { "__pclass" : { "$binary" : "TXlDbGFzczI=", "$type" : "80" }, "0" : 1, "1" : 2, "2" : 3 } ] } 0 : 81 00 00 00 04 73 74 75 66 66 00 75 00 00 00 03 [.....stuff.u....] 10 : 30 00 39 00 00 00 05 5f 5f 70 63 6c 61 73 73 00 [0.9....__pclass.] 20 : 07 00 00 00 80 4d 79 43 6c 61 73 73 02 72 61 6e [.....MyClass.ran] 30 : 64 6f 6d 00 06 00 00 00 63 6c 61 73 73 00 02 30 [dom.....class..0] 40 : 00 05 00 00 00 64 61 74 61 00 00 03 31 00 31 00 [.....data...1.1.] 50 : 00 00 05 5f 5f 70 63 6c 61 73 73 00 08 00 00 00 [...__pclass.....] 60 : 80 4d 79 43 6c 61 73 73 32 10 30 00 01 00 00 00 [.MyClass2.0.....] 70 : 10 31 00 02 00 00 00 10 32 00 03 00 00 00 00 00 [.1......2.......] 80 : 00 [.] object(stdClass)#%d (1) { ["stuff"]=> array(2) { [0]=> object(MyClass)#%d (1) { ["props"]=> array(3) { ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(7) "MyClass" ["type"]=> int(128) } ["random"]=> string(5) "class" [0]=> string(4) "data" } } [1]=> object(MyClass2)#%d (1) { ["props"]=> array(4) { ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(8) "MyClass2" ["type"]=> int(128) } [0]=> int(1) [1]=> int(2) [2]=> int(3) } } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-encode-004.phpt0000644000076500000240000001041013572250760020144 0ustar alcaeusstaff--TEST-- BSON encoding: Object Document Mapper --FILE-- addAddress($sunnyvale); $hannes->addAddress($kopavogur); $mikola = new Person("Jeremy", 21); $michigan = new Address(48169, "USA"); $hannes->addFriend($mikola); var_dump($hannes); $s = fromPHP(array($hannes)); echo "Test ", toJSON($s), "\n"; hex_dump($s); $ret = toPHP($s); var_dump($ret); ?> ===DONE=== --EXPECTF-- object(Person)#%d (5) { ["name":protected]=> string(6) "Hannes" ["age":protected]=> int(42) ["addresses":protected]=> array(2) { [0]=> object(Address)#%d (2) { ["zip":protected]=> int(94086) ["country":protected]=> string(3) "USA" } [1]=> object(Address)#%d (2) { ["zip":protected]=> int(200) ["country":protected]=> string(7) "Iceland" } } ["friends":protected]=> array(1) { [0]=> object(Person)#%d (5) { ["name":protected]=> string(6) "Jeremy" ["age":protected]=> int(21) ["addresses":protected]=> array(0) { } ["friends":protected]=> array(0) { } ["secret":protected]=> string(24) "Jeremy confidential info" } } ["secret":protected]=> string(24) "Hannes confidential info" } Test { "0" : { "__pclass" : { "$binary" : "UGVyc29u", "$type" : "80" }, "name" : "Hannes", "age" : 42, "addresses" : [ { "__pclass" : { "$binary" : "QWRkcmVzcw==", "$type" : "80" }, "zip" : 94086, "country" : "USA" }, { "__pclass" : { "$binary" : "QWRkcmVzcw==", "$type" : "80" }, "zip" : 200, "country" : "Iceland" } ], "friends" : [ { "__pclass" : { "$binary" : "UGVyc29u", "$type" : "80" }, "name" : "Jeremy", "age" : 21, "addresses" : [ ], "friends" : [ ] } ] } } 0 : 23 01 00 00 03 30 00 1b 01 00 00 05 5f 5f 70 63 [#....0......__pc] 10 : 6c 61 73 73 00 06 00 00 00 80 50 65 72 73 6f 6e [lass......Person] 20 : 02 6e 61 6d 65 00 07 00 00 00 48 61 6e 6e 65 73 [.name.....Hannes] 30 : 00 10 61 67 65 00 2a 00 00 00 04 61 64 64 72 65 [..age.*....addre] 40 : 73 73 65 73 00 79 00 00 00 03 30 00 35 00 00 00 [sses.y....0.5...] 
50 : 05 5f 5f 70 63 6c 61 73 73 00 07 00 00 00 80 41 [.__pclass......A] 60 : 64 64 72 65 73 73 10 7a 69 70 00 86 6f 01 00 02 [ddress.zip..o...] 70 : 63 6f 75 6e 74 72 79 00 04 00 00 00 55 53 41 00 [country.....USA.] 80 : 00 03 31 00 39 00 00 00 05 5f 5f 70 63 6c 61 73 [..1.9....__pclas] 90 : 73 00 07 00 00 00 80 41 64 64 72 65 73 73 10 7a [s......Address.z] A0 : 69 70 00 c8 00 00 00 02 63 6f 75 6e 74 72 79 00 [ip......country.] B0 : 08 00 00 00 49 63 65 6c 61 6e 64 00 00 00 04 66 [....Iceland....f] C0 : 72 69 65 6e 64 73 00 5a 00 00 00 03 30 00 52 00 [riends.Z....0.R.] D0 : 00 00 05 5f 5f 70 63 6c 61 73 73 00 06 00 00 00 [...__pclass.....] E0 : 80 50 65 72 73 6f 6e 02 6e 61 6d 65 00 07 00 00 [.Person.name....] F0 : 00 4a 65 72 65 6d 79 00 10 61 67 65 00 15 00 00 [.Jeremy..age....] 100 : 00 04 61 64 64 72 65 73 73 65 73 00 05 00 00 00 [..addresses.....] 110 : 00 04 66 72 69 65 6e 64 73 00 05 00 00 00 00 00 [..friends.......] 120 : 00 00 00 [...] object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(Person)#%d (5) { ["name":protected]=> string(6) "Hannes" ["age":protected]=> int(42) ["addresses":protected]=> array(2) { [0]=> object(Address)#%d (2) { ["zip":protected]=> int(94086) ["country":protected]=> string(3) "USA" } [1]=> object(Address)#%d (2) { ["zip":protected]=> int(200) ["country":protected]=> string(7) "Iceland" } } ["friends":protected]=> array(1) { [0]=> object(Person)#%d (5) { ["name":protected]=> string(6) "Jeremy" ["age":protected]=> int(21) ["addresses":protected]=> array(0) { } ["friends":protected]=> array(0) { } ["secret":protected]=> string(4) "none" } } ["secret":protected]=> string(4) "none" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-encode-005.phpt0000644000076500000240000000133713572250760020155 0ustar alcaeusstaff--TEST-- BSON encoding: Object Document Mapper --FILE-- array(), "emptyclass" => new stdclass, ); $s = fromPHP($data); echo "Test ", toJSON($s), "\n"; hex_dump($s); $ret = toPHP($s); var_dump($ret); ?> ===DONE=== --EXPECTF-- Test { "emptyarray" : [ ], "emptyclass" : { } } 0 : 27 00 00 00 04 65 6d 70 74 79 61 72 72 61 79 00 ['....emptyarray.] 10 : 05 00 00 00 00 03 65 6d 70 74 79 63 6c 61 73 73 [......emptyclass] 20 : 00 05 00 00 00 00 00 [.......] object(stdClass)#%d (2) { ["emptyarray"]=> array(0) { } ["emptyclass"]=> object(stdClass)#%d (0) { } } ===DONE=== mongodb-1.6.1/tests/bson/bson-fromJSON-001.phpt0000644000076500000240000000202213572250760020341 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromJSON(): Decoding JSON --FILE-- ===DONE=== --EXPECT-- Test {} 0 : 05 00 00 00 00 [.....] Test { "foo": "bar" } 0 : 12 00 00 00 02 66 6f 6f 00 04 00 00 00 62 61 72 [.....foo.....bar] 10 : 00 00 [..] Test { "foo": [ 1, 2, 3 ]} 0 : 24 00 00 00 04 66 6f 6f 00 1a 00 00 00 10 30 00 [$....foo......0.] 10 : 01 00 00 00 10 31 00 02 00 00 00 10 32 00 03 00 [.....1......2...] 20 : 00 00 00 00 [....] Test { "foo": { "bar": 1 }} 0 : 18 00 00 00 03 66 6f 6f 00 0e 00 00 00 10 62 61 [.....foo......ba] 10 : 72 00 01 00 00 00 00 00 [r.......] ===DONE=== mongodb-1.6.1/tests/bson/bson-fromJSON-002.phpt0000644000076500000240000000435013572250760020350 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromJSON(): Decoding extended JSON types --FILE-- ===DONE=== --EXPECT-- Test { "_id": { "$oid": "56315a7c6118fd1b920270b1" }} 0 : 16 00 00 00 07 5f 69 64 00 56 31 5a 7c 61 18 fd [....._id.V1Z|a..] 10 : 1b 92 02 70 b1 00 [...p..] Test { "binary": { "$binary": "Zm9v", "$type": "00" }} 0 : 15 00 00 00 05 62 69 6e 61 72 79 00 03 00 00 00 [.....binary.....] 10 : 00 66 6f 6f 00 [.foo.] 
Test { "date": { "$date": "2015-10-28T00:00:00Z" }} 0 : 13 00 00 00 09 64 61 74 65 00 00 80 be ab 50 01 [.....date.....P.] 10 : 00 00 00 [...] Test { "timestamp": { "$timestamp": { "t": 1446084619, "i": 0 }}} 0 : 18 00 00 00 11 74 69 6d 65 73 74 61 6d 70 00 00 [.....timestamp..] 10 : 00 00 00 0b 80 31 56 00 [.....1V.] Test { "regex": { "$regex": "pattern", "$options": "i" }} 0 : 16 00 00 00 0b 72 65 67 65 78 00 70 61 74 74 65 [.....regex.patte] 10 : 72 6e 00 69 00 00 [rn.i..] Test { "undef": { "$undefined": true }} 0 : 0c 00 00 00 06 75 6e 64 65 66 00 00 [.....undef..] Test { "minkey": { "$minKey": 1 }} 0 : 0d 00 00 00 ff 6d 69 6e 6b 65 79 00 00 [.....minkey..] Test { "maxkey": { "$maxKey": 1 }} 0 : 0d 00 00 00 7f 6d 61 78 6b 65 79 00 00 [.....maxkey..] Test { "long": { "$numberLong": "1234" }} 0 : 13 00 00 00 12 6c 6f 6e 67 00 d2 04 00 00 00 00 [.....long.......] 10 : 00 00 00 [...] ===DONE=== mongodb-1.6.1/tests/bson/bson-fromJSON_error-001.phpt0000644000076500000240000000050113572250760021552 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromJSON(): invalid JSON --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP-001.phpt0000644000076500000240000000254713572250760020233 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): bsonSerialize() allows array and stdClass --FILE-- data = $data; } public function bsonSerialize() { return $this->data; } } $tests = array( array(1, 2, 3), array('foo' => 'bar'), (object) array(1, 2, 3), (object) array('foo' => 'bar'), ); echo "Testing top-level objects\n"; foreach ($tests as $test) { try { echo toJson(fromPHP(new MyDocument($test))), "\n"; } catch (MongoDB\Driver\Exception\UnexpectedValueException $e) { echo $e->getMessage(), "\n"; } } echo "\nTesting nested objects\n"; foreach ($tests as $test) { try { echo toJson(fromPHP(new MyDocument(array('nested' => new MyDocument($test))))), "\n"; } catch (MongoDB\Driver\Exception\UnexpectedValueException $e) { echo $e->getMessage(), "\n"; } } ?> ===DONE=== --EXPECT-- Testing top-level objects { "0" : 1, "1" : 2, "2" : 3 } { "foo" : "bar" } { "0" : 1, "1" : 2, "2" : 3 } { "foo" : "bar" } Testing nested objects { "nested" : [ 1, 2, 3 ] } { "nested" : { "foo" : "bar" } } { "nested" : { "0" : 1, "1" : 2, "2" : 3 } } { "nested" : { "foo" : "bar" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP-002.phpt0000644000076500000240000000076313572250760020232 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): Encoding non-Persistable objects as a document --INI-- date.timezone=America/Los_Angeles --FILE-- ===DONE=== --EXPECT-- Test { "baz" : 3 } 0 : 0e 00 00 00 10 62 61 7a 00 03 00 00 00 00 [.....baz......] ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP-003.phpt0000644000076500000240000000234013572250760020224 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): Encoding non-Persistable objects as a document field value --INI-- date.timezone=America/Los_Angeles --FILE-- new MongoDB\BSON\UTCDateTime('1416445411987')), array(new MyDocument), array('x' => new MyDocument), ); foreach ($tests as $document) { $s = fromPHP($document); echo "Test ", toJSON($s), "\n"; hex_dump($s); } ?> ===DONE=== --EXPECT-- Test { "0" : { "$date" : 1416445411987 } } 0 : 10 00 00 00 09 30 00 93 c2 b9 ca 49 01 00 00 00 [.....0.....I....] Test { "x" : { "$date" : 1416445411987 } } 0 : 10 00 00 00 09 78 00 93 c2 b9 ca 49 01 00 00 00 [.....x.....I....] Test { "0" : { "baz" : 3 } } 0 : 16 00 00 00 03 30 00 0e 00 00 00 10 62 61 7a 00 [.....0......baz.] 
10 : 03 00 00 00 00 00 [......] Test { "x" : { "baz" : 3 } } 0 : 16 00 00 00 03 78 00 0e 00 00 00 10 62 61 7a 00 [.....x......baz.] 10 : 03 00 00 00 00 00 [......] ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP-005.phpt0000644000076500000240000000105213572250760020225 0ustar alcaeusstaff--TEST-- BSON\fromPHP(): PHP document with public property whose name is an empty string --FILE-- 1], (object) ['' => 1], ]; foreach ($tests as $document) { $s = fromPHP($document); echo "Test ", toJSON($s), "\n"; hex_dump($s); } ?> ===DONE=== --EXPECT-- Test { "" : 1 } 0 : 0b 00 00 00 10 00 01 00 00 00 00 [...........] Test { "" : 1 } 0 : 0b 00 00 00 10 00 01 00 00 00 00 [...........] ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP-006.phpt0000644000076500000240000000153613572250760020235 0ustar alcaeusstaff--TEST-- BSON\fromPHP(): PHP documents with null bytes in field name --FILE-- 1])); echo "\nTesting object with multiple null bytes in field name\n"; hex_dump(fromPHP((object) ["\0\0\0" => 1])); ?> ===DONE=== --EXPECT-- Testing object with one leading null byte in field name 0 : 05 00 00 00 00 [.....] Testing object with multiple null bytes in field name 0 : 05 00 00 00 00 [.....] ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-001.phpt0000644000076500000240000000365413572250760021444 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): bsonSerialize() must return an array or stdClass --FILE-- data = $data; } public function bsonSerialize() { return $this->data; } } $invalidValues = array(null, 123, 'foo', true, new MyDocument); echo "Testing top-level objects\n"; foreach ($invalidValues as $invalidValue) { try { hex_dump(fromPHP(new MyDocument($invalidValue))); } catch (MongoDB\Driver\Exception\UnexpectedValueException $e) { echo $e->getMessage(), "\n"; } } echo "\nTesting nested objects\n"; foreach ($invalidValues as $invalidValue) { try { hex_dump(fromPHP(new MyDocument(array('nested' => new MyDocument($invalidValue))))); } catch (MongoDB\Driver\Exception\UnexpectedValueException $e) { echo $e->getMessage(), "\n"; } } ?> ===DONE=== --EXPECTF-- Testing top-level objects Expected MyDocument::bsonSerialize() to return an array or stdClass, %r(null|NULL)%r given Expected MyDocument::bsonSerialize() to return an array or stdClass, int%S given Expected MyDocument::bsonSerialize() to return an array or stdClass, string given Expected MyDocument::bsonSerialize() to return an array or stdClass, bool%S given Expected MyDocument::bsonSerialize() to return an array or stdClass, MyDocument given Testing nested objects Expected MyDocument::bsonSerialize() to return an array or stdClass, %r(null|NULL)%r given Expected MyDocument::bsonSerialize() to return an array or stdClass, int%S given Expected MyDocument::bsonSerialize() to return an array or stdClass, string given Expected MyDocument::bsonSerialize() to return an array or stdClass, bool%S given Expected MyDocument::bsonSerialize() to return an array or stdClass, MyDocument given ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-002.phpt0000644000076500000240000000134113572250760021434 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): Encoding unknown Type objects as a document field value --FILE-- new UnknownType()), ); foreach ($tests as $document) { echo throws(function() use ($document) { fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; } ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Unexpected MongoDB\BSON\Type instance: UnknownType OK: Got 
MongoDB\Driver\Exception\UnexpectedValueException Unexpected MongoDB\BSON\Type instance: UnknownType ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-003.phpt0000644000076500000240000000425013572250760021437 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): Encoding non-Serializable Type objects as a root element --INI-- date.timezone=America/Los_Angeles --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance UnknownType cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\Binary cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\Javascript cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\MinKey cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\MaxKey cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\ObjectId cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\Regex cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\Timestamp cannot be serialized as a root element OK: Got MongoDB\Driver\Exception\UnexpectedValueException MongoDB\BSON\Type instance MongoDB\BSON\UTCDateTime cannot be serialized as a root element ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-004.phpt0000644000076500000240000000521313572250760021440 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): PHP documents with circular references --FILE-- 1, 'y' => []]; $document['y'][] = &$document['y']; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting packed array with circular reference at 3rd position\n"; echo throws(function() { $document = ['x' => 1, 'y' => [1, 2, 3]]; $document['y'][] = &$document['y']; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting associative array with circular reference\n"; echo throws(function() { $document = ['x' => 1, 'y' => []]; $document['y']['z'] = &$document['y']; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting associative array and nested array with circular reference\n"; echo throws(function() { $document = ['x' => 1, 'y' => []]; $document['y'][0]['z'] = &$document['y']; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting object with circular reference\n"; echo throws(function() { $document = (object) ['x' => 1, 'y' => (object) []]; $document->y->z = &$document->y; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting nested object with circular reference\n"; echo throws(function() { $document = (object) ['x' => 1, 'y' => (object) ['z' => (object) []]]; $document->y->z->a = &$document->y; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- Testing packed array with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.0" Testing packed array with circular reference at 3rd 
position OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.3" Testing associative array with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.z" Testing associative array and nested array with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.0.z" Testing object with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.z" Testing nested object with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.z.a" ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-005.phpt0000644000076500000240000000243013572250760021437 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): Serializable with circular references --FILE-- $this]; } } echo "\nTesting Serializable with direct circular reference\n"; echo throws(function() { fromPHP(new MyRecursiveSerializable); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting Serializable with indirect circular reference\n"; echo throws(function() { fromPHP(new MyIndirectlyRecursiveSerializable); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- Testing Serializable with direct circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Expected MyRecursiveSerializable::bsonSerialize() to return an array or stdClass, MyRecursiveSerializable given Testing Serializable with indirect circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "parent.parent" ===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-006.phpt0000644000076500000240000000320413572250760021440 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): PHP documents with null bytes in field name --FILE-- 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting array with one trailing null byte in field name\n"; echo throws(function() { fromPHP(["a\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting array with multiple null bytes in field name\n"; echo throws(function() { fromPHP(["\0\0\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting object with one trailing null byte in field name\n"; echo throws(function() { fromPHP((object) ["a\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- Testing array with one leading null byte in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". Testing array with one trailing null byte in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "a". Testing array with multiple null bytes in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". Testing object with one trailing null byte in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "a". 
===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-007.phpt0000644000076500000240000000612013572250760021441 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): Serializable returns document with null bytes in field name --FILE-- data = $data; } public function bsonSerialize() { return $this->data; } } echo "\nTesting array with one leading null byte in field name\n"; echo throws(function() { fromPHP(new MySerializable(["\0" => 1])); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting array with one trailing null byte in field name\n"; echo throws(function() { fromPHP(new MySerializable(["a\0" => 1])); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting array with multiple null bytes in field name\n"; echo throws(function() { fromPHP(new MySerializable(["\0\0\0" => 1])); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; /* Per PHPC-884, field names with a leading null byte are ignored when encoding * a document from an object's property hash table, since PHP uses leading bytes * to denote protected and private properties. However, in this case the object * was returned from Serializable::bsonSerialize() and we skip the check for * protected and private properties. */ echo "\nTesting object with one leading null byte in field name\n"; echo throws(function() { fromPHP(new MySerializable((object) ["\0" => 1])); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting object with one trailing null byte in field name\n"; echo throws(function() { fromPHP(new MySerializable((object) ["a\0" => 1])); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting object with multiple null bytes in field name\n"; echo throws(function() { fromPHP(new MySerializable((object) ["\0\0\0" => 1])); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- Testing array with one leading null byte in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". Testing array with one trailing null byte in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "a". Testing array with multiple null bytes in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". Testing object with one leading null byte in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". Testing object with one trailing null byte in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "a". Testing object with multiple null bytes in field name OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". 
===DONE=== mongodb-1.6.1/tests/bson/bson-fromPHP_error-008.phpt0000644000076500000240000000256613572250760021454 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): PHP documents with circular references --FILE-- 1, 'y' => [1, 2, 3]]; $document['y'][] = &$document['y']; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting associative array with circular reference\n"; echo throws(function() { $document = ['x' => 1, 'y' => []]; $document['y'][0]['z'] = &$document['y']; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; echo "\nTesting object with circular reference\n"; echo throws(function() { $document = (object) ['x' => 1, 'y' => (object) []]; $document->y->z = &$document->y; fromPHP($document); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- Testing packed array with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.3" Testing associative array with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.0.z" Testing object with circular reference OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected recursion for field path "y.z" ===DONE=== mongodb-1.6.1/tests/bson/bson-generate-document-id.phpt0000644000076500000240000000273713572250760022423 0ustar alcaeusstaff--TEST-- _id should only be generated for top-level document, not embedded docs --SKIPIF-- --FILE-- "bob", "address" => array( "street" => "Main St.", "city" => "New York", ), ); $bulk = new MongoDB\Driver\BulkWrite(); $user["_id"] = $bulk->insert($user); $result = $manager->executeBulkWrite(NS, $bulk); echo "Dumping inserted user document with injected _id:\n"; var_dump($user); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("_id" => $user["_id"]))); echo "\nDumping fetched user document:\n"; $array = $cursor->toArray(); var_dump($array[0]); ?> ===DONE=== --EXPECTF-- Dumping inserted user document with injected _id: array(3) { ["username"]=> string(3) "bob" ["address"]=> array(2) { ["street"]=> string(8) "Main St." ["city"]=> string(8) "New York" } ["_id"]=> object(%s\ObjectId)#%d (%d) { ["oid"]=> string(24) "%s" } } Dumping fetched user document: object(stdClass)#%d (3) { ["_id"]=> object(%s\ObjectId)#%d (%d) { ["oid"]=> string(24) "%s" } ["username"]=> string(3) "bob" ["address"]=> object(stdClass)#%d (%d) { ["street"]=> string(8) "Main St." 
["city"]=> string(8) "New York" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-001.phpt0000644000076500000240000000337013572250760017657 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 roundtripped through BSON on 32-bit platforms --SKIPIF-- --FILE-- unserialize('C:18:"MongoDB\BSON\Int64":47:{a:1:{s:7:"integer";s:19:"9223372036854775807";}}')], (object) ['int64' => unserialize('C:18:"MongoDB\BSON\Int64":48:{a:1:{s:7:"integer";s:20:"-9223372036854775808";}}')], (object) ['int64' => unserialize('C:18:"MongoDB\BSON\Int64":38:{a:1:{s:7:"integer";s:10:"2147483648";}}')], (object) ['int64' => unserialize('C:18:"MongoDB\BSON\Int64":39:{a:1:{s:7:"integer";s:11:"-2147483649";}}')], ]; foreach($tests as $test) { $bson = fromPHP($test); $testRoundtripped = toPHP($bson); $bsonRoundtripped = fromPHP($testRoundtripped); var_dump($test->int64 instanceof MongoDB\BSON\Int64); var_dump($testRoundtripped->int64 instanceof MongoDB\BSON\Int64); var_dump(toJSON($bson), toJSON($bsonRoundtripped)); var_dump($test == $testRoundtripped); echo "\n"; } ?> ===DONE=== --EXPECT-- bool(true) bool(true) string(33) "{ "int64" : 9223372036854775807 }" string(33) "{ "int64" : 9223372036854775807 }" bool(true) bool(true) bool(true) string(34) "{ "int64" : -9223372036854775808 }" string(34) "{ "int64" : -9223372036854775808 }" bool(true) bool(true) bool(true) string(24) "{ "int64" : 2147483648 }" string(24) "{ "int64" : 2147483648 }" bool(true) bool(true) bool(true) string(25) "{ "int64" : -2147483649 }" string(25) "{ "int64" : -2147483649 }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-002.phpt0000644000076500000240000000340313572250760017655 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 wraps 64-bit integers on 32-bit platforms --SKIPIF-- --FILE-- ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["max64"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(19) "9223372036854775807" } } object(stdClass)#%d (%d) { ["min64"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(20) "-9223372036854775808" } } object(stdClass)#%d (%d) { ["max32+1"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(10) "2147483648" } } object(stdClass)#%d (%d) { ["min32-1"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(11) "-2147483649" } } object(stdClass)#%d (%d) { ["max32"]=> int(2147483647) } object(stdClass)#%d (%d) { ["min32"]=> int(-2147483648) } object(stdClass)#%d (%d) { ["zero"]=> int(0) } ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-003.phpt0000644000076500000240000000277113572250760017665 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 encoded as 64-bit integer in BSON --FILE-- unserialize('C:18:"MongoDB\BSON\Int64":47:{a:1:{s:7:"integer";s:19:"9223372036854775807";}}')], ['int64' => unserialize('C:18:"MongoDB\BSON\Int64":48:{a:1:{s:7:"integer";s:20:"-9223372036854775808";}}')], ['int64' => unserialize('C:18:"MongoDB\BSON\Int64":38:{a:1:{s:7:"integer";s:10:"2147483648";}}')], ['int64' => unserialize('C:18:"MongoDB\BSON\Int64":39:{a:1:{s:7:"integer";s:11:"-2147483649";}}')], ['int64' => unserialize('C:18:"MongoDB\BSON\Int64":28:{a:1:{s:7:"integer";s:1:"0";}}')], ]; foreach($tests as $test) { $bson = fromPHP($test); hex_dump($bson); echo "\n"; } ?> ===DONE=== --EXPECT-- 0 : 14 00 00 00 12 69 6e 74 36 34 00 ff ff ff ff ff [.....int64......] 10 : ff ff 7f 00 [....] 0 : 14 00 00 00 12 69 6e 74 36 34 00 00 00 00 00 00 [.....int64......] 10 : 00 00 80 00 [....] 0 : 14 00 00 00 12 69 6e 74 36 34 00 00 00 00 80 00 [.....int64......] 10 : 00 00 00 00 [....] 
0 : 14 00 00 00 12 69 6e 74 36 34 00 ff ff ff 7f ff [.....int64......] 10 : ff ff ff 00 [....] 0 : 14 00 00 00 12 69 6e 74 36 34 00 00 00 00 00 00 [.....int64......] 10 : 00 00 00 00 [....] ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-compare-001.phpt0000644000076500000240000000131513572250760021300 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 comparisons --FILE-- $min); var_dump($max > $zero); var_dump($min == $min); var_dump($min < $max); var_dump($min < $zero); var_dump($zero == $zero); var_dump($zero < $max); var_dump($zero > $min); ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-debug-001.phpt0000644000076500000240000000130613572250760020740 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 debug output --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(19) "9223372036854775807" } object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(20) "-9223372036854775808" } object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(1) "0" } ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-get_properties-001.phpt0000644000076500000240000000124413572250760022706 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 get_properties handler (get_object_vars) --FILE-- ===DONE=== --EXPECT-- array(1) { ["integer"]=> string(19) "9223372036854775807" } array(1) { ["integer"]=> string(20) "-9223372036854775808" } array(1) { ["integer"]=> string(1) "0" } ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-get_properties-002.phpt0000644000076500000240000000126513572250760022712 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 get_properties handler (foreach) --FILE-- $value) { var_dump($key); var_dump($value); } } ?> ===DONE=== --EXPECT-- string(7) "integer" string(19) "9223372036854775807" string(7) "integer" string(20) "-9223372036854775808" string(7) "integer" string(1) "0" ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-jsonserialize-001.phpt0000644000076500000240000000124513572250760022535 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64::jsonSerialize() return value --FILE-- jsonSerialize()); } ?> ===DONE=== --EXPECT-- array(1) { ["$numberLong"]=> string(19) "9223372036854775807" } array(1) { ["$numberLong"]=> string(20) "-9223372036854775808" } array(1) { ["$numberLong"]=> string(1) "0" } ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-jsonserialize-002.phpt0000644000076500000240000000133713572250760022540 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64::jsonSerialize() with json_encode() --FILE-- unserialize('C:18:"MongoDB\BSON\Int64":47:{a:1:{s:7:"integer";s:19:"9223372036854775807";}}')], ['min' => unserialize('C:18:"MongoDB\BSON\Int64":48:{a:1:{s:7:"integer";s:20:"-9223372036854775808";}}')], ['zero' => unserialize('C:18:"MongoDB\BSON\Int64":28:{a:1:{s:7:"integer";s:1:"0";}}')], ]; foreach ($tests as $test) { var_dump(json_encode($test)); } ?> ===DONE=== --EXPECT-- string(45) "{"max":{"$numberLong":"9223372036854775807"}}" string(46) "{"min":{"$numberLong":"-9223372036854775808"}}" string(28) "{"zero":{"$numberLong":"0"}}" ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-serialization-001.phpt0000644000076500000240000000243313572250760022531 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(19) "9223372036854775807" } string(78) "C:18:"MongoDB\BSON\Int64":47:{a:1:{s:7:"integer";s:19:"9223372036854775807";}}" object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> 
string(19) "9223372036854775807" } object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(20) "-9223372036854775808" } string(79) "C:18:"MongoDB\BSON\Int64":48:{a:1:{s:7:"integer";s:20:"-9223372036854775808";}}" object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(20) "-9223372036854775808" } object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(1) "0" } string(59) "C:18:"MongoDB\BSON\Int64":28:{a:1:{s:7:"integer";s:1:"0";}}" object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(1) "0" } ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-serialization_error-001.phpt0000644000076500000240000000073613572250760023746 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 unserialization requires "int" string field --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Int64 initialization requires "integer" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-serialization_error-002.phpt0000644000076500000240000000100613572250760023736 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 unserialization requires "int" string field to be valid --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "1234.5678" as 64-bit integer for MongoDB\BSON\Int64 initialization ===DONE=== mongodb-1.6.1/tests/bson/bson-int64-tostring-001.phpt0000644000076500000240000000104413572250760021522 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64::__toString() --FILE-- ===DONE=== --EXPECT-- string(19) "9223372036854775807" string(20) "-9223372036854775808" string(1) "0" ===DONE=== mongodb-1.6.1/tests/bson/bson-int64_error-001.phpt0000644000076500000240000000037213572250760021067 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Int64 cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyInt64 may not inherit from final class (MongoDB\BSON\Int64) in %s on line %d mongodb-1.6.1/tests/bson/bson-javascript-001.phpt0000644000076500000240000000136713572250760021065 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript #001 --FILE-- 42)); $tests = array( array("js" => $js), array("js" => $jswscope), ); foreach($tests as $n => $test) { echo "Test#{$n}", "\n"; $s = fromPHP($test); $testagain = toPHP($s); var_dump($test['js'] instanceof MongoDB\BSON\Javascript); var_dump($testagain->js instanceof MongoDB\BSON\Javascript); } ?> ===DONE=== --EXPECT-- Test#0 bool(true) bool(true) Test#1 bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-002.phpt0000644000076500000240000000223613572250760021062 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript debug handler --FILE-- 42), ), array( 'function foo() { return id; }', array('id' => new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603')), ), ); foreach ($tests as $test) { list($code, $scope) = $test; $js = new MongoDB\BSON\Javascript($code, $scope); var_dump($js); } ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> object(stdClass)#%d (%d) { } } object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(30) "function foo() { return foo; }" ["scope"]=> object(stdClass)#%d (%d) { ["foo"]=> int(42) } } object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(29) "function foo() { return id; }" ["scope"]=> object(stdClass)#%d (%d) { ["id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-compare-001.phpt0000644000076500000240000000103513572250760022501 0ustar 
alcaeusstaff--TEST-- MongoDB\BSON\Javascript comparisons (without scope) --FILE-- new MongoDB\BSON\Javascript('function() { return 0; }')); ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-compare-002.phpt0000644000076500000240000000122313572250760022501 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript comparisons (with scope) --FILE-- 1]) == new MongoDB\BSON\Javascript('function() { return 1; }', ['x' => 1])); var_dump(new MongoDB\BSON\Javascript('function() { return 1; }', ['x' => 1]) == new MongoDB\BSON\Javascript('function() { return 1; }', ['x' => 0])); var_dump(new MongoDB\BSON\Javascript('function() { return 1; }', ['x' => 1]) == new MongoDB\BSON\Javascript('function() { return 1; }', ['x' => 2])); ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-getCode-001.phpt0000644000076500000240000000131213572250760022423 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::getCode() --FILE-- 42]], ['function foo() { return id; }', ['id' => new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603')]], ]; foreach ($tests as $test) { list($code, $scope) = $test; $js = new MongoDB\BSON\Javascript($code, $scope); var_dump($js->getCode()); } ?> ===DONE=== --EXPECT-- string(33) "function foo(bar) { return bar; }" string(33) "function foo(bar) { return bar; }" string(30) "function foo() { return foo; }" string(29) "function foo() { return id; }" ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-getScope-001.phpt0000644000076500000240000000137313572250760022631 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::getScope() --FILE-- 42]], ['function foo() { return id; }', ['id' => new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603')]], ]; foreach ($tests as $test) { list($code, $scope) = $test; $js = new MongoDB\BSON\Javascript($code, $scope); var_dump($js->getScope()); } ?> ===DONE=== --EXPECTF-- NULL object(stdClass)#%d (%d) { } object(stdClass)#%d (%d) { ["foo"]=> int(42) } object(stdClass)#%d (%d) { ["id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-get_properties-001.phpt0000644000076500000240000000115613572250760024112 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript get_properties handler (get_object_vars) --FILE-- 42]), ]; foreach ($tests as $test) { var_dump(get_object_vars($test)); } ?> ===DONE=== --EXPECTF-- array(2) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> NULL } array(2) { ["code"]=> string(30) "function foo() { return bar; }" ["scope"]=> object(stdClass)#%d (%d) { ["bar"]=> int(42) } } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-get_properties-002.phpt0000644000076500000240000000121013572250760024102 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript get_properties handler (foreach) --FILE-- 42]), ]; foreach ($tests as $test) { foreach ($test as $key => $value) { var_dump($key); var_dump($value); } } ?> ===DONE=== --EXPECTF-- string(4) "code" string(33) "function foo(bar) { return bar; }" string(5) "scope" NULL string(4) "code" string(30) "function foo() { return bar; }" string(5) "scope" object(stdClass)#%d (%d) { ["bar"]=> int(42) } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-jsonserialize-001.phpt0000644000076500000240000000051213572250760023733 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::jsonSerialize() return value (without scope) --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { 
["$code"]=> string(33) "function foo(bar) { return bar; }" } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-jsonserialize-002.phpt0000644000076500000240000000064113572250760023737 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::jsonSerialize() return value (with scope) --FILE-- 42]); var_dump($js->jsonSerialize()); ?> ===DONE=== --EXPECTF-- array(2) { ["$code"]=> string(33) "function foo(bar) { return bar; }" ["$scope"]=> object(stdClass)#%d (%d) { ["foo"]=> int(42) } } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-jsonserialize-003.phpt0000644000076500000240000000127213572250760023741 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::jsonSerialize() with json_encode() (without scope) --FILE-- new MongoDB\BSON\Javascript('function foo(bar) { return bar; }')]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$code" : "function foo(bar) { return bar; }" } } {"foo":{"$code":"function foo(bar) { return bar; }"}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> NULL } } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-jsonserialize-004.phpt0000644000076500000240000000145713572250760023747 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::jsonSerialize() with json_encode() (with scope) --FILE-- new MongoDB\BSON\Javascript('function foo(bar) { return bar; }', ['foo' => 42])]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$code" : "function foo(bar) { return bar; }", "$scope" : { "foo" : 42 } } } {"foo":{"$code":"function foo(bar) { return bar; }","$scope":{"foo":42}}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> object(stdClass)#%d (%d) { ["foo"]=> int(42) } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-serialization-001.phpt0000644000076500000240000000521613572250760023735 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript serialization --FILE-- 42]], ['function foo() { return id; }', ['id' => new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603')]], ]; foreach ($tests as $test) { list($code, $scope) = $test; var_dump($js = new MongoDB\BSON\Javascript($code, $scope)); var_dump($s = serialize($js)); var_dump(unserialize($s)); echo "\n"; } ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> NULL } string(108) "C:23:"MongoDB\BSON\Javascript":72:{a:2:{s:4:"code";s:33:"function foo(bar) { return bar; }";s:5:"scope";N;}}" object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> NULL } object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> object(stdClass)#%d (%d) { } } string(125) "C:23:"MongoDB\BSON\Javascript":89:{a:2:{s:4:"code";s:33:"function foo(bar) { return bar; }";s:5:"scope";O:8:"stdClass":0:{}}}" object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(33) "function foo(bar) { return bar; }" ["scope"]=> object(stdClass)#%d (%d) { } } object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(30) "function foo() { return foo; }" ["scope"]=> object(stdClass)#%d (%d) { ["foo"]=> int(42) } } string(138) 
"C:23:"MongoDB\BSON\Javascript":101:{a:2:{s:4:"code";s:30:"function foo() { return foo; }";s:5:"scope";O:8:"stdClass":1:{s:3:"foo";i:42;}}}" object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(30) "function foo() { return foo; }" ["scope"]=> object(stdClass)#%d (%d) { ["foo"]=> int(42) } } object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(29) "function foo() { return id; }" ["scope"]=> object(stdClass)#%d (%d) { ["id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } } } string(213) "C:23:"MongoDB\BSON\Javascript":176:{a:2:{s:4:"code";s:29:"function foo() { return id; }";s:5:"scope";O:8:"stdClass":1:{s:2:"id";C:21:"MongoDB\BSON\ObjectId":48:{a:1:{s:3:"oid";s:24:"53e2a1c40640fd72175d4603";}}}}}" object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(29) "function foo() { return id; }" ["scope"]=> object(stdClass)#%d (%d) { ["id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-serialization_error-001.phpt0000644000076500000240000000075013572250760025144 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript unserialization requires "code" string field --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Javascript initialization requires "code" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-serialization_error-002.phpt0000644000076500000240000000102613572250760025142 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript unserialization expects optional scope to be array or object --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected scope to be array or object, string given ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-serialization_error-003.phpt0000644000076500000240000000077513572250760025155 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript unserialization does not allow code to contain null bytes --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Code cannot contain null bytes ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-set_state-001.phpt0000644000076500000240000000270613572250760023054 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::__set_state() --FILE-- 42]], ['function foo() { return id; }', ['id' => new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603')]], ]; foreach ($tests as $test) { list($code, $scope) = $test; var_export(MongoDB\BSON\Javascript::__set_state([ 'code' => $code, 'scope' => $scope, ])); echo "\n\n"; } // Test with missing scope field var_export(MongoDB\BSON\Javascript::__set_state([ 'code' => 'function foo(bar) { return bar; }', ])); echo "\n\n"; ?> ===DONE=== --EXPECTF-- MongoDB\BSON\Javascript::__set_state(array( %w'code' => 'function foo(bar) { return bar; }', %w'scope' => NULL, )) MongoDB\BSON\Javascript::__set_state(array( %w'code' => 'function foo(bar) { return bar; }', %w'scope' => %Sarray( %S), )) MongoDB\BSON\Javascript::__set_state(array( %w'code' => 'function foo() { return foo; }', %w'scope' => %Sarray( %w'foo' => 42, %S), )) MongoDB\BSON\Javascript::__set_state(array( %w'code' => 'function foo() { return id; }', %w'scope' => %Sarray( %w'id' => MongoDB\BSON\ObjectId::__set_state(array( %w'oid' => '53e2a1c40640fd72175d4603', )), %S), )) MongoDB\BSON\Javascript::__set_state(array( %w'code' => 'function foo(bar) { return bar; }', %w'scope' => NULL, )) ===DONE=== 
mongodb-1.6.1/tests/bson/bson-javascript-set_state_error-001.phpt0000644000076500000240000000072213572250760024261 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::__set_state() requires "code" string field --FILE-- 0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Javascript initialization requires "code" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-set_state_error-002.phpt0000644000076500000240000000077213572250760024267 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::__set_state() expects optional scope to be array or object --FILE-- 'function foo() {}', 'scope' => 'INVALID']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected scope to be array or object, string given ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-set_state_error-003.phpt0000644000076500000240000000073213572250760024264 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::__set_state() does not allow code to contain null bytes --FILE-- "function foo() { return '\0'; }"]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Code cannot contain null bytes ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript-tostring-001.phpt0000644000076500000240000000062313572250760022726 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::__toString() --FILE-- 1]); var_dump((string) $js); ?> ===DONE=== --EXPECT-- string(28) "function foo() { return 1; }" string(30) "function foo() { return bar; }" ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript_error-001.phpt0000644000076500000240000000051513572250760022270 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript #001 error --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE=== mongodb-1.6.1/tests/bson/bson-javascript_error-002.phpt0000644000076500000240000000042313572250760022267 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyJavascript may not inherit from final class (MongoDB\BSON\Javascript) in %s on line %d mongodb-1.6.1/tests/bson/bson-javascript_error-003.phpt0000644000076500000240000000070513572250760022273 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Javascript::__construct() does not allow code to contain null bytes --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Code cannot contain null bytes ===DONE=== mongodb-1.6.1/tests/bson/bson-javascriptinterface-001.phpt0000644000076500000240000000050413572250760022736 0ustar alcaeusstaff--TEST-- MongoDB\BSON\JavascriptInterface is implemented by MongoDB\BSON\Javascript --FILE-- 1]); var_dump($javascript instanceof MongoDB\BSON\JavascriptInterface); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-maxkey-001.phpt0000644000076500000240000000122013572250760020201 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKey #001 --FILE-- $maxkey), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Test#0 { "max" : { "$maxKey" : 1 } } string(29) "{ "max" : { "$maxKey" : 1 } }" string(29) "{ 
"max" : { "$maxKey" : 1 } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-maxkey-compare-001.phpt0000644000076500000240000000051313572250760021631 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKey comparisons --FILE-- new MongoDB\BSON\MaxKey); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) ===DONE=== mongodb-1.6.1/tests/bson/bson-maxkey-jsonserialize-001.phpt0000644000076500000240000000035713572250760023072 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKey::jsonSerialize() return value --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$maxKey"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/bson/bson-maxkey-jsonserialize-002.phpt0000644000076500000240000000073613572250760023074 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKey::jsonSerialize() with json_encode() --FILE-- new MongoDB\BSON\MaxKey]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$maxKey" : 1 } } {"foo":{"$maxKey":1}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\MaxKey)#%d (%d) { } } ===DONE=== mongodb-1.6.1/tests/bson/bson-maxkey-serialization-001.phpt0000644000076500000240000000053313572250760023062 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKey serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\MaxKey)#%d (%d) { } string(31) "C:19:"MongoDB\BSON\MaxKey":0:{}" object(MongoDB\BSON\MaxKey)#%d (%d) { } ===DONE=== mongodb-1.6.1/tests/bson/bson-maxkey-set_state-001.phpt0000644000076500000240000000033313572250760022176 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKey::__set_state() --FILE-- ===DONE=== --EXPECT-- MongoDB\BSON\MaxKey::__set_state(array( )) ===DONE=== mongodb-1.6.1/tests/bson/bson-maxkey_error-001.phpt0000644000076500000240000000037713572250760021426 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKey cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyMaxKey may not inherit from final class (MongoDB\BSON\MaxKey) in %s on line %d mongodb-1.6.1/tests/bson/bson-maxkeyinterface-001.phpt0000644000076500000240000000037413572250760022073 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MaxKeyInterface is implemented by MongoDB\BSON\MaxKey --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-minkey-001.phpt0000644000076500000240000000122013572250760020177 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MinKey #001 --FILE-- $minkey), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Test#0 { "min" : { "$minKey" : 1 } } string(29) "{ "min" : { "$minKey" : 1 } }" string(29) "{ "min" : { "$minKey" : 1 } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-minkey-compare-001.phpt0000644000076500000240000000051413572250760021630 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MinKey comparisons --FILE-- new MongoDB\BSON\MinKey); ?> ===DONE=== --EXPECTF-- bool(true) bool(false) bool(false) ===DONE=== mongodb-1.6.1/tests/bson/bson-minkey-jsonserialize-001.phpt0000644000076500000240000000035713572250760023070 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MinKey::jsonSerialize() return value --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$minKey"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/bson/bson-minkey-jsonserialize-002.phpt0000644000076500000240000000073613572250760023072 0ustar alcaeusstaff--TEST-- 
MongoDB\BSON\MinKey::jsonSerialize() with json_encode() --FILE-- new MongoDB\BSON\MinKey]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$minKey" : 1 } } {"foo":{"$minKey":1}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\MinKey)#%d (%d) { } } ===DONE=== mongodb-1.6.1/tests/bson/bson-minkey-serialization-001.phpt0000644000076500000240000000053313572250760023060 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MinKey serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\MinKey)#%d (%d) { } string(31) "C:19:"MongoDB\BSON\MinKey":0:{}" object(MongoDB\BSON\MinKey)#%d (%d) { } ===DONE=== mongodb-1.6.1/tests/bson/bson-minkey-set_state-001.phpt0000644000076500000240000000033313572250760022174 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MinKey::__set_state() --FILE-- ===DONE=== --EXPECT-- MongoDB\BSON\MinKey::__set_state(array( )) ===DONE=== mongodb-1.6.1/tests/bson/bson-minkey_error-001.phpt0000644000076500000240000000037713572250760021424 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MinKey cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyMinKey may not inherit from final class (MongoDB\BSON\MinKey) in %s on line %d mongodb-1.6.1/tests/bson/bson-minkeyinterface-001.phpt0000644000076500000240000000037413572250760022071 0ustar alcaeusstaff--TEST-- MongoDB\BSON\MinKeyInterface is implemented by MongoDB\BSON\MinKey --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-001.phpt0000644000076500000240000000560213572250760020476 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId #001 --FILE-- my = $sameid; $samearr = array("my" => $sameid); $std = new stdclass; $std->_id = new MongoDB\BSON\ObjectId; $array = array( "_id" => new MongoDB\BSON\ObjectId, "id" => new MongoDB\BSON\ObjectId, "d" => new MongoDB\BSON\ObjectId, ); $pregenerated = new MongoDB\BSON\ObjectId("53e28b650640fd3162152de1"); $tests = array( $array, $std, $samestd, $samearr, array("pregenerated" => $pregenerated), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } throws(function() { $id = new MongoDB\BSON\ObjectId("53e28b650640fd3162152de12"); }, "MongoDB\\Driver\\Exception\\InvalidArgumentException"); throws(function() { $id = new MongoDB\BSON\ObjectId("53e28b650640fd3162152dg1"); }, "MongoDB\\Driver\\Exception\\InvalidArgumentException"); throws(function() { $id = new MongoDB\BSON\ObjectId("-3e28b650640fd3162152da1"); }, "MongoDB\\Driver\\Exception\\InvalidArgumentException"); throws(function() { $id = new MongoDB\BSON\ObjectId(" 3e28b650640fd3162152da1"); }, "MongoDB\\Driver\\Exception\\InvalidArgumentException"); raises(function() use($pregenerated) { $pregenerated->__toString(1); }, E_WARNING); ?> ===DONE=== --EXPECTF-- Test#0 { "_id" : { "$oid" : "%s" }, "id" : { "$oid" : "%s" }, "d" : { "$oid" : "%s" } } string(146) "{ "_id" : { "$oid" : "%s" }, "id" : { "$oid" : "%s" }, "d" : { "$oid" : "%s" } }" string(146) "{ "_id" : { "$oid" : "%s" }, "id" : { "$oid" : "%s" }, "d" : { "$oid" : "%s" } }" bool(true) Test#1 { "_id" : { "$oid" : "%s" } } string(51) "{ "_id" : { "$oid" : "%s" } }" string(51) "{ "_id" : { "$oid" : "%s" } }" bool(true) Test#2 { "my" : { "$oid" : "53e2a1c40640fd72175d4603" } } string(50) "{ "my" : { "$oid" : "53e2a1c40640fd72175d4603" } 
}" string(50) "{ "my" : { "$oid" : "53e2a1c40640fd72175d4603" } }" bool(true) Test#3 { "my" : { "$oid" : "53e2a1c40640fd72175d4603" } } string(50) "{ "my" : { "$oid" : "53e2a1c40640fd72175d4603" } }" string(50) "{ "my" : { "$oid" : "53e2a1c40640fd72175d4603" } }" bool(true) Test#4 { "pregenerated" : { "$oid" : "53e28b650640fd3162152de1" } } string(60) "{ "pregenerated" : { "$oid" : "53e28b650640fd3162152de1" } }" string(60) "{ "pregenerated" : { "$oid" : "53e28b650640fd3162152de1" } }" bool(true) OK: Got MongoDB\Driver\Exception\InvalidArgumentException OK: Got MongoDB\Driver\Exception\InvalidArgumentException OK: Got MongoDB\Driver\Exception\InvalidArgumentException OK: Got MongoDB\Driver\Exception\InvalidArgumentException OK: Got E_WARNING ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-002.phpt0000644000076500000240000000061013572250760020471 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId #002 generates ObjectId for null or missing constructor argument --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-003.phpt0000644000076500000240000000152613572250760020501 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId #003 construction with string argument --FILE-- value = (string) $value; } public function __toString() { return $this->value; } } $oid = new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603'); $str = new StringObject('53e2a1c40640fd72175d4603'); var_dump($oid); var_dump(new MongoDB\BSON\ObjectId($oid)); var_dump(new MongoDB\BSON\ObjectId($str)); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-004.phpt0000644000076500000240000000047613572250760020505 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId #004 Constructor supports uppercase hexadecimal strings --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "56925b7330616224d0000001" } ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-compare-001.phpt0000644000076500000240000000141213572250760022115 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId comparisons --FILE-- new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603')); var_dump(new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603') < new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4604')); var_dump(new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4603') > new MongoDB\BSON\ObjectId('53e2a1c40640fd72175d4602')); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-compare-002.phpt0000644000076500000240000000143213572250760022120 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId comparisons with null bytes --FILE-- new MongoDB\BSON\ObjectId('00e2a1c40640fd72175d4603')); var_dump(new MongoDB\BSON\ObjectId('00e2a1c40640fd72175d4603') < new MongoDB\BSON\ObjectId('00e2a1c40640fd72175d4604')); var_dump(new MongoDB\BSON\ObjectId('00e2a1c40640fd72175d4603') > new MongoDB\BSON\ObjectId('00e2a1c40640fd72175d4602')); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-getTimestamp-001.phpt0000644000076500000240000000047713572250760023144 
0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::getTimestamp --FILE-- getTimestamp(); echo $ts, "\n"; echo date_create( "@{$ts}" )->format( "Y-m-d H:i:s" ), "\n"; ?> --EXPECT-- 1447757782 2015-11-17 10:56:22 mongodb-1.6.1/tests/bson/bson-objectid-getTimestamp-002.phpt0000644000076500000240000000131713572250760023137 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::getTimestamp: Ensure that the Timestamp field is represented as an unsigned 32-bit integer --FILE-- getTimestamp()); echo $ts, "\n"; echo date_create("@{$ts}")->format("Y-m-d H:i:s"), "\n"; } create_object_id('000000000000000000000000'); create_object_id('7FFFFFFF0000000000000000'); create_object_id('800000000000000000000000'); create_object_id('FFFFFFFF0000000000000000'); ?> --EXPECT-- 0 1970-01-01 00:00:00 2147483647 2038-01-19 03:14:07 2147483648 2038-01-19 03:14:08 4294967295 2106-02-07 06:28:15mongodb-1.6.1/tests/bson/bson-objectid-get_properties-001.phpt0000644000076500000240000000046013572250760023524 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId get_properties handler (get_object_vars) --FILE-- ===DONE=== --EXPECT-- array(1) { ["oid"]=> string(24) "53e2a1c40640fd72175d4603" } ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-get_properties-002.phpt0000644000076500000240000000051313572250760023524 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId get_properties handler (foreach) --FILE-- $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(3) "oid" string(24) "53e2a1c40640fd72175d4603" ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-jsonserialize-001.phpt0000644000076500000240000000044513572250760023355 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::jsonSerialize() return value --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$oid"]=> string(24) "5820ca4bef62d52d9924d0d8" } ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-jsonserialize-002.phpt0000644000076500000240000000114413572250760023353 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::jsonSerialize() with json_encode() --FILE-- new MongoDB\BSON\ObjectId('5820ca4bef62d52d9924d0d8')]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$oid" : "5820ca4bef62d52d9924d0d8" } } {"foo":{"$oid":"5820ca4bef62d52d9924d0d8"}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "5820ca4bef62d52d9924d0d8" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-serialization-001.phpt0000644000076500000240000000102413572250760023343 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "576c25db6118fd406e6e6471" } string(82) "C:21:"MongoDB\BSON\ObjectId":48:{a:1:{s:3:"oid";s:24:"576c25db6118fd406e6e6471";}}" object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "576c25db6118fd406e6e6471" } ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-serialization_error-001.phpt0000644000076500000240000000073713572250760024566 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId unserialization requires "oid" string field --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\ObjectId initialization requires "oid" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-serialization_error-002.phpt0000644000076500000240000000137713572250760024570 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId unserialization requires valid hex string 
--FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: 0123456789abcdefghijklmn OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: INVALID ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-set_state-001.phpt0000644000076500000240000000046113572250760022465 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::__set_state() --FILE-- '576c25db6118fd406e6e6471', ])); echo "\n"; ?> ===DONE=== --EXPECTF-- MongoDB\BSON\ObjectId::__set_state(array( %w'oid' => '576c25db6118fd406e6e6471', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-set_state_error-001.phpt0000644000076500000240000000071113572250760023674 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::__set_state() requires "oid" string field --FILE-- 0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\ObjectId initialization requires "oid" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid-set_state_error-002.phpt0000644000076500000240000000131713572250760023700 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::__set_state() requires valid hex string --FILE-- '0123456789abcdefghijklmn']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\ObjectId::__set_state(['oid' => 'INVALID']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: 0123456789abcdefghijklmn OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: INVALID ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid_error-001.phpt0000644000076500000240000000053013572250760021702 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId #001 error --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE=== mongodb-1.6.1/tests/bson/bson-objectid_error-002.phpt0000644000076500000240000000041113572250760021701 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyObjectId may not inherit from final class (MongoDB\BSON\ObjectId) in %s on line %d mongodb-1.6.1/tests/bson/bson-objectid_error-003.phpt0000644000076500000240000000124713572250760021712 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectId::__construct() requires valid hex string --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: 0123456789abcdefghijklmn OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing ObjectId string: INVALID ===DONE=== mongodb-1.6.1/tests/bson/bson-objectidinterface-001.phpt0000644000076500000240000000037613572250760022362 0ustar alcaeusstaff--TEST-- MongoDB\BSON\ObjectIdInterface is implemented by MongoDB\BSON\ObjectId --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-001.phpt0000644000076500000240000000166413572250760020031 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex #001 --FILE-- getPattern()); printf("Flags: %s\n", $regexp->getFlags()); printf("String representation: %s\n", $regexp); $tests = array( array("regex" => $regexp), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); 
var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Pattern: regexp Flags: i String representation: /regexp/i Test#0 { "regex" : { "$regex" : "regexp", "$options" : "i" } } string(55) "{ "regex" : { "$regex" : "regexp", "$options" : "i" } }" string(55) "{ "regex" : { "$regex" : "regexp", "$options" : "i" } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-002.phpt0000644000076500000240000000043713572250760020027 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex debug handler --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(1) "i" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-003.phpt0000644000076500000240000000167013572250760020030 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex with flags omitted --FILE-- getPattern()); printf("Flags: %s\n", $regexp->getFlags()); printf("String representation: %s\n", $regexp); $tests = array( array("regex" => $regexp), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Pattern: regexp Flags: String representation: /regexp/ Test#0 { "regex" : { "$regex" : "regexp", "$options" : "" } } string(54) "{ "regex" : { "$regex" : "regexp", "$options" : "" } }" string(54) "{ "regex" : { "$regex" : "regexp", "$options" : "" } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-004.phpt0000644000076500000240000000045413572250760020030 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex debug handler with flags omitted --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(0) "" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-005.phpt0000644000076500000240000000050113572250760020022 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex initialization will alphabetize flags --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(6) "ilmsux" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-compare-001.phpt0000644000076500000240000000110513572250760021443 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex comparisons (without flags) --FILE-- new MongoDB\BSON\Regex('regexp')); var_dump(new MongoDB\BSON\Regex('regexp') < new MongoDB\BSON\Regex('regexr')); var_dump(new MongoDB\BSON\Regex('regexp') > new MongoDB\BSON\Regex('regexo')); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-compare-002.phpt0000644000076500000240000000132313572250760021446 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex comparisons (with flags) --FILE-- new MongoDB\BSON\Regex('regexp', 'm')); var_dump(new MongoDB\BSON\Regex('regexp', 'm') < new MongoDB\BSON\Regex('regexp', 'x')); var_dump(new MongoDB\BSON\Regex('regexp', 'm') > new MongoDB\BSON\Regex('regexp', 'i')); var_dump(new MongoDB\BSON\Regex('regexp', 'm') > new MongoDB\BSON\Regex('regexp')); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-get_properties-001.phpt0000644000076500000240000000046013572250760023053 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex get_properties handler (get_object_vars) --FILE-- ===DONE=== --EXPECT-- array(2) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(1) "i" } ===DONE=== 
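An illustrative sketch (not one of the packaged tests) of the MongoDB\BSON\Regex accessors and flag normalization covered by the tests above. It assumes the mongodb extension is loaded; the pattern and flags are arbitrary examples.

<?php
// Flags are optional; when given, the constructor stores them alphabetized.
$regex = new MongoDB\BSON\Regex('^foo', 'xi');

var_dump($regex->getPattern()); // the pattern as supplied: "^foo"
var_dump($regex->getFlags());   // flags alphabetized: "ix"
var_dump((string) $regex);      // "/pattern/flags" form: "/^foo/ix"
?>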
mongodb-1.6.1/tests/bson/bson-regex-get_properties-002.phpt0000644000076500000240000000051513572250760023055 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex get_properties handler (foreach) --FILE-- $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(7) "pattern" string(6) "regexp" string(5) "flags" string(1) "i" ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-jsonserialize-001.phpt0000644000076500000240000000046213572250760022703 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::jsonSerialize() return value (without flags) --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(2) { ["$regex"]=> string(7) "pattern" ["$options"]=> string(0) "" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-jsonserialize-002.phpt0000644000076500000240000000046513572250760022707 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::jsonSerialize() return value (with flags) --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(2) { ["$regex"]=> string(7) "pattern" ["$options"]=> string(1) "i" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-jsonserialize-003.phpt0000644000076500000240000000115613572250760022706 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::jsonSerialize() with json_encode() (without flags) --FILE-- new MongoDB\BSON\Regex('pattern')]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$regex" : "pattern", "$options" : "" } } {"foo":{"$regex":"pattern","$options":""}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(7) "pattern" ["flags"]=> string(0) "" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-jsonserialize-004.phpt0000644000076500000240000000116313572250760022705 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::jsonSerialize() with json_encode() (with flags) --FILE-- new MongoDB\BSON\Regex('pattern', 'i')]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$regex" : "pattern", "$options" : "i" } } {"foo":{"$regex":"pattern","$options":"i"}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(7) "pattern" ["flags"]=> string(1) "i" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-serialization-001.phpt0000644000076500000240000000103713572250760022676 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(1) "i" } string(84) "C:18:"MongoDB\BSON\Regex":53:{a:2:{s:7:"pattern";s:6:"regexp";s:5:"flags";s:1:"i";}}" object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(1) "i" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-serialization-002.phpt0000644000076500000240000000105213572250760022674 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex serialization with flags omitted --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(0) "" } string(83) "C:18:"MongoDB\BSON\Regex":52:{a:2:{s:7:"pattern";s:6:"regexp";s:5:"flags";s:0:"";}}" object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(0) "" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-serialization-003.phpt0000644000076500000240000000055613572250760022705 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex unserialization will alphabetize flags --FILE-- ===DONE=== 
--EXPECTF-- object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(6) "regexp" ["flags"]=> string(6) "ilmsux" } ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-serialization_error-001.phpt0000644000076500000240000000216313572250760024110 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex unserialization requires "pattern" and "flags" string fields --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Regex initialization requires "pattern" and "flags" string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Regex initialization requires "pattern" and "flags" string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Regex initialization requires "pattern" and "flags" string fields ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-serialization_error-002.phpt0000644000076500000240000000145113572250760024110 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex unserialization does not allow pattern or flags to contain null bytes --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Pattern cannot contain null bytes OK: Got MongoDB\Driver\Exception\InvalidArgumentException Flags cannot contain null bytes ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-set_state-001.phpt0000644000076500000240000000046413572250760022017 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::__set_state() --FILE-- 'regexp', 'flags' => 'i', ])); echo "\n"; ?> ===DONE=== --EXPECTF-- MongoDB\BSON\Regex::__set_state(array( %w'pattern' => 'regexp', %w'flags' => 'i', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-set_state-002.phpt0000644000076500000240000000052513572250760022016 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::__set_state() will alphabetize flags --FILE-- 'regexp', 'flags' => 'xusmli', ])); echo "\n"; ?> ===DONE=== --EXPECTF-- MongoDB\BSON\Regex::__set_state(array( %w'pattern' => 'regexp', %w'flags' => 'ilmsux', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-set_state_error-001.phpt0000644000076500000240000000205513572250760023226 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::__set_state() requires "pattern" and "flags" string fields --FILE-- 'regexp']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Regex::__set_state(['flags' => 'i']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Regex::__set_state(['pattern' => 0, 'flags' => 0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Regex initialization requires "pattern" and "flags" string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Regex initialization requires "pattern" and "flags" string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Regex initialization requires "pattern" and "flags" string fields ===DONE=== mongodb-1.6.1/tests/bson/bson-regex-set_state_error-002.phpt0000644000076500000240000000133613572250760023230 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::__set_state() does not allow pattern or flags to contain null bytes --FILE-- "regexp\0", 'flags' => 'i']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Regex::__set_state(['pattern' => 'regexp', 'flags' => "i\0"]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got 
MongoDB\Driver\Exception\InvalidArgumentException Pattern cannot contain null bytes OK: Got MongoDB\Driver\Exception\InvalidArgumentException Flags cannot contain null bytes ===DONE=== mongodb-1.6.1/tests/bson/bson-regex_error-001.phpt0000644000076500000240000000112713572250760021234 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex #001 error --FILE-- getPattern(true); $regexp->getFlags(true); throws(function() { new MongoDB\BSON\Regex; }, "MongoDB\\Driver\\Exception\\InvalidArgumentException"); ?> ===DONE=== --EXPECTF-- Warning: %s\Regex::getPattern() expects exactly 0 parameters, 1 given in %s on line %d Warning: %s\Regex::getFlags() expects exactly 0 parameters, 1 given in %s on line %d OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE=== mongodb-1.6.1/tests/bson/bson-regex_error-002.phpt0000644000076500000240000000037213572250760021236 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyRegex may not inherit from final class (MongoDB\BSON\Regex) in %s on line %d mongodb-1.6.1/tests/bson/bson-regex_error-003.phpt0000644000076500000240000000123013572250760021231 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Regex::__construct() does not allow pattern or flags to contain null bytes --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Pattern cannot contain null bytes OK: Got MongoDB\Driver\Exception\InvalidArgumentException Flags cannot contain null bytes ===DONE=== mongodb-1.6.1/tests/bson/bson-regexinterface-001.phpt0000644000076500000240000000040613572250760021703 0ustar alcaeusstaff--TEST-- MongoDB\BSON\RegexInterface is implemented by MongoDB\BSON\Regex --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-001.phpt0000644000076500000240000000115513572250760020217 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol #001 --FILE-- $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $testagain = toPHP($s); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Test#0 { "symbol" : "test" } string(21) "{ "symbol" : "test" }" string(21) "{ "symbol" : "test" }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-compare-001.phpt0000644000076500000240000000122313572250760021637 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol comparisons --FILE-- MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "symbol": {"$symbol": "val0"} }'))); ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-get_properties-001.phpt0000644000076500000240000000052613572250760023251 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol get_properties handler (get_object_vars) --FILE-- symbol; var_dump(get_object_vars($symbol)); ?> ===DONE=== --EXPECT-- array(1) { ["symbol"]=> string(4) "test" } ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-get_properties-002.phpt0000644000076500000240000000056113572250760023251 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol get_properties handler (foreach) --FILE-- symbol; foreach ($symbol as $key => $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(6) "symbol" string(4) "test" ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-jsonserialize-001.phpt0000644000076500000240000000047213572250760023077 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol::jsonSerialize() return value --FILE-- symbol; var_dump($js->jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { 
["$symbol"]=> string(9) "valSymbol" } ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-jsonserialize-002.phpt0000644000076500000240000000110413572250760023071 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol::jsonSerialize() with json_encode() --FILE-- ===DONE=== --EXPECTF-- { "foo" : "symbolValue" } {"foo":{"$symbol":"symbolValue"}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Symbol)#%d (%d) { ["symbol"]=> string(11) "symbolValue" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-serialization-001.phpt0000644000076500000240000000105013572250760023064 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol serialization --FILE-- symbol; var_dump($symbol = $test); var_dump($s = serialize($symbol)); var_dump(unserialize($s)); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\Symbol)#1 (1) { ["symbol"]=> string(11) "symbolValue" } string(70) "C:19:"MongoDB\BSON\Symbol":38:{a:1:{s:6:"symbol";s:11:"symbolValue";}}" object(MongoDB\BSON\Symbol)#2 (1) { ["symbol"]=> string(11) "symbolValue" } ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-serialization_error-001.phpt0000644000076500000240000000074213572250760024304 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol unserialization requires "symbol" string field --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Symbol initialization requires "symbol" string field ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-serialization_error-002.phpt0000644000076500000240000000077113572250760024307 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol unserialization does not allow code to contain null bytes --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Symbol cannot contain null bytes ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol-tostring-001.phpt0000644000076500000240000000042013572250760022060 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol::__toString() --FILE-- symbol; var_dump((string) $symbol); ?> ===DONE=== --EXPECT-- string(11) "symbolValue" ===DONE=== mongodb-1.6.1/tests/bson/bson-symbol_error-001.phpt0000644000076500000240000000037713572250760021435 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Symbol cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MySymbol may not inherit from final class (MongoDB\BSON\Symbol) in %s on line %d mongodb-1.6.1/tests/bson/bson-timestamp-001.phpt0000644000076500000240000000153513572250760020717 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp #001 --FILE-- $timestamp), ); $s = new MongoDB\BSON\Timestamp(1234, 5678); echo $s, "\n"; foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- [1234:5678] Test#0 { "timestamp" : { "$timestamp" : { "t" : 5678, "i" : 1234 } } } string(63) "{ "timestamp" : { "$timestamp" : { "t" : 5678, "i" : 1234 } } }" string(63) "{ "timestamp" : { "$timestamp" : { "t" : 5678, "i" : 1234 } } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-002.phpt0000644000076500000240000000046713572250760020723 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp debug handler --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-003.phpt0000644000076500000240000000121213572250760020711 0ustar alcaeusstaff--TEST-- 
MongoDB\BSON\Timestamp constructor requires positive unsigned 32-bit integers --FILE-- ===DONE=== --EXPECTF-- Test [2147483647:0] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "2147483647" ["timestamp"]=> string(1) "0" } Test [0:2147483647] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "2147483647" } ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-004.phpt0000644000076500000240000000136613572250760020724 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp constructor requires 64-bit integers to be positive unsigned 32-bit integers --SKIPIF-- --FILE-- ===DONE=== --EXPECTF-- Test [4294967295:0] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "4294967295" ["timestamp"]=> string(1) "0" } Test [0:4294967295] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "4294967295" } ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-005.phpt0000644000076500000240000000203613572250760020720 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp constructor requires positive unsigned 32-bit integers (as string) --FILE-- ===DONE=== --EXPECTF-- Test [2147483647:0] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "2147483647" ["timestamp"]=> string(1) "0" } Test [0:2147483647] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "2147483647" } Test [4294967295:0] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "4294967295" ["timestamp"]=> string(1) "0" } Test [0:4294967295] object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "4294967295" } ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-compare-001.phpt0000644000076500000240000000160113572250760022335 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp comparisons --FILE-- new MongoDB\BSON\Timestamp(1234, 5678)); // Timestamp is compared first var_dump(new MongoDB\BSON\Timestamp(1234, 5678) < new MongoDB\BSON\Timestamp(1233, 5679)); var_dump(new MongoDB\BSON\Timestamp(1234, 5678) > new MongoDB\BSON\Timestamp(1235, 5677)); // Increment is compared second var_dump(new MongoDB\BSON\Timestamp(1234, 5678) < new MongoDB\BSON\Timestamp(1235, 5678)); var_dump(new MongoDB\BSON\Timestamp(1234, 5678) > new MongoDB\BSON\Timestamp(1233, 5678)); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-getIncrement-001.phpt0000644000076500000240000000074013572250760023336 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::getIncrement() --FILE-- getIncrement()); echo "\n"; } ?> ===DONE=== --EXPECTF-- Test [1234:5678] int(1234) Test [2147483647:0] int(2147483647) Test [0:2147483647] int(0) ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-getTimestamp-001.phpt0000644000076500000240000000074013572250760023355 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::getTimestamp() --FILE-- getTimestamp()); echo "\n"; } ?> ===DONE=== --EXPECTF-- Test [1234:5678] int(5678) Test [2147483647:0] int(0) Test [0:2147483647] int(2147483647) ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-get_properties-001.phpt0000644000076500000240000000050413572250760023743 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp get_properties handler (get_object_vars) --FILE-- ===DONE=== --EXPECT-- array(2) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } ===DONE=== 
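An illustrative sketch (not one of the packaged tests) of the MongoDB\BSON\Timestamp argument order and accessors verified above: the constructor takes the increment first and the timestamp second, and both must be unsigned 32-bit values (integers or numeric strings). Assumes the mongodb extension is loaded.

<?php
$ts = new MongoDB\BSON\Timestamp(1234, 5678);

echo $ts, "\n";                // string form is "[increment:timestamp]": [1234:5678]
var_dump($ts->getIncrement()); // int(1234)
var_dump($ts->getTimestamp()); // int(5678)
?>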
mongodb-1.6.1/tests/bson/bson-timestamp-get_properties-002.phpt0000644000076500000240000000054113572250760023745 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp get_properties handler (foreach) --FILE-- $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(9) "increment" string(4) "1234" string(9) "timestamp" string(4) "5678" ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-jsonserialize-001.phpt0000644000076500000240000000047413572250760023577 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::jsonSerialize() return value --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$timestamp"]=> array(2) { ["t"]=> int(5678) ["i"]=> int(1234) } } ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-jsonserialize-002.phpt0000644000076500000240000000117213572250760023574 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::jsonSerialize() with json_encode() --FILE-- new MongoDB\BSON\Timestamp('1234', '5678')]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$timestamp" : { "t" : 5678, "i" : 1234 } } } {"foo":{"$timestamp":{"t":5678,"i":1234}}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-serialization-001.phpt0000644000076500000240000000270313572250760023570 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } string(95) "C:22:"MongoDB\BSON\Timestamp":60:{a:2:{s:9:"increment";s:4:"1234";s:9:"timestamp";s:4:"5678";}}" object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "2147483647" ["timestamp"]=> string(1) "0" } string(99) "C:22:"MongoDB\BSON\Timestamp":64:{a:2:{s:9:"increment";s:10:"2147483647";s:9:"timestamp";s:1:"0";}}" object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "2147483647" ["timestamp"]=> string(1) "0" } object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "2147483647" } string(99) "C:22:"MongoDB\BSON\Timestamp":64:{a:2:{s:9:"increment";s:1:"0";s:9:"timestamp";s:10:"2147483647";}}" object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "2147483647" } ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-serialization-002.phpt0000644000076500000240000000227013572250760023570 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp serialization (64-bit) --SKIPIF-- --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "4294967295" ["timestamp"]=> string(1) "0" } string(99) "C:22:"MongoDB\BSON\Timestamp":64:{a:2:{s:9:"increment";s:10:"4294967295";s:9:"timestamp";s:1:"0";}}" object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(10) "4294967295" ["timestamp"]=> string(1) "0" } object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "4294967295" } string(99) "C:22:"MongoDB\BSON\Timestamp":64:{a:2:{s:9:"increment";s:1:"0";s:9:"timestamp";s:10:"4294967295";}}" object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(1) "0" ["timestamp"]=> string(10) "4294967295" } ===DONE=== 
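An illustrative sketch (not one of the packaged tests) showing the two Timestamp serialization paths covered above: PHP serialization and JSON encoding via JsonSerializable. Assumes the mongodb extension is loaded; the document key is an arbitrary example.

<?php
$ts = new MongoDB\BSON\Timestamp(1234, 5678);

// PHP serialization round-trips the increment and timestamp (exposed as strings).
var_dump(unserialize(serialize($ts)));

// json_encode() calls jsonSerialize(), producing the extended JSON representation.
echo json_encode(['ts' => $ts]), "\n"; // {"ts":{"$timestamp":{"t":5678,"i":1234}}}
?>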
mongodb-1.6.1/tests/bson/bson-timestamp-serialization_error-001.phpt0000644000076500000240000000313713572250760025003 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp unserialization requires "increment" and "timestamp" integer fields --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-serialization_error-002.phpt0000644000076500000240000000274013572250760025003 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp unserialization requires positive unsigned 32-bit integers --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, -2147483648 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, -2147483648 given ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-serialization_error-003.phpt0000644000076500000240000000172213572250760025003 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp unserialization requires 64-bit integers to be positive unsigned 32-bit integers --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, 4294967296 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, 4294967296 given ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-serialization_error-004.phpt0000644000076500000240000000162613572250760025007 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp unserialization requires strings to parse as 64-bit integers --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "1.23" as 64-bit integer increment for MongoDB\BSON\Timestamp initialization OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "5.67" as 64-bit integer timestamp for MongoDB\BSON\Timestamp initialization ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-set_state-001.phpt0000644000076500000240000000131113572250760022700 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::__set_state() --FILE-- $increment, 'timestamp' => $timestamp, ])); echo "\n\n"; } ?> ===DONE=== --EXPECTF-- MongoDB\BSON\Timestamp::__set_state(array( %w'increment' => '1234', %w'timestamp' => '5678', )) MongoDB\BSON\Timestamp::__set_state(array( %w'increment' => '2147483647', %w'timestamp' => '0', )) MongoDB\BSON\Timestamp::__set_state(array( %w'increment' => '0', %w'timestamp' => '2147483647', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-set_state-002.phpt0000644000076500000240000000126513572250760022711 0ustar alcaeusstaff--TEST-- 
MongoDB\BSON\Timestamp::__set_state() (64-bit) --SKIPIF-- --FILE-- $increment, 'timestamp' => $timestamp, ])); echo "\n\n"; } ?> ===DONE=== --EXPECTF-- MongoDB\BSON\Timestamp::__set_state(array( %w'increment' => '4294967295', %w'timestamp' => '0', )) MongoDB\BSON\Timestamp::__set_state(array( %w'increment' => '0', %w'timestamp' => '4294967295', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-set_state_error-001.phpt0000644000076500000240000000300213572250760024110 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::__set_state() requires "increment" and "timestamp" integer fields --FILE-- 1234]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['timestamp' => 5678]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['increment' => '1234', 'timestamp' => 5678]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['increment' => 1234, 'timestamp' => '5678']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\Timestamp initialization requires "increment" and "timestamp" integer or numeric string fields ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-set_state_error-002.phpt0000644000076500000240000000260313572250760024117 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::__set_state() requires positive unsigned 32-bit integers --FILE-- -1, 'timestamp' => 5678]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['increment' => -2147483647, 'timestamp' => 5678]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['increment' => 1234, 'timestamp' => -1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['increment' => 1234, 'timestamp' => -2147483647]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, -2147483647 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, -2147483647 given ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-set_state_error-003.phpt0000644000076500000240000000164313572250760024123 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::__set_state() requires 64-bit integers to be positive unsigned 32-bit integers --SKIPIF-- --FILE-- 4294967296, 'timestamp' => 5678]); }, 
'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['increment' => 1234, 'timestamp' => 4294967296]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, 4294967296 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, 4294967296 given ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp-set_state_error-004.phpt0000644000076500000240000000153713572250760024126 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp::__set_state() requires strings to parse as 64-bit integers --FILE-- '1.23', 'timestamp' => '5678']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\BSON\Timestamp::__set_state(['increment' => '1234', 'timestamp' => '5.67']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "1.23" as 64-bit integer increment for MongoDB\BSON\Timestamp initialization OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "5.67" as 64-bit integer timestamp for MongoDB\BSON\Timestamp initialization ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp_error-001.phpt0000644000076500000240000000051413572250760022124 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp #001 error --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp_error-002.phpt0000644000076500000240000000041613572250760022126 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyTimestamp may not inherit from final class (MongoDB\BSON\Timestamp) in %s on line %d mongodb-1.6.1/tests/bson/bson-timestamp_error-003.phpt0000644000076500000240000000273013572250760022130 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp constructor requires positive unsigned 32-bit integers --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, -2147483648 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, -2147483648 given ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp_error-004.phpt0000644000076500000240000000151013572250760022124 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp constructor requires 64-bit integers to be positive unsigned 32-bit integers --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer, 4294967296 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer, 4294967296 given ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp_error-005.phpt0000644000076500000240000000141213572250760022126 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp constructor requires strings to parse as 64-bit integers --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "1.23" 
as 64-bit integer increment for MongoDB\BSON\Timestamp initialization OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "5.67" as 64-bit integer timestamp for MongoDB\BSON\Timestamp initialization ===DONE=== mongodb-1.6.1/tests/bson/bson-timestamp_error-006.phpt0000644000076500000240000000403313572250760022131 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Timestamp constructor requires integer or string arguments --FILE-- ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer or string, %r(null|NULL)%r given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer or string, %r(double|float)%r given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer or string, bool%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer or string, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected increment to be an unsigned 32-bit integer or string, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer or string, %r(null|NULL)%r given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer or string, %r(double|float)%r given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer or string, bool%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer or string, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected timestamp to be an unsigned 32-bit integer or string, stdClass given ===DONE=== mongodb-1.6.1/tests/bson/bson-timestampinterface-001.phpt0000644000076500000240000000043213572250760022573 0ustar alcaeusstaff--TEST-- MongoDB\BSON\TimestampInterface is implemented by MongoDB\BSON\Timestamp --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-toCanonicalJSON-001.phpt0000644000076500000240000000166213572250760021641 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toCanonicalExtendedJSON(): Encoding JSON --FILE-- null ], [ 'boolean' => true ], [ 'string' => 'foo' ], [ 'integer' => 123 ], [ 'double' => 1.0, ], [ 'nan' => NAN ], [ 'pos_inf' => INF ], [ 'neg_inf' => -INF ], [ 'array' => [ 'foo', 'bar' ]], [ 'document' => [ 'foo' => 'bar' ]], ]; foreach ($tests as $value) { $bson = fromPHP($value); echo toCanonicalExtendedJSON($bson), "\n"; } ?> ===DONE=== --EXPECT-- { } { "null" : null } { "boolean" : true } { "string" : "foo" } { "integer" : { "$numberInt" : "123" } } { "double" : { "$numberDouble" : "1.0" } } { "nan" : { "$numberDouble" : "NaN" } } { "pos_inf" : { "$numberDouble" : "Infinity" } } { "neg_inf" : { "$numberDouble" : "-Infinity" } } { "array" : [ "foo", "bar" ] } { "document" : { "foo" : "bar" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toCanonicalJSON-002.phpt0000644000076500000240000000270013572250760021634 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toCanonicalExtendedJSON(): Encoding extended JSON types --FILE-- new MongoDB\BSON\ObjectId('56315a7c6118fd1b920270b1') ], [ 'binary' => new MongoDB\BSON\Binary('foo', MongoDB\BSON\Binary::TYPE_GENERIC) ], [ 'date' => new MongoDB\BSON\UTCDateTime(1445990400000) ], [ 'timestamp' => new MongoDB\BSON\Timestamp(1234, 5678) ], [ 'regex' => new MongoDB\BSON\Regex('pattern', 
'i') ], [ 'code' => new MongoDB\BSON\Javascript('function() { return 1; }') ], [ 'code_ws' => new MongoDB\BSON\Javascript('function() { return a; }', ['a' => 1]) ], [ 'minkey' => new MongoDB\BSON\MinKey ], [ 'maxkey' => new MongoDB\BSON\MaxKey ], ]; foreach ($tests as $value) { $bson = fromPHP($value); echo toCanonicalExtendedJSON($bson), "\n"; } ?> ===DONE=== --EXPECT-- { "_id" : { "$oid" : "56315a7c6118fd1b920270b1" } } { "binary" : { "$binary" : { "base64": "Zm9v", "subType" : "00" } } } { "date" : { "$date" : { "$numberLong" : "1445990400000" } } } { "timestamp" : { "$timestamp" : { "t" : 5678, "i" : 1234 } } } { "regex" : { "$regularExpression" : { "pattern" : "pattern", "options" : "i" } } } { "code" : { "$code" : "function() { return 1; }" } } { "code_ws" : { "$code" : "function() { return a; }", "$scope" : { "a" : { "$numberInt" : "1" } } } } { "minkey" : { "$minKey" : 1 } } { "maxkey" : { "$maxKey" : 1 } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toCanonicalJSON_error-001.phpt0000644000076500000240000000142713572250760023051 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toCanonicalExtendedJSON(): BSON decoding exceptions --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader OK: Got MongoDB\Driver\Exception\UnexpectedValueException Reading document did not exhaust input buffer ===DONE=== mongodb-1.6.1/tests/bson/bson-toCanonicalJSON_error-002.phpt0000644000076500000240000000135113572250760023046 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toCanonicalExtendedJSON(): BSON decoding exceptions for malformed documents --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader ===DONE=== mongodb-1.6.1/tests/bson/bson-toCanonicalJSON_error-003.phpt0000644000076500000240000000272413572250760023054 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toCanonicalExtendedJSON(): BSON decoding exceptions for bson_as_canonical_json() failure --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string ===DONE=== mongodb-1.6.1/tests/bson/bson-toJSON-001.phpt0000644000076500000240000000170313572250760020025 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toJSON(): Encoding JSON --FILE-- null ], [ 'boolean' => true ], [ 'string' => 'foo' ], [ 'integer' => 123 ], [ 'double' => 1.0, ], /* Note: toJSON() does not properly handle NAN and INF values. * toCanonicalExtendedJSON() or toRelaxedExtendedJSON() should be used * instead. 
*/ [ 'nan' => NAN ], [ 'pos_inf' => INF ], [ 'neg_inf' => -INF ], [ 'array' => [ 'foo', 'bar' ]], [ 'document' => [ 'foo' => 'bar' ]], ]; foreach ($tests as $value) { $bson = fromPHP($value); echo toJSON($bson), "\n"; } ?> ===DONE=== --EXPECTF-- { } { "null" : null } { "boolean" : true } { "string" : "foo" } { "integer" : 123 } { "double" : 1.0 } { "nan" : %r-?nan(\(ind\))?%r } { "pos_inf" : inf } { "neg_inf" : -inf } { "array" : [ "foo", "bar" ] } { "document" : { "foo" : "bar" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toJSON-002.phpt0000644000076500000240000000251013572250760020023 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toJSON(): Encoding extended JSON types --FILE-- new MongoDB\BSON\ObjectId('56315a7c6118fd1b920270b1') ], [ 'binary' => new MongoDB\BSON\Binary('foo', MongoDB\BSON\Binary::TYPE_GENERIC) ], [ 'date' => new MongoDB\BSON\UTCDateTime(1445990400000) ], [ 'timestamp' => new MongoDB\BSON\Timestamp(1234, 5678) ], [ 'regex' => new MongoDB\BSON\Regex('pattern', 'i') ], [ 'code' => new MongoDB\BSON\Javascript('function() { return 1; }') ], [ 'code_ws' => new MongoDB\BSON\Javascript('function() { return a; }', ['a' => 1]) ], [ 'minkey' => new MongoDB\BSON\MinKey ], [ 'maxkey' => new MongoDB\BSON\MaxKey ], ]; foreach ($tests as $value) { $bson = fromPHP($value); echo toJSON($bson), "\n"; } ?> ===DONE=== --EXPECT-- { "_id" : { "$oid" : "56315a7c6118fd1b920270b1" } } { "binary" : { "$binary" : "Zm9v", "$type" : "00" } } { "date" : { "$date" : 1445990400000 } } { "timestamp" : { "$timestamp" : { "t" : 5678, "i" : 1234 } } } { "regex" : { "$regex" : "pattern", "$options" : "i" } } { "code" : { "$code" : "function() { return 1; }" } } { "code_ws" : { "$code" : "function() { return a; }", "$scope" : { "a" : 1 } } } { "minkey" : { "$minKey" : 1 } } { "maxkey" : { "$maxKey" : 1 } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toJSON_error-001.phpt0000644000076500000240000000136513572250760021242 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toJSON(): BSON decoding exceptions --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader OK: Got MongoDB\Driver\Exception\UnexpectedValueException Reading document did not exhaust input buffer ===DONE=== mongodb-1.6.1/tests/bson/bson-toJSON_error-002.phpt0000644000076500000240000000130713572250760021237 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toJSON(): BSON decoding exceptions for malformed documents --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader ===DONE=== mongodb-1.6.1/tests/bson/bson-toJSON_error-003.phpt0000644000076500000240000000265013572250760021242 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toJSON(): BSON decoding exceptions for bson_as_json() failure --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-001.phpt0000644000076500000240000000452713572250760017712 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): __pclass must be both instantiatable and Persistable --FILE-- unserialized = true; } } // Create base64-encoded class names for __pclass field's binary 
data $bMyAbstractDocument = base64_encode('MyAbstractDocument'); $bMyDocument = base64_encode('MyDocument'); $bUnserializable = base64_encode('MongoDB\BSON\Unserializable'); $bPersistable = base64_encode('MongoDB\BSON\Persistable'); $tests = array( '{ "foo": "yes", "__pclass": { "$binary": "' . $bMyAbstractDocument . '", "$type": "80" } }', '{ "foo": "yes", "__pclass": { "$binary": "' . $bMyDocument . '", "$type": "80" } }', '{ "foo": "yes", "__pclass": { "$binary": "' . $bUnserializable . '", "$type": "80" } }', '{ "foo": "yes", "__pclass": { "$binary": "' . $bPersistable . '", "$type": "44" } }', ); foreach ($tests as $test) { echo $test, "\n"; var_dump(toPHP(fromJSON($test))); echo "\n"; } ?> ===DONE=== --EXPECTF-- { "foo": "yes", "__pclass": { "$binary": "TXlBYnN0cmFjdERvY3VtZW50", "$type": "80" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(18) "MyAbstractDocument" ["type"]=> int(128) } } { "foo": "yes", "__pclass": { "$binary": "TXlEb2N1bWVudA==", "$type": "80" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(10) "MyDocument" ["type"]=> int(128) } } { "foo": "yes", "__pclass": { "$binary": "TW9uZ29EQlxCU09OXFVuc2VyaWFsaXphYmxl", "$type": "80" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(27) "MongoDB\BSON\Unserializable" ["type"]=> int(128) } } { "foo": "yes", "__pclass": { "$binary": "TW9uZ29EQlxCU09OXFBlcnNpc3RhYmxl", "$type": "44" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(24) "MongoDB\BSON\Persistable" ["type"]=> int(68) } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-002.phpt0000644000076500000240000000337413572250760017712 0ustar alcaeusstaff--TEST-- MongoDB\BSON\fromPHP(): Null type map values imply default behavior --FILE-- data = array( 'list' => array(1, 2, 3), 'map' => (object) array('foo' => 'bar'), ); } public function bsonSerialize() { return $this->data; } public function bsonUnserialize(array $data) { foreach (array('list', 'map') as $key) { if (isset($data[$key])) { $this->data[$key] = $data[$key]; } } } } $bson = fromPHP(new MyDocument); echo "Test ", toJSON($bson), "\n"; hex_dump($bson); $typeMap = array( 'array' => null, 'document' => null, 'root' => null, ); var_dump(toPHP($bson, $typeMap)); ?> ===DONE=== --EXPECTF-- Test { "__pclass" : { "$binary" : "TXlEb2N1bWVudA==", "$type" : "80" }, "list" : [ 1, 2, 3 ], "map" : { "foo" : "bar" } } 0 : 55 00 00 00 05 5f 5f 70 63 6c 61 73 73 00 0a 00 [U....__pclass...] 10 : 00 00 80 4d 79 44 6f 63 75 6d 65 6e 74 04 6c 69 [...MyDocument.li] 20 : 73 74 00 1a 00 00 00 10 30 00 01 00 00 00 10 31 [st......0......1] 30 : 00 02 00 00 00 10 32 00 03 00 00 00 00 03 6d 61 [......2.......ma] 40 : 70 00 12 00 00 00 02 66 6f 6f 00 04 00 00 00 62 [p......foo.....b] 50 : 61 72 00 00 00 [ar...] 
object(MyDocument)#%d (1) { ["data"]=> array(2) { ["list"]=> array(3) { [0]=> int(1) [1]=> int(2) [2]=> int(3) } ["map"]=> object(stdClass)#%d (1) { ["foo"]=> string(3) "bar" } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-003.phpt0000644000076500000240000002455113572250760017713 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Tests from serialization specification --FILE-- $value) { $this->$key = $value; } $this->unserialized = true; } } class OurClass implements MongoDB\BSON\Persistable { function bsonSerialize() { // Not tested with this test, so return empty array return array(); } function bsonUnserialize(array $data) { foreach ($data as $key => $value) { $this->$key = $value; } $this->unserialized = true; } } class TheirClass extends OurClass { } // Create base64-encoded class names for __pclass field's binary data $bMyClass = base64_encode('MyClass'); $bYourClass = base64_encode('YourClass'); $bOurClass = base64_encode('OurClass'); $bTheirClass = base64_encode('TheirClass'); $bInterface = base64_encode('MongoDB\BSON\Unserializable'); $testGroups = array( array( 'name' => 'DEFAULT TYPEMAP', 'typemap' => array(), 'tests' => array( '{ "foo": "yes", "bar" : false }', '{ "foo": "no", "array" : [ 5, 6 ] }', '{ "foo": "no", "obj" : { "embedded" : 4.125 } }', '{ "foo": "yes", "__pclass": "MyClass" }', '{ "foo": "yes", "__pclass": { "$binary": "' . $bMyClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass": { "$binary": "' . $bYourClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass": { "$binary": "' . $bOurClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass": { "$binary": "' . $bYourClass . '", "$type": "44" } }', ), ), array( 'name' => 'NONEXISTING CLASS', 'typemap' => array('root' => 'MissingClass'), 'tests' => array( '{ "foo": "yes" }', ), ), array( 'name' => 'DOES NOT IMPLEMENT UNSERIALIZABLE', 'typemap' => array('root' => 'MyClass'), 'tests' => array( '{ "foo": "yes", "__pclass": { "$binary": "' . $bMyClass . '", "$type": "80" } }', ), ), array( 'name' => 'IS NOT A CONCRETE CLASS', 'typemap' => array('root' => 'MongoDB\BSON\Unserializable'), 'tests' => array( '{ "foo": "yes" }', ), ), array( 'name' => 'IS NOT A CONCRETE CLASS VIA PCLASS', 'typemap' => array('root' => 'YourClass'), 'tests' => array( '{ "foo": "yes", "__pclass" : { "$binary": "' . $bInterface . '", "$type": "80" } }', ), ), array( 'name' => 'PCLASS OVERRIDES TYPEMAP (1)', 'typemap' => array('root' => 'YourClass'), 'tests' => array( '{ "foo": "yes", "__pclass" : { "$binary": "' . $bMyClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass" : { "$binary": "' . $bOurClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass" : { "$binary": "' . $bTheirClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass" : { "$binary": "' . $bYourClass . '", "$type": "80" } }', ), ), array( 'name' => 'PCLASS OVERRIDES TYPEMAP (2)', 'typemap' => array('root' => 'OurClass'), 'tests' => array( '{ "foo": "yes", "__pclass" : { "$binary": "' . $bTheirClass . '", "$type": "80" } }', ), ), array( 'name' => 'OBJECTS AS ARRAY', 'typemap' => array('root' => 'array', 'document' => 'array'), 'tests' => array( '{ "foo": "yes", "bar" : false }', '{ "foo": "no", "array" : [ 5, 6 ] }', '{ "foo": "no", "obj" : { "embedded" : 4.125 } }', '{ "foo": "yes", "__pclass": "MyClass" }', '{ "foo": "yes", "__pclass" : { "$binary": "' . $bMyClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass" : { "$binary": "' . $bOurClass . 
'", "$type": "80" } }', ), ), array( 'name' => 'OBJECTS AS STDCLASS', 'typemap' => array('root' => 'object', 'document' => 'object'), 'tests' => array( '{ "foo": "yes", "__pclass" : { "$binary": "' . $bMyClass . '", "$type": "80" } }', '{ "foo": "yes", "__pclass" : { "$binary": "' . $bOurClass . '", "$type": "80" } }', ), ), ); foreach ($testGroups as $testGroup) { printf("=== %s ===\n\n", $testGroup['name']); foreach ($testGroup['tests'] as $test) { echo $test, "\n"; $bson = fromJSON($test); try { var_dump(toPHP($bson, $testGroup['typemap'])); } catch (MongoDB\Driver\Exception\Exception $e) { echo $e->getMessage(), "\n"; } echo "\n"; } echo "\n"; } ?> ===DONE=== --EXPECTF-- === DEFAULT TYPEMAP === { "foo": "yes", "bar" : false } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["bar"]=> bool(false) } { "foo": "no", "array" : [ 5, 6 ] } object(stdClass)#%d (2) { ["foo"]=> string(2) "no" ["array"]=> array(2) { [0]=> int(5) [1]=> int(6) } } { "foo": "no", "obj" : { "embedded" : 4.125 } } object(stdClass)#%d (2) { ["foo"]=> string(2) "no" ["obj"]=> object(stdClass)#%d (1) { ["embedded"]=> float(4.125) } } { "foo": "yes", "__pclass": "MyClass" } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> string(7) "MyClass" } { "foo": "yes", "__pclass": { "$binary": "TXlDbGFzcw==", "$type": "80" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(7) "MyClass" ["type"]=> int(128) } } { "foo": "yes", "__pclass": { "$binary": "WW91ckNsYXNz", "$type": "80" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(9) "YourClass" ["type"]=> int(128) } } { "foo": "yes", "__pclass": { "$binary": "T3VyQ2xhc3M=", "$type": "80" } } object(OurClass)#%d (3) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(8) "OurClass" ["type"]=> int(128) } ["unserialized"]=> bool(true) } { "foo": "yes", "__pclass": { "$binary": "WW91ckNsYXNz", "$type": "44" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(9) "YourClass" ["type"]=> int(68) } } === NONEXISTING CLASS === { "foo": "yes" } Class MissingClass does not exist === DOES NOT IMPLEMENT UNSERIALIZABLE === { "foo": "yes", "__pclass": { "$binary": "TXlDbGFzcw==", "$type": "80" } } Class MyClass does not implement MongoDB\BSON\Unserializable === IS NOT A CONCRETE CLASS === { "foo": "yes" } Class MongoDB\BSON\Unserializable is not instantiatable === IS NOT A CONCRETE CLASS VIA PCLASS === { "foo": "yes", "__pclass" : { "$binary": "TW9uZ29EQlxCU09OXFVuc2VyaWFsaXphYmxl", "$type": "80" } } object(YourClass)#%d (3) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(27) "MongoDB\BSON\Unserializable" ["type"]=> int(128) } ["unserialized"]=> bool(true) } === PCLASS OVERRIDES TYPEMAP (1) === { "foo": "yes", "__pclass" : { "$binary": "TXlDbGFzcw==", "$type": "80" } } object(YourClass)#%d (3) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(7) "MyClass" ["type"]=> int(128) } ["unserialized"]=> bool(true) } { "foo": "yes", "__pclass" : { "$binary": "T3VyQ2xhc3M=", "$type": "80" } } object(OurClass)#%d (3) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(8) "OurClass" ["type"]=> int(128) } ["unserialized"]=> bool(true) } { "foo": "yes", "__pclass" : { "$binary": 
"VGhlaXJDbGFzcw==", "$type": "80" } } object(TheirClass)#%d (3) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(10) "TheirClass" ["type"]=> int(128) } ["unserialized"]=> bool(true) } { "foo": "yes", "__pclass" : { "$binary": "WW91ckNsYXNz", "$type": "80" } } object(YourClass)#%d (3) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(9) "YourClass" ["type"]=> int(128) } ["unserialized"]=> bool(true) } === PCLASS OVERRIDES TYPEMAP (2) === { "foo": "yes", "__pclass" : { "$binary": "VGhlaXJDbGFzcw==", "$type": "80" } } object(TheirClass)#%d (3) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(10) "TheirClass" ["type"]=> int(128) } ["unserialized"]=> bool(true) } === OBJECTS AS ARRAY === { "foo": "yes", "bar" : false } array(2) { ["foo"]=> string(3) "yes" ["bar"]=> bool(false) } { "foo": "no", "array" : [ 5, 6 ] } array(2) { ["foo"]=> string(2) "no" ["array"]=> array(2) { [0]=> int(5) [1]=> int(6) } } { "foo": "no", "obj" : { "embedded" : 4.125 } } array(2) { ["foo"]=> string(2) "no" ["obj"]=> array(1) { ["embedded"]=> float(4.125) } } { "foo": "yes", "__pclass": "MyClass" } array(2) { ["foo"]=> string(3) "yes" ["__pclass"]=> string(7) "MyClass" } { "foo": "yes", "__pclass" : { "$binary": "TXlDbGFzcw==", "$type": "80" } } array(2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(7) "MyClass" ["type"]=> int(128) } } { "foo": "yes", "__pclass" : { "$binary": "T3VyQ2xhc3M=", "$type": "80" } } array(2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(8) "OurClass" ["type"]=> int(128) } } === OBJECTS AS STDCLASS === { "foo": "yes", "__pclass" : { "$binary": "TXlDbGFzcw==", "$type": "80" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(7) "MyClass" ["type"]=> int(128) } } { "foo": "yes", "__pclass" : { "$binary": "T3VyQ2xhc3M=", "$type": "80" } } object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(8) "OurClass" ["type"]=> int(128) } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-004.phpt0000644000076500000240000002443313572250760017713 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): BSON array keys should be disregarded during visitation --FILE-- [$value]]); // Alter the key of the BSON array's first element $bson[12] = '1'; var_dump(toPHP($bson)); /* Note that numeric indexes within the HashTable are not accessible without * casting the object to an array. This is because the entries are only * stored with numeric indexes and do not also have string equivalents, as * might be created with zend_symtable_update(). This behavior is not unique * to the driver, as `(object) ['foo']` would demonstrate the same issue. 
*/ var_dump(toPHP($bson, ['array' => 'object'])); var_dump(toPHP($bson, ['array' => 'MyArrayObject'])); echo "\n"; } ?> ===DONE=== --EXPECTF-- Testing NULL visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> NULL } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> NULL } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> NULL } } } Testing boolean visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> bool(true) } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> bool(true) } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> bool(true) } } } Testing integer visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> int(1) } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> int(1) } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> int(1) } } } Testing double visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> float(4.125) } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> float(4.125) } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> float(4.125) } } } Testing string visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> string(3) "foo" } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> string(3) "foo" } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> string(3) "foo" } } } Testing array visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> array(0) { } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(stdClass)#%d (0) { } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(0) { } } } } } Testing stdClass visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(stdClass)#%d (0) { } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(stdClass)#%d (0) { } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(stdClass)#%d (0) { } } } } Testing MongoDB\BSON\Binary visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(3) "foo" ["type"]=> int(0) } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(3) "foo" ["type"]=> int(0) } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\Binary)#%d (2) { ["data"]=> string(3) "foo" ["type"]=> int(0) } } } } Testing MongoDB\BSON\Decimal128 visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\Decimal128)#%d (1) { ["dec"]=> string(4) "3.14" } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\Decimal128)#%d (1) { ["dec"]=> string(4) "3.14" } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> 
object(MongoDB\BSON\Decimal128)#%d (1) { ["dec"]=> string(4) "3.14" } } } } Testing MongoDB\BSON\Javascript visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\Javascript)#%d (2) { ["code"]=> string(12) "function(){}" ["scope"]=> NULL } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\Javascript)#%d (2) { ["code"]=> string(12) "function(){}" ["scope"]=> NULL } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\Javascript)#%d (2) { ["code"]=> string(12) "function(){}" ["scope"]=> NULL } } } } Testing MongoDB\BSON\MaxKey visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\MaxKey)#%d (0) { } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\MaxKey)#%d (0) { } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\MaxKey)#%d (0) { } } } } Testing MongoDB\BSON\MinKey visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\MinKey)#%d (0) { } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\MinKey)#%d (0) { } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\MinKey)#%d (0) { } } } } Testing MongoDB\BSON\ObjectId visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\ObjectId)#%d (1) { ["oid"]=> string(24) "586c18d86118fd6c9012dec1" } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\ObjectId)#%d (1) { ["oid"]=> string(24) "586c18d86118fd6c9012dec1" } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\ObjectId)#%d (1) { ["oid"]=> string(24) "586c18d86118fd6c9012dec1" } } } } Testing MongoDB\BSON\Regex visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\Regex)#%d (2) { ["pattern"]=> string(3) "foo" ["flags"]=> string(0) "" } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\Regex)#%d (2) { ["pattern"]=> string(3) "foo" ["flags"]=> string(0) "" } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\Regex)#%d (2) { ["pattern"]=> string(3) "foo" ["flags"]=> string(0) "" } } } } Testing MongoDB\BSON\Timestamp visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\Timestamp)#%d (2) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d (1) { [%r(0|"0")%r]=> object(MongoDB\BSON\Timestamp)#%d (2) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\Timestamp)#%d (2) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } } } } Testing MongoDB\BSON\UTCDateTime visitor function object(stdClass)#%d (1) { ["x"]=> array(1) { [0]=> object(MongoDB\BSON\UTCDateTime)#%d (1) { ["milliseconds"]=> string(13) "1483479256924" } } } object(stdClass)#%d (1) { ["x"]=> object(stdClass)#%d 
(1) { [%r(0|"0")%r]=> object(MongoDB\BSON\UTCDateTime)#%d (1) { ["milliseconds"]=> string(13) "1483479256924" } } } object(stdClass)#%d (1) { ["x"]=> object(MyArrayObject)#%d (1) { ["storage":"ArrayObject":private]=> array(1) { [0]=> object(MongoDB\BSON\UTCDateTime)#%d (1) { ["milliseconds"]=> string(13) "1483479256924" } } } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-006.phpt0000644000076500000240000000302313572250760017705 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Decodes Binary UUID types with any data length --FILE-- ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(15) "0123456789abcde" ["type"]=> int(3) } } object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(17) "0123456789abcdefg" ["type"]=> int(3) } } object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(15) "0123456789abcde" ["type"]=> int(4) } } object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(17) "0123456789abcdefg" ["type"]=> int(4) } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-007.phpt0000644000076500000240000001007013572250760017706 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): fieldPath typemaps without server --FILE-- 1, 'object' => [ 'parent1' => [ 'child1' => [ 1, 2, 3 ], 'child2' => [ 4, 5, 6 ], ], 'parent2' => [ 'child1' => [ 7, 8, 9 ], 'child2' => [ 10, 11, 12 ], ], ], ] ); function fetch($bson, $typeMap = []) { for ($i = 0; $i < 25000; $i++) { $documents = [ \MongoDB\BSON\toPHP($bson, $typeMap) ]; } return $documents; } echo "\nSetting 'object.$.child1' path to 'MyWildCardArrayObject'\n"; $documents = fetch($bson, ["fieldPaths" => [ 'object.$.child1' => "MyWildCardArrayObject" ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildCardArrayObject); var_dump(is_array($documents[0]->object->parent1->child2)); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump($documents[0]->object->parent2->child1 instanceof MyWildCardArrayObject); var_dump(is_array($documents[0]->object->parent2->child2)); echo "\nSetting 'object.parent1.$' path to 'MyWildCardArrayObject' and 'object.parent2.child1' to 'MyArrayObject'\n"; $documents = fetch($bson, ["fieldPaths" => [ 'object.parent1.$' => "MyWildCardArrayObject", 'object.parent2.child1' => "MyArrayObject", ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildCardArrayObject); var_dump($documents[0]->object->parent1->child2 instanceof MyWildCardArrayObject); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump($documents[0]->object->parent2->child1 instanceof MyArrayObject); var_dump(is_array($documents[0]->object->parent2->child2)); echo "\nSetting 'object.parent1.$' path to 'MyWildCardArrayObject' and 'object.$.$' to 'MyArrayObject'\n"; $documents = fetch($bson, ["fieldPaths" => [ 'object.parent1.$' => "MyWildCardArrayObject", 'object.$.$' => "MyArrayObject", ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildCardArrayObject); var_dump($documents[0]->object->parent1->child2 instanceof MyWildCardArrayObject); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump($documents[0]->object->parent2->child1 instanceof MyArrayObject); var_dump($documents[0]->object->parent2->child2 instanceof 
MyArrayObject); echo "\nSetting 'object.parent1.$' path to 'MyWildCardArrayObject' and 'object.$.child2' to 'MyArrayObject'\n"; $documents = fetch($bson, ["fieldPaths" => [ 'object.parent1.child1' => "MyWildCardArrayObject", 'object.$.child2' => "MyArrayObject", ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildCardArrayObject); var_dump($documents[0]->object->parent1->child2 instanceof MyArrayObject); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump(is_array($documents[0]->object->parent2->child1)); var_dump($documents[0]->object->parent2->child2 instanceof MyArrayObject); ?> ===DONE=== --EXPECT-- Setting 'object.$.child1' path to 'MyWildCardArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildCardArrayObject' and 'object.parent2.child1' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildCardArrayObject' and 'object.$.$' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildCardArrayObject' and 'object.$.child2' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-008.phpt0000644000076500000240000000306013572250760017710 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Setting fieldPath typemaps for compound types with string keys --FILE-- 1, 'array' => [1, 2, 3], 'object' => ['string' => 'keys', 'for' => 'ever'] ] ); function fetch($bson, $typeMap = []) { return \MongoDB\BSON\toPHP($bson, $typeMap); } echo "Default\n"; $document = fetch($bson); var_dump($document instanceof stdClass); var_dump(is_array($document->array)); var_dump($document->object instanceof stdClass); echo "\nSetting 'object' path to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object' => "MyArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_array($document->array)); var_dump($document->object instanceof MyArrayObject); echo "\nSetting 'object' and 'array' path to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object' => "MyArrayObject", 'array' => "MyArrayObject", ]]); var_dump($document instanceof stdClass); var_dump($document->array instanceof MyArrayObject); var_dump($document->object instanceof MyArrayObject); ?> ===DONE=== --EXPECT-- Default bool(true) bool(true) bool(true) Setting 'object' path to 'MyArrayObject' bool(true) bool(true) bool(true) Setting 'object' and 'array' path to 'MyArrayObject' bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-009.phpt0000644000076500000240000000416313572250760017716 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Setting fieldPath typemaps for compound types with numerical keys --FILE-- 1, 'array0' => [0 => [ 4, 5, 6 ], 1 => [ 7, 8, 9 ]], 'array1' => [1 => [ 4, 5, 6 ], 2 => [ 7, 8, 9 ]], ] ); function fetch($bson, $typeMap = []) { return \MongoDB\BSON\toPHP($bson, $typeMap); } echo "Default\n"; $document = fetch($bson); var_dump($document instanceof stdClass); var_dump(is_array($document->array0)); var_dump(is_object($document->array1)); var_dump($document->array1 instanceof stdClass); echo "\nSetting 'array0' path to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'array0' => "MyArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_object($document->array0)); 
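// 'array0' (0-based sequential keys) round-trips as a BSON array, so the 'array0'
// field path applies to it here; 'array1' (keys starting at 1) was encoded as a
// BSON document and therefore decodes to stdClass by default, as the "Default"
// output above shows.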
var_dump($document->array0 instanceof MyArrayObject); echo "\nSetting 'array0.1' path to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'array0.1' => "MyArrayObject", ]]); var_dump($document instanceof stdClass); var_dump(is_array($document->array0)); var_dump(is_array($document->array0[0])); var_dump($document->array0[1] instanceof MyArrayObject); echo "\nSetting 'array1.1' path to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'array1.1' => "MyArrayObject", ]]); var_dump($document instanceof stdClass); var_dump(is_object($document->array1)); var_dump($document->array1 instanceof stdClass); $a = ((array) $document->array1); var_dump($a[1] instanceof MyArrayObject); var_dump(is_array($a[2])); ?> ===DONE=== --EXPECT-- Default bool(true) bool(true) bool(true) bool(true) Setting 'array0' path to 'MyArrayObject' bool(true) bool(true) bool(true) Setting 'array0.1' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'array1.1' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-010.phpt0000644000076500000240000000772513572250760017715 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Setting fieldPath typemaps for compound types with wildcard keys --FILE-- 1, 'array' => [0 => [ 4, 5, 6 ], 1 => [ 7, 8, 9 ]], 'object' => ['one' => [ 4, 5, 6 ], 'two' => [ 7, 8, 9 ]], ] ); function fetch($bson, $typeMap = []) { return \MongoDB\BSON\toPHP($bson, $typeMap); } echo "\nSetting 'array.$' path to 'MyWildcardArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'array.$' => "MyWildcardArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_array($document->array)); var_dump($document->array[0] instanceof MyWildcardArrayObject); var_dump($document->array[1] instanceof MyWildcardArrayObject); echo "\nSetting 'array.1' to 'MyArrayObject' and 'array.$' path to 'MyWildcardArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'array.1' => "MyArrayObject", 'array.$' => "MyWildcardArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_array($document->array)); var_dump($document->array[0] instanceof MyWildcardArrayObject); var_dump($document->array[1] instanceof MyArrayObject); echo "\nSetting 'array.$' to 'MyWildcardArrayObject' and 'array.1' path to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'array.$' => "MyWildcardArrayObject", 'array.1' => "MyArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_array($document->array)); var_dump($document->array[0] instanceof MyWildcardArrayObject); var_dump($document->array[1] instanceof MyWildcardArrayObject); echo "\nSetting 'object.$' path to 'MyWildcardArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object.$' => "MyWildcardArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_object($document->object)); var_dump($document->object->one instanceof MyWildcardArrayObject); var_dump($document->object->two instanceof MyWildcardArrayObject); echo "\nSetting 'object.two' to 'MyArrayObject' and 'object.$' path to 'MyWildcardArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object.two' => "MyArrayObject", 'object.$' => "MyWildcardArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_object($document->object)); var_dump($document->object->one instanceof MyWildcardArrayObject); var_dump($document->object->two instanceof MyArrayObject); echo "\nSetting 'object.$' to 'MyWildcardArrayObject' and 'object.one' path to 
'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object.$' => "MyWildcardArrayObject", 'object.one' => "MyArrayObject" ]]); var_dump($document instanceof stdClass); var_dump(is_object($document->object)); var_dump($document->object->one instanceof MyWildcardArrayObject); var_dump($document->object->two instanceof MyWildcardArrayObject); ?> ===DONE=== --EXPECT-- Setting 'array.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'array.1' to 'MyArrayObject' and 'array.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'array.$' to 'MyWildcardArrayObject' and 'array.1' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'object.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'object.two' to 'MyArrayObject' and 'object.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'object.$' to 'MyWildcardArrayObject' and 'object.one' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP-011.phpt0000644000076500000240000001146213572250760017707 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Setting fieldPath typemaps for compound types with wildcard keys (nested) --FILE-- 1, 'object' => [ 'parent1' => [ 'child1' => [ 1, 2, 3 ], 'child2' => [ 4, 5, 6 ], ], 'parent2' => [ 'child1' => [ 7, 8, 9 ], 'child2' => [ 10, 11, 12 ], ], ], ] ); function fetch($bson, $typeMap = []) { return \MongoDB\BSON\toPHP($bson, $typeMap); } echo "\nSetting 'object.$.child1' path to 'MyWildcardArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object.$.child1' => "MyWildcardArrayObject" ]]); var_dump($document->object->parent1 instanceof stdClass); var_dump($document->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump(is_array($document->object->parent1->child2)); var_dump($document->object->parent2 instanceof stdClass); var_dump($document->object->parent2->child1 instanceof MyWildcardArrayObject); var_dump(is_array($document->object->parent2->child2)); echo "\nSetting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.parent2.child1' to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object.parent1.$' => "MyWildcardArrayObject", 'object.parent2.child1' => "MyArrayObject", ]]); var_dump($document->object->parent1 instanceof stdClass); var_dump($document->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($document->object->parent1->child2 instanceof MyWildcardArrayObject); var_dump($document->object->parent2 instanceof stdClass); var_dump($document->object->parent2->child1 instanceof MyArrayObject); var_dump(is_array($document->object->parent2->child2)); echo "\nSetting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.$' to 'MyArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object.parent1.$' => "MyWildcardArrayObject", 'object.$.$' => "MyArrayObject", ]]); var_dump($document->object->parent1 instanceof stdClass); var_dump($document->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($document->object->parent1->child2 instanceof MyWildcardArrayObject); var_dump($document->object->parent2 instanceof stdClass); var_dump($document->object->parent2->child1 instanceof MyArrayObject); var_dump($document->object->parent2->child2 instanceof MyArrayObject); echo "\nSetting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.child2' to 'MyArrayObject'\n"; 
$document = fetch($bson, ["fieldPaths" => [ 'object.parent1.child1' => "MyWildcardArrayObject", 'object.$.child2' => "MyArrayObject", ]]); var_dump($document->object->parent1 instanceof stdClass); var_dump($document->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($document->object->parent1->child2 instanceof MyArrayObject); var_dump($document->object->parent2 instanceof stdClass); var_dump(is_array($document->object->parent2->child1)); var_dump($document->object->parent2->child2 instanceof MyArrayObject); echo "\nSetting 'object.parent1.child2 path to 'MyArrayObject' and 'object.$.$' to 'MyWildcardArrayObject'\n"; $document = fetch($bson, ["fieldPaths" => [ 'object.parent1.child2' => "MyArrayObject", 'object.$.$' => "MyWildcardArrayObject", ]]); var_dump($document->object->parent1 instanceof stdClass); var_dump($document->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($document->object->parent1->child2 instanceof MyArrayObject); var_dump($document->object->parent2 instanceof stdClass); var_dump($document->object->parent2->child1 instanceof MyWildcardArrayObject); var_dump($document->object->parent2->child2 instanceof MyWildcardArrayObject); ?> ===DONE=== --EXPECT-- Setting 'object.$.child1' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.parent2.child1' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.$' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.child2' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.child2 path to 'MyArrayObject' and 'object.$.$' to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP_error-001.phpt0000644000076500000240000000525213572250760021117 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Type classes must be instantiatable and implement Unserializable --FILE-- $class]; printf("Test typeMap: %s\n", json_encode($typeMap)); echo throws(function() use ($bson, $typeMap) { toPHP($bson, $typeMap); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo "\n"; } } ?> ===DONE=== --EXPECT-- Test typeMap: {"array":"MissingClass"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist Test typeMap: {"array":"MyAbstractDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyAbstractDocument is not instantiatable Test typeMap: {"array":"MyDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyDocument does not implement MongoDB\BSON\Unserializable Test typeMap: {"array":"MongoDB\\BSON\\Unserializable"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MongoDB\BSON\Unserializable is not instantiatable Test typeMap: {"document":"MissingClass"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist Test typeMap: {"document":"MyAbstractDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyAbstractDocument is not instantiatable Test typeMap: {"document":"MyDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyDocument does not implement MongoDB\BSON\Unserializable Test 
typeMap: {"document":"MongoDB\\BSON\\Unserializable"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MongoDB\BSON\Unserializable is not instantiatable Test typeMap: {"root":"MissingClass"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist Test typeMap: {"root":"MyAbstractDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyAbstractDocument is not instantiatable Test typeMap: {"root":"MyDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyDocument does not implement MongoDB\BSON\Unserializable Test typeMap: {"root":"MongoDB\\BSON\\Unserializable"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MongoDB\BSON\Unserializable is not instantiatable ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP_error-002.phpt0000644000076500000240000000125013572250760021112 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): BSON decoding exceptions --FILE-- getMessage(), "\n"; } } ?> ===DONE=== --EXPECT-- Could not read document from BSON reader Reading document did not exhaust input buffer ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP_error-003.phpt0000644000076500000240000000131313572250760021113 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): BSON decoding exceptions for malformed documents --FILE-- ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP_error-004.phpt0000644000076500000240000000401113572250760021112 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): BSON decoding exceptions for bson_iter_visit_all() failure --FILE-- 'bar'])), // Invalid UTF-8 character in embedded document's field name str_replace('INVALID!', "INVALID\xFE", fromPHP(['foo' => ['INVALID!' => 'bar']])), // Invalid UTF-8 character in string within array field str_replace('INVALID!', "INVALID\xFE", fromPHP(['foo' => ['INVALID!']])), /* Note: we don't use a three-character string in the underflow case, as * the 4-byte string length and payload (i.e. three characters + null byte) * coincidentally satisfy the expected size for an 8-byte double. We also * don't use a four-character string, since its null byte would be * interpreted as the document terminator. The actual document terminator * would then remain in the buffer and trigger a "did not exhaust" error. */ pack('VCa*xVa*xx', 17, 1, 'foo', 3, 'ab'), // Invalid field type (underflow) pack('VCa*xVa*xx', 20, 1, 'foo', 6, 'abcde'), // Invalid field type (overflow) ); foreach ($tests as $bson) { echo throws(function() use ($bson) { toPHP($bson); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; } ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path '' at offset 4 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path 'foo' at offset 0 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path 'foo' at offset 0 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path '' at offset 9 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected unknown BSON type 0x65 for field path "". Are you using the latest driver? 
===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP_error-005.phpt0000644000076500000240000000265113572250760021123 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): Field path values with bson_iter_visit_all() failures --FILE-- ['INVALID!' => 'bar'] ])), str_replace('INVALID!', "INVALID\xFE", fromPHP(['foo' => ['bar' => ['INVALID!' => 'bar']]])), str_replace('INVALID!', "INVALID\xFE", fromPHP(['foo' => ['bar' => ['INVALID!']]])), str_replace('INVALID!', "INVALID\xFE", fromPHP(['foo' => [['INVALID!']]])), str_replace('INVALID!', "INVALID\xFE", fromPHP(['foo' => [ ['bar' => ['INVALID!' => 'bar']], 6 ]])), ); foreach ($tests as $bson) { echo throws(function() use ($bson) { toPHP($bson); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; } ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path 'foo' at offset 0 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path 'foo.bar' at offset 0 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path 'foo.bar' at offset 0 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path 'foo.0' at offset 0 OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected corrupt BSON data for field path 'foo.0.bar' at offset 0 ===DONE=== mongodb-1.6.1/tests/bson/bson-toPHP_error-006.phpt0000644000076500000240000000103713572250760021121 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toPHP(): BSON decoding exception with unknown BSON type --FILE-- ["cruel" => "world"]]); $bson[15] = chr(0x42); echo throws(function() use ($bson) { toPHP($bson); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected unknown BSON type 0x42 for field path "hello.cruel". Are you using the latest driver? 
===DONE=== mongodb-1.6.1/tests/bson/bson-toRelaxedJSON-001.phpt0000644000076500000240000000160113572250760021327 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toRelaxedExtendedJSON(): Encoding JSON --FILE-- null ], [ 'boolean' => true ], [ 'string' => 'foo' ], [ 'integer' => 123 ], [ 'double' => 1.0, ], [ 'nan' => NAN ], [ 'pos_inf' => INF ], [ 'neg_inf' => -INF ], [ 'array' => [ 'foo', 'bar' ]], [ 'document' => [ 'foo' => 'bar' ]], ]; foreach ($tests as $value) { $bson = fromPHP($value); echo toRelaxedExtendedJSON($bson), "\n"; } ?> ===DONE=== --EXPECT-- { } { "null" : null } { "boolean" : true } { "string" : "foo" } { "integer" : 123 } { "double" : 1.0 } { "nan" : { "$numberDouble" : "NaN" } } { "pos_inf" : { "$numberDouble" : "Infinity" } } { "neg_inf" : { "$numberDouble" : "-Infinity" } } { "array" : [ "foo", "bar" ] } { "document" : { "foo" : "bar" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toRelaxedJSON-002.phpt0000644000076500000240000000263213572250760021335 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toRelaxedExtendedJSON(): Encoding extended JSON types --FILE-- new MongoDB\BSON\ObjectId('56315a7c6118fd1b920270b1') ], [ 'binary' => new MongoDB\BSON\Binary('foo', MongoDB\BSON\Binary::TYPE_GENERIC) ], [ 'date' => new MongoDB\BSON\UTCDateTime(1445990400000) ], [ 'timestamp' => new MongoDB\BSON\Timestamp(1234, 5678) ], [ 'regex' => new MongoDB\BSON\Regex('pattern', 'i') ], [ 'code' => new MongoDB\BSON\Javascript('function() { return 1; }') ], [ 'code_ws' => new MongoDB\BSON\Javascript('function() { return a; }', ['a' => 1]) ], [ 'minkey' => new MongoDB\BSON\MinKey ], [ 'maxkey' => new MongoDB\BSON\MaxKey ], ]; foreach ($tests as $value) { $bson = fromPHP($value); echo toRelaxedExtendedJSON($bson), "\n"; } ?> ===DONE=== --EXPECT-- { "_id" : { "$oid" : "56315a7c6118fd1b920270b1" } } { "binary" : { "$binary" : { "base64": "Zm9v", "subType" : "00" } } } { "date" : { "$date" : "2015-10-28T00:00:00Z" } } { "timestamp" : { "$timestamp" : { "t" : 5678, "i" : 1234 } } } { "regex" : { "$regularExpression" : { "pattern" : "pattern", "options" : "i" } } } { "code" : { "$code" : "function() { return 1; }" } } { "code_ws" : { "$code" : "function() { return a; }", "$scope" : { "a" : 1 } } } { "minkey" : { "$minKey" : 1 } } { "maxkey" : { "$maxKey" : 1 } } ===DONE=== mongodb-1.6.1/tests/bson/bson-toRelaxedJSON_error-001.phpt0000644000076500000240000000142313572250760022542 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toRelaxedExtendedJSON(): BSON decoding exceptions --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader OK: Got MongoDB\Driver\Exception\UnexpectedValueException Reading document did not exhaust input buffer ===DONE=== mongodb-1.6.1/tests/bson/bson-toRelaxedJSON_error-002.phpt0000644000076500000240000000134513572250760022546 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toRelaxedExtendedJSON(): BSON decoding exceptions for malformed documents --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not read document from BSON reader ===DONE=== mongodb-1.6.1/tests/bson/bson-toRelaxedJSON_error-003.phpt0000644000076500000240000000272013572250760022545 0ustar alcaeusstaff--TEST-- MongoDB\BSON\toRelaxedExtendedJSON(): BSON decoding exceptions for bson_as_canonical_json() failure --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document 
to a JSON string OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string OK: Got MongoDB\Driver\Exception\UnexpectedValueException Could not convert BSON document to a JSON string ===DONE=== mongodb-1.6.1/tests/bson/bson-undefined-001.phpt0000644000076500000240000000131713572250760020653 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Undefined #001 --FILE-- $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Test#0 { "undefined" : { "$undefined" : true } } string(41) "{ "undefined" : { "$undefined" : true } }" string(41) "{ "undefined" : { "$undefined" : true } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-undefined-compare-001.phpt0000644000076500000240000000126013572250760022274 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Undefined comparisons --FILE-- MongoDB\BSON\toPHP(MongoDB\BSON\fromJSON('{ "undefined": {"$undefined": true} }'))); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) ===DONE=== mongodb-1.6.1/tests/bson/bson-undefined-jsonserialize-001.phpt0000644000076500000240000000050513572250760023530 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Undefined::jsonSerialize() return value --FILE-- undefined; var_dump($undefined->jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$undefined"]=> bool(true) } ===DONE=== mongodb-1.6.1/tests/bson/bson-undefined-jsonserialize-002.phpt0000644000076500000240000000103213572250760023525 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Undefined::jsonSerialize() with json_encode() --FILE-- ===DONE=== --EXPECTF-- { "foo" : { "$undefined" : true } } {"foo":{"$undefined":true}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\Undefined)#%d (%d) { } } ===DONE=== mongodb-1.6.1/tests/bson/bson-undefined-serialization-001.phpt0000644000076500000240000000066313572250760023531 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Undefined serialization --FILE-- undefined); var_dump($s = serialize($undefined)); var_dump(unserialize($s)); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\Undefined)#%d (%d) { } string(34) "C:22:"MongoDB\BSON\Undefined":0:{}" object(MongoDB\BSON\Undefined)#%d (%d) { } ===DONE=== mongodb-1.6.1/tests/bson/bson-undefined-tostring-001.phpt0000644000076500000240000000041513572250760022520 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Undefined::__toString() --FILE-- undefined; var_dump((string) $undefined); ?> ===DONE=== --EXPECT-- string(0) "" ===DONE=== mongodb-1.6.1/tests/bson/bson-undefined_error-001.phpt0000644000076500000240000000041613572250760022063 0ustar alcaeusstaff--TEST-- MongoDB\BSON\Undefined cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyUndefined may not inherit from final class (MongoDB\BSON\Undefined) in %s on line %d mongodb-1.6.1/tests/bson/bson-unknown-001.phpt0000644000076500000240000000112013572250760020401 0ustar alcaeusstaff--TEST-- BSON Serializing a PHP resource should throw exception --FILE-- STDERR); $b = fromPHP($a); }, "MongoDB\Driver\Exception\UnexpectedValueException"); throws(function() { $a = array("stderr" => STDERR, "stdout" => STDOUT); $b = fromPHP($a); }, "MongoDB\Driver\Exception\UnexpectedValueException"); ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException OK: Got MongoDB\Driver\Exception\UnexpectedValueException ===DONE=== 
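<?php
/* A minimal standalone sketch of the behaviour verified by bson-unknown-001.phpt
 * above: MongoDB\BSON\fromPHP() refuses to encode PHP resources and throws a
 * MongoDB\Driver\Exception\UnexpectedValueException. Assumes the mongodb extension
 * is loaded and the CLI SAPI (so STDERR exists); the 'stream' key name is
 * illustrative only.
 */
try {
    MongoDB\BSON\fromPHP(['stream' => STDERR]);
} catch (MongoDB\Driver\Exception\UnexpectedValueException $e) {
    echo 'OK: Got ', get_class($e), "\n";
}
?>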
mongodb-1.6.1/tests/bson/bson-utcdatetime-001.phpt0000644000076500000240000000254713572250760021230 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime #001 --INI-- date.timezone=America/Los_Angeles --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => $utcdatetime)); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array('_id' => 1)); $cursor = $manager->executeQuery(NS, $query); $results = iterator_to_array($cursor); $tests = array( array($utcdatetime), array($results[0]->x), ); foreach($tests as $n => $test) { $s = fromPHP($test); echo "Test#{$n} ", $json = toJSON($s), "\n"; $bson = fromJSON($json); $testagain = toPHP($bson); var_dump(toJSON(fromPHP($test)), toJSON(fromPHP($testagain))); var_dump((object)$test == (object)$testagain); } ?> ===DONE=== --EXPECT-- Test#0 { "0" : { "$date" : 1416445411987 } } string(37) "{ "0" : { "$date" : 1416445411987 } }" string(37) "{ "0" : { "$date" : 1416445411987 } }" bool(true) Test#1 { "0" : { "$date" : 1416445411987 } } string(37) "{ "0" : { "$date" : 1416445411987 } }" string(37) "{ "0" : { "$date" : 1416445411987 } }" bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-002.phpt0000644000076500000240000000050213572250760021216 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime debug handler --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> %rint\(|string\(13\) "|%r1416445411987%r"|\)%r } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-003.phpt0000644000076500000240000000070413572250760021223 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime construction from 64-bit integer --SKIPIF-- --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1416445411987" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-004.phpt0000644000076500000240000000070313572250760021223 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime constructor defaults to current time --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "%d" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "%d" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-005.phpt0000644000076500000240000000140713572250760021226 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime construction from DateTime --INI-- date.timezone=UTC --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "%d" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1215282385000" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1293894181012" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "2551871655999" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-006.phpt0000644000076500000240000000146413572250760021232 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime construction from DateTimeImmutable --INI-- date.timezone=UTC --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "%d" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1215282385000" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1293894181012" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "2551871655999" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-007.phpt0000644000076500000240000000117013572250760021225 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime constructor 
truncates floating point values --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1416445411987" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(10) "2147483647" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(4) "1234" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-compare-001.phpt0000644000076500000240000000111713572250760022644 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime comparisons --FILE-- new MongoDB\BSON\UTCDateTime(1234)); var_dump(new MongoDB\BSON\UTCDateTime(1234) < new MongoDB\BSON\UTCDateTime(1235)); var_dump(new MongoDB\BSON\UTCDateTime(1234) > new MongoDB\BSON\UTCDateTime(1233)); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-get_properties-001.phpt0000644000076500000240000000047113572250760024253 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime get_properties handler (get_object_vars) --FILE-- ===DONE=== --EXPECT-- array(1) { ["milliseconds"]=> string(13) "1416445411987" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-get_properties-002.phpt0000644000076500000240000000052513572250760024254 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime get_properties handler (foreach) --FILE-- $value) { var_dump($key); var_dump($value); } ?> ===DONE=== --EXPECT-- string(12) "milliseconds" string(13) "1416445411987" ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-int-size-001.phpt0000644000076500000240000000111413572250760022755 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime integer parsing from string --INI-- date.timezone=UTC error_reporting=-1 dislay_errors=1 --FILE-- toDateTime()); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (1) { ["milliseconds"]=> %r(string\(13\) "|int\()%r1416445411987%r("|\))%r } object(DateTime)#%d (3) { ["date"]=> string(26) "2014-11-20 01:03:31.987000" ["timezone_type"]=> int(1) ["timezone"]=> string(6) "+00:00" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-int-size-002.phpt0000644000076500000240000000125113572250760022760 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime integer parsing from number (64-bit) --SKIPIF-- --INI-- date.timezone=UTC error_reporting=-1 dislay_errors=1 --FILE-- toDateTime()); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (1) { ["milliseconds"]=> %r(string\(13\) "|int\()%r1416445411987%r("|\))%r } object(DateTime)#%d (3) { ["date"]=> string(26) "2014-11-20 01:03:31.987000" ["timezone_type"]=> int(1) ["timezone"]=> string(6) "+00:00" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-jsonserialize-001.phpt0000644000076500000240000000053313572250760024100 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::jsonSerialize() return value --FILE-- jsonSerialize()); ?> ===DONE=== --EXPECT-- array(1) { ["$date"]=> array(1) { ["$numberLong"]=> string(13) "1476192866817" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-jsonserialize-002.phpt0000644000076500000240000000116613572250760024104 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::jsonSerialize() with json_encode() --FILE-- new MongoDB\BSON\UTCDateTime(new DateTime('2016-10-11 13:34:26.817 UTC'))]; $json = json_encode($doc); echo toJSON(fromPHP($doc)), "\n"; echo $json, "\n"; var_dump(toPHP(fromJSON($json))); ?> ===DONE=== --EXPECTF-- { "foo" : { "$date" : 1476192866817 } } {"foo":{"$date":{"$numberLong":"1476192866817"}}} object(stdClass)#%d (%d) { ["foo"]=> object(MongoDB\BSON\UTCDateTime)#%d 
(%d) { ["milliseconds"]=> string(13) "1476192866817" } } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-serialization-001.phpt0000644000076500000240000000226413572250760024077 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime serialization --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(1) "0" } string(71) "C:24:"MongoDB\BSON\UTCDateTime":34:{a:1:{s:12:"milliseconds";s:1:"0";}}" object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(1) "0" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(14) "-1416445411987" } string(85) "C:24:"MongoDB\BSON\UTCDateTime":48:{a:1:{s:12:"milliseconds";s:14:"-1416445411987";}}" object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(14) "-1416445411987" } object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1416445411987" } string(84) "C:24:"MongoDB\BSON\UTCDateTime":47:{a:1:{s:12:"milliseconds";s:13:"1416445411987";}}" object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1416445411987" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-serialization-002.phpt0000644000076500000240000000221313572250760024072 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime serialization (unserialize 32-bit data on 64-bit) --SKIPIF-- --FILE-- ===DONE=== --EXPECTF-- string(71) "C:24:"MongoDB\BSON\UTCDateTime":34:{a:1:{s:12:"milliseconds";s:1:"0";}}" object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(1) "0" } string(85) "C:24:"MongoDB\BSON\UTCDateTime":48:{a:1:{s:12:"milliseconds";s:14:"-1416445411987";}}" object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(14) "-1416445411987" } string(84) "C:24:"MongoDB\BSON\UTCDateTime":47:{a:1:{s:12:"milliseconds";s:13:"1416445411987";}}" object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1416445411987" } ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-serialization_error-001.phpt0000644000076500000240000000105213572250760025302 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime unserialization requires "milliseconds" integer or numeric string field --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\UTCDateTime initialization requires "milliseconds" integer or numeric string field ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-serialization_error-002.phpt0000644000076500000240000000117513572250760025311 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime unserialization requires "milliseconds" string to parse as 64-bit integer --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "1234.5678" as 64-bit integer for MongoDB\BSON\UTCDateTime initialization ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-set_state-001.phpt0000644000076500000240000000112313572250760023206 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::__set_state() --FILE-- $milliseconds, ])); echo "\n\n"; } ?> ===DONE=== --EXPECTF-- MongoDB\BSON\UTCDateTime::__set_state(array( %w'milliseconds' => '0', )) MongoDB\BSON\UTCDateTime::__set_state(array( %w'milliseconds' => '-1416445411987', )) MongoDB\BSON\UTCDateTime::__set_state(array( %w'milliseconds' => '1416445411987', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-set_state-002.phpt0000644000076500000240000000125413572250760023214 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::__set_state() (64-bit) --SKIPIF-- --FILE-- $milliseconds, ])); echo "\n\n"; } ?> ===DONE=== 
--EXPECTF-- MongoDB\BSON\UTCDateTime::__set_state(array( %w'milliseconds' => '0', )) MongoDB\BSON\UTCDateTime::__set_state(array( %w'milliseconds' => '-1416445411987', )) MongoDB\BSON\UTCDateTime::__set_state(array( %w'milliseconds' => '1416445411987', )) ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-set_state_error-001.phpt0000644000076500000240000000102513572250760024420 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::__set_state() requires "milliseconds" integer or numeric string field --FILE-- 1.0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\BSON\UTCDateTime initialization requires "milliseconds" integer or numeric string field ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-set_state_error-002.phpt0000644000076500000240000000114413572250760024423 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::__set_state() requires "milliseconds" string to parse as 64-bit integer --FILE-- '1234.5678']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; /* TODO: Add tests for out-of-range values once CDRIVER-1377 is resolved */ ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "1234.5678" as 64-bit integer for MongoDB\BSON\UTCDateTime initialization ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-todatetime-001.phpt0000644000076500000240000000053313572250760023356 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::toDateTime() --INI-- date.timezone=America/Los_Angeles --FILE-- toDateTime(); var_dump($datetime->format(DATE_RSS)); ?> ===DONE=== --EXPECT-- string(31) "Thu, 20 Nov 2014 01:03:31 +0000" ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-todatetime-002.phpt0000644000076500000240000000051713572250760023361 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::toDateTime() dumping seconds and microseconds --INI-- date.timezone=UTC --FILE-- toDateTime(); echo $datetime->format('U.u'), "\n"; ?> ===DONE=== --EXPECT-- 1416445411.987000 ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime-tostring-001.phpt0000644000076500000240000000043313572250760023067 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime::__toString() --INI-- date.timezone=America/Los_Angeles --FILE-- ===DONE=== --EXPECT-- string(13) "1416445411987" ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime_error-001.phpt0000644000076500000240000000071113572250760022430 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime requires object argument to implement DateTimeInterface --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected instance of DateTimeInterface, stdClass given ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetime_error-002.phpt0000644000076500000240000000043013572250760022427 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyUTCDateTime may not inherit from final class (MongoDB\BSON\UTCDateTime) in %s on line %d mongodb-1.6.1/tests/bson/bson-utcdatetime_error-003.phpt0000644000076500000240000000106713572250760022437 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime constructor requires strings to parse as 64-bit integers --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Error parsing "1234.5678" as 64-bit integer for MongoDB\BSON\UTCDateTime initialization ===DONE=== 
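The UTCDateTime cases above cover construction from a millisecond timestamp (integer, numeric string, or a DateTimeInterface instance), conversion back to DateTime, string casting, and rejection of non-integral strings. A condensed usage sketch under the same assumptions (ext-mongodb 1.6 with date.timezone set to UTC; the timestamp value is illustrative):

<?php
// From a millisecond timestamp (integer or numeric string)
$utc = new MongoDB\BSON\UTCDateTime(1416445411987);

// From an existing DateTimeInterface instance
$fromDateTime = new MongoDB\BSON\UTCDateTime(new DateTimeImmutable('2014-11-20 01:03:31.987 UTC'));

// Back to a DateTime (always UTC) and to a plain millisecond string
echo $utc->toDateTime()->format('U.u'), "\n"; // 1416445411.987000
echo (string) $utc, "\n";                     // 1416445411987

// Strings that do not parse as 64-bit integers are rejected
try {
    new MongoDB\BSON\UTCDateTime('1234.5678');
} catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {
    echo $e->getMessage(), "\n";
}
?>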
mongodb-1.6.1/tests/bson/bson-utcdatetime_error-004.phpt0000644000076500000240000000156213572250760022440 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTime constructor requires integer or string argument --FILE-- ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected integer or string, bool%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected integer or string, array given ===DONE=== mongodb-1.6.1/tests/bson/bson-utcdatetimeinterface-001.phpt0000644000076500000240000000043513572250760023103 0ustar alcaeusstaff--TEST-- MongoDB\BSON\UTCDateTimeInterface is implemented by MongoDB\BSON\UTCDateTime --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bug0274.phpt0000644000076500000240000000247113572250760016551 0ustar alcaeusstaff--TEST-- Test for PHPC-274: zval_to_bson() should process BSON\Serializable instances --FILE-- "class", "data"); } } class NumericArray implements MongoDB\BSON\Serializable { public function bsonSerialize() { return array(1, 2, 3); } } echo "Testing top-level AssociativeArray:\n"; $bson = fromPHP(new AssociativeArray); echo toJSON($bson), "\n"; echo "Encoded BSON:\n"; hex_dump($bson); echo "\nTesting top-level NumericArray:\n"; $bson = fromPHP(new NumericArray); echo toJSON($bson), "\n"; echo "Encoded BSON:\n"; hex_dump($bson); ?> ===DONE=== --EXPECT-- Testing top-level AssociativeArray: { "random" : "class", "0" : "data" } Encoded BSON: 0 : 23 00 00 00 02 72 61 6e 64 6f 6d 00 06 00 00 00 [#....random.....] 10 : 63 6c 61 73 73 00 02 30 00 05 00 00 00 64 61 74 [class..0.....dat] 20 : 61 00 00 [a..] Testing top-level NumericArray: { "0" : 1, "1" : 2, "2" : 3 } Encoded BSON: 0 : 1a 00 00 00 10 30 00 01 00 00 00 10 31 00 02 00 [.....0......1...] 10 : 00 00 10 32 00 03 00 00 00 00 [...2......] ===DONE=== mongodb-1.6.1/tests/bson/bug0325.phpt0000644000076500000240000000070713572250760016546 0ustar alcaeusstaff--TEST-- Test for PHPC-325: Memory leak decoding buffers with multiple documents --FILE-- getMessage(), "\n"; } ?> ===DONE=== --EXPECT-- Reading document did not exhaust input buffer ===DONE=== mongodb-1.6.1/tests/bson/bug0334-001.phpt0000644000076500000240000000111713572250760017040 0ustar alcaeusstaff--TEST-- PHPC-334: Injected __pclass should override a __pclass key in bsonSerialize() return value --FILE-- "baz", "foo" => "bar", ); } function bsonUnserialize(array $data) { } } $bson = fromPHP(new MyClass); $php = toPHP($bson, array('root' => 'array')); var_dump($php['__pclass']->getData()); ?> ===DONE=== --EXPECT-- string(7) "MyClass" ===DONE=== mongodb-1.6.1/tests/bson/bug0334-002.phpt0000644000076500000240000000125113572250760017040 0ustar alcaeusstaff--TEST-- PHPC-334: Encoded BSON should never have multiple __pclass keys --FILE-- "baz", "foo" => "bar", ); } function bsonUnserialize(array $data) { } } hex_dump(fromPHP(new MyClass)) ?> ===DONE=== --EXPECT-- 0 : 28 00 00 00 05 5f 5f 70 63 6c 61 73 73 00 07 00 [(....__pclass...] 10 : 00 00 80 4d 79 43 6c 61 73 73 02 66 6f 6f 00 04 [...MyClass.foo..] 20 : 00 00 00 62 61 72 00 00 [...bar..] 
===DONE=== mongodb-1.6.1/tests/bson/bug0341.phpt0000644000076500000240000000143013572250760016536 0ustar alcaeusstaff--TEST-- PHPC-341: fromJSON() leaks when JSON contains array or object fields --FILE-- ===DONE=== --EXPECTF-- object(stdClass)#%d (2) { ["foo"]=> string(3) "yes" ["bar"]=> bool(false) } object(stdClass)#%d (2) { ["foo"]=> string(2) "no" ["array"]=> array(2) { [0]=> int(5) [1]=> int(6) } } object(stdClass)#%d (2) { ["foo"]=> string(2) "no" ["obj"]=> object(stdClass)#%d (1) { ["embedded"]=> float(4.125) } } ===DONE=== mongodb-1.6.1/tests/bson/bug0347.phpt0000644000076500000240000000054013572250760016545 0ustar alcaeusstaff--TEST-- Test for PHPC-347: Memory leak decoding empty buffer --FILE-- getMessage(), "\n"; } ?> ===DONE=== --EXPECT-- Could not read document from BSON reader ===DONE=== mongodb-1.6.1/tests/bson/bug0528.phpt0000644000076500000240000000047113572250760016551 0ustar alcaeusstaff--TEST-- PHPC-528: Cannot append reference to BSON --FILE-- &$embedded]; $bson = fromPHP($data); echo toJson(fromPHP($data)), "\n"; ?> ===DONE=== --EXPECT-- { "embedded" : [ "foo" ] } ===DONE=== mongodb-1.6.1/tests/bson/bug0531-001.phpt0000644000076500000240000000101013572250760017027 0ustar alcaeusstaff--TEST-- PHPC-531: Segfault due to double free by corrupt BSON visitor (top-level) --FILE-- "world"]); $bson[4] = 1; echo throws(function() use ($bson) { toPHP($bson); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected unknown BSON type 0x31 for field path "hello". Are you using the latest driver? ===DONE=== mongodb-1.6.1/tests/bson/bug0544.phpt0000644000076500000240000000425313572250760016551 0ustar alcaeusstaff--TEST-- PHPC-544: Consult SIZEOF_ZEND_LONG for 64-bit integer support --SKIPIF-- --FILE-- -2147483648], ['x' => 2147483647], ['x' => -4294967294], ['x' => 4294967294], ['x' => -4294967295], ['x' => 4294967295], ['x' => -9223372036854775807], ['x' => 9223372036854775807], ]; foreach ($tests as $test) { $bson = fromPHP($test); /* Note: Although libbson can parse the extended JSON representation for * 64-bit integers (i.e. "$numberLong"), it currently prints them as * doubles (see: https://jira.mongodb.org/browse/CDRIVER-375). */ printf("Test %s\n", toJSON($bson)); hex_dump($bson); var_dump(toPHP($bson)); echo "\n"; } ?> ===DONE=== --EXPECTF-- Test { "x" : -2147483648 } 0 : 0c 00 00 00 10 78 00 00 00 00 80 00 [.....x......] object(stdClass)#%d (%d) { ["x"]=> int(-2147483648) } Test { "x" : 2147483647 } 0 : 0c 00 00 00 10 78 00 ff ff ff 7f 00 [.....x......] object(stdClass)#%d (%d) { ["x"]=> int(2147483647) } Test { "x" : -4294967294 } 0 : 10 00 00 00 12 78 00 02 00 00 00 ff ff ff ff 00 [.....x..........] object(stdClass)#%d (%d) { ["x"]=> int(-4294967294) } Test { "x" : 4294967294 } 0 : 10 00 00 00 12 78 00 fe ff ff ff 00 00 00 00 00 [.....x..........] object(stdClass)#%d (%d) { ["x"]=> int(4294967294) } Test { "x" : -4294967295 } 0 : 10 00 00 00 12 78 00 01 00 00 00 ff ff ff ff 00 [.....x..........] object(stdClass)#%d (%d) { ["x"]=> int(-4294967295) } Test { "x" : 4294967295 } 0 : 10 00 00 00 12 78 00 ff ff ff ff 00 00 00 00 00 [.....x..........] object(stdClass)#%d (%d) { ["x"]=> int(4294967295) } Test { "x" : -9223372036854775807 } 0 : 10 00 00 00 12 78 00 01 00 00 00 00 00 00 80 00 [.....x..........] object(stdClass)#%d (%d) { ["x"]=> int(-9223372036854775807) } Test { "x" : 9223372036854775807 } 0 : 10 00 00 00 12 78 00 ff ff ff ff ff ff ff 7f 00 [.....x..........] 
object(stdClass)#%d (%d) { ["x"]=> int(9223372036854775807) } ===DONE=== mongodb-1.6.1/tests/bson/bug0592.phpt0000644000076500000240000000537313572250760016560 0ustar alcaeusstaff--TEST-- PHPC-592: Property name corrupted when unserializing 64-bit integer on 32-bit platform --SKIPIF-- --FILE-- getMessage(), "\n"; } echo "\n"; } ?> ===DONE=== --EXPECTF-- Test { "x": { "$numberLong": "-2147483648" }} object(stdClass)#%d (%d) { ["x"]=> int(-2147483648) } Test { "x": { "$numberLong": "2147483647" }} object(stdClass)#%d (%d) { ["x"]=> int(2147483647) } Test { "x": { "$numberLong": "4294967294" }} object(stdClass)#%d (%d) { ["x"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(10) "4294967294" } } Test { "x": { "$numberLong": "4294967295" }} object(stdClass)#%d (%d) { ["x"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(10) "4294967295" } } Test { "x": { "$numberLong": "9223372036854775807" }} object(stdClass)#%d (%d) { ["x"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(19) "9223372036854775807" } } Test { "longFieldName": { "$numberLong": "-2147483648" }} object(stdClass)#%d (%d) { ["longFieldName"]=> int(-2147483648) } Test { "longFieldName": { "$numberLong": "2147483647" }} object(stdClass)#%d (%d) { ["longFieldName"]=> int(2147483647) } Test { "longFieldName": { "$numberLong": "4294967294" }} object(stdClass)#%d (%d) { ["longFieldName"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(10) "4294967294" } } Test { "longFieldName": { "$numberLong": "4294967295" }} object(stdClass)#%d (%d) { ["longFieldName"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(10) "4294967295" } } Test { "longFieldName": { "$numberLong": "9223372036854775807" }} object(stdClass)#%d (%d) { ["longFieldName"]=> object(MongoDB\BSON\Int64)#%d (%d) { ["integer"]=> string(19) "9223372036854775807" } } ===DONE=== mongodb-1.6.1/tests/bson/bug0623.phpt0000644000076500000240000000306613572250760016550 0ustar alcaeusstaff--TEST-- PHPC-623: Numeric keys limited to unsigned 32-bit integer --SKIPIF-- --FILE-- 'a', 'X9781449410247' => 'b', 9781449410248 => 'c', ], [ '4294967295' => 'a', '4294967296' => 'b', '4294967297' => 'c', ] ]; foreach ($tests as $test) { printf("Test %s\n", json_encode($test)); $bson = fromPHP($test); hex_dump($bson); echo toJSON($bson), "\n\n"; } ?> ===DONE=== --EXPECT-- Test {"9781449410247":"a","X9781449410247":"b","9781449410248":"c"} 0 : 45 00 00 00 02 39 37 38 31 34 34 39 34 31 30 32 [E....97814494102] 10 : 34 37 00 02 00 00 00 61 00 02 58 39 37 38 31 34 [47.....a..X97814] 20 : 34 39 34 31 30 32 34 37 00 02 00 00 00 62 00 02 [49410247.....b..] 30 : 39 37 38 31 34 34 39 34 31 30 32 34 38 00 02 00 [9781449410248...] 40 : 00 00 63 00 00 [..c..] { "9781449410247" : "a", "X9781449410247" : "b", "9781449410248" : "c" } Test {"4294967295":"a","4294967296":"b","4294967297":"c"} 0 : 3b 00 00 00 02 34 32 39 34 39 36 37 32 39 35 00 [;....4294967295.] 10 : 02 00 00 00 61 00 02 34 32 39 34 39 36 37 32 39 [....a..429496729] 20 : 36 00 02 00 00 00 62 00 02 34 32 39 34 39 36 37 [6.....b..4294967] 30 : 32 39 37 00 02 00 00 00 63 00 00 [297.....c..] 
{ "4294967295" : "a", "4294967296" : "b", "4294967297" : "c" } ===DONE=== mongodb-1.6.1/tests/bson/bug0631.phpt0000644000076500000240000000153713572250760016550 0ustar alcaeusstaff--TEST-- PHPC-631: UTCDateTime::toDateTime() may return object that cannot be serialized --INI-- date.timezone=UTC --FILE-- toDateTime(); $s = serialize($datetime); var_dump($datetime); echo "\n", $s, "\n\n"; var_dump(unserialize($s)); ?> ===DONE=== --EXPECTF-- object(DateTime)#%d (%d) { ["date"]=> string(26) "2016-06-21 20:25:55.123000" ["timezone_type"]=> int(1) ["timezone"]=> string(6) "+00:00" } O:8:"DateTime":3:{s:4:"date";s:26:"2016-06-21 20:25:55.123000";s:13:"timezone_type";i:1;s:8:"timezone";s:6:"+00:00";} object(DateTime)#%d (%d) { ["date"]=> string(26) "2016-06-21 20:25:55.123000" ["timezone_type"]=> int(1) ["timezone"]=> string(6) "+00:00" } ===DONE=== mongodb-1.6.1/tests/bson/bug0672.phpt0000644000076500000240000000126213572250760016550 0ustar alcaeusstaff--TEST-- PHPC-672: ObjectId constructor should not modify string argument's memory --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "56925b7330616224d0000001" } string(24) "56925B7330616224D0000001" OK: Got MongoDB\Driver\Exception\InvalidArgumentException string(7) "T123456" ===DONE=== mongodb-1.6.1/tests/bson/bug0894-001.phpt0000644000076500000240000000126413572250760017056 0ustar alcaeusstaff--TEST-- PHPC-849: BSON get_properties handlers leak during gc_possible_root() checks --FILE-- 42]), new MongoDB\BSON\MaxKey, new MongoDB\BSON\MinKey, new MongoDB\BSON\ObjectId, new MongoDB\BSON\Regex('foo', 'i'), new MongoDB\BSON\Timestamp(1234, 5678), new MongoDB\BSON\UTCDateTime, ]; printf("Created array of %d BSON objects\n", count($objects)); gc_collect_cycles(); ?> ===DONE=== --EXPECT-- Created array of 9 BSON objects ===DONE=== mongodb-1.6.1/tests/bson/bug0923-001.phpt0000644000076500000240000000156713572250760017055 0ustar alcaeusstaff--TEST-- PHPC-923: Use zend_string_release() to free class names (type map) --FILE-- 'MissingClass'])); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; var_dump($classes); ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist array(1) { [0]=> string(12) "MissingClass" } ===DONE=== mongodb-1.6.1/tests/bson/bug0923-002.phpt0000644000076500000240000000212213572250760017042 0ustar alcaeusstaff--TEST-- PHPC-923: Use zend_string_release() to free class names (__pclass) --FILE-- ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["x"]=> object(stdClass)#%d (%d) { ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(12) "MissingClass" ["type"]=> int(128) } } } array(1) { [0]=> string(12) "MissingClass" } ===DONE=== mongodb-1.6.1/tests/bson/bug0939-001.phpt0000644000076500000240000000513113572250760017053 0ustar alcaeusstaff--TEST-- PHPC-939: BSON classes should not assign public properties after var_dump() --FILE-- 42]), ['code', 'scope'] ], [ new MongoDB\BSON\MaxKey, [] ], [ new MongoDB\BSON\MinKey, [] ], [ new MongoDB\BSON\ObjectId, ['oid'] ], [ new MongoDB\BSON\Regex('foo', 'i'), ['pattern', 'flags'] ], [ new MongoDB\BSON\Timestamp(1234, 5678), ['increment', 'timestamp'] ], [ new MongoDB\BSON\UTCDateTime, ['milliseconds'] ], ]; foreach ($tests as $test) { list($object, $properties) = $test; var_dump($object); foreach ($properties as $property) { var_dump($object->{$property}); } echo "\n"; } ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(3) "foo" 
["type"]=> int(0) } Notice: Undefined property: MongoDB\BSON\Binary::$data in %s on line %d NULL Notice: Undefined property: MongoDB\BSON\Binary::$type in %s on line %d NULL object(MongoDB\BSON\Decimal128)#%d (%d) { ["dec"]=> string(4) "3.14" } Notice: Undefined property: MongoDB\BSON\Decimal128::$dec in %s on line %d NULL object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(30) "function foo() { return bar; }" ["scope"]=> object(stdClass)#%d (%d) { ["bar"]=> int(42) } } Notice: Undefined property: MongoDB\BSON\Javascript::$code in %s on line %d NULL Notice: Undefined property: MongoDB\BSON\Javascript::$scope in %s on line %d NULL object(MongoDB\BSON\MaxKey)#%d (%d) { } object(MongoDB\BSON\MinKey)#%d (%d) { } object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } Notice: Undefined property: MongoDB\BSON\ObjectId::$oid in %s on line %d NULL object(MongoDB\BSON\Regex)#%d (%d) { ["pattern"]=> string(3) "foo" ["flags"]=> string(1) "i" } Notice: Undefined property: MongoDB\BSON\Regex::$pattern in %s on line %d NULL Notice: Undefined property: MongoDB\BSON\Regex::$flags in %s on line %d NULL object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } Notice: Undefined property: MongoDB\BSON\Timestamp::$increment in %s on line %d NULL Notice: Undefined property: MongoDB\BSON\Timestamp::$timestamp in %s on line %d NULL object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(%d) "%d" } Notice: Undefined property: MongoDB\BSON\UTCDateTime::$milliseconds in %s on line %d NULL ===DONE=== mongodb-1.6.1/tests/bson/bug0974-001.phpt0000644000076500000240000000223513572250760017054 0ustar alcaeusstaff--TEST-- PHPC-974: Converting JSON to BSON to PHP introduces gaps in array indexes --FILE-- ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["myArray"]=> array(1) { [0]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "201700601301102102609060" } } } object(stdClass)#%d (%d) { [%r(0|"0")%r]=> int(1) [%r(1|"1")%r]=> int(2) [%r(2|"2")%r]=> int(3) [%r(3|"3")%r]=> object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1497352886906" } } object(stdClass)#3 (2) { [%r(0|"0")%r]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "55f2b3f1f657b3fa97c9c0a2" } [%r(1|"1")%r]=> object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1497352886906" } } ===DONE=== mongodb-1.6.1/tests/bson/bug1006-001.phpt0000644000076500000240000000176513572250760017046 0ustar alcaeusstaff--TEST-- PHPC-1006: Do not modify memory of Persistable::bsonSerialize() return value --FILE-- data = [ '__pclass' => 'baz', 'foo' => 'bar', ]; } function bsonSerialize() { return $this->data; } function bsonUnserialize(array $data) { } } $obj = new MyClass; var_dump($obj->data); hex_dump(fromPHP($obj)); var_dump($obj->data); ?> ===DONE=== --EXPECT-- array(2) { ["__pclass"]=> string(3) "baz" ["foo"]=> string(3) "bar" } 0 : 28 00 00 00 05 5f 5f 70 63 6c 61 73 73 00 07 00 [(....__pclass...] 10 : 00 00 80 4d 79 43 6c 61 73 73 02 66 6f 6f 00 04 [...MyClass.foo..] 20 : 00 00 00 62 61 72 00 00 [...bar..] 
array(2) { ["__pclass"]=> string(3) "baz" ["foo"]=> string(3) "bar" } ===DONE=== mongodb-1.6.1/tests/bson/bug1006-002.phpt0000644000076500000240000000120213572250760017031 0ustar alcaeusstaff--TEST-- PHPC-1006: Do not skip __pclass in Serializable::bsonSerialize() return value --FILE-- 'baz', 'foo' => 'bar', ]; } } hex_dump(fromPHP(new MyClass)); ?> ===DONE=== --EXPECT-- 0 : 24 00 00 00 02 5f 5f 70 63 6c 61 73 73 00 04 00 [$....__pclass...] 10 : 00 00 62 61 7a 00 02 66 6f 6f 00 04 00 00 00 62 [..baz..foo.....b] 20 : 61 72 00 00 [ar..] ===DONE=== mongodb-1.6.1/tests/bson/bug1053.phpt0000644000076500000240000000047113572250760016543 0ustar alcaeusstaff--TEST-- PHPC-1053: MongoDB\BSON\UTCDateTime's constructor has argument defined as required --FILE-- getParameters()[0]->isOptional()); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bson/bug1067.phpt0000644000076500000240000000066613572250760016556 0ustar alcaeusstaff--TEST-- PHPC-1067: BSON document produces driver segfault with insert --FILE-- new MongoDB\BSON\ObjectID('111111111111111111111111'), '___________________________________' => new MongoDB\BSON\Regex('_______________________________________________________', 'i'), ]; $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert($x); ?> ==DONE== --EXPECT-- ==DONE== mongodb-1.6.1/tests/bson/bug1266.phpt0000644000076500000240000000301213572250760016543 0ustar alcaeusstaff--TEST-- Test for PHPC-1266: Empty deeply nested BSON document causes unallocated memory writes --FILE-- ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["value"]=> object(stdClass)#%d (%d) { ["payload"]=> object(stdClass)#%d (%d) { ["PayloadMasterDataMeteringPointPartyEvent"]=> object(stdClass)#%d (%d) { ["MeteringPointPartyDetailMeteringPointPartyCharacteristic"]=> object(stdClass)#%d (%d) { ["AdministrativePartyMPAdministrativeParty"]=> array(%d) { [0]=> object(stdClass)#%d (%d) { ["AdministrativePartyAddressLocationAddress"]=> object(stdClass)#%d (%d) { ["StreetCode"]=> object(stdClass)#%d (%d) { } } } } } } } } } ===DONE=== mongodb-1.6.1/tests/bson/typemap-001.phpt0000644000076500000240000000757213572250760017443 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::setTypeMap(): Setting typemaps --SKIPIF-- --FILE-- insert(array('_id' => 1, 'bson_array' => array(1, 2, 3), 'bson_object' => array("string" => "keys", "for" => "ever"))); $bulk->insert(array('_id' => 2, 'bson_array' => array(4, 5, 6))); $manager->executeBulkWrite(NS, $bulk); function fetch($manager, $typemap = array()) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array('bson_array' => 1))); if ($typemap) { $cursor->setTypeMap($typemap); } $documents = $cursor->toArray(); return $documents; } echo "Default\n"; $documents = fetch($manager); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->bson_array)); var_dump($documents[0]->bson_object instanceof stdClass); echo "\nSetting to 'MyArrayObject' for arrays\n"; $documents = fetch($manager, array("array" => "MyArrayObject")); var_dump($documents[0] instanceof stdClass); var_dump($documents[0]->bson_array instanceof MyArrayObject); var_dump($documents[0]->bson_object instanceof stdClass); echo "\nSetting to 'MyArrayObject' for arrays, embedded, and root documents\n"; $documents = fetch($manager, array("array" => "MyArrayObject", "document" => "MyArrayObject", "root" => "MyArrayObject")); var_dump($documents[0] instanceof MyArrayObject); var_dump($documents[0]['bson_array'] instanceof MyArrayObject); var_dump($documents[0]['bson_object'] instanceof 
MyArrayObject); echo "\nSetting to 'array' for arrays, embedded, and root documents\n"; $documents = fetch($manager, array("array" => "array", "document" => "array", "root" => "array")); var_dump(is_array($documents[0])); var_dump(is_array($documents[0]['bson_array'])); var_dump(is_array($documents[0]['bson_object'])); echo "\nSetting to 'stdclass' for arrays and 'array' for embedded and root documents\n"; $documents = fetch($manager, array("array" => "stdclass", "document" => "array", "root" => "array")); var_dump(is_array($documents[0])); var_dump($documents[0]['bson_array'] instanceof stdClass); var_dump(is_array($documents[0]['bson_object'])); echo "\nSetting to 'array' for arrays, 'stdclass' for embedded document, and 'MyArrayObject' for root document\n"; $documents = fetch($manager, array("array" => "array", "document" => "stdclass", "root" => "MyArrayObject")); var_dump($documents[0] instanceof MyArrayObject); var_dump(is_array($documents[0]['bson_array'])); var_dump($documents[0]['bson_object'] instanceof stdClass); echo "\nSetting to 'stdclass' for arrays, embedded, and root documents\n"; $documents = fetch($manager, array("array" => "stdclass", "document" => "stdclass", "root" => "stdclass")); var_dump($documents[0] instanceof stdClass); var_dump($documents[0]->bson_array instanceof stdClass); var_dump($documents[0]->bson_object instanceof stdClass); ?> ===DONE=== --EXPECT-- Default bool(true) bool(true) bool(true) Setting to 'MyArrayObject' for arrays bool(true) bool(true) bool(true) Setting to 'MyArrayObject' for arrays, embedded, and root documents bool(true) bool(true) bool(true) Setting to 'array' for arrays, embedded, and root documents bool(true) bool(true) bool(true) Setting to 'stdclass' for arrays and 'array' for embedded and root documents bool(true) bool(true) bool(true) Setting to 'array' for arrays, 'stdclass' for embedded document, and 'MyArrayObject' for root document bool(true) bool(true) bool(true) Setting to 'stdclass' for arrays, embedded, and root documents bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/typemap-002.phpt0000644000076500000240000000426113572250760017434 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::setTypeMap(): Setting using type "object" --SKIPIF-- --FILE-- insert(array('_id' => 1, 'bson_array' => array(1, 2, 3), 'bson_object' => array("string" => "keys", "for" => "ever"))); $bulk->insert(array('_id' => 2, 'bson_array' => array(4, 5, 6))); $manager->executeBulkWrite(NS, $bulk); function fetch($manager, $typemap = array()) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array('bson_array' => 1))); if ($typemap) { $cursor->setTypeMap($typemap); } $documents = $cursor->toArray(); return $documents; } echo "Setting to 'object' for arrays and 'array' for embedded and root documents\n"; $documents = fetch($manager, array("array" => "object", "document" => "array", "root" => "array")); var_dump(is_array($documents[0])); var_dump($documents[0]['bson_array'] instanceof stdClass); var_dump(is_array($documents[0]['bson_object'])); echo "\nSetting to 'array' for arrays and 'object' for embedded and root documents\n"; $documents = fetch($manager, array("array" => "array", "document" => "object", "root" => "object")); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->bson_array)); var_dump($documents[0]->bson_object instanceof stdClass); echo "\nSetting to 'object' for arrays, embedded, and root documents\n"; $documents = fetch($manager, array("array" => "object", "document" => 
"object", "root" => "object")); var_dump($documents[0] instanceof stdClass); var_dump($documents[0]->bson_array instanceof stdClass); var_dump($documents[0]->bson_object instanceof stdClass); ?> ===DONE=== --EXPECT-- Setting to 'object' for arrays and 'array' for embedded and root documents bool(true) bool(true) bool(true) Setting to 'array' for arrays and 'object' for embedded and root documents bool(true) bool(true) bool(true) Setting to 'object' for arrays, embedded, and root documents bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/typemap-003.phpt0000644000076500000240000000377313572250760017444 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::setTypeMap(): Setting and replacing typemaps --SKIPIF-- --FILE-- 1, 'array' => [1, 2, 3], 'object' => ['string' => ['sleutels', 'keys'] ] ]; $document2 = [ '_id' => 2, 'array' => [4, 5, 6], 'object' => ['associative' => ['elementen', 'elements' ]] ]; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert($document1); $bulk->insert($document2); $manager->executeBulkWrite(NS, $bulk); $typemap1 = ["fieldPaths" => [ 'object.string' => "MyArrayObject", 'object' => "MyArrayObject", ]]; $typemap2 = ["fieldPaths" => [ 'object.associative' => "MyProperties", 'object' => "MyArrayObject", ]]; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); $cursor->setTypeMap($typemap1); $cursor->setTypeMap($typemap2); $documents = $cursor->toArray(); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array)); var_dump($documents[0]->object instanceof MyArrayObject); var_dump(is_array($documents[0]->object['string'])); var_dump(is_array($documents[0]->object->string)); var_dump($documents[1] instanceof stdClass); var_dump(is_array($documents[1]->array)); var_dump($documents[1]->object instanceof MyArrayObject); var_dump($documents[1]->object['associative'] instanceof MyProperties); var_dump($documents[1]->object->associative instanceof MyProperties); ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/typemap-004.phpt0000644000076500000240000000375313572250760017443 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::setTypeMap(): Setting fieldPath typemaps for compound types with string keys --SKIPIF-- --FILE-- 1, 'array' => [1, 2, 3], 'object' => ['string' => 'keys', 'for' => 'ever'] ]; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert($document); $manager->executeBulkWrite(NS, $bulk); function fetch($manager, $typemap = []) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); if ($typemap) { $cursor->setTypeMap($typemap); } return $cursor->toArray(); } echo "Default\n"; $documents = fetch($manager); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array)); var_dump($documents[0]->object instanceof stdClass); echo "\nSetting 'object' path to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object' => "MyArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array)); var_dump($documents[0]->object instanceof MyArrayObject); echo "\nSetting 'object' and 'array' path to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object' => "MyArrayObject", 'array' => "MyArrayObject", ]]); var_dump($documents[0] instanceof stdClass); var_dump($documents[0]->array instanceof MyArrayObject); var_dump($documents[0]->object instanceof MyArrayObject); ?> ===DONE=== --EXPECT-- 
Default bool(true) bool(true) bool(true) Setting 'object' path to 'MyArrayObject' bool(true) bool(true) bool(true) Setting 'object' and 'array' path to 'MyArrayObject' bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/typemap-005.phpt0000644000076500000240000000514713572250760017443 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::setTypeMap(): Setting fieldPath typemaps for compound types with numerical keys --SKIPIF-- --FILE-- 1, 'array0' => [0 => [ 4, 5, 6 ], 1 => [ 7, 8, 9 ]], 'array1' => [1 => [ 4, 5, 6 ], 2 => [ 7, 8, 9 ]], ]; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert($document); $manager->executeBulkWrite(NS, $bulk); function fetch($manager, $typemap = []) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); if ($typemap) { $cursor->setTypeMap($typemap); } $documents = $cursor->toArray(); return $documents; } echo "Default\n"; $documents = fetch($manager); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array0)); var_dump(is_object($documents[0]->array1)); var_dump($documents[0]->array1 instanceof stdClass); echo "\nSetting 'array0' path to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'array0' => "MyArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_object($documents[0]->array0)); var_dump($documents[0]->array0 instanceof MyArrayObject); echo "\nSetting 'array0.1' path to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'array0.1' => "MyArrayObject", ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array0)); var_dump(is_array($documents[0]->array0[0])); var_dump($documents[0]->array0[1] instanceof MyArrayObject); echo "\nSetting 'array1.1' path to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'array1.1' => "MyArrayObject", ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_object($documents[0]->array1)); var_dump($documents[0]->array1 instanceof stdClass); $a = ((array) $documents[0]->array1); var_dump($a[1] instanceof MyArrayObject); var_dump(is_array($a[2])); ?> ===DONE=== --EXPECT-- Default bool(true) bool(true) bool(true) bool(true) Setting 'array0' path to 'MyArrayObject' bool(true) bool(true) bool(true) Setting 'array0.1' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'array1.1' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/typemap-006.phpt0000644000076500000240000001076513572250760017446 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::setTypeMap(): Setting fieldPath typemaps for compound types with wildcard keys --SKIPIF-- --FILE-- 1, 'array' => [0 => [ 4, 5, 6 ], 1 => [ 7, 8, 9 ]], 'object' => ['one' => [ 4, 5, 6 ], 'two' => [ 7, 8, 9 ]], ]; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert($document); $manager->executeBulkWrite(NS, $bulk); function fetch($manager, $typemap = []) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); if ($typemap) { $cursor->setTypeMap($typemap); } $documents = $cursor->toArray(); return $documents; } echo "\nSetting 'array.$' path to 'MyWildcardArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'array.$' => "MyWildcardArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array)); var_dump($documents[0]->array[0] instanceof MyWildcardArrayObject); var_dump($documents[0]->array[1] instanceof MyWildcardArrayObject); echo "\nSetting 'array.1' to 'MyArrayObject' 
and 'array.$' path to 'MyWildcardArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'array.1' => "MyArrayObject", 'array.$' => "MyWildcardArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array)); var_dump($documents[0]->array[0] instanceof MyWildcardArrayObject); var_dump($documents[0]->array[1] instanceof MyArrayObject); echo "\nSetting 'array.$' to 'MyWildcardArrayObject' and 'array.1' path to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'array.$' => "MyWildcardArrayObject", 'array.1' => "MyArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_array($documents[0]->array)); var_dump($documents[0]->array[0] instanceof MyWildcardArrayObject); var_dump($documents[0]->array[1] instanceof MyWildcardArrayObject); echo "\nSetting 'object.$' path to 'MyWildcardArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.$' => "MyWildcardArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_object($documents[0]->object)); var_dump($documents[0]->object->one instanceof MyWildcardArrayObject); var_dump($documents[0]->object->two instanceof MyWildcardArrayObject); echo "\nSetting 'object.two' to 'MyArrayObject' and 'object.$' path to 'MyWildcardArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.two' => "MyArrayObject", 'object.$' => "MyWildcardArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_object($documents[0]->object)); var_dump($documents[0]->object->one instanceof MyWildcardArrayObject); var_dump($documents[0]->object->two instanceof MyArrayObject); echo "\nSetting 'object.$' to 'MyWildcardArrayObject' and 'object.one' path to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.$' => "MyWildcardArrayObject", 'object.one' => "MyArrayObject" ]]); var_dump($documents[0] instanceof stdClass); var_dump(is_object($documents[0]->object)); var_dump($documents[0]->object->one instanceof MyWildcardArrayObject); var_dump($documents[0]->object->two instanceof MyWildcardArrayObject); ?> ===DONE=== --EXPECT-- Setting 'array.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'array.1' to 'MyArrayObject' and 'array.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'array.$' to 'MyWildcardArrayObject' and 'array.1' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'object.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'object.two' to 'MyArrayObject' and 'object.$' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) Setting 'object.$' to 'MyWildcardArrayObject' and 'object.one' path to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bson/typemap-007.phpt0000644000076500000240000001254613572250760017446 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::setTypeMap(): Setting fieldPath typemaps for compound types with wildcard keys (nested) --SKIPIF-- --FILE-- 1, 'object' => [ 'parent1' => [ 'child1' => [ 1, 2, 3 ], 'child2' => [ 4, 5, 6 ], ], 'parent2' => [ 'child1' => [ 7, 8, 9 ], 'child2' => [ 10, 11, 12 ], ], ], ]; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert($document); $manager->executeBulkWrite(NS, $bulk); function fetch($manager, $typemap = []) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); if ($typemap) { $cursor->setTypeMap($typemap); } $documents = 
$cursor->toArray(); return $documents; } echo "\nSetting 'object.$.child1' path to 'MyWildcardArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.$.child1' => "MyWildcardArrayObject" ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump(is_array($documents[0]->object->parent1->child2)); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump($documents[0]->object->parent2->child1 instanceof MyWildcardArrayObject); var_dump(is_array($documents[0]->object->parent2->child2)); echo "\nSetting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.parent2.child1' to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.parent1.$' => "MyWildcardArrayObject", 'object.parent2.child1' => "MyArrayObject", ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($documents[0]->object->parent1->child2 instanceof MyWildcardArrayObject); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump($documents[0]->object->parent2->child1 instanceof MyArrayObject); var_dump(is_array($documents[0]->object->parent2->child2)); echo "\nSetting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.$' to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.parent1.$' => "MyWildcardArrayObject", 'object.$.$' => "MyArrayObject", ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($documents[0]->object->parent1->child2 instanceof MyWildcardArrayObject); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump($documents[0]->object->parent2->child1 instanceof MyArrayObject); var_dump($documents[0]->object->parent2->child2 instanceof MyArrayObject); echo "\nSetting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.child2' to 'MyArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.parent1.child1' => "MyWildcardArrayObject", 'object.$.child2' => "MyArrayObject", ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($documents[0]->object->parent1->child2 instanceof MyArrayObject); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump(is_array($documents[0]->object->parent2->child1)); var_dump($documents[0]->object->parent2->child2 instanceof MyArrayObject); echo "\nSetting 'object.parent1.child2 path to 'MyArrayObject' and 'object.$.$' to 'MyWildcardArrayObject'\n"; $documents = fetch($manager, ["fieldPaths" => [ 'object.parent1.child2' => "MyArrayObject", 'object.$.$' => "MyWildcardArrayObject", ]]); var_dump($documents[0]->object->parent1 instanceof stdClass); var_dump($documents[0]->object->parent1->child1 instanceof MyWildcardArrayObject); var_dump($documents[0]->object->parent1->child2 instanceof MyArrayObject); var_dump($documents[0]->object->parent2 instanceof stdClass); var_dump($documents[0]->object->parent2->child1 instanceof MyWildcardArrayObject); var_dump($documents[0]->object->parent2->child2 instanceof MyWildcardArrayObject); ?> ===DONE=== --EXPECT-- Setting 'object.$.child1' path to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 
'MyWildcardArrayObject' and 'object.parent2.child1' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.$' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.$' path to 'MyWildcardArrayObject' and 'object.$.child2' to 'MyArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) Setting 'object.parent1.child2 path to 'MyArrayObject' and 'object.$.$' to 'MyWildcardArrayObject' bool(true) bool(true) bool(true) bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/bulk/bug0667.phpt0000644000076500000240000000067313572250760016555 0ustar alcaeusstaff--TEST-- PHPC-667: BulkWrite::insert() does not generate ObjectId if another field has "_id" prefix --FILE-- insert(['_ids' => 1])); var_dump($bulk->insert((object) ['_ids' => 1])); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-count-001.phpt0000644000076500000240000000117613572250760021110 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::count() should return the number of operations --FILE-- count()); $bulk->insert(['x' => 1]); var_dump($bulk->count()); $bulk->insert(['x' => 2]); var_dump($bulk->count()); $bulk->update(['x' => 3], ['$set' => ['y' => 3]]); var_dump($bulk->count()); $bulk->update(['x' => 4], ['$set' => ['y' => 4]]); var_dump($bulk->count()); $bulk->delete(['x' => 5]); var_dump($bulk->count()); $bulk->delete(['x' => 6]); var_dump($bulk->count()); ?> ===DONE=== --EXPECT-- int(0) int(1) int(2) int(3) int(4) int(5) int(6) ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-countable-001.phpt0000644000076500000240000000032513572250760021727 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite implements Countable --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-debug-001.phpt0000644000076500000240000000316313572250760021044 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite debug output before execution --FILE-- true], ['ordered' => false], ['bypassDocumentValidation' => true], ['bypassDocumentValidation' => false], ]; foreach ($tests as $options) { var_dump(new MongoDB\Driver\BulkWrite($options)); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(false) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> bool(true) ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> bool(false) ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } ===DONE=== 
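The bulk-write tests above cover operation counting and debug output, while the ones that follow exercise individual insert, update, and delete operations. A compact sketch of the overall workflow these tests assume (the connection URI, database, and collection names are placeholders):

<?php
$manager = new MongoDB\Driver\Manager('mongodb://127.0.0.1:27017');

$bulk = new MongoDB\Driver\BulkWrite(['ordered' => true]);

// insert() returns the generated ObjectId when no _id is supplied
$id = $bulk->insert(['x' => 1]);

$bulk->update(['x' => 1], ['$set' => ['y' => 2]], ['multi' => false, 'upsert' => false]);
$bulk->delete(['x' => 3]);

var_dump(count($bulk)); // int(3), BulkWrite implements Countable

$result = $manager->executeBulkWrite('db.collection', $bulk);
printf("inserted=%d modified=%d deleted=%d\n",
    $result->getInsertedCount(),
    $result->getModifiedCount(),
    $result->getDeletedCount());
?>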
mongodb-1.6.1/tests/bulk/bulkwrite-delete-001.phpt0000644000076500000240000000371313572250760021221 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::delete() should always encode __pclass for Persistable objects --SKIPIF-- --FILE-- id = $id; $this->child = $child; } public function bsonSerialize() { return [ '_id' => $this->id, 'child' => $this->child, ]; } public function bsonUnserialize(array $data) { $this->id = $data['_id']; $this->child = $data['child']; } } $manager = new MongoDB\Driver\Manager(URI); $document = new MyClass('foo', new MyClass('bar', new MyClass('baz'))); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert($document); $result = $manager->executeBulkWrite(NS, $bulk); printf("Inserted %d document(s)\n", $result->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); var_dump($cursor->toArray()); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->delete($document); $result = $manager->executeBulkWrite(NS, $bulk); printf("Deleted %d document(s)\n", $result->getDeletedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- Inserted 1 document(s) array(1) { [0]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "foo" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "bar" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "baz" ["child":"MyClass":private]=> NULL } } } } Deleted 1 document(s) array(0) { } ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-delete_error-001.phpt0000644000076500000240000000075713572250760022437 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::delete() with invalid options --FILE-- delete(['x' => 1], ['collation' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "collation" option to be array or object, int%S given ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-delete_error-002.phpt0000644000076500000240000000141513572250760022430 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::delete() with BSON encoding error (invalid UTF-8 string) --FILE-- delete(['x' => "\xc3\x28"]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->delete(['x' => 1], ['collation' => ['locale' => "\xc3\x28"]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "x": %s OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "locale": %s ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-delete_error-003.phpt0000644000076500000240000000357213572250760022437 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::delete() with BSON encoding error (null bytes in keys) --FILE-- delete(["\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->delete(["x\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->delete(["\0\0\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->delete(['x' => 1], ['collation' => ["\0" => 1]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { 
$bulk->delete(['x' => 1], ['collation' => ["x\0" => 1]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->delete(['x' => 1], ['collation' => ["\0\0\0" => 1]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "x". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "x". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-delete_error-004.phpt0000644000076500000240000000154713572250760022440 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::delete() collation option requires MongoDB 3.4 --SKIPIF-- =', '3.4'); ?> --FILE-- delete( ['name' => 'foo'], ['collation' => ['locale' => 'en_US']] ); echo throws(function() use ($manager, $bulk) { $manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk); }, 'MongoDB\Driver\Exception\BulkWriteException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\BulkWriteException Bulk write failed due to previous MongoDB\Driver\Exception\RuntimeException: The selected server does not support collation ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-insert-001.phpt0000644000076500000240000000317113572250760021261 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::insert() should always encode __pclass for Persistable objects --SKIPIF-- --FILE-- id = $id; $this->child = $child; } public function bsonSerialize() { return [ '_id' => $this->id, 'child' => $this->child, ]; } public function bsonUnserialize(array $data) { $this->id = $data['_id']; $this->child = $data['child']; } } $manager = new MongoDB\Driver\Manager(URI); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(new MyClass('foo', new MyClass('bar', new MyClass('baz')))); $result = $manager->executeBulkWrite(NS, $bulk); printf("Inserted %d document(s)\n", $result->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- Inserted 1 document(s) array(1) { [0]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "foo" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "bar" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "baz" ["child":"MyClass":private]=> NULL } } } } ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-insert-004.phpt0000644000076500000240000000460713572250760021271 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::insert() returns "_id" of inserted document --SKIPIF-- --FILE-- id = $id; } public function bsonSerialize() { return ['id' => $this->id]; } } class MyPersistableId extends MySerializableId implements MongoDB\BSON\Persistable { public function bsonUnserialize(array $data) { $this->id = $data['id']; } } $documents = [ ['x' => 1], ['_id' => new 
MongoDB\BSON\ObjectId('590b72d606e9660190656a55')], ['_id' => ['foo' => 1]], ['_id' => new MySerializableId('foo')], ['_id' => new MyPersistableId('bar')], ]; $manager = new MongoDB\Driver\Manager(URI); $bulk = new MongoDB\Driver\BulkWrite(); foreach ($documents as $document) { var_dump($bulk->insert($document)); } $result = $manager->executeBulkWrite(NS, $bulk); printf("Inserted %d document(s)\n", $result->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "590b72d606e9660190656a55" } object(stdClass)#%d (%d) { ["foo"]=> int(1) } object(stdClass)#%d (%d) { ["id"]=> string(3) "foo" } object(MyPersistableId)#%d (%d) { ["id"]=> string(3) "bar" } Inserted 5 document(s) array(5) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } ["x"]=> int(1) } [1]=> object(stdClass)#%d (%d) { ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "590b72d606e9660190656a55" } } [2]=> object(stdClass)#%d (%d) { ["_id"]=> object(stdClass)#%d (%d) { ["foo"]=> int(1) } } [3]=> object(stdClass)#%d (%d) { ["_id"]=> object(stdClass)#%d (%d) { ["id"]=> string(3) "foo" } } [4]=> object(stdClass)#%d (%d) { ["_id"]=> object(MyPersistableId)#%d (%d) { ["id"]=> string(3) "bar" } } } ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-insert_error-001.phpt0000644000076500000240000000227213572250760022473 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::insert() with invalid insert document --FILE-- insert(['' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->insert(['x.y' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->insert(['$x' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->insert(["\xc3\x28" => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid document for insert: empty key OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid document for insert: keys cannot contain ".": "x.y" OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid document for insert: keys cannot begin with "$": "$x" OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid document for insert: corrupt BSON ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-insert_error-002.phpt0000644000076500000240000000075613572250760022501 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::insert() with BSON encoding error (invalid UTF-8 string) --FILE-- insert(['x' => "\xc3\x28"]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "x": %s ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-insert_error-003.phpt0000644000076500000240000000201313572250760022466 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::insert() with BSON encoding error (null bytes in keys) --FILE-- insert(["\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->insert(["x\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo 
throws(function() use ($bulk) { $bulk->insert(["\0\0\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "x". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-update-001.phpt0000644000076500000240000000473713572250760021250 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() should always encode __pclass for Persistable objects --SKIPIF-- --FILE-- id = $id; $this->child = $child; } public function bsonSerialize() { return [ '_id' => $this->id, 'child' => $this->child, ]; } public function bsonUnserialize(array $data) { $this->id = $data['_id']; $this->child = $data['child']; } } $manager = new MongoDB\Driver\Manager(URI); $document = new MyClass('foo', new MyClass('bar', new MyClass('baz'))); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->update( ['_id' => 'foo'], $document, ['upsert' => true] ); $result = $manager->executeBulkWrite(NS, $bulk); printf("Upserted %d document(s)\n", $result->getUpsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); var_dump($cursor->toArray()); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->update( $document, ['$set' => ['child' => new MyClass('yip', new MyClass('yap'))]] ); $result = $manager->executeBulkWrite(NS, $bulk); printf("Modified %d document(s)\n", $result->getModifiedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- Upserted 1 document(s) array(1) { [0]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "foo" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "bar" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "baz" ["child":"MyClass":private]=> NULL } } } } Modified 1 document(s) array(1) { [0]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "foo" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "yip" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "yap" ["child":"MyClass":private]=> NULL } } } } ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-update-002.phpt0000644000076500000240000000335613572250760021245 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() with arrayFilters option --SKIPIF-- --FILE-- insert([ '_id' => 1, 'grades' => [ 95, 92, 90 ] ]); $bulk->insert([ '_id' => 2, 'grades' => [ 98, 100, 102 ] ]); $bulk->insert([ '_id' => 3, 'grades' => [ 95, 110, 100 ] ]); $manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk); $updateBulk = new MongoDB\Driver\BulkWrite(); $query = ['grades' => ['$gte' => 100]]; $update = [ '$set' => [ 'grades.$[element]' => 100 ] ]; $options = [ 'arrayFilters' => [ [ 'element' => [ '$gte' => 100 ] ] ], 'multi' => true ]; $updateBulk->update($query, $update, $options); $manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $updateBulk); $cursor = $manager->executeQuery( DATABASE_NAME . '.' . 
COLLECTION_NAME, new \MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- array(%d) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(1) ["grades"]=> array(%d) { [0]=> int(95) [1]=> int(92) [2]=> int(90) } } [1]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) ["grades"]=> array(%d) { [0]=> int(98) [1]=> int(100) [2]=> int(100) } } [2]=> object(stdClass)#%d (%d) { ["_id"]=> int(3) ["grades"]=> array(%d) { [0]=> int(95) [1]=> int(100) [2]=> int(100) } } } ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-update-003.phpt0000644000076500000240000000241313572250760021237 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() with pipeline option --SKIPIF-- --FILE-- insert([ '_id' => 1, 'x' => 1, 'y' => 1, 't' => [ 'u' => [ 'v' => 1 ] ] ]); $bulk->insert([ '_id' => 2, 'x' => 2, 'y' => 1]); $manager->executeBulkWrite(NS, $bulk); $updateBulk = new MongoDB\Driver\BulkWrite(); $query = ['_id' => 1]; $update = [ [ '$replaceRoot' => [ 'newRoot' => '$t' ], ], [ '$addFields' => [ 'foo' => 1 ], ], ]; $updateBulk->update($query, $update); $manager->executeBulkWrite(NS, $updateBulk); $cursor = $manager->executeQuery(NS, new \MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- array(%d) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(1) ["u"]=> object(stdClass)#%d (%d) { ["v"]=> int(1) } ["foo"]=> int(1) } [1]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) ["x"]=> int(2) ["y"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-update_error-001.phpt0000644000076500000240000000237313572250760022453 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() with invalid replacement document --FILE-- update(['x' => 1], ['' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['x.y' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['y' => ['$x' => 1]]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ["\xc3\x28" => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid argument for replace: empty key OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid argument for replace: keys cannot contain ".": "x.y" OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid argument for replace: keys cannot begin with "$": "$x" OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid argument for replace: corrupt BSON ===DONE===mongodb-1.6.1/tests/bulk/bulkwrite-update_error-002.phpt0000644000076500000240000000244413572250760022453 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() with invalid update document --FILE-- update(['x' => 1], ['$set' => ['x' => ['' => 1]]]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['$set' => ['x' => ["\xc3\x28" => 1]]]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; /* This newObj argument mixes an update and replacement document, but * php_phongo_bulkwrite_update_has_operators() will categorize it as an update * due to the presence of an atomic operator. As such, _mongoc_validate_update() * will report the error. 
*/ echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['$set' => ['y' => 1], 'z' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid argument for update: empty key OK: Got MongoDB\Driver\Exception\InvalidArgumentException invalid argument for update: corrupt BSON OK: Got MongoDB\Driver\Exception\InvalidArgumentException Invalid key 'z': update only works with $ operators and pipelines ===DONE===mongodb-1.6.1/tests/bulk/bulkwrite-update_error-003.phpt0000644000076500000240000000325013572250760022450 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() with invalid options --FILE-- update(['x' => 1], ['y' => 1], ['multi' => true]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['y' => 1], ['collation' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['$set' => ['y' => 1]], ['collation' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['$set' => ['y' => 1]], ['arrayFilters' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['$set' => ['y' => 1]], ['arrayFilters' => ['foo' => 'bar']]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Replacement document conflicts with true "multi" option OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "collation" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "collation" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "arrayFilters" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException "arrayFilters" option has invalid keys for a BSON array ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-update_error-004.phpt0000644000076500000240000000247613572250760022462 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() with BSON encoding error (invalid UTF-8 string) --FILE-- update(['x' => "\xc3\x28"], ['x' => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['x' => "\xc3\x28"]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['$set' => ['x' => "\xc3\x28"]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['y' => 1], ['collation' => ['locale' => "\xc3\x28"]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "x": %s OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "x": %s OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "$set.x": %s OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "locale": %s ===DONE=== 
mongodb-1.6.1/tests/bulk/bulkwrite-update_error-005.phpt0000644000076500000240000000537613572250760022465 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() with BSON encoding error (null bytes in keys) --FILE-- update(["\0" => 1], ['x' => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(["x\0" => 1], ['x' => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(["\0\0\0" => 1], ['x' => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ["\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ["x\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ["\0\0\0" => 1]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['y' => 1], ['collation' => ["\0" => 1]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['y' => 1], ['collation' => ["x\0" => 1]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n\n"; echo throws(function() use ($bulk) { $bulk->update(['x' => 1], ['y' => 1], ['collation' => ["\0\0\0" => 1]]); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "x". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "x". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "x". OK: Got MongoDB\Driver\Exception\UnexpectedValueException BSON keys cannot contain null bytes. Unexpected null byte after "". ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-update_error-006.phpt0000644000076500000240000000160613572250760022456 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() collation option requires MongoDB 3.4 --SKIPIF-- =', '3.4'); ?> --FILE-- update( ['name' => 'foo'], ['$inc' => ['size' => 1]], ['collation' => ['locale' => 'en_US']] ); echo throws(function() use ($manager, $bulk) { $manager->executeBulkWrite(DATABASE_NAME . '.' . 
COLLECTION_NAME, $bulk); }, 'MongoDB\Driver\Exception\BulkWriteException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\BulkWriteException Bulk write failed due to previous MongoDB\Driver\Exception\RuntimeException: The selected server does not support collation ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite-update_error-007.phpt0000644000076500000240000000173713572250760022464 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite::update() arrayFilters option requires MongoDB 3.6 --SKIPIF-- =', '3.6'); ?> --FILE-- update( ['grades' => ['$gte' => 100]], ['$set' => ['grades.$[element]' => 100 ]], [ 'arrayFilters' => [['element' => ['$gte' => 100]]], 'multi' => true, ] ); echo throws(function() use ($manager, $bulk) { $manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk); }, 'MongoDB\Driver\Exception\BulkWriteException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\BulkWriteException Bulk write failed due to previous MongoDB\Driver\Exception\RuntimeException: The selected server does not support array filters ===DONE=== mongodb-1.6.1/tests/bulk/bulkwrite_error-001.phpt0000644000076500000240000000042413572250760021166 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyBulkWrite may not inherit from final class (MongoDB\Driver\BulkWrite) in %s on line %d mongodb-1.6.1/tests/bulk/bulkwrite_error-002.phpt0000644000076500000240000000154713572250760021176 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite cannot be executed multiple times --SKIPIF-- --FILE-- insert(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); printf("Inserted %d document(s)\n", $result->getInsertedCount()); echo throws(function() use ($manager, $bulk) { $result = $manager->executeBulkWrite(NS, $bulk); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECTF-- Inserted 1 document(s) OK: Got MongoDB\Driver\Exception\InvalidArgumentException BulkWrite objects may only be executed once and this instance has already been executed ===DONE=== mongodb-1.6.1/tests/bulk/write-0001.phpt0000644000076500000240000000470513572250760017165 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite: #001 Variety Bulk --SKIPIF-- --FILE-- insert(array("my" => "value")); $bulk->insert(array("my" => "value", "foo" => "bar")); $bulk->insert(array("my" => "value", "foo" => "bar")); var_dump($bulk); $bulk->delete(array("my" => "value", "foo" => "bar"), array("limit" => 1)); var_dump($bulk); $bulk->update(array("foo" => "bar"), array('$set' => array("foo" => "baz")), array("limit" => 1, "upsert" => 0)); var_dump($bulk); $retval = $manager->executeBulkWrite(NS, $bulk); var_dump($bulk); printf("Inserted: %d\n", getInsertCount($retval)); printf("Deleted: %d\n", getDeletedCount($retval)); printf("Updated: %d\n", getModifiedCount($retval)); printf("Upserted: %d\n", getUpsertedCount($retval)); foreach(getWriteErrors($retval) as $error) { printf("WriteErrors: %", $error); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { 
["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(15) "bulk_write_0001" ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(true) ["server_id"]=> int(%r[1-9]\d*%r) ["write_concern"]=> NULL } Inserted: 3 Deleted: 1 Updated: 1 Upserted: 0 ===DONE=== mongodb-1.6.1/tests/bulk/write-0002.phpt0000644000076500000240000000335013572250760017161 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite: #002 Get the generated ID --SKIPIF-- --FILE-- "Hannes", "country" => "USA", "gender" => "male"); $hayley = array("name" => "Bayley", "country" => "USA", "gender" => "female"); $insertBulk = new \MongoDB\Driver\BulkWrite(['ordered' => true]); $hannes_id = $insertBulk->insert($hannes); $hayley_id = $insertBulk->insert($hayley); $w = 1; $wtimeout = 1000; $writeConcern = new \MongoDB\Driver\WriteConcern($w, $wtimeout); var_dump($insertBulk); $result = $manager->executeBulkWrite(NS, $insertBulk, $writeConcern); var_dump($insertBulk); assert($result instanceof \MongoDB\Driver\WriteResult); printf( "Inserted %d documents to %s\n", $result->getInsertedCount(), $result->getServer()->getHost() ); printf("hannes: %s\nhayley: %s\n", $hannes_id, $hayley_id); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> NULL ["collection"]=> NULL ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(false) ["server_id"]=> int(0) ["write_concern"]=> NULL } object(MongoDB\Driver\BulkWrite)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(15) "bulk_write_0002" ["ordered"]=> bool(true) ["bypassDocumentValidation"]=> NULL ["executed"]=> bool(true) ["server_id"]=> int(%r[1-9]\d*%r) ["write_concern"]=> array(%d) { ["w"]=> int(1) ["wtimeout"]=> int(1000) } } Inserted 2 documents to %s hannes: %s hayley: %s ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-001.phpt0000644000076500000240000000110613572250760024424 0ustar alcaeusstaff--TEST-- Causal consistency: new session has no operation time --SKIPIF-- --FILE-- startSession(); echo "Initial operation time:\n"; var_dump($session->getOperationTime()); ?> ===DONE=== --EXPECT-- Initial operation time: NULL ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-002.phpt0000644000076500000240000000174213572250760024433 0ustar alcaeusstaff--TEST-- Causal consistency: first read in session does not include afterClusterTime --SKIPIF-- --FILE-- observe( function() { $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); }, function(stdClass $command) { $hasAfterClusterTime = isset($command->readConcern->afterClusterTime); printf("Read includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 
'yes' : 'no')); } ); ?> ===DONE=== --EXPECT-- Read includes afterClusterTime: no ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-003.phpt0000644000076500000240000000671713572250760024443 0ustar alcaeusstaff--TEST-- Causal consistency: first read or write in session updates operationTime --SKIPIF-- --FILE-- lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function executeCommand() { $this->lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function executeQuery() { $this->lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { $reply = $event->getReply(); $hasOperationTime = isset($reply->{'operationTime'}); printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 
'yes' : 'no'); if ($hasOperationTime) { $this->lastSeenOperationTime = $reply->operationTime; } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } echo "Testing executeBulkWrite()\n"; (new Test)->executeBulkWrite(); echo "\nTesting executeCommand()\n"; (new Test)->executeCommand(); echo "\nTesting executeQuery()\n"; (new Test)->executeQuery(); ?> ===DONE=== --EXPECT-- Testing executeBulkWrite() insert command reply includes operationTime: yes Session reports last seen operationTime: yes Testing executeCommand() ping command reply includes operationTime: yes Session reports last seen operationTime: yes Testing executeQuery() find command reply includes operationTime: yes Session reports last seen operationTime: yes ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-004.phpt0000644000076500000240000001071113572250760024431 0ustar alcaeusstaff--TEST-- Causal consistency: first read or write in session updates operationTime (even on error) --SKIPIF-- --FILE-- lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); throws(function() use ($manager, $bulk, $session) { $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); }, 'MongoDB\Driver\Exception\BulkWriteException'); printf("Session reports last seen operationTime: %s\n", ($session->getOperationTime() == $this->lastSeenOperationTime) ? 'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function executeCommand() { $this->lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ ['$unsupportedOperator' => 1], ], 'cursor' => new stdClass, ]); throws(function() use ($manager, $command, $session) { $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); }, 'MongoDB\Driver\Exception\RuntimeException'); /* We cannot access the server reply if an exception is thrown for a * failed command (see: PHPC-1076). For the time being, just assert that * the operationTime is not null. */ printf("Session has non-null operationTime: %s\n", ($session->getOperationTime() !== null ? 'yes' : 'no')); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function executeQuery() { $this->lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query(['$unsupportedOperator' => 1]); throws(function() use ($manager, $query, $session) { $manager->executeQuery(NS, $query, ['session' => $session]); }, 'MongoDB\Driver\Exception\RuntimeException'); /* We cannot access the server reply if an exception is thrown for a * failed command (see: PHPC-1076). For the time being, just assert that * the operationTime is not null. */ printf("Session has non-null operationTime: %s\n", ($session->getOperationTime() !== null ? 
'yes' : 'no')); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { $reply = $event->getReply(); $hasOperationTime = isset($reply->operationTime); printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 'yes' : 'no'); if ($hasOperationTime) { $this->lastSeenOperationTime = $reply->operationTime; } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } echo "Testing executeBulkWrite()\n"; (new Test)->executeBulkWrite(); echo "\nTesting executeCommand()\n"; (new Test)->executeCommand(); echo "\nTesting executeQuery()\n"; (new Test)->executeQuery(); ?> ===DONE=== --EXPECT-- Testing executeBulkWrite() insert command reply includes operationTime: yes OK: Got MongoDB\Driver\Exception\BulkWriteException Session reports last seen operationTime: yes Testing executeCommand() OK: Got MongoDB\Driver\Exception\RuntimeException Session has non-null operationTime: yes Testing executeQuery() OK: Got MongoDB\Driver\Exception\RuntimeException Session has non-null operationTime: yes ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-005.phpt0000644000076500000240000000661313572250760024440 0ustar alcaeusstaff--TEST-- Causal consistency: second read's afterClusterTime uses last reply's operationTime --SKIPIF-- --FILE-- lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); $manager->executeQuery(NS, $query, ['session' => $session]); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function executeReadAfterWrite() { $this->lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { $command = $event->getCommand(); $hasAfterClusterTime = isset($command->readConcern->afterClusterTime); printf("%s command includes afterClusterTime: %s\n", $event->getCommandName(), ($hasAfterClusterTime ? 'yes' : 'no')); if ($hasAfterClusterTime && $this->lastSeenOperationTime !== null) { printf("%s command uses last seen operationTime: %s\n", $event->getCommandName(), ($command->readConcern->afterClusterTime == $this->lastSeenOperationTime) ? 'yes' : 'no'); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { $reply = $event->getReply(); $hasOperationTime = isset($reply->operationTime); printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 
'yes' : 'no'); if ($hasOperationTime) { $this->lastSeenOperationTime = $reply->operationTime; } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } echo "Testing read after read\n"; (new Test)->executeReadAfterRead(); echo "\nTesting read after write\n"; (new Test)->executeReadAfterWrite(); ?> ===DONE=== --EXPECT-- Testing read after read find command includes afterClusterTime: no find command reply includes operationTime: yes find command includes afterClusterTime: yes find command uses last seen operationTime: yes find command reply includes operationTime: yes Testing read after write insert command includes afterClusterTime: no insert command reply includes operationTime: yes find command includes afterClusterTime: yes find command uses last seen operationTime: yes find command reply includes operationTime: yes ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-006.phpt0000644000076500000240000001024213572250760024432 0ustar alcaeusstaff--TEST-- Causal consistency: second read's afterClusterTime uses last reply's operationTime (even on error) --SKIPIF-- --FILE-- lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query(['$unsupportedOperator' => 1]); throws(function() use ($manager, $query, $session) { $manager->executeQuery(NS, $query, ['session' => $session]); }, 'MongoDB\Driver\Exception\RuntimeException'); /* We cannot access the server reply if an exception is thrown for a * failed command (see: PHPC-1076). For the time being, just assert that * the operationTime is not null. */ printf("Session has non-null operationTime: %s\n", ($session->getOperationTime() !== null ? 'yes' : 'no')); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function executeReadAfterWriteError() { $this->lastSeenOperationTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); throws(function() use ($manager, $bulk, $session) { $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); }, 'MongoDB\Driver\Exception\BulkWriteException'); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { $command = $event->getCommand(); $hasAfterClusterTime = isset($command->readConcern->afterClusterTime); printf("%s command includes afterClusterTime: %s\n", $event->getCommandName(), ($hasAfterClusterTime ? 'yes' : 'no')); if ($hasAfterClusterTime && $this->lastSeenOperationTime !== null) { printf("%s command uses last seen operationTime: %s\n", $event->getCommandName(), ($command->readConcern->afterClusterTime == $this->lastSeenOperationTime) ? 'yes' : 'no'); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { $reply = $event->getReply(); $hasOperationTime = isset($reply->operationTime); printf("%s command reply includes operationTime: %s\n", $event->getCommandName(), $hasOperationTime ? 
'yes' : 'no'); if ($hasOperationTime) { $this->lastSeenOperationTime = $reply->operationTime; } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } echo "\nTesting read after read error\n"; (new Test)->executeReadAfterReadError(); echo "\nTesting read after write error\n"; (new Test)->executeReadAfterWriteError(); ?> ===DONE=== --EXPECT-- Testing read after read error find command includes afterClusterTime: no OK: Got MongoDB\Driver\Exception\RuntimeException Session has non-null operationTime: yes find command includes afterClusterTime: yes find command reply includes operationTime: yes Testing read after write error insert command includes afterClusterTime: no insert command reply includes operationTime: yes OK: Got MongoDB\Driver\Exception\BulkWriteException find command includes afterClusterTime: yes find command uses last seen operationTime: yes find command reply includes operationTime: yes ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-007.phpt0000644000076500000240000000217013572250760024434 0ustar alcaeusstaff--TEST-- Causal consistency: reads in non-causally consistent session never include afterClusterTime --SKIPIF-- --FILE-- observe( function() { $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(['causalConsistency' => false]); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); $manager->executeQuery(NS, $query, ['session' => $session]); }, function(stdClass $command) { $hasAfterClusterTime = isset($command->readConcern->afterClusterTime); printf("Read includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 'yes' : 'no')); } ); ?> ===DONE=== --EXPECT-- Read includes afterClusterTime: no Read includes afterClusterTime: no ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-008.phpt0000644000076500000240000000246313572250760024442 0ustar alcaeusstaff--TEST-- Causal consistency: default read concern includes afterClusterTime but not level --SKIPIF-- --FILE-- observe( function() { $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); $manager->executeQuery(NS, $query, ['session' => $session]); }, function(stdClass $command) { $hasAfterClusterTime = isset($command->readConcern->afterClusterTime); printf("Read concern includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 'yes' : 'no')); $hasLevel = isset($command->readConcern->level); printf("Read concern includes level: %s\n", ($hasLevel ? 
'yes' : 'no')); } ); ?> ===DONE=== --EXPECT-- Read concern includes afterClusterTime: no Read concern includes level: no Read concern includes afterClusterTime: yes Read concern includes level: no ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-009.phpt0000644000076500000240000000265413572250760024445 0ustar alcaeusstaff--TEST-- Causal consistency: custom read concern merges afterClusterTime and level --SKIPIF-- --FILE-- observe( function() { $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $readConcern = new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::MAJORITY); $query = new MongoDB\Driver\Query([], ['readConcern' => $readConcern]); $manager->executeQuery(NS, $query, ['session' => $session]); $manager->executeQuery(NS, $query, ['session' => $session]); }, function(stdClass $command) { $hasAfterClusterTime = isset($command->readConcern->afterClusterTime); printf("Read concern includes afterClusterTime: %s\n", ($hasAfterClusterTime ? 'yes' : 'no')); $hasLevel = isset($command->readConcern->level); printf("Read concern includes level: %s\n", ($hasLevel ? 'yes' : 'no')); } ); ?> ===DONE=== --EXPECT-- Read concern includes afterClusterTime: no Read concern includes level: yes Read concern includes afterClusterTime: yes Read concern includes level: yes ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-010.phpt0000644000076500000240000000224713572250760024433 0ustar alcaeusstaff--TEST-- Causal consistency: unacknowledged write does not update operationTime --SKIPIF-- --FILE-- startSession(); echo "Initial operation time:\n"; var_dump($session->getOperationTime()); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $writeConcern = new MongoDB\Driver\WriteConcern(0); /* Ignore the InvalidArgumentException for trying to combine an unacknowledged * write concern with an explicit session. */ try { $manager->executeBulkWrite(NS, $bulk, ['session' => $session, 'writeConcern' => $writeConcern]); } catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {} echo "\nOperation time after unacknowledged write:\n"; var_dump($session->getOperationTime()); ?> ===DONE=== --EXPECT-- Initial operation time: NULL Operation time after unacknowledged write: NULL ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-011.phpt0000644000076500000240000000207213572250760024430 0ustar alcaeusstaff--TEST-- Causal consistency: $clusterTime is not sent in commands to unsupported deployments --SKIPIF-- --FILE-- observe( function() { $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); $manager->executeQuery(NS, $query, ['session' => $session]); }, function(stdClass $command) { $hasClusterTime = isset($command->{'$clusterTime'}); printf("Command includes \$clusterTime: %s\n", ($hasClusterTime ? 
'yes' : 'no')); } ); ?> ===DONE=== --EXPECT-- Command includes $clusterTime: no Command includes $clusterTime: no ===DONE=== mongodb-1.6.1/tests/causal-consistency/causal-consistency-012.phpt0000644000076500000240000000206513572250760024433 0ustar alcaeusstaff--TEST-- Causal consistency: $clusterTime is sent in commands to supported deployments --SKIPIF-- --FILE-- observe( function() { $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); $manager->executeQuery(NS, $query, ['session' => $session]); }, function(stdClass $command) { $hasClusterTime = isset($command->{'$clusterTime'}); printf("Command includes \$clusterTime: %s\n", ($hasClusterTime ? 'yes' : 'no')); } ); ?> ===DONE=== --EXPECT-- Command includes $clusterTime: yes Command includes $clusterTime: yes ===DONE=== mongodb-1.6.1/tests/command/command-ctor-001.phpt0000644000076500000240000000422713572250760021016 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Command construction should always encode __pclass for Persistable objects --SKIPIF-- --FILE-- id = $id; $this->child = $child; } public function bsonSerialize() { return [ '_id' => $this->id, 'child' => $this->child, ]; } public function bsonUnserialize(array $data) { $this->id = $data['_id']; $this->child = $data['child']; } } $manager = new MongoDB\Driver\Manager(URI); $document = new MyClass('foo', new MyClass('bar', new MyClass('baz'))); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['_id' => 'foo'], 'update' => $document, 'upsert' => true, 'new' => true, ])); var_dump($cursor->toArray()[0]->value); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ ['$match' => $document], ], 'cursor' => new stdClass(), ])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "foo" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "bar" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "baz" ["child":"MyClass":private]=> NULL } } } object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "foo" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "bar" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "baz" ["child":"MyClass":private]=> NULL } } } ===DONE=== mongodb-1.6.1/tests/command/command_error-001.phpt0000644000076500000240000000041213572250760021252 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Command cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyCommand may not inherit from final class (MongoDB\Driver\Command) in %s on line %d mongodb-1.6.1/tests/command/cursor-batchsize-001.phpt0000644000076500000240000000471213572250760021721 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Command non-zero batchSize applies to getMore --SKIPIF-- --FILE-- insert(['_id' => $i]); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$match' => new stdClass]], 'cursor' => ['batchSize' => 2] ]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $cursor->toArray(); 
MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { $command = $event->getCommand(); if ($event->getCommandName() === 'aggregate') { printf("aggregate command specifies batchSize: %d\n", $command->cursor->batchSize); } if ($event->getCommandName() === 'getMore') { printf("getMore command specifies batchSize: %d\n", $command->batchSize); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { $reply = $event->getReply(); if ($event->getCommandName() === 'aggregate') { printf("aggregate response contains %d document(s)\n", count($reply->cursor->firstBatch)); } if ($event->getCommandName() === 'getMore') { printf("getMore response contains %d document(s)\n", count($reply->cursor->nextBatch)); } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } (new Test)->executeCommand(); ?> ===DONE=== --EXPECT-- Inserted: 5 aggregate command specifies batchSize: 2 aggregate response contains 2 document(s) getMore command specifies batchSize: 2 getMore response contains 2 document(s) getMore command specifies batchSize: 2 getMore response contains 1 document(s) ===DONE=== mongodb-1.6.1/tests/command/cursor-batchsize-002.phpt0000644000076500000240000000462413572250760021724 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Command batchSize of zero is ignored for getMore --SKIPIF-- --FILE-- insert(['_id' => $i]); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$match' => new stdClass]], 'cursor' => ['batchSize' => 0] ]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $cursor->toArray(); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { $command = $event->getCommand(); if ($event->getCommandName() === 'aggregate') { printf("aggregate command specifies batchSize: %d\n", $command->cursor->batchSize); } if ($event->getCommandName() === 'getMore') { printf("getMore command specifies batchSize: %s\n", isset($command->batchSize) ? 
'yes' : 'no'); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { $reply = $event->getReply(); if ($event->getCommandName() === 'aggregate') { printf("aggregate response contains %d document(s)\n", count($reply->cursor->firstBatch)); } if ($event->getCommandName() === 'getMore') { printf("getMore response contains %d document(s)\n", count($reply->cursor->nextBatch)); } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } (new Test)->executeCommand(); ?> ===DONE=== --EXPECT-- Inserted: 5 aggregate command specifies batchSize: 0 aggregate response contains 0 document(s) getMore command specifies batchSize: no getMore response contains 5 document(s) ===DONE=== mongodb-1.6.1/tests/command/cursor-tailable-001.phpt0000644000076500000240000000352113572250760021517 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Command tailable cursor iteration with maxAwaitTimeMS option --SKIPIF-- --FILE-- executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'create' => COLLECTION_NAME, 'capped' => true, 'size' => 1048576, ])); $bulkWrite = new MongoDB\Driver\BulkWrite; $bulkWrite->insert(['_id' => 1]); $manager->executeBulkWrite(NS, $bulkWrite); $pipeline = [ [ '$changeStream' => [ 'fullDocument' => 'updateLookup' ] ] ]; $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => $pipeline, 'cursor' => ['batchSize' => 0], ], [ 'maxAwaitTimeMS' => 500, ]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $it = new IteratorIterator($cursor); $it->rewind(); $it->next(); $bulkWrite = new MongoDB\Driver\BulkWrite; $bulkWrite->insert(['_id' => "new-document"]); $manager->executeBulkWrite(NS, $bulkWrite); $startTime = microtime(true); echo "Awaiting results...\n"; $it->next(); var_dump($it->current()->operationType, $it->current()->documentKey); printf("Waited for %.6f seconds\n", microtime(true) - $startTime); $startTime = microtime(true); echo "Awaiting results...\n"; $it->next(); var_dump($it->current()); /* Should be NULL */ printf("Waited for %.6f seconds\n", microtime(true) - $startTime); ?> ===DONE=== --EXPECTF-- Awaiting results... string(6) "insert" object(stdClass)#%d (%d) { ["_id"]=> string(12) "new-document" } Waited for 0.%d seconds Awaiting results... NULL Waited for 0.%r(4|5)\d*%r seconds ===DONE=== mongodb-1.6.1/tests/command/findAndModify-001.phpt0000644000076500000240000000347413572250760021151 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Command with findAndModify and arrayFilters --SKIPIF-- --FILE-- insert([ '_id' => 1, 'grades' => [ 95, 92, 90 ] ]); $bulk->insert([ '_id' => 2, 'grades' => [ 98, 100, 102 ] ]); $bulk->insert([ '_id' => 3, 'grades' => [ 95, 110, 100 ] ]); $manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk); $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['grades' => [ '$gt' => 100 ] ], 'update' => ['$set' => [ 'grades.$[element]' => 100 ] ], 'arrayFilters' => [ [ 'element' => [ '$gt' => 100 ] ] ], ]); // Running this twice, because findAndModify only updates the first document // it finds. $manager->executeCommand(DATABASE_NAME, $command); $manager->executeCommand(DATABASE_NAME, $command); $cursor = $manager->executeQuery( DATABASE_NAME . '.' . 
COLLECTION_NAME, new \MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- array(%d) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(1) ["grades"]=> array(%d) { [0]=> int(95) [1]=> int(92) [2]=> int(90) } } [1]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) ["grades"]=> array(%d) { [0]=> int(98) [1]=> int(100) [2]=> int(100) } } [2]=> object(stdClass)#%d (%d) { ["_id"]=> int(3) ["grades"]=> array(%d) { [0]=> int(95) [1]=> int(100) [2]=> int(100) } } } ===DONE=== mongodb-1.6.1/tests/command/update-001.phpt0000644000076500000240000000333313572250760017712 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Command with update and arrayFilters --SKIPIF-- --FILE-- insert([ '_id' => 1, 'grades' => [ 95, 92, 90 ] ]); $bulk->insert([ '_id' => 2, 'grades' => [ 98, 100, 102 ] ]); $bulk->insert([ '_id' => 3, 'grades' => [ 95, 110, 100 ] ]); $manager->executeBulkWrite(DATABASE_NAME . '.' . COLLECTION_NAME, $bulk); $command = new MongoDB\Driver\Command([ 'update' => COLLECTION_NAME, 'updates' => [[ 'q' => [ 'grades' => [ '$gte' => 100 ] ], 'u' => [ '$set' => [ 'grades.$[element]' => 100 ] ], 'arrayFilters' => [ [ 'element' => [ '$gte' => 100 ] ] ], 'multi' => true ]], ]); $manager->executeCommand(DATABASE_NAME, $command); $cursor = $manager->executeQuery( DATABASE_NAME . '.' . COLLECTION_NAME, new \MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- array(%d) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(1) ["grades"]=> array(%d) { [0]=> int(95) [1]=> int(92) [2]=> int(90) } } [1]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) ["grades"]=> array(%d) { [0]=> int(98) [1]=> int(100) [2]=> int(100) } } [2]=> object(stdClass)#%d (%d) { ["_id"]=> int(3) ["grades"]=> array(%d) { [0]=> int(95) [1]=> int(100) [2]=> int(100) } } } ===DONE=== mongodb-1.6.1/tests/connect/bug0720.phpt0000644000076500000240000000206413572250760017233 0ustar alcaeusstaff--TEST-- PHPC-720: Do not persist SSL streams to avoid SSL reinitialization errors --SKIPIF-- --FILE-- true, 'ca_file' => $SSL_DIR . '/ca.pem', ]; $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); unset($manager, $cursor); $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/bug1015.phpt0000644000076500000240000000172713572250760017236 0ustar alcaeusstaff--TEST-- PHPC-1015: Initial DNS Seedlist test --SKIPIF-- --FILE-- selectServer( new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_NEAREST ) ); $servers = $m->getServers(); foreach ( $servers as $server ) { echo $server->getHost(), ':', $server->getPort(), "\n"; } ?> ===DONE=== --EXPECTF-- %d.%d.%d.%d:27017 %d.%d.%d.%d:27018 %d.%d.%d.%d:27019 ===DONE=== mongodb-1.6.1/tests/connect/bug1045.phpt0000644000076500000240000000131713572250760017234 0ustar alcaeusstaff--TEST-- PHPC-1045: Segfault if username is not provided for SCRAM-SHA-1 authMechanism --SKIPIF-- --FILE-- 'SCRAM-SHA-1']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: 'SCRAM-SHA-1' authentication mechanism requires username. 
===DONE=== mongodb-1.6.1/tests/connect/compression_error-001.phpt0000644000076500000240000000072613572250760022220 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Connecting with unsupported compressor --SKIPIF-- --FILE-- 'zli'] ); ini_set('mongodb.debug', null); ?> ===DONE=== --EXPECTF-- %AWARNING > Unsupported compressor: 'zli'%A ===DONE=== mongodb-1.6.1/tests/connect/compression_error-002.phpt0000644000076500000240000000107613572250760022220 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Connecting with invalid compressor values --SKIPIF-- --FILE-- "foo\xFEbar"] ); }, 'MongoDB\Driver\Exception\UnexpectedValueException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\UnexpectedValueException Detected invalid UTF-8 for field path "compressors": %s ===DONE=== mongodb-1.6.1/tests/connect/replicaset-seedlist-001.phpt0000644000076500000240000000212413572250760022405 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Connecting to Replica Set with only secondary in seedlist --SKIPIF-- --FILE-- getInfo(); // As we're building our own URL here, we do need to extract username and password $url = parse_url(URI); if (array_key_exists('user', $url) && array_key_exists('pass', $url)) { $dsn = sprintf('mongodb://%s:%s@%s', $url['user'], $url['pass'], $info['me']); } else { $dsn = 'mongodb://' . $info['me']; } $manager = new MongoDB\Driver\Manager($dsn, ['replicaSet' => $info['setName']]); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array("_id" => 1, "x" => 2, "y" => 3)); $bulk->insert(array("_id" => 2, "x" => 3, "y" => 4)); $bulk->insert(array("_id" => 3, "x" => 4, "y" => 5)); $manager->executeBulkWrite(NS, $bulk); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/connect/replicaset-seedlist-002.phpt0000644000076500000240000000225313572250760022411 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Connecting to Replica Set with only arbiter in seedlist --SKIPIF-- --FILE-- getInfo(); // As we're building our own URL here, we do need to extract username and password // We already checked whether there is an arbiter through `skip_if_no_arbiter` $url = parse_url(URI); if (array_key_exists('user', $url) && array_key_exists('pass', $url)) { $dsn = sprintf('mongodb://%s:%s@%s', $url['user'], $url['pass'], $info['arbiters'][0]); } else { $dsn = 'mongodb://' . 
$info['arbiters'][0]; } $manager = new MongoDB\Driver\Manager($dsn, ['replicaSet' => $info['setName']]); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array("_id" => 1, "x" => 2, "y" => 3)); $bulk->insert(array("_id" => 2, "x" => 3, "y" => 4)); $bulk->insert(array("_id" => 3, "x" => 4, "y" => 5)); $manager->executeBulkWrite(NS, $bulk); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/connect/standalone-auth-001.phpt0000644000076500000240000000221213572250760021525 0ustar alcaeusstaff--TEST-- Connect to MongoDB with using default auth mechanism --SKIPIF-- --FILE-- insert(array("my" => "value")); $bulk->insert(array("my" => "value", "foo" => "bar")); $bulk->insert(array("my" => "value", "foo" => "bar")); $bulk->delete(array("my" => "value", "foo" => "bar"), array("limit" => 1)); $bulk->update(array("foo" => "bar"), array('$set' => array("foo" => "baz")), array("limit" => 1, "upsert" => 0)); $retval = $manager->executeBulkWrite(NS, $bulk); printf("Inserted: %d\n", getInsertCount($retval)); printf("Deleted: %d\n", getDeletedCount($retval)); printf("Updated: %d\n", getModifiedCount($retval)); printf("Upserted: %d\n", getUpsertedCount($retval)); foreach(getWriteErrors($retval) as $error) { printf("WriteErrors: %", $error); } ?> ===DONE=== --EXPECT-- Inserted: 3 Deleted: 1 Updated: 1 Upserted: 0 ===DONE=== mongodb-1.6.1/tests/connect/standalone-auth_error-001.phpt0000644000076500000240000000176213572250760022747 0ustar alcaeusstaff--TEST-- Connect to MongoDB with using default auth mechanism and wrong password --SKIPIF-- --FILE-- insert(array("my" => "value")); echo throws(function() use($manager, $bulk) { $manager->executeBulkWrite(NS, $bulk); }, 'MongoDB\Driver\Exception\BulkWriteException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\BulkWriteException Bulk write failed due to previous MongoDB\Driver\Exception\AuthenticationException: Authentication failed. 
===DONE=== mongodb-1.6.1/tests/connect/standalone-plain-0001.phpt0000644000076500000240000000405213572250760021753 0ustar alcaeusstaff--TEST-- Connect to MongoDB with using PLAIN auth mechanism --XFAIL-- authMechanism=PLAIN (LDAP) tests must be reimplemented (PHPC-1172) parse_url() tests must be reimplemented (PHPC-1177) --SKIPIF-- --FILE-- "bugs", "roles" => array(array("role" => "readWrite", "db" => DATABASE_NAME)), ); $command = new MongoDB\Driver\Command($cmd); try { $result = $adminmanager->executeCommand('$external', $command); echo "User Created\n"; } catch(Exception $e) { echo $e->getMessage(), "\n"; } $username = "bugs"; $password = "password"; $database = '$external'; $dsn = sprintf("mongodb://%s:%s@%s:%d/?authSource=%s&authMechanism=PLAIN", $username, $password, $parsed["host"], $parsed["port"], $database); $manager = new MongoDB\Driver\Manager($dsn); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array("very" => "important")); try { $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array("very" => "important")); $cursor = $manager->executeQuery(NS, $query); foreach($cursor as $document) { var_dump($document->very); } $cmd = new MongoDB\Driver\Command(array("drop" => COLLECTION_NAME)); $result = $manager->executeCommand(DATABASE_NAME, $cmd); } catch(Exception $e) { printf("Caught %s: %s\n", get_class($e), $e->getMessage()); } $cmd = array( "dropUser" => "bugs", ); $command = new MongoDB\Driver\Command($cmd); try { $result = $adminmanager->executeCommand('$external', $command); echo "User deleted\n"; } catch(Exception $e) { echo $e->getMessage(), "\n"; } ?> ===DONE=== --EXPECT-- User Created string(9) "important" User deleted ===DONE=== mongodb-1.6.1/tests/connect/standalone-plain-0002.phpt0000644000076500000240000000343013572250760021753 0ustar alcaeusstaff--TEST-- Connect to MongoDB with using PLAIN auth mechanism #002 --XFAIL-- authMechanism=PLAIN (LDAP) tests must be reimplemented (PHPC-1172) parse_url() tests must be reimplemented (PHPC-1177) --SKIPIF-- --FILE-- "bugs", "roles" => array(array("role" => "readWrite", "db" => DATABASE_NAME)), ); $command = new MongoDB\Driver\Command($cmd); try { $result = $adminmanager->executeCommand('$external', $command); echo "User Created\n"; } catch(Exception $e) { echo $e->getMessage(), "\n"; } $username = "bugs"; $password = "wrong-password"; $database = '$external'; $dsn = sprintf("mongodb://%s:%s@%s:%d/?authSource=%s&authMechanism=PLAIN", $username, $password, $parsed["host"], $parsed["port"], $database); $manager = new MongoDB\Driver\Manager($dsn); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array("very" => "important")); throws(function() use($manager, $bulk) { $manager->executeBulkWrite(NS, $bulk); }, "MongoDB\Driver\Exception\AuthenticationException"); $cmd = array( "dropUser" => "bugs", ); $command = new MongoDB\Driver\Command($cmd); try { $result = $adminmanager->executeCommand('$external', $command); echo "User deleted\n"; } catch(Exception $e) { echo $e->getMessage(), "\n"; } ?> ===DONE=== --EXPECT-- User Created OK: Got MongoDB\Driver\Exception\AuthenticationException User deleted ===DONE=== mongodb-1.6.1/tests/connect/standalone-ssl-no_verify-001.phpt0000644000076500000240000000122213572250760023363 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and no host/cert verification --SKIPIF-- --FILE-- true, "weak_cert_validation" => true, ]; $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new 
MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/standalone-ssl-no_verify-002.phpt0000644000076500000240000000144013572250760023366 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and no host/cert verification (context options) --SKIPIF-- --FILE-- stream_context_create([ 'ssl' => [ 'allow_invalid_hostname' => true, 'allow_self_signed' => true, // "weak_cert_validation" alias ], ]), ]; $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/standalone-ssl-verify_cert-001.phpt0000644000076500000240000000146313572250760023713 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and cert verification --SKIPIF-- --FILE-- true, 'weak_cert_validation' => false, 'ca_file' => $SSL_DIR . '/ca.pem', ]; $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/standalone-ssl-verify_cert-002.phpt0000644000076500000240000000174313572250760023715 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and cert verification (context options) --SKIPIF-- --FILE-- stream_context_create([ 'ssl' => [ // libmongoc does not allow the hostname to be overridden as "server" 'allow_invalid_hostname' => true, 'allow_self_signed' => false, // "weak_cert_validation" alias 'cafile' => $SSL_DIR . 
'/ca.pem', // "ca_file" alias ], ]), ]; $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/standalone-ssl-verify_cert-error-001.phpt0000644000076500000240000000174713572250760025047 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and cert verification error --SKIPIF-- --FILE-- true, 'weak_cert_validation' => false, ]; echo throws(function() use ($driverOptions) { $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException', 'executeCommand'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException thrown from executeCommand No suitable servers found (`serverSelectionTryOnce` set): [%s calling ismaster on '%s:%d'] ===DONE=== mongodb-1.6.1/tests/connect/standalone-ssl-verify_cert-error-002.phpt0000644000076500000240000000217513572250760025044 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and cert verification error (context options) --SKIPIF-- --FILE-- stream_context_create([ 'ssl' => [ // libmongoc does not allow the hostname to be overridden as "server" 'allow_invalid_hostname' => true, 'allow_self_signed' => false, // "weak_cert_validation" alias ], ]), ]; echo throws(function() use ($driverOptions) { $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException', 'executeCommand'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException thrown from executeCommand No suitable servers found (`serverSelectionTryOnce` set): [%s calling ismaster on '%s:%d'] ===DONE=== mongodb-1.6.1/tests/connect/standalone-x509-auth-001.phpt0000644000076500000240000000161413572250760022235 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and X509 auth --SKIPIF-- --FILE-- true, 'weak_cert_validation' => false, 'ca_file' => $SSL_DIR . '/ca.pem', 'pem_file' => $SSL_DIR . '/client.pem', ]; $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/standalone-x509-auth-002.phpt0000644000076500000240000000213113572250760022231 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and X509 auth (stream context) --SKIPIF-- --FILE-- stream_context_create([ 'ssl' => [ // libmongoc does not allow the hostname to be overridden as "server" 'allow_invalid_hostname' => true, 'allow_self_signed' => false, // "weak_cert_validation" alias 'cafile' => $SSL_DIR . '/ca.pem', // "ca_file" alias 'local_cert' => $SSL_DIR . 
'/client.pem', // "pem_file" alias ], ]), ]; $manager = new MongoDB\Driver\Manager(URI, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/standalone-x509-error-0001.phpt0000644000076500000240000000302613572250760022504 0ustar alcaeusstaff--TEST-- X509 connection should not reuse previous stream after an auth failure --XFAIL-- parse_url() tests must be reimplemented (PHPC-1177) --SKIPIF-- --FILE-- true, 'ca_file' => $SSL_DIR . '/ca.pem', 'pem_file' => $SSL_DIR . '/client.pem', ]; // Wrong username for X509 authentication $parsed = parse_url(URI); $dsn = sprintf('mongodb://username@%s:%d/?ssl=true&authMechanism=MONGODB-X509', $parsed['host'], $parsed['port']); // Both should fail with auth failure, without reusing the previous stream for ($i = 0; $i < 2; $i++) { echo throws(function() use ($dsn, $driverOptions) { $manager = new MongoDB\Driver\Manager($dsn, [], $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); }, 'MongoDB\Driver\Exception\AuthenticationException', 'executeCommand'), "\n"; } ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\AuthenticationException thrown from executeCommand auth failed OK: Got MongoDB\Driver\Exception\AuthenticationException thrown from executeCommand auth failed ===DONE=== mongodb-1.6.1/tests/connect/standalone-x509-extract_username-001.phpt0000644000076500000240000000231113572250760024640 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and X509 auth and username retrieved from cert --XFAIL-- parse_url() tests must be reimplemented (PHPC-1177) --SKIPIF-- --FILE-- true, 'weak_cert_validation' => false, 'ca_file' => $SSL_DIR . '/ca.pem', 'pem_file' => $SSL_DIR . '/client.pem', ]; $uriOptions = ['authMechanism' => 'MONGODB-X509', 'ssl' => true]; $parsed = parse_url(URI); $uri = sprintf('mongodb://%s:%d', $parsed['host'], $parsed['port']); $manager = new MongoDB\Driver\Manager($uri, $uriOptions, $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/connect/standalone-x509-extract_username-002.phpt0000644000076500000240000000262613572250760024652 0ustar alcaeusstaff--TEST-- Connect to MongoDB with SSL and X509 auth and username retrieved from cert (stream context) --XFAIL-- parse_url() tests must be reimplemented (PHPC-1177) --SKIPIF-- --FILE-- stream_context_create([ 'ssl' => [ // libmongoc does not allow the hostname to be overridden as "server" 'allow_invalid_hostname' => true, 'allow_self_signed' => false, // "weak_cert_validation" alias 'cafile' => $SSL_DIR . '/ca.pem', // "ca_file" alias 'local_cert' => $SSL_DIR . 
'/client.pem', // "pem_file" alias ], ]), ]; $uriOptions = ['authMechanism' => 'MONGODB-X509', 'ssl' => true]; $parsed = parse_url(URI); $uri = sprintf('mongodb://%s:%d', $parsed['host'], $parsed['port']); $manager = new MongoDB\Driver\Manager($uri, $uriOptions, $driverOptions); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/cursor/bug0671-001.phpt0000644000076500000240000000122713572250760017422 0ustar alcaeusstaff--TEST-- PHPC-671: Segfault if Manager is already freed when destructing live Cursor --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); unset($manager); unset($cursor); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/cursor/bug0732-001.phpt0000644000076500000240000000166013572250760017421 0ustar alcaeusstaff--TEST-- PHPC-732: Possible mongoc_client_t use-after-free with Cursor wrapped in generator --SKIPIF-- --FILE-- $value) { yield $key => $value; } } $manager = new MongoDB\Driver\Manager(URI); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); $generator = wrapCursor($cursor); foreach ($generator as $value) { echo "Exiting during first iteration on generator\n"; exit(0); } ?> ===DONE=== --EXPECT-- Exiting during first iteration on generator mongodb-1.6.1/tests/cursor/bug0849-001.phpt0000644000076500000240000000177713572250760017443 0ustar alcaeusstaff--TEST-- PHPC-849: Cursor::setTypeMap() leaks current element if called during iteration --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); $cursor->setTypeMap(['root' => 'stdClass']); foreach ($cursor as $i => $document) { // Type map will apply to the next iteration, since current element is already converted $cursor->setTypeMap(['root' => ($i % 2 ? 
'stdClass' : 'array')]); var_dump($document); } ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["_id"]=> int(1) } array(1) { ["_id"]=> int(2) } object(stdClass)#%d (%d) { ["_id"]=> int(3) } ===DONE=== mongodb-1.6.1/tests/cursor/bug0924-001.phpt0000644000076500000240000000270313572250760017423 0ustar alcaeusstaff--TEST-- PHPC-924: Cursor::setTypeMap() may unnecessarily convert first BSON document (type map) --SKIPIF-- --FILE-- data['_id'] = $id; } public function bsonSerialize() { return (object) $this->data; } public function bsonUnserialize(array $data) { printf("%s called for ID: %s\n", __METHOD__, $data['_id']); $this->data = $data; } } $manager = new MongoDB\Driver\Manager(URI); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(new MyDocument('a')); $bulk->insert(new MyDocument('b')); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); $cursor->setTypeMap(['root' => 'MyDocument']); foreach ($cursor as $i => $document) { var_dump($document); } ?> ===DONE=== --EXPECTF-- MyDocument::bsonUnserialize called for ID: a object(MyDocument)#%d (%d) { ["data":"MyDocument":private]=> array(1) { ["_id"]=> string(1) "a" } } MyDocument::bsonUnserialize called for ID: b object(MyDocument)#%d (%d) { ["data":"MyDocument":private]=> array(1) { ["_id"]=> string(1) "b" } } ===DONE=== mongodb-1.6.1/tests/cursor/bug0924-002.phpt0000644000076500000240000000354213572250760017426 0ustar alcaeusstaff--TEST-- PHPC-924: Cursor::setTypeMap() may unnecessarily convert first BSON document (__pclass) --SKIPIF-- --FILE-- data['_id'] = $id; } public function bsonSerialize() { return (object) $this->data; } public function bsonUnserialize(array $data) { printf("%s called for ID: %s\n", __METHOD__, $data['_id']); $this->data = $data; } } $manager = new MongoDB\Driver\Manager(URI); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(new MyDocument('a')); $bulk->insert(new MyDocument('b')); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); /* This type map will have no effect on the query result, since the document * only contains an ID, but it allows us to test for unnecessary conversion. 
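 * Root-level deserialization here is still driven by the __pclass field that
 * the Persistable documents stored on insert, which is why the expected output
 * below shows MyDocument::bsonUnserialize being called once per document.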
*/ $cursor->setTypeMap(['array' => 'array']); foreach ($cursor as $i => $document) { var_dump($document); } ?> ===DONE=== --EXPECTF-- MyDocument::bsonUnserialize called for ID: a object(MyDocument)#%d (%d) { ["data":"MyDocument":private]=> array(2) { ["_id"]=> string(1) "a" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(10) "MyDocument" ["type"]=> int(128) } } } MyDocument::bsonUnserialize called for ID: b object(MyDocument)#%d (%d) { ["data":"MyDocument":private]=> array(2) { ["_id"]=> string(1) "b" ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(10) "MyDocument" ["type"]=> int(128) } } } ===DONE=== mongodb-1.6.1/tests/cursor/bug1050-001.phpt0000644000076500000240000000356013572250760017414 0ustar alcaeusstaff--TEST-- PHPC-1050: Command cursor should not invoke getMore at execution --SKIPIF-- --FILE-- COLLECTION_NAME, 'pipeline' => [ ['$changeStream' => (object) []], ], 'cursor' => (object) [], ], [ 'maxAwaitTimeMS' => 500, ] ); $start = microtime(true); $cursor = $manager->executeReadCommand(DATABASE_NAME, $cmd); printf("Executing command took %0.6f seconds\n", microtime(true) - $start); $it = new IteratorIterator($cursor); $start = microtime(true); $it->rewind(); printf("Rewinding cursor took %0.6f seconds\n", microtime(true) - $start); printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no'); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk); $start = microtime(true); $it->next(); printf("Advancing cursor took %0.6f seconds\n", microtime(true) - $start); printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no'); $document = $it->current(); if (isset($document)) { printf("Operation type: %s\n", $document->operationType); var_dump($document->fullDocument); } ?> ===DONE=== --EXPECTF-- Executing command took 0.%d seconds Rewinding cursor took 0.%r(4|5)%r%d seconds Current position is valid: no Advancing cursor took %d.%d seconds Current position is valid: yes Operation type: insert object(stdClass)#%d (%d) { ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } ["x"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/cursor/bug1050-002.phpt0000644000076500000240000000374613572250760017423 0ustar alcaeusstaff--TEST-- PHPC-1050: Command cursor should not invoke getMore at execution (rewind omitted) --SKIPIF-- --FILE-- COLLECTION_NAME, 'pipeline' => [ ['$changeStream' => (object) []], ], 'cursor' => (object) [], ], [ 'maxAwaitTimeMS' => 500, ] ); $start = microtime(true); $cursor = $manager->executeReadCommand(DATABASE_NAME, $cmd); printf("Executing command took %0.6f seconds\n", microtime(true) - $start); $it = new IteratorIterator($cursor); printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no'); $start = microtime(true); $it->next(); printf("Advancing cursor took %0.6f seconds\n", microtime(true) - $start); printf("Current position is valid: %s\n", $it->valid() ? 'yes' : 'no'); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk); $start = microtime(true); $it->next(); printf("Advancing cursor took %0.6f seconds\n", microtime(true) - $start); printf("Current position is valid: %s\n", $it->valid() ? 
'yes' : 'no'); $document = $it->current(); if (isset($document)) { printf("Operation type: %s\n", $document->operationType); var_dump($document->fullDocument); } ?> ===DONE=== --EXPECTF-- Executing command took 0.%d seconds Current position is valid: no Advancing cursor took 0.%r(4|5)%r%d seconds Current position is valid: no Advancing cursor took %d.%d seconds Current position is valid: yes Operation type: insert object(stdClass)#%d (%d) { ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } ["x"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/cursor/bug1151-001.phpt0000644000076500000240000000155413572250760017417 0ustar alcaeusstaff--TEST-- PHPC-1151: Segfault if session unset before first getMore (find) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query([], ['batchSize' => 2]); $session = $manager->startSession(); $cursor = $manager->executeQuery(NS, $query, ['session' => $session]); foreach ($cursor as $document) { unset($session); echo $document->_id, "\n"; } ?> ===DONE=== --EXPECT-- 1 2 3 ===DONE=== mongodb-1.6.1/tests/cursor/bug1151-002.phpt0000644000076500000240000000172313572250760017416 0ustar alcaeusstaff--TEST-- PHPC-1151: Segfault if session unset before first getMore (aggregate) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [], 'cursor' => ['batchSize' => 2], ]); $session = $manager->startSession(); $cursor = $manager->executeReadCommand(DATABASE_NAME, $command, ['session' => $session]); foreach ($cursor as $document) { unset($session); echo $document->_id, "\n"; } ?> ===DONE=== --EXPECT-- 1 2 3 ===DONE=== mongodb-1.6.1/tests/cursor/bug1151-003.phpt0000644000076500000240000000146213572250760017417 0ustar alcaeusstaff--TEST-- PHPC-1151: Segfault if session unset before cursor is killed (find) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query([], ['batchSize' => 2]); $session = $manager->startSession(); $cursor = $manager->executeQuery(NS, $query, ['session' => $session]); unset($session); unset($cursor); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/cursor/bug1151-004.phpt0000644000076500000240000000163113572250760017416 0ustar alcaeusstaff--TEST-- PHPC-1151: Segfault if session unset before cursor is killed (aggregate) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [], 'cursor' => ['batchSize' => 2], ]); $session = $manager->startSession(); $cursor = $manager->executeReadCommand(DATABASE_NAME, $command, ['session' => $session]); unset($session); unset($cursor); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/cursor/bug1152-001.phpt0000644000076500000240000001135013572250760017413 0ustar alcaeusstaff--TEST-- PHPC-1152: Command cursors should use the same session for getMore and killCursors (implicit) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$match' => new 
stdClass]], 'cursor' => ['batchSize' => 2], ]); MongoDB\Driver\Monitoring\addSubscriber($this); /* By creating two cursors with the same name, PHP's reference counting * will destroy the first after the second is created. Note that * mongoc_cursor_destroy also destroys implicit sessions and returns * them to the LIFO pool. This sequencing allows us to test that getMore * and killCursors use the session ID corresponding to the original * aggregate command. */ $cursor = $manager->executeCommand(DATABASE_NAME, $command); $cursor->toArray(); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $cursor->toArray(); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $cursor = $manager->executeCommand(DATABASE_NAME, $command); unset($cursor); MongoDB\Driver\Monitoring\removeSubscriber($this); /* We should expect two unique session IDs over the course of the test, * since at most two implicit sessions would have been in use at any * given time. */ printf("Unique session IDs used: %d\n", count(array_unique($this->lsidByRequestId))); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { $requestId = $event->getRequestId(); $sessionId = bin2hex((string) $event->getCommand()->lsid->id); printf("%s session ID: %s\n", $event->getCommandName(), $sessionId); if ($event->getCommandName() === 'aggregate') { if (isset($this->lsidByRequestId[$requestId])) { throw new UnexpectedValueException('Previous command observed for request ID: ' . $requestId); } $this->lsidByRequestId[$requestId] = $sessionId; } if ($event->getCommandName() === 'getMore') { $cursorId = (string) $event->getCommand()->getMore; if ( ! isset($this->lsidByCursorId[$cursorId])) { throw new UnexpectedValueException('No previous command observed for cursor ID: ' . $cursorId); } printf("getMore used same session as aggregate: %s\n", $sessionId === $this->lsidByCursorId[$cursorId] ? 'yes' : 'no'); } if ($event->getCommandName() === 'killCursors') { $cursorId = (string) $event->getCommand()->cursors[0]; if ( ! isset($this->lsidByCursorId[$cursorId])) { throw new UnexpectedValueException('No previous command observed for cursor ID: ' . $cursorId); } printf("killCursors used same session as aggregate: %s\n", $sessionId === $this->lsidByCursorId[$cursorId] ? 
'yes' : 'no'); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { /* Associate the aggregate's session ID with its cursor ID so it can be * looked up by the subsequent getMore or killCursors */ if ($event->getCommandName() === 'aggregate') { $cursorId = (string) $event->getReply()->cursor->id; $requestId = $event->getRequestId(); $this->lsidByCursorId[$cursorId] = $this->lsidByRequestId[$requestId]; } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } (new Test)->executeCommand(); ?> ===DONE=== --EXPECTF-- aggregate session ID: %x getMore session ID: %x getMore used same session as aggregate: yes aggregate session ID: %x getMore session ID: %x getMore used same session as aggregate: yes aggregate session ID: %x aggregate session ID: %x killCursors session ID: %x killCursors used same session as aggregate: yes killCursors session ID: %x killCursors used same session as aggregate: yes Unique session IDs used: 2 ===DONE=== mongodb-1.6.1/tests/cursor/bug1152-002.phpt0000644000076500000240000001120113572250760017407 0ustar alcaeusstaff--TEST-- PHPC-1152: Command cursors should use the same session for getMore and killCursors (explicit) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$match' => new stdClass]], 'cursor' => ['batchSize' => 2], ]); $session = $manager->startSession(); MongoDB\Driver\Monitoring\addSubscriber($this); /* This uses the same sequencing as the implicit session test; however, * we should expect all commands (aggregate, getMore, and killCursors) * to use the same explicit session ID. */ $cursor = $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); $cursor->toArray(); $cursor = $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); $cursor->toArray(); $cursor = $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); $cursor = $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); unset($cursor); MongoDB\Driver\Monitoring\removeSubscriber($this); /* We should expect one unique session ID over the course of the test, * since all commands used the same explicit session. */ printf("Unique session IDs used: %d\n", count(array_unique($this->lsidByRequestId))); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { $requestId = $event->getRequestId(); $sessionId = bin2hex((string) $event->getCommand()->lsid->id); printf("%s session ID: %s\n", $event->getCommandName(), $sessionId); if ($event->getCommandName() === 'aggregate') { if (isset($this->lsidByRequestId[$requestId])) { throw new UnexpectedValueException('Previous command observed for request ID: ' . $requestId); } $this->lsidByRequestId[$requestId] = $sessionId; } if ($event->getCommandName() === 'getMore') { $cursorId = (string) $event->getCommand()->getMore; if ( ! isset($this->lsidByCursorId[$cursorId])) { throw new UnexpectedValueException('No previous command observed for cursor ID: ' . $cursorId); } printf("getMore used same session as aggregate: %s\n", $sessionId === $this->lsidByCursorId[$cursorId] ? 'yes' : 'no'); } if ($event->getCommandName() === 'killCursors') { $cursorId = (string) $event->getCommand()->cursors[0]; if ( ! 
isset($this->lsidByCursorId[$cursorId])) { throw new UnexpectedValueException('No previous command observed for cursor ID: ' . $cursorId); } printf("killCursors used same session as aggregate: %s\n", $sessionId === $this->lsidByCursorId[$cursorId] ? 'yes' : 'no'); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { /* Associate the aggregate's session ID with its cursor ID so it can be * looked up by the subsequent getMore or killCursors */ if ($event->getCommandName() === 'aggregate') { $cursorId = (string) $event->getReply()->cursor->id; $requestId = $event->getRequestId(); $this->lsidByCursorId[$cursorId] = $this->lsidByRequestId[$requestId]; } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } (new Test)->executeCommand(); ?> ===DONE=== --EXPECTF-- aggregate session ID: %x getMore session ID: %x getMore used same session as aggregate: yes aggregate session ID: %x getMore session ID: %x getMore used same session as aggregate: yes aggregate session ID: %x aggregate session ID: %x killCursors session ID: %x killCursors used same session as aggregate: yes killCursors session ID: %x killCursors used same session as aggregate: yes Unique session IDs used: 1 ===DONE=== mongodb-1.6.1/tests/cursor/bug1162-001.phpt0000644000076500000240000000167013572250760017420 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor segfault dumping cursor while iterating with IteratorIterator --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query([], ['batchSize' => 2]); $cursor = $manager->executeQuery(NS, $query); $iterator = new IteratorIterator($cursor); $iterator->rewind(); var_dump($cursor); $iterator->next(); var_dump($cursor); $iterator->next(); var_dump($cursor); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Cursor)#%d (%d) {%A } object(MongoDB\Driver\Cursor)#%d (%d) {%A } object(MongoDB\Driver\Cursor)#%d (%d) {%A } ===DONE=== mongodb-1.6.1/tests/cursor/bug1419-001.phpt0000644000076500000240000000230613572250760017422 0ustar alcaeusstaff--TEST-- PHPC-1419: error labels from getMore are not exposed --SKIPIF-- --FILE-- selectServer(new \MongoDB\Driver\ReadPreference('primary')); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $server->executeQuery(NS, new \MongoDB\Driver\Query([], ['batchSize' => 1])); $iterator = new IteratorIterator($cursor); configureTargetedFailPoint( $server, 'failCommand', [ 'times' => 1] , [ 'errorCode' => 280, 'failCommands' => ['getMore'] ] ); try { $iterator->next(); } catch (\MongoDB\Driver\Exception\ServerException $e) { var_dump($e->hasErrorLabel('NonResumableChangeStreamError')); } ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor-001.phpt0000644000076500000240000000054313572250760017644 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor implements MongoDB\Driver\CursorInterface and Traversable --FILE-- ===DONE=== --EXPECT-- bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor-IteratorIterator-001.phpt0000644000076500000240000000150513572250760023144 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor query result iteration through IteratorIterator --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $cursor = 
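// The {x: 1} query below matches both inserted documents; wrapping the Cursor
// in IteratorIterator should therefore yield each of them exactly once.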
$manager->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1))); foreach (new IteratorIterator($cursor) as $document) { var_dump($document); } ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-IteratorIterator-002.phpt0000644000076500000240000000176313572250760023153 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor command result iteration through IteratorIterator --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command(array( 'aggregate' => COLLECTION_NAME, 'pipeline' => array( array('$match' => array('x' => 1)), ), 'cursor' => new stdClass, )); $cursor = $manager->executeCommand(DATABASE_NAME, $command); foreach (new IteratorIterator($cursor) as $document) { var_dump($document); } ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-IteratorIterator-003.phpt0000644000076500000240000000202513572250760023144 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor iteration beyond last document (find command) --SKIPIF-- --FILE-- insert(['_id' => 1]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); $iterator = new IteratorIterator($cursor); $iterator->rewind(); var_dump($iterator->current()); $iterator->next(); var_dump($iterator->current()); // libmongoc throws on superfluous iteration of find command cursor (CDRIVER-1234) echo throws(function() use ($iterator) { $iterator->next(); }, 'MongoDB\Driver\Exception\RuntimeException'), "\n"; ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["_id"]=> int(1) } NULL OK: Got MongoDB\Driver\Exception\RuntimeException Cannot advance a completed or failed cursor. ===DONE=== mongodb-1.6.1/tests/cursor/cursor-IteratorIterator-004.phpt0000644000076500000240000000213113572250760023143 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor iteration beyond last document (OP_QUERY) --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['_id' => 1]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array(), array('batchSize' => 2))); $iterator = new IteratorIterator($cursor); $iterator->rewind(); var_dump($iterator->current()); $iterator->next(); var_dump($iterator->current()); // libmongoc throws on superfluous iteration of OP_QUERY cursor (CDRIVER-1234) echo throws(function() use ($iterator) { $iterator->next(); }, 'MongoDB\Driver\Exception\RuntimeException'), "\n"; ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["_id"]=> int(1) } NULL OK: Got MongoDB\Driver\Exception\RuntimeException Cannot advance a completed or failed cursor. ===DONE=== mongodb-1.6.1/tests/cursor/cursor-NoRewindIterator-001.phpt0000644000076500000240000000315013572250760023076 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor query result iteration through NoRewindIterator --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1))); /* IteratorIterator requires either rewind() or next() to be called at least * once to populate its current.data pointer, which valid() checks. 
Since next() * would skip the first element and NoRewindIterator::rewind() is a NOP, we must * explicitly call IteratorIterator::rewind() before composing it. */ $iteratorIterator = new IteratorIterator($cursor); $iteratorIterator->rewind(); $noRewindIterator = new NoRewindIterator($iteratorIterator); foreach ($noRewindIterator as $document) { var_dump($document); } /* NoRewindIterator::rewind() is a NOP, so attempting to iterate a second time * or calling rewind() directly accomplishes nothing. That said, it does avoid * the exception one would otherwise get invoking the rewind handler after * iteration has started. */ foreach ($noRewindIterator as $document) { var_dump($document); } $noRewindIterator->rewind(); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-destruct-001.phpt0000644000076500000240000000311213572250760021472 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor destruct should kill a live cursor --SKIPIF-- --FILE-- executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(array('serverStatus' => 1))); $result = current($cursor->toArray()); if (isset($result->metrics->cursor->open->total)) { return $result->metrics->cursor->open->total; } if (isset($result->cursors->totalOpen)) { return $result->cursors->totalOpen; } throw new RuntimeException('Could not find number of open cursors in serverStatus'); } $manager = new MongoDB\Driver\Manager(URI); // Select a specific server for future operations to avoid mongos switching in sharded clusters $server = $manager->selectServer(new \MongoDB\Driver\ReadPreference('primary')); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1)); $bulk->insert(array('_id' => 2)); $bulk->insert(array('_id' => 3)); $server->executeBulkWrite(NS, $bulk); $numOpenCursorsBeforeQuery = getNumOpenCursors($server); $cursor = $server->executeQuery(NS, new MongoDB\Driver\Query(array(), array('batchSize' => 2))); var_dump($cursor->isDead()); var_dump(getNumOpenCursors($server) == $numOpenCursorsBeforeQuery + 1); unset($cursor); var_dump(getNumOpenCursors($server) == $numOpenCursorsBeforeQuery); ?> ===DONE=== --EXPECT-- bool(false) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor-get_iterator-001.phpt0000644000076500000240000000247513572250760022340 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor get_iterator handler does not yield multiple iterators (foreach) --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); echo "\nFirst foreach statement:\n"; foreach ($cursor as $document) { var_dump($document); } echo "\nSecond foreach statement:\n"; try { foreach ($cursor as $document) { echo "FAILED: get_iterator should not yield multiple iterators\n"; } } catch (MongoDB\Driver\Exception\LogicException $e) { printf("LogicException: %s\n", $e->getMessage()); } ?> ===DONE=== --EXPECTF-- Inserted: 3 First foreach statement: object(stdClass)#%d (1) { ["_id"]=> int(0) } object(stdClass)#%d (1) { ["_id"]=> int(1) } object(stdClass)#%d (1) { ["_id"]=> int(2) } Second foreach statement: LogicException: Cursors cannot yield multiple iterators ===DONE=== mongodb-1.6.1/tests/cursor/cursor-get_iterator-002.phpt0000644000076500000240000000223313572250760022331 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor 
get_iterator handler does not yield multiple iterators (IteratorIterator) --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); echo "\nFirst IteratorIterator wrapping:\n"; var_dump(new IteratorIterator($cursor)); echo "\nSecond IteratorIterator wrapping:\n"; try { var_dump(new IteratorIterator($cursor)); } catch (MongoDB\Driver\Exception\LogicException $e) { printf("LogicException: %s\n", $e->getMessage()); } ?> ===DONE=== --EXPECTF-- Inserted: 3 First IteratorIterator wrapping: object(IteratorIterator)#%d (0) { } Second IteratorIterator wrapping: LogicException: Cursors cannot yield multiple iterators ===DONE=== mongodb-1.6.1/tests/cursor/cursor-get_iterator-003.phpt0000644000076500000240000000241613572250760022335 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor get_iterator handler does not yield multiple iterators (toArray()) --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); echo "\nFirst Cursor::toArray():\n"; var_dump($cursor->toArray()); echo "\nSecond Cursor::toArray():\n"; try { var_dump($cursor->toArray()); } catch (MongoDB\Driver\Exception\LogicException $e) { printf("LogicException: %s\n", $e->getMessage()); } ?> ===DONE=== --EXPECTF-- Inserted: 3 First Cursor::toArray(): array(3) { [0]=> object(stdClass)#%d (1) { ["_id"]=> int(0) } [1]=> object(stdClass)#%d (1) { ["_id"]=> int(1) } [2]=> object(stdClass)#%d (1) { ["_id"]=> int(2) } } Second Cursor::toArray(): LogicException: Cursors cannot yield multiple iterators ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-001.phpt0000644000076500000240000000163313572250760021305 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor query result iteration with batchSize requiring getmore with full batches --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array(), array('batchSize' => 2))); foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } ?> ===DONE=== --EXPECT-- Inserted: 6 0 => {_id: 0} 1 => {_id: 1} 2 => {_id: 2} 3 => {_id: 3} 4 => {_id: 4} 5 => {_id: 5} ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-002.phpt0000644000076500000240000000162113572250760021303 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor query result iteration with batchSize requiring getmore with non-full batches --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array(), array('batchSize' => 2))); foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } ?> ===DONE=== --EXPECT-- Inserted: 5 0 => {_id: 0} 1 => {_id: 1} 2 => {_id: 2} 3 => {_id: 3} 4 => {_id: 4} ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-003.phpt0000644000076500000240000000210013572250760021275 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor command result iteration with batchSize requiring getmore with full batches --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = 
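// Six documents are inserted (see "Inserted: 6" below) while the aggregate
// cursor uses a batchSize of 2, so draining the results requires additional
// full getMore batches.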
$manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $command = new MongoDB\Driver\Command(array( 'aggregate' => COLLECTION_NAME, 'pipeline' => array( array('$match' => new stdClass), ), 'cursor' => array('batchSize' => 2), )); $cursor = $manager->executeCommand(DATABASE_NAME, $command); foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } ?> ===DONE=== --EXPECT-- Inserted: 6 0 => {_id: 0} 1 => {_id: 1} 2 => {_id: 2} 3 => {_id: 3} 4 => {_id: 4} 5 => {_id: 5} ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-004.phpt0000644000076500000240000000206613572250760021311 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor command result iteration with batchSize requiring getmore with non-full batches --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $command = new MongoDB\Driver\Command(array( 'aggregate' => COLLECTION_NAME, 'pipeline' => array( array('$match' => new stdClass), ), 'cursor' => array('batchSize' => 2), )); $cursor = $manager->executeCommand(DATABASE_NAME, $command); foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } ?> ===DONE=== --EXPECT-- Inserted: 5 0 => {_id: 0} 1 => {_id: 1} 2 => {_id: 2} 3 => {_id: 3} 4 => {_id: 4} ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-005.phpt0000644000076500000240000000310413572250760021304 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor query result iteration with getmore failure --SKIPIF-- =", "3.6"); ?> --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $query = new MongoDB\Driver\Query([], ['batchSize' => 2]); $cursor = $manager->executeQuery(NS, $query); failGetMore($manager); throws(function() use ($cursor) { foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } }, "MongoDB\Driver\Exception\ConnectionException"); ?> ===DONE=== --CLEAN-- --EXPECT-- Inserted: 5 0 => {_id: 0} 1 => {_id: 1} OK: Got MongoDB\Driver\Exception\ConnectionException ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-006.phpt0000644000076500000240000000331713572250760021313 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor command result iteration with getmore failure --SKIPIF-- =", "3.6"); ?> --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ ['$match' => new stdClass], ], 'cursor' => ['batchSize' => 2], ]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); failGetMore($manager); throws(function() use ($cursor) { foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } }, "MongoDB\Driver\Exception\ConnectionException"); ?> ===DONE=== --CLEAN-- --EXPECT-- Inserted: 5 0 => {_id: 0} 1 => {_id: 1} OK: Got MongoDB\Driver\Exception\ConnectionException ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-007.phpt0000644000076500000240000000307313572250760021313 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor query result iteration with getmore failure --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $query = new 
MongoDB\Driver\Query([], ['batchSize' => 2]); $cursor = $manager->executeQuery(NS, $query); failGetMore($manager); throws(function() use ($cursor) { foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } }, "MongoDB\Driver\Exception\ServerException"); ?> ===DONE=== --CLEAN-- --EXPECT-- Inserted: 5 0 => {_id: 0} 1 => {_id: 1} OK: Got MongoDB\Driver\Exception\ServerException ===DONE=== mongodb-1.6.1/tests/cursor/cursor-getmore-008.phpt0000644000076500000240000000330613572250760021313 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor command result iteration with getmore failure --SKIPIF-- --FILE-- insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ ['$match' => new stdClass], ], 'cursor' => ['batchSize' => 2], ]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); failGetMore($manager); throws(function() use ($cursor) { foreach ($cursor as $i => $document) { printf("%d => {_id: %d}\n", $i, $document->_id); } }, "MongoDB\Driver\Exception\ServerException"); ?> ===DONE=== --CLEAN-- --EXPECT-- Inserted: 5 0 => {_id: 0} 1 => {_id: 1} OK: Got MongoDB\Driver\Exception\ServerException ===DONE=== mongodb-1.6.1/tests/cursor/cursor-isDead-001.phpt0000644000076500000240000000137013572250760021032 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::isDead() with basic iteration (find command) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); foreach ($cursor as $_) { var_dump($cursor->isDead()); } var_dump($cursor->isDead()); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(false) bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor-isDead-002.phpt0000644000076500000240000000152413572250760021034 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::isDead() with IteratorIterator (find command) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); $iterator = new IteratorIterator($cursor); $iterator->rewind(); for ($i = 0; $i < 3; $i++) { var_dump($cursor->isDead()); $iterator->next(); } var_dump($cursor->isDead()); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(false) bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor-isDead-003.phpt0000644000076500000240000000144213572250760021034 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::isDead() with basic iteration (OP_QUERY) --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); foreach ($cursor as $_) { var_dump($cursor->isDead()); } var_dump($cursor->isDead()); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(false) bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor-isDead-004.phpt0000644000076500000240000000157613572250760021045 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::isDead() with IteratorIterator (OP_QUERY) --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); 
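// Three documents with a batchSize of 2 force a getMore, so isDead() stays
// false while batches remain and only reports true after iteration advances
// past the final document (see the expected output below).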
$cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); $iterator = new IteratorIterator($cursor); $iterator->rewind(); for ($i = 0; $i < 3; $i++) { var_dump($cursor->isDead()); $iterator->next(); } var_dump($cursor->isDead()); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(false) bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor-iterator_handlers-001.phpt0000644000076500000240000000401613572250760023352 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor iterator handlers --SKIPIF-- --FILE-- name = (string) $name; } public function dump() { $key = parent::key(); $current = parent::current(); $position = is_int($key) ? (string) $key : 'null'; $document = is_object($current) ? sprintf("{_id: %d}", $current->_id) : 'null'; printf("%s: %s => %s\n", $this->name, $position, $document); } } $manager = new MongoDB\Driver\Manager(URI); $bulkWrite = new MongoDB\Driver\BulkWrite; for ($i = 0; $i < 5; $i++) { $bulkWrite->insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); $a = new MyIteratorIterator($cursor, 'A'); echo "\nBefore rewinding, position and current element are not populated:\n"; $a->dump(); echo "\nAfter rewinding, current element is populated:\n"; $a->rewind(); $a->dump(); echo "\nAfter advancing, next element is populated:\n"; $a->next(); $a->dump(); echo "\nAdvancing through remaining elements:\n"; $a->next(); $a->dump(); $a->next(); $a->dump(); $a->next(); $a->dump(); echo "\nAdvancing beyond the last element:\n"; $a->next(); $a->dump(); ?> ===DONE=== --EXPECT-- Inserted: 5 Before rewinding, position and current element are not populated: A: null => null After rewinding, current element is populated: A: 0 => {_id: 0} After advancing, next element is populated: A: 1 => {_id: 1} Advancing through remaining elements: A: 2 => {_id: 2} A: 3 => {_id: 3} A: 4 => {_id: 4} Advancing beyond the last element: A: null => null ===DONE=== mongodb-1.6.1/tests/cursor/cursor-rewind-001.phpt0000644000076500000240000000451713572250760021137 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor cannot rewind after starting iteration --SKIPIF-- --FILE-- name = (string) $name; } public function dump() { $key = parent::key(); $current = parent::current(); $position = is_int($key) ? (string) $key : 'null'; $document = is_object($current) ? 
sprintf("{_id: %d}", $current->_id) : 'null'; printf("%s: %s => %s\n", $this->name, $position, $document); } } $manager = new MongoDB\Driver\Manager(URI); $bulkWrite = new MongoDB\Driver\BulkWrite; for ($i = 0; $i < 5; $i++) { $bulkWrite->insert(array('_id' => $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); $a = new MyIteratorIterator($cursor, 'A'); echo "\nRewinding sets the current element:\n"; $a->rewind(); $a->dump(); echo "\nRewinding again is OK since we haven't advanced:\n"; $a->rewind(); $a->dump(); echo "\nAdvancing populates the next element:\n"; $a->next(); $a->dump(); echo "\nRewinding after advancing is not OK:\n"; try { $a->rewind(); echo "FAILED: rewind should throw if iteration has started\n"; } catch (MongoDB\Driver\Exception\LogicException $e) { printf("LogicException: %s\n", $e->getMessage()); } echo "\nAdvancing through remaining elements:\n"; $a->next(); $a->dump(); $a->next(); $a->dump(); $a->next(); $a->dump(); echo "\nAdvancing beyond the last element:\n"; $a->next(); $a->dump(); ?> ===DONE=== --EXPECT-- Inserted: 5 Rewinding sets the current element: A: 0 => {_id: 0} Rewinding again is OK since we haven't advanced: A: 0 => {_id: 0} Advancing populates the next element: A: 1 => {_id: 1} Rewinding after advancing is not OK: LogicException: Cursors cannot rewind after starting iteration Advancing through remaining elements: A: 2 => {_id: 2} A: 3 => {_id: 3} A: 4 => {_id: 4} Advancing beyond the last element: A: null => null ===DONE=== mongodb-1.6.1/tests/cursor/cursor-session-001.phpt0000644000076500000240000000311313572250760021321 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor debug output for query cursor includes explicit session --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query([], ['batchSize' => 2]); $session = $manager->startSession(); $cursor = $manager->executeQuery(NS, $query, ['session' => $session]); $iterator = new IteratorIterator($cursor); $iterator->rewind(); $iterator->next(); printf("Cursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 'yes' : 'no'); var_dump($cursor); $iterator->next(); /* Per PHPC-1161, the Cursor will free a reference to the Session as soon as it * is exhausted. While this is primarily done to ensure implicit sessions for * command cursors are returned to the pool ASAP, it also applies to explicit * sessions. */ printf("\nCursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 
'yes' : 'no'); var_dump($cursor); ?> ===DONE=== --EXPECTF-- Cursor ID is zero: no object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> object(MongoDB\Driver\Session)#%d (%d) { %a } %a } Cursor ID is zero: yes object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> NULL %a } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-session-002.phpt0000644000076500000240000000261513572250760021330 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor debug output for query cursor omits implicit session --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query([], ['batchSize' => 2]); $cursor = $manager->executeQuery(NS, $query); $iterator = new IteratorIterator($cursor); $iterator->rewind(); $iterator->next(); /* Implicit sessions for query cursors are never exposed to PHPC, as they are * handled internally by libmongoc. Cursor debug ouput should never report such * sessions. */ printf("Cursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 'yes' : 'no'); var_dump($cursor); $iterator->next(); printf("\nCursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 'yes' : 'no'); var_dump($cursor); ?> ===DONE=== --EXPECTF-- Cursor ID is zero: no object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> NULL %a } Cursor ID is zero: yes object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> NULL %a } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-session-003.phpt0000644000076500000240000000330513572250760021326 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor debug output for command cursor includes explicit session --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$match' => new stdClass]], 'cursor' => ['batchSize' => 2], ]); $session = $manager->startSession(); $cursor = $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); $iterator = new IteratorIterator($cursor); $iterator->rewind(); $iterator->next(); printf("Cursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 'yes' : 'no'); var_dump($cursor); $iterator->next(); /* Per PHPC-1161, the Cursor will free a reference to the Session as soon as it * is exhausted. While this is primarily done to ensure implicit sessions for * command cursors are returned to the pool ASAP, it also applies to explicit * sessions. */ printf("\nCursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 
'yes' : 'no'); var_dump($cursor); ?> ===DONE=== --EXPECTF-- Cursor ID is zero: no object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> object(MongoDB\Driver\Session)#%d (%d) { %a } %a } Cursor ID is zero: yes object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> NULL %a } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-session-004.phpt0000644000076500000240000000322413572250760021327 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor debug output for command cursor includes implicit session --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$match' => new stdClass]], 'cursor' => ['batchSize' => 2], ]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $iterator = new IteratorIterator($cursor); $iterator->rewind(); $iterator->next(); printf("Cursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 'yes' : 'no'); var_dump($cursor); $iterator->next(); /* Unlike implicit sessions for query cursors, which are handled internally by * libmongoc, PHPC-1152 emulates its own implicit sessions for command cursors * in order to ensure that command cursors always share the same session as the * originating command. */ printf("\nCursor ID is zero: %s\n", (string) $cursor->getId() === '0' ? 'yes' : 'no'); var_dump($cursor); ?> ===DONE=== --EXPECTF-- Cursor ID is zero: no object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> object(MongoDB\Driver\Session)#%d (%d) { %a } %a } Cursor ID is zero: yes object(MongoDB\Driver\Cursor)#%d (%d) { %a ["session"]=> NULL %a } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-setTypeMap_error-001.phpt0000644000076500000240000000557113572250760023154 0ustar alcaeusstaff--TEST-- Cursor::setTypeMap(): Type classes must be instantiatable and implement Unserializable --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query([])); foreach ($types as $type) { foreach ($classes as $class) { $typeMap = [$type => $class]; printf("Test typeMap: %s\n", json_encode($typeMap)); echo throws(function() use ($cursor, $typeMap) { $cursor->setTypeMap($typeMap); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo "\n"; } } ?> ===DONE=== --EXPECT-- Test typeMap: {"array":"MissingClass"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist Test typeMap: {"array":"MyAbstractDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyAbstractDocument is not instantiatable Test typeMap: {"array":"MyDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyDocument does not implement MongoDB\BSON\Unserializable Test typeMap: {"array":"MongoDB\\BSON\\Unserializable"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MongoDB\BSON\Unserializable is not instantiatable Test typeMap: {"document":"MissingClass"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist Test typeMap: {"document":"MyAbstractDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyAbstractDocument is not instantiatable Test typeMap: {"document":"MyDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyDocument does not implement MongoDB\BSON\Unserializable Test typeMap: {"document":"MongoDB\\BSON\\Unserializable"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MongoDB\BSON\Unserializable is not instantiatable Test typeMap: 
{"root":"MissingClass"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist Test typeMap: {"root":"MyAbstractDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyAbstractDocument is not instantiatable Test typeMap: {"root":"MyDocument"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyDocument does not implement MongoDB\BSON\Unserializable Test typeMap: {"root":"MongoDB\\BSON\\Unserializable"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MongoDB\BSON\Unserializable is not instantiatable ===DONE=== mongodb-1.6.1/tests/cursor/cursor-setTypeMap_error-002.phpt0000644000076500000240000000232513572250760023147 0ustar alcaeusstaff--TEST-- Cursor::setTypeMap() error does not alter current element --SKIPIF-- --FILE-- insert(['_id' => 1]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); $iterator = new IteratorIterator($cursor); $iterator->rewind(); var_dump($iterator->current()); echo throws(function() use ($cursor) { $cursor->setTypeMap(['root' => 'MissingClass']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; /* IteratorIterator only invokes spl_dual_it_fetch() for rewind() and next(). * We rewind a second time to ensure that the Cursor iterator's current element * is fetched again and remains unchanged. */ $iterator->rewind(); var_dump($iterator->current()); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["_id"]=> int(1) } OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist object(stdClass)#%d (%d) { ["_id"]=> int(1) } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-setTypeMap_error-003.phpt0000644000076500000240000000401213572250760023143 0ustar alcaeusstaff--TEST-- Cursor::setTypeMap(): fieldPaths must be an array, with single key/string elements --SKIPIF-- --FILE-- 'MissingClass'], ['abstract' => 'MyAbstractDocument'], ['my' => 'MyDocument'], ['unserialize' => 'MongoDB\BSON\Unserializable'], ]; $manager = new MongoDB\Driver\Manager(URI); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); foreach ($fieldPaths as $fieldPath) { $typeMap = ['fieldPaths' => $fieldPath]; printf("Test typeMap: %s\n", json_encode($typeMap)); echo throws(function() use ($cursor, $typeMap) { $cursor->setTypeMap($typeMap); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; } ?> ===DONE=== --EXPECT-- Test typeMap: {"fieldPaths":"notAnArray"} OK: Got MongoDB\Driver\Exception\InvalidArgumentException The 'fieldPaths' element is not an array Test typeMap: {"fieldPaths":["notAssociative"]} OK: Got MongoDB\Driver\Exception\InvalidArgumentException The 'fieldPaths' element is not an associative array Test typeMap: {"fieldPaths":{"missing":"MissingClass"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MissingClass does not exist Test typeMap: {"fieldPaths":{"abstract":"MyAbstractDocument"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyAbstractDocument is not instantiatable Test typeMap: {"fieldPaths":{"my":"MyDocument"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MyDocument does not implement MongoDB\BSON\Unserializable Test typeMap: {"fieldPaths":{"unserialize":"MongoDB\\BSON\\Unserializable"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException Class MongoDB\BSON\Unserializable is not instantiatable ===DONE=== mongodb-1.6.1/tests/cursor/cursor-setTypeMap_error-004.phpt0000644000076500000240000000353113572250760023151 
0ustar alcaeusstaff--TEST-- Cursor::setTypeMap(): invalid fieldPaths keys --SKIPIF-- --FILE-- 'MyDocument'], ['.foo' => 'MyDocument'], ['...' => 'MyDocument'], ['foo.' => 'MyDocument'], ['foo..bar' => 'MyDocument'], ]; $manager = new MongoDB\Driver\Manager(URI); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); foreach ($fieldPaths as $fieldPath) { $typeMap = ['fieldPaths' => $fieldPath]; printf("Test typeMap: %s\n", json_encode($typeMap)); echo throws(function() use ($cursor, $typeMap) { $cursor->setTypeMap($typeMap); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo "\n"; } ?> ===DONE=== --EXPECT-- Test typeMap: {"fieldPaths":{"":"MyDocument"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException The 'fieldPaths' element may not be an empty string Test typeMap: {"fieldPaths":{".foo":"MyDocument"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException A 'fieldPaths' key may not start with a '.' Test typeMap: {"fieldPaths":{"...":"MyDocument"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException A 'fieldPaths' key may not start with a '.' Test typeMap: {"fieldPaths":{"foo.":"MyDocument"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException A 'fieldPaths' key may not end with a '.' Test typeMap: {"fieldPaths":{"foo..bar":"MyDocument"}} OK: Got MongoDB\Driver\Exception\InvalidArgumentException A 'fieldPaths' key may not have an empty segment ===DONE=== mongodb-1.6.1/tests/cursor/cursor-tailable-001.phpt0000644000076500000240000000351613572250760021422 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor tailable iteration --SKIPIF-- --FILE-- insert(['_id' => $i]); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted %d document(s): %s\n", $writeResult->getInsertedCount(), implode(', ', range($from, $to))); } $manager = new MongoDB\Driver\Manager(URI); $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'create' => COLLECTION_NAME, 'capped' => true, 'size' => 1048576, ])); insert($manager, 1, 3); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['tailable' => true])); $it = new IteratorIterator($cursor); $numAwaitAttempts = 0; $maxAwaitAttempts = 7; for ($it->rewind(); $numAwaitAttempts < $maxAwaitAttempts; $it->next()) { $document = $it->current(); if ($document !== null) { printf("{_id: %d}\n", $document->_id); continue; } if ($numAwaitAttempts === 2) { insert($manager, 4, 6); } if ($numAwaitAttempts === 5) { insert($manager, 7, 9); } echo "Awaiting results...\n"; $numAwaitAttempts += 1; } ?> ===DONE=== --EXPECT-- Inserted 3 document(s): 1, 2, 3 {_id: 1} {_id: 2} {_id: 3} Awaiting results... Awaiting results... Inserted 3 document(s): 4, 5, 6 Awaiting results... {_id: 4} {_id: 5} {_id: 6} Awaiting results... Awaiting results... Inserted 3 document(s): 7, 8, 9 Awaiting results... {_id: 7} {_id: 8} {_id: 9} Awaiting results... 
===DONE=== mongodb-1.6.1/tests/cursor/cursor-tailable-002.phpt0000644000076500000240000000361213572250760021420 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor tailable iteration with awaitData option --SKIPIF-- --FILE-- insert(['_id' => $i]); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted %d document(s): %s\n", $writeResult->getInsertedCount(), implode(', ', range($from, $to))); } $manager = new MongoDB\Driver\Manager(URI); $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'create' => COLLECTION_NAME, 'capped' => true, 'size' => 1048576, ])); insert($manager, 1, 3); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['tailable' => true, 'awaitData' => true])); $it = new IteratorIterator($cursor); $numAwaitAttempts = 0; $maxAwaitAttempts = 7; for ($it->rewind(); $numAwaitAttempts < $maxAwaitAttempts; $it->next()) { $document = $it->current(); if ($document !== null) { printf("{_id: %d}\n", $document->_id); continue; } if ($numAwaitAttempts === 2) { insert($manager, 4, 6); } if ($numAwaitAttempts === 5) { insert($manager, 7, 9); } echo "Awaiting results...\n"; $numAwaitAttempts += 1; } ?> ===DONE=== --EXPECT-- Inserted 3 document(s): 1, 2, 3 {_id: 1} {_id: 2} {_id: 3} Awaiting results... Awaiting results... Inserted 3 document(s): 4, 5, 6 Awaiting results... {_id: 4} {_id: 5} {_id: 6} Awaiting results... Awaiting results... Inserted 3 document(s): 7, 8, 9 Awaiting results... {_id: 7} {_id: 8} {_id: 9} Awaiting results... ===DONE=== mongodb-1.6.1/tests/cursor/cursor-tailable-003.phpt0000644000076500000240000000233613572250760021423 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor tailable iteration with awaitData and maxAwaitTimeMS options --SKIPIF-- --FILE-- executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'create' => COLLECTION_NAME, 'capped' => true, 'size' => 1048576, ])); $bulkWrite = new MongoDB\Driver\BulkWrite; $bulkWrite->insert(['_id' => 1]); $manager->executeBulkWrite(NS, $bulkWrite); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], [ 'tailable' => true, 'awaitData' => true, 'maxAwaitTimeMS' => 10, ])); $it = new IteratorIterator($cursor); $it->rewind(); printf("{_id: %d}\n", $it->current()->_id); $it->next(); $startTime = microtime(true); echo "Awaiting results...\n"; $it->next(); printf("Waited for %.6f seconds\n", microtime(true) - $startTime); // Sometimes the cursor will wait for 0.0099 seconds and sometimes it will wait for 0.01. ?> ===DONE=== --EXPECTF-- {_id: 1} Awaiting results... 
Waited for 0.0%d seconds ===DONE=== mongodb-1.6.1/tests/cursor/cursor-tailable_error-001.phpt0000644000076500000240000000413013572250760022624 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor collection dropped during tailable iteration --SKIPIF-- --FILE-- insert(['_id' => $i]); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted %d document(s): %s\n", $writeResult->getInsertedCount(), implode(', ', range($from, $to))); } $manager = new MongoDB\Driver\Manager(URI); $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'create' => COLLECTION_NAME, 'capped' => true, 'size' => 1048576, ])); insert($manager, 1, 3); echo throws(function() use ($manager) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['tailable' => true])); $it = new IteratorIterator($cursor); $numAwaitAttempts = 0; $maxAwaitAttempts = 7; for ($it->rewind(); $numAwaitAttempts < $maxAwaitAttempts; $it->next()) { $document = $it->current(); if ($document !== null) { printf("{_id: %d}\n", $document->_id); continue; } if ($numAwaitAttempts === 2) { insert($manager, 4, 6); } if ($numAwaitAttempts === 5) { $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['drop' => COLLECTION_NAME])); } echo "Awaiting results...\n"; $numAwaitAttempts += 1; } }, 'MongoDB\Driver\Exception\RuntimeException'), "\n"; ?> ===DONE=== --EXPECTF-- Inserted 3 document(s): 1, 2, 3 {_id: 1} {_id: 2} {_id: 3} Awaiting results... Awaiting results... Inserted 3 document(s): 4, 5, 6 Awaiting results... {_id: 4} {_id: 5} {_id: 6} Awaiting results... Awaiting results... Awaiting results... OK: Got MongoDB\Driver\Exception\RuntimeException %Scollection dropped%S ===DONE=== mongodb-1.6.1/tests/cursor/cursor-tailable_error-002.phpt0000644000076500000240000000447513572250760022641 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor cursor killed during tailable iteration --SKIPIF-- --FILE-- insert(['_id' => $i]); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted %d document(s): %s\n", $writeResult->getInsertedCount(), implode(', ', range($from, $to))); } $manager = new MongoDB\Driver\Manager(URI); $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'create' => COLLECTION_NAME, 'capped' => true, 'size' => 1048576, ])); insert($manager, 1, 3); echo throws(function() use ($manager) { $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['tailable' => true])); $it = new IteratorIterator($cursor); $numAwaitAttempts = 0; $maxAwaitAttempts = 7; for ($it->rewind(); $numAwaitAttempts < $maxAwaitAttempts; $it->next()) { $document = $it->current(); if ($document !== null) { printf("{_id: %d}\n", $document->_id); continue; } if ($numAwaitAttempts === 2) { insert($manager, 4, 6); } if ($numAwaitAttempts === 5) { $cursor->getServer()->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command([ 'killCursors' => COLLECTION_NAME, 'cursors' => [ $cursor->getId() ], ])); } echo "Awaiting results...\n"; $numAwaitAttempts += 1; } }, 'MongoDB\Driver\Exception\RuntimeException'), "\n"; ?> ===DONE=== --EXPECTF-- Inserted 3 document(s): 1, 2, 3 {_id: 1} {_id: 2} {_id: 3} Awaiting results... Awaiting results... Inserted 3 document(s): 4, 5, 6 Awaiting results... {_id: 4} {_id: 5} {_id: 6} Awaiting results... Awaiting results... Awaiting results... 
OK: Got MongoDB\Driver\Exception\RuntimeException %r(Cursor not found, cursor id: \d+|cursor id \d+ not found|Cursor not found \(namespace: '.*', id: \d+\)\.)%r ===DONE=== mongodb-1.6.1/tests/cursor/cursor-toArray-001.phpt0000644000076500000240000000243013572250760021260 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::toArray() --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1))); echo "Dumping Cursor::toArray():\n"; var_dump($cursor->toArray()); // Execute the query a second time, since we cannot iterate twice $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1))); echo "\nDumping iterated Cursor:\n"; var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- Dumping Cursor::toArray(): array(2) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(1) } } Dumping iterated Cursor: array(2) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/cursor/cursor-toArray-002.phpt0000644000076500000240000000167213572250760021270 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor::toArray() respects type map --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => array(1, 2, 3))); $bulk->insert(array('_id' => 2, 'x' => array(4, 5, 6))); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array('x' => 1))); $cursor->setTypeMap(array("array" => "MyArrayObject")); $documents = $cursor->toArray(); var_dump($documents[0]->x instanceof MyArrayObject); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursor_error-001.phpt0000644000076500000240000000040513572250760021052 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Cursor cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyCursor may not inherit from final class (MongoDB\Driver\Cursor) in %s on line %d mongodb-1.6.1/tests/cursor/cursorinterface-001.phpt0000644000076500000240000000126213572250760021524 0ustar alcaeusstaff--TEST-- MongoDB\Driver\CursorInterface is implemented by MongoDB\Driver\Cursor --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1))); var_dump($cursor instanceof MongoDB\Driver\CursorInterface); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/cursor/cursorinterface-002.phpt0000644000076500000240000000034313572250760021524 0ustar alcaeusstaff--TEST-- MongoDB\Driver\CursorInterface extends Traversable --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/cursorid/cursorid-001.phpt0000644000076500000240000000144613572250760020501 0ustar alcaeusstaff--TEST-- MongoDB\Driver\CursorID BSON serialization --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); $cursorId = $cursor->getId(); hex_dump(fromPHP(['cid' => $cursorId])); ?> ===DONE=== --EXPECTF-- 0 : 12 00 00 00 12 63 69 64 00 %x %x %x %x %x %x %x [.....cid.%s] 10 : %x 00 [%s.] 
===DONE=== mongodb-1.6.1/tests/cursorid/cursorid-002.phpt0000644000076500000240000000263313572250760020501 0ustar alcaeusstaff--TEST-- MongoDB\Driver\CursorID BSON serialization for killCursors command --SKIPIF-- --FILE-- selectServer(new \MongoDB\Driver\ReadPreference('primary')); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $server->executeBulkWrite(NS, $bulk); $cursor = $server->executeQuery(NS, new MongoDB\Driver\Query([], ['batchSize' => 2])); $cursorId = $cursor->getId(); $command = new MongoDB\Driver\Command([ 'killCursors' => COLLECTION_NAME, 'cursors' => [ $cursorId ], ]); /* Since the killCursors command result includes cursor IDs as 64-bit integers, * unserializing the result document requires a 64-bit platform. */ $result = $server->executeCommand(DATABASE_NAME, $command)->toArray()[0]; printf("Killed %d cursor(s)\n", count($result->cursorsKilled)); printf("Killed expected cursor: %s\n", (string) $cursorId === (string) $result->cursorsKilled[0] ? 'yes' : 'no'); ?> ===DONE=== --EXPECT-- Killed 1 cursor(s) Killed expected cursor: yes ===DONE=== mongodb-1.6.1/tests/cursorid/cursorid_error-001.phpt0000644000076500000240000000041713572250760021707 0ustar alcaeusstaff--TEST-- MongoDB\Driver\CursorId cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyCursorId may not inherit from final class (MongoDB\Driver\CursorId) in %s on line %d mongodb-1.6.1/tests/exception/bulkwriteexception-getwriteresult-001.phpt0000644000076500000240000000101513572250760026021 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\BulkWriteException::getWriteResult() --FILE-- 1]; $reflection = new ReflectionClass($exception); $resultDocumentProperty = $reflection->getProperty('writeResult'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $writeResult); var_dump($writeResult === $exception->getWriteResult()); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/exception/bulkwriteexception-haserrorlabel-001.phpt0000644000076500000240000000106213572250760025557 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\BulkWriteException::hasErrorLabel() --FILE-- getProperty('errorLabels'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $labels); var_dump($exception->hasErrorLabel('foo')); var_dump($exception->hasErrorLabel('bar')); ?> ===DONE=== --EXPECT-- bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/exception/bulkwriteexception-haserrorlabel_error-001.phpt0000644000076500000240000000102313572250760026765 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\BulkWriteException::hasErrorLabel() with non-array values --FILE-- getProperty('errorLabels'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $labels); var_dump($exception->hasErrorLabel('bar')); ?> ===DONE=== --EXPECT-- bool(false) ===DONE=== mongodb-1.6.1/tests/exception/commandexception-getresultdocument-001.phpt0000644000076500000240000000103313572250760026113 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\CommandException::getResultDocument() --FILE-- 1]; $reflection = new ReflectionClass($exception); $resultDocumentProperty = $reflection->getProperty('resultDocument'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $resultDocument); var_dump($resultDocument === $exception->getResultDocument()); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== 
mongodb-1.6.1/tests/exception/commandexception-haserrorlabel-001.phpt0000644000076500000240000000105613572250760025170 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\CommandException::hasErrorLabel() --FILE-- getProperty('errorLabels'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $labels); var_dump($exception->hasErrorLabel('foo')); var_dump($exception->hasErrorLabel('bar')); ?> ===DONE=== --EXPECT-- bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/exception/commandexception-haserrorlabel_error-001.phpt0000644000076500000240000000101713572250760026376 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\CommandException::hasErrorLabel() with non-array values --FILE-- getProperty('errorLabels'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $labels); var_dump($exception->hasErrorLabel('bar')); ?> ===DONE=== --EXPECT-- bool(false) ===DONE=== mongodb-1.6.1/tests/exception/exception-001.phpt0000644000076500000240000000051313572250760021003 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\Exception extends Throwable --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/exception/runtimeexception-haserrorlabel-001.phpt0000644000076500000240000000105613572250760025235 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\RuntimeException::hasErrorLabel() --FILE-- getProperty('errorLabels'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $labels); var_dump($exception->hasErrorLabel('foo')); var_dump($exception->hasErrorLabel('bar')); ?> ===DONE=== --EXPECT-- bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/exception/runtimeexception-haserrorlabel_error-001.phpt0000644000076500000240000000101713572250760026443 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Exception\RuntimeException::hasErrorLabel() with non-array values --FILE-- getProperty('errorLabels'); $resultDocumentProperty->setAccessible(true); $resultDocumentProperty->setValue($exception, $labels); var_dump($exception->hasErrorLabel('bar')); ?> ===DONE=== --EXPECT-- bool(false) ===DONE=== mongodb-1.6.1/tests/functional/cursor-001.phpt0000644000076500000240000000360413572250760020472 0ustar alcaeusstaff--TEST-- Sorting single field, ascending, using the Cursor Iterator --SKIPIF-- --FILE-- array('_id' => 0, 'username' => 1), 'sort' => array('username' => 1), 'limit' => 104, )); $cursor = $manager->executeQuery(NS, $query); foreach ($cursor as $document) { echo $document->username . 
"\n"; } ?> ===DONE=== --EXPECT-- aaliyah.kertzmann aaron89 abbott.alden abbott.flo abby76 abernathy.adrienne abernathy.audrey abner.kreiger aboehm abshire.icie abshire.jazlyn adams.delta adolph20 adonis.schamberger agleason ahartmann ahettinger akreiger al.cormier al97 albin95 alda.murray alden.blanda alessandra76 alex73 alexa01 alfred.ritchie alia07 alia72 alize.hegmann allie48 alta.sawayn alvena.pacocha alvis22 alycia48 amalia84 amely01 amos.corkery amos78 anahi95 anais.feest anais58 andreanne.steuber angela.dickinson angelina.bartoletti angelina31 aniyah.franecki annalise40 antoinette.gaylord antoinette.weissnat aoberbrunner apacocha apollich ara92 arch44 arely.ryan armstrong.clara armstrong.gordon arnold.kiehn arvel.hilll asatterfield aschuppe ashlynn71 ashlynn85 ashton.o'kon austen03 austen47 austin67 awintheiser awyman ayana.brakus bailey.mertz bailey.sarina balistreri.donald barrett.prohaska bartell.susie bashirian.lina bayer.ova baylee.maggio bbernier bblick beahan.oleta beatty.layne beatty.myrtis beau49 beaulah.mann bechtelar.nadia becker.theron beer.mossie beer.roselyn benedict.johnson berge.enoch bergnaum.roberto bernardo.mccullough bernardo52 bernhard.margaretta bernie.morissette bethel20 betty09 bins.aliyah bins.laisha bjori blanda.danielle blanda.irving ===DONE=== mongodb-1.6.1/tests/functional/cursorid-001.phpt0000644000076500000240000000152313572250760021005 0ustar alcaeusstaff--TEST-- Sorting single field, ascending, using the Cursor Iterator --SKIPIF-- --FILE-- array('_id' => 0, 'username' => 1), 'sort' => array('username' => 1), 'batchSize' => 11, 'limit' => 110, )); $cursor = $manager->executeQuery(NS, $query); $cursorid = $cursor->getId(); $s1 = (string)$cursorid; var_dump( $cursorid, $s1 ); var_dump($s1 > 0); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\CursorId)#%d (%d) { ["id"]=> %rint\(\d+\)|string\(\d+\) "\d+"%r } string(%d) "%d" bool(true) ===DONE=== mongodb-1.6.1/tests/functional/phpinfo-1.phpt0000644000076500000240000000026313572250760020456 0ustar alcaeusstaff--TEST-- phpinfo() reports mongodb.debug (no value) --FILE-- ===DONE=== --EXPECTF-- %a mongodb.debug => no value => no value %a ===DONE=== mongodb-1.6.1/tests/functional/phpinfo-2.phpt0000644000076500000240000000037613572250760020464 0ustar alcaeusstaff--TEST-- phpinfo() reports mongodb.debug (default and overridden) --INI-- mongodb.debug=stderr --FILE-- ===DONE=== --EXPECTF-- %a mongodb.debug => stdout => stderr %a ===DONE=== mongodb-1.6.1/tests/functional/query-sort-001.phpt0000644000076500000240000000347113572250760021311 0ustar alcaeusstaff--TEST-- Sorting single field, ascending --SKIPIF-- --FILE-- array('_id' => 0, 'username' => 1), 'sort' => array('username' => 1), "limit" => 100, )); $cursor = $manager->executeQuery(NS, $query); foreach ($cursor as $document) { echo $document->username . 
"\n"; } ?> ===DONE=== --EXPECT-- aaliyah.kertzmann aaron89 abbott.alden abbott.flo abby76 abernathy.adrienne abernathy.audrey abner.kreiger aboehm abshire.icie abshire.jazlyn adams.delta adolph20 adonis.schamberger agleason ahartmann ahettinger akreiger al.cormier al97 albin95 alda.murray alden.blanda alessandra76 alex73 alexa01 alfred.ritchie alia07 alia72 alize.hegmann allie48 alta.sawayn alvena.pacocha alvis22 alycia48 amalia84 amely01 amos.corkery amos78 anahi95 anais.feest anais58 andreanne.steuber angela.dickinson angelina.bartoletti angelina31 aniyah.franecki annalise40 antoinette.gaylord antoinette.weissnat aoberbrunner apacocha apollich ara92 arch44 arely.ryan armstrong.clara armstrong.gordon arnold.kiehn arvel.hilll asatterfield aschuppe ashlynn71 ashlynn85 ashton.o'kon austen03 austen47 austin67 awintheiser awyman ayana.brakus bailey.mertz bailey.sarina balistreri.donald barrett.prohaska bartell.susie bashirian.lina bayer.ova baylee.maggio bbernier bblick beahan.oleta beatty.layne beatty.myrtis beau49 beaulah.mann bechtelar.nadia becker.theron beer.mossie beer.roselyn benedict.johnson berge.enoch bergnaum.roberto bernardo.mccullough bernardo52 bernhard.margaretta bernie.morissette bethel20 betty09 bins.aliyah ===DONE=== mongodb-1.6.1/tests/functional/query-sort-002.phpt0000644000076500000240000000323513572250760021310 0ustar alcaeusstaff--TEST-- Sorting single field, descending --SKIPIF-- --FILE-- array('_id' => 0, 'username' => 1), 'sort' => array('username' => -1), 'limit' => 100, )); $cursor = $manager->executeQuery(NS, $query); foreach ($cursor as $document) { echo $document->username . "\n"; } ?> ===DONE=== --EXPECT-- zulauf.amaya zstanton zoe41 zieme.noemi ziemann.webster zheathcote zella78 zboyle zachery33 yyost ywyman ywiza ypredovic yost.magali yost.ari ylarkin yklein yhudson yfritsch ycole yasmine.lowe yasmin55 xrodriguez xkohler xhermann xgutmann xgibson xcassin wwilkinson wunsch.mose wschimmel wschaefer wpacocha wolff.caroline wkertzmann wiza.carmel witting.walker witting.chris wisozk.cortez winnifred08 wilson.white willms.amari will.lamont will.jerod will.edwina wilfred.feil wilderman.sophia wiegand.blanche west.jude west.cristobal weimann.tillman webster70 webster48 watson70 warren.feest walton33 walter.norval walter.lester walsh.vincenza walker.alec wade91 vwaters vvolkman vschulist vrolfson vpfeffer vorn von.britney vivianne.macejkovic veum.tyrell vesta.ritchie verda93 vena.schumm velma37 velda.wehner veffertz vdickinson vconn vbraun vborer vbins vandervort.ezekiel van.ruecker uzieme uwisoky usmith uschumm uschmeler urban24 upton.zackery unique.pagac una.larkin umraz ullrich.layne ulises44 ulises.beatty ulesch ukovacek ujenkins uhansen ===DONE=== mongodb-1.6.1/tests/functional/query-sort-003.phpt0000644000076500000240000003134713572250760021316 0ustar alcaeusstaff--TEST-- Sorting single field, ascending, using the Cursor Iterator --SKIPIF-- --FILE-- array('_id' => 0, 'username' => 1), 'sort' => array('username' => 1), )); var_dump($query); $cursor = $manager->executeQuery(NS, $query); var_dump(get_class($cursor)); foreach ($cursor as $document) { echo $document->username . 
"\n"; } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { } ["options"]=> object(stdClass)#%d (%d) { ["projection"]=> object(stdClass)#%d (%d) { ["_id"]=> int(0) ["username"]=> int(1) } ["sort"]=> object(stdClass)#%d (%d) { ["username"]=> int(1) } } ["readConcern"]=> NULL } string(21) "MongoDB\Driver\Cursor" aaliyah.kertzmann aaron89 abbott.alden abbott.flo abby76 abernathy.adrienne abernathy.audrey abner.kreiger aboehm abshire.icie abshire.jazlyn adams.delta adolph20 adonis.schamberger agleason ahartmann ahettinger akreiger al.cormier al97 albin95 alda.murray alden.blanda alessandra76 alex73 alexa01 alfred.ritchie alia07 alia72 alize.hegmann allie48 alta.sawayn alvena.pacocha alvis22 alycia48 amalia84 amely01 amos.corkery amos78 anahi95 anais.feest anais58 andreanne.steuber angela.dickinson angelina.bartoletti angelina31 aniyah.franecki annalise40 antoinette.gaylord antoinette.weissnat aoberbrunner apacocha apollich ara92 arch44 arely.ryan armstrong.clara armstrong.gordon arnold.kiehn arvel.hilll asatterfield aschuppe ashlynn71 ashlynn85 ashton.o'kon austen03 austen47 austin67 awintheiser awyman ayana.brakus bailey.mertz bailey.sarina balistreri.donald barrett.prohaska bartell.susie bashirian.lina bayer.ova baylee.maggio bbernier bblick beahan.oleta beatty.layne beatty.myrtis beau49 beaulah.mann bechtelar.nadia becker.theron beer.mossie beer.roselyn benedict.johnson berge.enoch bergnaum.roberto bernardo.mccullough bernardo52 bernhard.margaretta bernie.morissette bethel20 betty09 bins.aliyah bins.laisha bjori blanda.danielle blanda.irving blanda.ruthe blaze.miller block.kasandra block.toby bmccullough botsford.edwardo botsford.jennie boyd.balistreri boyer.khalid boyle.franco bpaucek bpurdy bradford.heidenreich brannon24 braun.adaline braun.jeanie breanne.schmeler breitenberg.demarco brennan.emmerich bret57 broderick53 brooklyn22 bruecker bstamm buckridge.julius buddy42 bwalker camilla20 cara.bechtelar carlotta.kreiger carolyn09 carolyne63 carroll.emmalee cartwright.garland casimir.keebler casper.eldred casper.juliana casper38 cassin.carmel cassin.krystel catherine.hilll cathrine.gislason cbartoletti cbecker cbednar cbreitenberg cecelia.schoen celestine97 cfriesen cgreenfelder chad.kuphal chance.conroy chasity63 chet.pacocha christina.simonis chyna05 citlalli41 ckertzmann clarabelle65 clementine.grimes clotilde39 cnikolaus cole.alice coleman55 collier.sage collins.skylar columbus78 connelly.josefina conner.doyle coralie47 cordelia25 corkery.arch cormier.adriana cormier.amy cormier.landen cormier.vida cory76 cpaucek cprice craig93 creola.emard creola88 crona.jaclyn cronin.clint crooks.josh crystel24 csipes cummings.frederic cwaelchi cwest cwhite cwolf cydney.hayes dahlia.white daisy.johns dakota.bednar dakota.wiza dallas.marquardt dante.shields darwin.howe dave46 davis.bennett davis.solon dayne.padberg dayton03 delaney91 delbert.auer delia.lindgren deontae36 dereck.ward derek.bahringer derek79 deven.spinka devon34 dgottlieb dhudson dickinson.ashleigh dillan66 djerde dock.bednar dolly.beer donnie.langosh dorothy67 dorthy.legros doyle.nelle drippin dubuque.brooklyn dubuque.cordia dvandervort dwiegand dwolf earlene.marvin earline.baumbach easter73 eauer ebert.cordie ebony.williamson ebony59 edgar33 edgardo.gorczany edibbert effertz.mateo effie.keeling efren31 egrimes ehirthe ehuel ehuels eino23 ekoelpin eldora.steuber eldred65 elenor33 elesch eli.mann elisabeth95 eliseo49 ella.roberts ellen.krajcik ellen12 elliot.kling elliot.weissnat 
ellis37 elsie.kuhic elva.baumbach elvis45 emelia.ortiz emerald.shanahan emerson07 emie.schneider emilio.crona emily91 emmalee.waters enid57 enid78 enoch.hilll enola.rath ephraim76 erdman.ethyl erdman.niko eriberto.russel erik04 erika74 ernser.addison ernser.geovany ervin.carter espinka ethan.daugherty ethel56 ethelyn46 ethyl68 ettie49 eulah49 fabian55 fadel.trevion fae00 fahey.rosalee farrell.asha farrell.lessie fbraun feeney.angelica feeney.elizabeth feeney.nathanial feil.rae ferdman ferry.eusebio fherman filomena18 finn.torphy flavie41 florida.o'hara ford85 fosinski frami.bulah franecki.rosetta fred35 freda25 frederik.stracke fsporer fstokes fturner gabriel.mccullough gardner.jacobson garnet.oberbrunner garry.windler gaylord.myrtis gblock gbrakus georgette.mueller geovanni.jones geovany07 german.leffler german40 ggislason gia15 gibson.amiya giovani.langworth giovanna.hickle giovanny.haley gislason.mae gisselle.jacobs gladyce88 glang gottlieb.jerry goyette.roman gparker gprosacco gracie.mcdermott graciela.jacobson grayson78 greenfelder.amya greenfelder.larry greenfelder.ozella gretchen19 gretchen38 greynolds greyson63 grimes.andreane gulgowski.allie gusikowski.aliyah gutkowski.laron gwunsch haag.alaina hackett.alycia hadley.abernathy hailee01 hal67 haley.grace haley.krystel haley.lauretta halvorson.bulah hammes.dimitri hand.lauren hand.tiana hansen.vanessa harber.larissa harber.vicenta harris.kailey hartmann.dedrick harvey.hillard haven13 hayes.delores hayley08 hazle21 hazle43 heathcote.ashly hegmann.sallie heidenreich.julia helene.o'connell henriette21 herman.sanford herzog.eileen hessel.barry hflatley hhackett hhyatt hickle.isabell hirthe.bryana hirthe.letitia hirthe.reymundo hmarvin hoeger.anastacio hollie29 howe.abagail howell.daugherty hquigley hrodriguez hspinka hstamm htowne hudson.bernie hudson.deion huels.alfred huels.enid hugh22 humberto98 hvandervort hyatt.astrid hyatt.soledad iabernathy idaugherty idella50 idonnelly ifeil ileuschke imuller ipredovic irwin.gutkowski irwin31 isabell95 isabella.parisian isac13 isac67 isaiah47 isaiah50 isaias90 isobel.mraz ivy73 izabella.hermann jacobs.carmela jada.romaguera jadon.reinger jailyn62 jalon90 jamaal.cassin jamarcus.weissnat janelle93 janice.walker jannie71 jaquan94 jaqueline.o'kon jarod94 jarrod.lindgren jasmin.ruecker javier.volkman javier13 javier62 jayda.d'amore jazmyne63 jborer jeanette45 jedidiah.hyatt jefferey02 jenkins.letha jerald.konopelski jeremy.o'keefe jessika.schmeler jessy16 jett00 jfeest jheaney jherzog jlebsack jlockman jo'hara jodie.casper johnnie66 johnston.brooklyn jonas97 jones.jazmyn jordan.turner joshua.mraz josiah59 joyce.casper jruecker jschamberger jschinner jthompson jtowne jude.jakubowski jude92 juliana.witting juliet55 june.runolfsson justina63 jwindler kadams kadin.mayer kaelyn05 kaelyn88 kamille.watsica kamron88 karson.mante kasey.abshire kassandra.reilly katheryn.walsh kathlyn02 kathryne.boehm kattie12 kaya24 kayleigh62 kbeahan kdicki keagan.hirthe keanu21 keanu42 keebler.rupert keeling.sydnee keira.dach kelly.konopelski kelvin.jakubowski kerluke.hiram kernser keshawn.boyle kessler.marisol keyon.gaylord keyon65 kherman khills khudson kiley63 kip12 kirk40 kirstin.cruickshank klarson kleuschke kling.laila klocko.filiberto kmohr ko'keefe koch.emmett koch.sophia koelpin.yoshiko krystel.stark kturcotte kub.marcel kuhic.hattie kuhlman.noel kuphal.ahmed kutch.chase kutch.madonna kutch.pasquale kuvalis.nicolette lane05 larkin.lawson larue.schuster laurel35 laurel72 laurence28 lauryn.beer lbode lbradtke 
leanne.cronin leannon.zander lebsack.harmony ledner.finn leif52 leilani73 lemke.ernestina lempi56 leopold69 lesch.delfina lesch.edna lesch.nyah leuschke.erika lexie.bernier lexie65 lgrady lillian50 lilliana.schaden lily.hansen lind.dane lloyd60 lmckenzie lnicolas london07 lonnie.little lonnie10 loraine.hammes lorna31 louisa76 lquitzon lubowitz.colleen lubowitz.jazmyne lucas.ferry luciano79 lucienne13 lucio.huel lucio20 luella.deckow lullrich luther.lesch mac.hermann macey95 macie.corwin macy.greenholt maddison66 madilyn.wyman madisyn51 madyson.johns maeve.raynor maggio.kayli maia14 mante.ashlee mante.maymie marc97 marcel56 marco.gerlach mariana.sipes marietta.swift marina.mayert marion15 marion35 marjolaine45 mark.casper marks.trace marlen34 marlene95 marley.sipes marvin.ivory maryjane.kutch maudie25 mayer.tanner mccullough.vella mcdermott.kaitlyn mckenzie.maximus mdare meaghan89 melisa61 metz.elmer metz.ima michaela.wolf miles.pollich milford39 milford40 mills.emmanuel mills.rickey miracle53 misty.boyer mitchell.delta mitchell.rafael mohammad.gorczany mohammed.lemke mohr.kylee mollie.deckow monroe.o'keefe monserrate.leannon monserrate.nikolaus monty.mills morar.aniya mosciski.alanis mraz.marcelina mrunte mtoy mueller.woodrow muller.akeem murazik.maximillia mwalter mylene.rogahn myra43 myron.bechtelar mzemlak mzieme nash88 nasir24 natalia66 nathanial37 nayeli.vandervort ndouglas neal.hand neichmann neil.gorczany nellie23 ngoldner nhaag nharber nharris nicolas.melyssa nicolas.wendy nikita.romaguera nikko.langosh nikolas.lang nikolas78 nikolaus.celestino njacobs nkshlerin noah.blick nolan.nora nolan.zachariah nolan56 norma46 novella67 npurdy nrath nrowe nstamm nward o'conner.arthur obie.weissnat oboyer octavia36 oda.robel odare odell96 ogulgowski ohaley ohowe okuneva.ebba olga.mertz olga.waelchi olin13 oliver.reichert olson.dedrick olynch omarvin omer.kirlin ondricka.alexzander ondricka.joy orion.quigley orn.katelyn orval95 oswaldo.kunze otreutel owehner owen82 pacocha.quentin pagac.coleman paige.murphy parisian.dena parker.ellie patience65 patricia.macejkovic pattie.waters pattie97 paul.hayes paula.fahey paxton73 pbotsford pconroy pcruickshank pdach perry63 pfannerstill.erna pframi phahn philpert phodkiewicz phoebe.crona phuel pierre.grant plesch pollich.danika polson powlowski.alfredo ppurdy price49 prohaska.ransom prudence76 prussel pschowalter pwaters pwatsica pwisozk qarmstrong qbatz qgislason qkunze qmayert qo'hara qpowlowski qromaguera qryan qschiller qschneider queen75 queenie33 quitzon.greyson quitzon.maxime rachel45 raphaelle55 ratke.aurelia rau.brent raven.walter raven.ziemann raymundo.ferry raynor.wilmer rdickens regan86 reginald.gulgowski reichert.margaretta reinger.johnathan remington.russel renner.lucius rey29 rice.ronaldo rico71 river66 rkoelpin rmayer robel.chance rocky.hoeger rodger.raynor rodolfo.effertz rohan.harmon rolando38 rolfson.jaren rosalee52 rosemarie.conn rosenbaum.elisa rosetta45 rowe.erik rschowalter rubie.hyatt russel64 rutherford.dawn sabina11 samara90 sarina.bednar savannah89 savion82 sawayn.catharine sawayn.pink sbailey schamberger.marcelle schiller.kameron schimmel.mavis schimmel.russell schmeler.dillon schmeler.flo schmidt.elwyn schmitt.magali schneider.rita schowalter.abbigail schroeder.zoey schulist.angelo schumm.carley schumm.danielle sebert selina.thiel sferry shaina.emard shanie.murazik sheathcote shegmann shields.bethany shoeger shyann28 sienna53 sigmund.schinner simeon.nader skihn skiles.darrin skye.jast skyla.friesen smith.nico so'kon 
soledad.connelly sonia05 sorn spencer.bessie spencer.darrel sschumm ssteuber stacy.leffler stark.vladimir stehr.odell stella.schowalter stracke.dakota streich.abdiel stroman.rae susanna55 swyman sylvia82 tabitha.mohr talon74 tanya65 tatum.harvey tbarrows tcole terry.corene terry.florian tessie.stroman tgrady thalia22 theo62 theodore55 theresia68 theron10 tkonopelski tlind tomas04 toni57 toy.deshawn trace03 tressa.price tressie47 treutel.evert treutel.minnie trolfson tromp.kaleigh trudie09 trutherford tsatterfield tstamm turcotte.armand turner.considine twila75 uabshire uchamplin udach ugusikowski uhansen ujenkins ukovacek ulesch ulises.beatty ulises44 ullrich.layne umraz una.larkin unique.pagac upton.zackery urban24 uschmeler uschumm usmith uwisoky uzieme van.ruecker vandervort.ezekiel vbins vborer vbraun vconn vdickinson veffertz velda.wehner velma37 vena.schumm verda93 vesta.ritchie veum.tyrell vivianne.macejkovic von.britney vorn vpfeffer vrolfson vschulist vvolkman vwaters wade91 walker.alec walsh.vincenza walter.lester walter.norval walton33 warren.feest watson70 webster48 webster70 weimann.tillman west.cristobal west.jude wiegand.blanche wilderman.sophia wilfred.feil will.edwina will.jerod will.lamont willms.amari wilson.white winnifred08 wisozk.cortez witting.chris witting.walker wiza.carmel wkertzmann wolff.caroline wpacocha wschaefer wschimmel wunsch.mose wwilkinson xcassin xgibson xgutmann xhermann xkohler xrodriguez yasmin55 yasmine.lowe ycole yfritsch yhudson yklein ylarkin yost.ari yost.magali ypredovic ywiza ywyman yyost zachery33 zboyle zella78 zheathcote ziemann.webster zieme.noemi zoe41 zstanton zulauf.amaya ===DONE=== mongodb-1.6.1/tests/functional/query-sort-004.phpt0000644000076500000240000000237513572250760021316 0ustar alcaeusstaff--TEST-- Sort query option is always serialized as a BSON document --SKIPIF-- --FILE-- insert(array('_id' => $i, '0' => 4 - $i)); } $writeResult = $manager->executeBulkWrite(NS, $bulkWrite); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $query = new MongoDB\Driver\Query(array(), array( 'sort' => array('0' => 1), )); var_dump($query); $cursor = $manager->executeQuery(NS, $query); /* Numeric keys of stdClass instances cannot be directly accessed, so ensure the * document is decoded as a PHP array. */ $cursor->setTypeMap(array('root' => 'array')); foreach ($cursor as $document) { echo $document['0'] . 
"\n"; } ?> ===DONE=== --EXPECTF-- Inserted: 5 object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { } ["options"]=> object(stdClass)#%d (%d) { ["sort"]=> object(stdClass)#%d (%d) { [%r(0|"0")%r]=> int(1) } } ["readConcern"]=> NULL } 0 1 2 3 4 ===DONE=== mongodb-1.6.1/tests/manager/bug0572.phpt0000644000076500000240000000150213572250760017215 0ustar alcaeusstaff--TEST-- PHPC-572: Ensure stream context does not go out of scope before socket init --SKIPIF-- --FILE-- [ 'verify_peer' => false, 'verify_peer_name' => false, 'allow_self_signed' => true, ], ]); return new MongoDB\Driver\Manager(URI, [], ['context' => $context]); }; $manager = $closure(); $cursor = $manager->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1) } ===DONE=== mongodb-1.6.1/tests/manager/bug0851-001.phpt0000644000076500000240000000175013572250760017520 0ustar alcaeusstaff--TEST-- PHPC-851: Manager constructor should not modify options argument --FILE-- 'secondaryPreferred', 'readPreferenceTags' => [ ['dc' => 'ny'], [], ], ]; $manager = new MongoDB\Driver\Manager(null, $options); var_dump($options); /* Dump the Manager's ReadPreference to ensure that each element in the * readPreferenceTags option was converted to an object. */ var_dump($manager->getReadPreference()); ?> ===DONE=== --EXPECTF-- array(2) { ["readPreference"]=> string(18) "secondaryPreferred" ["readPreferenceTags"]=> array(2) { [0]=> array(1) { ["dc"]=> string(2) "ny" } [1]=> array(0) { } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" ["tags"]=> array(2) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" } [1]=> object(stdClass)#%d (%d) { } } } ===DONE=== mongodb-1.6.1/tests/manager/bug0851-002.phpt0000644000076500000240000000102313572250760017512 0ustar alcaeusstaff--TEST-- PHPC-851: Manager constructor should not modify driverOptions argument --FILE-- true, 'context' => stream_context_create([ 'ssl' => [ 'allow_self_signed' => true, ], ]), ]; $manager = new MongoDB\Driver\Manager(null, [], $driverOptions); var_dump($driverOptions); ?> ===DONE=== --EXPECT-- array(2) { ["weak_cert_validation"]=> bool(true) ["context"]=> resource(4) of type (stream-context) } ===DONE=== mongodb-1.6.1/tests/manager/bug0912-001.phpt0000644000076500000240000000317413572250760017520 0ustar alcaeusstaff--TEST-- PHPC-912: Child process should not destroy mongoc_client_t objects from parent --SKIPIF-- --FILE-- 1]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $uri = $cursor->toArray()[0]->you; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['pid' => getmypid(), 'uri' => $uri]); $manager->executeBulkWrite(NS, $bulk); } $manager = new MongoDB\Driver\Manager(URI); logMyURI($manager); $parentPid = getmypid(); $childPid = pcntl_fork(); if ($childPid === 0) { $manager = new MongoDB\Driver\Manager(URI); logMyURI($manager); exit; } if ($childPid) { $waitPid = pcntl_waitpid($childPid, $status); if ($waitPid > 0) { printf("Parent(%d) waited for child(%d) to exit\n", $parentPid, $waitPid); } $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); $results = $cursor->toArray(); printf("%d connections were logged\n", count($results)); printf("PIDs differ: %s\n", $results[0]->pid !== $results[1]->pid ? 'yes' : 'no'); printf("URIs differ: %s\n", $results[0]->uri !== $results[1]->uri ? 
'yes' : 'no'); } ?> ===DONE=== --EXPECTF-- Parent(%d) waited for child(%d) to exit 2 connections were logged PIDs differ: yes URIs differ: yes ===DONE=== mongodb-1.6.1/tests/manager/bug0913-001.phpt0000644000076500000240000000413613572250760017520 0ustar alcaeusstaff--TEST-- PHPC-913: Child process should not re-use mongoc_client_t objects from parent --SKIPIF-- --FILE-- 1]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $uri = $cursor->toArray()[0]->you; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['pid' => getmypid(), 'uri' => $uri]); $manager->executeBulkWrite(NS, $bulk); } $manager = new MongoDB\Driver\Manager(URI); logMyURI($manager); $parentPid = getmypid(); $childPid = pcntl_fork(); if ($childPid === 0) { $manager = new MongoDB\Driver\Manager(URI); logMyURI($manager); /* Due to PHPC-912, we cannot allow the child process to terminate before * the parent is done using its client, lest it destroy the mongoc_client_t * object and shutdown its socket(s). Sleep for 250ms to allow the parent * time to query for our logged URI. */ usleep(250000); exit; } if ($childPid) { /* Sleep for 100ms to allow the child time to log its URI. Ideally, we would * wait for the child to finish, but PHPC-912 prevents us from doing so. */ usleep(100000); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); $results = $cursor->toArray(); printf("%d connections were logged\n", count($results)); printf("PIDs differ: %s\n", $results[0]->pid !== $results[1]->pid ? 'yes' : 'no'); printf("URIs differ: %s\n", $results[0]->uri !== $results[1]->uri ? 'yes' : 'no'); $waitPid = pcntl_waitpid($childPid, $status); if ($waitPid > 0) { printf("Parent(%d) waited for child(%d) to exit\n", $parentPid, $waitPid); } } ?> ===DONE=== --EXPECTF-- 2 connections were logged PIDs differ: yes URIs differ: yes Parent(%d) waited for child(%d) to exit ===DONE=== mongodb-1.6.1/tests/manager/bug0940-001.phpt0000644000076500000240000000077013572250760017520 0ustar alcaeusstaff--TEST-- PHPC-940: php_phongo_free_ssl_opt() attempts to free interned strings --SKIPIF-- --FILE-- false])); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Manager)#%d (%d) { ["uri"]=> string(20) "mongodb://127.0.0.1/" ["cluster"]=> array(0) { } } ===DONE=== mongodb-1.6.1/tests/manager/bug0940-002.phpt0000644000076500000240000000112013572250760017507 0ustar alcaeusstaff--TEST-- PHPC-940: php_phongo_free_ssl_opt() attempts to free interned strings (context option) --SKIPIF-- --FILE-- ['cafile' => false]]); var_dump(new MongoDB\Driver\Manager(null, [], ['context' => $context])); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Manager)#%d (%d) { ["uri"]=> string(20) "mongodb://127.0.0.1/" ["cluster"]=> array(0) { } } ===DONE=== mongodb-1.6.1/tests/manager/bug1163-001.phpt0000644000076500000240000000474513572250760017524 0ustar alcaeusstaff--TEST-- PHPC-1163: Unacknowledged write concern should omit implicit session --SKIPIF-- --FILE-- 0]); MongoDB\Driver\Monitoring\addSubscriber($this); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); echo "Testing executeBulkWrite\n"; $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'insert' => COLLECTION_NAME, 'documents' => [['x' => 1]], ]); /* Note: executeCommand() and executeReadCommand() are not tested * because they do not inherit the client-level write concern. 
*/ echo "\nTesting executeWriteCommand\n"; $manager->executeWriteCommand(DATABASE_NAME, $command); /* We can safely re-use the insert command with executeReadWriteCommand * because there is no readConcern to inherit. */ echo "\nTesting executeReadWriteCommand\n"; $manager->executeReadWriteCommand(DATABASE_NAME, $command); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { if ($event->getCommandName() === 'insert') { $command = $event->getCommand(); $hasSession = isset($command->lsid); $writeConcern = isset($command->writeConcern) ? $command->writeConcern: null; printf("insert command write concern: %s\n", json_encode($writeConcern)); printf("insert command has session: %s\n", $hasSession ? 'yes' : 'no'); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } (new Test)->run(); ?> ===DONE=== --EXPECT-- Testing executeBulkWrite insert command write concern: {"w":0} insert command has session: no Testing executeWriteCommand insert command write concern: {"w":0} insert command has session: no Testing executeReadWriteCommand insert command write concern: {"w":0} insert command has session: no ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-001.phpt0000644000076500000240000000026013572250760020777 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct() with default URI --FILE-- ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-002.phpt0000644000076500000240000000027613572250760021007 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct() with URI --FILE-- ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-003.phpt0000644000076500000240000000052113572250760021001 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct() URI defaults to "mongodb://127.0.0.1/" --FILE-- ===DONE=== --EXPECTF-- [%s] PHONGO: DEBUG > Connection string: 'mongodb://127.0.0.1/' %A ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-004.phpt0000644000076500000240000000116313572250760021005 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): Deprecated boolean options in URI string --FILE-- getWriteConcern()->getJournal()); } ?> ===DONE=== --EXPECTF-- bool(true) bool(true) bool(true) bool(true) bool(false) bool(false) bool(false) bool(false) bool(false) ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-005.phpt0000644000076500000240000000036513572250760021011 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): Ensure environmental URI is parsable --FILE-- ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-006.phpt0000644000076500000240000000100713572250760021004 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): Unparsable environmental URI --ENV-- MONGODB_URI=invalid --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'invalid'. Invalid URI Schema, expecting 'mongodb://' or 'mongodb+srv://'. 
===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-appname-001.phpt0000644000076500000240000000111113572250760022412 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): appname option --SKIPIF-- --FILE-- "2-{$name2}"]); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand("test", $command); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-appname_error-001.phpt0000644000076500000240000000332613572250760023635 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid appname --FILE-- "2-{$name2}"]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Invalid appname value: '2-PHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGOPHONGO' ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-auth_mechanism-001.phpt0000644000076500000240000000145413572250760023770 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): authMechanism option --FILE-- 'MONGODB-X509', 'username' => 'username']], [null, ['authMechanism' => 'MONGODB-X509']], [null, ['authMechanism' => 'GSSAPI', 'username' => 'username']], ]; foreach ($tests as $test) { list($uri, $options) = $test; /* Note: the Manager's debug information does not include the auth mechanism * so we are merely testing that no exception is thrown. 
*/ $manager = new MongoDB\Driver\Manager($uri, $options); } ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-auth_mechanism-002.phpt0000644000076500000240000000260113572250760023764 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): authMechanismProperties option --FILE-- 'username', 'authMechanism' => 'GSSAPI', 'authMechanismProperties' => ['CANONICALIZE_HOST_NAME' => 'true', 'SERVICE_NAME' => 'foo', 'SERVICE_REALM' => 'bar']]], // Options are case-insensitive ['mongodb://username@127.0.0.1/?authMechanism=GSSAPI&authMechanismProperties=canonicalize_host_name:TRUE,service_name:foo,service_realm:bar', []], [null, ['username' => 'username', 'authMechanism' => 'GSSAPI', 'authMechanismProperties' => ['canonicalize_host_name' => 'TRUE', 'service_name' => 'foo', 'service_realm' => 'bar']]], // Boolean true "CANONICALIZE_HOST_NAME" value is converted to "true" [null, ['username' => 'username', 'authMechanism' => 'GSSAPI', 'authMechanismProperties' => ['canonicalize_host_name' => true]]], ]; foreach ($tests as $test) { list($uri, $options) = $test; /* Note: the Manager's debug information does not include the auth mechanism * so we are merely testing that no exception is thrown and that option * processing does not leak memory. */ $manager = new MongoDB\Driver\Manager($uri, $options); } ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-auth_mechanism-error-001.phpt0000644000076500000240000000533113572250760025115 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): authentication options are validated --FILE-- 'GSSAPI', 'authSource' => 'admin']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/?authMechanism=MONGODB-X509&authSource=admin'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/', ['authMechanism' => 'MONGODB-X509', 'authSource' => 'admin']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://@localhost:27017/?authMechanism=SCRAM-SHA-1'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/', ['username' => '', 'authMechanism' => 'SCRAM-SHA-1']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/', ['password' => 'password', 'authMechanism' => 'MONGODB-X509']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://localhost:27017/?authMechanism=GSSAPI&authSource=admin'. GSSAPI and X509 require "$external" authSource. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: GSSAPI and X509 require "$external" authSource. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://localhost:27017/?authMechanism=MONGODB-X509&authSource=admin'. GSSAPI and X509 require "$external" authSource. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: GSSAPI and X509 require "$external" authSource. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://@localhost:27017/?authMechanism=SCRAM-SHA-1'. 
'SCRAM-SHA-1' authentication mechanism requires username. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: 'SCRAM-SHA-1' authentication mechanism requires username. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: X509 authentication mechanism does not accept a password. ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_concern-001.phpt0000644000076500000240000000143513572250760023424 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): read concern options --FILE-- 'local']], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getReadConcern()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(5) "local" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(1) "1" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(5) "local" } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_concern-error-001.phpt0000644000076500000240000000072713572250760024556 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid read concern --FILE-- 1]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "readConcernLevel" URI option, 32-bit integer given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_preference-001.phpt0000644000076500000240000000326313572250760024114 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): read preference options --FILE-- 'primary']], [null, ['readPreference' => 'secondary', 'readPreferenceTags' => [['tag' => 'one'], []]]], [null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => 1000]], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getReadPreference()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(2) { [0]=> object(stdClass)#%d (%d) { ["tag"]=> string(3) "one" } [1]=> object(stdClass)#%d (%d) { } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1000) } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(2) { [0]=> object(stdClass)#%d (%d) { ["tag"]=> string(3) "one" } [1]=> object(stdClass)#%d (%d) { } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_preference-002.phpt0000644000076500000240000000262013572250760024111 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): read preference options (maxStalenessSeconds) --FILE-- 1231]], ['mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=1000', ['maxStalenessSeconds' => 2000]], ['mongodb://127.0.0.1/?readpreference=secondary&maxstalenessseconds=1231', []], ['mongodb://127.0.0.1/?readpreference=secondary', ['maxstalenessseconds' => 1231]], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getReadPreference()); } ?> ===DONE=== --EXPECTF-- 
object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1231) } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1231) } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(2000) } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1231) } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1231) } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_preference-004.phpt0000644000076500000240000000635613572250760024125 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): read preference options (slaveok) --FILE-- true]], ['mongodb://127.0.0.1/?readPreference=nearest', ['slaveok' => true]], // False array option is ignored ['mongodb://127.0.0.1/?slaveok=true', ['slaveok' => false]], ['mongodb://127.0.0.1/?readPreference=nearest', ['slaveok' => false]], // readPreference option takes priority ['mongodb://127.0.0.1/?slaveok=true&readPreference=nearest', []], ['mongodb://127.0.0.1/?slaveok=false&readPreference=nearest', []], ['mongodb://127.0.0.1/?slaveok=true', ['readPreference' => 'nearest']], ['mongodb://127.0.0.1/?slaveok=false', ['readPreference' => 'nearest']], [null, ['readPreference' => 'nearest', 'slaveok' => true]], [null, ['readPreference' => 'nearest', 'slaveok' => true]], // Alternative values for true in URI string (all other strings are false) ['mongodb://127.0.0.1/?slaveok=t', []], ['mongodb://127.0.0.1/?slaveok=1', []], // Case insensitivity for URI string and array options ['mongodb://127.0.0.1/?slaveOk=True', []], ['mongodb://127.0.0.1/?SLAVEOK=TRUE', []], [null, ['slaveOk' => true]], [null, ['SLAVEOK' => true]], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getReadPreference()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } 
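As a hedged sketch (not part of the original test files), the read preference assembled from these URI options can also be inspected through the Manager's getter; the URI below mirrors the one used in the surrounding tests.

<?php
$manager = new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=1231');
$rp = $manager->getReadPreference();
var_dump($rp->getMode() === MongoDB\Driver\ReadPreference::RP_SECONDARY); // bool(true)
var_dump($rp->getMaxStalenessSeconds());                                  // int(1231)
?>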
object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_preference-error-001.phpt0000644000076500000240000000555613572250760025252 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid read preference (mode and tags) --FILE-- 1]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['readPreference' => 'primary', 'readPreferenceTags' => 'invalid']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; // Invalid values echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=primary&readPreferenceTags=dc:ny'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['readPreference' => 'nothing']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=primary', ['readPreferenceTags' => [[]]]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=primary', ['readPreferenceTags' => ['invalid']]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=1'. Unsupported readPreference value [readPreference=1]. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&readPreferenceTags=invalid'. Unsupported value for "readPreferenceTags": "invalid". OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "readPreference" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected array for "readPreferenceTags" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=primary&readPreferenceTags=dc:ny'. Invalid readPreferences. 
OK: Got MongoDB\Driver\Exception\InvalidArgumentException Unsupported readPreference value: 'nothing' OK: Got MongoDB\Driver\Exception\InvalidArgumentException Primary read preference mode conflicts with tags OK: Got MongoDB\Driver\Exception\InvalidArgumentException Read preference tags must be an array of zero or more documents ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_preference-error-002.phpt0000644000076500000240000000700213572250760025237 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid read preference (maxStalenessSeconds) --FILE-- 'invalid']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; // Invalid range in URI string (array option is tested in 64-bit error test) echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=2147483648'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; // Invalid values echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?maxstalenessseconds=1231'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?maxStalenessSeconds=1231'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['maxstalenessseconds' => 1231]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['maxStalenessSeconds' => 1231]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => -2]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => 0]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => 42]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=invalid'. Unsupported value for "maxstalenessseconds": "invalid". OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected integer for "maxStalenessSeconds" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?readPreference=secondary&maxStalenessSeconds=2147483648'. Unsupported value for "maxstalenessseconds": "2147483648". OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?maxstalenessseconds=1231'. Invalid readPreferences. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?maxStalenessSeconds=1231'. Invalid readPreferences. 
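Likewise for maxStalenessSeconds: it requires a non-primary read preference and must be at least 90 seconds. A hedged sketch of the range check, mirroring one of the cases in the surrounding test:

<?php
try {
    new MongoDB\Driver\Manager(null, ['readPreference' => 'secondary', 'maxStalenessSeconds' => 42]);
} catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {
    // Expected maxStalenessSeconds to be >= 90, 42 given
    echo $e->getMessage(), "\n";
}
?>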
OK: Got MongoDB\Driver\Exception\InvalidArgumentException Primary read preference mode conflicts with maxStalenessSeconds OK: Got MongoDB\Driver\Exception\InvalidArgumentException Primary read preference mode conflicts with maxStalenessSeconds OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be >= 90, -2 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be >= 90, 0 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be >= 90, 42 given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_preference-error-003.phpt0000644000076500000240000000143013572250760025237 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid read preference (slaveOk) --FILE-- 1]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?slaveok=other'. Unsupported value for "slaveok": "other". OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected boolean for "slaveOk" URI option, 32-bit integer given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-read_preference-error-004.phpt0000644000076500000240000000116313572250760025243 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid read preference (maxStalenessSeconds range) --SKIPIF-- --FILE-- 'secondary', 'maxStalenessSeconds' => 2147483648]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be <= 2147483647, 2147483648 given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-ssl-001.phpt0000644000076500000240000000146213572250760021603 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): ssl option does not require driverOptions --SKIPIF-- --FILE-- true])); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Manager)#%d (%d) { ["uri"]=> string(29) "mongodb://127.0.0.1/?ssl=true" ["cluster"]=> array(0) { } } object(MongoDB\Driver\Manager)#%d (%d) { ["uri"]=> string(20) "mongodb://127.0.0.1/" ["cluster"]=> array(0) { } } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-ssl-002.phpt0000644000076500000240000000066513572250760021610 0ustar alcaeusstaff--TEST-- PHPC-1239: Passing SSL driverOptions overrides SSL options from URI --SKIPIF-- --FILE-- executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-tls-error-001.phpt0000644000076500000240000000347213572250760022736 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): tlsInsecure cannot be combined with tlsAllowInvalidHostnames --FILE-- true, 'tlsAllowInvalidHostnames' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/?tlsInsecure=true', ['tlsAllowInvalidHostnames' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/?tlsAllowInvalidHostnames=true', ['tlsInsecure' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://localhost:27017/?tlsInsecure=true&tlsAllowInvalidHostnames=true'. 
tlsinsecure may not be specified with tlsallowinvalidcertificates or tlsallowinvalidhostnames. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: tlsinsecure may not be combined with tlsallowinvalidcertificates or tlsallowinvalidhostnames. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: tlsinsecure may not be combined with tlsallowinvalidcertificates or tlsallowinvalidhostnames. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: tlsinsecure may not be combined with tlsallowinvalidcertificates or tlsallowinvalidhostnames. ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-tls-error-002.phpt0000644000076500000240000000351413572250760022734 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): tlsInsecure cannot be combined with tlsAllowInvalidCertificates --FILE-- true, 'tlsAllowInvalidCertificates' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/?tlsInsecure=true', ['tlsAllowInvalidCertificates' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://localhost:27017/?tlsAllowInvalidCertificates=true', ['tlsInsecure' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://localhost:27017/?tlsInsecure=true&tlsAllowInvalidCertificates=true'. tlsinsecure may not be specified with tlsallowinvalidcertificates or tlsallowinvalidhostnames. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: tlsinsecure may not be combined with tlsallowinvalidcertificates or tlsallowinvalidhostnames. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: tlsinsecure may not be combined with tlsallowinvalidcertificates or tlsallowinvalidhostnames. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse URI options: tlsinsecure may not be combined with tlsallowinvalidcertificates or tlsallowinvalidhostnames. 
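The tlsInsecure conflict asserted by these tests can be triggered with either option spelled in the URI or in the options array; a brief sketch (illustrative only) of the array form:

<?php
try {
    new MongoDB\Driver\Manager(null, ['tlsInsecure' => true, 'tlsAllowInvalidCertificates' => true]);
} catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {
    // tlsinsecure may not be combined with tlsallowinvalidcertificates
    // or tlsallowinvalidhostnames.
    echo $e->getMessage(), "\n";
}
?>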
===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-wireversion.phpt0000644000076500000240000000102313572250760023051 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): wire version support --FILE-- 1]); try { $manager->executeCommand("test", $command); } catch (\MongoDB\Driver\Exception\ConnectionException $e) { if ($e->getCode() == 15) { // MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION echo "Bad wire version detected: ", $e->getMessage(), "\n"; } } ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-001.phpt0000644000076500000240000000261313572250760023642 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): write concern options (w) --FILE-- -1]], [null, ['w' => -0]], [null, ['w' => 1]], [null, ['w' => 'majority']], [null, ['w' => 'customTagSet']], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getWriteConcern()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(-1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(12) "customTagSet" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(-1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(12) "customTagSet" } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-002.phpt0000644000076500000240000000302713572250760023643 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): write concern options (wtimeoutms) --FILE-- 1000]], [null, ['w' => 2, 'wtimeoutms' => 1000]], [null, ['w' => 'majority', 'wtimeoutms' => 1000]], [null, ['w' => 'customTagSet', 'wtimeoutms' => 1000]], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getWriteConcern()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(2) ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(12) "customTagSet" ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(2) ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(12) "customTagSet" ["wtimeout"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-003.phpt0000644000076500000240000000141613572250760023644 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): write concern options (journal) --FILE-- true]], [null, ['journal' => false]], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getWriteConcern()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["j"]=> bool(true) } 
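A hedged sketch (not original test code) tying the write-concern URI options covered by the surrounding tests to the WriteConcern object returned by the Manager; the combined URI below is illustrative.

<?php
$manager = new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=majority&wtimeoutms=1000&journal=true');
$wc = $manager->getWriteConcern();
var_dump($wc->getW());        // string(8) "majority"
var_dump($wc->getWtimeout()); // int(1000)
var_dump($wc->getJournal());  // bool(true)
?>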
object(MongoDB\Driver\WriteConcern)#%d (%d) { ["j"]=> bool(false) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["j"]=> bool(true) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["j"]=> bool(false) } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-004.phpt0000644000076500000240000000301713572250760023644 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): write concern options (safe) --FILE-- true]], [null, ['safe' => false]], [null, ['w' => 1, 'safe' => false]], [null, ['w' => 0, 'safe' => true]], // safe in URI options array may override w in URI string ['mongodb://127.0.0.1/?w=0', ['safe' => true]], ['mongodb://127.0.0.1/?w=1', ['safe' => false]], ]; foreach ($tests as $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getWriteConcern()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-error-001.phpt0000644000076500000240000000177513572250760025001 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid write concern (w) --FILE-- 1.0]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; /* Note: Values of w < -1 are invalid, but libmongoc's URI string parsing only * logs a warning instead of raising an error (see: CDRIVER-2234), so we cannot * test for this. */ echo throws(function() { new MongoDB\Driver\Manager(null, ['w' => -2]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer or string for "w" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Unsupported w value: -2 ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-error-002.phpt0000644000076500000240000000125013572250760024766 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid write concern (w range) --SKIPIF-- --FILE-- 2147483648]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer or string for "w" URI option, 64-bit integer given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-error-003.phpt0000644000076500000240000000146613572250760025000 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid write concern (wtimeoutms) --FILE-- 'invalid']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?wtimeoutms=invalid'. Unsupported value for "wtimeoutms": "invalid". 
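A short illustrative sketch of the w validation these error tests cover: w values below -1 are rejected when passed through the options array.

<?php
try {
    new MongoDB\Driver\Manager(null, ['w' => -2]);
} catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {
    // Unsupported w value: -2
    echo $e->getMessage(), "\n";
}
?>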
OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "wTimeoutMS" URI option, string given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-error-004.phpt0000644000076500000240000000133413572250760024773 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid write concern (wtimeoutms range exceeds INT32_MAX) --SKIPIF-- --FILE-- 2147483648]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "wTimeoutMS" URI option, 64-bit integer given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-error-005.phpt0000644000076500000240000000603513572250760024777 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid write concern (journal) --FILE-- 'invalid']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; // Invalid values (journal conflicts with unacknowledged write concerns) echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=-1&journal=true'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=0&journal=true'); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=-1', ['journal' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=0', ['journal' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?journal=true', ['w' => -1]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager('mongodb://127.0.0.1/?journal=true', ['w' => 0]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['w' => -1, 'journal' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() { new MongoDB\Driver\Manager(null, ['w' => 0, 'journal' => true]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?journal=invalid'. Unsupported value for "journal": "invalid". OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected boolean for "journal" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?w=-1&journal=true'. Journal conflicts with w value [w=-1]. OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?w=0&journal=true'. Journal conflicts with w value [w=0]. 
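The journal/w conflict asserted below reduces to this pattern; a hedged sketch using the options-array form:

<?php
try {
    new MongoDB\Driver\Manager(null, ['w' => 0, 'journal' => true]);
} catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {
    // Journal conflicts with w value: 0
    echo $e->getMessage(), "\n";
}
?>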
OK: Got MongoDB\Driver\Exception\InvalidArgumentException Journal conflicts with w value: -1 OK: Got MongoDB\Driver\Exception\InvalidArgumentException Journal conflicts with w value: 0 OK: Got MongoDB\Driver\Exception\InvalidArgumentException Journal conflicts with w value: -1 OK: Got MongoDB\Driver\Exception\InvalidArgumentException Journal conflicts with w value: 0 OK: Got MongoDB\Driver\Exception\InvalidArgumentException Journal conflicts with w value: -1 OK: Got MongoDB\Driver\Exception\InvalidArgumentException Journal conflicts with w value: 0 ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-error-006.phpt0000644000076500000240000000143413572250760024776 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid write concern (safe) --FILE-- 'invalid']); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'mongodb://127.0.0.1/?safe=invalid'. Unsupported value for "safe": "invalid". OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected boolean for "safe" URI option, string given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor-write_concern-error-007.phpt0000644000076500000240000000103413572250760024773 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid write concern (wtimeoutms range) --SKIPIF-- --FILE-- -1]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected wtimeoutMS to be >= 0, -1 given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor_error-001.phpt0000644000076500000240000000072313572250760022214 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): too many arguments --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\Manager::__construct() expects at most 3 parameters, 4 given ===DONE=== mongodb-1.6.1/tests/manager/manager-ctor_error-002.phpt0000644000076500000240000000101613572250760022211 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid URI --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Failed to parse MongoDB URI: 'not a valid connection string'. Invalid URI Schema, expecting 'mongodb://' or 'mongodb+srv://'. 
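A minimal sketch of the invalid-URI case exercised by manager-ctor_error-002 (illustrative, not the original test body):

<?php
try {
    new MongoDB\Driver\Manager('not a valid connection string');
} catch (MongoDB\Driver\Exception\InvalidArgumentException $e) {
    // Failed to parse MongoDB URI: 'not a valid connection string'. ...
    echo $e->getMessage(), "\n";
}
?>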
===DONE=== mongodb-1.6.1/tests/manager/manager-ctor_error-003.phpt0000644000076500000240000003100113572250760022207 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::__construct(): invalid types in URI options arrays --FILE-- 1], ]; foreach ($integerOptions as $option) { foreach ($invalidIntegerValues as $value) { echo throws(function() use ($option, $value) { new MongoDB\Driver\Manager(null, [$option => $value]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; } } echo "\nTesting string options:\n"; $stringOptions = [ 'appname', 'authMechanism', 'authSource', 'gssapiServiceName', 'password', 'replicaSet', 'username', ]; $invalidStringValues = [ true, 1.0, 42, new MongoDB\BSON\ObjectId, [ 1, 2, 3 ], ['x' => 1], ]; foreach ($stringOptions as $option) { foreach ($invalidStringValues as $value) { echo throws(function() use ($option, $value) { new MongoDB\Driver\Manager(null, [$option => $value]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; } } echo "\nTesting document options:\n"; $invalidDocumentValues = [ true, 1.0, 42, 'string', new MongoDB\BSON\ObjectId, [ 1, 2, 3 ], ]; foreach ($invalidDocumentValues as $value) { echo throws(function() use ($value) { new MongoDB\Driver\Manager(null, ['authMechanismProperties' => $value]); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; } ?> ===DONE=== --EXPECT-- Testing 32-bit integer options: OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "connectTimeoutMS" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "connectTimeoutMS" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "connectTimeoutMS" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "connectTimeoutMS" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "connectTimeoutMS" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "connectTimeoutMS" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "heartbeatFrequencyMS" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "heartbeatFrequencyMS" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "heartbeatFrequencyMS" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "heartbeatFrequencyMS" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "heartbeatFrequencyMS" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "heartbeatFrequencyMS" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "localThresholdMS" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "localThresholdMS" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "localThresholdMS" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "localThresholdMS" URI option, ObjectId given OK: Got 
MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "localThresholdMS" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "localThresholdMS" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "serverSelectionTimeoutMS" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "serverSelectionTimeoutMS" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "serverSelectionTimeoutMS" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "serverSelectionTimeoutMS" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "serverSelectionTimeoutMS" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "serverSelectionTimeoutMS" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketCheckIntervalMS" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketCheckIntervalMS" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketCheckIntervalMS" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketCheckIntervalMS" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketCheckIntervalMS" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketCheckIntervalMS" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketTimeoutMS" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketTimeoutMS" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketTimeoutMS" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketTimeoutMS" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketTimeoutMS" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected 32-bit integer for "socketTimeoutMS" URI option, document given Testing string options: OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "appname" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "appname" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "appname" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "appname" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "appname" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "appname" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authMechanism" URI option, boolean given OK: Got 
MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authMechanism" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authMechanism" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authMechanism" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authMechanism" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authMechanism" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authSource" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authSource" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authSource" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authSource" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authSource" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "authSource" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "gssapiServiceName" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "gssapiServiceName" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "gssapiServiceName" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "gssapiServiceName" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "gssapiServiceName" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "gssapiServiceName" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "password" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "password" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "password" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "password" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "password" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "password" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "replicaSet" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "replicaSet" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "replicaSet" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "replicaSet" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "replicaSet" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "replicaSet" URI option, document given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "username" URI 
option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "username" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "username" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "username" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "username" URI option, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected string for "username" URI option, document given Testing document options: OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected array or object for "authMechanismProperties" URI option, boolean given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected array or object for "authMechanismProperties" URI option, double given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected array or object for "authMechanismProperties" URI option, 32-bit integer given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected array or object for "authMechanismProperties" URI option, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected array or object for "authMechanismProperties" URI option, ObjectId given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected array or object for "authMechanismProperties" URI option, array given ===DONE=== mongodb-1.6.1/tests/manager/manager-debug-001.phpt0000644000076500000240000000111313572250760021114 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Writing debug log files --FILE-- ===DONE=== --EXPECTF-- %A[%s] PHONGO: DEBUG > Connection string: '%s' [%s] PHONGO: DEBUG > Creating Manager, phongo-1.%d.%d%S[%s] - mongoc-1.%s(%s), libbson-1.%s(%s), php-%s %A===DONE===%A mongodb-1.6.1/tests/manager/manager-debug-002.phpt0000644000076500000240000000061113572250760021117 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: mongodb.debug=stderr (connection string and version) --INI-- mongodb.debug=stderr --FILE-- ===DONE=== --EXPECTF-- %A[%s] PHONGO: DEBUG > Connection string: '%s' [%s] PHONGO: DEBUG > Creating Manager, phongo-1.%d.%d%S[%s] - mongoc-1.%s(%s), libbson-1.%s(%s), php-%s %A===DONE===%A mongodb-1.6.1/tests/manager/manager-debug-003.phpt0000644000076500000240000000041313572250760021120 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: mongodb.debug=stderr (date format) --INI-- mongodb.debug=stderr --FILE-- ===DONE=== --EXPECTF-- [%r(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{6}\+00:00)%r]%A ===DONE===%A mongodb-1.6.1/tests/manager/manager-destruct-001.phpt0000644000076500000240000000206713572250760021674 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager destruct should not free streams that are still in use --SKIPIF-- --INI-- ignore_repeated_errors=1 --FILE-- insert(array('_id' => 1)); $writeResult = $manager1->executeBulkWrite(NS, $bulk); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 2)); $writeResult = $manager2->executeBulkWrite(NS, $bulk); printf("Inserted: %d\n", $writeResult->getInsertedCount()); $manager2 = null; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 3)); $writeResult = $manager1->executeBulkWrite(NS, $bulk); printf("Inserted: %d\n", $writeResult->getInsertedCount()); ?> ===DONE=== --EXPECT-- Inserted: 1 Inserted: 1 Inserted: 1 ===DONE=== 
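Before the executeBulkWrite() tests that follow, here is a hedged sketch of the common pattern they all share: queue operations on a BulkWrite, execute them against a "db.collection" namespace, then read the collection back with a Query. The URI and namespace below are placeholders, not values from the test suite.

<?php
$manager = new MongoDB\Driver\Manager('mongodb://127.0.0.1/');
$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(['_id' => 1, 'x' => 1]);
$bulk->update(['_id' => 1], ['$set' => ['x' => 2]], ['multi' => false, 'upsert' => false]);
$result = $manager->executeBulkWrite('db.collection', $bulk);
printf("insertedCount: %d\n", $result->getInsertedCount());   // 1
printf("modifiedCount: %d\n", $result->getModifiedCount());   // 1
$cursor = $manager->executeQuery('db.collection', new MongoDB\Driver\Query([]));
var_dump(iterator_to_array($cursor));
?>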
mongodb-1.6.1/tests/manager/manager-executeBulkWrite-001.phpt0000644000076500000240000000245513572250760023333 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 2)); $bulk->update(array('x' => 2), array('$set' => array('x' => 1)), array("limit" => 1, "upsert" => false)); $bulk->update(array('_id' => 3), array('$set' => array('x' => 3)), array("limit" => 1, "upsert" => true)); $bulk->delete(array('x' => 1), array("limit" => 1)); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 2 matchedCount: 1 modifiedCount: 1 upsertedCount: 1 deletedCount: 1 upsertedId[3]: int(3) ===> Collection array(2) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(1) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(3) ["x"]=> int(3) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-002.phpt0000644000076500000240000000313513572250760023330 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() with upserted ids --SKIPIF-- --FILE-- false]); $bulk->update(array('x' => 'foo'), array('$set' => array('y' => 'foo')), array('upsert' => true)); $bulk->update(array('x' => 'bar'), array('$set' => array('y' => 'bar')), array('upsert' => true)); $bulk->update(array('x' => 'foo'), array('$set' => array('y' => 'bar'))); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 1 modifiedCount: 1 upsertedCount: 2 deletedCount: 0 upsertedId[0]: object(%s\ObjectId)#%d (%d) { ["oid"]=> string(24) "%s" } upsertedId[1]: object(%s\ObjectId)#%d (%d) { ["oid"]=> string(24) "%s" } ===> Collection array(2) { [0]=> object(stdClass)#%d (3) { ["_id"]=> object(%s\ObjectId)#%d (%d) { ["oid"]=> string(24) "%s" } ["x"]=> string(3) "foo" ["y"]=> string(3) "bar" } [1]=> object(stdClass)#%d (3) { ["_id"]=> object(%s\ObjectId)#%d (%d) { ["oid"]=> string(24) "%s" } ["x"]=> string(3) "bar" ["y"]=> string(3) "bar" } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-003.phpt0000644000076500000240000000214713572250760023333 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() delete one document --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->delete(array('x' => 1), array('limit' => 1)); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 1 ===> Collection array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-004.phpt0000644000076500000240000000202613572250760023330 0ustar 
alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() delete multiple documents --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->delete(array('x' => 1), array('limit' => 0)); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 2 ===> Collection array(0) { } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-005.phpt0000644000076500000240000000163413572250760023335 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() insert one document --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 1 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 ===> Collection array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-006.phpt0000644000076500000240000000370713572250760023341 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() insert one document (with embedded) --SKIPIF-- --FILE-- addAddress($sunnyvale); $hannes->addAddress($kopavogur); $mikola = new Person("Jeremy", 21); $michigan = new Address(48169, "USA"); $hannes->addFriend($mikola); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert($hannes); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); foreach($cursor as $object) { var_dump($object); } ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 1 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 ===> Collection object(Person)#%d (5) { ["name":protected]=> string(6) "Hannes" ["age":protected]=> int(42) ["addresses":protected]=> array(2) { [0]=> object(Address)#%d (2) { ["zip":protected]=> int(94086) ["country":protected]=> string(3) "USA" } [1]=> object(Address)#%d (2) { ["zip":protected]=> int(200) ["country":protected]=> string(7) "Iceland" } } ["friends":protected]=> array(1) { [0]=> object(Person)#%d (5) { ["name":protected]=> string(6) "Jeremy" ["age":protected]=> int(21) ["addresses":protected]=> array(0) { } ["friends":protected]=> array(0) { } ["secret":protected]=> string(4) "none" } } ["secret":protected]=> string(4) "none" } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-007.phpt0000644000076500000240000000222313572250760023332 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() update one document with no upsert --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->update( array('_id' => 1), array('$set' => array('x' => 2)), array('multi' => false, 'upsert' => false) ); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> 
WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 1 modifiedCount: 1 upsertedCount: 0 deletedCount: 0 ===> Collection array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(2) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-008.phpt0000644000076500000240000000263613572250760023343 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() update multiple documents with no upsert --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $bulk->insert(array('_id' => 3, 'x' => 3)); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->update( array('x' => 1), array('$set' => array('x' => 2)), array('multi' => true, 'upsert' => false) ); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 2 modifiedCount: 2 upsertedCount: 0 deletedCount: 0 ===> Collection array(3) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(2) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(2) } [2]=> object(stdClass)#%d (2) { ["_id"]=> int(3) ["x"]=> int(3) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-009.phpt0000644000076500000240000000201713572250760023335 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() update one document with upsert --SKIPIF-- --FILE-- update( array('_id' => 1), array('$set' => array('x' => 1)), array('multi' => false, 'upsert' => true) ); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 0 modifiedCount: 0 upsertedCount: 1 deletedCount: 0 upsertedId[0]: int(1) ===> Collection array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-010.phpt0000644000076500000240000000202413572250760023323 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() update multiple documents with upsert --SKIPIF-- --FILE-- update( array('_id' => 1), array('$set' => array('x' => 1)), array('multi' => true, 'upsert' => true) ); $result = $manager->executeBulkWrite(NS, $bulk); echo "\n===> WriteResult\n"; printWriteResult($result); echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 0 modifiedCount: 0 upsertedCount: 1 deletedCount: 0 upsertedId[0]: int(1) ===> Collection array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-011.phpt0000644000076500000240000000427413572250760023335 0ustar alcaeusstaff--TEST-- MongoDB\Driver\BulkWrite: bypassDocumentValidation option --SKIPIF-- --FILE-- COLLECTION_NAME, 
'validator' => ['x' => ['$type' => 'number']], ]); $manager->executeCommand(DATABASE_NAME, $command); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['_id' => 1, 'x' => 1]); $bulk->insert(['_id' => 2, 'x' => 2]); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(['bypassDocumentValidation' => true]); $bulk->update(['_id' => 2], ['$set' => ['x' => 'two']]); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(['bypassDocumentValidation' => true]); $bulk->insert(['_id' => 3, 'x' => 'three']); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['_id' => 4, 'x' => 'four']); echo throws(function() use($manager, $bulk) { $manager->executeBulkWrite(NS, $bulk); }, "MongoDB\Driver\Exception\BulkWriteException"), "\n"; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->update(['_id' => 1], ['$set' => ['x' => 'one']]); echo throws(function() use($manager, $bulk) { $manager->executeBulkWrite(NS, $bulk); }, "MongoDB\Driver\Exception\BulkWriteException"), "\n"; $bulk = new MongoDB\Driver\BulkWrite(); $bulk->update(['_id' => 2], ['$set' => ['x' => 2]]); $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query([])); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\BulkWriteException Document failed validation OK: Got MongoDB\Driver\Exception\BulkWriteException Document failed validation array(3) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(1) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(2) } [2]=> object(stdClass)#%d (2) { ["_id"]=> int(3) ["x"]=> string(5) "three" } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-012.phpt0000644000076500000240000000164113572250760023331 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() with legacy write concern (replica set primary) --SKIPIF-- --FILE-- insert(['wc' => $wc]); $options = [ 'writeConcern' => new MongoDB\Driver\WriteConcern($wc), ]; $result = $manager->executeBulkWrite(NS, $bulk, $options); var_dump($result->isAcknowledged()); var_dump($result->getInsertedCount()); } ?> ===DONE=== --EXPECT-- bool(false) NULL bool(true) int(1) bool(true) int(1) bool(true) int(1) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite-013.phpt0000644000076500000240000000314413572250760023332 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() pins transaction to server --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $pinnedServer = $session->getServer(); var_dump($pinnedServer instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $session->commitTransaction(); var_dump($session->getServer() == $pinnedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof 
\MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(true) bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-001.phpt0000644000076500000240000000276513572250760024550 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() with duplicate key errors (ordered) --SKIPIF-- --FILE-- true]); $bulk->insert(array('_id' => 1)); $bulk->insert(array('_id' => 1)); $bulk->insert(array('_id' => 2)); $bulk->insert(array('_id' => 2)); try { $result = $manager->executeBulkWrite(NS, $bulk); echo "FAILED\n"; } catch (MongoDB\Driver\Exception\BulkWriteException $e) { printf("BulkWriteException: %s\n", $e->getMessage()); echo "\n===> WriteResult\n"; printWriteResult($e->getWriteResult()); } echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- BulkWriteException:%S E11000 duplicate key error %s: phongo.manager_manager_executeBulkWrite_error_001%sdup key: { %S: 1 } ===> WriteResult server: %s:%d insertedCount: 1 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "%s" ["code"]=> int(11000) ["index"]=> int(1) ["info"]=> NULL } writeError[1].message: %s writeError[1].code: 11000 ===> Collection array(1) { [0]=> object(stdClass)#%d (1) { ["_id"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-002.phpt0000644000076500000240000000360513572250760024543 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() with duplicate key errors (unordered) --SKIPIF-- --FILE-- false]); $bulk->insert(array('_id' => 1)); $bulk->insert(array('_id' => 1)); $bulk->insert(array('_id' => 2)); $bulk->insert(array('_id' => 2)); try { $result = $manager->executeBulkWrite(NS, $bulk); echo "FAILED\n"; } catch (MongoDB\Driver\Exception\BulkWriteException $e) { printf("BulkWriteException: %s\n", $e->getMessage()); echo "\n===> WriteResult\n"; printWriteResult($e->getWriteResult()); } echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- BulkWriteException: Multiple write errors: "%SE11000 duplicate key error %s: phongo.manager_manager_executeBulkWrite_error_002%sdup key: { %S: 1 }", "%SE11000 duplicate key error %s: phongo.manager_manager_executeBulkWrite_error_002%sdup key: { %S: 2 }" ===> WriteResult server: %s:%d insertedCount: 2 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "%s" ["code"]=> int(11000) ["index"]=> int(1) ["info"]=> NULL } writeError[1].message: %s writeError[1].code: 11000 object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "%s" ["code"]=> int(11000) ["index"]=> int(3) ["info"]=> NULL } writeError[3].message: %s writeError[3].code: 11000 ===> Collection array(2) { [0]=> object(stdClass)#%d (1) { ["_id"]=> int(1) } [1]=> object(stdClass)#%d (1) { ["_id"]=> int(2) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-003.phpt0000644000076500000240000000263113572250760024542 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() write concern error --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); try { $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(30)); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { 
printf("BulkWriteException: %s\n", $e->getMessage()); echo "\n===> WriteResult\n"; printWriteResult($e->getWriteResult()); } echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- BulkWriteException: Not enough data-bearing nodes ===> WriteResult server: %s:%d insertedCount: 1 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 object(MongoDB\Driver\WriteConcernError)#%d (%d) { ["message"]=> string(29) "Not enough data-bearing nodes" ["code"]=> int(100) ["info"]=> NULL } writeConcernError.message: Not enough data-bearing nodes writeConcernError.code: 100 writeConcernError.info: NULL ===> Collection array(1) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(1) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-004.phpt0000644000076500000240000000274113572250760024545 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() delete write error --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->delete(['$foo' => 1], ['limit' => 1]); try { $manager->executeBulkWrite(NS, $bulk); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { printf("BulkWriteException: %s\n", $e->getMessage()); echo "\n===> WriteResult\n"; printWriteResult($e->getWriteResult()); } echo "\n===> Collection\n"; $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- BulkWriteException: unknown top level operator: $foo ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(32) "unknown top level operator: $foo" ["code"]=> int(2) ["index"]=> int(0) ["info"]=> NULL } writeError[0].message: unknown top level operator: $foo writeError[0].code: 2 ===> Collection array(1) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(1) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-005.phpt0000644000076500000240000000367013572250760024550 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() WriteResult accessible for network error --SKIPIF-- --FILE-- selectServer(new \MongoDB\Driver\ReadPreference('primary')); configureTargetedFailPoint($server, 'failCommand', [ 'times' => 1 ], [ 'failCommands' => ['delete'], 'closeConnection' => true, ]); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 1]]); $bulk->delete(['x' => 1]); try { $server->executeBulkWrite(NS, $bulk); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { printf("%s(%d): %s\n", get_class($e), $e->getCode(), $e->getMessage()); $prev = $e->getPrevious(); printf("%s(%d): %s\n", get_class($prev), $prev->getCode(), $prev->getMessage()); var_dump($e->getWriteResult()); } ?> ===DONE=== --EXPECTF-- MongoDB\Driver\Exception\BulkWriteException(0): Bulk write failed due to previous MongoDB\Driver\Exception\ConnectionTimeoutException: Failed to send "delete" command with database "%s": Failed to read 4 bytes: socket error or timeout MongoDB\Driver\Exception\ConnectionTimeoutException(%d): Failed to send "delete" command with database "%s": Failed to read 4 bytes: socket error or timeout object(MongoDB\Driver\WriteResult)#%d (9) { ["nInserted"]=> int(1) ["nMatched"]=> int(1) ["nModified"]=> int(1) 
["nRemoved"]=> int(0) ["nUpserted"]=> int(0) ["upsertedIds"]=> array(0) { } ["writeErrors"]=> array(0) { } ["writeConcernError"]=> NULL ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (0) { } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-006.phpt0000644000076500000240000000225713572250760024551 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() update write error --SKIPIF-- --FILE-- insert(array('x' => 1)); $manager->executeBulkWrite(NS, $bulk); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->update(['x' => 1], ['$foo' => 1]); try { $manager->executeBulkWrite(NS, $bulk); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { printf("BulkWriteException: %s\n", $e->getMessage()); echo "\n===> WriteResult\n"; printWriteResult($e->getWriteResult()); } ?> ===DONE=== --EXPECTF-- BulkWriteException: Unknown modifier: $foo%S ===> WriteResult server: %s:%d insertedCount: 0 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "Unknown modifier: $foo%S" ["code"]=> int(9) ["index"]=> int(0) ["info"]=> NULL } writeError[0].message: Unknown modifier: $foo%S writeError[0].code: 9 ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-007.phpt0000644000076500000240000000223013572250760024541 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() should not issue warning before exception --FILE-- 1]); echo throws(function() use ($manager) { $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; // Valid host refuses connection $manager = new MongoDB\Driver\Manager('mongodb://localhost:54321', ['serverSelectionTimeoutMS' => 1]); echo throws(function() use ($manager) { $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-008.phpt0000644000076500000240000000115413572250760024546 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() with empty BulkWrite --SKIPIF-- --FILE-- executeBulkWrite(NS, new MongoDB\Driver\BulkWrite); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot do an empty bulk write ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-009.phpt0000644000076500000240000000351013572250760024545 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() with invalid options --SKIPIF-- --FILE-- insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => 
'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-010.phpt0000644000076500000240000000251613572250760024542 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() cannot combine session with unacknowledged write concern --SKIPIF-- --FILE-- insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, [ 'session' => $manager->startSession(), 'writeConcern' => new MongoDB\Driver\WriteConcern(0), ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { $manager = new MongoDB\Driver\Manager(URI, ['w' => 0]); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, [ 'session' => $manager->startSession(), ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot combine "session" option with an unacknowledged write concern OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot combine "session" option with an unacknowledged write concern ===DONE=== mongodb-1.6.1/tests/manager/manager-executeBulkWrite_error-011.phpt0000644000076500000240000000422713572250760024544 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeBulkWrite() BulkWriteException inherits labels from previous exception --SKIPIF-- --FILE-- selectServer(new \MongoDB\Driver\ReadPreference('primary')); // Create collection since it can't be (automatically) done within the transaction $majority = new MongoDB\Driver\WriteConcern(MongoDB\Driver\WriteConcern::MAJORITY); $server->executeWriteCommand( DATABASE_NAME, new MongoDB\Driver\Command(['create' => COLLECTION_NAME]), ['writeConcern' => $majority] ); configureTargetedFailPoint($server, 'failCommand', [ 'times' => 1 ], [ 'failCommands' => ['insert'], 'closeConnection' => true, ]); $session = $manager->startSession(); $session->startTransaction(); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); try { $server->executeBulkWrite(NS, $bulk, ['session' => $session]); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { printf("%s(%d): %s\n", get_class($e), $e->getCode(), $e->getMessage()); var_dump($e->hasErrorLabel('TransientTransactionError')); $prev = $e->getPrevious(); printf("%s(%d): %s\n", get_class($prev), $prev->getCode(), $prev->getMessage()); var_dump($prev->hasErrorLabel('TransientTransactionError')); } ?> ===DONE=== --EXPECTF-- MongoDB\Driver\Exception\BulkWriteException(0): Bulk write failed due to previous MongoDB\Driver\Exception\ConnectionTimeoutException: Failed to send "insert" command with database "%s": Failed to read 4 bytes: socket error or timeout bool(true) 
MongoDB\Driver\Exception\ConnectionTimeoutException(%d): Failed to send "insert" command with database "%s": Failed to read 4 bytes: socket error or timeout bool(true) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand-001.phpt0000644000076500000240000000274113572250760022777 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() --SKIPIF-- --FILE-- 1)); $result = $manager->executeCommand(DATABASE_NAME, $command); var_dump($command); var_dump($result instanceof MongoDB\Driver\Cursor); var_dump($result); echo "\nDumping response document:\n"; var_dump(current($result->toArray())); $server = $result->getServer(); var_dump($server instanceof MongoDB\Driver\Server); var_dump($server->getHost()); var_dump($server->getPort()); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Command)#%d (%d) { ["command"]=> object(stdClass)#%d (1) { ["ping"]=> int(1) } } bool(true) object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> NULL ["query"]=> NULL ["command"]=> object(MongoDB\Driver\Command)#%d (%d) { ["command"]=> object(stdClass)#%d (%d) { ["ping"]=> int(1) } } ["readPreference"]=> NULL ["session"]=> %a ["isDead"]=> bool(false) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } Dumping response document: object(stdClass)#%d (%d) { ["ok"]=> float(1)%A } bool(true) string(%d) "%s" int(%d) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand-002.phpt0000644000076500000240000000260013572250760022772 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() takes a read preference in options array --SKIPIF-- --FILE-- 1]); $cursor = $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => $primary]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $command = new MongoDB\Driver\Command(['ping' => 1]); $cursor = $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => $secondary]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand-003.phpt0000644000076500000240000000252413572250760023000 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() takes a read preference as legacy option --SKIPIF-- --FILE-- 1]); $cursor = $manager->executeCommand(DATABASE_NAME, $command, $primary); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $command = new MongoDB\Driver\Command(['ping' => 1]); $cursor = $manager->executeCommand(DATABASE_NAME, $command, $secondary); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 
'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand-004.phpt0000644000076500000240000000313313572250760022776 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() options (MONGOC_CMD_RAW) --SKIPIF-- --FILE-- observe( function() use ($manager) { $command = new MongoDB\Driver\Command([ 'ping' => true, ]); try { $manager->executeCommand( DATABASE_NAME, $command, [ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY), 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL), 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY), ] ); } catch ( Exception $e ) { // Ignore exception that ping doesn't support writeConcern } }, function(stdClass $command) { echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n"; echo "Read Concern: ", $command->readConcern->level, "\n"; echo "Write Concern: ", $command->writeConcern->w, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Preference: secondary Read Concern: local Write Concern: majority ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand-005.phpt0000644000076500000240000000330713572250760023002 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() pins transaction to server --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); $pinnedServer = $session->getServer(); var_dump($pinnedServer instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $session->commitTransaction(); var_dump($session->getServer() == $pinnedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(true) bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand_error-001.phpt0000644000076500000240000000214713572250760024210 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() should not issue warning before exception --FILE-- 1]); // Invalid host cannot be resolved $manager = new MongoDB\Driver\Manager('mongodb://invalid.host:27017', ['serverSelectionTimeoutMS' => 1]); echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; // Valid host refuses connection $manager = new MongoDB\Driver\Manager('mongodb://localhost:54321', ['serverSelectionTimeoutMS' => 1]); echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; ?> 
===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand_error-002.phpt0000644000076500000240000000605213572250760024210 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() with invalid options (MONGOC_CMD_RAW) --SKIPIF-- --FILE-- 1]); echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand_error-003.phpt0000644000076500000240000000115513572250760024210 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() connection error --FILE-- 1]); echo throws(function() use($manager, $command) { $manager->executeCommand('test', $command); }, 
"MongoDB\Driver\Exception\ConnectionTimeoutException"), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand_error-004.phpt0000644000076500000240000000116513572250760024212 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() with empty command document --SKIPIF-- --FILE-- executeCommand(DATABASE_NAME, $command); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Empty command document ===DONE=== mongodb-1.6.1/tests/manager/manager-executeCommand_error-005.phpt0000644000076500000240000000170713572250760024215 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeCommand() cannot combine session with unacknowledged write concern --SKIPIF-- --FILE-- COLLECTION_NAME, 'documents' => [['x' => 1]], ]); $manager->executeCommand(DATABASE_NAME, $command, [ 'session' => $manager->startSession(), 'writeConcern' => new MongoDB\Driver\WriteConcern(0), ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot combine "session" option with an unacknowledged write concern ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery-001.phpt0000644000076500000240000000355413572250760022531 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() one document (OP_QUERY) --SKIPIF-- =', '3.1'); ?> --FILE-- insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1))); $qr = $manager->executeQuery(NS, $query); var_dump($qr instanceof MongoDB\Driver\Cursor); var_dump($qr); $server = $qr->getServer(); var_dump($server instanceof MongoDB\Driver\Server); var_dump($server->getHost()); var_dump($server->getPort()); var_dump(iterator_to_array($qr)); ?> ===DONE=== --EXPECTF-- bool(true) object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(32) "manager_manager_executeQuery_001" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(3) } ["options"]=> object(stdClass)#%d (%d) { ["projection"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> NULL ["session"]=> NULL ["isDead"]=> bool(false) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } bool(true) string(%d) "%s" int(%d) array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["y"]=> int(4) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery-002.phpt0000644000076500000240000000350213572250760022523 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() one document (find command) --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1))); $qr = $manager->executeQuery(NS, $query); var_dump($qr instanceof MongoDB\Driver\Cursor); var_dump($qr); $server = $qr->getServer(); var_dump($server 
instanceof MongoDB\Driver\Server); var_dump($server->getHost()); var_dump($server->getPort()); var_dump(iterator_to_array($qr)); ?> ===DONE=== --EXPECTF-- bool(true) object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(32) "manager_manager_executeQuery_002" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(3) } ["options"]=> object(stdClass)#%d (%d) { ["projection"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> NULL ["session"]=> NULL ["isDead"]=> bool(false) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } bool(true) string(%d) "%s" int(%d) array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["y"]=> int(4) } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery-003.phpt0000644000076500000240000000305113572250760022523 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() takes a read preference in options array --SKIPIF-- --FILE-- insert(['_id' => 1, 'x' => 2, 'y' => 3]); $manager->executeBulkWrite(NS, $bulk); $primary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $secondary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY); echo "Testing primary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, ['readPreference' => $primary]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, ['readPreference' => $secondary]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery-004.phpt0000644000076500000240000000277513572250760022540 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() takes a read preference as legacy option --SKIPIF-- --FILE-- insert(['_id' => 1, 'x' => 2, 'y' => 3]); $manager->executeBulkWrite(NS, $bulk); $primary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $secondary = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY); echo "Testing primary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, $primary); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, $secondary); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 
'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery-005.phpt0000644000076500000240000000332013572250760022524 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() with filter and projection --SKIPIF-- --FILE-- insert(array('_id' => 1, array('x' => 2, 'y' => 3))); $bulk->insert(array('_id' => 2, array('x' => 3, 'y' => 4))); $bulk->insert(array('_id' => 3, array('x' => 4, 'y' => 5))); $manager->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array()); $qr = $manager->executeQuery(NS, $query); $qr->setTypeMap(array("root"=> "MyArrayObject", "document"=> "MyArrayObject", "array" => "MyArrayObject")); foreach($qr as $obj) { var_dump($obj); } ?> ===DONE=== --EXPECTF-- object(MyArrayObject)#%d (1) { [%s]=> array(2) { ["_id"]=> int(1) [0]=> object(MyArrayObject)#%d (1) { [%s]=> array(2) { ["x"]=> int(2) ["y"]=> int(3) } } } } object(MyArrayObject)#%d (1) { [%s]=> array(2) { ["_id"]=> int(2) [0]=> object(MyArrayObject)#%d (1) { [%s]=> array(2) { ["x"]=> int(3) ["y"]=> int(4) } } } } object(MyArrayObject)#%d (1) { [%s]=> array(2) { ["_id"]=> int(3) [0]=> object(MyArrayObject)#%d (1) { [%s]=> array(2) { ["x"]=> int(4) ["y"]=> int(5) } } } } ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery-006.phpt0000644000076500000240000000311213572250760022524 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() pins transaction to server --SKIPIF-- --FILE-- executeReadWriteCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); $pinnedServer = $session->getServer(); var_dump($pinnedServer instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $session->commitTransaction(); var_dump($session->getServer() == $pinnedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(true) bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery_error-001.phpt0000644000076500000240000000206413572250760023735 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() should not issue warning before exception --FILE-- 1]); echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; // Valid host refuses connection $manager = new MongoDB\Driver\Manager('mongodb://localhost:54321', ['serverSelectionTimeoutMS' => 1]); echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s OK: Got 
MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery_error-002.phpt0000644000076500000240000000304413572250760023735 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() with invalid options --FILE-- 3], ['projection' => ['y' => 1]]); echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query, ['readPreference' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query, ['readPreference' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given ===DONE=== mongodb-1.6.1/tests/manager/manager-executeQuery_error-003.phpt0000644000076500000240000000153213572250760023736 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeQuery() exposes error document via CommandException --SKIPIF-- --FILE-- 1]); try { $manager->executeQuery(NS, $query); } catch (\MongoDB\Driver\Exception\CommandException $e) { printf("%s(%d): %s\n", get_class($e), $e->getCode(), $e->getMessage()); $doc = $e->getResultDocument(); var_dump($doc->errmsg === $e->getMessage()); var_dump($doc->code === $e->getCode()); } ?> ===DONE=== --EXPECT-- MongoDB\Driver\Exception\CommandException(2): unknown top level operator: $foo bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeReadCommand-001.phpt0000644000076500000240000000255213572250760023573 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeReadCommand() --SKIPIF-- --FILE-- observe( function() use ($manager) { $command = new MongoDB\Driver\Command( [ 'aggregate' => NS, 'pipeline' => [], 'cursor' => new stdClass(), ] ); $manager->executeReadCommand( DATABASE_NAME, $command, [ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY), 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::MAJORITY), ] ); }, function(stdClass $command) { echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n"; echo "Read Concern: ", $command->readConcern->level, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Preference: secondary Read Concern: majority ===DONE=== mongodb-1.6.1/tests/manager/manager-executeReadCommand-002.phpt0000644000076500000240000000332313572250760023571 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeReadCommand() pins transaction to server --SKIPIF-- --FILE-- executeReadCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new 
\MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $manager->executeReadCommand(DATABASE_NAME, $command, ['session' => $session]); $pinnedServer = $session->getServer(); var_dump($pinnedServer instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $session->commitTransaction(); var_dump($session->getServer() == $pinnedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(true) bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeReadCommand_error-001.phpt0000644000076500000240000000461313572250760025004 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeReadCommand() with invalid options --SKIPIF-- --FILE-- 1]); echo throws(function() use ($manager, $command) { $manager->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadCommand(DATABASE_NAME, $command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given ===DONE=== mongodb-1.6.1/tests/manager/manager-executeReadWriteCommand-001.phpt0000644000076500000240000000231513572250760024603 0ustar alcaeusstaff--TEST-- 
MongoDB\Driver\Manager::executeReadWriteCommand() --SKIPIF-- --FILE-- observe( function() use ($manager) { $command = new MongoDB\Driver\Command( [ 'findAndModify' => NS, 'update' => [ '$set' => [ 'foo' => 'bar' ] ], ] ); $manager->executeReadWriteCommand( DATABASE_NAME, $command, [ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL), 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY), ] ); }, function(stdClass $command) { echo "Read Concern: ", $command->readConcern->level, "\n"; echo "Write Concern: ", $command->writeConcern->w, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Concern: local Write Concern: majority ===DONE=== mongodb-1.6.1/tests/manager/manager-executeReadWriteCommand-002.phpt0000644000076500000240000000334213572250760024605 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeReadWriteCommand() pins transaction to server --SKIPIF-- --FILE-- executeReadWriteCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => $session]); $pinnedServer = $session->getServer(); var_dump($pinnedServer instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $session->commitTransaction(); var_dump($session->getServer() == $pinnedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(true) bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeReadWriteCommand_error-001.phpt0000644000076500000240000000464213572250760026021 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeReadWriteCommand() with invalid options --SKIPIF-- --FILE-- 1]); echo throws(function() use ($manager, $command) { $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' 
=> new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== mongodb-1.6.1/tests/manager/manager-executeReadWriteCommand_error-002.phpt0000644000076500000240000000317413572250760026021 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeReadWriteCommand() cannot combine session with unacknowledged write concern --SKIPIF-- --FILE-- COLLECTION_NAME, 'documents' => [['x' => 1]], ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command, [ 'session' => $manager->startSession(), 'writeConcern' => new MongoDB\Driver\WriteConcern(0), ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { $manager = new MongoDB\Driver\Manager(URI, ['w' => 0]); $command = new MongoDB\Driver\Command([ 'insert' => COLLECTION_NAME, 'documents' => [['x' => 1]], ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command, [ 'session' => $manager->startSession(), ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot combine "session" option with an unacknowledged write concern OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot combine "session" option with an unacknowledged write concern ===DONE=== mongodb-1.6.1/tests/manager/manager-executeWriteCommand-001.phpt0000644000076500000240000000206513572250760024011 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeWriteCommand() --SKIPIF-- --FILE-- insert(['a' => 1]); $manager->executeBulkWrite(NS, $bw); (new CommandObserver)->observe( function() use ($manager) { $command = new MongoDB\Driver\Command([ 'drop' => COLLECTION_NAME, ]); $manager->executeWriteCommand( DATABASE_NAME, $command, [ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY), ] ); }, function(stdClass $command) { echo "Write Concern: ", $command->writeConcern->w, "\n"; } ); ?> ===DONE=== --EXPECTF-- Write Concern: majority ===DONE=== mongodb-1.6.1/tests/manager/manager-executeWriteCommand-002.phpt0000644000076500000240000000336713572250760024020 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeWriteCommand() pins transaction to server --SKIPIF-- --FILE-- executeReadWriteCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['_id' => 
'foo'], 'upsert' => true, 'new' => true, 'update' => ['x' => 1] ]); $manager->executeWriteCommand(DATABASE_NAME, $command, ['session' => $session]); $pinnedServer = $session->getServer(); var_dump($pinnedServer instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $session->commitTransaction(); var_dump($session->getServer() == $pinnedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(false) bool(false) bool(true) bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/manager/manager-executeWriteCommand_error-001.phpt0000644000076500000240000000333713572250760025225 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeWriteCommand() with invalid options --SKIPIF-- --FILE-- 1]); echo throws(function() use ($manager, $command) { $manager->executeWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($manager, $command) { $manager->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== mongodb-1.6.1/tests/manager/manager-executeWriteCommand_error-002.phpt0000644000076500000240000000203213572250760025215 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeWriteCommand() throws CommandException for invalid writeConcern --SKIPIF-- --FILE-- COLLECTION_NAME, 'query' => ['_id' => 'foo'], 'update' => ['foo' => ['bar']], 'upsert' => true, 'new' => true, ]); try { $manager->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new MongoDB\Driver\WriteConcern("undefined")]); } catch (MongoDB\Driver\Exception\CommandException $e) { printf("%s(%d): %s\n", get_class($e), $e->getCode(), $e->getMessage()); } ?> ===DONE=== --EXPECT-- MongoDB\Driver\Exception\CommandException(79): Write Concern error: No write concern mode named 'undefined' found in replica set configuration ===DONE=== mongodb-1.6.1/tests/manager/manager-executeWriteCommand_error-003.phpt0000644000076500000240000000154713572250760025230 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeWriteCommand() throws CommandException for unsupported update operator --SKIPIF-- --FILE-- COLLECTION_NAME, 'query' => ['_id' => 'foo'], 'upsert' => true, 'new' => true, ]); try { 
$manager->executeWriteCommand(DATABASE_NAME, $command); } catch (MongoDB\Driver\Exception\CommandException $e) { printf("%s(%d): %s\n", get_class($e), $e->getCode(), $e->getMessage()); } ?> ===DONE=== --EXPECT-- MongoDB\Driver\Exception\CommandException(9): Either an update or remove=true must be specified ===DONE=== mongodb-1.6.1/tests/manager/manager-executeWriteCommand_error-004.phpt0000644000076500000240000000273513572250760025231 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeWriteCommand() cannot combine session with unacknowledged write concern --SKIPIF-- --FILE-- COLLECTION_NAME, 'documents' => [['x' => 1]], ]); $manager->executeWriteCommand(DATABASE_NAME, $command, [ 'session' => $manager->startSession(), 'writeConcern' => new MongoDB\Driver\WriteConcern(0), ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { $manager = new MongoDB\Driver\Manager(URI, ['w' => 0]); $command = new MongoDB\Driver\Command([ 'insert' => COLLECTION_NAME, 'documents' => [['x' => 1]], ]); $manager->executeWriteCommand(DATABASE_NAME, $command, [ 'session' => $manager->startSession(), ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot combine "session" option with an unacknowledged write concern OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot combine "session" option with an unacknowledged write concern ===DONE=== mongodb-1.6.1/tests/manager/manager-getreadconcern-001.phpt0000644000076500000240000000267313572250760023025 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::getReadConcern() --FILE-- 'local']], [null, ['readconcernlevel' => 'majority']], [null, ['readconcernlevel' => 'not-yet-supported']], ['mongodb://127.0.0.1/?readconcernlevel=local', ['readconcernlevel' => 'majority']], ]; foreach ($tests as $i => $test) { list($uri, $options) = $test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getReadConcern()); // Test for !return_value_used $manager->getReadConcern(); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadConcern)#%d (%d) { } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(5) "local" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(8) "majority" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(17) "not-yet-supported" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(5) "local" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(8) "majority" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(17) "not-yet-supported" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(8) "majority" } ===DONE=== mongodb-1.6.1/tests/manager/manager-getreadpreference-001.phpt0000644000076500000240000000426713572250760023515 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::getReadPreference() --FILE-- 'primaryPreferred')), array('mongodb://127.0.0.1/?readPreference=secondary', array('readPreference' => 'secondaryPreferred')), array('mongodb://127.0.0.1/?readPreference=secondary&readPreferenceTags=dc:ny,use:reports&readPreferenceTags=', array()), array('mongodb://127.0.0.1/?readPreference=secondary', array('readPreferenceTags' => array(array('dc' => 'ny', 'use' => 'reports'), array()))), array('mongodb://127.0.0.1/?readPreference=secondary&readPreferenceTags=dc:ny,use:reports', array('readPreferenceTags' => array(array('dc' => 'ca')))), ); foreach ($tests as $i => $test) { list($uri, $options) = 
$test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getReadPreference()); // Test for !return_value_used $manager->getReadPreference(); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(2) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" ["use"]=> string(7) "reports" } [1]=> object(stdClass)#%d (%d) { } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(2) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" ["use"]=> string(7) "reports" } [1]=> object(stdClass)#%d (%d) { } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(1) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ca" } } } ===DONE=== mongodb-1.6.1/tests/manager/manager-getservers-001.phpt0000644000076500000240000000221113572250760022217 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::getServers() (standalone) --SKIPIF-- --FILE-- getServers(); printf("Known servers: %d\n", count($servers)); echo "Pinging\n"; $command = new MongoDB\Driver\Command(array('ping' => 1)); $manager->executeCommand(DATABASE_NAME, $command); $servers = $manager->getServers(); printf("Known servers: %d\n", count($servers)); foreach ($servers as $server) { printf("Found server: %s:%d\n", $server->getHost(), $server->getPort()); assertServerType($server->getType()); } ?> ===DONE=== --EXPECTF-- Known servers: 0 Pinging Known servers: 1 Found server: %s:%d Found standalone server type: 1 ===DONE=== mongodb-1.6.1/tests/manager/manager-getservers-002.phpt0000644000076500000240000000273413572250760022232 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::getServers() (replica set) --SKIPIF-- --FILE-- getServers(); printf("Known servers: %d\n", count($servers)); echo "Pinging\n"; $command = new MongoDB\Driver\Command(array('ping' => 1)); $manager->executeCommand(DATABASE_NAME, $command); $servers = $manager->getServers(); printf("Known servers: %d\n", count($servers)); foreach ($servers as $server) { printf("Found server: %s:%d\n", $server->getHost(), $server->getPort()); assertServerType($server->getType()); } ?> ===DONE=== --EXPECTF-- Known servers: 0 Pinging Known servers: 3 Found server: %s:%d Found replica set server type: %r(4|5|6)%r Found server: %s:%d Found replica set server type: %r(4|5|6)%r Found server: %s:%d Found replica set server type: %r(4|5|6)%r ===DONE=== mongodb-1.6.1/tests/manager/manager-getwriteconcern-001.phpt0000644000076500000240000000373113572250760023240 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::getWriteConcern() --FILE-- 1, 'journal' => true)), array(null, array('w' => 'majority', 'journal' => true)), array('mongodb://127.0.0.1/?w=majority&journal=true', array('w' => 1, 'journal' => false)), array('mongodb://127.0.0.1/?wtimeoutms=1000', array()), array(null, array('wtimeoutms' => 1000)), array('mongodb://127.0.0.1/?w=2', array('wtimeoutms' => 1000)), array('mongodb://127.0.0.1/?w=majority', array('wtimeoutms' => 1000)), array('mongodb://127.0.0.1/?w=customTagSet', array('wtimeoutms' => 1000)), ); foreach ($tests as $i => $test) { list($uri, $options) = 
$test; $manager = new MongoDB\Driver\Manager($uri, $options); var_dump($manager->getWriteConcern()); // Test for !return_value_used $manager->getWriteConcern(); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) ["j"]=> bool(true) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" ["j"]=> bool(true) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) ["j"]=> bool(false) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(2) ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(12) "customTagSet" ["wtimeout"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/manager/manager-invalidnamespace.phpt0000644000076500000240000000164213572250760023042 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Invalid namespace --SKIPIF-- --FILE-- insert(array("my" => "value")); echo throws(function() use($manager, $bulk) { $manager->executeBulkWrite("database", $bulk); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() use($manager) { $manager->executeQuery("database", new MongoDB\Driver\Query(array("document "=> 1))); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Invalid namespace provided: database OK: Got MongoDB\Driver\Exception\InvalidArgumentException Invalid namespace provided: database ===DONE=== mongodb-1.6.1/tests/manager/manager-selectserver-001.phpt0000644000076500000240000000404613572250760022544 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::selectServer() select a server from SDAM based on ReadPreference --SKIPIF-- --FILE-- selectServer($rp); $rp2 = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $server2 = $manager->selectServer($rp2); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $server->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1))); $cursor = $server->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server == $cursor->getServer()); var_dump(iterator_to_array($cursor)); $query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1))); $cursor = $server2->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server2 == $cursor->getServer()); var_dump(iterator_to_array($cursor)); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); throws(function() use($server2, $bulk) { $server2->executeBulkWrite(NS, $bulk); }, "MongoDB\Driver\Exception\BulkWriteException"); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["y"]=> int(4) } } 
bool(true) bool(true) array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["y"]=> int(4) } } OK: Got MongoDB\Driver\Exception\BulkWriteException ===DONE=== mongodb-1.6.1/tests/manager/manager-selectserver_error-001.phpt0000644000076500000240000000211513572250760023750 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::selectServer() should not issue warning before exception --FILE-- 1]); echo throws(function() use ($manager, $rp) { $manager->selectServer($rp); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; // Valid host refuses connection $manager = new MongoDB\Driver\Manager('mongodb://localhost:54321', ['serverSelectionTimeoutMS' => 1]); echo throws(function() use ($manager, $rp) { $manager->selectServer($rp); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s ===DONE=== mongodb-1.6.1/tests/manager/manager-set-uri-options-001.phpt0000644000076500000240000000253613572250760023121 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Logging into MongoDB using credentials from $options --SKIPIF-- --FILE-- $url["user"], "password" => $url["pass"], ) + $args; $manager = new MongoDB\Driver\Manager($dsn, $options); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(array("my" => "value")); $inserted = $manager->executeBulkWrite(NS, $bulk)->getInsertedCount(); printf("Inserted: %d\n", $inserted); $options["username"] = "not-found-user"; $manager = new MongoDB\Driver\Manager($dsn, $options); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(array("my" => "value")); echo throws(function() use ($manager, $bulk) { $inserted = $manager->executeBulkWrite(NS, $bulk)->getInsertedCount(); printf("Incorrectly inserted: %d\n", $inserted); }, 'MongoDB\Driver\Exception\BulkWriteException'), "\n"; ?> ===DONE=== --EXPECTF-- Inserted: 1 OK: Got MongoDB\Driver\Exception\BulkWriteException Bulk write failed due to previous MongoDB\Driver\Exception\AuthenticationException: Authentication failed. 
===DONE=== mongodb-1.6.1/tests/manager/manager-set-uri-options-002.phpt0000644000076500000240000000274513572250760023124 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Connecting to MongoDB using "ssl" from $options --SKIPIF-- --FILE-- array( "verify_peer" => false, "verify_peer_name" => false, "allow_self_signed" => true, ), ); $context = stream_context_create($opts); $options = array( "ssl" => false, "serverselectiontimeoutms" => 100, ); /* The server requires SSL */ $manager = new MongoDB\Driver\Manager(URI, $options, array("context" => $context)); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(array("my" => "value")); echo throws(function() use ($manager, $bulk) { $inserted = $manager->executeBulkWrite(NS, $bulk)->getInsertedCount(); printf("Inserted incorrectly: %d\n", $inserted); }, "MongoDB\Driver\Exception\ConnectionTimeoutException"), "\n"; $options = array( "ssl" => true, ); $manager = new MongoDB\Driver\Manager(URI, $options, array("context" => $context)); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(array("my" => "value")); $inserted = $manager->executeBulkWrite(NS, $bulk)->getInsertedCount(); printf("Inserted: %d\n", $inserted); ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): [%scalling ismaster on '%s'] Inserted: 1 ===DONE=== mongodb-1.6.1/tests/manager/manager-set-uri-options-003.phpt0000644000076500000240000000076613572250760023126 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: SSL options in URI and 'options' don't leak --SKIPIF-- --FILE-- "does-not-matter", ); $manager = new MongoDB\Driver\Manager(URI . '&sslclientcertificatekeypassword=does-also-not-matter', [], $options); ?> ===DONE=== --EXPECTF-- ===DONE=== mongodb-1.6.1/tests/manager/manager-var-dump-001.phpt0000644000076500000240000000227513572250760021573 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager debug output --SKIPIF-- --FILE-- insert(array("my" => "value")); $retval = $manager->executeBulkWrite(NS, $bulk); var_dump($manager); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Manager)#%d (%d) { ["uri"]=> string(%d) "mongodb://%s" ["cluster"]=> array(0) { } } object(MongoDB\Driver\Manager)#%d (%d) { ["uri"]=> string(%d) "mongodb://%s" ["cluster"]=> array(1) { [0]=> array(10) { ["host"]=> string(%d) "%s" ["port"]=> int(%d) ["type"]=> int(1) ["is_primary"]=> bool(false) ["is_secondary"]=> bool(false) ["is_arbiter"]=> bool(false) ["is_hidden"]=> bool(false) ["is_passive"]=> bool(false) ["last_is_master"]=> array(%d) { %a } ["round_trip_time"]=> int(%d) } } } ===DONE=== mongodb-1.6.1/tests/manager/manager-wakeup.phpt0000644000076500000240000000077213572250760021036 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager: Manager cannot be woken up --FILE-- __wakeup(); }, "MongoDB\Driver\Exception\RuntimeException"); $manager->__wakeup(1, 2); ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\RuntimeException Warning: MongoDB\Driver\Manager::__wakeup() expects exactly 0 parameters, 2 given in %s on line %d ===DONE=== mongodb-1.6.1/tests/manager/manager_error-001.phpt0000644000076500000240000000041213572250760021242 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyManager may not inherit from final class (MongoDB\Driver\Manager) in %s on line %d mongodb-1.6.1/tests/query/bug0430-001.phpt0000644000076500000240000000054113572250760017241 0ustar alcaeusstaff--TEST-- PHPC-430: Query constructor arguments are modified --FILE-- 
['x' => 1]]; $query = new MongoDB\Driver\Query($filter, $options); var_dump($filter); var_dump($options); ?> ===DONE=== --EXPECT-- array(0) { } array(1) { ["sort"]=> array(1) { ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/query/bug0430-002.phpt0000644000076500000240000000074113572250760017244 0ustar alcaeusstaff--TEST-- PHPC-430: Query constructor arguments are modified --FILE-- ['x' => 1]]; $optionsCopy = $options; $optionsCopy['cursorFlags'] = 0; $query = new MongoDB\Driver\Query([], $options); var_dump($options); var_dump($optionsCopy); ?> ===DONE=== --EXPECT-- array(1) { ["sort"]=> array(1) { ["x"]=> int(1) } } array(2) { ["sort"]=> array(1) { ["x"]=> int(1) } ["cursorFlags"]=> int(0) } ===DONE=== mongodb-1.6.1/tests/query/bug0430-003.phpt0000644000076500000240000000063113572250760017243 0ustar alcaeusstaff--TEST-- PHPC-430: Query constructor arguments are modified --FILE-- []]; $query = buildQuery($filter, $options); var_dump($options); ?> ===DONE=== --EXPECT-- array(1) { ["sort"]=> array(0) { } } ===DONE=== mongodb-1.6.1/tests/query/bug0705-001.phpt0000644000076500000240000000272613572250760017255 0ustar alcaeusstaff--TEST-- PHPC-705: Do not unnecessarily wrap filters in $query (profiled query) --SKIPIF-- =', '3.1'); ?> --FILE-- 2]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 2 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes')); $manager->executeQuery(NS, new MongoDB\Driver\Query(["x" => 1])); $query = new MongoDB\Driver\Query( [ 'op' => 'query', 'ns' => NS, ], [ 'sort' => ['ts' => -1], 'limit' => 1, ] ); $cursor = $manager->executeQuery(DATABASE_NAME . '.system.profile', $query); $profileEntry = current($cursor->toArray()); var_dump($profileEntry->query); $command = new MongoDB\Driver\Command(array('profile' => 0)); $cursor = $manager->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 0 successfully: %s\n", (empty($result->ok) ? 
'no' : 'yes')); ?> ===DONE=== --EXPECTF-- Set profile level to 2 successfully: yes object(stdClass)#%d (%d) { ["x"]=> int(1) } Set profile level to 0 successfully: yes ===DONE=== mongodb-1.6.1/tests/query/bug0705-002.phpt0000644000076500000240000000122613572250760017250 0ustar alcaeusstaff--TEST-- PHPC-705: Do not unnecessarily wrap filters in $query (currentOp query) --SKIPIF-- =', '3.1'); ?> --FILE-- executeQuery('admin.$cmd.sys.inprog', new MongoDB\Driver\Query([])); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- array(1) { [0]=> object(stdClass)#%d (%d) { ["inprog"]=> array(0) { } } } ===DONE=== mongodb-1.6.1/tests/query/query-ctor-001.phpt0000644000076500000240000000331013572250760020264 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction should always encode __pclass for Persistable objects --SKIPIF-- --FILE-- id = $id; $this->child = $child; } public function bsonSerialize() { return [ '_id' => $this->id, 'child' => $this->child, ]; } public function bsonUnserialize(array $data) { $this->id = $data['_id']; $this->child = $data['child']; } } $manager = new MongoDB\Driver\Manager(URI); $document = new MyClass('foo', new MyClass('bar', new MyClass('baz'))); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(new MyClass('foo', new MyClass('bar', new MyClass('baz')))); $result = $manager->executeBulkWrite(NS, $bulk); printf("Inserted %d document(s)\n", $result->getInsertedCount()); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query($document)); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- Inserted 1 document(s) array(1) { [0]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "foo" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "bar" ["child":"MyClass":private]=> object(MyClass)#%d (%d) { ["id":"MyClass":private]=> string(3) "baz" ["child":"MyClass":private]=> NULL } } } } ===DONE=== mongodb-1.6.1/tests/query/query-ctor-002.phpt0000644000076500000240000000645313572250760020300 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction with options --FILE-- 1], [ 'allowPartialResults' => false, 'awaitData' => false, 'batchSize' => 10, 'collation' => ['locale' => 'en_US'], 'comment' => 'foo', 'exhaust' => false, 'limit' => 20, 'max' => ['y' => 100], 'maxScan' => 50, 'maxTimeMS' => 1000, 'min' => ['y' => 1], 'noCursorTimeout' => false, 'oplogReplay' => false, 'projection' => ['x' => 1, 'y' => 1], 'returnKey' => false, 'showRecordId' => false, 'singleBatch' => false, 'skip' => 5, 'sort' => ['y' => -1], 'snapshot' => false, 'tailable' => false, ] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], ['hint' => 'y_1'] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], ['hint' => ['y' => 1]] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], ['readConcern' => new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL)] )); ?> ===DONE=== --EXPECTF-- Deprecated: MongoDB\Driver\Query::__construct(): The "maxScan" option is deprecated and will be removed in a future release in %s on line %d Deprecated: MongoDB\Driver\Query::__construct(): The "snapshot" option is deprecated and will be removed in a future release in %s on line %d object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["allowPartialResults"]=> bool(false) ["awaitData"]=> bool(false) ["batchSize"]=> int(10) ["collation"]=> object(stdClass)#%d (%d) { ["locale"]=> string(5) "en_US" } ["comment"]=> string(3) "foo" ["exhaust"]=> bool(false) 
["max"]=> object(stdClass)#%d (%d) { ["y"]=> int(100) } ["maxScan"]=> int(50) ["maxTimeMS"]=> int(1000) ["min"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } ["noCursorTimeout"]=> bool(false) ["oplogReplay"]=> bool(false) ["projection"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) ["y"]=> int(1) } ["returnKey"]=> bool(false) ["showRecordId"]=> bool(false) ["skip"]=> int(5) ["sort"]=> object(stdClass)#%d (%d) { ["y"]=> int(-1) } ["snapshot"]=> bool(false) ["tailable"]=> bool(false) ["limit"]=> int(20) ["singleBatch"]=> bool(false) } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["hint"]=> string(3) "y_1" } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["hint"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> array(1) { ["level"]=> string(5) "local" } } ===DONE=== mongodb-1.6.1/tests/query/query-ctor-003.phpt0000644000076500000240000000466213572250760020301 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction with modifier options --FILE-- 1], [ 'modifiers' => [ '$comment' => 'foo', '$max' => ['y' => 100], '$maxScan' => 50, '$maxTimeMS' => 1000, '$min' => ['y' => 1], '$orderby' => ['y' => -1], '$returnKey' => false, '$showDiskLoc' => false, '$snapshot' => false, ], ] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], ['modifiers' => ['$explain' => true]] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], ['modifiers' => ['$hint' => 'y_1']] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], ['modifiers' => ['$hint' => ['y' => 1]]] )); ?> ===DONE=== --EXPECTF-- Deprecated: MongoDB\Driver\Query::__construct(): The "$maxScan" option is deprecated and will be removed in a future release in %s on line %d Deprecated: MongoDB\Driver\Query::__construct(): The "$snapshot" option is deprecated and will be removed in a future release in %s on line %d object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["comment"]=> string(3) "foo" ["max"]=> object(stdClass)#%d (%d) { ["y"]=> int(100) } ["maxScan"]=> int(50) ["maxTimeMS"]=> int(1000) ["min"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } ["returnKey"]=> bool(false) ["showRecordId"]=> bool(false) ["sort"]=> object(stdClass)#%d (%d) { ["y"]=> int(-1) } ["snapshot"]=> bool(false) } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["explain"]=> bool(true) } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["hint"]=> string(3) "y_1" } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["hint"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } } ["readConcern"]=> NULL } ===DONE=== mongodb-1.6.1/tests/query/query-ctor-004.phpt0000644000076500000240000000473113572250760020277 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction with options overriding modifiers --FILE-- 1], [ 'comment' => 'foo', 'max' => ['y' => 100], 'maxScan' => 
50, 'maxTimeMS' => 1000, 'min' => ['y' => 1], 'returnKey' => false, 'showRecordId' => false, 'sort' => ['y' => -1], 'snapshot' => false, 'modifiers' => [ '$comment' => 'bar', '$max' => ['y' => 200], '$maxScan' => 60, '$maxTimeMS' => 2000, '$min' => ['y' => 101], '$orderby' => ['y' => 1], '$returnKey' => true, '$showDiskLoc' => true, '$snapshot' => true, ], ] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], [ 'hint' => 'y_1', 'modifiers' => ['$hint' => 'x_1'], ] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], [ 'hint' => ['y' => 1], 'modifiers' => ['$hint' => ['x' => 1]], ] )); ?> ===DONE=== --EXPECTF-- Deprecated: MongoDB\Driver\Query::__construct(): The "maxScan" option is deprecated and will be removed in a future release in %s on line %d Deprecated: MongoDB\Driver\Query::__construct(): The "snapshot" option is deprecated and will be removed in a future release in %s on line %d object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["comment"]=> string(3) "foo" ["max"]=> object(stdClass)#%d (%d) { ["y"]=> int(100) } ["maxScan"]=> int(50) ["maxTimeMS"]=> int(1000) ["min"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } ["returnKey"]=> bool(false) ["showRecordId"]=> bool(false) ["sort"]=> object(stdClass)#%d (%d) { ["y"]=> int(-1) } ["snapshot"]=> bool(false) } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["hint"]=> string(3) "y_1" } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["hint"]=> object(stdClass)#%d (%d) { ["y"]=> int(1) } } ["readConcern"]=> NULL } ===DONE=== mongodb-1.6.1/tests/query/query-ctor-005.phpt0000644000076500000240000000150313572250760020272 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction with negative limit --FILE-- 1], ['limit' => -5] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], [ 'limit' => -5, 'singleBatch' => true ] )); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["limit"]=> int(5) ["singleBatch"]=> bool(true) } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["limit"]=> int(5) ["singleBatch"]=> bool(true) } ["readConcern"]=> NULL } ===DONE=== mongodb-1.6.1/tests/query/query-ctor-006.phpt0000644000076500000240000000151513572250760020276 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction "allowPartialResults" overrides "partial" option --FILE-- 1], ['partial' => true] )); var_dump(new MongoDB\Driver\Query( ['x' => 1], [ 'allowPartialResults' => false, 'partial' => true, ] )); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["allowPartialResults"]=> bool(true) } ["readConcern"]=> NULL } object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } ["options"]=> object(stdClass)#%d (%d) { ["allowPartialResults"]=> bool(false) } ["readConcern"]=> NULL } ===DONE=== mongodb-1.6.1/tests/query/query-ctor_error-001.phpt0000644000076500000240000000264713572250760021511 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction (invalid readConcern type) 
--FILE-- $test]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; } ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, %r(double|float)%r given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, bool%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, %r(null|NULL)%r given ===DONE=== mongodb-1.6.1/tests/query/query-ctor_error-002.phpt0000644000076500000240000000437313572250760021510 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction (invalid option types) --FILE-- 0], ['collation' => 0], ['comment' => 0], ['hint' => 0], ['max' => 0], ['min' => 0], ['projection' => 0], ['sort' => 0], ['modifiers' => ['$comment' => 0]], ['modifiers' => ['$hint' => 0]], ['modifiers' => ['$max' => 0]], ['modifiers' => ['$min' => 0]], ['modifiers' => ['$orderby' => 0]], ]; foreach ($tests as $options) { echo throws(function() use ($options) { new MongoDB\Driver\Query([], $options); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n\n"; } ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "modifiers" option to be array, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "collation" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "comment" option to be string, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "hint" option to be string, array, or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "max" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "min" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "projection" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "sort" option to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "$comment" modifier to be string, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "$hint" modifier to be string, array, or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "$max" modifier to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "$min" modifier to be array or object, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "$orderby" modifier to be array or object, int%S given ===DONE=== mongodb-1.6.1/tests/query/query-ctor_error-003.phpt0000644000076500000240000000076213572250760021507 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction (negative limit conflicts with false singleBatch) --FILE-- -1, 
'singleBatch' => false]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Negative "limit" option conflicts with false "singleBatch" option ===DONE=== mongodb-1.6.1/tests/query/query-ctor_error-004.phpt0000644000076500000240000000407713572250760021513 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction (cannot use empty keys in documents) --FILE-- '1'], []], [['x' => ['' => '1']], []], [[], ['collation' => ['' => 1]]], [[], ['hint' => ['' => 1]]], [[], ['max' => ['' => 1]]], [[], ['min' => ['' => 1]]], [[], ['projection' => ['' => 1]]], [[], ['sort' => ['' => 1]]], [[], ['modifiers' => ['$hint' => ['' => 1]]]], [[], ['modifiers' => ['$max' => ['' => 1]]]], [[], ['modifiers' => ['$min' => ['' => 1]]]], [[], ['modifiers' => ['$orderby' => ['' => 1]]]], ]; foreach ($tests as $test) { list($filter, $options) = $test; echo throws(function() use ($filter, $options) { new MongoDB\Driver\Query($filter, $options); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; } ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in filter document OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in filter document OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "collation" option OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "hint" option OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "max" option OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "min" option OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "projection" option OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "sort" option OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "$hint" modifier OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "$max" modifier OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "$min" modifier OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use empty keys in "$orderby" modifier ===DONE=== mongodb-1.6.1/tests/query/query-ctor_error-005.phpt0000644000076500000240000000070413572250760021505 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction (invalid maxAwaitTimeMS range) --FILE-- -1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "maxAwaitTimeMS" option to be >= 0, -1 given ===DONE=== mongodb-1.6.1/tests/query/query-ctor_error-006.phpt0000644000076500000240000000106313572250760021505 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query construction (invalid maxAwaitTimeMS range) --SKIPIF-- --FILE-- 4294967296]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "maxAwaitTimeMS" option to be <= 4294967295, 4294967296 given ===DONE=== mongodb-1.6.1/tests/query/query-debug-001.phpt0000644000076500000240000000177113572250760020414 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query debug output --FILE-- 123], [ 'limit' => 5, 'modifiers' => [ '$comment' => 'foo', '$maxTimeMS' => 500, ], 'projection' => ['c' => 1], 'readConcern' => new 
MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL), 'skip' => 10, 'sort' => ['b' => -1], ] )); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["a"]=> int(123) } ["options"]=> object(stdClass)#%d (%d) { ["comment"]=> string(3) "foo" ["maxTimeMS"]=> int(500) ["projection"]=> object(stdClass)#%d (%d) { ["c"]=> int(1) } ["skip"]=> int(10) ["sort"]=> object(stdClass)#%d (%d) { ["b"]=> int(-1) } ["limit"]=> int(5) } ["readConcern"]=> array(1) { ["level"]=> string(5) "local" } } ===DONE=== mongodb-1.6.1/tests/query/query_error-001.phpt0000644000076500000240000000040013572250760020525 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyQuery may not inherit from final class (MongoDB\Driver\Query) in %s on line %d mongodb-1.6.1/tests/readConcern/readconcern-bsonserialize-001.phpt0000644000076500000240000000127113572250760024366 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern::bsonSerialize() --FILE-- ===DONE=== --EXPECT-- { } { "level" : "linearizable" } { "level" : "local" } { "level" : "majority" } { "level" : "available" } ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-bsonserialize-002.phpt0000644000076500000240000000161113572250760024365 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern::bsonSerialize() returns an object --FILE-- bsonSerialize()); } ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { } object(stdClass)#%d (%d) { ["level"]=> string(12) "linearizable" } object(stdClass)#%d (%d) { ["level"]=> string(5) "local" } object(stdClass)#%d (%d) { ["level"]=> string(8) "majority" } object(stdClass)#%d (%d) { ["level"]=> string(9) "available" } ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-constants.phpt0000644000076500000240000000061713572250760023236 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern constants --FILE-- ===DONE=== --EXPECTF-- string(12) "linearizable" string(5) "local" string(8) "majority" string(9) "available" ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-ctor-001.phpt0000644000076500000240000000142213572250760022462 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern construction --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadConcern)#%d (%d) { } object(MongoDB\Driver\ReadConcern)#%d (%d) { } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(5) "local" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(8) "majority" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(17) "not-yet-supported" } ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-ctor_error-001.phpt0000644000076500000240000000071413572250760023676 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern construction (invalid arguments) --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadConcern::__construct() expects at most 1 parameter, 2 given ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-ctor_error-002.phpt0000644000076500000240000000130313572250760023672 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern construction (invalid level type) --FILE-- ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadConcern::__construct() expects parameter 1 to be string, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadConcern::__construct() expects parameter 1 to be string, object given ===DONE=== 
mongodb-1.6.1/tests/readConcern/readconcern-debug-001.phpt0000644000076500000240000000167413572250760022612 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern debug output --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadConcern)#%d (%d) { } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(12) "linearizable" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(5) "local" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(8) "majority" } object(MongoDB\Driver\ReadConcern)#%d (%d) { ["level"]=> string(9) "available" } ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-getlevel-001.phpt0000644000076500000240000000066313572250760023330 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern::getLevel() --FILE-- getLevel()); } ?> ===DONE=== --EXPECT-- NULL string(5) "local" string(8) "majority" string(17) "not-yet-supported" ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-isdefault-001.phpt0000644000076500000240000000212313572250760023472 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern::isDefault() --FILE-- getReadConcern(), (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?readconcernlevel='))->getReadConcern(), (new MongoDB\Driver\Manager(null, ['readconcernlevel' => 'local']))->getReadConcern(), (new MongoDB\Driver\Manager(null, ['readconcernlevel' => '']))->getReadConcern(), // Cannot test ['readconcernlevel' => null] since a string type is expected (PHPC-887) (new MongoDB\Driver\Manager)->getReadConcern(), ]; foreach ($tests as $rc) { var_dump($rc->isDefault()); } ?> ===DONE=== --EXPECT-- bool(true) bool(true) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(true) ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-set_state-001.phpt0000644000076500000240000000161213572250760023507 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern::__set_state() --FILE-- $level, ])); echo "\n\n"; } /* Test with level unset */ var_export(MongoDB\Driver\ReadConcern::__set_state([ ])); echo "\n\n"; ?> ===DONE=== --EXPECTF-- MongoDB\Driver\ReadConcern::__set_state(array( %w'level' => 'available', )) MongoDB\Driver\ReadConcern::__set_state(array( %w'level' => 'linearizable', )) MongoDB\Driver\ReadConcern::__set_state(array( %w'level' => 'local', )) MongoDB\Driver\ReadConcern::__set_state(array( %w'level' => 'majority', )) MongoDB\Driver\ReadConcern::__set_state(array( )) ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-set_state_error-001.phpt0000644000076500000240000000073613572250760024726 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern::__set_state() requires "level" string field --FILE-- 0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadConcern initialization requires "level" string field ===DONE=== mongodb-1.6.1/tests/readConcern/readconcern-var_export-001.phpt0000644000076500000240000000166113572250760023711 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern: var_export() --FILE-- ===DONE=== --EXPECT-- MongoDB\Driver\ReadConcern::__set_state(array( )) MongoDB\Driver\ReadConcern::__set_state(array( 'level' => 'linearizable', )) MongoDB\Driver\ReadConcern::__set_state(array( 'level' => 'local', )) MongoDB\Driver\ReadConcern::__set_state(array( 'level' => 'majority', )) MongoDB\Driver\ReadConcern::__set_state(array( 'level' => 'available', )) ===DONE=== 
mongodb-1.6.1/tests/readConcern/readconcern_error-001.phpt0000644000076500000240000000043613572250760022732 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadConcern cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyReadConcern may not inherit from final class (MongoDB\Driver\ReadConcern) in %s on line %d mongodb-1.6.1/tests/readPreference/bug0146-001.phpt0000644000076500000240000001062013572250760021011 0ustar alcaeusstaff--TEST-- PHPC-146: ReadPreference primaryPreferred and secondary swapped (OP_QUERY) --SKIPIF-- =', '3.1'); ?> --FILE-- insert(array('my' => 'document')); $manager->executeBulkWrite(NS, $bulk); $rps = array( MongoDB\Driver\ReadPreference::RP_PRIMARY, MongoDB\Driver\ReadPreference::RP_PRIMARY_PREFERRED, MongoDB\Driver\ReadPreference::RP_SECONDARY, MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED, MongoDB\Driver\ReadPreference::RP_NEAREST, ); foreach($rps as $r) { $rp = new MongoDB\Driver\ReadPreference($r); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("my" => "query")), $rp); var_dump($cursor); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_001" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_001" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_001" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_001" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { 
["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_001" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } ===DONE=== mongodb-1.6.1/tests/readPreference/bug0146-002.phpt0000644000076500000240000001071113572250760021013 0ustar alcaeusstaff--TEST-- PHPC-146: ReadPreference primaryPreferred and secondary swapped (find command) --SKIPIF-- --FILE-- insert(array('my' => 'document')); $manager->executeBulkWrite(NS, $bulk); $rps = array( MongoDB\Driver\ReadPreference::RP_PRIMARY, MongoDB\Driver\ReadPreference::RP_PRIMARY_PREFERRED, MongoDB\Driver\ReadPreference::RP_SECONDARY, MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED, MongoDB\Driver\ReadPreference::RP_NEAREST, ); foreach($rps as $r) { $rp = new MongoDB\Driver\ReadPreference($r); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array("my" => "query")), $rp); var_dump($cursor); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_002" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_002" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_002" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_002" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> 
string(18) "secondaryPreferred" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> string(26) "readPreference_bug0146_002" ["query"]=> object(MongoDB\Driver\Query)#%d (%d) { ["filter"]=> object(stdClass)#%d (%d) { ["my"]=> string(5) "query" } ["options"]=> object(stdClass)#%d (%d) { } ["readConcern"]=> NULL } ["command"]=> NULL ["readPreference"]=> object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } ["session"]=> NULL ["isDead"]=> bool(true) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } ===DONE=== mongodb-1.6.1/tests/readPreference/bug0851-001.phpt0000644000076500000240000000146013572250760021016 0ustar alcaeusstaff--TEST-- PHPC-851: ReadPreference constructor should not modify tagSets argument --FILE-- 'ny'], [], ]; $rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED, $tagSets); var_dump($tagSets); /* Dump the Manager's ReadPreference to ensure that each element in the $tagSets * argument was converted to an object. */ var_dump($rp); ?> ===DONE=== --EXPECTF-- array(2) { [0]=> array(1) { ["dc"]=> string(2) "ny" } [1]=> array(0) { } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" ["tags"]=> array(2) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" } [1]=> object(stdClass)#%d (%d) { } } } ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-bsonserialize-001.phpt0000644000076500000240000000275413572250760025553 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::bsonSerialize() --FILE-- 'ny']]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, [['dc' => 'ny'], ['dc' => 'sf', 'use' => 'reporting'], []]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => 1000]), ]; foreach ($tests as $test) { echo toJSON(fromPHP($test)), "\n"; } ?> ===DONE=== --EXPECT-- { "mode" : "primary" } { "mode" : "primaryPreferred" } { "mode" : "secondary" } { "mode" : "secondaryPreferred" } { "mode" : "nearest" } { "mode" : "primary" } { "mode" : "secondary", "tags" : [ { "dc" : "ny" } ] } { "mode" : "secondary", "tags" : [ { "dc" : "ny" }, { "dc" : "sf", "use" : "reporting" }, { } ] } { "mode" : "secondary", "maxStalenessSeconds" : 1000 } ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-bsonserialize-002.phpt0000644000076500000240000000424313572250760025547 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::bsonSerialize() returns an object --FILE-- 'ny']]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, [['dc' => 'ny'], ['dc' => 'sf', 'use' => 'reporting'], []]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => 1000]), ]; foreach ($tests as $test) { var_dump($test->bsonSerialize()); } ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["mode"]=> string(7) "primary" } object(stdClass)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } object(stdClass)#%d (%d) { ["mode"]=> string(9) "secondary" } object(stdClass)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(stdClass)#%d (%d) { ["mode"]=> string(7) "nearest" } object(stdClass)#%d (%d) { ["mode"]=> string(7) "primary" } object(stdClass)#%d (%d) { 
["mode"]=> string(9) "secondary" ["tags"]=> array(1) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" } } } object(stdClass)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(3) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" } [1]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "sf" ["use"]=> string(9) "reporting" } [2]=> object(stdClass)#%d (%d) { } } } object(stdClass)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-ctor-001.phpt0000644000076500000240000000202213572250760023635 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction --FILE-- 'one']])); var_dump(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY, [])); var_dump(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => 1000])); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(1) { [0]=> object(stdClass)#%d (%d) { ["tag"]=> string(3) "one" } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-ctor-002.phpt0000644000076500000240000000410513572250760023642 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction with strings --FILE-- getMessage(), "\n"; } var_dump( $rp ); } ?> --EXPECTF-- object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } mongodb-1.6.1/tests/readPreference/readpreference-ctor_error-001.phpt0000644000076500000240000000060713572250760025055 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction (invalid mode) --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Invalid mode: 42 ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-ctor_error-002.phpt0000644000076500000240000000327113572250760025056 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction (invalid tagSets) --FILE-- 'one']]); }, 
'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { new MongoDB\Driver\ReadPreference("primary", [['tag' => 'one']]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY, ['invalid']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, ['invalid']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; // Ensure that tagSets is validated before maxStalenessSeconds option echo throws(function() { new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, ['invalid'], ['maxStalenessSeconds' => -2]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException tagSets may not be used with primary mode OK: Got MongoDB\Driver\Exception\InvalidArgumentException tagSets may not be used with primary mode OK: Got MongoDB\Driver\Exception\InvalidArgumentException tagSets must be an array of zero or more documents OK: Got MongoDB\Driver\Exception\InvalidArgumentException tagSets must be an array of zero or more documents OK: Got MongoDB\Driver\Exception\InvalidArgumentException tagSets must be an array of zero or more documents ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-ctor_error-003.phpt0000644000076500000240000000334613572250760025062 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction (invalid maxStalenessSeconds) --FILE-- 1000]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { new MongoDB\Driver\ReadPreference("primary", null, ['maxStalenessSeconds' => 1000]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => -2]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => 0]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => 42]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException maxStalenessSeconds may not be used with primary mode OK: Got MongoDB\Driver\Exception\InvalidArgumentException maxStalenessSeconds may not be used with primary mode OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be >= 90, -2 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be >= 90, 0 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be >= 90, 42 given ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-ctor_error-004.phpt0000644000076500000240000000117213572250760025056 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction (invalid maxStalenessSeconds range) --SKIPIF-- --FILE-- 2147483648]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected maxStalenessSeconds to be <= 2147483647, 
2147483648 given ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-ctor_error-005.phpt0000644000076500000240000000064213572250760025060 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction (invalid string mode) --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Invalid mode: 'hocuspocus' ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-ctor_error-006.phpt0000644000076500000240000000070213572250760025056 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference construction (invalid type for mode) --FILE-- ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected mode to be integer or string, %r(double|float)%r given ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-debug-001.phpt0000644000076500000240000000447113572250760023766 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference debug output --FILE-- 'ny']]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, [['dc' => 'ny'], ['dc' => 'sf', 'use' => 'reporting'], []]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => 1000]), ]; foreach ($tests as $test) { var_dump($test); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(16) "primaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(18) "secondaryPreferred" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "nearest" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(7) "primary" } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(1) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["tags"]=> array(3) { [0]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "ny" } [1]=> object(stdClass)#%d (%d) { ["dc"]=> string(2) "sf" ["use"]=> string(9) "reporting" } [2]=> object(stdClass)#%d (%d) { } } } object(MongoDB\Driver\ReadPreference)#%d (%d) { ["mode"]=> string(9) "secondary" ["maxStalenessSeconds"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-getMaxStalenessMS-001.phpt0000644000076500000240000000113713572250760026243 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::getMaxStalenessSeconds() --FILE-- $test]); var_dump($rp->getMaxStalenessSeconds()); } ?> ===DONE=== --EXPECT-- int(-1) int(90) int(90) int(1000) int(2147483647) ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-getMaxStalenessMS-002.phpt0000644000076500000240000000112013572250760026234 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::getMaxStalenessSeconds() with string mode --FILE-- $test]); var_dump($rp->getMaxStalenessSeconds()); } ?> ===DONE=== --EXPECT-- int(-1) int(90) int(90) int(1000) int(2147483647) ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-getMode-001.phpt0000644000076500000240000000103713572250760024257 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::getMode() --FILE-- getMode()); } ?> ===DONE=== --EXPECT-- int(1) int(5) int(2) int(6) int(10) ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-getTagSets-001.phpt0000644000076500000240000000133213572250760024743 0ustar alcaeusstaff--TEST-- 
MongoDB\Driver\ReadPreference::getTagSets() --FILE-- 'ny'], []], [['dc' => 'ny'], ['dc' => 'sf', 'use' => 'reporting'], []], ]; foreach ($tests as $test) { $rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY_PREFERRED, $test); var_dump($rp->getTagSets()); } ?> ===DONE=== --EXPECT-- array(0) { } array(0) { } array(2) { [0]=> array(1) { ["dc"]=> string(2) "ny" } [1]=> array(0) { } } array(3) { [0]=> array(1) { ["dc"]=> string(2) "ny" } [1]=> array(2) { ["dc"]=> string(2) "sf" ["use"]=> string(9) "reporting" } [2]=> array(0) { } } ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-getTagSets-002.phpt0000644000076500000240000000131213572250760024742 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::getTagSets() with string mode --FILE-- 'ny'], []], [['dc' => 'ny'], ['dc' => 'sf', 'use' => 'reporting'], []], ]; foreach ($tests as $test) { $rp = new MongoDB\Driver\ReadPreference("secondaryPreferred", $test); var_dump($rp->getTagSets()); } ?> ===DONE=== --EXPECT-- array(0) { } array(0) { } array(2) { [0]=> array(1) { ["dc"]=> string(2) "ny" } [1]=> array(0) { } } array(3) { [0]=> array(1) { ["dc"]=> string(2) "ny" } [1]=> array(2) { ["dc"]=> string(2) "sf" ["use"]=> string(9) "reporting" } [2]=> array(0) { } } ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-set_state-001.phpt0000644000076500000240000000320013572250760024660 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::__set_state() --FILE-- 'primary' ], [ 'mode' => 'primaryPreferred' ], [ 'mode' => 'secondary' ], [ 'mode' => 'secondaryPreferred' ], [ 'mode' => 'nearest' ], [ 'mode' => 'secondary', 'tags' => [['dc' => 'ny']] ], [ 'mode' => 'secondary', 'tags' => [['dc' => 'ny'], ['dc' => 'sf', 'use' => 'reporting'], []] ], [ 'mode' => 'secondary', 'maxStalenessSeconds' => 1000 ], ]; foreach ($tests as $fields) { var_export(MongoDB\Driver\ReadPreference::__set_state($fields)); echo "\n\n"; } ?> ===DONE=== --EXPECTF-- MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'primary', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'primaryPreferred', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondary', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondaryPreferred', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'nearest', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondary', 'tags' => array ( 0 => %Sarray( 'dc' => 'ny', %S), ), )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondary', 'tags' => array ( 0 => %Sarray( 'dc' => 'ny', %S), 1 => %Sarray( 'dc' => 'sf', 'use' => 'reporting', %S), 2 => %Sarray( %S), ), )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondary', 'maxStalenessSeconds' => 1000, )) ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-set_state_error-001.phpt0000644000076500000240000000477313572250760026111 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::__set_state() requires correct data types and values --FILE-- 'furthest']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\ReadPreference::__set_state(['mode' => M_PI]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\ReadPreference::__set_state(['mode' => 'secondary', 'tags' => -1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\ReadPreference::__set_state(['mode' => 'secondary', 'tags' 
=> [ 42 ] ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\ReadPreference::__set_state(['mode' => 'primary', 'tags' => [['dc' => 'ny']]]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\ReadPreference::__set_state(['mode' => 'secondary', 'maxStalenessSeconds' => 1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\ReadPreference::__set_state(['mode' => 'primary', 'maxStalenessSeconds' => 100]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires specific values for "mode" string field OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires "mode" field to be string OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires "tags" field to be array OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires "tags" array field to have zero or more documents OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires "tags" array field to not be present with "primary" mode OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires "maxStalenessSeconds" integer field to be >= 90 OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires "maxStalenessSeconds" array field to not be present with "primary" mode ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-set_state_error-002.phpt0000644000076500000240000000122713572250760026101 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference::__set_state() requires correct data types and values --SKIPIF-- --FILE-- 'secondary', 'maxStalenessSeconds' => 2147483648]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\ReadPreference initialization requires "maxStalenessSeconds" integer field to be <= 2147483647 ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference-var_export-001.phpt0000644000076500000240000000420313572250760025062 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference: var_export() --FILE-- 'ny']]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, [['dc' => 'ny'], ['dc' => 'sf', 'use' => 'reporting'], []]), new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY, null, ['maxStalenessSeconds' => 1000]), ]; foreach ($tests as $test) { echo var_export($test, true), "\n"; } ?> ===DONE=== --EXPECTF-- MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'primary', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'primaryPreferred', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondary', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondaryPreferred', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'nearest', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'primary', )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondary', 'tags' => array ( 0 => %Sarray( 'dc' => 'ny', %S), ), )) MongoDB\Driver\ReadPreference::__set_state(array( 
'mode' => 'secondary', 'tags' => array ( 0 => %Sarray( 'dc' => 'ny', %S), 1 => %Sarray( 'dc' => 'sf', 'use' => 'reporting', %S), 2 => %Sarray( %S), ), )) MongoDB\Driver\ReadPreference::__set_state(array( 'mode' => 'secondary', 'maxStalenessSeconds' => 1000, )) ===DONE=== mongodb-1.6.1/tests/readPreference/readpreference_error-001.phpt0000644000076500000240000000045513572250760024111 0ustar alcaeusstaff--TEST-- MongoDB\Driver\ReadPreference cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyReadPreference may not inherit from final class (MongoDB\Driver\ReadPreference) in %s on line %d mongodb-1.6.1/tests/replicaset/bug0155.phpt0000644000076500000240000000141413572250760017735 0ustar alcaeusstaff--TEST-- PHPC-155: WriteConcernError->getInfo() can be scalar --SKIPIF-- --FILE-- insert(array('example' => 'document')); try { $manager->executeBulkWrite(NS, $bulk, $wc); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcernError)#%d (%d) { ["message"]=> string(%d) "%s" ["code"]=> int(79) ["info"]=> NULL } ===DONE=== mongodb-1.6.1/tests/replicaset/bug0898-001.phpt0000644000076500000240000000203113572250760020245 0ustar alcaeusstaff--TEST-- PHPC-898: readConcern option should not be included in getMore commands (URI option) --SKIPIF-- --FILE-- 'local']); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $result = $manager->executeBulkWrite(NS, $bulk); printf("Inserted %d document(s)\n", $result->getInsertedCount()); $query = new MongoDB\Driver\Query([], ['batchSize' => 2]); $cursor = $manager->executeQuery(NS, $query); foreach ($cursor as $document) { var_dump($document); } ?> ===DONE=== --EXPECTF-- Inserted 3 document(s) object(stdClass)#%d (1) { ["_id"]=> int(1) } object(stdClass)#%d (1) { ["_id"]=> int(2) } object(stdClass)#%d (1) { ["_id"]=> int(3) } ===DONE=== mongodb-1.6.1/tests/replicaset/bug0898-002.phpt0000644000076500000240000000213113572250760020247 0ustar alcaeusstaff--TEST-- PHPC-898: readConcern option should not be included in getMore commands (query option) --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $result = $manager->executeBulkWrite(NS, $bulk); printf("Inserted %d document(s)\n", $result->getInsertedCount()); $rc = new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL); $query = new MongoDB\Driver\Query([], ['batchSize' => 2, 'readConcern' => $rc]); $cursor = $manager->executeQuery(NS, $query); foreach ($cursor as $document) { var_dump($document); } ?> ===DONE=== --EXPECTF-- Inserted 3 document(s) object(stdClass)#%d (1) { ["_id"]=> int(1) } object(stdClass)#%d (1) { ["_id"]=> int(2) } object(stdClass)#%d (1) { ["_id"]=> int(3) } ===DONE=== mongodb-1.6.1/tests/replicaset/manager-getservers-001.phpt0000644000076500000240000000425413572250760022751 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::getServers() --SKIPIF-- --FILE-- "document"); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); $wresult = $manager->executeBulkWrite(NS, $bulk); var_dump($manager->getServers()); $servers = $manager->getServers(); foreach($servers as $server) { printf("%s:%d - primary: %d, secondary: %d, arbiter: %d\n", $server->getHost(), $server->getPort(), $server->isPrimary(), $server->isSecondary(), $server->isArbiter()); } ?> ===DONE=== --EXPECTF-- array(3) { [0]=> object(MongoDB\Driver\Server)#%d (%d) 
{ ["host"]=> string(%d) "%s" ["port"]=> int(%d) ["type"]=> int(4) ["is_primary"]=> bool(true) ["is_secondary"]=> bool(false) ["is_arbiter"]=> bool(false) ["is_hidden"]=> bool(false) ["is_passive"]=> bool(false)%A ["last_is_master"]=> array(%d) { %a } ["round_trip_time"]=> int(%d) } [1]=> object(MongoDB\Driver\Server)#%d (%d) { ["host"]=> string(%d) "%s" ["port"]=> int(%d) ["type"]=> int(5) ["is_primary"]=> bool(false) ["is_secondary"]=> bool(true) ["is_arbiter"]=> bool(false) ["is_hidden"]=> bool(false) ["is_passive"]=> bool(false)%A ["last_is_master"]=> array(%d) { %a } ["round_trip_time"]=> int(%d) } [2]=> object(MongoDB\Driver\Server)#%d (%d) { ["host"]=> string(%d) "%s" ["port"]=> int(%d) ["type"]=> int(6) ["is_primary"]=> bool(false) ["is_secondary"]=> bool(false) ["is_arbiter"]=> bool(true) ["is_hidden"]=> bool(false) ["is_passive"]=> bool(false) ["last_is_master"]=> array(%d) { %a } ["round_trip_time"]=> int(%d) } } %s:%d - primary: 1, secondary: 0, arbiter: 0 %s:%d - primary: 0, secondary: 1, arbiter: 0 %s:%d - primary: 0, secondary: 0, arbiter: 1 ===DONE=== mongodb-1.6.1/tests/replicaset/manager-selectserver-001.phpt0000644000076500000240000000502313572250760023261 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::selectServer() select a server from SDAM based on ReadPreference --SKIPIF-- --FILE-- false]); $rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $server = $manager->selectServer($rp); $rp2 = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $server2 = $manager->selectServer($rp2); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $server->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1))); $cursor = $server->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server == $cursor->getServer()); var_dump(iterator_to_array($cursor)); $query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1))); $cursor = $server2->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server2 == $cursor->getServer()); var_dump(iterator_to_array($cursor)); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); throws(function() use($server2, $bulk) { $server2->executeBulkWrite(NS, $bulk); }, "MongoDB\Driver\Exception\BulkWriteException"); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $result = $server2->executeBulkWrite('local.' . 
COLLECTION_NAME, $bulk); var_dump($result->getInsertedCount()); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["y"]=> int(4) } } bool(true) bool(true) array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["y"]=> int(4) } } OK: Got MongoDB\Driver\Exception\BulkWriteException int(3) ===DONE=== mongodb-1.6.1/tests/replicaset/readconcern-001.phpt0000644000076500000240000000250613572250760021431 0ustar alcaeusstaff--TEST-- ReadConcern: MongoDB\Driver\Manager::executeQuery() with readConcern option (find command) --SKIPIF-- --FILE-- insert(['_id' => 1, 'x' => 1]); $bulk->insert(['_id' => 2, 'x' => 2]); $manager->executeBulkWrite(NS, $bulk, $wc); $rc = new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL); $query = new MongoDB\Driver\Query(['x' => 2], ['readConcern' => $rc]); $cursor = $manager->executeQuery(NS, $query); var_dump(iterator_to_array($cursor)); $rc = new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::MAJORITY); $query = new MongoDB\Driver\Query(['x' => 2], ['readConcern' => $rc]); $cursor = $manager->executeQuery(NS, $query); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- array(1) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) ["x"]=> int(2) } } array(1) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) ["x"]=> int(2) } } ===DONE=== mongodb-1.6.1/tests/replicaset/readconcern-002.phpt0000644000076500000240000000253713572250760021436 0ustar alcaeusstaff--TEST-- ReadConcern: MongoDB\Driver\Manager::executeQuery() with readConcern option (OP_QUERY) --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['_id' => 1, 'x' => 1]); $bulk->insert(['_id' => 2, 'x' => 2]); $manager->executeBulkWrite(NS, $bulk); $rc = new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::LOCAL); $query = new MongoDB\Driver\Query(['x' => 2], ['readConcern' => $rc]); echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query); }, 'MongoDB\Driver\Exception\RuntimeException'), "\n"; $rc = new MongoDB\Driver\ReadConcern(MongoDB\Driver\ReadConcern::MAJORITY); $query = new MongoDB\Driver\Query(['x' => 2], ['readConcern' => $rc]); echo throws(function() use ($manager, $query) { $manager->executeQuery(NS, $query); }, 'MongoDB\Driver\Exception\RuntimeException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\RuntimeException The selected server does not support readConcern OK: Got MongoDB\Driver\Exception\RuntimeException The selected server does not support readConcern ===DONE=== mongodb-1.6.1/tests/replicaset/server-001.phpt0000644000076500000240000000306513572250760020455 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server: Manager->getServer() returning correct server --SKIPIF-- --FILE-- "document"); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); $wresult = $manager->executeBulkWrite(NS, $bulk); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); /* writes go to the primary */ $server = $wresult->getServer(); var_dump( $server->getHost() ); $tags = $server->getTags(); echo "dc: ", array_key_exists('dc', $tags) ? $tags['dc'] : 'not set', "\n"; echo "ordinal: ", array_key_exists('ordinal', $tags) ? 
$tags['ordinal'] : 'not set', "\n"; var_dump( $server->getLatency(), $server->getPort(), $server->getType() == MongoDB\Driver\Server::TYPE_RS_PRIMARY, $server->isPrimary(), $server->isSecondary(), $server->isArbiter(), $server->isHidden(), $server->isPassive() ); $info = $server->getInfo(); // isMaster output changes between mongod versions var_dump($info["setName"], $info["hosts"]); var_dump($info["me"] == $server->getHost() . ":" . $server->getPort()); ?> ===DONE=== --EXPECTF-- string(%d) "%s" dc: pa ordinal: one int(%d) int(%d) bool(true) bool(true) bool(false) bool(false) bool(false) bool(false) string(%d) "REPLICASET%S" array(2) { [0]=> string(%d) "%s:%d" [1]=> string(%d) "%s:%d" } bool(true) ===DONE=== mongodb-1.6.1/tests/replicaset/server-002.phpt0000644000076500000240000000300513572250760020450 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server: Manager->getServer() returning correct server --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()), $rp); /* writes go to the primary */ $server = $result->getServer(); var_dump( $server->getHost() ); $tags = $server->getTags(); echo "dc: ", array_key_exists('dc', $tags) ? $tags['dc'] : 'not set', "\n"; echo "ordinal: ", array_key_exists('ordinal', $tags) ? $tags['ordinal'] : 'not set', "\n"; var_dump( $server->getLatency(), $server->getPort(), $server->getType() == MongoDB\Driver\Server::TYPE_RS_SECONDARY, $server->isPrimary(), $server->isSecondary(), $server->isArbiter(), $server->isHidden(), $server->isPassive() ); $info = $server->getInfo(); // isMaster output changes between mongod versions var_dump($info["setName"], $info["hosts"]); var_dump($info["me"] == $server->getHost() . ":" . $server->getPort()); ?> ===DONE=== --EXPECTF-- string(%d) "%s" dc: nyc ordinal: two int(%d) int(%d) bool(true) bool(false) bool(true) bool(false) bool(false) bool(false) string(%s) "REPLICASET%S" array(2) { [0]=> string(%d) "%s:%d" [1]=> string(%d) "%s:%d" } bool(true) ===DONE=== mongodb-1.6.1/tests/replicaset/writeconcernerror-001.phpt0000644000076500000240000000151113572250760022715 0ustar alcaeusstaff--TEST-- WriteConcernError: Populate WriteConcernError on WriteConcern errors --SKIPIF-- --FILE-- insert(array("my" => "value")); $w = new MongoDB\Driver\WriteConcern(30, 100); try { $retval = $manager->executeBulkWrite(NS, $bulk, $w); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { $server = $e->getWriteResult()->getServer(); $server->getPort(); printWriteResult($e->getWriteResult(), false); } ?> ===DONE=== --EXPECTF-- server: %s:%d insertedCount: 1 matchedCount: 0 modifiedCount: 0 upsertedCount: 0 deletedCount: 0 writeConcernError: %s (%d) ===DONE=== mongodb-1.6.1/tests/replicaset/writeconcernerror-002.phpt0000644000076500000240000000204313572250760022717 0ustar alcaeusstaff--TEST-- WriteConcernError: Access write counts and WriteConcern reason --SKIPIF-- --FILE-- insert(array("my" => "value")); $bulk->insert(array("my" => "value", "foo" => "bar")); $bulk->insert(array("my" => "value", "foo" => "bar")); $bulk->delete(array("my" => "value", "foo" => "bar"), array("limit" => 1)); $bulk->update(array("foo" => "bar"), array('$set' => array("foo" => "baz")), array("limit" => 1, "upsert" => 0)); $w = new MongoDB\Driver\WriteConcern(30); try { $retval = $manager->executeBulkWrite(NS, $bulk, $w); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { printWriteResult($e->getWriteResult(), false); } ?> ===DONE=== --EXPECTF-- server: %s:%d insertedCount: 3 matchedCount: 1 modifiedCount: 1 upsertedCount: 0 deletedCount: 1 
writeConcernError: %s (%d) ===DONE=== mongodb-1.6.1/tests/replicaset/writeresult-getserver-001.phpt0000644000076500000240000000220413572250760023536 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server: Manager->getServer() returning correct server --SKIPIF-- --FILE-- "document"); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); $wresult = $manager->executeBulkWrite(NS, $bulk); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); /* writes go to the primary */ $server = $wresult->getServer(); /* This is the same server */ $server2 = $server->executeBulkWrite(NS, $bulk)->getServer(); /* Both are the primary, e.g. the same server */ var_dump($server == $server2); $rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY); /* Fetch a secondary */ $server3 = $manager->executeQuery(NS, new MongoDB\Driver\Query(array()), $rp)->getServer(); var_dump($server == $server3); var_dump($server->getPort(), $server3->getPort()); ?> ===DONE=== --EXPECTF-- bool(true) bool(false) int(%d) int(%d) ===DONE=== mongodb-1.6.1/tests/replicaset/writeresult-getserver-002.phpt0000644000076500000240000000447513572250760023553 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server: Manager->getServer() returning correct server --SKIPIF-- --FILE-- false]); $doc = array("example" => "document"); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); $wresult = $manager->executeBulkWrite(NS, $bulk); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); /* writes go to the primary */ $server = $wresult->getServer(); /* This is the same server */ $server2 = $server->executeBulkWrite(NS, $bulk)->getServer(); /* Both are the primary, e.g. the same server */ var_dump($server == $server2); $rp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY); /* Fetch a secondary */ $server3 = $manager->executeQuery(NS, new MongoDB\Driver\Query(array()), $rp)->getServer(); var_dump($server == $server3); var_dump($server->getPort(), $server3->getPort()); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert($doc); $result = $server3->executeBulkWrite('local.' . COLLECTION_NAME, $bulk); var_dump($result, $result->getServer()->getHost(), $result->getServer()->getPort()); $result = $server3->executeQuery('local.' . 
COLLECTION_NAME, new MongoDB\Driver\Query(array())); foreach($result as $document) { var_dump($document); } $cmd = new MongoDB\Driver\Command(['drop' => COLLECTION_NAME]); $server3->executeCommand("local", $cmd); ?> ===DONE=== --EXPECTF-- bool(true) bool(false) int(%d) int(%d) object(MongoDB\Driver\WriteResult)#%d (%d) { ["nInserted"]=> int(1) ["nMatched"]=> int(0) ["nModified"]=> int(0) ["nRemoved"]=> int(0) ["nUpserted"]=> int(0) ["upsertedIds"]=> array(0) { } ["writeErrors"]=> array(0) { } ["writeConcernError"]=> NULL ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (%d) { } } string(%d) "%s" int(%d) object(stdClass)#%d (2) { ["_id"]=> object(%s\ObjectId)#%d (1) { ["oid"]=> string(24) "%s" } ["example"]=> string(8) "document" } ===DONE=== mongodb-1.6.1/tests/retryable-reads/retryable-reads-001.phpt0000644000076500000240000000356413572250760023172 0ustar alcaeusstaff--TEST-- Retryable reads: executeReadCommand is retried once --SKIPIF-- --FILE-- getCommandName()); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $manager = new MongoDB\Driver\Manager(URI, ['retryReads' => true]); // Select a specific server for future operations to avoid mongos switching in sharded clusters $server = $manager->selectServer(new \MongoDB\Driver\ReadPreference('primary')); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $bulk->insert(['x' => 2]); $server->executeBulkWrite(NS, $bulk); configureTargetedFailPoint($server, 'failCommand', ['times' => 1], ['failCommands' => ['aggregate'], 'closeConnection' => true]); $observer = new Observer; MongoDB\Driver\Monitoring\addSubscriber($observer); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ ['$group' => ['_id' => 1, 'n' => ['$sum' => 1]]], ], 'cursor' => (object) [], ]); $cursor = $server->executeReadCommand(DATABASE_NAME, $command); var_dump(iterator_to_array($cursor)); MongoDB\Driver\Monitoring\removeSubscriber($observer); ?> ===DONE=== --EXPECTF-- Command started: aggregate Command started: aggregate array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["n"]=> int(2) } } ===DONE=== mongodb-1.6.1/tests/retryable-reads/retryable-reads-002.phpt0000644000076500000240000000310713572250760023164 0ustar alcaeusstaff--TEST-- Retryable reads: executeQuery is retried once --SKIPIF-- --FILE-- getCommandName()); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $manager = new MongoDB\Driver\Manager(URI, ['retryReads' => true]); // Select a specific server for future operations to avoid mongos switching in sharded clusters $server = $manager->selectServer(new \MongoDB\Driver\ReadPreference('primary')); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $bulk->insert(['x' => 2]); $server->executeBulkWrite(NS, $bulk); configureTargetedFailPoint($server, 'failCommand', ['times' => 1], ['failCommands' => ['find'], 'closeConnection' => true]); $observer = new Observer; MongoDB\Driver\Monitoring\addSubscriber($observer); $cursor = $server->executeQuery(NS, new \MongoDB\Driver\Query(['x' => 1])); var_dump(iterator_count($cursor)); MongoDB\Driver\Monitoring\removeSubscriber($observer); ?> ===DONE=== --EXPECT-- Command started: find Command started: find int(1) ===DONE=== 
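The two passing retryable-reads tests above show the driver transparently retrying a failed "aggregate" or "find" exactly once when retryable reads are enabled. Purely as an illustrative sketch (not part of the packaged test suite), opting in from application code can look like the following; the connection string "mongodb://127.0.0.1:27017" and the "db.collection" namespace are placeholder assumptions, not values taken from these tests.

<?php
// Illustrative sketch: explicitly enable retryable reads for this client.
$manager = new MongoDB\Driver\Manager('mongodb://127.0.0.1:27017', ['retryReads' => true]);

// If the first attempt of this query fails with a retryable error (for example a
// closed connection), the driver retries it once on a suitable server before
// surfacing an exception.
$query = new MongoDB\Driver\Query(['x' => 1]);
$cursor = $manager->executeQuery('db.collection', $query);

var_dump(iterator_count($cursor));
?>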
mongodb-1.6.1/tests/retryable-reads/retryable-reads_error-001.phpt0000644000076500000240000000337013572250760024376 0ustar alcaeusstaff--TEST-- Retryable reads: executeReadCommand is not retried when retryable reads are disabled --SKIPIF-- --FILE-- getCommandName()); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $manager = new MongoDB\Driver\Manager(URI, ['retryReads' => false]); // Select a specific server for future operations to avoid mongos switching in sharded clusters $server = $manager->selectServer(new \MongoDB\Driver\ReadPreference('primary')); configureTargetedFailPoint($server, 'failCommand', ['times' => 1], ['failCommands' => ['aggregate'], 'closeConnection' => true]); $observer = new Observer; MongoDB\Driver\Monitoring\addSubscriber($observer); throws( function() use ($server) { $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ ['$group' => ['_id' => 1, 'n' => ['$sum' => 1]]], ], 'cursor' => (object) [], ]); $server->executeReadCommand(DATABASE_NAME, $command); }, \MongoDB\Driver\Exception\ConnectionTimeoutException::class ); ?> ===DONE=== --EXPECT-- Command started: aggregate OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException ===DONE=== mongodb-1.6.1/tests/retryable-reads/retryable-reads_error-002.phpt0000644000076500000240000000277213572250760024404 0ustar alcaeusstaff--TEST-- Retryable reads: executeQuery is not retried when retryable reads are disabled --SKIPIF-- --FILE-- getCommandName()); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $manager = new MongoDB\Driver\Manager(URI, ['retryReads' => false]); // Select a specific server for future operations to avoid mongos switching in sharded clusters $server = $manager->selectServer(new \MongoDB\Driver\ReadPreference('primary')); configureTargetedFailPoint($server, 'failCommand', ['times' => 1], ['failCommands' => ['find'], 'closeConnection' => true]); $observer = new Observer; MongoDB\Driver\Monitoring\addSubscriber($observer); throws( function() use ($server) { $server->executeQuery(NS, new \MongoDB\Driver\Query(['x' => 1])); }, \MongoDB\Driver\Exception\ConnectionTimeoutException::class ); ?> ===DONE=== --EXPECT-- Command started: find OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException ===DONE=== mongodb-1.6.1/tests/retryable-writes/retryable-writes-001.phpt0000644000076500000240000000461613572250761023630 0ustar alcaeusstaff--TEST-- Retryable writes: supported single-statement operations include transaction IDs --SKIPIF-- --FILE-- getCommand(); $hasTransactionId = isset($command->lsid) && isset($command->txnNumber); printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 
'yes' : 'no'); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $observer = new TransactionIdObserver; MongoDB\Driver\Monitoring\addSubscriber($observer); $manager = new MongoDB\Driver\Manager(URI); echo "Testing deleteOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->delete(['x' => 1], ['limit' => 1]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting insertOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting replaceOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->update(['x' => 1], ['x' => 2]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting updateOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->update(['x' => 1], ['$inc' => ['x' => 1]]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting findAndModify\n"; $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['x' => 1], 'update' => ['$inc' => ['x' => 1]], ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command); MongoDB\Driver\Monitoring\removeSubscriber($observer); ?> ===DONE=== --EXPECT-- Testing deleteOne delete command includes transaction ID: yes Testing insertOne insert command includes transaction ID: yes Testing replaceOne update command includes transaction ID: yes Testing updateOne update command includes transaction ID: yes Testing findAndModify findAndModify command includes transaction ID: yes ===DONE=== mongodb-1.6.1/tests/retryable-writes/retryable-writes-002.phpt0000644000076500000240000000544113572250761023626 0ustar alcaeusstaff--TEST-- Retryable writes: supported multi-statement operations include transaction IDs --SKIPIF-- --FILE-- getCommand(); $hasTransactionId = isset($command->lsid) && isset($command->txnNumber); printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 
'yes' : 'no'); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $observer = new TransactionIdObserver; MongoDB\Driver\Monitoring\addSubscriber($observer); $manager = new MongoDB\Driver\Manager(URI); echo "Testing multi-statement bulk write (ordered=true)\n"; $bulk = new MongoDB\Driver\BulkWrite(['ordered' => true]); $bulk->delete(['x' => 1], ['limit' => 1]); $bulk->insert(['x' => 1]); $bulk->update(['x' => 1], ['$inc' => ['x' => 1]]); $bulk->update(['x' => 1], ['x' => 2]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting multi-statement bulk write (ordered=false)\n"; $bulk = new MongoDB\Driver\BulkWrite(['ordered' => false]); $bulk->delete(['x' => 1], ['limit' => 1]); $bulk->insert(['x' => 1]); $bulk->update(['x' => 1], ['$inc' => ['x' => 1]]); $bulk->update(['x' => 1], ['x' => 2]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting insertMany (ordered=true)\n"; $bulk = new MongoDB\Driver\BulkWrite(['ordered' => true]); $bulk->insert(['x' => 1]); $bulk->insert(['x' => 2]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting insertMany (ordered=false)\n"; $bulk = new MongoDB\Driver\BulkWrite(['ordered' => false]); $bulk->insert(['x' => 1]); $bulk->insert(['x' => 2]); $manager->executeBulkWrite(NS, $bulk); MongoDB\Driver\Monitoring\removeSubscriber($observer); ?> ===DONE=== --EXPECT-- Testing multi-statement bulk write (ordered=true) delete command includes transaction ID: yes insert command includes transaction ID: yes update command includes transaction ID: yes Testing multi-statement bulk write (ordered=false) delete command includes transaction ID: yes insert command includes transaction ID: yes update command includes transaction ID: yes Testing insertMany (ordered=true) insert command includes transaction ID: yes Testing insertMany (ordered=false) insert command includes transaction ID: yes ===DONE=== mongodb-1.6.1/tests/retryable-writes/retryable-writes-003.phpt0000644000076500000240000000645713572250761023637 0ustar alcaeusstaff--TEST-- Retryable writes: unsupported operations do not include transaction IDs --SKIPIF-- --FILE-- getCommand(); $hasTransactionId = isset($command->lsid) && isset($command->txnNumber); printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 
'yes' : 'no'); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $observer = new TransactionIdObserver; MongoDB\Driver\Monitoring\addSubscriber($observer); $manager = new MongoDB\Driver\Manager(URI); echo "Testing deleteMany\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->delete(['x' => 1], ['limit' => 0]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting updateMany\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->update(['x' => 1], ['$inc' => ['x' => 1]], ['multi' => true]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting multi-statement bulk write with one unsupported operation (ordered=true)\n"; $bulk = new MongoDB\Driver\BulkWrite(['ordered' => true]); $bulk->delete(['x' => 1], ['limit' => 1]); $bulk->insert(['x' => 1]); $bulk->update(['x' => 1], ['$inc' => ['x' => 1]]); $bulk->update(['x' => 1], ['x' => 2]); $bulk->update(['x' => 1], ['$inc' => ['x' => 1]], ['multi' => true]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting multi-statement bulk write with one unsupported operation (ordered=false)\n"; $bulk = new MongoDB\Driver\BulkWrite(['ordered' => false]); $bulk->delete(['x' => 1], ['limit' => 1]); $bulk->insert(['x' => 1]); $bulk->update(['x' => 1], ['$inc' => ['x' => 1]]); $bulk->update(['x' => 1], ['x' => 2]); $bulk->update(['x' => 1], ['$inc' => ['x' => 1]], ['multi' => true]); $manager->executeBulkWrite(NS, $bulk); echo "\nTesting aggregate\n"; $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ ['$match' => ['x' => 1]], ['$out' => COLLECTION_NAME . '.out'], ], 'cursor' => new stdClass, ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command); MongoDB\Driver\Monitoring\removeSubscriber($observer); ?> ===DONE=== --EXPECT-- Testing deleteMany delete command includes transaction ID: no Testing updateMany update command includes transaction ID: no Testing multi-statement bulk write with one unsupported operation (ordered=true) delete command includes transaction ID: yes insert command includes transaction ID: yes update command includes transaction ID: no Testing multi-statement bulk write with one unsupported operation (ordered=false) delete command includes transaction ID: yes insert command includes transaction ID: yes update command includes transaction ID: no Testing aggregate aggregate command includes transaction ID: no ===DONE=== mongodb-1.6.1/tests/retryable-writes/retryable-writes-004.phpt0000644000076500000240000000575613572250761023641 0ustar alcaeusstaff--TEST-- Retryable writes: unacknowledged write operations do not include transaction IDs --SKIPIF-- --FILE-- getCommand(); $hasTransactionId = isset($command->lsid) && isset($command->txnNumber); printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 
'yes' : 'no'); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $observer = new TransactionIdObserver; MongoDB\Driver\Monitoring\addSubscriber($observer); $manager = new MongoDB\Driver\Manager(URI); $writeConcern = new MongoDB\Driver\WriteConcern(0); echo "Testing unacknowledged deleteOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->delete(['x' => 1], ['limit' => 1]); $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]); echo "\nTesting unacknowledged insertOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]); echo "\nTesting unacknowledged replaceOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->update(['x' => 1], ['x' => 2]); $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]); echo "\nTesting unacknowledged updateOne\n"; $bulk = new MongoDB\Driver\BulkWrite; $bulk->update(['x' => 1], ['$inc' => ['x' => 1]]); $manager->executeBulkWrite(NS, $bulk, ['writeConcern' => $writeConcern]); /* Note: the server does not actually support unacknowledged write concerns for * findAndModify. This is just testing that mongoc_cmd_parts_set_write_concern() * in libmongoc detects w:0 and refrains from adding a transaction ID. */ echo "\nTesting unacknowledged findAndModify\n"; $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['x' => 1], 'update' => ['$inc' => ['x' => 1]], ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' => $writeConcern]); MongoDB\Driver\Monitoring\removeSubscriber($observer); ?> ===DONE=== --EXPECT-- Testing unacknowledged deleteOne delete command includes transaction ID: no Testing unacknowledged insertOne insert command includes transaction ID: no Testing unacknowledged replaceOne update command includes transaction ID: no Testing unacknowledged updateOne update command includes transaction ID: no Testing unacknowledged findAndModify findAndModify command includes transaction ID: no ===DONE=== mongodb-1.6.1/tests/retryable-writes/retryable-writes-005.phpt0000644000076500000240000000424613572250761023633 0ustar alcaeusstaff--TEST-- Retryable writes: non-write command methods do not include transaction IDs --SKIPIF-- --FILE-- getCommand(); $hasTransactionId = isset($command->lsid) && isset($command->txnNumber); printf("%s command includes transaction ID: %s\n", $event->getCommandName(), $hasTransactionId ? 
'yes' : 'no'); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $observer = new TransactionIdObserver; MongoDB\Driver\Monitoring\addSubscriber($observer); $manager = new MongoDB\Driver\Manager(URI); $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['x' => 1], 'update' => ['$inc' => ['x' => 1]], ]); echo "Testing Manager::executeCommand()\n"; $manager->executeCommand(DATABASE_NAME, $command); echo "\nTesting Manager::executeReadCommand()\n"; $manager->executeReadCommand(DATABASE_NAME, $command); echo "\nTesting Manager::executeReadWriteCommand()\n"; $manager->executeReadWriteCommand(DATABASE_NAME, $command); echo "\nTesting Manager::executeWriteCommand()\n"; $manager->executeWriteCommand(DATABASE_NAME, $command); MongoDB\Driver\Monitoring\removeSubscriber($observer); ?> ===DONE=== --EXPECT-- Testing Manager::executeCommand() findAndModify command includes transaction ID: no Testing Manager::executeReadCommand() findAndModify command includes transaction ID: no Testing Manager::executeReadWriteCommand() findAndModify command includes transaction ID: yes Testing Manager::executeWriteCommand() findAndModify command includes transaction ID: yes ===DONE=== mongodb-1.6.1/tests/retryable-writes/retryable-writes_error-001.phpt0000644000076500000240000000224613572250761025036 0ustar alcaeusstaff--TEST-- Retryable writes: actionable error message when using retryable writes on unsupported storage engines --SKIPIF-- --FILE-- startSession(); echo throws( function() use ($manager, $session) { $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['x' => 1], 'update' => ['$inc' => ['x' => 1]], ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => $session]); }, \MongoDB\Driver\Exception\CommandException::class ); echo "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\CommandException This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string. ===DONE=== mongodb-1.6.1/tests/server/bug0671-002.phpt0000644000076500000240000000125213572250761017413 0ustar alcaeusstaff--TEST-- PHPC-671: Segfault if Manager is already freed when using selected Server --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); unset($manager); $cursor = $server->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1)%A } ===DONE=== mongodb-1.6.1/tests/server/server-constants.phpt0000644000076500000240000000115613572250761021344 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server constants --FILE-- ===DONE=== --EXPECT-- int(0) int(1) int(2) int(3) int(4) int(5) int(6) int(7) int(8) ===DONE=== mongodb-1.6.1/tests/server/server-construct-001.phpt0000644000076500000240000000162613572250761021654 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::__construct() --SKIPIF-- --FILE-- getInfo()['me'] : URI; $parsed = parse_url($uri); $manager = new MongoDB\Driver\Manager(URI); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('foo' => 'bar')); $server = $manager->executeBulkWrite(NS, $bulk)->getServer(); $expectedHost = $parsed['host']; $expectedPort = (integer) (isset($parsed['port']) ? 
$parsed['port'] : 27017); var_dump($server->getHost() == $expectedHost); var_dump($server->getPort() == $expectedPort); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/server/server-debug.phpt0000644000076500000240000000144213572250761020414 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server debug output --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); var_dump($server); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Server)#%d (%d) { ["host"]=> string(%d) "%s" ["port"]=> int(%d) ["type"]=> int(%d) ["is_primary"]=> bool(%s) ["is_secondary"]=> bool(%s) ["is_arbiter"]=> bool(false) ["is_hidden"]=> bool(false) ["is_passive"]=> bool(false)%A ["last_is_master"]=> array(%d) { %a } ["round_trip_time"]=> int(%d) } ===DONE=== mongodb-1.6.1/tests/server/server-errors.phpt0000644000076500000240000000370013572250761020641 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() with sort and empty filter --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); var_dump($server->getHost(true)); var_dump($server->getTags(true)); var_dump($server->getInfo(true)); var_dump($server->getLatency(true)); var_dump($server->getPort(true)); var_dump($server->getType(true)); var_dump($server->isPrimary(true)); var_dump($server->isSecondary(true)); var_dump($server->isArbiter(true)); var_dump($server->isHidden(true)); var_dump($server->isPassive(true)); ?> ===DONE=== --EXPECTF-- Warning: MongoDB\Driver\Server::getHost() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::getTags() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::getInfo() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::getLatency() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::getPort() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::getType() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::isPrimary() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::isSecondary() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::isArbiter() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::isHidden() expects exactly 0 parameters, 1 given in %s on line %d NULL Warning: MongoDB\Driver\Server::isPassive() expects exactly 0 parameters, 1 given in %s on line %d NULL ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-001.phpt0000644000076500000240000000373713572250761023130 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 2)); $bulk->update(array('x' => 2), array('$set' => array('x' => 1)), array("limit" => 1, "upsert" => false)); $bulk->update(array('_id' => 3), array('$set' => array('x' => 3)), array("limit" => 1, "upsert" => true)); $bulk->delete(array('x' => 1), array("limit" => 1)); $result = $server->executeBulkWrite(NS, $bulk); printf("WriteResult.server is the same: %s\n", $server == $result->getServer() ? 
'yes' : 'no'); echo "\n===> WriteResult\n"; printWriteResult($result); var_dump($result); echo "\n===> Collection\n"; $cursor = $server->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- WriteResult.server is the same: yes ===> WriteResult server: %s:%d insertedCount: 2 matchedCount: 1 modifiedCount: 1 upsertedCount: 1 deletedCount: 1 upsertedId[3]: int(3) object(MongoDB\Driver\WriteResult)#%d (%d) { ["nInserted"]=> int(2) ["nMatched"]=> int(1) ["nModified"]=> int(1) ["nRemoved"]=> int(1) ["nUpserted"]=> int(1) ["upsertedIds"]=> array(1) { [0]=> array(%d) { ["index"]=> int(3) ["_id"]=> int(3) } } ["writeErrors"]=> array(0) { } ["writeConcernError"]=> NULL ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (%d) { } } ===> Collection array(2) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) ["x"]=> int(1) } [1]=> object(stdClass)#%d (%d) { ["_id"]=> int(3) ["x"]=> int(3) } } ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-002.phpt0000644000076500000240000000155413572250761023124 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with write concern (standalone) --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); $writeConcerns = array(0, 1); foreach ($writeConcerns as $writeConcern) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('wc' => $writeConcern)); $result = $primary->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($writeConcern)); var_dump($result->isAcknowledged()); var_dump($result->getInsertedCount()); } ?> ===DONE=== --EXPECT-- bool(false) NULL bool(true) int(1) ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-003.phpt0000644000076500000240000000173413572250761023125 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with legacy write concern (replica set primary) --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); $writeConcerns = array(0, 1, 2, MongoDB\Driver\WriteConcern::MAJORITY); foreach ($writeConcerns as $wc) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('wc' => $wc)); $result = $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($wc)); var_dump($result->isAcknowledged()); var_dump($result->getInsertedCount()); } ?> ===DONE=== --EXPECT-- bool(false) NULL bool(true) int(1) bool(true) int(1) bool(true) int(1) ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-004.phpt0000644000076500000240000000243513572250761023125 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with legacy write concern (replica set secondary) --SKIPIF-- --FILE-- false]); $server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY)); $writeConcerns = array(1, 2, MongoDB\Driver\WriteConcern::MAJORITY); foreach ($writeConcerns as $wc) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('wc' => $wc)); echo throws(function() use ($server, $bulk, $wc) { $server->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern($wc)); }, "MongoDB\Driver\Exception\RuntimeException"), "\n"; } ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\RuntimeException not master OK: Got MongoDB\Driver\Exception\RuntimeException not master OK: Got MongoDB\Driver\Exception\RuntimeException not master ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-005.phpt0000644000076500000240000000246413572250761023130 
0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with write concern (replica set secondary, local DB) --SKIPIF-- --FILE-- false]); $server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY)); /* The server ignores write concerns with w>2 for writes to the local database, * so we won't test behavior for w=2 and w=majority. */ $writeConcerns = array(0, 1); foreach ($writeConcerns as $wc) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(array('wc' => $wc)); $result = $server->executeBulkWrite('local.' . COLLECTION_NAME, $bulk, new MongoDB\Driver\WriteConcern($wc)); var_dump($result->isAcknowledged()); var_dump($result->getInsertedCount()); } $bulk = new MongoDB\Driver\BulkWrite(); $bulk->delete( (object) [] ); $server->executeBulkWrite('local.' . COLLECTION_NAME, $bulk); ?> ===DONE=== --EXPECT-- bool(false) NULL bool(true) int(1) ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-006.phpt0000644000076500000240000000201713572250761023123 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with legacy write concern (replica set primary) --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); $writeConcerns = [0, 1, 2, MongoDB\Driver\WriteConcern::MAJORITY]; foreach ($writeConcerns as $wc) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['wc' => $wc]); $options = [ 'writeConcern' => new MongoDB\Driver\WriteConcern($wc), ]; $result = $server->executeBulkWrite(NS, $bulk, $options); var_dump($result->isAcknowledged()); var_dump($result->getInsertedCount()); } ?> ===DONE=== --EXPECT-- bool(false) NULL bool(true) int(1) bool(true) int(1) bool(true) int(1) ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-007.phpt0000644000076500000240000000251613572250761023130 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with write concern (replica set secondary) --SKIPIF-- --FILE-- false]); $server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY)); $writeConcerns = [1, 2, MongoDB\Driver\WriteConcern::MAJORITY]; foreach ($writeConcerns as $wc) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['wc' => $wc]); $options = [ 'writeConcern' => new MongoDB\Driver\WriteConcern($wc), ]; echo throws(function() use ($server, $bulk, $options) { $server->executeBulkWrite(NS, $bulk, $options); }, "MongoDB\Driver\Exception\RuntimeException"), "\n"; } ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\RuntimeException not master OK: Got MongoDB\Driver\Exception\RuntimeException not master OK: Got MongoDB\Driver\Exception\RuntimeException not master ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite-008.phpt0000644000076500000240000000426213572250761023131 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() pins transaction to server --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $servers = $manager->getServers(); $selectedServer = array_pop($servers); $wrongServer = array_pop($servers); var_dump($selectedServer != $wrongServer); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); 
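// This write runs under the session's active transaction, pinning the transaction
// to $selectedServer; the checks below confirm the pinning and that routing the
// same transaction through $wrongServer is rejected.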
$bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() == $selectedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); echo throws(function () use ($wrongServer, $session) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $wrongServer->executeBulkWrite(NS, $bulk, ['session' => $session]); }, \MongoDB\Driver\Exception\BulkWriteException::class), "\n"; $session->commitTransaction(); var_dump($session->getServer() == $selectedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) OK: Got MongoDB\Driver\Exception\BulkWriteException Bulk write failed due to previous MongoDB\Driver\Exception\RuntimeException: Requested server id does not matched pinned server id bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite_error-001.phpt0000644000076500000240000000133113572250761024325 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with empty BulkWrite --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); echo throws(function() use ($server) { $server->executeBulkWrite(NS, new MongoDB\Driver\BulkWrite); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot do an empty bulk write ===DONE=== mongodb-1.6.1/tests/server/server-executeBulkWrite_error-002.phpt0000644000076500000240000000365713572250761024343 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeBulkWrite() with invalid options --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); echo throws(function() use ($server) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $server->executeBulkWrite(NS, $bulk, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $server->executeBulkWrite(NS, $bulk, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $server->executeBulkWrite(NS, $bulk, ['writeConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $server->executeBulkWrite(NS, $bulk, ['writeConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== 
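For contrast with the error tests just above (an empty bulk write, and invalid "session"/"writeConcern" option types), a minimal sketch of passing correctly typed options to Server::executeBulkWrite() might look like this. It is illustrative only: the URI and "db.collection" namespace are placeholders, and the optional session assumes a deployment that supports sessions.

<?php
// Illustrative sketch: "writeConcern" must be a WriteConcern object and "session"
// (if used) must be a Session object, matching the errors asserted above.
$manager = new MongoDB\Driver\Manager('mongodb://127.0.0.1:27017');
$server = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY));

$bulk = new MongoDB\Driver\BulkWrite();
$bulk->insert(['x' => 1]);

$options = [
    'writeConcern' => new MongoDB\Driver\WriteConcern(1), // acknowledged write
    'session'      => $manager->startSession(),           // optional
];

$result = $server->executeBulkWrite('db.collection', $bulk, $options);
var_dump($result->getInsertedCount());
?>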
mongodb-1.6.1/tests/server/server-executeCommand-001.phpt0000644000076500000240000000245613572250761022573 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); $command = new MongoDB\Driver\Command(array('ping' => 1)); $result = $server->executeCommand(DATABASE_NAME, $command); var_dump($result instanceof MongoDB\Driver\Cursor); var_dump($result); echo "\nDumping response document:\n"; var_dump(current($result->toArray())); var_dump($server == $result->getServer()); ?> ===DONE=== --EXPECTF-- bool(true) object(MongoDB\Driver\Cursor)#%d (%d) { ["database"]=> string(6) "phongo" ["collection"]=> NULL ["query"]=> NULL ["command"]=> object(MongoDB\Driver\Command)#%d (%d) { ["command"]=> object(stdClass)#%d (%d) { ["ping"]=> int(1) } } ["readPreference"]=> NULL ["session"]=> %a ["isDead"]=> bool(false) ["currentIndex"]=> int(0) ["currentDocument"]=> NULL ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } Dumping response document: object(stdClass)#%d (%d) { ["ok"]=> float(1)%A } bool(true) ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-002.phpt0000644000076500000240000000410513572250761022565 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() takes a read preference --SKIPIF-- --FILE-- selectServer($rp); $command = new MongoDB\Driver\Command(array('profile' => 2)); $cursor = $secondary->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 2 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes')); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ [ '$match' => [ 'x' => 1 ] ] ], 'cursor' => (object) [], ]); $secondary->executeCommand(DATABASE_NAME, $command, $rp); $query = new MongoDB\Driver\Query( array( 'op' => 'command', 'ns' => DATABASE_NAME . '.' . COLLECTION_NAME, ), array( 'sort' => array('ts' => -1), 'limit' => 1, ) ); $cursor = $secondary->executeQuery(DATABASE_NAME . '.system.profile', $query, $rp); $profileEntry = current($cursor->toArray()); var_dump($profileEntry->command); $command = new MongoDB\Driver\Command(array('profile' => 0)); $cursor = $secondary->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 0 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes')); ?> ===DONE=== --EXPECTF-- Set profile level to 2 successfully: yes object(stdClass)#%d (%d) { ["aggregate"]=> string(32) "server_server_executeCommand_002" ["pipeline"]=> array(1) { [0]=> object(stdClass)#%d (%d) { ["$match"]=> object(stdClass)#%d (%d) { ["x"]=> int(1) } } } ["cursor"]=> object(stdClass)#%d (%d) { }%A } Set profile level to 0 successfully: yes ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-003.phpt0000644000076500000240000000203113572250761022562 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() with conflicting read preference for secondary --SKIPIF-- --FILE-- selectServer($secondaryRp); /* Note: this is testing that the read preference (even a conflicting one) has * no effect when directly querying a server, since the slaveOk flag is always * set for hinted commands. 
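* A "hinted" command here is one sent directly to an explicitly selected Server instance,
* so the ping below is still answered by the chosen secondary.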
*/ $primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $cursor = $secondary->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(array('ping' => 1)), $primaryRp); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- array(1) { [0]=> object(stdClass)#%d (%d) { ["ok"]=> float(1)%A } } ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-004.phpt0000644000076500000240000000275613572250761022601 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() takes a read preference in options array --SKIPIF-- --FILE-- selectServer($primaryRp); $secondary = $manager->selectServer($secondaryRp); echo "Testing primary:\n"; $command = new MongoDB\Driver\Command(['ping' => 1]); $cursor = $primary->executeCommand(DATABASE_NAME, $command, ['readPreference' => $primaryRp]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $command = new MongoDB\Driver\Command(['ping' => 1]); $cursor = $secondary->executeCommand(DATABASE_NAME, $command, ['readPreference' => $secondaryRp]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-005.phpt0000644000076500000240000000270213572250761022571 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() takes a read preference as legacy option --SKIPIF-- --FILE-- selectServer($primaryRp); $secondary = $manager->selectServer($secondaryRp); echo "Testing primary:\n"; $command = new MongoDB\Driver\Command(['ping' => 1]); $cursor = $primary->executeCommand(DATABASE_NAME, $command, $primaryRp); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $command = new MongoDB\Driver\Command(['ping' => 1]); $cursor = $secondary->executeCommand(DATABASE_NAME, $command, $secondaryRp); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 
'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-006.phpt0000644000076500000240000000324113572250761022571 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() options (MONGO_CMD_RAW) --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); (new CommandObserver)->observe( function() use ($server) { $command = new MongoDB\Driver\Command([ 'ping' => true, ]); try { $server->executeCommand( DATABASE_NAME, $command, [ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY), 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL), 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY), ] ); } catch ( Exception $e ) { // Ignore exception that ping doesn't support writeConcern } }, function(stdClass $command) { echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n"; echo "Read Concern: ", $command->readConcern->level, "\n"; echo "Write Concern: ", $command->writeConcern->w, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Preference: secondary Read Concern: local Write Concern: majority ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-007.phpt0000644000076500000240000000174013572250761022574 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() sends read preference to mongos --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); (new CommandObserver)->observe( function() use ($server) { $server->executeCommand( DATABASE_NAME, new MongoDB\Driver\Command(['ping' => true]), [ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_NEAREST), ] ); }, function(stdClass $command) { echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Preference: nearest ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-008.phpt0000644000076500000240000000332413572250761022575 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() does not send read preference to standalone --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); (new CommandObserver)->observe( function() use ($server) { $command = new MongoDB\Driver\Command([ 'ping' => true, ]); try { $server->executeCommand( DATABASE_NAME, $command, [ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY), 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL), 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY), ] ); } catch ( Exception $e ) { // Ignore exception that ping doesn't support writeConcern } }, function(stdClass $command) { echo isset($command->{'$readPreference'}) ? 
'Read preference set' : 'No read preference set', "\n"; echo "Read Concern: ", $command->readConcern->level, "\n"; echo "Write Concern: ", $command->writeConcern->w, "\n"; } ); ?> ===DONE=== --EXPECTF-- No read preference set Read Concern: local Write Concern: majority ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand-009.phpt0000644000076500000240000000446513572250761022605 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() pins transaction to server --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $servers = $manager->getServers(); $selectedServer = array_pop($servers); $wrongServer = array_pop($servers); var_dump($selectedServer != $wrongServer); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $selectedServer->executeCommand(DATABASE_NAME, $command, ['session' => $session]); var_dump($session->getServer() == $selectedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); echo throws(function () use ($wrongServer, $session) { $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $wrongServer->executeCommand(DATABASE_NAME, $command, ['session' => $session]); }, \MongoDB\Driver\Exception\RuntimeException::class), "\n"; $session->commitTransaction(); var_dump($session->getServer() == $selectedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) OK: Got MongoDB\Driver\Exception\RuntimeException Requested server id does not matched pinned server id bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/server/server-executeCommand_error-001.phpt0000644000076500000240000000614513572250761024003 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeCommand() with invalid options (MONGOC_CMD_RAW) --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); $command = new MongoDB\Driver\Command(['ping' => 1]); echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, 
$command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-001.phpt0000644000076500000240000000213013572250761022307 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() with filter and projection --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $server->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array('x' => 3), array('projection' => array('y' => 1))); $cursor = $server->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server == $cursor->getServer()); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) array(1) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["y"]=> int(4) } } ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-002.phpt0000644000076500000240000000250213572250761022313 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() with sort and empty filter --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $server->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array(), array('sort' => array('_id' => -1))); $cursor = $server->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server == $cursor->getServer()); 
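/* With the descending _id sort, iterating the cursor should yield the fixtures in the order 3, 2, 1,
 * matching the expected output below. */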
var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) array(3) { [0]=> object(stdClass)#%d (3) { ["_id"]=> int(3) ["x"]=> int(4) ["y"]=> int(5) } [1]=> object(stdClass)#%d (3) { ["_id"]=> int(2) ["x"]=> int(3) ["y"]=> int(4) } [2]=> object(stdClass)#%d (3) { ["_id"]=> int(1) ["x"]=> int(2) ["y"]=> int(3) } } ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-003.phpt0000644000076500000240000000252413572250761022320 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() with modifiers and empty filter --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 2, 'y' => 3)); $bulk->insert(array('_id' => 2, 'x' => 3, 'y' => 4)); $bulk->insert(array('_id' => 3, 'x' => 4, 'y' => 5)); $server->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query(array(), array('modifiers' => array('$comment' => 'foo'))); $cursor = $server->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server == $cursor->getServer()); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) array(3) { [0]=> object(stdClass)#%d (3) { ["_id"]=> int(1) ["x"]=> int(2) ["y"]=> int(3) } [1]=> object(stdClass)#%d (3) { ["_id"]=> int(2) ["x"]=> int(3) ["y"]=> int(4) } [2]=> object(stdClass)#%d (3) { ["_id"]=> int(3) ["x"]=> int(4) ["y"]=> int(5) } } ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-004.phpt0000644000076500000240000000135613572250761022323 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() finds no matching documents --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 1)); $server->executeBulkWrite(NS, $bulk); $cursor = $server->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 2))); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECT-- array(0) { } ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-005.phpt0000644000076500000240000000326513572250761022325 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() takes a read preference (OP_QUERY) --SKIPIF-- =', '3.1'); ?> --FILE-- selectServer($rp); $command = new MongoDB\Driver\Command(array('profile' => 2)); $cursor = $secondary->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 2 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes')); if (empty($result->ok)) { exit("Could not set profile level\n"); } $secondary->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1)), $rp); $query = new MongoDB\Driver\Query( array( 'op' => 'query', 'ns' => NS, ), array( 'sort' => array('ts' => -1), 'limit' => 1, ) ); $cursor = $secondary->executeQuery(DATABASE_NAME . '.system.profile', $query, $rp); $profileEntry = current($cursor->toArray()); var_dump($profileEntry->query); $command = new MongoDB\Driver\Command(array('profile' => 0)); $cursor = $secondary->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 0 successfully: %s\n", (empty($result->ok) ? 
'no' : 'yes')); ?> ===DONE=== --EXPECTF-- Set profile level to 2 successfully: yes object(stdClass)#%d (%d) { ["x"]=> int(1) } Set profile level to 0 successfully: yes ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-006.phpt0000644000076500000240000000354213572250761022324 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() takes a read preference (find command) --SKIPIF-- --FILE-- selectServer($rp); $command = new MongoDB\Driver\Command(array('profile' => 2)); $cursor = $secondary->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 2 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes')); if (empty($result->ok)) { exit("Could not set profile level\n"); } $secondary->executeQuery(NS, new MongoDB\Driver\Query(array("x" => 1)), $rp); $query = new MongoDB\Driver\Query( array( 'op' => 'query', 'ns' => NS, ), array( 'sort' => array('ts' => -1), 'limit' => 1, ) ); $cursor = $secondary->executeQuery(DATABASE_NAME . '.system.profile', $query, $rp); $profileEntry = current($cursor->toArray()); if (! isset( $profileEntry->command )) { var_dump($profileEntry); } var_dump($profileEntry->command->find); var_dump($profileEntry->command->filter); $command = new MongoDB\Driver\Command(array('profile' => 0)); $cursor = $secondary->executeCommand(DATABASE_NAME, $command); $result = current($cursor->toArray()); printf("Set profile level to 0 successfully: %s\n", (empty($result->ok) ? 'no' : 'yes')); ?> ===DONE=== --EXPECTF-- Set profile level to 2 successfully: yes string(%d) "%s" object(stdClass)#%d (1) { ["x"]=> int(1) } Set profile level to 0 successfully: yes ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-007.phpt0000644000076500000240000000227313572250761022325 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() with negative limit returns a single batch --SKIPIF-- --FILE-- executeQuery(NS, new MongoDB\Driver\Query(array()))->getServer(); // load fixtures for test $bulk = new \MongoDB\Driver\BulkWrite(); $bulk->insert(['_id' => 1, 'x' => 2, 'y' => 3]); $bulk->insert(['_id' => 2, 'x' => 3, 'y' => 4]); $bulk->insert(['_id' => 3, 'x' => 4, 'y' => 5]); $server->executeBulkWrite(NS, $bulk); $query = new MongoDB\Driver\Query([], ['limit' => -2]); $cursor = $server->executeQuery(NS, $query); var_dump($cursor instanceof MongoDB\Driver\Cursor); var_dump($server == $cursor->getServer()); var_dump(iterator_to_array($cursor)); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) array(2) { [0]=> object(stdClass)#%d (3) { ["_id"]=> int(1) ["x"]=> int(2) ["y"]=> int(3) } [1]=> object(stdClass)#%d (3) { ["_id"]=> int(2) ["x"]=> int(3) ["y"]=> int(4) } } ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-008.phpt0000644000076500000240000000244013572250761022322 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() with conflicting read preference for secondary --SKIPIF-- --FILE-- selectServer($primaryRp); $bulk = new \MongoDB\Driver\BulkWrite; $bulk->insert(['_id' => 1, 'x' => 1]); $primary->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(MongoDB\Driver\WriteConcern::MAJORITY)); $secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY); $secondary = $manager->selectServer($secondaryRp); /* Note: this is testing that the read preference (even a conflicting one) has * no effect when directly querying a server, since the slaveOk flag is always * set for hinted queries. 
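* The query below is therefore answered by the selected secondary even though a primary read preference is passed.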
*/ $cursor = $secondary->executeQuery(NS, new MongoDB\Driver\Query(['x' => 1]), $primaryRp); var_dump($cursor->toArray()); ?> ===DONE=== ( --EXPECTF-- array(1) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(1) ["x"]=> int(1) } } ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-009.phpt0000644000076500000240000000322513572250761022325 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() takes a read preference in options array --SKIPIF-- --FILE-- insert(['_id' => 1, 'x' => 2, 'y' => 3]); $manager->executeBulkWrite(NS, $bulk); $primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY); $primary = $manager->selectServer($primaryRp); $secondary = $manager->selectServer($secondaryRp); echo "Testing primary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, ['readPreference' => $primaryRp]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, ['readPreference' => $secondaryRp]); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-010.phpt0000644000076500000240000000315113572250761022313 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() takes a read preference as legacy option --SKIPIF-- --FILE-- insert(['_id' => 1, 'x' => 2, 'y' => 3]); $manager->executeBulkWrite(NS, $bulk); $primaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY); $secondaryRp = new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY); $primary = $manager->selectServer($primaryRp); $secondary = $manager->selectServer($secondaryRp); echo "Testing primary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, $primaryRp); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 'true' : 'false', "\n\n"; echo "Testing secondary:\n"; $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); $cursor = $manager->executeQuery(NS, $query, $secondaryRp); echo "is_primary: ", $cursor->getServer()->isPrimary() ? 'true' : 'false', "\n"; echo "is_secondary: ", $cursor->getServer()->isSecondary() ? 
'true' : 'false', "\n\n"; ?> ===DONE=== --EXPECTF-- Testing primary: is_primary: true is_secondary: false Testing secondary: is_primary: false is_secondary: true ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-011.phpt0000644000076500000240000000171113572250761022314 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() sends read preference to mongos --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); (new CommandObserver)->observe( function() use ($server) { $server->executeQuery( NS, new MongoDB\Driver\Query(['x' => 1]), [ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_NEAREST), ] ); }, function(stdClass $command) { echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Preference: nearest ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery-012.phpt0000644000076500000240000000404413572250761022317 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() pins transaction to server --SKIPIF-- --FILE-- executeReadWriteCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $servers = $manager->getServers(); $selectedServer = array_pop($servers); $wrongServer = array_pop($servers); var_dump($selectedServer != $wrongServer); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $query = new MongoDB\Driver\Query([]); $selectedServer->executeQuery(NS, $query, ['session' => $session]); var_dump($session->getServer() == $selectedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); echo throws(function () use ($wrongServer, $session) { $query = new MongoDB\Driver\Query([]); $wrongServer->executeQuery(NS, $query, ['session' => $session]); }, \MongoDB\Driver\Exception\RuntimeException::class), "\n"; $session->commitTransaction(); var_dump($session->getServer() == $selectedServer); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) OK: Got MongoDB\Driver\Exception\RuntimeException Requested server id does not matched pinned server id bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/server/server-executeQuery_error-001.phpt0000644000076500000240000000335413572250761023531 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeQuery() with invalid options --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); $query = new MongoDB\Driver\Query(['x' => 3], ['projection' => ['y' => 1]]); echo throws(function() use ($server, $query) { $server->executeQuery(NS, $query, ['readPreference' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $query) { $server->executeQuery(NS, $query, ['readPreference' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $query) { $server->executeQuery(NS, $query, ['session' => 'foo']); }, 
'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $query) { $server->executeQuery(NS, $query, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given ===DONE=== mongodb-1.6.1/tests/server/server-executeReadCommand-001.phpt0000644000076500000240000000273113572250761023363 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeReadCommand() --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY)); (new CommandObserver)->observe( function() use ($server) { $command = new MongoDB\Driver\Command( [ 'aggregate' => NS, 'pipeline' => [], 'cursor' => new stdClass(), ] ); $server->executeReadCommand( DATABASE_NAME, $command, [ 'readPreference' => new \MongoDB\Driver\ReadPreference(\MongoDB\Driver\ReadPreference::RP_SECONDARY), 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::MAJORITY), ] ); }, function(stdClass $command) { echo "Read Preference: ", $command->{'$readPreference'}->mode, "\n"; echo "Read Concern: ", $command->readConcern->level, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Preference: secondary Read Concern: majority ===DONE=== mongodb-1.6.1/tests/server/server-executeReadCommand-002.phpt0000644000076500000240000000456013572250761023366 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session test: Manager::executeReadCommand pins transaction to server --SKIPIF-- --FILE-- executeReadCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $servers = $manager->getServers(); $selectedServer = array_pop($servers); $wrongServer = array_pop($servers); var_dump($selectedServer != $wrongServer); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $selectedServer->executeReadCommand(DATABASE_NAME, $command, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); echo throws(function () use ($wrongServer, $session) { $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $wrongServer->executeReadCommand(DATABASE_NAME, $command, ['session' => $session]); }, \MongoDB\Driver\Exception\RuntimeException::class), "\n"; $session->commitTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); 
$selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) OK: Got MongoDB\Driver\Exception\RuntimeException Requested server id does not matched pinned server id bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/server/server-executeReadCommand_error-001.phpt0000644000076500000240000000506513572250761024577 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeReadCommand() with invalid options --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY)); $command = new MongoDB\Driver\Command(['ping' => 1]); echo throws(function() use ($server, $command) { $server->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadCommand(DATABASE_NAME, $command, ['readPreference' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadCommand(DATABASE_NAME, $command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given ===DONE=== mongodb-1.6.1/tests/server/server-executeReadWriteCommand-001.phpt0000644000076500000240000000247213572250761024400 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeReadWriteCommand() --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); (new CommandObserver)->observe( function() use ($server) { $command = new MongoDB\Driver\Command( [ 'findAndModify' => NS, 'update' => [ '$set' => [ 'foo' => 'bar' ] ], ] ); $server->executeReadWriteCommand( DATABASE_NAME, $command, [ 'readConcern' => new \MongoDB\Driver\ReadConcern(\MongoDB\Driver\ReadConcern::LOCAL), 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY), ] ); }, function(stdClass $command) { echo "Read Concern: ", $command->readConcern->level, 
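/* A follow-up write on the pinned server joins the same transaction; the command attempted on
 * $wrongServer below is expected to fail with a RuntimeException. */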
"\n"; echo "Write Concern: ", $command->writeConcern->w, "\n"; } ); ?> ===DONE=== --EXPECTF-- Read Concern: local Write Concern: majority ===DONE=== mongodb-1.6.1/tests/server/server-executeReadWriteCommand-002.phpt0000644000076500000240000000457713572250761024411 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session test: Manager::executeReadWriteCommand pins transaction to server --SKIPIF-- --FILE-- executeReadWriteCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $servers = $manager->getServers(); $selectedServer = array_pop($servers); $wrongServer = array_pop($servers); var_dump($selectedServer != $wrongServer); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $selectedServer->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); echo throws(function () use ($wrongServer, $session) { $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [['$group' => ['_id' => 1]]], 'cursor' => (object) [] ]); $wrongServer->executeReadCommand(DATABASE_NAME, $command, ['session' => $session]); }, \MongoDB\Driver\Exception\RuntimeException::class), "\n"; $session->commitTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) OK: Got MongoDB\Driver\Exception\RuntimeException Requested server id does not matched pinned server id bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/server/server-executeReadWriteCommand_error-001.phpt0000644000076500000240000000511413572250761025605 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeReadWriteCommand() with invalid options --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY)); $command = new MongoDB\Driver\Command(['ping' => 1]); echo throws(function() use ($server, $command) { $server->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadWriteCommand(DATABASE_NAME, $command, ['readConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadWriteCommand(DATABASE_NAME, $command, 
['writeConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeReadWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== mongodb-1.6.1/tests/server/server-executeWriteCommand-001.phpt0000644000076500000240000000224213572250761023577 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeWriteCommand() --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); $bw = new MongoDB\Driver\BulkWrite(); $bw->insert(['a' => 1]); $manager->executeBulkWrite(NS, $bw); (new CommandObserver)->observe( function() use ($server) { $command = new MongoDB\Driver\Command([ 'drop' => COLLECTION_NAME, ]); $server->executeWriteCommand( DATABASE_NAME, $command, [ 'writeConcern' => new \MongoDB\Driver\WriteConcern(\MongoDB\Driver\WriteConcern::MAJORITY), ] ); }, function(stdClass $command) { echo "Write Concern: ", $command->writeConcern->w, "\n"; } ); ?> ===DONE=== --EXPECTF-- Write Concern: majority ===DONE=== mongodb-1.6.1/tests/server/server-executeWriteCommand-002.phpt0000644000076500000240000000467213572250761023611 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session test: Manager::executeWriteCommand pins transaction to server --SKIPIF-- --FILE-- executeReadWriteCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $servers = $manager->getServers(); $selectedServer = array_pop($servers); $wrongServer = array_pop($servers); var_dump($selectedServer != $wrongServer); $session = $manager->startSession(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $session->startTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['_id' => 'foo'], 'upsert' => true, 'new' => true, 'update' => ['x' => 1] ]); $selectedServer->executeWriteCommand(DATABASE_NAME, $command, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); echo throws(function () use ($wrongServer, $session) { $command = new MongoDB\Driver\Command([ 'findAndModify' => COLLECTION_NAME, 'query' => ['_id' => 'foo'], 'upsert' => true, 'new' => true, 'update' => ['x' => 1] ]); $wrongServer->executeWriteCommand(DATABASE_NAME, $command, ['session' => $session]); }, 
\MongoDB\Driver\Exception\RuntimeException::class), "\n"; $session->commitTransaction(); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $selectedServer->executeBulkWrite(NS, $bulk, ['session' => $session]); var_dump($session->getServer() instanceof \MongoDB\Driver\Server); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(false) bool(true) OK: Got MongoDB\Driver\Exception\RuntimeException Requested server id does not matched pinned server id bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/server/server-executeWriteCommand_error-001.phpt0000644000076500000240000000366313572250761025020 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::executeWriteCommand() with invalid options --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY)); $command = new MongoDB\Driver\Command([]); echo throws(function() use ($server, $command) { $server->executeWriteCommand(DATABASE_NAME, $command, ['session' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeWriteCommand(DATABASE_NAME, $command, ['session' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => 'foo']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() use ($server, $command) { $server->executeWriteCommand(DATABASE_NAME, $command, ['writeConcern' => new stdClass]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "session" option to be MongoDB\Driver\Session, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, string given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given ===DONE=== mongodb-1.6.1/tests/server/server-getInfo-001.phpt0000644000076500000240000000073213572250761021220 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::getInfo() --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY))->getInfo()); } catch (Exception $e) {} ?> ===DONE=== --EXPECTF-- array(%d) { %a } ===DONE=== mongodb-1.6.1/tests/server/server-getTags-001.phpt0000644000076500000240000000100713572250761021217 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::getTags() with standalone --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY))->getTags()); ?> ===DONE=== --EXPECTF-- array(0) { } ===DONE=== mongodb-1.6.1/tests/server/server-getTags-002.phpt0000644000076500000240000000172313572250761021225 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server::getTags() with replica set --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY))->getTags(); echo "dc: ", array_key_exists('dc', $tags) ? $tags['dc'] : 'not set', "\n"; echo "ordinal: ", array_key_exists('ordinal', $tags) ? 
$tags['ordinal'] : 'not set', "\n"; $tags = $manager->selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_SECONDARY))->getTags(); echo "dc: ", array_key_exists('dc', $tags) ? $tags['dc'] : 'not set', "\n"; echo "ordinal: ", array_key_exists('ordinal', $tags) ? $tags['ordinal'] : 'not set', "\n"; ?> ===DONE=== --EXPECTF-- dc: pa ordinal: one dc: nyc ordinal: two ===DONE=== mongodb-1.6.1/tests/server/server_error-001.phpt0000644000076500000240000000040513572250761021035 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Server cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyServer may not inherit from final class (MongoDB\Driver\Server) in %s on line %d mongodb-1.6.1/tests/session/session-001.phpt0000644000076500000240000000125313572250761020160 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session spec test: Pool is LIFO --SKIPIF-- --FILE-- startSession(); $firstSessionId = $firstSession->getLogicalSessionId(); unset($firstSession); $secondSession = $manager->startSession(); $secondSessionId = $secondSession->getLogicalSessionId(); var_dump($firstSessionId == $secondSessionId); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/session/session-002.phpt0000644000076500000240000001342013572250761020160 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session spec test: $clusterTime in commands --SKIPIF-- --FILE-- lastSeenClusterTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [], 'cursor' => new stdClass(), ]); $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => $session]); $manager->executeReadWriteCommand(DATABASE_NAME, $command, ['session' => $session]); printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function find() { $this->lastSeenClusterTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); $manager->executeQuery(NS, $query, ['session' => $session]); printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function insert() { $this->lastSeenClusterTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 2]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 
'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function ping() { $this->lastSeenClusterTime = null; MongoDB\Driver\Monitoring\addSubscriber($this); $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); printf("Session reports last seen \$clusterTime: %s\n", ($session->getClusterTime() == $this->lastSeenClusterTime) ? 'yes' : 'no'); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { $command = $event->getCommand(); $hasClusterTime = isset($command->{'$clusterTime'}); printf("%s command includes \$clusterTime: %s\n", $event->getCommandName(), $hasClusterTime ? 'yes' : 'no'); if ($hasClusterTime && $this->lastSeenClusterTime !== null) { printf("%s command uses last seen \$clusterTime: %s\n", $event->getCommandName(), ($command->{'$clusterTime'} == $this->lastSeenClusterTime) ? 'yes' : 'no'); } } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { $reply = $event->getReply(); $hasClusterTime = isset($reply->{'$clusterTime'}); printf("%s command reply includes \$clusterTime: %s\n", $event->getCommandName(), $hasClusterTime ? 'yes' : 'no'); if ($hasClusterTime) { $this->lastSeenClusterTime = $reply->{'$clusterTime'}; } } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } echo "\nTesting aggregate command\n"; (new Test)->aggregate(); echo "\nTesting find command\n"; (new Test)->find(); echo "\nTesting insert command\n"; (new Test)->insert(); echo "\nTesting ping command\n"; (new Test)->ping(); ?> ===DONE=== --EXPECT-- Testing aggregate command aggregate command includes $clusterTime: yes aggregate command reply includes $clusterTime: yes aggregate command includes $clusterTime: yes aggregate command uses last seen $clusterTime: yes aggregate command reply includes $clusterTime: yes Session reports last seen $clusterTime: yes Testing find command find command includes $clusterTime: yes find command reply includes $clusterTime: yes find command includes $clusterTime: yes find command uses last seen $clusterTime: yes find command reply includes $clusterTime: yes Session reports last seen $clusterTime: yes Testing insert command insert command includes $clusterTime: yes insert command reply includes $clusterTime: yes insert command includes $clusterTime: yes insert command uses last seen $clusterTime: yes insert command reply includes $clusterTime: yes Session reports last seen $clusterTime: yes Testing ping command ping command includes $clusterTime: yes ping command reply includes $clusterTime: yes ping command includes $clusterTime: yes ping command uses last seen $clusterTime: yes ping command reply includes $clusterTime: yes Session reports last seen $clusterTime: yes ===DONE=== mongodb-1.6.1/tests/session/session-003.phpt0000644000076500000240000000371613572250761020170 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session spec test: session cannot be used for different clients --SKIPIF-- --FILE-- 60000]); $otherManager = new MongoDB\Driver\Manager(URI, ['heartbeatFrequencyMS' => 90000]); // Create a session with the second Manager (associated with different client) $session = $otherManager->startSession(); echo "\nTesting executeBulkWrite()\n"; echo throws(function() use 
($manager, $session) { $bulk = new MongoDB\Driver\BulkWrite(); $bulk->insert(['x' => 1]); $manager->executeBulkWrite(NS, $bulk, ['session' => $session]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo "\nTesting executeCommand()\n"; echo throws(function() use ($manager, $session) { $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo "\nTesting executeQuery()\n"; echo throws(function() use ($manager, $session) { $query = new MongoDB\Driver\Query([]); $manager->executeQuery(NS, $query, ['session' => $session]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- Testing executeBulkWrite() OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use Session started from a different Manager Testing executeCommand() OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use Session started from a different Manager Testing executeQuery() OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot use Session started from a different Manager ===DONE=== mongodb-1.6.1/tests/session/session-advanceClusterTime-001.phpt0000644000076500000240000000250613572250761023742 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::advanceClusterTime() --SKIPIF-- --FILE-- startSession(); $sessionB = $manager->startSession(); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $sessionA]); echo "Initial cluster time of session B:\n"; var_dump($sessionB->getClusterTime()); $sessionB->advanceClusterTime($sessionA->getClusterTime()); echo "\nCluster time after advancing session B:\n"; var_dump($sessionB->getClusterTime()); echo "\nSessions A and B have equivalent cluster times:\n"; var_dump($sessionA->getClusterTime() == $sessionB->getClusterTime()); ?> ===DONE=== --EXPECTF-- Initial cluster time of session B: NULL Cluster time after advancing session B: object(stdClass)#%d (%d) { ["clusterTime"]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } ["signature"]=> %a } Sessions A and B have equivalent cluster times: bool(true) ===DONE=== mongodb-1.6.1/tests/session/session-advanceOperationTime-001.phpt0000644000076500000240000000241413572250761024257 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::advanceOperationTime() --SKIPIF-- --FILE-- startSession(); $sessionB = $manager->startSession(); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $sessionA]); echo "Initial operation time of session B:\n"; var_dump($sessionB->getOperationTime()); $sessionB->advanceOperationTime($sessionA->getOperationTime()); echo "\nOperation time after advancing session B:\n"; var_dump($sessionB->getOperationTime()); echo "\nSessions A and B have equivalent operation times:\n"; var_dump($sessionA->getOperationTime() == $sessionB->getOperationTime()); ?> ===DONE=== --EXPECTF-- Initial operation time of session B: NULL Operation time after advancing session B: object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } Sessions A and B have equivalent operation times: bool(true) ===DONE=== mongodb-1.6.1/tests/session/session-advanceOperationTime-002.phpt0000644000076500000240000000165113572250761024262 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::advanceOperationTime() with 
Timestamp --SKIPIF-- --FILE-- startSession(); echo "Initial operation time of session:\n"; var_dump($session->getOperationTime()); $session->advanceOperationTime(new MongoDB\BSON\Timestamp(5678, 1234)); echo "\nOperation time after advancing session:\n"; var_dump($session->getOperationTime()); ?> ===DONE=== --EXPECTF-- Initial operation time of session: NULL Operation time after advancing session: object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "5678" ["timestamp"]=> string(4) "1234" } ===DONE=== mongodb-1.6.1/tests/session/session-advanceOperationTime-003.phpt0000644000076500000240000000234513572250761024264 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::advanceOperationTime() with TimestampInterface --SKIPIF-- --FILE-- getIncrement(), $this->getTimestamp()); } } $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); echo "Initial operation time of session:\n"; var_dump($session->getOperationTime()); $session->advanceOperationTime(new MyTimestamp); echo "\nOperation time after advancing session:\n"; var_dump($session->getOperationTime()); ?> ===DONE=== --EXPECTF-- Initial operation time of session: NULL Operation time after advancing session: object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "5678" ["timestamp"]=> string(4) "1234" } ===DONE=== mongodb-1.6.1/tests/session/session-advanceOperationTime_error-001.phpt0000644000076500000240000000405113572250761025467 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::advanceOperationTime() with TimestampInterface exceptions --SKIPIF-- --FILE-- failIncrement = $failIncrement; $this->failTimestamp = $failTimestamp; } public function getIncrement() { if ($this->failIncrement) { throw new Exception('getIncrement() failed'); } return 5678; } public function getTimestamp() { if ($this->failTimestamp) { throw new Exception('getTimestamp() failed'); } return 1234; } public function __toString() { return sprintf('[%d:%d]', $this->getIncrement(), $this->getTimestamp()); } } $manager = new MongoDB\Driver\Manager(URI); $session = $manager->startSession(); echo "Initial operation time of session:\n"; var_dump($session->getOperationTime()); $timestamps = [ new MyTimestamp(true, false), new MyTimestamp(false, true), new MyTimestamp(true, true), ]; foreach ($timestamps as $timestamp) { echo "\n", throws(function() use ($session, $timestamp) { $session->advanceOperationTime($timestamp); }, 'Exception'), "\n"; echo "\nOperation time after advancing session fails:\n"; var_dump($session->getOperationTime()); } ?> ===DONE=== --EXPECT-- Initial operation time of session: NULL OK: Got Exception getIncrement() failed Operation time after advancing session fails: NULL OK: Got Exception getTimestamp() failed Operation time after advancing session fails: NULL OK: Got Exception getTimestamp() failed Operation time after advancing session fails: NULL ===DONE=== mongodb-1.6.1/tests/session/session-commitTransaction-001.phpt0000644000076500000240000000504713572250761023661 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::commitTransaction() applies w:majority when retrying --SKIPIF-- --FILE-- manager = new MongoDB\Driver\Manager(URI); $this->manager->executeCommand( DATABASE_NAME, new MongoDB\Driver\Command(['create' => COLLECTION_NAME]), ['writeConcern' => new MongoDB\Driver\WriteConcern(MongoDB\Driver\WriteConcern::MAJORITY)] ); } public function run(array $startTransactionOptions) { $session = $this->manager->startSession(); $session->startTransaction($startTransactionOptions); $bulk = new 
MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $this->manager->executeBulkWrite(NS, $bulk, ['session' => $session]); MongoDB\Driver\Monitoring\addSubscriber($this); $session->commitTransaction(); $session->commitTransaction(); MongoDB\Driver\Monitoring\removeSubscriber($this); } public function commandStarted(MongoDB\Driver\Monitoring\CommandStartedEvent $event) { if ($event->getCommandName() !== 'commitTransaction') { return; } printf("commitTransaction included write concern: %s\n", json_encode($event->getCommand()->writeConcern)); } public function commandSucceeded(MongoDB\Driver\Monitoring\CommandSucceededEvent $event) { } public function commandFailed(MongoDB\Driver\Monitoring\CommandFailedEvent $event) { } } $test = new Test; echo "Applies w:majority and default wtimeout when retrying commitTransaction\n"; $test->run(['writeConcern' => new MongoDB\Driver\WriteConcern(1)]); echo "\nPreserves other WC options when retrying commitTransaction\n"; $test->run(['writeConcern' => new MongoDB\Driver\WriteConcern(1, 5000)]); ?> ===DONE=== --EXPECT-- Applies w:majority and default wtimeout when retrying commitTransaction commitTransaction included write concern: {"w":1} commitTransaction included write concern: {"w":"majority","wtimeout":10000} Preserves other WC options when retrying commitTransaction commitTransaction included write concern: {"w":1,"wtimeout":5000} commitTransaction included write concern: {"w":"majority","wtimeout":5000} ===DONE=== mongodb-1.6.1/tests/session/session-debug-001.phpt0000644000076500000240000000150013572250761021237 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session debug output (before an operation) --SKIPIF-- --FILE-- startSession(); var_dump($session); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Session)#%d (%d) { ["logicalSessionId"]=> array(1) { ["id"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c" ["type"]=> int(4) } } ["clusterTime"]=> NULL ["causalConsistency"]=> bool(true) ["operationTime"]=> NULL ["server"]=> NULL } ===DONE=== mongodb-1.6.1/tests/session/session-debug-002.phpt0000644000076500000240000000244213572250761021246 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session debug output (after an operation) --SKIPIF-- --FILE-- startSession(); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); var_dump($session); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Session)#%d (%d) { ["logicalSessionId"]=> array(1) { ["id"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c" ["type"]=> int(4) } } ["clusterTime"]=> array(2) { ["clusterTime"]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } ["signature"]=> %a } ["causalConsistency"]=> bool(true) ["operationTime"]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } ["server"]=> NULL } ===DONE=== mongodb-1.6.1/tests/session/session-debug-003.phpt0000644000076500000240000000154313572250761021250 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session debug output (causalConsistency=false) --SKIPIF-- --FILE-- startSession(['causalConsistency' => false]); var_dump($session); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Session)#%d (%d) { ["logicalSessionId"]=> array(1) { ["id"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c" ["type"]=> int(4) } } ["clusterTime"]=> NULL 
["causalConsistency"]=> bool(false) ["operationTime"]=> NULL ["server"]=> NULL } ===DONE=== mongodb-1.6.1/tests/session/session-debug-004.phpt0000644000076500000240000000145513572250761021253 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session debug output (after ending session) --SKIPIF-- --FILE-- startSession(); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); $session->endSession(); var_dump($session); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Session)#%d (%d) { ["logicalSessionId"]=> NULL ["clusterTime"]=> NULL ["causalConsistency"]=> NULL ["operationTime"]=> NULL ["server"]=> NULL } ===DONE=== mongodb-1.6.1/tests/session/session-debug-005.phpt0000644000076500000240000000271113572250761021250 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session debug output (during a pinned transaction) --SKIPIF-- --FILE-- selectServer(new \MongoDB\Driver\ReadPreference('primary')); $session = $manager->startSession(); $session->startTransaction(); $query = new MongoDB\Driver\Query([]); $server->executeQuery(NS, $query, ['session' => $session]); var_dump($session); $session->abortTransaction(); $session->endSession(); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\Session)#%d (%d) { ["logicalSessionId"]=> array(1) { ["id"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(16) "%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c%c" ["type"]=> int(4) } } ["clusterTime"]=> array(2) { ["clusterTime"]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } ["signature"]=> %a } ["causalConsistency"]=> bool(true) ["operationTime"]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } ["server"]=> object(MongoDB\Driver\Server)#%d (%d) { %a } } ===DONE=== mongodb-1.6.1/tests/session/session-endSession-001.phpt0000644000076500000240000000617013572250761022273 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::endSession() Calling methods after session has been ended --SKIPIF-- --FILE-- startSession(); $sessionA->endSession(); echo throws(function() use ($sessionA) { $sessionA->startTransaction(); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { $sessionA->abortTransaction(); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; /* The reason that startTransaction is in here twice is that this script can run without exception * if the endSession() call is taken out. 
*/ echo throws(function() use ($sessionA) { $sessionA->startTransaction(); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { $sessionA->commitTransaction(); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { $sessionA->advanceOperationTime(new \MongoDB\BSON\Timestamp(1900123000, 1900123000)); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { $sessionA->advanceClusterTime([]); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { var_dump($sessionA->getClusterTime()); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { var_dump($sessionA->getLogicalSessionId()); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { var_dump($sessionA->getOperationTime()); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; echo throws(function() use ($sessionA) { $sessionA->isInTransaction(); }, 'MongoDB\Driver\Exception\LogicException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'startTransaction', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'abortTransaction', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'startTransaction', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'commitTransaction', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'advanceOperationTime', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'advanceClusterTime', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'getClusterTime', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'getLogicalSessionId', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'getOperationTime', as the session has already been ended. OK: Got MongoDB\Driver\Exception\LogicException Cannot call 'isInTransaction', as the session has already been ended. 
===DONE=== mongodb-1.6.1/tests/session/session-endSession-002.phpt0000644000076500000240000000102513572250761022266 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::endSession() Calling method multiple times --SKIPIF-- --FILE-- startSession(); $sessionA->endSession(); $sessionA->endSession(); $sessionA->endSession(); ?> ===DONE=== --EXPECT-- ===DONE=== mongodb-1.6.1/tests/session/session-getClusterTime-001.phpt0000644000076500000240000000175213572250761023122 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::getClusterTime() --SKIPIF-- --FILE-- startSession(); echo "Initial cluster time:\n"; var_dump($session->getClusterTime()); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); echo "\nCluster time after command:\n"; var_dump($session->getClusterTime()); ?> ===DONE=== --EXPECTF-- Initial cluster time: NULL Cluster time after command: object(stdClass)#%d (%d) { ["clusterTime"]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } ["signature"]=> %a } ===DONE=== mongodb-1.6.1/tests/session/session-getLogicalSessionId-001.phpt0000644000076500000240000000145713572250761024057 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::getLogicalSessionId() --SKIPIF-- --FILE-- startSession(); $lsid = $session->getLogicalSessionId(); /* Note: we avoid dumping the Binary object as it may contain bytes that * interfere with the test suite's ability to compare expected output. */ var_dump($lsid instanceof stdClass); var_dump($lsid->id instanceof MongoDB\BSON\Binary); var_dump($lsid->id->getType() === MongoDB\BSON\Binary::TYPE_UUID); ?> ===DONE=== --EXPECTF-- bool(true) bool(true) bool(true) ===DONE=== mongodb-1.6.1/tests/session/session-getOperationTime-001.phpt0000644000076500000240000000164413572250761023441 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::getOperationTime() --SKIPIF-- --FILE-- startSession(); echo "Initial operation time:\n"; var_dump($session->getOperationTime()); $command = new MongoDB\Driver\Command(['ping' => 1]); $manager->executeCommand(DATABASE_NAME, $command, ['session' => $session]); echo "\nOperation time after command:\n"; var_dump($session->getOperationTime()); ?> ===DONE=== --EXPECTF-- Initial operation time: NULL Operation time after command: object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(%d) "%d" ["timestamp"]=> string(%d) "%d" } ===DONE=== mongodb-1.6.1/tests/session/session-isInTransaction-001.phpt0000644000076500000240000000374713572250761023275 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::isInTransaction() --SKIPIF-- --FILE-- COLLECTION_NAME, ]); $manager->executeCommand(DATABASE_NAME, $cmd); /* Start a session */ $session = $manager->startSession(); /* Empty transaction, and aborted empty transaction */ var_dump($session->isInTransaction()); $session->startTransaction(); var_dump($session->isInTransaction()); $session->abortTransaction(); var_dump($session->isInTransaction()); /* Empty transaction, and committed empty transaction */ var_dump($session->isInTransaction()); $session->startTransaction(); var_dump($session->isInTransaction()); $session->commitTransaction(); var_dump($session->isInTransaction()); /* Aborted transaction with one operation */ var_dump($session->isInTransaction()); $session->startTransaction(); $bw = new \MongoDB\Driver\BulkWrite(); $bw->insert( [ '_id' => 0, 'msg' => 'Initial Value' ] ); $manager->executeBulkWrite(NS, $bw, ['session' => $session]); 
var_dump($session->isInTransaction()); $session->abortTransaction(); var_dump($session->isInTransaction()); /* Committed transaction with one operation */ var_dump($session->isInTransaction()); $session->startTransaction(); $bw = new \MongoDB\Driver\BulkWrite(); $bw->insert( [ '_id' => 0, 'msg' => 'Initial Value' ] ); $manager->executeBulkWrite(NS, $bw, ['session' => $session]); var_dump($session->isInTransaction()); $session->commitTransaction(); var_dump($session->isInTransaction()); ?> ===DONE=== --EXPECTF-- bool(false) bool(true) bool(false) bool(false) bool(true) bool(false) bool(false) bool(true) bool(false) bool(false) bool(true) bool(false) ===DONE=== mongodb-1.6.1/tests/session/session-startTransaction-001.phpt0000644000076500000240000000105013572250761023514 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() ensure that methods can be called --SKIPIF-- --FILE-- startSession(); $session->startTransaction(); $session->abortTransaction(); $session->startTransaction(); $session->commitTransaction(); ?> ===DONE=== --EXPECTF-- ===DONE=== mongodb-1.6.1/tests/session/session-startTransaction_error-001.phpt0000644000076500000240000000120413572250761024726 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() twice --SKIPIF-- --FILE-- startSession(); $session->startTransaction(); echo throws(function() use ($session) { $session->startTransaction(); }, 'MongoDB\Driver\Exception\RuntimeException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\RuntimeException Transaction already in progress ===DONE=== mongodb-1.6.1/tests/session/session-startTransaction_error-002.phpt0000644000076500000240000000755013572250761024741 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() with wrong values in options array --SKIPIF-- --FILE-- startSession(); $options = [ [ 'maxCommitTimeMS' => -1 ], [ 'readConcern' => 42 ], [ 'readConcern' => new stdClass ], [ 'readConcern' => new \MongoDB\Driver\WriteConcern( 2 ) ], [ 'readPreference' => 42 ], [ 'readPreference' => new stdClass ], [ 'readPreference' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ) ], [ 'writeConcern' => 42 ], [ 'writeConcern' => new stdClass ], [ 'writeConcern' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ) ], [ 'readConcern' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ), 'readPreference' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ), ], [ 'readConcern' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ), 'writeConcern' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ), ], [ 'readPreference' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ), 'writeConcern' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ), ], ]; foreach ($options as $txnOptions) { echo throws(function() use ($session, $txnOptions) { $session->startTransaction($txnOptions); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; } echo raises(function() use ($session) { $session->startTransaction([ 'maxCommitTimeMS' => new stdClass ]); }, E_NOTICE), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "maxCommitTimeMS" option to be >= 0, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, int%S given OK: Got 
MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, MongoDB\Driver\WriteConcern given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, MongoDB\Driver\ReadConcern given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, MongoDB\Driver\ReadPreference given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, MongoDB\Driver\ReadConcern given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, MongoDB\Driver\ReadPreference given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, MongoDB\Driver\ReadPreference given OK: Got E_NOTICE Object of class stdClass could not be converted to int ===DONE=== mongodb-1.6.1/tests/session/session-startTransaction_error-003.phpt0000644000076500000240000000175113572250761024737 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() with wrong argument for options array on PHP 5 --SKIPIF-- =', '7.0.0'); ?> --FILE-- startSession(); $options = [ 2, new stdClass, ]; foreach ($options as $txnOptions) { echo raises(function () use ($session, $txnOptions) { $session->startTransaction($txnOptions); }, E_RECOVERABLE_ERROR), "\n"; } ?> ===DONE=== --EXPECTF-- OK: Got E_RECOVERABLE_ERROR Argument 1 passed to MongoDB\Driver\Session::startTransaction() must be of the type array, int%S given, called in %S OK: Got E_RECOVERABLE_ERROR Argument 1 passed to MongoDB\Driver\Session::startTransaction() must be of the type array, object given, called in %S ===DONE=== mongodb-1.6.1/tests/session/session-startTransaction_error-004.phpt0000644000076500000240000000174413572250761024742 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() with wrong argument for options array on PHP 7.0 --SKIPIF-- =', '7.1.0'); ?> --FILE-- startSession(); $options = [ 2, new stdClass, ]; foreach ($options as $txnOptions) { echo throws(function () use ($session, $txnOptions) { $session->startTransaction($txnOptions); }, TypeError::class), "\n"; } ?> ===DONE=== --EXPECTF-- OK: Got TypeError Argument 1 passed to MongoDB\Driver\Session::startTransaction() must be of the type array, int%S given OK: Got TypeError Argument 1 passed to MongoDB\Driver\Session::startTransaction() must be of the type array, object given ===DONE=== mongodb-1.6.1/tests/session/session-startTransaction_error-005.phpt0000644000076500000240000000170513572250761024740 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() with wrong argument for options array on PHP 7 --SKIPIF-- 
--FILE-- startSession(); $options = [ 2, new stdClass, ]; foreach ($options as $txnOptions) { echo throws(function () use ($session, $txnOptions) { $session->startTransaction($txnOptions); }, TypeError::class), "\n"; } ?> ===DONE=== --EXPECTF-- OK: Got TypeError Argument 1 passed to MongoDB\Driver\Session::startTransaction() must be of the type array or null, int%S given OK: Got TypeError Argument 1 passed to MongoDB\Driver\Session::startTransaction() must be of the type array or null, object given ===DONE=== mongodb-1.6.1/tests/session/session_error-001.phpt0000644000076500000240000001003613572250761021370 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session with wrong defaultTransactionOptions --FILE-- -1 ], [ 'readConcern' => 42 ], [ 'readConcern' => new stdClass ], [ 'readConcern' => new \MongoDB\Driver\WriteConcern( 2 ) ], [ 'readPreference' => 42 ], [ 'readPreference' => new stdClass ], [ 'readPreference' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ) ], [ 'writeConcern' => 42 ], [ 'writeConcern' => new stdClass ], [ 'writeConcern' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ) ], [ 'readConcern' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ), 'readPreference' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ), ], [ 'readConcern' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ), 'writeConcern' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ), ], [ 'readPreference' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ), 'writeConcern' => new \MongoDB\Driver\ReadPreference( \MongoDB\Driver\ReadPreference::RP_SECONDARY ), ], 42, new stdClass, ]; foreach ($options as $txnOptions) { echo throws(function() use ($manager, $txnOptions) { $manager->startSession([ 'defaultTransactionOptions' => $txnOptions ]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; } echo raises(function() use ($manager) { $manager->startSession([ 'defaultTransactionOptions' => [ 'maxCommitTimeMS' => new stdClass ] ]); }, E_NOTICE), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "maxCommitTimeMS" option to be >= 0, -1 given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readConcern" option to be MongoDB\Driver\ReadConcern, MongoDB\Driver\WriteConcern given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, MongoDB\Driver\ReadConcern given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be 
MongoDB\Driver\WriteConcern, MongoDB\Driver\ReadPreference given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "readPreference" option to be MongoDB\Driver\ReadPreference, MongoDB\Driver\ReadConcern given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, MongoDB\Driver\ReadPreference given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "writeConcern" option to be MongoDB\Driver\WriteConcern, MongoDB\Driver\ReadPreference given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "defaultTransactionOptions" option to be an array, int%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected "defaultTransactionOptions" option to be an array, stdClass given OK: Got E_NOTICE Object of class stdClass could not be converted to int ===DONE=== mongodb-1.6.1/tests/session/transaction-integration-001.phpt0000644000076500000240000000522013572250761023341 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() Committing a transaction with example for how to handle failures --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => $EMPLOYEES_COL ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $manager->executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => $EVENTS_COL ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); /* Do the transaction */ $session = $manager->startSession(); $session->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); while (true) { try { $cmd = new \MongoDB\Driver\Command( [ 'update' => $EMPLOYEES_COL, 'updates' => [ [ 'q' => [ 'employee' => 3 ], 'u' => [ '$set' => [ 'status' => 'Inactive' ] ], ] ] ] ); $manager->executeCommand(DATABASE_NAME, $cmd, ['session' => $session]); $cmd = new \MongoDB\Driver\Command( [ 'insert' => $EVENTS_COL, 'documents' => [ [ 'employee' => 3, 'status' => [ 'new' => 'Inactive', 'old' => 'Active' ] ] ] ] ); $manager->executeCommand(DATABASE_NAME, $cmd, ['session' => $session]); $session->commitTransaction(); echo "Transaction committed.\n";break; } catch (\MongoDB\Driver\Exception\CommandException $e) { $rd = $e->getResultDocument(); if (isset($rd->errorLabels) && in_array('TransientTransactionError', $rd->errorLabels)) { echo "Temporary error: ", $e->getMessage(), ", retrying...\n"; $rd = $e->getResultDocument(); var_dump($rd); continue; } else { var_dump($e); } break; } } ?> ===DONE=== --EXPECTF-- Transaction committed. 
===DONE=== mongodb-1.6.1/tests/session/transaction-integration-002.phpt0000644000076500000240000000464413572250761023351 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() Transient Error Test --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); /* Insert Data */ $bw = new \MongoDB\Driver\BulkWrite(); $bw->insert( [ '_id' => 0, 'msg' => 'Initial Value' ] ); $manager->executeBulkWrite(NS, $bw); /* First 'thread', try to update document, but don't close transaction */ $sessionA = $manager->startSession(); $sessionA->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $cmd = new \MongoDB\Driver\Command( [ 'update' => COLLECTION_NAME, 'updates' => [ [ 'q' => [ '_id' => 0 ], 'u' => [ '$set' => [ 'msg' => 'Update from session A' ] ], ] ] ] ); $manager->executeCommand(DATABASE_NAME, $cmd, ['session' => $sessionA]); /* Second 'thread', try to update the same document, should trigger exception. In handler, commit * first session, verify result, and redo this transaction. */ $sessionB = $manager->startSession(); $sessionB->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); try { $cmd = new \MongoDB\Driver\Command( [ 'update' => COLLECTION_NAME, 'updates' => [ [ 'q' => [ '_id' => 0 ], 'u' => [ '$set' => [ 'msg' => 'Update from session B' ] ], ] ] ] ); $manager->executeCommand(DATABASE_NAME, $cmd, ['session' => $sessionB]); } catch (MongoDB\Driver\Exception\CommandException $e) { echo $e->hasErrorLabel('TransientTransactionError') ? "found a TransientTransactionError" : "did NOT get a TransientTransactionError", "\n"; } ?> ===DONE=== --EXPECTF-- found a TransientTransactionError ===DONE=== mongodb-1.6.1/tests/session/transaction-integration-003.phpt0000644000076500000240000000410613572250761023345 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session::startTransaction() Transient Error Test --SKIPIF-- --FILE-- COLLECTION_NAME, ]); $manager->executeCommand(DATABASE_NAME, $cmd); /* Insert Data */ $bw = new \MongoDB\Driver\BulkWrite(); $bw->insert( [ '_id' => 0, 'msg' => 'Initial Value' ] ); $manager->executeBulkWrite(NS, $bw); /* First 'thread', try to update document, but don't close transaction */ $sessionA = $manager->startSession(); $sessionA->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); $bw = new \MongoDB\Driver\BulkWrite(); $bw->update( [ '_id' => 0 ], [ '$set' => [ 'msg' => 'Update from session A' ] ] ); $manager->executeBulkWrite(NS, $bw, ['session' => $sessionA]); /* Second 'thread', try to update the same document, should trigger exception. In handler, commit * first session, verify result, and redo this transaction. 
*/ $sessionB = $manager->startSession(); $sessionB->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); try { $bw = new \MongoDB\Driver\BulkWrite(); $bw->update( [ '_id' => 0 ], [ '$set' => [ 'msg' => 'Update from session B' ] ] ); $manager->executeBulkWrite(NS, $bw, ['session' => $sessionB]); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { echo $e->hasErrorLabel('TransientTransactionError') ? "found a TransientTransactionError" : "did NOT get a TransientTransactionError", "\n"; } ?> ===DONE=== --EXPECTF-- found a TransientTransactionError ===DONE=== mongodb-1.6.1/tests/session/transaction-integration_error-001.phpt0000644000076500000240000000437413572250761024563 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session: Setting per-op readConcern or writeConcern in transaction (executeCommand) --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); /* Do the transaction */ $session = $manager->startSession(); $session->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); echo throws(function() use ($manager, $session) { $cmd = new \MongoDB\Driver\Command( [ 'update' => COLLECTION_NAME, 'updates' => [ [ 'q' => [ 'employee' => 3 ], 'u' => [ '$set' => [ 'status' => 'Inactive' ] ] ] ] ] ); $manager->executeCommand( DATABASE_NAME, $cmd, [ 'session' => $session, 'readConcern' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ) ] ); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() use ($manager, $session) { $cmd = new \MongoDB\Driver\Command( [ 'update' => COLLECTION_NAME, 'updates' => [ [ 'q' => [ 'employee' => 3 ], 'u' => [ '$set' => [ 'status' => 'Inactive' ] ] ] ] ] ); $manager->executeCommand( DATABASE_NAME, $cmd, [ 'session' => $session, 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot set read concern after starting transaction OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot set write concern after starting transaction ===DONE=== mongodb-1.6.1/tests/session/transaction-integration_error-002.phpt0000644000076500000240000000306113572250761024554 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session: Setting per-op readConcern in transaction (executeReadCommand) --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); /* Do the transaction */ $session = $manager->startSession(); $session->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); echo throws(function() use ($manager, $session) { $cmd = new \MongoDB\Driver\Command( [ 'count' => COLLECTION_NAME, 'query' => [ 'q' => [ 'employee' => 3 ] ] ] ); $manager->executeReadCommand( DATABASE_NAME, $cmd, [ 'session' => $session, 'readConcern' => new \MongoDB\Driver\ReadConcern( 
\MongoDB\Driver\ReadConcern::LOCAL ) ] ); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot set read concern after starting transaction ===DONE=== mongodb-1.6.1/tests/session/transaction-integration_error-003.phpt0000644000076500000240000000316313572250761024560 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session: Setting per-op writeConcern in transaction (executeWriteCommand) --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); /* Do the transaction */ $session = $manager->startSession(); $session->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); echo throws(function() use ($manager, $session) { $cmd = new \MongoDB\Driver\Command( [ 'update' => COLLECTION_NAME, 'updates' => [ [ 'q' => [ 'employee' => 3 ], 'u' => [ '$set' => [ 'status' => 'Inactive' ] ] ] ] ] ); $manager->executeWriteCommand( DATABASE_NAME, $cmd, [ 'session' => $session, 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot set write concern after starting transaction ===DONE=== mongodb-1.6.1/tests/session/transaction-integration_error-004.phpt0000644000076500000240000000433713572250761024565 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Session: Setting per-op readConcern or writeConcern in transaction (executeReadWriteCommand) --SKIPIF-- --FILE-- executeCommand( DATABASE_NAME, new \MongoDB\Driver\Command([ 'create' => COLLECTION_NAME ]), [ 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); /* Do the transaction */ $session = $manager->startSession(); $session->startTransaction( [ 'readConcern' => new \MongoDB\Driver\ReadConcern( "snapshot" ), 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); echo throws(function() use ($manager, $session) { $cmd = new \MongoDB\Driver\Command( [ 'count' => COLLECTION_NAME, 'query' => [ 'q' => [ 'employee' => 3 ] ] ] ); $manager->executeReadWriteCommand( DATABASE_NAME, $cmd, [ 'session' => $session, 'readConcern' => new \MongoDB\Driver\ReadConcern( \MongoDB\Driver\ReadConcern::LOCAL ) ] ); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; echo throws(function() use ($manager, $session) { $cmd = new \MongoDB\Driver\Command( [ 'update' => COLLECTION_NAME, 'updates' => [ [ 'q' => [ 'employee' => 3 ], 'u' => [ '$set' => [ 'status' => 'Inactive' ] ] ] ] ] ); $manager->executeReadWriteCommand( DATABASE_NAME, $cmd, [ 'session' => $session, 'writeConcern' => new \MongoDB\Driver\WriteConcern( \MongoDB\Driver\WriteConcern::MAJORITY ) ] ); }, "MongoDB\Driver\Exception\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot set read concern after starting transaction OK: Got MongoDB\Driver\Exception\InvalidArgumentException Cannot set write concern after starting transaction ===DONE=== mongodb-1.6.1/tests/standalone/bug0166.phpt0000644000076500000240000000146413572250761017742 0ustar alcaeusstaff--TEST-- Disable serialization of objects --SKIPIF-- 
--FILE-- ===DONE=== --EXPECT-- bool(false) OK: Got Exception ===DONE=== mongodb-1.6.1/tests/standalone/bug0231.phpt0000644000076500000240000000144613572250761017733 0ustar alcaeusstaff--TEST-- Multiple managers sharing streams: Using stream after closing manager --SKIPIF-- --FILE-- 1)); $retval = $manager->executeCommand("admin", $listdatabases); $retval = $manager2->executeCommand("admin", $listdatabases); foreach($retval as $database) { } $manager = null; $retval = $manager2->executeCommand("admin", $listdatabases); foreach($retval as $database) { } echo "All Good!\n"; ?> ===DONE=== --EXPECT-- All Good! ===DONE=== mongodb-1.6.1/tests/standalone/bug0357.phpt0000644000076500000240000000114213572250761017735 0ustar alcaeusstaff--TEST-- PHPC-357: The exception for "invalid namespace" does not list the broken name --SKIPIF-- --FILE-- executeQuery( 'demo', $c ); }, "MongoDB\\Driver\\Exception\\InvalidArgumentException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Invalid namespace provided: demo ===DONE=== mongodb-1.6.1/tests/standalone/bug0545.phpt0000644000076500000240000000603013572250761017735 0ustar alcaeusstaff--TEST-- PHPC-545: Update does not serialize embedded Persistable's __pclass field --SKIPIF-- --FILE-- $value) { $this->{$name} = $value; } } } class Page implements MongoDB\BSON\Persistable { public function bsonSerialize() { $data = get_object_vars($this); return $data; } public function bsonUnserialize(array $data) { foreach ($data as $name => $value) { $this->{$name} = $value; } } } // Aux $manager = new MongoDB\Driver\Manager(URI); $wc = new MongoDB\Driver\WriteConcern(MongoDB\Driver\WriteConcern::MAJORITY); // Create $book = new Book(); $book->title = 'Unnameable'; $book->pages = []; $page1 = new Page(); $page1->content = 'Lorem ipsum'; $book->pages[] = $page1; $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert($book); $result = $manager->executeBulkWrite(NS, $bulk, $wc); printf("Inserted %d document(s)\n", $result->getInsertedCount()); // Read $query = new MongoDB\Driver\Query(['title' => $book->title]); $cursor = $manager->executeQuery(NS, $query); $bookAfterInsert = $cursor->toArray()[0]; // Update $bookAfterInsert->description = 'An interesting document'; $page2 = new Page(); $page2->content = 'Dolor sit amet'; $bookAfterInsert->pages[] = $page2; $bulk = new MongoDB\Driver\BulkWrite; $bulk->update(['title' => $bookAfterInsert->title], $bookAfterInsert); $result = $manager->executeBulkWrite(NS, $bulk, $wc); printf("Modified %d document(s)\n", $result->getModifiedCount()); // Read (again) $query = new MongoDB\Driver\Query(['title' => $bookAfterInsert->title]); $cursor = $manager->executeQuery(NS, $query); $bookAfterUpdate = $cursor->toArray()[0]; var_dump($bookAfterUpdate); ?> ===DONE=== --EXPECTF-- Inserted 1 document(s) Modified 1 document(s) object(Book)#%d (%d) { ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%s" } ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(4) "Book" ["type"]=> int(%d) } ["title"]=> string(10) "Unnameable" ["pages"]=> array(2) { [0]=> object(Page)#%d (%d) { ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(4) "Page" ["type"]=> int(%d) } ["content"]=> string(11) "Lorem ipsum" } [1]=> object(Page)#%d (%d) { ["__pclass"]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(4) "Page" ["type"]=> int(%d) } ["content"]=> string(14) "Dolor sit amet" } } ["description"]=> string(23) "An interesting document" } ===DONE=== 
mongodb-1.6.1/tests/standalone/bug0655.phpt0000644000076500000240000000212513572250761017740 0ustar alcaeusstaff--TEST-- PHPC-655: Use case insensitive parsing for Manager connectTimeoutMS array option --FILE-- 1]); // Invalid host cannot be resolved $manager = new MongoDB\Driver\Manager('mongodb://invalid.host:27017', ['connectTimeoutMS' => 1]); echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; // Valid host refuses connection $manager = new MongoDB\Driver\Manager('mongodb://localhost:54321', ['CONNECTTIMEOUTMS' => 1]); echo throws(function() use ($manager, $command) { $manager->executeCommand(DATABASE_NAME, $command); }, 'MongoDB\Driver\Exception\ConnectionTimeoutException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException No suitable servers found (`serverSelectionTryOnce` set): %s ===DONE=== mongodb-1.6.1/tests/standalone/command-aggregate-001.phpt0000644000076500000240000000171213572250761022504 0ustar alcaeusstaff--TEST-- DRIVERS-289: Test iteration on live command cursor with empty first batch --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $manager->executeBulkWrite(NS, $bulk); $command = new MongoDB\Driver\Command([ 'aggregate' => COLLECTION_NAME, 'pipeline' => [ [ '$match' => [ '_id' => [ '$gt' => 1 ]]], ], 'cursor' => ['batchSize' => 0], ]); $cursor = $manager->executeCommand(DATABASE_NAME, $command); var_dump($cursor->toArray()); ?> ===DONE=== --EXPECTF-- array(2) { [0]=> object(stdClass)#%d (%d) { ["_id"]=> int(2) } [1]=> object(stdClass)#%d (%d) { ["_id"]=> int(3) } } ===DONE=== mongodb-1.6.1/tests/standalone/connectiontimeoutexception-001.phpt0000644000076500000240000000200213572250761024620 0ustar alcaeusstaff--TEST-- ConnectionTimeoutException: exceeding sockettimeoutms --SKIPIF-- --FILE-- 1, 'secs' => 1, 'w' => false, ]); echo throws(function() use ($manager, $command) { $manager->executeCommand('admin', $command); }, 'MongoDB\Driver\Exception\\ConnectionTimeoutException'), "\n"; ?> ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\ConnectionTimeoutException Failed to send "sleep" command with database "admin": %Ssocket error or timeout ===DONE=== mongodb-1.6.1/tests/standalone/executiontimeoutexception-001.phpt0000644000076500000240000000204713572250761024475 0ustar alcaeusstaff--TEST-- ExecutionTimeoutException: exceeding $maxTimeMS (queries) --SKIPIF-- --FILE-- selectServer(new \MongoDB\Driver\ReadPreference('primary')); $query = new MongoDB\Driver\Query(array("company" => "Smith, Carter and Buckridge"), array( 'projection' => array('_id' => 0, 'username' => 1), 'sort' => array('phoneNumber' => 1), 'modifiers' => array( '$maxTimeMS' => 1, ), )); failMaxTimeMS($server); throws(function() use ($server, $query) { $result = $server->executeQuery(NS, $query); }, "MongoDB\Driver\Exception\ExecutionTimeoutException"); ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\ExecutionTimeoutException ===DONE=== mongodb-1.6.1/tests/standalone/executiontimeoutexception-002.phpt0000644000076500000240000000171613572250761024500 0ustar alcaeusstaff--TEST-- ExecutionTimeoutException: exceeding maxTimeMS (commands) --SKIPIF-- --FILE-- selectServer(new \MongoDB\Driver\ReadPreference('primary')); $cmd = array( "count" => "collection", "query" => 
array("a" => 1), "maxTimeMS" => 100, ); $command = new MongoDB\Driver\Command($cmd); failMaxTimeMS($server); throws(function() use ($server, $command) { $result = $server->executeCommand(DATABASE_NAME, $command); }, "MongoDB\Driver\Exception\ExecutionTimeoutException"); ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\ExecutionTimeoutException ===DONE=== mongodb-1.6.1/tests/standalone/manager-as-singleton.phpt0000644000076500000240000000271313572250761022661 0ustar alcaeusstaff--TEST-- PHPC-431: Segfault when using Manager through singleton class --SKIPIF-- --FILE-- Database = $Manager; } public static function getInstance() { if (static::$Instance == null) { static::$Instance = new Database(); } return static::$Instance; } public function query($scheme, $query) { return $this->Database->executeQuery($scheme, $query, new ReadPreference(ReadPreference::RP_PRIMARY)); } } class App { public function run() { $db = Database::getInstance(); $query = new Query(array()); $cursor = $db->query(DATABASE_NAME . ".scheme_info", $query); foreach ($cursor as $document) { echo $document->value; } $query = new Query(array()); $cursor = $db->query(DATABASE_NAME . ".domain", $query); foreach ($cursor as $document) { echo $document->hostname; } } } $App = new App(); $App->run(); echo "All done\n"; ?> ===DONE=== --EXPECT-- All done ===DONE=== mongodb-1.6.1/tests/standalone/query-errors.phpt0000644000076500000240000000233513572250761021325 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Query: Invalid types --FILE-- "Smith, Carter and Buckridge"), array( "projection" => array("_id" => 0, "username" => 1), "sort" => array("phoneNumber" => 1), "modifiers" => "string", )); }, "MongoDB\Driver\Exception\InvalidArgumentException"); throws(function() { $query = new MongoDB\Driver\Query(array("company" => "Smith, Carter and Buckridge"), array( "projection" => array("_id" => 0, "username" => 1), "sort" => array("phoneNumber" => 1), "projection" => "string", )); }, "MongoDB\Driver\Exception\InvalidArgumentException"); throws(function() { $query = new MongoDB\Driver\Query(array("company" => "Smith, Carter and Buckridge"), array( "projection" => array("_id" => 0, "username" => 1), "sort" => "string" )); }, "MongoDB\Driver\Exception\InvalidArgumentException"); ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException OK: Got MongoDB\Driver\Exception\InvalidArgumentException OK: Got MongoDB\Driver\Exception\InvalidArgumentException ===DONE=== mongodb-1.6.1/tests/standalone/update-multi-001.phpt0000644000076500000240000001165113572250761021557 0ustar alcaeusstaff--TEST-- PHPC-243: Manager::executeUpdate() & Bulk->update() w/o multi --SKIPIF-- --FILE-- insert(array('_id' => 1, 'x' => 1)); $bulk->insert(array('_id' => 2, 'x' => 2)); $bulk->insert(array('_id' => 3, 'x' => 2)); $bulk->insert(array('_id' => 4, 'x' => 2)); $bulk->insert(array('_id' => 5, 'x' => 1)); $bulk->insert(array('_id' => 6, 'x' => 1)); $manager->executeBulkWrite(NS, $bulk); $bulk = new \MongoDB\Driver\BulkWrite; $bulk->update( array('x' => 1), array('$set' => array('x' => 3)), array('multi' => false, 'upsert' => false) ); $result = $manager->executeBulkWrite(NS, $bulk); printf("Changed %d out of expected 1 (_id=1)\n", $result->getModifiedCount()); $bulk = new \MongoDB\Driver\BulkWrite; $bulk->update( array('x' => 1), array('$set' => array('x' => 2)), array('multi' => true, 'upsert' => false) ); $result = $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); 
var_dump(iterator_to_array($cursor)); printf("Changed %d out of expected 2, (_id=5, _id=6)\n", $result->getModifiedCount()); $bulk = new MongoDB\Driver\BulkWrite; $bulk->update( array('x' => 2), array('$set' => array('x' => 4)), array('multi' => false, 'upsert' => false) ); $result = $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); printf("Changed %d out of expected 1, (_id=2)\n", $result->getModifiedCount()); $bulk = new MongoDB\Driver\BulkWrite; $bulk->update( array('x' => 2), array('$set' => array('x' => 41)), array('multi' => false, 'upsert' => false) ); $result = $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); printf("Changed %d out of expected 1 (id_=3)\n", $result->getModifiedCount()); $bulk = new MongoDB\Driver\BulkWrite; $bulk->update( array('x' => 2), array('$set' => array('x' => 42)), array('multi' => true, 'upsert' => false) ); $result = $manager->executeBulkWrite(NS, $bulk); $cursor = $manager->executeQuery(NS, new MongoDB\Driver\Query(array())); var_dump(iterator_to_array($cursor)); printf("Changed %d out of expected 3 (_id=4, _id=5, _id=6)\n", $result->getModifiedCount()); ?> ===DONE=== --EXPECTF-- Changed 1 out of expected 1 (_id=1) array(6) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(3) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(2) } [2]=> object(stdClass)#%d (2) { ["_id"]=> int(3) ["x"]=> int(2) } [3]=> object(stdClass)#%d (2) { ["_id"]=> int(4) ["x"]=> int(2) } [4]=> object(stdClass)#%d (2) { ["_id"]=> int(5) ["x"]=> int(2) } [5]=> object(stdClass)#%d (2) { ["_id"]=> int(6) ["x"]=> int(2) } } Changed 2 out of expected 2, (_id=5, _id=6) array(6) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(3) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(4) } [2]=> object(stdClass)#%d (2) { ["_id"]=> int(3) ["x"]=> int(2) } [3]=> object(stdClass)#%d (2) { ["_id"]=> int(4) ["x"]=> int(2) } [4]=> object(stdClass)#%d (2) { ["_id"]=> int(5) ["x"]=> int(2) } [5]=> object(stdClass)#%d (2) { ["_id"]=> int(6) ["x"]=> int(2) } } Changed 1 out of expected 1, (_id=2) array(6) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(3) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(4) } [2]=> object(stdClass)#%d (2) { ["_id"]=> int(3) ["x"]=> int(41) } [3]=> object(stdClass)#%d (2) { ["_id"]=> int(4) ["x"]=> int(2) } [4]=> object(stdClass)#%d (2) { ["_id"]=> int(5) ["x"]=> int(2) } [5]=> object(stdClass)#%d (2) { ["_id"]=> int(6) ["x"]=> int(2) } } Changed 1 out of expected 1 (id_=3) array(6) { [0]=> object(stdClass)#%d (2) { ["_id"]=> int(1) ["x"]=> int(3) } [1]=> object(stdClass)#%d (2) { ["_id"]=> int(2) ["x"]=> int(4) } [2]=> object(stdClass)#%d (2) { ["_id"]=> int(3) ["x"]=> int(41) } [3]=> object(stdClass)#%d (2) { ["_id"]=> int(4) ["x"]=> int(42) } [4]=> object(stdClass)#%d (2) { ["_id"]=> int(5) ["x"]=> int(42) } [5]=> object(stdClass)#%d (2) { ["_id"]=> int(6) ["x"]=> int(42) } } Changed 3 out of expected 3 (_id=4, _id=5, _id=6) ===DONE=== mongodb-1.6.1/tests/standalone/write-error-001.phpt0000644000076500000240000000164013572250761021423 0ustar alcaeusstaff--TEST-- MongoDB\Driver\Manager::executeInsert() --SKIPIF-- --FILE-- "Hannes", "country" => "USA", "gender" => "male"); $bulk = new \MongoDB\Driver\BulkWrite(['ordered' => true]); $hannes_id = $bulk->insert($hannes); $w = 2; $wtimeout = 1000; 
$writeConcern = new \MongoDB\Driver\WriteConcern($w, $wtimeout); echo throws(function() use($bulk, $writeConcern, $manager) { $result = $manager->executeBulkWrite(NS, $bulk, $writeConcern); }, "MongoDB\Driver\Exception\BulkWriteException"), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\BulkWriteException cannot use 'w' > 1 when a host is not replicated ===DONE=== mongodb-1.6.1/tests/standalone/writeresult-isacknowledged-001.phpt0000644000076500000240000000174713572250761024524 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::isAcknowledged() with default WriteConcern --SKIPIF-- --FILE-- insert(array('x' => 1)); $result = $manager->executeBulkWrite(NS, $bulk); printf("WriteResult::isAcknowledged(): %s\n", $result->isAcknowledged() ? 'true' : 'false'); var_dump($result); ?> ===DONE=== --EXPECTF-- WriteResult::isAcknowledged(): true object(MongoDB\Driver\WriteResult)#%d (%d) { ["nInserted"]=> int(1) ["nMatched"]=> int(0) ["nModified"]=> int(0) ["nRemoved"]=> int(0) ["nUpserted"]=> int(0) ["upsertedIds"]=> array(0) { } ["writeErrors"]=> array(0) { } ["writeConcernError"]=> NULL ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (%d) { } } ===DONE=== mongodb-1.6.1/tests/standalone/writeresult-isacknowledged-002.phpt0000644000076500000240000000220513572250761024513 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::isAcknowledged() with inherited WriteConcern --SKIPIF-- --FILE-- insert(array('x' => 1)); $result = $manager->executeBulkWrite(NS, $bulk); printf("WriteResult::isAcknowledged(): %s\n", $result->isAcknowledged() ? 'true' : 'false'); var_dump($result); ?> ===DONE=== --EXPECTF-- WriteResult::isAcknowledged(): false object(MongoDB\Driver\WriteResult)#%d (%d) { ["nInserted"]=> int(0) ["nMatched"]=> int(0) ["nModified"]=> int(0) ["nRemoved"]=> int(0) ["nUpserted"]=> int(0) ["upsertedIds"]=> array(0) { } ["writeErrors"]=> array(0) { } ["writeConcernError"]=> NULL ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } } ===DONE=== mongodb-1.6.1/tests/standalone/writeresult-isacknowledged-003.phpt0000644000076500000240000000203013572250761024510 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::isAcknowledged() with custom WriteConcern --SKIPIF-- --FILE-- insert(array('x' => 2)); $result = $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0)); printf("WriteResult::isAcknowledged(): %s\n", $result->isAcknowledged() ? 
'true' : 'false'); var_dump($result); ?> ===DONE=== --EXPECTF-- WriteResult::isAcknowledged(): false object(MongoDB\Driver\WriteResult)#%d (%d) { ["nInserted"]=> NULL ["nMatched"]=> NULL ["nModified"]=> NULL ["nRemoved"]=> NULL ["nUpserted"]=> NULL ["upsertedIds"]=> array(0) { } ["writeErrors"]=> array(0) { } ["writeConcernError"]=> NULL ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } } ===DONE=== mongodb-1.6.1/tests/utils/PHONGO-FIXTURES.json.gz0000644000076500000240000022370113572250761020536 0ustar alcaeusstaff
ž,ô€Æ2¸E¶¨-VŽç‚_#”ÄÁ‘ÜAxºÑU¾PÂa0‚^êÔ«àÛU¸¿9ù32 m7n#{B=éà7÷ÿ´«¼3[Ô›;hÄ»A‹MúÇŽ@OZêu…âžH:•†v›©3Óð›ÔVòóo¦Õäm$þܬ\y°vÿˆ(%H’Hnë$Òwl£ŽªqjJºîÏû傎NWÂp1.¶©e†rqpþ½+VÅÌeðè-·Ã»7fx‘Ãìå<“FÇqƒ !ÂTx¨öBc5db¸+µ2^Ó\@Z šñ~ÓÝ-<òwX¥Ž³Ü}Ö(01hv‘x^#AŽHmfìTÓ“ùùŸæèÍåvnIý¶&Ú5|9èΣŽ^6ù5-Åâ?;š±Ž·šô§ª{i~l‡F©ÔÔž¯,C>˜ü¶Û„Ñ aô¬X-®ÍŽFÖ+­íÿ€ñ¥ÈÐhyËy¦4.±›û°,Â:ó‘-?XšyÉPÞÚ óÚGö¬²ÜðFó’),Õ‚Ð’–0YV)&s/lîú‡ìb±ú2ßP~T¸Â˜5›»nP£Ç5#ùÔ 1ÕŒ±AÈ©¢¹R§ÀþPS\+¹zìëY»eVר^ãq½ÞË&5€7žÂ÷Ò¹¾æÒPÊÓ6Åù`vTQ5ùÝíŠp;ùìð¡H{o*Æ}îï ·º»A Ìv>_‡·ƒ§I=÷¾ÛèŒp˜ž\« †R¢/ÒVßÎÐfy(r uyî© \”N£ìUz›¹f”>ð¼ð,w¼`BCðz%‚v®`$PÕ_jÃÚß¹e¶Z§vû'düÁ›øõb> ªý?¡/¢büÔùÓjQ–c|¸ßÃzsÇ{ ÊPÙÊU;êÒ0×Î0×j”»ˆ>ôõiz[ëJ +õâ‰Ñ†ýuJ{Å›¹ïÞÆÁŒjs–Mj=ý´™ÃÂ^)l5·°=Êìò- [~>•‚ã¶ïËÜMþ˜E ÜW×{×îøY|p‹ÅühÝã85T‰ÿÒNlf`r•£¥}ªMF`„½ vžö5º§…l³ »|DR“”Ðp;Ï©£yYäL•¥/¡ý6%V÷@½"P`³Ì!¥pé ü;gK- ÛoYƒ™8¹–þ¸Qíb3Êo·U̼͒€iaj¬5ZèŽÑ÷¤ŠWj]nÆñs,‡;³z%ÞøW"£ 7eû4ÅûðûNžA¦L„ÙÝT«ò}ɯÅöjo"àìÐ(È>[ˆ+e™¼›/JxL&oæ1Â+:wLZ”P%4;…ñZ0'ƒ‚ qD %ŠXhá„-Õð§~V\‰e9[»ÝQS÷ŸPv]÷¾Ô¸_Œ+]C“˜1Ë3Ëõ©ÕøÍ|%sûròÛ:ÉWÆÍxOüþ ꢗû.ÆhÍ=O®Q›yŸõ€ÝÞ%š Þ©j°i[Œ®M¨k<‰Úîš)¡ÝÄ/˜>^ÉeÄS\åƒÆdmÔ]eøù€kV³J 8½«T@doÏ\§‚+Ô£ó‡ù±ÇƒU!=Uϯu=$ƒvI÷Jçí_%¡€k(‹Éð¨Ìp¢^„í X¥õŽÃð^(maœwÊ‹B›¢ä\‡@‹ÜÊBÁ„‡Rñ`œ3–[šÓ~î”ëþ»²Â•® ¨îg»A¸j<ÏŒä­0ÉŠLRf §f…·«¿ÃãØ8ÿ¯Ý &ãX»‘mRÆ&‰:Ù«%]úDëpåñaÎÏ&íN?9l÷NO±¸&q©Èn2I7¦½~‰of%ª¿œÁ¢‰Q“ŠÐ3ùçÂ{@âÍëŸî‰vë‹ÿWs3Ì¡·+¶ï¤|QxÉ·ßë.ѼH2T†ü"Zˆ=0ƒ”h¬=õ…Õª4áÎJ¯ÓÔ"s¯e‘—Ž]2“Û¼Ð^:cK“û@r˜´ûWâ_!ƒ»µÏ¶³yXÕV¿u‹0¸UK‚Ž£qN9™R¥§Ôrú@:ÌÛïŠõ⦚ ^Vl‘ó»åmOÈ_å•{ V\Tcnjr[¦©¥öŠêmm¶w┹tss;l¯çËzi;ôJÔ=Îä{`KÕËcäŽaY©Ñ¸æ8[¼+®íäÝãz;Gw‡®ßþ=´;íŸà+6k{…ß°óG,Î¥´ ùÏnì ¡šÃt<æSÍ3©RÙ‘¡y‹ËþrňȓcŽ@‹Ç•d” å1Þ*K ¶3¦½-˜3¥àò—^á$dÍi`hÿØíwñœ² Š'鑿ƒ5ý·¼qk7ìFÒ©‚€×Ô’ªÉ©ª ïVÑÒqú­gL-Ú‰!¸ß -Z§QßY„]µìÛPÈþ¸uiçíoâ)¢1î!ÎßU½‰k´„~mmԲɯa¼á©¥Œµ] VÓè=jç$-Ñò|ªšk6yù ~@•ycuLc½æJN`>@íÜÅñ¯ûû#NãÔHöÜ9Aë Ê&Ï 7Pÿ"•ÿñ~.ù¥ZÙÇ—I2Ç»§'9üXY0Äd°·uKÊ.mlLsì®<k#ˆ›ýŽ<2Ò’?d}C+v•,$Â^–˜&>„æN–D&[p™ÖÀÿ°N5Ľfºá{­c¢ÜH¶°FPbéùª…}Þ…¿G2ÆZ«èýÈ P=@ƒs*%æzõy¾™³—“¹]_5YãG×{d«SB`Òš½#³wllÈ+c­ý¹¤3“l¤ëû}•LÂS7©.·®½âöͤ·ŠB“>™ÞOäì`ÃÓ¨L¤!„êŒxyÉ S“«Ü#70ÎUNöÓÖÚ ÅhAݬܘ5¾ª®”PÏž/„È8J?Žžã)ÍŒ„†ý"÷ºÊE~9øaîÓ+uAK­ÉóÒP ó°¦zwÈhN¡lœ¡£T¡ È(Ü1+•ôª4:ôs^$©õ*ë"ÿ Úï»áîâý úì]8~ T½€ùhÊsÇÌ©ØùOó¿Ý¼Î{·í?ÆÇ#ݼ+úèÚT–†¼ÐÖ5ãÍ/UøE-!xÅ6Ôœvü{éwi¸Ú†×C©‚zuFÍW(FO®¡ ¸u“·hoÑ{â'èâaà0 öènuž;º)™ÐÜŒ³Þ$Ï(ºÄ]„»ùúøÄÂ¥³BP–i–’‡œR ԓ2Ö1å +…–V±À”,‰"…– Æ êSI{ƒ:‡`õu“A451üâ}ìn¸N–ßs$Œ Éü"N ~߆—“h5•¸øÀöæ‚yŒØ'ýy# ¹KQÜnó7Û'jñ Î6âyZÁ®–•‹Ðûçi\!ÚÙ :4– ·óÕWïô®jsšúÙ>gÔk®œ0m•_ƨ9Ñqšiu¾›'B*”©»£ß4<»õ¦ö•—:µƒ÷fîÇ’G·Uyn¨•$Ctk¯îþU4³¸_¸Èmb;C( ”¶Â³Ò±Âiar¯P£†J‚W6ŠËs’{]ZhÀm€´ãÊ’­$E-±'q­»„wwljH¿\Ãæ îçm2±í)eSaäTÁ\ü`:uLx“À4zÊ÷d’Ѷ%UUËNVv°°ûäWäËUÇÅ/‘ºFŒò· Õ‡f.pÕD]§P“¾æè˜Ï4Ð…¸»lí8SÆb ~BFËóí…jr )|Û¿}q‹Îßßâw†‹×›þ®¡8Úl\1ƒGzÛrx²6€ÊgáA Ï=]LÏÚ6õ”Í$ç/r•p ï²»Ýzí}j ÇF!h.´¦Dyå&Vohμх”š¢ ž1)¹*…ÎZSÊ2 Hí³Ûék÷a†á8®Oˆ`ßHîhùyÁ¡Ö =Þ©3ƵCõ§¹‹h„y…à¿ ~Ù{HÆÒéà¸å.Sx²û‹{>ønZh¥xjüs׶ܶµd…u^’Ô„¨}¿<&¶ã“Øš8Ç>Iͼm"LŠÈðbKþúéÞ7ÒkXÔä!–-êRú²ºW¯5^¹¶FÒ êßC¢i<¢ú]÷ÕÓ¾áæ¼½±¯§Ôqƒ Àîo½q«jÖ^@i4 §ƒ W X´yÁ;ûGÖ$p¸¼ ws—­ýöK9ª-M¸H .˜sZÛÔ¤.“”er IIVð‚ãêÐÃûTj¸É]Î,TîÜÁ3ª¨FwåÎe¤ktÀÜ•óDäsÙúŒþÐÛÀ7g æ«*®ÉÒp¶„lÃïO ¾¿‰ZíÂÇ5î^G¨¼ýÁ\DöýN=´%¢öÈø‹tÆô½QŒoêry¸¿ý"ßÝßœÔ–Ûæô¦a wEøîƒÀmkt=U¼%1ª÷Å’ÕR,Þ@ ­«š*Üœã‰A|Üw„*ÝêÑ áM¹Ýúý¬á`”g§™HÍÔôÞ’‹ð´$Wa Gu™ds>‹9ã¥iŠÝyJln”s\ŽFr©ÓœX¨è<ð G„. 
ð”R‹¶0ZøŒ/³¶ÂNòyÚÓî!õ§ò“ð§?ÞMdŽZV'VZyÏåS›xL߆~ÍßÁÞºS¥Å›œê=\?kÿÈ`3-¨kš?öøqÑßêå!8¶Kþ €À8:h#ºQ@óV~h~9ü.(etñê“ÛÞ…ÅÛòv5I,|sü„ ²Ç#ÿBüòg¹»-·³(Äœ Ó–® ÝM$—ŠNBw­B×înSmO¥/N">ˬ, Û,|j 4èDÁ·”a -cN•%"O)³ÎB±/ jP^X'Aý[7"9ÿÝ›g!÷—ÿÇ'€_+¾Ÿá;SÏ“·zo«Ï~ùûnÈiúö€Iƒñ'Jc56/¾qŒ¨…³bð×®oýÐÉÛ¾:a…jÞ øra'7êþ ÒÃaAÜ"@¬£´u—ŽÑܾ›6N@•@Ž‹%)ÕâÕ¦üèV‹÷ïFT©OÆ/Ý~ßsÔ0瓌•ÁffPãšÙ€‰„ ˆ§éd`ÎŒåWÙù?§ía\p‹ZïŒWN(‰<æˆË `\Á2n8$”f*ÏŒ¡:Ë än2‘CùÃHàcµübFç)ìÀ“³K7ð LÚG!OIh´Èø¿cö7ðׯx8!~¯‹}½è†j§eâ>€öî   ­­D÷ÎÎD®ËB>Îùâ„°"ï­'£ãt¼èí8‰½vb ]©ºaÀZ#Êl6‹wî°ª±>·mçû›ƒûTúÏ“ya®¿´ÕÚ­¶rT7Ó(Ŧž1øfÐ>B3PœMùœX+…gÂÛ ¾Åœ+4Ï4²pÚ3Űnfæ7Ñš$Iñ ~.x yp°í*¿šBßsnT€WfO ¼üî§;ˆÃåëj32Ä‹’Á:*(S÷Ó@Øû«)äøæÿp@ÚŒ·…Í;Švx¶.ÒËÖ¢2ž!@¢)üî‰l®%êuèËöCóht[ÈÚÍX`*àêzôÜè‡ù–™ÿs|‹4ú1Oy•Xtí½isë.B³ž  ñL> ª?Þsß³\Sg!/@rÈ3ç(g™-$ÉD–Aíà9-¸HmFO ]üï|êrP=ÏrnS=Œ ç¾Þø2[ÅâWSA-äRrêi-ž|/$ÿ@òµÕî…:!®öSÁo~—û¯«ô¼Úa)¹R̃é”Õá¶ÎY(:ù”A¥V9¹„3®¼bŒyFDZÐ"ç¹pÝT¬Ç« ’îý°lÀºÜ¡ççr¥ž^?+ö_ïü¸”ÈOÛõ:áI9NIé’ ƒ´âÕež,È]„éØ_Ý7Ò¸qWïùÂÿ¡‡Q;òô:Sœ>®Æ÷t|¾¬“íDŽž’u°ÇOzŒ(?Ä O ô¢á]½%8!Oÿ „T—ëÕ2Ø—à¼(Tú†â3lhqBÑý>UîíýZ}žÑ¯35L­¹*Ň¡þ¾-¶/"‰ Ò^Gà~DÉ»…„;øˆ¿3K-Òjnáé°ÔÇÐàÎÉB잦Rz<ôµÐ¤H&Íã’ªázÿ±Ú"2ßJ; ˜ÿ{WúÑõÞ;·[OŽòP(DI¹D Ú{c ì?µÚÃ#½™ôÂjhµ‡ºè©/dî¹(ãwΕ/ˆ3{nxñU§ßþº. l‹'…vQ@Ü\n/¬!‹·Ç´ú\¾ Üþâb7Þrýûüw’Ñ£*ÚóâW;ìžw]¨­¡rK­&×óKèé ž:\e‡¸¥“ýç²mÛ©J­†n7GöAgÛ4˨ϡñKS͵w"3"“6'i&¸.Ò4ƒšrÿEfÈ0KïsºsÇyz?/ OÝŒ[Q¿p»ÃçÝ 쫺míR»$Z`Ó.åS5òßGFÃâí!ãqõv‚\WŸræf§§55‹'gÍ ^\Ûù»Zû~¼n;éøÆi~0Ãêø¸Ñù¶ííÛ7°ˆc“ZŸšPrA‚.Q¹úÇ¿ÿöÐÇiãã£õ.fï+4!Ÿ»ÕVŽQöÙê"ᦕ{¸M$TFr•[}Tîy8ñÒ>UÖfD¾˜…²lð©w©eÛ¹á<çÂP0¾ùÞ)*þQn=ÏÇz…ëèl¬é¬xÿ«ÜL|½ö»UµÉ'bå=ã½>ΟZ°wîcV®\¼<þ| uzpÈ€w£¡¤wºCÚÞin?â÷ÍäA‹é¶Ýî÷ð(ÉÑ‘mcôFfNÿ¨®™â{׿Íõá¥U<×i†úá/=@§ñF€€9Œ_޵K…â%wüâ€g-YŸòéc;üD›Í,<ÿ+ZWåŒ=¡úù-mM Ǽ;Ù M¿¾c¿ÜW_ÖÐLïgrX},Ï‘“Ÿæ,³Båiæ…-ˆƒ7iÆòÔBgM=cŽ[¼ãñŠSš 7yÂH=&Oþ¾òaž¤×O‰|GÉ)bŽdâh–BYy-ïSÆ)ÿÐÒ—w~òë¢ð»Ã—ìðŸA„¿ç<±z÷ú=e¯é8±÷÷5£.Šô‡1`ÿ67\èE¹ïèÅÑðõ±Â¤&äsI¹\ïùC“x_»³\}NèEêÈ·“ `x¨è¥T=NÝ¿)¡ ºu³ÆöB·‘Ò‰ÑÄÈISú¥á ü„¼Já…FySûƒ?žAÞ›JS"¬‡P€ÐsDÜÐuK ÕÚ,¨ ‚Ö;ƒ">P¢SášRZäÆH¯†ýìvU pŸ­\îçÍá>”‡q#ê·n]n'âõ?´ýžü?XH-ŒËéhýäC´@š°{2¸Ù¿¦Ùm‚.%zzGg íçJ‹«ÜÐÞútnÉÆÅ„Ô~nѪ)4Nm-ªëKê¸NŸ¤òl,÷8ô*”Î º;Úä,#N Sáï¼»…/]}·ö¾˜G°Ac®t,'ü¼;ú©Á ™”4P‚Þ›óªÿ ! lŽ í–QååmöýÙãádºöêМã×,™‡¾¸k8ÑØ¾Ì×z»íyK3ˆ¼‘`;奡µæT^hµˆ'ïW•Û4ãw2ìJžýP壹¾àå~=´£›ZžIœuÏÊuu´‰Lòˆ#—L(Uê:ç.Ñ`3´yØŒâvë%Þ‡ú—$76OU–R• æSâ\æ‰ç©+‚…F.ŠTä|*3šfJ[Ö£ò%á³ëÖ¶ÌFÇïµÜÊ ü{ùƒ‘’)¡É“ݸ^ºOåþÇÅ_%¶+å¾V»{ï(å¥ß Íê¾™¿uaÿuc_ËáwÇ/-:À¹}#} õP‡·ù”qW_Ãüޤ¹§óUÐ^^®¨HX¼?ø¿W>çêQ“ú¶A!ãÆïQÀ“O]¤†óçæÍRÂfrLg¹‚C¯ïùê˜ïÇÅ1£è“+Œ-hNÒ´(\nRg3hðMJsž+”Aƒßµ,Df¸að /]31?¼Œ?ì‹yG.î“ß¶½*³ÞÊMïâ%[JÁ—†{O…~ò­Ëcº?"Á½^™•ýPÇêYñïݵ ¾°Y³×÷«‡–û²àÕ4¼Öóæ®ÄèSy¦0;þF„½;^ ‚fXkH1=R,dÒ©Ø~½©à7ùèÍ‹j7+´¥‚-Ïݳ/-M¬âdÐô§}•M”2–\å†-Å‹ÏäP¥ã’µß¥óÜæE&¼Ê|¦ ¥tÎra”Ñðv;QÞz§R¨ì9)¸. 
ËG_m?<Ò» êäyR·ëyW«wwgwc'ĺ#¼zÜiwMKm,<Tßsùˆ0|tùKFè’+#ïõŸ€­©,·½­ö«Ñ€‡ö¼aÀÔ‡+û“û6œ¸õÛ¯Qš¦-èaKÞÑuÂ\kqætøá«2k½²‚2F‰ßÓͪÕõ:¯ yÌ5ç8Aœ‡„qMߘrL™ñJ&¸—3Ò°Â,Þ oø·~W·ŠLbü ²[@3p1@µ¶jiõ³›o£54÷VNÐ1–pkqd}…Ìñ÷ßÇ]>š5,Ërƒ O¥åF^p%…DŸ^jS¡3™JÀ9õŠå>GÅëÔsCsGÓ” £ü õ•¥3å2ó »8–3>ìüñ0I¿Ñ¾á‰>½a:Qà }Ñ×qVX!ç³KÊÉÁ§\¼ÁÞfSãäG»åŸò0›>2ù %¾¶·‡a ì¹l5Ï­* -›N šR2½)Ã+3%˜¾ÎP< ÊŽsTòBAÒ„.S“”¼"ÄCÍyª¹)8×vx˜;*±(Š"•¢È†yið¤ï}eجÈ cañ—È“&S„0´¦ ìdMzBBÙ¹/Ë÷·[Ç„VRZzÁ{kJÙâCµÛù‡ÈÜœŠˆ_07|zT+áÛŒš­R]UFM$jÈ4}5U†_%·;7^' Sæà{Îs@9a)c)€_)0z­¼ðÒd^Hcð~Ã@ʲúOo¤rjxHtwDnþ.µC ¢yõâÔ¶ûÌLqï·©›ÔW¦„% ;ÞÝ31Ö*¾ôÙºú|ÎÐ:;S6Å/çH` 5‹VwÐóeëÅMPñA‘8Q½©¾ÀOò‚øŸ,• ÏͲ`$Ѻ’!8Ú¾ˆÛ$ŒU¯²AùX=dð”öûXİT q!xo ˹E&´[þ(¨qèZ [(*T&-¼–©TQÅSÊüpÄìŽðlm’G¦35E>•cÑòæ¸:sf?+ Ê2 9ŠžšôñT,/CO?&j¨0ürÃH¡ˆX¼À©ñvñ¿ìÁ§å8‘Jð'\øÇC_û_ÕIeì~N§S Kþ—¹+knÛj²…ocU™¨»/^¾8‰íÄc»ÆõÍÛ]EX áÉØô¯Ÿn’²D „¦XÔ<$•²qn÷éåä£_æê²}¤"L«^Ù¯»«¤­´B·ì‹ ®:”’©â±1âa¯19NMäŽQm5g†¸ Q`<!bæÑ}Ø*}|$ÑA;JY4ø¨ûív¾¯Ë0«Rƒ‚—ã,w6qQ–ò¸›<ÀŒÐºVWz!;<\ï=tîÆé¹Ã9£#Æ®…e~¯ˆÏ““—¨ <9ž†¼»‡C©* Ð'ÝéÆOíIéSã„â$¼àz0û˜0Áùe6eæðÅÖÀPUq‘S¬Ì‘,rlËCvŸ Hñ ’¬”œÐ$œÉF{žB´4¢ýS6AûþòÞ×”3§£°ñ·ÞËÃ9/“›¹¡²ý3„Wï)çåáP|‡Ó"qõ|òWЂsÛûÜÁ%åð¯j~œ¯°…³²fòª©·k¹Ÿ×( uëåÖ?^v°dò²qRs|\ÓËu†Ggz·».kíH+m­UƒÉØT ë›–ü"®„EÈóÜ»#N|¿B7¯1aÇå1Lnx¢>i³uvËÐÎx˜éïªË!s¥!#Ó U£µÌ½òõbYÍ]‚Ó|T·ûO (ߊÐ׫Yê2ÞpJÆà…º2’cŒ9hÓÀ)¼i6‹ÅóÉoëÅÍIÚu×.ËGr‹5ôóejš(`ûõªÝ9-ߎ‰X2Ø6Þ‘š×¸n6"So°"DAíé²¥°JÈr»Æp‘ R«‹,x¬Ñ÷¸8â¨dD‹àpì#JM ÌHÀG”2IÍmNÂgŸ å* C’6QšΆ9nû–«”ަø­1ÁÈÎqt]]žàWú ±d¤œÂÉy2$gB³Rúе3lã(À0õL^Á-Q"8¥%—O«¶Y¥îT=ŸlçÛ·yÕµíM¹ÀŒaPϧöù8|ü¬©Ý./».f°_ói«àþºh*MÜR±‹ÌŒC66w¢XwVÌD>'Áéà"GOh¶]K&*“ü)Xg,áRq£ 7*EàÆV¹ ýî_Û­Îr3_ެ=¯fë¢|®f ŒI¦\Ò)á¬4 óHÚK/ü–oa²/  ”ž)aðeò³á„Y1ù«^ý„ض6ð%ÿ¤ß ô)Ÿ;œ¡ÈëscdþpH;UÌ>ù<6VO€ysœ—Á¿Ë¸¡Ð¾a×å`` H •ž-îUÉyšbC|NJ9‘R2¥È!îd+T–[£réxî—Dïn5KÕ?õ"¤ÅÏv‚þL‹Å&—%®6·o @^ixàú„–„Іi±œ¾©ýÑ*ÊÑÞ1~Î9BqEÉ-°z>ùâzk`wŲE:‰–ÇLYJbzex/»‰–„#d$LTŠ¥/\¾cµ »òI±Ä²Ñì|·¼Â…$)R†¢½ä¨îíŒqÄAÔ±°”B².KJµ &}é‡Iû-ñqŠÞ/0S,ô}ÙªlX´^bzÊ(#?0chü纾nÒfÇòÛÅ@#>“öŒ„…Z6ùïuãÖyòÇ¢I½/þ©ÇÉËÍðjÏǼü{é®1Íõä“dÓí’®Ô„ —Å*f\F?·]ÎÖ®šw®¨I‚¶É[…jê(»±&´¨¢ÊZËrähP5ÅcéØ&Î|¢Â» Iê¯"D¦jÂ^ábPÞ­cY5ûwYGÃ#¨0^Iøm)bXqürçS‹ö”]ø ¸M˜ÍëÃõ¢#Áy8‡­¡ç[#\ÓÉ>¤M>À6Èô?98HV`Õøœ¾fõÔ‚6À.+x°ýnô¿®‚4±2ŒÊËXºÍÛEצ[‰bëReÏ„'!Âvo ‰¡î¤W:*ë,$fŽ ä ppã"|–º´ú ë½·Üâ꯮úîVË:Œsrü3¡ˆë¦„›·íBU³¼š+ƒÔwʵr¥Kű·ÝžÝ‡ iVRu>n·d&»q¶Ò ÷ ïá¥ÑÒd)ŒYNŸ:ù¾£`ÔšA¡^¡!Q½È@ÿº íj•*÷ ½9H¾˜‘p /”ËÁ'Ï)H*¨R‘C>c‰Ž[¡YŒ9Jf$âŒç4ìývx»‡»+Î(¿^w×Ã…0®Ÿñ+y¯0Û'ÛŽ?]hýrz¿¹>E¸²LÙ3J"Y+€œì¹ š„ïíûUKnÑò¢Ù„Ó4åQCýBôï&^vI²RŠž’D¢ ÈŒÒâ"YØ Ð“ëª··8£Lm„{žÙÈLQÈgõœäaÏfgᜇ·Æ`»r/'•7.Â&‰‚çØ×ut³ ^—¹oÜfT—å_MÙoìßí‰ È'¦L)d±RÙø-šwM߸Ms4‰v4ØS œž¥0ùÛämÝxnîœúN‘•WÛEŒ“ÂaáóF‘§W!‚[¡CÔ0[™*S)¸æ2ÃbÎ{*•kb*ëŒPƒŒ^JsæÒ¹h‘'›"ŠW8—˜Œ@áqàŸŒÕNXÜ1&FËÒR1zЇª]Ö‹åHcÎÝbÝäbÿ¯:-ÚïCU/Mì•TlŠc£¬üW»ø¸Nð4žï§å÷M|7Ÿ—#ŒÖÌªÏØ`šòfÛ¾¿ó·ÜõX¬oS­ðËöôœÿcB „PöÔ JÜ Ç‰ßÁu|e3…ÝÔ à§Ì(®nÊj\ å9[)œ"!<´æÞ:¯|¬gâ½¥Y8ª<‘ ÞPmÄûû’?±ÙR»ù}í!Ü|îÚ"]yÝp5LØŠpVQy`ªx°¿q«ÕtÌA"ˆRRLÎ!ÑëHzÝXÇÇÈò—ƒº>AVV8•}…%ONWpŸP‘‚†$8cíEB˼n0?>Tm=ØÑRKO².ŠR®„¥ÚÓh“’‘F©° -¡Êz@M:A$Ê®0±¿¿§&Œ,•QñáÈÆ*¬­n} *~Y)‘ø7{¹ûÁÍ0A~÷3Kü䣃7gW¾õEíWO½EÈ.ï 3!U¿{Ÿs/p´ënDÔÀ©«'/+VÁY!z‹ wW SY`Šú" eÓ¸îXý4¸v8lï=¼è”9­Ð'I»¬•¥Î¯‡,GmUŒ5qóEñ*Ò‚ÏZó?»†Å8µŠÙ¦9’i<袤c9ú£½c«Ã¥LëåvWàùäï²Í²ŽM&a‚Âð¨{z¾. Æ÷.Ôiò¾]µÝw·¹uîÏp×.œv?g>*˜èe—„ˆ&×VšÁUc&*-¹ŒœË7à%Ë›S÷½ƒe ÄqÈ•dLŒP ÄY{HÿT–” ¤!7&™É.g×(D>›ÂÄ×¶q^Í÷çÛ¸‰ü™û^”üÍ …l4ò©áb* þZ•F‹ÿ†ÿÃw¸»ØMß9À=0 ¿%—ÔžoŽ…ÎéäU·®ÃÍræ7“wh§ºßd±ý>š‡ó’Ý·™Cõ—ô}3ñ0¾ô© ½¥§ S{l„Až<(ˆ$ Là2ÁšvU­/ç_Vj.C *'³ÃͰäE4ð 5Óp⪬XH\)ÃH0! 
¯ 7‹šÀû‘ËügÇå(è¼rÄ£v>íĆªÅR>3WÖ* ¦pE‚²N͈^ Ó†rÎÏèOÏ(Cñü.Ý pî^ÿåvZ’õ.Ýð•—­÷›ÓÝÇÇ:cqóô:úS¦+Eµ:a%X…â"d~—7EÑ$GX,;Ÿ„0‚8\×·<Ç,‰áL’]"D†¤’øÇز§P/d¿±Æ7±ÒYá±ZoÆu _øúÚmÊÛûÜ:¤›Ü(¦¦@ƧŠkúÃ[{+ëé§y}ð>˜n1–ZsFuj% î ¯P¦îB“~  ÚÁ¾NM†wo ƒyL˜¡êðÝ=óKö#Y¥à¸Îи¨¨1ÚÓÿšbk7«fXÿ)†(m’±Ñ:o‰¦<É26à1ÙR\ˆLAz&r–\(ï}JÎsŸªõõ3™°éꆙq«_i“Š4æ÷Ôýl¯ˆ­¨U•óƒ«ÆÛk\Rñ=Ee‘–õÍäÓ·®^ô¶L4`ÞÕ?{CÄcY $;O=Ê2•²R–k2,ϺõbÑ©ymWï«E›æu œ{ŸÉŠG#p?A¢‰6:Ku„‡ªÓÖ^‹yâÂiö´?ŠÌ1‘‚Þ»4jÔ¿ê\}]ÆÞôp»HH¾¥S*ŠäcÛd`÷Û ¯´L{š¿ê°ëSÎÃ,°!£íùÚ)LH3Ù*cN>4˜‚ ¡äï›´B9Õb,3Ožta™˜qÁø°¦ådÔš_d¨89|Ôº¸u"ðîCÀHššÄ%q$*Á9i˽b)2—’¤máj ޾°s²ýú õ ¹á?|T ùw»œ¾JˆÙ)L‘-ŸÉ+e­!œ0YÒ6F×\…WÁFÑÙ0¢ ““/Ûx”Tån•þµƒ£§^žæ(·—éÆÝ=¬Ãù{Í$ZÖë¾sé¼ }qÊHU‚P).3…ƒòCÛò'“ õ¤†˜b KÜ%à(šJçˆ6*CäÉ$Q™”ápêzËp­6ðþó>¶Yߌ³ùBCî"±¸KòPÛÅÒ)Üû”ñ ¥(í‚…Øç“·í?Ï6õ·44Š/8Î)ŸoÕÞ0ÈÊ©›¼jÖ~3¯ºñy„mîh»zå2SjŸÜ'¨|Åq-x°Ébd%%V'/2ÛrÝ$7àŸ˜á0´KÔÑå© ){§©Ï ¢  .sJT^gîN…ê˜ê·­wÍ&@déZÔ¦—ˆýéº.÷U^v©Þïü æ™aêJR>•––Ê`ïÃ;·¾ž5õât|ÓŒpÉÏh1‡ ªU¶ß¸d28¶›b9”¹¹÷“oQÒBSï2„>ùè jPÀûvBÌðÁ"Ò‹Tˆ›¦ÝDUÔ TÓhDˆ$@A«à4Î<9E]L&zƒ²áQÆ`•Ê’Hž³V`¤'©¿Áâ»´2ãŠÃoê˜Úbcå® õVUŠ s-‘“ßÛïiú¾uåh¡9Du ˜gCUzòf½UQBy[Öýònµ8¹Õ¢wu?|Ž6R{ò-z”06ÜR6Hã)V‚µ¼Œ¸‘kïúmÅ$ b‡Ò–@ªªe0ÞN#1*I‚uΤ`£0^J#U„¨¢½…L&Ó@”‹¼_IÏ¡VÝ8-ð?šT–œx·N«Õõ ™ê3Hk¯à ŸªrÈøû?ÞâFÎó d.ó}Üx 3~¤¢gâì|]zBÄäãØUê&Ÿ¾­»Aòþ›ó5¤XgßIÀÛÔ” óÔ¾¤ÀÍeE-7½vnw—) ‘„± ío\¸jî6‡V7‡…/Ê-žAve˜ç‰xA·ÌyK½vW¡ Õ:ˬ) ^ø5]ô„3Èîûy|ß(äPPùjUá¾€Ä×ÏÊ–À´¨]*"’q E­¬™kåÎ\ùr,ç· ö¿•ëaÜ䙂—äBH ㇃bÚÒ ×~Ü!àø~º–Ÿßyýz=äð ],\ ¡: ‰Iñ¤Ç¤ðÂöÌ)>9aðù©N³^qœÏGi£Ž¡Ý‰Æ)GA^`‰"J!XZ™ãR:%¼J± $”Xb(ܸ:#àfI$[jC`2EòñÃnÙüÖ°¥U¯MVVw!ä©Ûh¼{kU§­õWRá¿[9  ‰­Hy†O×)ÿ;#ËrLSZ<`+SÜž=§`ðiWËœ·ü;Œ—ë² ƒåüE#¨ÓÖà§h‘ªwÊØI!çÏOuÂôÂã¡G!.7ÛÕû.Юæ­JÒ ïªhÖnb­"d 3nke^к 8óEh)‹u1³,#ìj×Äká°?WëýÐnýzEûqÇ·oê¬×LÙ+cš‹"©l¿¿çb^0éœÕòáŠ]HÊ\Ý…Ü®ßÏW»@ÕÇ{ç‚¿MžÅ)ee§ÄÓ7ß#s÷L3ÏW„îIE[ÈGa÷¼&˜qlS(/ED–•¬ÁWEI$†š ÕÛ+®ª¤'BR„Ìt0“¦Ô`+ÚV>Ýy›Xú¸Åñ^&6Aµáöæ7k*mæÖ©òNqÞy':pÆ–Ÿ¼ÀO¾º"i¿ºn»Š`ž!d<ä0½•g_¬>û³Ü#ÀzÜ Ç-éÜ<Ð5Št\©¿=9“àíVèIþôù)5Dgù8›$·c7uì²'û¶¬QqReïS¨J¤j˜wIÊòl«ñ9SK¾÷ «L¯ªûä.Á­Âý˹¿ßk üßYù䌅P`o•ÃÙšsÅzäwÏåÖ1ÅÆ…ÝIr.§`l¬þدJ;fŠõÖŠ”¢ÓÁ¥à­öžÉ”j M“,p=}ó8nB¥Ž–±,Ü%¹>æÀöJbjìXϪµyqÆt Iካ–†ä u™õEyB=à²n­€<>‰®¾ÇwŸ½e~r;á^í‰oq"Ôiýä­,€ËŠTg[YŒ"µ<£Etõ#õjo¯e³÷ËKQ -¬á:z4D‘Ig4Ã÷Š-ἪÀkRñu2Üë"¢VÓŽicb¿ú¤o·(µ<ŸÇa? @.ó=-ÙÅx×YfZ…±ãåx÷ËÅa(ïCÓU„´üeеÓîìŪ\“šÒÙËòÅlW£óý“·—O6Få?;Ë/›°/‰àGw–?ùn"Zpo ·÷\×+Û3+ {Öò÷»ñÃìá¢ÙZì˜` |¸Ò8Tå2;’õ)À]È/µF›{SKá!e+ƒ+5 °úhUc#bá—ýe ¹Èe[ñæ%ò¾ÚùU)ÙógÞ±NhÓÊ)oíévv‰ú*н`î!U$åÙOÈh‡²¹Ebó›‰hä›°¡Êñý÷,¯Â°[¢%©h ½3\<ùhŠ¢ý…ÈyÎW¤ìâøãˆà¯ÂÇM9i'8ñ¥K•Œ µç*&É‚eQ—àL²pÍrH,ˤCL ÅzW«O"8& ŸžHÔòµY„¼~ÙnfŠÓŠsÞáàÀ¼’V]ß”îþc³Ù®o·ªÌ7{-Uö!Å»½ðgoÃ~_¶u òÅôã1¯˜ùÕÞÇÛyd¤Íîµ*ÿŒÔ[&õÁï|jÕY5Ùgò¸´…¤í-ÀÍlMŒzŠ…Ò’\Ò*ì`{ÓîÂw5À-”•º¨x­ÖÔ2+—£ÉR'&c`´#Ò¨œ*XŠqQE¯DR›®‡Õ‚÷ÜÃbËôZJn^O¾} ?wÏÂ;%hí!#ÑèæÀ#’ÊQókŽª8 óŠ?àx=òÉ[ÚÌÎ^ŒÛáž;•›Núår~/äû0Y»Ó@,h‹³SO¾á®ó½ãœÛÙ&0ð(УܣГݾ¬¶ý˜ïXOVݯ&ga²ð‘–ëŠ_¥^]¤…w9ùÚ&/Xõ!yQ˜ŽŒ+¦ ÊöâfaÐ2½ÕÎéϵXü¶äñýiÃÖ×`ðΡ…PWZ¶jÄ7.ÖMQ;þ!€ ¸±aîìçÃf}Øý“Fî÷×L~›ñ©PpUX/D‡ØýÔ÷*‚æR`é{´$Aù­eÓv¾iÿypsþ$ šNc‹ä®€qðœŒÆd-´ª¤£=i&x¤pxŸœ‹Æ›G& w,N:M¸ø,¼½ˆ£¼¾ÛC{*½zò¼öª¾“àóR5§S~ Ûröz?'¹Ê<èÙîKõB³³·+ùq8û- €G¼¥õä=¡(/ËpϪáoXü(iÔS³x+z€>;ù³~ª³¬· ÿQ„¿VuöwDONÄV}6Î\!²$YµôÔCL6Áé¤E¶T­,‘o¢¯Ø†ì×ÍZzÅ—JO|¸«íw²¿î€Ã:×Ôò/&¿SÏâ¨t4V׺PÁ¹énöØÏ”‡©BŒPòpt…6Iž½^ï?^ãlŸTºÿù_Žu½ 1ÐxØÞÓÎrã-KGèÎë4ðò“ge€¬˜lØüü˜Ö½UxBU.h†©_…ý~Ýìd‰äXej±2Á‰c) ‹¦8³³ á” F¤‚‘ÁàiN ¦ŒªÕ›i?Ù¯p:È©AsYØO›ãäS6u2 ùECð\ÙØýŒ[ÞáUø+úkyuˆÝq±OÛeŒcÖˆÜÁ”3‰ Ÿ6€²¼9Ýçòϧø´ïq\ 4'–Î@"eºÎKùÔœ¾ó¢7šÝÛPL¤ÞØÇ¸ÿrñÍ›AM¨¬ÅXæB¦R<Ë®æRyMºèP…ÒÁ(-BŒ1„-‹ª$åeMNMÏt­/Ç!,‚üe»nÎÿÒ˜VaÆ)p©y¥“Ì·F +—ïÊæû“ËŒã6á»Ípw´$¥´n‰oðgŸ¿9©™ðÈ€Ö¯:$r7”’ºTµ¯R&F‹9²r))fk‡¬Äs£f²]'©•K~ ã¦Ù*2J¯¿ñ*Ë:DOå¹j¥ìßÇëïÏ^/Îo4;ß¶á㺽X(aW¶ÀÔÃúb½'u†Ëp~(Ûæv äü ¯5©ƒ”•óbtˆN׺]ÀH…åJ«ÂsôI;çs•±d•uiú\»a“ûp[@]t¼it{^ñf«âN’ì;ñ Á^#ÁÃF¿™\pQhœetv©wã¦ä‘Û€ ïn#>Ù.c£5Y™4mYòÚ¦ÊËŠÖÅ“¸ò5U$YRÀk6º2Ò ¬ˆÖÂ2ü4Ãu¿½™^t¾_á3–f$y.׳]Ö†ËÞzÙ{Éý•Ôͦç4ì·]Ó.Ì1¸ªþñžžqí=§-† ÿŸð÷! 
^µ îDÎ ”v×UiØUØPtõµ(¤ƒÐD{5*å(¤sQähs£§êï]Z ejiý°ÛQ¿Oöä¡sh†i©$Ž‚nJ2¾:¬h‡Oؾ¿51‘sj2“b÷Ó+¿¬SðÄm®¡fV©ŒUA¼J Z%Uä&û¨€¹MÎá횤K˜Ö©LÚxSŽeÝþkCýÇKåɯۋׇ²Þ|Xßië¾£åËŒíäÔ³ª•)ÿ瀿‡ÄþžßJ·ßžìý ysvó˜$Ùe)ôã¿ûbå_Oßn½Å=K^ hŽTée8ṡˆžxEæd{-¼®Ü8gj,›½ ‰iø¾w$o½.Ž- ß?ŽÛñÎ8Åé.êÝøqnÞœIEúÇ= 7^i«š-é_Îv¥;¸²_ÁCjåMðÇ”2JÅB’Ñ”`¹ô"éœ ¾UŽŒ²¹Ð­¢—ÎX'Ñ$F.XÅô>ÖMA$éoÖ(¯ÙøÏ2ܽ¡ý¶$©”è¶;Æš­4¿ÓÒð0t/ÁÚ6sÄH€È€ê.A}#¾”Mãjk8ºø¤Çñuf8¸¾Ô@–@€%qVr'"3È‹¡€¶fK×®7a°rïÄ»o^Ñôùvžo ©žY+¨õ™]iß ?—á‚ú‚æ/Êð‚€_–ÀéíHÝUã©>ýI+KP¹š”Cäñ§4YE­÷R"Óù" Ó »f_é¢Eh…0*­PUœOŸÛÝŠ˜ÁŽ´_—Õ‘×9´Çøa~®KIÕqÆ;Pv…sך¾½¶6í³+$7–ÅûëT³WÈR€% ÈœLÎÒ|ñÙ^9>jñÖ"5Ö¢4“GTÇ £w`¦¹Êùí*¥þÃÍ^©e-Cáº-ipÜ…tÞ÷^!öz+®„ã-ÇÑ€4¨drÌx/Ë=XQÓ ¹$_^oñÇ’Vû² m)"0ÍÊ'8þn”¢»t¤1|ŽrΣÆk—ÉFø‘×ÊrO¼½•ˆIƒ¯2‚ݲ(on^IÓÐäs4ü;'ø3£dç”l!ç?Ëú¨š5Ûšå€Á=sfi?µB^7as´.qâ%0ÍxxI`F$jI(ÚØh^µG T4Ýxl‡OcÌ4ÌGƒ³°ø¾½‰irfó. Tö;{å$w¼Eÿ‹Úñ¾?{õI^ôFî©PgÚyÛæ†úP [rœu›Çþ2¥Ãfsg€ó”¥Òˆ¨†Î³v ñ ¡–˜HrŽ•ˆÜ3¢q<Ÿ¹´ø-fäÅéùô Þwÿî¸a|tnJ;ÜŠŽÎgÍ:8dÐå®tó–âõqØ÷g7 Â7F_—U»ÃŠüxÀÛ%ÇüöV( çx©}Ì“ Ñ¢€i²#7Ñ¡xIøÍ0Qh¾™s`òB½B³ìŠÍ … ”%MG÷«›ñûEü·ñ:4AÜkdùâR'´ïœƒ¹qº¯Ìéõú‰­ËåEé~úz”õÔÀŠÖy€.,ʅἤ*Þ6®åŠÖµ1k’gHqJG¢¤ÝDq6[UÌÆ×œ’¡…nL#…Ý´qó©Rܬiê”Ë–m.ÛÙ<ø/m¿ÓÏh«rÖéÿ©ÔÂuÙu•ËM˜©Õ‘øµ]F?Ò¸½nÏç#9Ò{Q%°P4P c*bIyW|ŒICb/€¡™'-kñžyqr'mŠ$Fã‚ý-Ì^!þrý©‚Ü)û :Ü3ÆEVÖÓXý•6Íe’Ï·á0tGû9*m ãK¢ñ-MïÿS†¡}¢\eä5Fpª9'LW4Äà`{ÙNš„€ ´ÁjŠT–Q$‘V$‚Åt-#FDáÕ¢Ó{ÛÍÖ<¾t4k_Æ4,â¥nʼ-4Øýµ.wUµ¿‚FM-An#¸øv‹?üy+C³X”­s z¥† H¥ à' VÁÄP ×…jœvÕÓä£ñ,+§ rç‚ÊùnØ-ëøú9l®›v~~;6 ã´xæ•ê<@Ò•e­jÅóñÖ¾Wò‹¤—¹”z‘¹áv׃nòéTŒÅIn@„õ4 §‘¶råC\SÖVZ3%3õCÄ$SPB'—œt«N—™7ácØŽÌ/²îínºßÇí»YGNuø$R Hü€ç22‘R,‰¿«°1M¹º¨UBv<è\™òAs†7ïE0ÀdR$#«—ñO2Zp<¾QªAŒzž.O„¯í2c¾Aò»¼œQÍž¤L_„X€3.;ª#_éö°ás’#šsŸz©;ßNœü“Ü&"I÷Sžr)eÚÀç‚-6F°<gªôÈbŠ3 ñÂà_ö6Åb¿Êezg"bQÙìò‹§16Ë–oÂöïpü¬ gâ^~§@î¯D3©½ š·¬&g¹èê# a½ëSE-Ûz)A)¬qÀlÓoEÃKÊÜÍ”êlͺ¨ª† M fåÀ›´…*ÊÓ¸a=t«(€¼v¹ì „4Ö-ÿîH«}e_iųÿ§ìÚ–ÛÆ‘è¯ø-“*‹… q{Ì¥âìÄIv3©IÕ¾h1"E)Ù–¿~»IÙ–iƾ:Ž,ܺOwŸ>ãÝ•A¡ÅwûÊù~CvÇA SoS7‘ LìJ¥ˆaìYÊvë¿êt¿½´„·/Qò…°Ü@¸¦÷¹…à"geIÀ¯QÇ @¾V du ç¼öÂä`œ—ïrÕÛ¶Î&\&†C¤}ó% 4® âsAù048Ѹóë6¥Š ;Œó¶ïÔngà ˆ¤õ5°eŒ8QU"|ï8ÂåL±u€Ñà'•ƒ@ƒ@ˆWQgJ‡Yd¸ÚU@§ª?”~KiÒEàï•Ӌ¿Ëü-Sb%™NúÀ‰ìÈÂØ{¾¿<µ1³ð®IÛáá [ €¹Äã’(BT:‚:œ–­Bñ8=ˆÚx©sl‹CQø’cDŽ˖,ïn= Ôª‘I÷ö_Ã`ËÈæ¾p1KA(³‚WŽbí.½MuÒÕ¥¯ª&B.æ‚7Aò”Àx³¿‘ÙõÌM?+þs p ®¦-dî+ˆu™±®Àæk]ñÊBA Å%j¯•BºŠ ÷V¯’é€ðá`™5‰38îÛ0öýÒÛßeTZà>k½(Dï º»>âõ­Î…T¨¢›„~ñô²ú¦ëƒ<‹œ)Æ+A•(ò(ËBV•` -¹(´*iîrãdŹ…ý‹|ø²è¶,q[owk_ÃWɺ[Ÿ&ýøq?ì‚Ðâû›ãP„¿…G wÚ„¬Å¥íDZsŸmãGÒ|¿1ëðŽÆ“³×÷ç^•û!»…Õ[´ Ná(e×` ¯4%%–·iú./ˆÊ«³š@ô¯ü–5€QJ´! 
‚©i»íëpìü×i#†’‰ÉŒ™äÁÓ§©¥éüì³í‘´ÕùHM4„R¤0¶nü¾Ív‡>Òƒ+Á¡Šœ(%™Ëa¸ëA‹B€Y®,|}¹N ðÞK«àh¹Úµ8¹:iæcïl ;æc;¬TFhžI¦øayJS.P²gñ3Ï“yž°Ë#o:Û,DH§M6àÕ;eqH6.œâÆÃQr/ ñ½>Lá J>ÀoƒõÎY¡©Ä>’åj~=tmÖbóÄ ë8&¾¼è÷zLOtì¨ÇË3Æhބܸìô/ë›r]¿’/6†`2a‡!ÐXçA:7ÕÚè§![¡1²(-쯎!›)ì㬴¸N–zª´Wà •õ*0ÅÒ,Èž‚»´LE3S†˜Ù °(Ql.~,¶½ÍƒffÞj$ÌYX ƒÚÕÑÏE|œÄ‰"… 3,Ò³T„tk àÜ‚•HTU©`×´¦ž'9WV„``µØé`VH¸æàíò4Úß76›^LšpAcûqNZðæ~‹sÌÙTLÿ¡Þ'¯1‡±Ý`&:SíÌ™‚7’‡Ø´³1·ÏØÃŽš¼®ƒÛ [ç 8v2×½õ•—ðg$u9X[ì;•»|®4~ƒeѶYWšˆ]ƒÛ41ð—±–O Ó·9Õ+œ_v'Ãlø¶Þ­ã2ÊäšøM¹µ¯ÌÄ RZã•àWØýÌ‘+i$à®~qÌ87VUp¡i)5ÄvRo(y€ø7¾”Ý}Ö ¯·iÕü?±@Ör²Û9}{^Fʧî] ¸ò‚ŒàLå‡üýØy·™Bº¥œþ,õ“sðè"Åþîz[zl=ÀQ; ÚŒ¶€t©[Åmn¼-iéŒá˜vÈ-3„q̘RزÜÌ ¾Œ#îv½o‹Æ¦±¨~X¸«>He]hg~Ñ”NW°Å+ÀX!Õ—I r5Ö7Âûª ™”}-ßú-\(Ÿ] » !έ4´È ϨÍK*|P 02))DЊҗ²ð`Nd%Ç•„ÑÂTc‚s7l§ÁÖ]†SËS—£ºcÄb¬£¢—¥€õ„Š‘žòu±ã­IÚàP(…ûMç›ë°9-æ—€»ÀxQðY,‡@ŠY« ÌšA@ ‘1`¤P…¼dN&–íEPðÚfv‹¬V€žIÛûw¹‚Ðæ¾ïý&^ÒXaSC‹ALˆžr¼Äç(ÕCxxìöãù°y&8EŠî»ïëU‘Fh×•Ò °ðR0pnp[ —bïáÂ`ˆRÖZPm…rÆU¡),oúÑØe½/|iÓ˜†kQ¾nžª­XNÀù1qŸÇN¥”!_R2ìˆK©ÏíS$¸ÇØ @;ë`£® °H €ºÃŠàþÊJ(; „¸â½² æ˜ uáÞ‚•Úi®Ïû°çÛuÍÒ¬žy­äȵ2’‡²™_íÕUÝESÄ .’: £“oÛ¬´}D+µ=œ<ì bÜWl½è4ðj9+ˆÁ%€6lp.]¨ YÞÜßS;fæÀm}|ƒ VÐÿiË®¨£æBçf^{…¹À;I¨ y¾‘þ¾údcÉK&@«”Û;ÔW˜ƱIã4‰ÐNsp^Œë‚­ŠŠ–8Š9˜QÆŸË-xJxSåÞvY+ÍØð"ØuœØmšCZ¦ø¢÷W]_G&6]Ù˜Ïê8ãð"ƒ¼ x?œŸýi‡M‡£MF9ˆŸë5ÌúsA–ÐÜ싲ß#ç&ÐÐØ À…Ġ̇¥$Ʀ‰Àjt]¬mM…ßQÓý+qÊÆÊÀ—kà%Zâlsê£}h½Ý„#ÎI-)”ö)«o°ápÉ#­ëhš Ds +û-¨&ûp{×h±©„ë¤^‚ÏŒ°aáÝnÖ»|RœÛ£f~<+Ì ï˜t?k0Î.æ£oö%J¦‰p¼k|8-ó½Þ#±æ‘¨s¤>nï#ÜÈ¿ð$ÖºÝ k@[uà›±àºý|ߤ…~êíö>xŒÝ\¸ôÔ"û"˜çƦÓìz¹în-¸ö>³EQÏòŸbiÔó‰ (¼¢Ÿí:tŠS¹tõ¾ßÇ8Q &'&×¹ܩeŸòϰƒ¿U§ù“Û Â¿¨‹¹Æÿ Ž|ðçgSk÷T3x™!ŸÙN£1NJ:¿ ÿ,.qƒÂ!{^`—÷±|ݤ'ûçŠÃ IäWn&àýLŠí¬›cÙ{Ù/Xç·/jšñ²ÅÁ‡ m!«ùµüplÁ>ŸÚ'ˆÄÂH¬Kr¦iC²u¶_^änÓ iÖäÛ¨eï/îÈ‚bË÷&6«‚'€;S2$ÎÖÃ!}ßòÓ›Oíˆ'‰*ëÃÒŸ}ß]–öÙ_½ª„À%19Jz$-¬¿­·ÙzÆ’yêr;нd ªi$=¹¿áÓô½—–ðy¾üÝm]í&ì9å"ÞN‚_ȓҵåÄ»Ê{µ¼Òu×zf—×§\>€œ©W§:ˆepxâ âÿm,—+lG)Kœ8j™mía9?aGºÆ-öÍô©#f>4uÓ÷øk×ûYm? y÷~€þ¶Ûz©Ê)BÈ“4Z¬ºd;;áN¦nEšÀ;6¤íºëH>æ—i™Í™¤(,3)ÁÌžÔÂzÂLZñ¥½ÿ‚ÌòieW§­ò_ad:iF‡ Ì-ŽgÀ9¾ø·WÎpŒ3"Þ"gŠÈ¤ÎýªnÈXHŸãœg€{nSûÛºLãëüDi”`ÛÖE×—÷¸¾@ÀØïU €å7«Kðù‘u2ÎPÄ,IÛhg±ˆåHø¨?|"Ôí·®b8ÄAt³îÇ1âçgï*lÇ$Íœ²0G7àI±¬öú%yòäÒvÈjÍz{Øv‰LÍ!nS?O²M!Œó¨~{~öë!súÒPÍl,§ÔÐ$×~1péU€Æ>vöeEwhSom}ì/mæÕÆ“—:¶Ê¯F.NÄ¡Ì;K…Ùo^j†=^F¸ÅZ‚D˜s(¾‹”}ÂNù ÒU–&hK@©¨F•ò&ר„•¡Jî²ÆÒ?mçÓ ìGøž›0!Óº ‰=r,Î.ún^–Ѐnà&,kgsekÛß<7‚Oϰš(³Ið=&ŽÃ^ÿѦ–7¹—Õ¯G~vørRõ©$fc3öe£ïâ/ºÞ®¥J£tõ‘‚à\rìôìüîþµ¡‰B™§¶~3d#ghqI÷\~_Û5Æ·‰èÆöWA[Ø],êƒ?Ï*ŽO›§–Î)Õ1\ÇÎ’S£¨Ñ)£€ÿ‡M[‡~W/g«°øx[Ý&øø=FØŠâç„bŽo­5cfë“4W&‰uP`U/³3/œ‡`#0šK‹ªª]8cuÑ{¿­|ã‚o=am[ðM«Ð@žy³¹`(†—°Ú[Ô:o,6â-?LìÊpžéMbFç¯È<¡Ï³Ò×É"Z £¥¶\Êœñ4ºñû9üÄÛã“ÀtUs"ä×J sø_êÐÎ2ýãìï(…Õ½ˤØS¿ÞûåªSÑhËZ¿Y§ªý±®£x‹ý\'UDûJë"£ðø´H.±3xqOrŒn‡®MZÜŸ¶m#­†£Xk`a£¨Wcëv$E¬Æõ”¢ý¶Þlºq‚q7,{AÛ` em³Í”FJ„iaˆ¶ )*)t… 9Ì+NÓbEf¤ÔTV·û9Ïîi‰zÌMó“‹G&¬NcC; u¼§‹c£Iž¯©tHcÅç¼üþš±»$ÃM¥÷b®f.´½”=}fYömmd <;@Ñ” ð/Ù¢5ùÍþŸ†ß¯u°ˆÓ/æ§ tÄt F”LRsômÝÌ2~-M+ÙÇËg‹¬ãàË{h†‹ÆF9‘ŒrôæZ€¡hEqG#žt\˜±Û2¸2L_# T=NF{-@USEFA:ŒÝvj¹U·ê;À‰­çáW ùt–to_a-˜$U{¯<|Iùþ¡¿lIÀˆb%%äTpú`TôÉ®}0ƒÖm{œŒ6͉:Öy˜ I*#(§IŠF>ãe{‚M3™Å(*ñ~~;„­Éq¬q¬ŒJ ñ{É$áDšâÐ!B¸_0•Ì—ÑØ&•ÿ3’]Ð-8*}‹¯; b8 Üœ4y’tÏ~<“å;9F"S")]Áè[f#ü½8—ûÔ ªâÐÙ0"~šðØ\kµ¬ÃRŽ€ü_Ÿ:ëÄÚ˜[ ²Œ²+½½?lbJ¨ë"CT“$Žè>3»ZHž?­¶Å¹"IGˆ³êÃUëË—˜öÙt œüV°lÔQ2mŽ0æd lHŠ× œ[v¡·©}ŸXÊþpŒ´ƒ‹ö/§å²ÄùÙÃÏ×Úpe;'‚JT’°îáXÖÉúÑn/ÃÑQ… {øÕ´b çãžÁˆ=R¾ØÜÏURnô‘L’ Ýõº^æÜ4¤“ýϾޖAkûóØXäå‘b7HRjf„IYâf=NÈ\ö{— 5½¤ãû8ò/6¡Õ-ŠW?ãtÑÝH žH㑾+Š.ƨ!’`±4ÿüÓ½@cù,KxÂ(p•ÝÖÂS—d>û¦«‡Ò¤FH°úŒüçíK¢Ñ,ÆÀºšL9Ò«§”^fÛ[ªlüǬeiy5زpUoCUÄ‘…±zEš™s¢•DÆØƒ›Ì*ܹŅå8²k…õ].|ï‚ᢥÝ.dt> E¡_MÝ%›C¥ÁÎ4ÊýQ3‡³Jæ çËmÇñN×]ãÿGÚµ5¹ë¿¢·ó"±x¿<ÚžñNvìרYWòID‹"µ¤¨YͯO7@( öžSµNm%Û@÷×ß…ÉþO µÓ‡×ý¦j+VóÙcÉ]äèð>\$q˜°:âc= ˆ½±Q>¸ª U9]à1{evü´­ð¹lÄŠ‚3Þ#C¯ˆ—¥I–±o­X6»œ  ×Õîå‚qb’Û­xþƒA,²CºÏR.írÔ,BŸÕþ«Ð/l„ë“y&ó«j$F&ð¢í°´!¯SëHæ£8½MÐ÷ü,ÉbÏg‘sEÞ¶§}5ü‰¾/투4絪 ÿå92RÀáÿ±xR-û|†eí;¿Æª+ö½ÔË\žoêÕ`Ú7C9'±††Sô=E¬ k)€ø/±Èç¼XÂ1]XСFÓÐg@e Í›€øåþ Çô®ÊuM~£Ñ‹FM;É“½«Ì’$á1Kt ×1ë{«òBôÔ@VjbU®äqeqW~ªŽGju]c2Ÿ½ˆÓ^·[ߪ×bt€†€ï¡º‹±Ø¥ˆ¹é 
(yšvmÁ=ž+y<‹|Q·¶ÝÏCFÆ2«Nãç·oú~@nÿžÁÚÀGÄEùFñ ˆvŸ„¡àû`- Ã4e1NXª9›¢ªíloðt†Ó'œ¿ERü.éæø7X=…è|T‚in©—E^˜°æ¿Z KAép<=·I¶¨á@e k§"t_DK6Á— Á¨ï%pµ¤¬$Q¬s—q­lWWÁkÏ^ªÕžuðö"œ¬R´¶˜Ž¼y[9ˆ†¼»P£°Ðõ˜í!O¼ˆ'Ì®¥â+%óH_}{êà"CÙ¾¦`ÂȆœu½*Ym*“Óå3ðþB|0à€-K)ÚÐ|ârÝÔyQ“¢,èþç \¾t“ЭMfº¡ÚX[9èðâšZN‡·­•í}´f>ƒðx4¹ yðõ‡Bþõ¦†½Ôr>2#ñdµÍN„_|ÎI¬ÅY–ÎÛ8š÷F©Ô4!ã3›Éº…Ýøöö–6NÓ勃Wëg8s3FD1EBL2ÞÒ2 5Åžß–÷GU¾´X¬2 =X#üâ<Geºï¼Ž-{µÙAžê¼m†¨™¾F÷SòTL6ú;YèmoÎ-”µ L}7æ ïûã¤ê ¡^âZE³)¼X¹(…À6Ž ¾{9”Š îžë|@m¬‰~3´ÙñÕ+Õ8þKÍw ϧa,jæ¢]Ó1.~•»ûwÄBƒN–Ê Ø8ËSR§Hæ~·jäqKÈÃ+„Å»ÍfÕ¢ÁüÅãÒ*…E‡x7M9HïJ+#¤YÆõá7êtǼúM´xžè Ê:3 ÆP) C7Ë2N­SJQ@Wš¥hËF+àx콪&ÍŸ:%Uˆ=òà,ÆNˆC§ÌXc¼è”ëˆ"'”M¿óÕù{Îô€wüñõ¡oò²0•x÷†ç¡ÙK‚òNgƒ×ʸ¼rÕÙðžFüÍòfE®Ð>Žè‹zì °›¥>öÀŒeŠCÕ$f‘öîþÑ·ò¾šÓð½áîÜw“ËÂa™ŠâVB´q>ÙˆE>‹yµÉóÌ*í-Rýòï-üˆþ äåò8”œöEhÚÂnñy >¨ÜÐ`.fQEN]Ë‘¢¨7²†bé¼Ê†wö>¡å}unƒ!Û¡Ûõ³ØÍf…Bz˜×H?®x£@tƒ'`‚ÙÓë%ÕãÜßÚ&O›‡¶ÐêÇ,ˆ¦„ß jÆYuLQó¥Sì‘e¶Ãå:§ÝÉí®%]Tâ|¦Ò „Žÿ"·ƒÐµ¡VeYÊ„^`kTù俇ñMeö°6ö+‹øÇfÐö¢aˆi BG˸óUæØ#:§qøá°×h¼”Y}Ë‚lý¿k§þ«çÅs[®-µvêzišð¦×•DÔF”ð7€Æßü…æÍ½xÍ#OÝ8,øJBDå ÑÃ(å‘ÖJ¸ÂÌœö£TÄàjǵEü$jK€áü¼§·Ñø|(s "§¬ß˜WšõLËjLýµû™o6–yüGÔ±KûŠ•<Ü*7Mž¾L ˜ñàY ?` yËWÊ÷A GNë·_ª×BÐ8þبòîjQžžóÙ—:àmih‹vË2ƒÀ<–ßܯjFP#9R/´©– "7èÅ;¡{•6ü‹µ59-|nÏȰæòR4Hg%•âU5À8ª¾ÇJÈ›`vÀߥ8Ðfü?%­ÆûÐn ‘Ù‰zaÀ<î‘ð(taœÚ´³ÕQ,Ò½¹ŠS<¯ˆ7šù·W}¡g½“RX­^¼ã5–>1ô1À—sù@ï‰QA+‚7³-ªs7 à-²½o&îVxJÈîW…}ð5F{pî™ü|!÷ù!xaQ]h“@Úòé–qô¹-÷]q6Ö™ ÅO)üã“ÔZ¾•‰ÙÓž“Æ)ª6gªÏÕ¤Àâ6;TJ÷Ÿyôjµ³œÒÐØνyDŠ´ ù¡ ´¿N;øÃfvîƒòi¥×eQÖ£àmban_¤Ëª-k´"+•GçÐZóÖ?ȲªEì³–öÃâ); 7º3Eè\Èàó—+ùUS)éïso2žþ\d´VË\¬)%)/òTYŒ,üI-%XL•.>õR×ÍX:}ö° ¿mªr´VV<Ûj § ³ÍÛÓp|>">5òC¿jM]c,q+–5\)ÎaµêBÌ)Žõy“í—ªilyÁ† 졎Ý%ŠÑå•¿¡/DVäæ:fpv7X“·ÂË(!û®;jËaºFï¾ÉO§Bâ¨Wq«;î¯Á“àþqLÒØwÙd‰,C󗺂?e»C°ûpo›oHäÝjúûã4DÂq\¡n`-|휠,pM”ù±ËkîÕÌS^\sž¢ v#¦ºzºŽ³…S©ÿÍþÉÆhí³B·áãÀ fóˆ“4÷•§ ÙjÃRjqµ!UÀih{>ëâ«´î>/޶w ×9`‰¿ÄŸ­‚StR ñ¬œUÄÀîʺê‚„ŸáŽ¢QEuÏg8ÚÔæÖv[ò3˜YõÝ›—Ýp¹u4ç]óÎågY“ÆSKj_•Ã|¦ønzqZN®/t½“!YÜš\™'Hç—‚¤ ÕNž—8`íâïâ è*Öðöº=ºAl±3P_.ŽTèÍôcôz Yí¿ö®…zè¯ü *óÆjÖ÷ªbRk¥…3¬ ªMîFcóÙoWq‡Ç˜a”•Çúz·¹ðˆK© apNµlžZíãHÝwÁ)’‰ë˜f¿)Ôz¼[H»r£åè–•¿‹#ý(ï‰HE''—8%mV–ÐówÇy­=ܨ7ÏRÔÇ*…xÆ í Õ•¤Rôz5ƒ\.y ÿÞ–%yõ~Òò?jcÕK4Ÿ}«åº:ç+=öÀwÜj&Ýu²n¨NÓ†¨ÈžÊ/èÕÕ+&Ê]QÐX2zQëûþs¾ä¡I–Ë 4,y™ûë«rX*& o7GV1÷Фí¯;?E+”ì»Q.§Ü«öm)ÏB}ˆf<«¼rSK&í"rB“ÕüfV¯‹ ; ]M•ñu´ðoá"¯œ.™œG8ji,ùE“BHØNUZ‹oã@ƒ!™}LYíô¹ªÍÕݲë’9 TØ,µ¨o5ÇÕ=|v¿*ìò泎`Ýå?LøafT²1 ]F£ƒä3óz@ÞšKYõË<+›qŠªi @ /ÕAl[yo4r’€²ÁB»vnFQæÅÞ[IZ7õŽô°F´t±÷Íɺl[õ)çùaÄÊ¢ƒÖÄ¡- ñÈx>ûU½ôLŽ#åJØã]1¦Æ‰@µÎ*a·GÃyé¹Ó˜W´Bed‘*ZB®v™ˆáº™¸%*ÜÓ6=7öý€³>¹ËëÓÎ\Ù¬Që2ƒ«+›*¬¶AvßuK71˜ |ø‡ÅDý³1͈n“« j/g¯&‰$šõ¿ÿó,å†Ú¾¥L÷ÕÞMѦƒ(Ž7ä\¥{ÑÈ‹có:ï”™gþ½¥¯P;¡¸¹Ž#ÛåÕ0JØ £ " £ äůÂzÁ˜bGßs¬¡ªÙÉ‚7y’œí®.Åf†ê;t@Ê|ö£›ð2oUh´P»Ïºy )ÝDfØîmÐb/Î’~;pþN¬òÙ¢-Ÿ®sôž¹Ÿb¤Ç—ìñ¯-Óêæ[i\êªÈÑü*âN*­Ã §’ƒ®ÎìÌnÑãÁ¯es¶§´=òZÃÝòåŸÅÅ"Úœ 6>‹¨f”v™4V2–Ð} _8j5®È7Ò›%þ0¤cßá²iè4«'%=›ë7í ­oêF¼xÒ|w‰ae¬PÖîô“ñI[¤“èØ#Nõ±–ûîè™^™{*\EËú4;vž€ú´>UZÌÝT±®Ê8b-ø=JÈASszœ8ƒ/á=µÑKR^IV¯Q@G¥Œ² Í3¯:ÀDÏIxùA?ÂöÑøí-ôßUsšbŽÅÏn¢”.aÜV¡æ¿ÓÇó¾Øu­Ô-äå9æû¢†k’.§ ƒÐƒÏ”ÅDÂqºÝ,ABj7y³cvÅ_ÚU^Òš°ï§\®Gºìû2\ûIÏg˜û™s2L¼X9œ­<Þ2,ÍGQ·ã˜Å#7"¬Æ_Uëñ wÝ•,0%úƒ"©XköÚ®qXw•½šam´ãìdYóü¦ìa.¦c¿ÛBg»p3së:Î#©~X¾Ù_äcîñŠ€/âLŸÃçŠD‰—%vN‹£¢OaàûHà\1k,/¯þfæmkñ«Úáƒ9ËXѺ÷÷"†<„è85%kLâ(J\Ú_êø$s±×ç÷Váê?c¿’Qªy7+Ò€ýe&I. 
—ÅKÔö°ÄESWQ8ëkßÍ»CGZÁ»®„Zôz)TåÎgWµŸ6ÚMß|7 áQdÙ£-«Sƒ¶#×(<2Žh®âxÊx£ M@©¿%+D¢Àê\M1°Œ÷RV:Ypý¦Räx¨É;õ«\ïÚº¡,ÿÒÓZ¸€d·}c=äýõãzÐ%³`Gxíwæêm/•7}°vîcÓéfióÝnawß÷ƒÀ‹áߦ¤à›%<)Pè8kDyêÇBX¤·ßÇmHHU ÜÂDyØLfhŸÉ¹G_峃ˆÄ%Q ‰1á•3ø.Ò’i(TFÅJ¯%Æ$ø.»ç‹5G ‚'©,lc U üuH6àµ`uà_ñø€â=<=ƱŽl¯IÓ£†|V¼@ï]ó•¹iNP;³ÃtìÎ0O¤ÿôi_ÔùìºÇº«¸:SØpTØ<—ï7HÛ™ù+…Úþ`¦VÓFs|G4Üͦ–²À7^”§.‡o˜Ì1ü>1#‚ýûˆ3Û‹ù@Ù5@‚éç³¶hO‹jñµ4aJ„”Ä¡‡· ë­ŽU±ŽÍ=Ó~'Šüï9+ÿ ¡bô&ömƒ¬œ£Åâwc‹:¨FÃØ bÖç¹_Êáåv› Â¹’…Èñ„¶Hú(h'ÆŸ#Þh,³ø6näõgä'o†Q¢^22{½É•sŽÖsýª‚dŒÈ¥ƒ†p>ûªS9ßçÛÚ¶î†nÀ‡ÊX'>u¼Í¡ù–Ù©?Ö)öÂ:„ÏðÛÑu¶ ÄàÅ®Ö=“íÝÎ`ÀÒ4¶Ì×]Õ·n·go-ÒÆãÑúݦzP5d:Ùf,¦EõØ©ÊÙŸ2|À½ÂàÖó÷U©ÐrZ÷g7aТ ›æÏ}?ŒÒŒghÓœœ2ë«¥0Ïת¿á‰ò½µ(¨tD—ý5€S·zÐvrzfqšˆc 3ßc:¾5‹¯ÍNÙ𞼯á½e&™Š¤.ïi?"7õ<Ö‹·C¡4á{Ö–Âãõ°ß‡þÄW‰2ì‡-ÓÔ YfŸBÉ}b":@¡a°3§üõ1>žmj¾=yýw¤{_»ðbs úh‘š‡*ï¤(§è²›Y[†O3 é> Içƒ\’ù쪹ÐЊ¦'[@ˆ(ðÒ˜%"îtyè[n­^GðqVpà™zèÙ¨µv¸ÉË¿¾q‹OØBÛ‚f\7Ê\—%ß(dbNV…ÓIlXËûÊæ,îŠ_éüí7—¦¨÷z/ŸEÓ´ÅÞBHÈâ0 3Ö wƒ:ËþDù‡RpÐÂlÞÔB5½‚z Ði1…c±ç‡ʲä;¿gN;òMîs¾æp}¯}—;3SÙ]‡òÔB»æv>ÓYô]Ì£F¡-a =`ÀzôåvÔ—õÜÜu,Ž£LÝ™L¶*·ZÚbô¾Š•üµWÌØ9 ‘ïûiê²l¥´K³”CQ÷­_º¦bsõ|ÒBKü"Fâà¾[2Áç3•d¦•A…‚Dƒ¶×ƒn"ryß"¯NΫ̛¦æK÷Ïûp»û›eXˆÚ·m‚ªº*kZ ïÂsâòº‰WE­v‘1x;€^H†• •¯èäܧQú²é™Ï~^îôþ_ZÖšDðµ²pËh¨$.Ì•tØ–ùID<´/mÑRKU øÕ V{¸QÌŒ@Þ+F òæG^ û½”ÂÙ·§Þ´w"ä`ñš2س÷ 4õaûÂòB&1J£Y.{‰OÃÖi0Ì…"*›U!²ä10_à‡V“ïGgÝjßÀùìßm¹>uÔv‹!1Š‚4f¹\ÅÁ< ,óÕÞÙuªÀv©óARÀ}åcƒò¯wÑB\Zú©(ðÝ”Õ"ò¯\Ææ²n¥DCÂyÝå'n]N¥¿A‹ ýÌ’XÜÆ- ©¡ÙræÇ,^ü>Ul&=¯EÞ\æ%* ²§Aµaàoß`YÏEOWö§ 3L'­”Í1/ gE¡¼mú|n멃'’¶n×¹dÔfÉú­Ú* ‚reÎ%‚ÄCOßi…hæ¥tÍØ6\ýû4©¦Ÿw«¿vÿ\hš¥å¸ÅHyfÅUkwÓÀ ­íµY³×Äÿß“éÛpà{çA Â¸í©;©‡Cj—SºTEŽž—u÷A˜å©ÁSàY¼Æ·_ŸF©ü£û– #là~è‡ÈQãTØ _·Í%öÕ4ÉÑ–“%×<ŒÜDUQWŒ¸R®/3ô؇nØ y.EÚs©¤gæm=u)NÝ–¨T^ÃEŸóàÓŸy¹AÑ 641èª@6&ê"UÀÒEÁ»g¬=>Š¿NUIØ ^m4ºº¬˜­b[‹þâæ‡F¬±S!L%–"› *8ƱB*eãü¢(3pJ×ܯviƒ…¿aÐ$9öM¤í"Yä!„oÖ‹ÿZ¯+B®Vcð'zLïä…}4|Ô cß–7‹oÇd‚DèA󛲸@[•Úi~1—;P¥l#ÎLŠþ£‹ëÏvI JÀÌ5ݹã_Ó†ï#‡’$ ¼Œ¥jPÁV&f{¸#V=<Ó^izYBׯîÄv—Fß Ý0áeÐvÂI§ Ñš=þ…œ\xÛøOELÿ ¯\ÚˆAhÊiÊw¡*}V/”ÀF> ƒ{Å7ÎAD5g:Žø!ƒ1~Côÿs"nW…$'œyÒÔð¸ëj'¢,ïá®Xð–øX@OL<‘ûGrÅ;TAÐ$¯ÒÄŸrÂÞãä>¼#p"ËÞas ]óyÜklO:ÑûW´!Á–U¹”R;hÀ›æêË|µ?¥7Ø÷â åÑÛH/†ḩt§“f豕&UK:Þ'ËlG‡‚Ý23-ˆUæ&iÆrÍØä˜ï2Jè‰/Q·ít OÓ^ÙxühygzÌ;…ç«õ¡oö¡±du_KÌ43Áò+íõ|΋ê`ñѰY„Ür–&êè¼"øžë¯ºù¯S8ó%«B®Óâá9CÝHb:Eõžâº¹iËdK(¥ÍP[vÃâþKÛ•4·,é¿â[_DöåhÙîñ´ä×ýì{âÝŠD‰„jLýúɬE¨LfwÄÜîÅJªrû”˜@X‡Œ‘ð‡ê÷ôе)áq½|Í/¤lŽU»‘|pL6‚7œäÍ˲^Ø\¶xl¦ÎSáõ×YõbÄ}³ãÖ•HÏÅ\.‰´ Xý"Šso¯\«." 
'tƒâªéúÿ 5còòÁt ½°™ ¶"D‡ùØÂ n÷º±ž-ùyQ…ÃŽ¼qïß @"´ãhg1“†ŸÀ²¸E&®÷&\|jý\Y8¼¬ïUƈ~IŠ~†¯¶®6C0ƒ7([üX¶IÝèîÝ ò.5GT¢·7 jÙÄmßÓ“ÑÖn1€™—JÏÐÏDC~Õu("8ã*²žÕ©ðV‡n‰ÓBwÓ”¤Ë §1ˆüûÅX…2åKâ§E Ú±ýo!ºÜ6n5Œ×±˜“= ZÑÄXTC)©dð;Âêù9p˜%qG’Ôý/.!½ñ9r†µ›:b°”¿“7í{Û|4͘1I}1nø™yo–EQ’IG[·MDH|YimϺ³‹âûÚî5£"`ÙØÔ÷¸ú0ú-~̤á'ˆQ?• ò·Pœ ±{o¨_ä„lº‹ Ú…AND\o&ƒá`D$¦"crk*8’ëœáH½Df¿ó¿F}~&{²úµV= ϰӈ¨@Í ‘)¶ýe;•"<¶ªk‘(2A«_SäD—³Šú°!{Ý,þk¨×WàïCå%Ò¸Øà°Üƒú˜0PÄZÌ{^”Ðà sš#4÷$¾ZÎW„Hn{éªõ†±J!+²A„ÞAaÙB)Ò‚ìÏ3GoÖ A d*gõŒ%ßóÄQ&c/[ð̾­ yhÑuÒª²¦‡*U—Ž€ÁE Øb, Ò¬eë@m¨ÑÞZãxÐÝá­!5®Ú¾—¿êi¯xA÷n‡ Šv‚¹¨šn5;„0N‰Èd·Ö˽²`ß]K„øýj…sÏ;TnÈÓü1GO Ý‚$‘ ãC,Rð:óÕóPÑ}Ñ`¦Ô‘ ]z_½2ZåNzù¹ÝQDà Ìj»ÛýñdŸ’HÐĵ­°!yFB '†h¥±Þ;3§eG-õœ­4ÌlY§2Â’¼2žy*‚yµõ°[ûŒàqã 3J奛ËF/‡=çoñï¡ê_iŸ³_âÍ[‹¨@úˆHû¢™×·gÊ]ÖWvrðwA?5RþH º7±T>®8Jü8 $q!è™P™[\SÙfehz¦N£që·ð ôºA ÅbæØ~Uåšls¤ê‡N—ÞˆÀsg³\ª¼-d2YOûѺº“WÎf="Ì?ÛÃbt\¡ß“" R_6dY½X¥ZglÛC§Ÿ*YYv¾™†ð´„" ÊMÀÝšÖ|çk¾]Õ ²Á3À(ÁÇðÒh.Ó›¢ŸŸ%a‰4Èì2Ö=¡8REe+T}pŒÒ(±ø2LÑT“4— Ë¼èØžŸ-ÊÝýd¢N€gHéB§C¨Ê^“r\P©N#pÒ^v»±ƒ˜6û—åuçy.Úñµ5¢šKm`7ÎP÷ø%JŽ© Q¬·s΂˜[𡃆´YŒzÀÌ dqˆD–WŒ»µa•Ã]ʪø×%­©£à¹ífäŠ Ó¾}ÝÞ¶¤-=qóÎgça!“r6ß º:tî·tφ§'ÆÈÿÜÚðÖ¡Í{ŽùUÚV½*)H+,ò8ICQecÄ›EHcì-É+«¦•=©ö2“‘rþϦœY|hªoX`JÌek##™B5pÀºQsöë]“#)0g)Oc;ª¸0R&¡Œ²ö—mW¬+0á[õwì¹Þ0?Ç™»]ˆ —Ç«d‘4áɽ8§JÞÃ/¾#ÜãŽÒÖž)Xdok7±®—|ÿ°+»k[ø8Žƒ, É®¡¬nt7Yƒ¶‘7Ó«Fê^ùÞ,ì™Ã¤I?wÖ÷óÚIèæœ‹”[~næ20grqöþ†Úñ&СK„V["FßÔ‹:påw’E~ âQ@æ«Ô&v aíöúÂ=†û0på+Ò,ŸK»¸Ä§/L-a"9¶ÈZu[Â|c§êj«’@t^·VíThÎ1‹?ƒKènÓÖåânh^™ä¤i’±dù²ÃNLzPHÒ ×ï¹m:³®”)$´œ#…ã“;«hŒTÉ•[‡Öið¶HÄÖú»»¥¡T›¹e¦oÊ=Íóe  _ÚNu‹Ï3“… [ Í?!ù.FZ×µ{rúù<öìo«bƒÅ~_ª•,”îÆ [dGÊœH³¤iœÊ’–3IƒÔŠ{uÛ”ÂH¿M·:bo9‡#ŸCÑ 4^ܵMû¹’Û¦˜%|Ñ÷ .’ ¤ä8ŸòTmð[¢çxAìân:aG'-ÂR¡Úa´ï³pÉ÷µ}©™™ût #s7´q×Qó…hÞÞí¦ÅùÌthXm»ªãü4yŽ'J]lÕõaT›è-ñtîèp™ù² N}ÞÚzËéþí„}A'y gmµöE1¿oàÿ¯È»éò`8'CXÏ„Åý´Óœjoúq!kìJµm{å10?gñ±z%—åY¾Àûå²íûÅ19rÃ?cQ/à5K¼ÇÖ=â\]‰óܨ]Æ{3¼§…'àSìZM¯3G øõ¬ÍyîÇ2Mß-:Å{[çØôlÐi/fõ.ûT¿2õQÒºö•Ž»[:ÆÐý4åý*«úÐx/SáÏ“¶½±îé…>ÃÿSW+zÚye¾b®ßÍIúݼ<óþ%îÜOáDE´9µlõfç ²74æ™{›¤ïã(æKõêoŠ>¶_dI âÍ‚)ʳœF÷½¼t*1‘A²¼¹Ö4º}±•ÀW5Ð!†YaŠÄn–CYâÜO»:ÆÖoô ÙLðd^æ,Fmúp ‹@HÓj òÿÞšU{¥ÚW2ÌݯuËqTÛÎuð¶DÝ“ŸÃ7d"F`õª¼•±p'{«#©jk&|f¾ 7„ óûÄxÞÐcm´~kNà¬1mˆù"…YCP>»™s»¡ëÔÁ+Q¬YÆ4êö 90û÷˜ˆÐ¬œàþAÃøõ\$yóyiXÄv”àVEòÜtjw@ñX˜ŸªV俱dOÂêó\Íï­‰^ 16“3"8W?—IúX…[UVQéõd;,q’ ‘Ô8pïVsês–öÂ/vOƒa~S{¨ÌèÃû4®[ùiTûQš‹xÿp³§æð§7§Õ;´ˆ>Àå(e 㣹¹Lë?ƒxÎ 7ž‹ä~–'"šncå&Ü•L­¨¢„mÓý+b6d¥vÛîWíD þ½KhaR|C”Y(‚ü,;Ý'Äx™q$ë k½¢¼ŽZ6Vµ‹}érÈ–Ça˜‰–EÊtbÞCíFF¢è™¯êFíD±}ꦈˆË™¢ʦÐÊØß¼»×ð᎕èçãjŽŽ6 Ò ‰E,Ö'Εh¹9_ز:hpÿ¡}ŽpW=N£°H“"’tK8z­tèf)!¼©At«7*ÃËÂk H¤4¶ÞQhžWlªì·ú*¹, ˆ2 ¬OAF|«K®Km¼"ÏíhO×# ±Väu„jÍ•‹/!½Q‚¾¨"§ ì¹½'Ô!vÖºÆmM©þŽù·^ÑIž· ~{GŽïeEݯ­µ²{ƒ:1臊=Pe‰ë¯¶Ú ² hÛ(ÆÒîû›9;m¼Þtľ¥„ˆl‹¬‹‚·y_îÞ }À<«z+Ë%Ö€ŠÔ4OTÛhDaq´ÑõÇõÍ}Õ÷y"Šü(E+*ÝY™4¯›IÕŸ²‹Ióç?ûv ÅbÌ)LoŒœÙbj=[y;¾õ¨¥ž}±<ÌÂTµ4ТªŸèE·5ƒåJ¹°Èò Š„‹û•öS6Ôé+µðo ý£pMe™ÊTtœfá9JoqEkuíó@Ö× î†a{î{¢nU†>§ ™ÞÃJ6“Ñ^2ú*6êLÈ÷nhJúF1îŠeþÚÏHQÚÆ¢ûKµ”óä=ÊXõº¡ý¶¸òü˜5æceú)¨^£(”÷°™!Œ/¤m»>É…-+ëðù(¬N—äÖßN2;ÅY"|_C»±Úè†pKÛ›u¶·å»e²cúò.“¢Òôû%*ùÚoÔ)Y—Ǽ¨"õþ¥FÃŒU¸9>lŽÏré×jŠîÿG*÷<& õ¦³0i®´/ÚSKµV•;#‹ڴHåê:-ƒ°Ýuxfil¤nÞ䋿ž7\ÎâŠ8‹"ÑÓ!©ÀÓé±%$W'#Û‘+£ŽaÜÚð3 ýÔ—é7>ëz§"÷ ¾”‡7Fy²f˃ÐnÃÃ&Ïo ÞµùT‘e¹È“i×n:M [Û£S¸¬»:ÔÏzšZ/ø>zî¥ì9„¼·µEGí™*îDi¦ý\¤x;ºjånˆ¢®¡dl;HŸ¶Û•aRá\˜dÊpb}{á ÍÈ’/ô Ðì™Ý© Ÿjÿ.3e’á¼Dºíš¨QÄ ‡ÿ†fd«#¨ÌUY‚kÉIvp'½ üîgõ°Å±” P…#£Þ]KM>Fw¯}õ8(É{õ\µMN,—ÇÅ´è?´K²xûÐ"=‚¤†âØñæÝÑ5Up~1âüòBô½nb÷ppõH$"´K¬_᤾ XS ëþ;VãóÍùD(0- N™M‘yK<ä¿PJÝÐo’B6èPtËÈf ˆctÄ1&bn!É+E‰à4¼‰ÂýºÏÃö™!¢¦:Û×Iw q&‘m@0)Vðq(x@·–í.ŠìS‰"Xd%s;'ž1&‡f»¸«:ÎS2Ì“$("Ñ»µ% %w«5&FdÚU{ÙÜ¿fTª§'Zgƾ:¼µqU*d‚£ºÓÛÌ-bÿ|$P ‡üŒ“äqËDfGÔ7«£ ”Ó"ç¶5ú*;ƒÂ-ÿnïõjYI÷øŸêÉsvÑAß¼ìÆ„oÔ1¼&`Qä‰pHñ´™˜(Õ-ÛžPÏ[* 4ÔŒüôtIy¡¹i17ïPõÏ<ý·z:ªlí³$M#Ñ¢mÓOQgQâ‚]E2rÅýÌ1õÒ‡Çɨž‹PóMC’Dq*JÝO†Uéé)Mì }ïâk/n#cø‚Ô˜ðoÎ÷ê!Rå…zqƒõñLdñRðL7œ;·«î´œ…Y&½#e&E“¶kûÍ!uWdêyòŒ¤Æo¹×ºfÚõãz’Jñ_sóî Lc¿UËÔŸÁw+“w:¾CÈ !85ÊH'È1œ^åýÜjöž1¬Ðqb.Ì=zgi–g’vÝiTCKÝØ‹¡P(S‡G6[Õ™ÿ SÍl«Õ5»ÖÈÏü( "Éþsƒ3‰ƒ™ILêij ¾mqÊ,„éÓ7wräpI7 
W{ypQVĹFƒ(žÊìÆ¼Ãìl»´ßxýûþ…“tü€!r‰öû/PÝ©ÅÝHaaž›8„.I´ 5Fs©{>ب§¶ÖȳT—[†FúeõÊ—¶'× CWNæ/#^ŠþZ³¢ˆ># Ô k{»j*Ù{Êh „ŠI†é!œm/iÆOÈŽ!WG‘œand¾/ƒº ŒÃ¥õvpﵡ×ÞÞÈöHõU¹Ótø=Õ•á+Õ<ŽÒ4 «>­ÌÐÊ–%<2^OL¢R-ÙÆ÷ŠÒ(/QÔáÉx)+2_Ôü£ä²õÖm·z¥¦fñ »T8:ÃZœŸ9&-gS ûMRÿLlç2`?OóHÄ,Õûšˆ˜Í¨®nLµeŸÉf¬‹rO߯?Û™bóÙãc†ÞWé¥Ð22Î̶z"Êð7ŸuÆ¢¸¾¨g=ULž^Ú|c «ûæÝ Mw|RÇi:“&ó4K#Q”-|@»¸©·íFº!¼r#Yc ;{»b¼ /h,ÓôØMlˆ+™Aìë ¥™eëk "oáíìµ¾8?CcO±Ÿ&Q–D¢zÔ‡z»C×Wîq,J·^ßDŸ(Î’è&cLsT4Ë2\F öµƒ?Ø2×ÚõÔ|r£4EüÃÕ©Gº. t* EGùkMVnw'Ô<±×Ãô 루$O®´k˜ø¹¤_vý3P/ãì.n ˜=ø©TÕŠÑ~zä¢S¿±fžžˆê¶ x¼$>ó£»'Þäöy&k Ú†Önþcn4z¡#aÜ+÷Ö$Š+½ƒ¨Aeêe[ºŸ¸FaÓ³’^²ÉâÀ‚dŽ"¾ÑQxáò¹¼y(ŒÈ @ð* ¼ÓöÏOÊàfUG¯ì+Æ—•Ë·¶¨l°¶ã\b®‘eG˜ðâÈÔa’>š|"˜å#šÎÀ§øº;¸Opk†ÿÞò-K ÷»³^1þÙÁ¯(>ÄÜÃ4Œ‹Hä{£68ùÖ|§ztèÖªñÕ­ª}+DÑÊ^œÃȨqónü…@qú{8ÊD¤-W)Cê7îêÉzãÈ@š©aYÀGF+b)S·¯Iùà¨T¤Yƒþ˜X {[øŽ‘>Í@lìT»’nòyÛHBã #j±$W´#H E’óìL ·Íísl– ó«’mO?«ýžSY5Ö3T]c ²ùw‘„y\ˆ^ê%ô"ëe ›º ŒzåZø×gwÖü‚¹}q&Hr|œ›!´—˜âzoiæF2Å–©høíÅmÛï­¥ØGu¼ˆWŠ·(?‰EÃoÄú7£W¨ûë´"ÞÎèHË’f:)²Íÿ`wzTžû~XˆÄÛRyÈ1pã 7óñ Úg5Ý^6OÆT‹¿w‹ï\–€7%IÓÿãìJšÛF’õ_Ñm.ûrl·ÛÖ³,·ŸíÇ›[‘,‘A€Æ"5õë_f(‚@e!=‡‰ž·ÝNU¡rûÖwW‰¶h3nû¥P`åÚ}QÖI¬Tñ?PùíŠÜR»¤moþ³ª«µ°4‰Ÿ$©°”€û¶Ðò™.,'aƬÄ:çÊ(–ÏbS“8çEo %Éz{s_ž­Ïµ¯å4“(ñS–…!Š7ˆzÚw¯j"Þó^Ó’,¼?S¹Q„ÃñÝÞ ¥âåYš ž×˜%2„w24/iÊ^z+68Æ-¦P¨‘`ì‡ÆÈrhì6õV['M † t‹¼ä^[ŒaLº3ãùï`Õ¸ œAK‘²tÌ´ˆ qh/¿!8€^…•­µ7PÇ–S§no~èí×ùã«-ü4?σ² 'åÃs‰ÿeWS4EKxÓ™Ïý^˜=+ÔW;5-œÂ(F¶ÚÓ¬ ê'Å`æƒ,û­šÌÐ-ïtJ2Šl`¬9ô΀-ñ¢Ðã­í÷:Ùºø8¶µ9»?öÕÞ=J·gêëý‰¥]}ÜÑl˜¿”ƒ<5º i¿›® ±4aì¥qƳ$n‹ƒ¬±µ%g¿#Y·›¯‹ø!ÙÒè{žÂÉí¶Pߤ½Ù‡ï1‰³„2òêê] ö‹ÈÇ-~.,’5&<˜…ÅÏÕàÍ ·t=ºI#h”„‘Çb® Þ½CN4W;²|†4‘¬ýØ­œ<—×XÔ)‰yÕpΪ|ô9B£‘C7ÌÚ$¾HŠ“’)õ™–ê%z¤"»¶Ëà´r{ƒÿGϧþeà}M!Ø;ò$•›½»í‘z‘N› AUZö™§E¿±€¥-|õ;Ù¼Ö[ça W[®išdðærÂÛÉ-fFxKI´tÙCA׸í ^é·Ù'ø”ÈâÜ®>xiPœ™µóõC›AùÃ# þjN1ÒXq~Èû íÕE MÔôNì ¸CHŠº©ŸÑXK}Çi’³8ÀÕN3!c…—˜w7ÿØèAê7E'BûÐ@å|’E'"ˆ#è‰yž1kZí£üLô{ùLcL¾kåPr$¥ôÛΖÃjAÑ4Ó4…›ÉÚtÃ8Î7™—±­ËMÅ„e|«i!Œï]½'E“DÕIT9;Àµµ¥¦óó €(9Aꑆ»Ghé‡u56Æëüm„Ê?zËn_¹3ç>_ïk æK|§§C_mj÷qê 5R¨íáI›‰·ZSÒ-ˆ…—¡©PJ–^a ÎJ`â€ëT¦ßKâ ˆxh°²Eéùi®ºlêB2…N¾+y*Ð7H;íFœa J•YîGYž±Ðí+8*å³iÖ2oP¬—×v ,àdQq™)PR©œ; ãNÇÆèäÄÂdâš¶®Bóämõ[K¨÷5íïg‘ÔŠ¬·7?ÈÖÀe^Ñw2ÄqÀF7Š…Fø+(´çuO5°¯tˆ›85°ïôqC¬™~K?)O}5ö °Ëì•ô®èDNª• g•c"+\·ð~d “,ÏyNxp{íÕI´Fº2t7¢âïeU‘5Úížž•®ì¦š¨¨—³ÆŠ>,6+ËDs榣öu'_™ðçŠ=:Õ¨<" Ûùû ­ª‹i‡?'yœç x P t cÄùŒ•p£=ßÛ:‹xÛð-N€5Š/diÃÌ󓀓 îì+Ïœìƒð»Åƒ§ÑomõèThݤšäü9ÞZ‚L²$çÙ¶û[3ÕëRÆ!åíÀ+Ùà :µ–º[)Ï‹ì,A¡}?Î3Ïã´Àƒc¯»*EµÞ\ö¢®<æ}EºwcYL}’Õ~ZE_5N¸)]”Šs/ üŒ% ÿ(Ož™V ŠE»Ç}•)ÍVXäËîçïùi_ûÝ“‡ã°~û±ƒ_¶* '^‚#E–•1ü¹Elž~—XuŠ$f]Ô/ЃQ!ÿÆã‰©BM9FFصŽ`’EiÆrÁ€.šˆ›³<îjè~™ÛÅo}G–8Ú‘Do. ¥@à#‘¥æµ/š}d¦°CÉ‚P™Rßv7ì¤EXo²WáÊbÃY¹ôÓ,Ë–̉X­N©ù2ÊN‹^~h;› ÒB6Ôp…Aô‘~X’0„vÐcyY6¸®²´4ÇÙû‡’R‹Äa¶è³…dð/Ö´¢Ø‚v':[æKº ù3yÕÖ­|לhW¹ TtTLeÖ!̯3»—ø¾Ç*½¡>) ”Éu‹¡Ÿ‚dž‰aWâÑTtf1ˆ1˜m[ÞÞ|ìË­Æœh!»…xi>‰òœ5ëFÅTY7(+«"&ÅùÔZhÝ ¬NõH’}‘JBbØŸ¾9 Ì…'8¢ªS§íŸmß‘s`m¦àñrà°‹!ûE«ë})‹Êùþ¶Õ²Tl)3kÜÊé_è26”+á¾ñhx‹B»øÀ,6²šc£ ¹Spﳑ¾¸¶”ï!#b­/d ·EârsvSÀÔÀûV¿Ðzå3%ÒqëB Ï*t‰!4Œ¸žlKû~„ÈbæD£·ìzÏúãDTg^;£j黎–ÅÚ²^‹‚ ‰à¾rR?œNÑ"Ñ´“-Q£C{Ó1ýŸ¡u'ïì²–Û¡²Ï9‚4RÀ"F Ø.Ð! 
žå|Äk2º/P½·“ÁÁ(<äŽU˱" š\Ö¦éW…tw'Ñ™óànkÐâàmi¨þ7e©@j|#g€ÑáÅÐé† KùãµP»÷E®H—Žm+¯ô­€ ÕPllüí{Óæüê <-‹_Bö‹Ò„%»÷|Ô3IóìPBN¯2Íx}aYÐÀË¿*¤+GÎ[m +ÑcQÙšß•r©Iðü<šÅ…?®’n3Ûv_.åï¸Mþ½––µŠ]˜S÷ÖQˆÝ—°M»ÁÍž¸ŠÒZ[ jLúM¼^ó=­Ýˆ"Ö< XËv¨ýD)¥û2S2/W¸k5Hšù±Á¤#§ðæg gqͤyžx9‹g~›“R·ÀÌc>Kmmdz)²Ê®(2%ùáé›bhÆhÿÂ3CUælù¥|ªQ¸jßbcËÄòþ{¦¯;hÎ$ê®`LÚ+eYÈ"Ë/Mxòx«r" y™>œißî¶)™7ôƒ°Ì¿)~¤Bý, >D^”FPtrŽð×afqÙ€µÂ]Ÿ‰û¬ð~U—‚|b”´ÜÐ),¨Åðåy,ô|]n…{˜¾Ö#!9|` á²ýS¬>½?{l-ÉoOkYÞÞ .ÍnØ™~Ëõ<:”‘ã<2¨¾÷ZHD–BĹUÞQ®¶ÅaÅüMT“³ ¥kŸœiÓ%Ÿuöï}µŒ`¼Â»Uº]#ÑÞ\Ûˆ~)à¡dïì0îÓ9Ã]k@6CyÎY:MØf/‹“ÀO9GyÜÍwéã8q Âäá?àÏͲþ^R»x'7•hnoQv0^1~O¢ áן%š¢¿Ì{6·ú%…àNñztv®-ÛûRõœCno´ù•®^§ ×€FQ8éäGß8ñ2sW©ÇpÊFžg P÷¸Û;…D&_[ÝdÙ±pe=&eíl.ã>–f7ˆ®oð4WW“X+LÒeê_8°¶´È¼³¨žEAæñ:Éݳ‘sti#¯=ºíæ}ZÉæÑJŒuíæ™ïñ¨k;¤ü*5Z#tº¹eÝóH‡…°X¬~ª§Rãs;·ÍÎÜÎs¢gyâ³¼:Ï ?ed-+ƒ\i}Æ<×µûšö·ø±›jWÎ!Š#söyj,ëV/‚šó\r¡b+*Íœ 4ÙáŸÍÛuÍl5Ö4Ó¬ö<–L†§é$oֲР„¸K¼1´Æqι·[‹åSÛAÃÁ:Ê÷²|´qÔMâ%S]Ïýô_ËX)q½;UòÕòÅîSÝl¸õ8.³,NAjŠ£ú,¬qŠ¡a¼—r6‹Ø‘Å~æóU¨„æb§0s¤ºÌÊßh²îAl7(qÍ{z H¤…èmþòcØù0+ÿ&e«ŸA‹Å鮚z½?¹»ZRc*¥8èë1[ͺ֑ôbò»†´Ñi©“ÅUUË8UªV(u!¹R{õ¢ˆ´ÒØaßÖ•Åxu@6ئ(°X²ÁŽt¦£:‘ ƒ—5ÍXfePgak”›¿ÍúØÔ­X3ŸÕoÅÝ\©8ï&ºšãü¡)ëv« ?‚w4a­«Žë¦‡—ލ2ï¬.5+Î̦ؾ°!‹ ‹ôнè»Wxx¬« “$dÕÝ¢¹Y:ø—.J™:Xðà ©ß‹É4Q¿TŽÑ‘æ:¨ÎÍcIëŠF¬K›%wê=4¹bÍK/«eþ¤šdÜÞü¥XæœðÆqƼ·ofs”Ã5;¿¤{µø’dÙ6¸ÎÕ’'îÐþ¼µ8Žòܶ.åF{q±ZÕLŽÂƒÜZ€Ä3öoŠ|Å¡X*VÊkþ|—Y©j3Ö2ÊYqýÔ y ª—7'y;E;ÈâŠMN\P€ÀÜ=˜´#ñ¹žé0þU¬h§Ø÷³ y4îq6ñ÷¿ {ÔC¶E",dâ(caT@´VÂ^OÝ#ª»ó†1ðÈð¾®':Wƒ¶Wau5qãí,$;®DÝC=ÍG—ÉÄ MÇÆ‡‘g÷Gµï« ½_Rn+ÎÛÑRŠ…Q‡9+»Ùôœw¼¡LÌéîìÑA¶|ZEŽºœ—­™}ô„ò.X¨Ze„Hâª'ºÚ­¬yJyÞÅ?DדßûEóJÝbW9Ð<6”~€j:qÂÒˆß`éOoìß$‘Ü•êÜy«l¹îèè½\ïI¢ÅYóIÃéáM‰s…_k?Wa•µlb^×? Þ¦°hì, Ͼ·7oÖkêÒþ¯ÆÐqg)|˜q¾#m"2[RïêV¤_1Á5 "OÅj¢…ަ¼ƒ2›½”N³þÇÛa×bçÒxi¸±]·••‹ízÌ0yPmAÞXÌDtÚ&fÁ‘ËËÜ—äÿÑ4c`'4;à­@¸KäþÁ¥UÕ¥£M,—ЋÓ$e­’„í£{R²¾î¨äÁÙеŒN€ïûwý¯ž*_þ’ãôÛË|?ÌY„ú²®6u噹Ùkkã:Bk_ðƒ*)[X½<åäã\ÐЖ'„—Å,á(Ç ÿrŸ‹µ¬:s‚/ÚFÈÒÝ©ÜËÓ'Û-ZqÙŠë‰T‡¾ÒƒÝ¯s‡IÁæ%YÈ :=6EGA¡ øè«ú…Ç„²‚ù?L°Æm„°9wszÖt×+#°7ÄS^ÕlÅÜJÀØÝ‹g& ÿ¡†0IÛÙO„ Sbœ1à Ó2?ëÚDJ®=£¼ÓïÈtß‹¢³€fzöÔÆÈž#´õåÌú§¹jí%M´Z¬–Ø÷- @Í$>ˆÓíØhcØ®´ëÚòôD~ŽÂyœÜÑš]ê®×eÝ)åYÄj*7½fyGëÐoÁíÍà 0(æ7S“õë,‰Òd^Â&ï:Ü3ˆ’°à€Š ® ÛÕ¼¾÷“z éõK±¯KÑSßÕ/òöæ«J\¯°åúqæ{1§€SÄÉÆ˜B‰6ùÈd>ÿ(2Ì&´Õ•Ö%ê T°FZ™ŠG;Þ}L Ї~âg¼ñÚ£²Ï&F† ‚8«˜«_Ù¬møÃÖC~n8¶X¶ùõákË2@v#k(¼ehÖ¬^ÌÞfÚÆ FÜ™eÇòòÓ¢šB˜@ëä'®#&¡2v7’XÀŸŽ¾Á:·~2½£RÖ0ð×p~¨U}#s?Îó”5=íQl××?{í›Àëå…mÌôSl‰¨Æ†ÅïõbA¡ÁÑ%¡çs6b( #óu|)*è ø—Ý'qbÒ?‰ç‚ÎoKÑ?•ž£¢¶/.ðSå •Z½Íá&—׿>©L:È{Yèçó•‚WYžÖy–!H’SRoÄ¡…/®$Zy¤©Áã—¦¼6¾.åÁ’¿­vÄ £¶Ô0S‡,Yÿù`®ò³/Ì¡½¢ä "ñ ¶[‹>Ä 8V¾ØHÇ(r|Ý@g¥,ͧQ¶;ZÙõԞ풲ϞLLe–›Aß·mQ{6ðÆzæ²l˜å(°Í*MÞ¼h™Màì_g8«R%ßUD+òéÐÕÚo'òyÓ(dmÿ,ä¤3¦E*U/æ<ÞF@ÿÞÉ~Ú˜Ž,Èm±½'PIB÷Xzèb‡,Gœ´KsÃs€¸Ë~Ïà þhpÞDw:ÍTSñš1Hû.¤‚ ‚oχdÀˆïÌËÝB'Ýò×YnŽ á±È"˜ì@Ƙ‚3Í|y=æÅú¾±^Î#ŠÊ•26ãʶV LBÒ]Ýšz5ã]——P8£‘Ý* Y,«'ÐÀ@B_ ¾TÎ`.úR`îˆù€Áv Ò0IcVû=Ìã¹ó‰=¢/5ov‚¢ä yvo!‚;ï8oo.Pì’]V…i’°L–6.¼p_vÀÚêoÆZä€×‰—bSè…ßgÑì-Ùrw°+”…aÅ)ϵNßÜbM CžÛ7·Rž± yew3 ðøE­~«6s’û~–Äœa/º]e†»ÂŸ–ù”ZâŽãÙliËäAi“Êå€Äzçw×S°4ÌŸßc7kG‚ÓpXÝÿsv%ÍmYø¯è–‹ˆÂ¾½ÄÖXö8±=qMnM°EB ÄP¿~ÞëI,ýš/sH•®X-t¿õ[6gÛE¦&ú‹…jdðƒj1S¶çœr¥­!Ø ~íP꙲jaªü~-å<Ÿm%Ý+ÈBiv¨ÑW­Ïl€¥]ã Ͳ(ð91g¤’@Jl!5×ó¦[wŠ'HÓ°àjÚ7[4¿69qOÒøYR¥¢ç†gð,N¯ÕûC7 äÒ]£û‰“½‡;´@¡ci» p?/K8cöZF8Iæ¢l÷òû'año½e–õSéwí°îìÅÄmñª—Ð眞üÈNmÆ> JêÈAôqÌM˜Ø(re }³Æž3ÌÆ#‰4òÜ”ÅëjjFHQõj«¼ytcYîe,ŸñæøýNàóqNµDA†òTùfÐÿi‹6UÜ)»&aE°>œi“÷wŸåº49g®Rã°$èë—ðwôK£ ´‡l.ñ›²´M“,'E +¡.[]RœñÆK “`o±œ‘(m¤Æ1æ)L#«˜·8yWwÍ¡¢§º½ðqØG”‘ºÁ½u¡ Y3—½èJg#NÒ 'ÀsCKæEÌAg Å,­½aS¡þ£®¾uPéV·‹üjIà¥,õ-­÷žšÓ{qQqâÙïi Vê|&ß©m#ÁûYDQÀbÁõ”öü,{a<år/œ¬•ÞCü l¹o3D×Ûñû»¡ýµÞÏ «4<¤ÿ8‹X ‚:?ˆ—Bk±Ué0uY‡ý.^l\ñKÙI·‡È,Þ"†>Z…~à³à=‡ú„‚q$vÏè-œu}b{¸^>-Ê=³_ ~áøÂŽq ã S#K̹UÂæa(B¯&ý\Én£HÓÈPéÃ8´=÷¯æÊÒJb à€Q–²²¿,eU7™‡pô0äÍ)nTÝï”ì9.TŠw‹³ž¥5ÌÒ ö}&ëüea@dÂÓ}ÕE+[Ã4×ðÊüa3u÷+t©+ŽSVèú1´À.'+BӔǬkEé,•&&Sï®ÝXšßwf8dë¥ÿ¬|[$| ˜1g¡´TÜ)sÔT`v§”%ÏŠïSÝÊ=‰ù|@{ª`Ókíû»‡nÕöLéÏunoŸ ;¸a¢J?«–˜Õ¨”i»(²¥DÈ7]€\PŒe1_àO nÄCF¬Åô¶–%\qçTCØÚš%nÎBøêårå ¡É%o§Ý±UÙ¡÷RiÙFø^”¦K]\tíAV„ÙYßö‹ ò¯y*O­e¡Ì&Èæ 
1ÉS'aBTuN7cî—–â„r(;\8›¿ç®G´9h¦Ìs›ª-Îæ^*•OJYìAñ,µ8L¡H –6ÊÍ¢0fÆWòEVN; ÓCW­]Qù+ü¼¯iÙæ÷ÂRÍ\è&‹‡‰Ë9Ÿ‡à}.çÑëÚdh6] #Ëš¯¿¼ÓËFêlý*òþMõ¥5õ“1|”Ùb½P-l»žîùøs-xÅ€Z¤ïi7Â~$¼˜ý• qž,‡­À~ÆÙM»“!Ü¥’9³ß¼ØóCwÎÔùlMŸÕσÈÖµÈ5ÌbfŸ;iø½ÐËxª =üº ¥|ó$e¥Öúù–¹ÔîÅ©³Þ›«$§}8°>¥=ï§î{SÅæ8Œ–”ã¥pq0Ïn–M½dúFÿ‡“¥ø&-JÜÔ”%êÃÈM¯idU4‚rþsYNÏènI‚V¤þ{©­A˜ ¸¨ÜGòƒ êºÇF<çÅÖ¾uƒ ˆyì%ºÆ8õ‹ù¢B†Æ±&oèfƒ™™ÌÓTŸ}Óý4€ç¸K©>ß5ž ]FPƒ±éÝ;y-Æædá þè÷ÔÅÔ ø—ìÒcéÏX~úÈ‘†‰qb욦y¼‘ë‚ÐP}ÀÙí# ‹ZºÖ¿¿; ái™¦¥t-­£ëFÐfñ\¸A™S¢T@&KRâ7%ïlï¹Hó;þºæéÕxtí¿ç±(å§cA`/‹¨¼®r¹Ï‹š©?—Ÿëƨ¼KT‘ã·˜‹ÁiœArdí¢PàŽ¸¤½¥‘vÐfÊ 4ôÊû­ Ù½%CÅ"óÃ8ô]– Bƒ|#Gî&ZשM®áuÜ9ÿ¡°(Qõü­Ûâf“1F¿¤2ŠYxàÕ|Ý|9ªDæ\05Vsªú$hM8(ͨ^÷R‹ëŠØR¬z<áy‡V†jê`z@SnZYIÃNöuCBz«Æ ii⇬”¨Êf^WWØïR3XçzS.ébÔ†ßþú 4ø‹G Yµ„L7ÉÜŒ…-¹ôï *ÂKZÏÝ„öm<0É<Ê$ŽlždÛÒ*2½Êˆ}åQ˜òT´ŸÅi%œÕ/bW7H©()o¬ÿoI¿µ/Ðu>o-€Ë¥Ä±ÿâçLl‚#õâ4㩡éZ'ŸcÍááp’z‡ÊÓ´•”³%1ôèØ…Qv\ŠF ùqÊ•íf¦u &í¦„>P™NS(¹BO܉Œz{ JÚ#³¦'ÏÆÇãŠ,†teœ‘ÅZ¢nbUÀ«ˆÅÓ³}Ò¿aVRø­°°ru‘Iœô œ]Ø¡©›ÅHdœp_Ok kp‘P#%ÿ¶;A+Ƚϋ³Íýòs¯àli1ŠÕañ¶žNØÆÃˆ„<´è¡×¨&† -qáL•¶¬¥«Øo,Ÿ_§Ç=I¤Ü ÉÐâû (?Õ4ò¢Ì9-ÆN¹“z„Ä›¡…ÖÜdÝj UjT5$Dµ CËÂËÜ,Y`&)PæÜ!÷FE^7»‚9ŠùMÔ´)Æ7Œ”ä,FSëí$‰8Îâ8aáÏKHRD›@o‹6¬ÏöÓ¦øcš Éfû(‘@¸lEo §,$,­b’¸>(qMýÄ*W›£Ê?ÙJ“§]WÑ®V¦ÿÌ8ªtåáÕÚÓ#R; SÊ®¹‰-ù%߉æp„½¡ù:ê b_¾o„ú“ÆÌ•Ç»éÄË¢€å«·ÃËY÷àsDÅQo“ûk‰&SÔÿc £XG‘Æ^ì‡,7 â³Ì7b*x}ÅO–µ²ŠyCvÛ\ôk¯åm'ƒ¦®—¥ ‹zv„È‘«#… =>AEÁ4ô*è-®•&yÆ0/n`²|([ NcMo”ýN/¡l.¸Ë—Bì[¬Õ‹hv5-â4޳… ôbb½˜4vÆÕ6Vh!Ëd^“u ›ûÄÈ ‡ýßÛ’Æ|3€÷Èf%Ç.ˆ·ahjËD<«ùý ‹¯ƒf2‘„‡‚عs£U%áyÞ’EMc /,tOm1#ÆM}QÉ”'xþ©kt|Ë'êŒÙf„O/I¡ eÝöj–Å‘Qj¾£‚Ç,×:ÚÀ¼ù3ŒÜ”ˆ½Êþ,"¨ïºA”òèLÕqñ"0/œN8øâUÞjï˜[;×’6µî1Dç8Ù9¥žÏë|ë• ?Íp(íˆÞãM3öŒ¾æUÍ–*Áe±’•Zu€…vv4úI¦¬4"ÕÿÔ©(òÝÕ ÒiÇ:ôc¯-AÏ«,å9\ܦ–Êuö¡¨úü³«ZÛ†æ8â¢òPÇ Þ˜³6ª(” €»À«Ó?ŠõÿE‹9“ºUni¶g­¹éÔp¢¥àBq±š®\¯6•×¥g™7¢X3}Þ¾6¶nr'Ö$Õ TD‚Ët3µDäM–ⶆA·¢gEìâ!ôjI;. ?$éA‹xR{ÀÇò˜þ Wµiã‰ö-òl¸¢yÍÄ`gÌ9ä’)Ú›ô|m‹ÊX¡ÂAMÂ’Ï+ÅTacÐ]ˆ¸À÷wUA#Ñ ^¹#¡„ÃÍ=j%qìz,¸!z°DæX*t £òXÁk{†eØ&·ÔÙd{¸¿ûÖ³lzÎKmcøAÅqp.ç.ƒSŠ¢%\1Ï nRhsâUä¿wE•ÓZðÁºØ®§(òÈÂíÑë$6k\hg·ó¬¤xyûEˆ…Gr O2ì­V•h4©Î&=/6ö}„MGq¶6ù~ÆÁ$þŽkáÝY؃F`Uò5ÐCUÎZѬĶ>‡¤öl»,kèuÚšW˜UAsпïq8b©hÐ;ã†Ñ5ôa’EL¿‚üN]óM]¾vð ™5xCccßZ,“{cöû»!³çÒ}X$ðܘeÓ­!äd§þe‹¶ÌÆ“n ÊÃWC^'S¥¬¢M\­F„_w©À‰ô c×Ïxì{¨¸¤³›E0YUb d»›ÿÅÞŠfNèÛN½@è÷oˆ§C]ù,ý¼­r?ê}kg’é¿WÈÔpžÒONöîïÔèF]φî¤áGY˜°ú¦}”V ¨ÌcþÝ îSÀã‡ük] àùœÏ©,òk•]ç8qS·¢œØrÔ`ó Þ䲨€ˆ&dêEŒìœ#N§¤ïï´~µN÷¢zš‚9Æ"Pm{¼ö·Ã̳/ [›‹òL«ã;oLliÌÅ⮽ ?° ±¼Ç ƒ0ÊÖôX0§ÅæÒüA‹§'øœ]^öË&Vø™¶Æ´õKZ]ç§°}?¸©è±ÁÂ0Êë3³¢¹ÖßugÛj?ëªÚiØïf^ÆM¹¢ºé”åG^âAMÊ™-ªb³Å9D3s-ó×ÉšLi’R’mÅOœÛ'ûÑ¿ú[M“›†¨ðÀY«!aÕ7×/ç‘¿sTCÞžtSÓ ÐŠþ$¶¨¢è†7Jây¯Ðô3§Ç©kåÒá«Üà>‚GcB  mÕè8ë]->ÖÐri‰œÐ-ùk¯¦ôE4„&TsÖ×馃’ã’éè†2¯{G;45Öš‚g/îltÌ‹™@«Èyˆ9ÖLUë´ÛS)ÌcQ>ù.o¥Ö›°åöê± Ã˜ŒÔ좀 Y¦bè*Ö“_þ€cqhŠ•ÓŽÖhVYA/Bo#…~ë-díÙ ÒÀMYò¿ÊÄÕ5ƒ¼–ÍêÁúh?Å ­6.h-±ËíMoêÆè®Ì ™OBºf©JÈO./ÿý[Ö4ÎòQë¸P/rÔQ%ÇI3 Çó$X¶8áSûwb¢­:}át{®&×\=’ó®ÃA¨5LY³‹ç„‘’kä„M(ž&²)tå:tÒ+wÞ<“rxq¥@5%ò˜ÓjJs!uß–øiì%)gFÚj— !< B‹ƒ–b¼éiE‹;õ 4*ùiíû3ÇPÝZÃTuâ€Ry g<³¹£‡¦„R^]Hg_wCa\»r¬eœaª§gÂÍj÷}¦’(Ôšå q”y<íß-4~(Ê}"öÈ ÉKø¬‚æSW–}¶ö6'Îù .óâF ]oè¹~Æ9ÛFù^8+YP¬C¼®MÁ…ä}ÉqÄE«J‹ŠêEçwñ0µ›¤ ¸£±›q^ã_ˉŒÝ°iŸ4Û'û\ÓÔŸ³œu(5[ÔÀîÝ®72\f*¸ “»ÙLAeë›ÊЉ·èý,ë}]Ò'%å1Îë‰Åoõ±´#ºßS/vYnàj7â`“DŒ¾Êr'^ÎÿÓD“â/›z4”0êÅ~F'ã7ò䛳êÌ<ó>høþŸWÖ«‡/ š'ãøDAè³tJµåwJq¤Æ½/ë÷\8OE»anä‘Ó’Ó"sXÖÞ+7H¦@U4ð‰þxXbgoà$P×Ê §¤Øi­Ò@—.v÷ ÚÖ¢!^Ð]ûR+ ¯G0}#8‘7Ÿ\SßËÂ,fáäl‡p-Ù4Zˆ©æXƒ‰úŽx%e†.ð4hÔMžÛë¡5‹¨\ñõEw`îyÿ¦•ó K7€ç ‰ñÒËéìâyÐ-¡‚'¿­€°™»¦Ýu„†C1Ýö´ØFv’3ñ£>- b¤Súkd¬Ó“œÉn †ÐIµÎsÇÖ0~DÚ%1yZ6/ Û£}Ë—zfM9Áq쳨¾ëî ¹Rb"¾Ê>Ã¥o¥Ô‰íVp+j…šimúÆQ‘µKƒ2‚À¯å'(@3v4ÝN£`3hšHšÁ´â*“8s]Ÿe…‚ µÍmI9K ôó¨n‰Æ¾Í‰)ø+Ú\”dÀyDKgûìpñAll] »^š±˜öµZÀ˜ï¤ÞŽòUÄéöO9ÕP噬Pöf¡åGé/çÐÅ'ó­ê|ë,ŽßœýºbU§<½²ÏÝ<àóXO—Ñ׆”˜[É I ˜°¸»ËÐÌÊÆŠâֿ产áuí6KÉwœæˆ))oñöÂV´ÜÈ0Ü8䤆²g²;¨Køõ d’£ãmswSUâÑh¢Ù¯+sÉQ}FM…ÇQ&ŠÂ,ãé­âÒ!4·UpÕç\E@xnyº‘\Òc ˆc_A‹S¨õ$= •mIJ(l è"b³›Í¾‘+ãÇ`»âQ|ßaØ¥Î÷q Û†Pm+·x«]¬,ß03t aœ-—9îÔv¦€2W¬á™Ä¼=ö[5Ô$o¬[ðuÛUòE,~ŸïI&o2È<7b­°óºÄÝTdÞ_oåI¹L3õ~Ô;Aï:¯ºUÔÄB‰æB{¨–žZÍó¶TºÔ§¬¡Ú²©I~™÷ØÛº‘'g£´Þ™ ÆR’%a8úX+Ü 
_ž§£7¿lž³<7*íèìgÑZ ÊÉÙ²§ˆošú¼m ÈŒ©@g÷w¥,x²BiìB e1€r¥…h>c´r Î@B¶0ûˆÑ™æÛè*ª)Æâb&â:É ÚÞ°Z: |æ­r–[ôæ„–´…ðYWòYW¿N)Ãw»AohÞ6» )?,çÒ3Žû»‹Ò‰«Šâd©[¡&w}žbœÞÑÕ\½J%-ø¿Ö®¥¹m# ß÷Wè– ‰ÂÄÑ–¹")›’·œÚTíaHŒˆñЂ ´Ô¯ßî@"ÁéA{k“ªø—5Æ<º¿þ^q[oЇLaÇ.:² prhM†ŒÛ{= Ð6)`íS]ÇÙÑSÕ·£+-ÏfOS“÷§k"ŠÑ*©-Åðü4&A²ŠY¤à­1o&üƒÇt[y(Z&KáFU. O›+Ç„j9çÍÏà*ÏY±„HŒÜ‹”ðØûÁ­Òà¢R=íŽ<’ˆµÝbtëÎ?Ma{2pzí çÑ›T”H×åÉД¨[:Ôõ‹Z;lI?6õz”+“.—`Å’,ë¼,LT†ªÌ~·Œ&‚çVnn³ë‰°µgš^.ï.âN.DHÀà<rSqÔÀ¢;ìû£wÆÉwRHåã#}k~¿¼¥'fË_'¦|²t„yÈâÎÈ粪Îìm!öTÍO¼ì·r’Èw®4k'kÏqTáÎ"ôW <|,WY:«v#ºJJf?ø]ÉnI’Ä’îávå Z)N2–°uðP…™ÀC÷:èÑ32mfÖ"ZdÍ" Ç@ⵑËq ÏX”¢Ø…íִбT"µ;ʬ±8ƒûƒ¿¯[dƒÑ•´¦ú™WÎ{“Û/—®b^LE«çñÅÔ]â£ÿ)q˜ÄKçCÿ˜žÖEêy;õ¬š79ÃTN£ÌY®ÍZˆ<±]<ÁC_›1^‹‡ôJWVù-T*´òP§ Þ´î‘ oôUÒm©RDY†¼-¤ ,˜f/N,·ó8 X̬ZlŽÞv;Y\>Ÿêc?UaûGoÌ®V.G#l‘Wí«8Z—­óñmžEp>¸j£¿Š°¿åÿYc§ |¯X‹ýÜ©ÞáêïÖ—8lq¥ß?swS‹Kõ$b‰ZöâU]¾ì_T¢<4@!MúCùQOå«ÔsµÍœ´Ü÷ÑA†•¶m.½·–¼ÁgiÌúÐg‹FÛÞSk¼ŸvÞgqFH]^„˱¾¬YœÞ§WÕT‚Q ¿Jé½ x³$¹+I¢Ú×i üÙ§Ûïg8èA Ç5÷쮽:aIo§˜Ëu{è\GÐf'r2a1ssݨ3ò°5fÝÙß…·rÍK¼3#jqwdžýÝI|¢–ƒ¿¸zÈáUŒYóÜnl«<9Õ¸(U×&kq9ؼ—æ§\--#?Ë9ýóÂ${Ò1š5²*ìC]£Ú¢]7Ö…Šm#÷ïòtg|×W5±VP±¸ú4òS†ô©›êè£ökÈóßìM7?{û^C}ò(Úr-7šhas·ÿXî?á³÷¨0ÉL«ø0Êâ$\å,˜^Tm“Ûixÿ©Œšç ªQЂÛIþÑòô,Ö-ÍàйR™òû˜Õc¥æu:)ÓîI-1JŽÞ‰fÐ=,`Ì B5ÀPÔ›C²X8·_IóÚGΓ2„Ùzb](BûC&–§M2ybŸ)V}öa‡Ø0 |’õ³¬WºyjØÊÄ9– -dîGœr]TJdv×’Z³'O„®“z#è@ŽÏm¿wLîµ+àòæPmg„PìdiÈ¢•¬µ £§pBM„S"‘/ª¾d]»×%ͼ¸D­O±¶¶›¢Å¾6`a‡rgíïHStí$½a€È~•ÃãØKƺhàŠ½×Tã…tiuŽN%P ð¬c+4Ú‚.7´@'\oïY )h‡÷èïKµ”觇O(V˃øN«ÕBFÈ fÇãF (0ÎsžÑа–úÅÕ<ßÑ·/cä Vh;dMyìgyÀ²²4Êh«^Ù˽µlNØ—Î+UÑùƃ‹Õ'#=6¬ö¸2ól§é%ú9ü›f¬àØJæv°÷24vÞÀ¦ÏÒ”(Û÷´Öëºãâê¶ ŸÛöELü¥'øÜj…6VP”Ìmïí*êr…ZäUÏ”žóÞ¶M ÏmHúM Uç:.è'ƒ4[q ؆ÍÔP`;ÉZ”Å„ú¬J¿Äf.¼+oå~ïÒ‹Dq†qÈÚ™k­óBG( :Q=—1Ïãø^<Ñ•êìÊ–Œ0 ÇûVš×b`%:Ð?ÊÓˆå‰ÿ$º¶Èc;È*Æ<"ž"†6°üÖÖ²Q$â¯{†¨)ðÓ•ÏóU݈½‚–ÏÛI¹¦@œÇNIè§=(çxC W·Ü’8À¯æY¾+ì]Ô*N|ž•€èØ1Þnªp8!<ãÔ,æ©B?a(’ãóMGøg,„[þ&*st—ÉáRá¢ÿîlI,§‡Å¶“<™È½s4:ZP ìÚW$âÝj2•£ÒŽÒ]e8§–†ÏzUÔ.‡®x¶•Ðà8ø1?H°ÿ¶2ñè§0ÎÚb¦ iÂÓ¤l2;K´ÐÚ>Ø¡UÅ Imæéú‡'ÏÞ×.ï¼Þ$L²(eeÚ´@zƒ­°Ãn/¢’MMn0ÏžÜÒ­¼½ƒý_"ïb(8ÑŸç=Š:㢃Vö"KðCƒ CïÒ)Èy' þ„€'i¡ÈXc<Ùâê‹Âøœ¡`Ç“˜Fðb°ÜXLLjÓ·jÓÂ9üiß™k‡úÎöߢ4ø7?c~µš³ÆßÚ!Þ{©à/Þ};ìÖ5rßk~s»c†uò•ZÞŸGšN9ÐÚܫ˲ÇQ¬i¸v÷0¯Þ^ª½ëba“1Ì_‹ã$ä✠~¨×Ó÷)ù¤¬æ‘²0x›¦Êì¨ð«B‰ú­>‰ÍŽË½÷ФÖx)ý<5óh1sw9^@ôgôÓ0ö}–kW[m…÷zÙk oÆ| ÞÕz€]ÿH–m†eB­Ïhq—3ûïÕ6*cu/hˆk<5¬‹SúŒ2ã;Þtw€ËãU‘ž¸™šK‹£õ¹8$â€Çi¿‚MøFe«âQ·LÂñjxm™ÉD-y/²"¿oádgçð8Ÿ š¤ÑÊ_Å,ó1øaPM†_„07\’~ÏÔתáµJwèì:‚ N›ÉKY\Ý©Æ8 ŒQ1®¶/˜Ql-y3—J˜'&QjfÛ~“[Çdb´Ÿù²Y’×eñ÷¶º¢Ží÷(?Ó¤—±„JF½ÓˆuyÞb¾]£Á&©ãœsùÝn?<½\ò QÆÒ¶ãHÙ»`}œâ…¨ò6•`Nv t´y£5$ÄcS:ÉX͹­Óã`u7‹%sèÖ¢ íÈS×¶k¯€Æ§eÞ ÖŽm>ÙN\"fS%CÎ Mn‚‘’œ ªú ¿_l·Š .nuØ{Ïâ°‘;Ö*?A­K»‹c…¾ )ÆÍoŠò8ÎW Ëel#+ ”Q´×Bjè´õJqj7“Ë»“$‘ }G½ïÚšÓ½-Wqšf žx‚’ÚëLòÕ1uízm þ2÷›/²«iß¿/¢›ó¦ 1 sÅÊ+h÷ð·%ûäåq’ñâ„%Ž4›ðOI>{ÆIŒqÖBX‘ûœ’ó©mĞ؋eÛõ šŒG2ÿGvO? S‘Ó‰&ã¥Å"þü‰‹¡»åÙÝi¶ t¬PŠמèàÿjº›]ûÊÜBeF›Š YÄ:ßÇ„‹«Í÷’¢ŸKá(Íü$…ÿ°{Ýzõ{„ŒuÑÏëm¼í:WøáëÁÆp2tjÑ¡ûÍ“¤l䞌eü{S`À‰˜˜&}V.)s×lâ'¬l[Q×èÊ­jÕwö޾³Ò¦´ø‚uLí=:Ó™¶ouÒPIìHW'p$Â޲ÀOVqÈ"z÷¥l‹¶“DDN­L eÈÞ_˪hÜʹ9˜+ ^BÃBoŒù¨« ó,ÊX4E¬XfjX,ãe“ó>wZjrð0iÈΙ7Û*ùåß¹ugUpÕ²V·}Ö:ûå#^Tq¢!uZ 8t¿\ü§37#ÿ_ÞMò§NBp}–‹ÇF\°á ¿­¤à^<¿ÉýÔR÷ Ö@Š ©2ü48ˆ.¯[—]œÀGËcλñâ&ëכ͡ªZجÞs=tô¤Eã`úzuÑÁ>òÓúF5ƒ#p±QÅ>K¤Pž! 
mongodb-1.6.1/tests/utils/basic-skipif.inc0000644000076500000240000000143313572250761020016 0ustar alcaeusstaff
getCode(), $e->getMessage(), $e->getFile(), $e->getLine()));
});

register_shutdown_function(function() {
    $lastError = error_get_last();

    if ($lastError !== null) {
        exit(sprintf('skip %s: %s', errno_as_string($lastError['type']), $lastError['message']));
    }
});
mongodb-1.6.1/tests/utils/basic.inc0000644000076500000240000000064413572250761016536 0ustar alcaeusstaff
name = $name;
        $this->age = $age;
        $this->addresses = array();
        $this->secret = "$name confidential info";
    }

    function addAddress(Address $address)
    {
        $this->addresses[] = $address;
    }

    function addFriend(Person $friend)
    {
        $this->friends[] = $friend;
    }

    function bsonSerialize()
    {
        return array(
            "name" => $this->name,
            "age" => $this->age,
            "addresses" => $this->addresses,
            "friends" => $this->friends,
        );
    }

    function bsonUnserialize(array $data)
    {
        $this->name = $data["name"];
        $this->age = $data["age"];
        $this->addresses = $data["addresses"];
        $this->friends = $data["friends"];
    }
}

class Address implements MongoDB\BSON\Persistable
{
    protected $zip;
    protected $country;

    function __construct($zip, $country)
    {
        $this->zip = $zip;
        $this->country = $country;
    }

    function bsonSerialize()
    {
        return array(
            "zip" => $this->zip,
            "country" => $this->country,
        );
    }

    function bsonUnserialize(array $data)
    {
        $this->zip = $data["zip"];
        $this->country = $data["country"];
    }
}
mongodb-1.6.1/tests/utils/observer.php0000644000076500000240000000210613572250761017315 0ustar alcaeusstaff
commands = [];

        \MongoDB\Driver\Monitoring\addSubscriber($this);

        try {
            call_user_func($execution);
        } finally {
            \MongoDB\Driver\Monitoring\removeSubscriber($this);

            foreach ($this->commands as $command) {
                call_user_func($commandCallback, $command);
            }
        }
    }

    public function commandStarted(CommandStartedEvent $event)
    {
        $this->commands[] = $event->getCommand();
    }

    public function commandSucceeded(CommandSucceededEvent $event)
    {
    }

    public function commandFailed(CommandFailedEvent $event)
    {
    }
}
?>
mongodb-1.6.1/tests/utils/skipif.php0000644000076500000240000002631013572250761016756 0ustar alcaeusstaff
selectServer(new ReadPreference('nearest'));

    $mongosNodes = array_filter($manager->getServers(), function(Server $server) {
        return $server->getType() === Server::TYPE_MONGOS;
    });

    if (count($mongosNodes) > 1) {
        exit('skip topology contains multiple mongos nodes');
    }
}

/**
 * Skips the test if the topology is not a shard cluster.
 */
function skip_if_not_mongos()
{
    is_mongos(URI) or exit('skip topology is not a sharded cluster');
}

function skip_if_not_mongos_with_replica_set()
{
    is_mongos_with_replica_set(URI) or exit('skip topology is not a sharded cluster with replica set');
}

/**
 * Skips the test if the topology is a replica set.
 */
function skip_if_replica_set()
{
    is_replica_set(URI) and exit('skip topology is a replica set');
}
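/*
 * Editorial sketch (not part of the shipped skipif.php): a minimal example of
 * how a .phpt SKIPIF section typically composes the helpers in this file. The
 * function name, the particular combination of checks, and the '4.0' minimum
 * version are illustrative assumptions rather than something taken from an
 * actual test in this package; the function is intentionally never called.
 */
function example_skipif_replica_set_only_test()
{
    skip_if_not_live();                 // server at URI must be reachable
    skip_if_not_replica_set();          // topology must be a replica set (defined below)
    skip_if_server_version('<', '4.0'); // hypothetical minimum server version
    skip_if_not_clean();                // default test collection must be droppable
}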
/**
 * Skips the test if the topology is not a replica set.
 */
function skip_if_not_replica_set()
{
    is_replica_set(URI) or exit('skip topology is not a replica set');
}

/**
 * Skips the test if the topology is not a replica set or sharded cluster backed by replica sets
 */
function skip_if_not_replica_set_or_mongos_with_replica_set()
{
    is_replica_set(URI) or is_mongos_with_replica_set(URI) or exit('skip topology is not a replica set or sharded cluster with replica set');
}

function skip_if_no_transactions()
{
    if (is_mongos_with_replica_set(URI)) {
        skip_if_server_version('<', '4.2');
    } elseif (is_replica_set(URI)) {
        skip_if_server_version('<', '4.0');
    } else {
        exit('skip topology does not support transactions');
    }
}

/**
 * Skips the test if the topology has no arbiter.
 */
function skip_if_no_arbiter()
{
    try {
        $primary = get_primary_server(URI);
    } catch (ConnectionException $e) {
        exit('skip primary server is not accessible: ' . $e->getMessage());
    }

    $info = $primary->getInfo();

    if (!isset($info['arbiters']) || count($info['arbiters']) < 1) {
        exit('skip no arbiters available');
    }
}

/**
 * Skips the test if the topology has no secondary.
 */
function skip_if_no_secondary()
{
    try {
        $primary = get_primary_server(URI);
    } catch (ConnectionException $e) {
        exit('skip primary server is not accessible: ' . $e->getMessage());
    }

    $info = $primary->getInfo();

    if (!isset($info['hosts']) || count($info['hosts']) < 2) {
        exit('skip no secondaries available');
    }
}

/**
 * Skips the test if the topology does not have enough data-carrying nodes.
 */
function skip_if_not_enough_data_nodes($requiredNodes, $maxNodeCount = null)
{
    try {
        $primary = get_primary_server(URI);
    } catch (ConnectionException $e) {
        exit('skip primary server is not accessible: ' . $e->getMessage());
    }

    $info = $primary->getInfo();
    $dataNodeCount = isset($info['hosts']) ? count($info['hosts']) : 0;

    if ($dataNodeCount < $requiredNodes) {
        exit("skip not enough nodes available (wanted: {$requiredNodes}, available: " . count($info['hosts']) . ')');
    }

    if ($maxNodeCount !== null && $dataNodeCount > $requiredNodes) {
        exit("skip too many nodes available (wanted: {$requiredNodes}, available: " . count($info['hosts']) . ')');
    }
}

/**
 * Skips the test if the topology does not have enough nodes.
 */
function skip_if_not_enough_nodes($requiredNodes, $maxNodeCount = null)
{
    try {
        $primary = get_primary_server(URI);
    } catch (ConnectionException $e) {
        exit('skip primary server is not accessible: ' . $e->getMessage());
    }

    $info = $primary->getInfo();
    $nodeCount = (isset($info['hosts']) ? count($info['hosts']) : 0) + (isset($info['arbiters']) ? count($info['arbiters']) : 0);

    if ($nodeCount < $requiredNodes) {
        exit("skip not enough nodes available (wanted: {$requiredNodes}, available: " . count($info['hosts']) . ')');
    }

    if ($maxNodeCount !== null && $nodeCount > $requiredNodes) {
        exit("skip too many nodes available (wanted: {$requiredNodes}, available: " . count($info['hosts']) . ')');
    }
}

/**
 * Skips the test if the topology is a standalone.
 */
function skip_if_standalone()
{
    is_standalone(URI) and exit('skip topology is a standalone');
}

/**
 * Skips the test if the topology is not a standalone.
 */
function skip_if_not_standalone()
{
    is_standalone(URI) or exit('skip topology is not a standalone');
}

/**
 * Skips the test if the connection string uses SSL.
 */
function skip_if_ssl()
{
    is_ssl(URI) and exit('skip URI is using SSL');
}

/**
 * Skips the test if the connection string does not use SSL.
 */
function skip_if_not_ssl()
{
    is_ssl(URI) or exit('skip URI is not using SSL');
}
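/*
 * Editorial sketch (illustrative only): the node-counting helpers above are
 * usually paired with a topology check. This uncalled example gates a test
 * that assumes exactly three data-bearing replica set members; the counts and
 * the function name are assumptions made for the example, not requirements of
 * this test suite.
 */
function example_skipif_three_data_node_replica_set()
{
    skip_if_not_replica_set();
    skip_if_no_secondary();
    skip_if_not_enough_data_nodes(3, 3); // wants exactly three data-bearing nodes
}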
/**
 * Skips the test if the connection string is using auth.
 */
function skip_if_auth()
{
    is_auth(URI) and exit('skip URI is using auth');
}

/**
 * Skips the test if the connection string is not using auth.
 */
function skip_if_not_auth()
{
    is_auth(URI) or exit('skip URI is not using auth');
}

/**
 * Skips the test if the connection string is not using a particular
 * authMechanism.
 *
 * @param string $authMechanism
 */
function skip_if_not_auth_mechanism($authMechanism)
{
    $uriAuthMechanism = get_uri_option(URI, 'authMechanism');

    if ($uriAuthMechanism === null && $authMechanism !== null) {
        exit('skip URI is not using authMechanism');
    }

    if ($uriAuthMechanism !== $authMechanism) {
        exit("skip URI authMechanism is '$uriAuthMechanism' (needed: '$authMechanism')");
    }
}

/**
 * Skips the test if the server is not accessible.
 */
function skip_if_not_live()
{
    try {
        get_primary_server(URI);
    } catch (ConnectionException $e) {
        exit('skip server is not accessible: ' . $e->getMessage());
    }
}

/**
 * Skips the test if the server version satisfies a comparison.
 *
 * @see http://php.net/version_compare
 * @param string $operator Comparison operator
 * @param string $version Version to compare against
 */
function skip_if_server_version($operator, $version)
{
    $serverVersion = get_server_version(URI);

    if (version_compare($serverVersion, $version, $operator)) {
        exit("skip Server version '$serverVersion' $operator '$version'");
    }
}

/**
 * Skips the test if the PHP version satisfies a comparison.
 *
 * @see http://php.net/version_compare
 * @param string $operator Comparison operator
 * @param string $version Version to compare against
 */
function skip_if_php_version($operator, $version)
{
    if (version_compare(PHP_VERSION, $version, $operator)) {
        exit("skip PHP version '" . PHP_VERSION . "' $operator '$version'");
    }
}

/**
 * Skips the test if the server is not using a particular storage engine.
 *
 * @param string $storageEngine Storage engine name
 */
function skip_if_not_server_storage_engine($storageEngine)
{
    $serverStorageEngine = get_server_storage_engine(URI);

    if ($serverStorageEngine !== $storageEngine) {
        exit("skip Server storage engine is '$serverStorageEngine' (needed '$storageEngine')");
    }
}

/**
 * Skips the test if the server does not support the sleep command.
 */
function skip_if_sleep_command_unavailable()
{
    if (!command_works(URI, ['sleep' => 1, 'secs' => 1, 'w' => false])) {
        exit('skip sleep command not available');
    }
}

/**
 * Skips the test if the server does not support test commands.
 */
function skip_if_test_commands_disabled()
{
    if (!get_server_parameter(URI, 'enableTestCommands')) {
        exit('skip test commands are disabled');
    }
}

/**
 * Skips the test if libmongoc does not support crypto.
 *
 * If one or more libraries are provided, additionally check that the reported
 * library is in that array. Possible values are "libcrypto", "Common Crypto",
 * and "CNG".
 *
 * @param array $libs Optional list of crypto libraries to require
 */
function skip_if_not_libmongoc_crypto(array $libs = [])
{
    $lib = get_module_info('libmongoc crypto library');

    if ($lib === null) {
        exit('skip libmongoc crypto is not enabled');
    }

    if (!empty($libs) && !in_array($lib, $libs)) {
        exit('skip Needs libmongoc crypto library ' . implode(', ', $libs) . ', but found ' . $lib);
    }
}
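/*
 * Editorial sketch (illustrative only): how the crypto and auth-mechanism
 * gates above could be combined. The accepted backend names come from the
 * docblock of skip_if_not_libmongoc_crypto(); the pairing with a
 * 'SCRAM-SHA-256' authMechanism check and the function name are assumptions
 * made for the example, and the function is never called.
 */
function example_skipif_scram_sha_256_support()
{
    skip_if_not_libmongoc_crypto(['libcrypto', 'Common Crypto', 'CNG']);
    skip_if_not_auth_mechanism('SCRAM-SHA-256'); // hypothetical mechanism requirement
}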
/**
 * Skips the test if libmongoc does not support SSL.
 *
 * If one or more libraries are provided, additionally check that the reported
 * library is in that array. Possible values are "OpenSSL", "LibreSSL",
 * "Secure Transport", and "Secure Channel".
 *
 * @param array $libs Optional list of SSL libraries to require
 */
function skip_if_not_libmongoc_ssl(array $libs = [])
{
    $lib = get_module_info('libmongoc SSL library');

    if ($lib === null) {
        exit('skip libmongoc SSL is not enabled');
    }

    if (!empty($libs) && !in_array($lib, $libs)) {
        exit('skip Needs libmongoc SSL library ' . implode(', ', $libs) . ', but found ' . $lib);
    }
}

/**
 * Skips the test if the collection cannot be dropped.
 *
 * @param string $databaseName Database name
 * @param string $collectionName Collection name
 */
function skip_if_not_clean($databaseName = DATABASE_NAME, $collectionName = COLLECTION_NAME)
{
    try {
        drop_collection(URI, $databaseName, $collectionName);
    } catch (RuntimeException $e) {
        exit("skip Could not drop '$databaseName.$collectionName': " . $e->getMessage());
    }
}

function skip_if_no_getmore_failpoint()
{
    $serverVersion = get_server_version(URI);

    if (
        version_compare($serverVersion, '3.2', '>=') &&
        version_compare($serverVersion, '4.0', '<')
    ) {
        exit("skip Server version '$serverVersion' does not support a getMore failpoint'");
    }
}

function skip_if_no_failcommand_failpoint()
{
    skip_if_test_commands_disabled();

    $serverVersion = get_server_version(URI);

    if (is_mongos(URI) && version_compare($serverVersion, '4.1.8', '<')) {
        exit("skip mongos version '$serverVersion' does not support 'failCommand' failpoint'");
    } elseif (version_compare($serverVersion, '4.0', '<')) {
        exit("skip mongod version '$serverVersion' does not support 'failCommand' failpoint'");
    }
}

function skip_if_no_mongo_orchestration()
{
    $ctx = stream_context_create(['http' => ['timeout' => 0.5]]);
    $result = @file_get_contents(MONGO_ORCHESTRATION_URI, false, $ctx);

    /* Note: file_get_contents emits an E_WARNING on failure, which will be
     * caught by the error handler in basic-skipif.inc. In that case, this may
     * never be reached. */
    if ($result === false) {
        exit("skip mongo-orchestration is not accessible: '" . MONGO_ORCHESTRATION_URI . "'");
    }
}
mongodb-1.6.1/tests/utils/tools.php0000644000076500000240000005142213572250761016633 0ustar alcaeusstaff
 $collectionName]);

    try {
        /* We need to use WriteConcern::MAJORITY here due to the issue
         * explained in SERVER-35613: "drop" uses a two phase commit, and due
         * to that, it is possible that a lock can't be acquired for a
         * transaction that gets quickly started as the "drop" reaper hasn't
         * completed yet. */
        $server->executeCommand(
            $databaseName,
            $command,
            ['writeConcern' => new WriteConcern(WriteConcern::MAJORITY)]
        );
    } catch (RuntimeException $e) {
        if ($e->getMessage() !== 'ns not found') {
            throw $e;
        }
    }
}

/**
 * Returns the value of a module row from phpinfo(), or null if it's not found.
 *
 * @param string $row
 * @return string|null
 */
function get_module_info($row)
{
    ob_start();
    phpinfo(INFO_MODULES);
    $info = ob_get_clean();

    $pattern = sprintf('/^%s([\w ]+)$/m', preg_quote($row . ' => '));

    if (preg_match($pattern, $info, $matches) !== 1) {
        return null;
    }

    return $matches[1];
}

/**
 * Returns the primary server.
 *
 * @param string $uri Connection string
 * @return Server
 * @throws ConnectionException
 */
function get_primary_server($uri)
{
    return (new Manager($uri))->selectServer(new ReadPreference('primary'));
}
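/*
 * Editorial sketch (illustrative only): a small example of how the helper
 * above is typically combined with a command, e.g. issuing a ping against the
 * primary. The 'ping' command, the admin database, and the function name are
 * assumptions made for the example; Command is used unqualified elsewhere in
 * this file, so the corresponding use statement is assumed to be present.
 */
function example_ping_primary($uri)
{
    $server = get_primary_server($uri);
    $cursor = $server->executeCommand('admin', new Command(['ping' => 1]));

    // ping replies with { ok: 1 } when the server is reachable
    return current($cursor->toArray())->ok == 1.0;
}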
* * @param string $uri Connection string * @return Server * @throws ConnectionException */ function get_secondary_server($uri) { return (new Manager($uri))->selectServer(new ReadPreference('secondary')); } /** * Runs a command and returns whether an exception was thrown or not * * @param string $uri Connection string * @param array|object $commandSpec * @return bool * @throws RuntimeException */ function command_works($uri, $commandSpec) { $command = new Command($commandSpec); $server = get_primary_server($uri); try { $cursor = $server->executeCommand('admin', $command); return true; } catch (Exception $e) { return false; } } /** * Returns a parameter of the primary server. * * @param string $uri Connection string * @return mixed * @throws RuntimeException */ function get_server_parameter($uri, $parameter) { $server = get_primary_server($uri); $command = new Command(['getParameter' => 1, $parameter => 1]); $cursor = $server->executeCommand('admin', $command); return current($cursor->toArray())->$parameter; } /** * Returns the storage engine of the primary server. * * @param string $uri Connection string * @return string * @throws RuntimeException */ function get_server_storage_engine($uri) { $server = get_primary_server($uri); $command = new Command(['serverStatus' => 1]); $cursor = $server->executeCommand('admin', $command); return current($cursor->toArray())->storageEngine->name; } /** * Helper to return the version of a specific server. * * @param Server $server * @return string * @throws RuntimeException */ function get_server_version_from_server(Server $server) { $command = new Command(['buildInfo' => 1]); $cursor = $server->executeCommand('admin', $command); return current($cursor->toArray())->version; } /** * Returns the version of the primary server. * * @param string $uri Connection string * @return string * @throws RuntimeException */ function get_server_version($uri) { $server = get_primary_server($uri); return get_server_version_from_server($server); } /** * Returns the value of a URI option, or null if it's not found. * * @param string $uri * @return string|null */ function get_uri_option($uri, $option) { $pattern = sprintf('/[?&]%s=([^&]+)/i', preg_quote($option)); if (preg_match($pattern, $uri, $matches) !== 1) { return null; } return $matches[1]; } /** * Checks that the topology is a sharded cluster. * * @param string $uri * @return boolean */ function is_mongos($uri) { return get_primary_server($uri)->getType() === Server::TYPE_MONGOS; } /** * Checks that the topology is a sharded cluster using a replica set */ function is_mongos_with_replica_set($uri) { if (! is_mongos($uri)) { return false; } $cursor = get_primary_server($uri)->executeQuery( 'config.shards', new \MongoDB\Driver\Query([], ['limit' => 1]) ); $cursor->setTypeMap(['root' => 'array', 'document' => 'array']); $document = current($cursor->toArray()); if (! $document) { return false; } /** * Use regular expression to distinguish between standalone or replicaset: * Without a replicaset: "host" : "localhost:4100" * With a replicaset: "host" : "dec6d8a7-9bc1-4c0e-960c-615f860b956f/localhost:4400,localhost:4401" */ return preg_match('@^.*/.*:\d+@', $document['host']); } /** * Checks that the topology is a replica set. * * @param string $uri * @return boolean */ function is_replica_set($uri) { if (get_primary_server($uri)->getType() !== Server::TYPE_RS_PRIMARY) { return false; } if (get_uri_option($uri, 'replicaSet') === NULL) { return false; } return true; } /** * Checks if the connection string uses authentication. 
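 *
 * Illustrative examples:
 *
 *   is_auth('mongodb://user:pass@127.0.0.1/');                  // true (credentials present)
 *   is_auth('mongodb://127.0.0.1/?authMechanism=MONGODB-X509'); // true (mechanism specified)
 *   is_auth('mongodb://127.0.0.1/');                            // false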
* * @param string $uri * @return boolean */ function is_auth($uri) { if (stripos($uri, 'authmechanism=') !== false) { return true; } if (strpos($uri, ':') !== false && strpos($uri, '@') !== false) { return true; } return false; } /** * Checks if the connection string uses SSL. * * @param string $uri * @return boolean */ function is_ssl($uri) { return stripos($uri, 'ssl=true') !== false || stripos($uri, 'tls=true') !== false; } /** * Checks that the topology is a standalone. * * @param string $uri * @return boolean */ function is_standalone($uri) { return get_primary_server($uri)->getType() === Server::TYPE_STANDALONE; } /** * Converts the server type constant to a string. * * @see http://php.net/manual/en/class.mongodb-driver-server.php * @param integer $type * @return string */ function server_type_as_string($type) { switch ($type) { case Server::TYPE_STANDALONE: return 'Standalone'; case Server::TYPE_MONGOS: return 'Mongos'; case Server::TYPE_POSSIBLE_PRIMARY: return 'PossiblePrimary'; case Server::TYPE_RS_PRIMARY: return 'RSPrimary'; case Server::TYPE_RS_SECONDARY: return 'RSSecondary'; case Server::TYPE_RS_ARBITER: return 'RSArbiter'; case Server::TYPE_RS_OTHER: return 'RSOther'; case Server::TYPE_RS_GHOST: return 'RSGhost'; default: return 'Unknown'; } } /** * Converts an errno number to a string. * * @see http://php.net/manual/en/errorfunc.constants.php * @param integer $errno * @param string */ function errno_as_string($errno) { $errors = [ 'E_ERROR', 'E_WARNING', 'E_PARSE', 'E_NOTICE', 'E_CORE_ERROR', 'E_CORE_WARNING', 'E_COMPILE_ERROR', 'E_COMPILE_WARNING', 'E_USER_ERROR', 'E_USER_WARNING', 'E_USER_NOTICE', 'E_STRICT', 'E_RECOVERABLE_ERROR', 'E_DEPRECATED', 'E_USER_DEPRECATED', 'E_ALL', ]; foreach ($errors as $error) { if ($errno === constant($error)) { return $error; } } return 'Unknown'; } /** * Prints a traditional hex dump of byte values and printable characters. * * @see http://stackoverflow.com/a/4225813/162228 * @param string $data Binary data * @param integer $width Bytes displayed per line */ function hex_dump($data, $width = 16) { static $pad = '.'; // Placeholder for non-printable characters static $from = ''; static $to = ''; if ($from === '') { for ($i = 0; $i <= 0xFF; $i++) { $from .= chr($i); $to .= ($i >= 0x20 && $i <= 0x7E) ? chr($i) : $pad; } } $hex = str_split(bin2hex($data), $width * 2); $chars = str_split(strtr($data, $from, $to), $width); $offset = 0; $length = $width * 3; foreach ($hex as $i => $line) { printf("%6X : %-{$length}s [%s]\n", $offset, implode(' ', str_split($line, 2)), $chars[$i]); $offset += $width; } } /** * Canonicalizes a JSON string. * * @param string $json * @return string */ function json_canonicalize($json) { $json = json_encode(json_decode($json)); /* Versions of PHP before 7.1 replace empty JSON keys with "_empty_" when * decoding to a stdClass (see: https://bugs.php.net/bug.php?id=46600). Work * around this by replacing "_empty_" keys before returning. */ return str_replace('"_empty_":', '"":', $json); } /** * Return a collection name to use for the test file. * * The filename will be stripped of the base path to the test suite (prefix) as * well as the PHP file extension (suffix). Special characters (including hyphen * for shell compatibility) will be replaced with underscores. * * @param string $filename * @return string */ function makeCollectionNameFromFilename($filename) { $filename = realpath($filename); $prefix = realpath(dirname(__FILE__) . '/..') . 
DIRECTORY_SEPARATOR; $replacements = array( // Strip test path prefix sprintf('/^%s/', preg_quote($prefix, '/')) => '', // Strip file extension suffix '/\.php$/' => '', // SKIPIFs add ".skip" between base name and extension '/\.skip$/' => '', // Replace special characters with underscores sprintf('/[%s]/', preg_quote('-$/\\', '/')) => '_', ); return preg_replace(array_keys($replacements), array_values($replacements), $filename); } function NEEDS($configuration) { if (!constant($configuration)) { exit("skip -- need '$configuration' defined"); } } function SLOW() { if (getenv("SKIP_SLOW_TESTS")) { exit("skip SKIP_SLOW_TESTS"); } } function loadFixtures(Manager $manager, $dbname = DATABASE_NAME, $collname = COLLECTION_NAME, $filename = null) { if (!$filename) { $filename = "compress.zlib://" . __DIR__ . "/" . "PHONGO-FIXTURES.json.gz"; } $bulk = new BulkWrite(['ordered' => false]); $server = $manager->selectServer(new ReadPreference(ReadPreference::RP_PRIMARY)); $data = file_get_contents($filename); $array = json_decode($data); foreach($array as $document) { $bulk->insert($document); } $retval = $server->executeBulkWrite("$dbname.$collname", $bulk); if ($retval->getInsertedCount() !== count($array)) { exit(sprintf('skip Fixtures were not loaded (expected: %d, actual: %d)', $total, $retval->getInsertedCount())); } } function createTemporaryMongoInstance(array $options = []) { $id = 'mo_' . COLLECTION_NAME; $options += [ "name" => "mongod", "id" => $id, 'procParams' => [ 'logpath' => "/tmp/MO/phongo/{$id}.log", 'ipv6' => true, 'setParameter' => [ 'enableTestCommands' => 1 ], ], ]; $opts = array( "http" => array( "timeout" => 60, "method" => "PUT", "header" => "Accept: application/json\r\n" . "Content-type: application/x-www-form-urlencoded", "content" => json_encode($options), "ignore_errors" => true, ), ); $ctx = stream_context_create($opts); $json = file_get_contents(MONGO_ORCHESTRATION_URI . "/servers/$id", false, $ctx); $result = json_decode($json, true); /* Failed -- or was already started */ if (!isset($result["mongodb_uri"])) { destroyTemporaryMongoInstance($id); throw new Exception("Could not start temporary server instance\n"); } else { return $result['mongodb_uri']; } } function destroyTemporaryMongoInstance($id = NULL) { if ($id == NULL) { $id = 'mo_' . COLLECTION_NAME; } $opts = array( "http" => array( "timeout" => 60, "method" => "DELETE", "header" => "Accept: application/json\r\n", "ignore_errors" => true, ), ); $ctx = stream_context_create($opts); $json = file_get_contents(MONGO_ORCHESTRATION_URI . 
"/servers/$id", false, $ctx); } function severityToString($type) { switch($type) { case E_RECOVERABLE_ERROR: return "E_RECOVERABLE_ERROR"; case E_WARNING: return "E_WARNING"; case E_NOTICE: return "E_NOTICE"; default: return "Some other #_$type"; } } function raises($function, $type, $infunction = null) { $errhandler = function($severity, $message, $file, $line, $errcontext) { throw new ErrorException($message, 0, $severity, $file, $line); }; set_error_handler($errhandler, $type); try { $function(); } catch(Exception $e) { $exceptionname = get_class($e); if ($e instanceof ErrorException && $e->getSeverity() & $type) { if ($infunction) { $trace = $e->getTrace(); $function = $trace[0]["function"]; if (strcasecmp($function, $infunction) == 0) { printf("OK: Got %s thrown from %s\n", $exceptionname, $infunction); } else { printf("ALMOST: Got %s - but was thrown in %s, not %s\n", $exceptionname, $function, $infunction); } restore_error_handler(); return $e->getMessage(); } printf("OK: Got %s\n", severityToString($type)); } else { printf("ALMOST: Got %s - expected %s\n", get_class($e), $exceptionname); } restore_error_handler(); return $e->getMessage(); } printf("FAILED: Expected %s thrown!\n", ErrorException::class); restore_error_handler(); } function throws($function, $exceptionname, $infunction = null) { try { $function(); } catch (Throwable $e) { } catch(Exception $e) { } if ($e === null) { echo "FAILED: Expected $exceptionname thrown, but no exception thrown!\n"; return; } $message = str_replace(array("\n", "\r"), ' ', $e->getMessage()); if ($e instanceof $exceptionname) { if ($infunction) { $trace = $e->getTrace(); $function = $trace[0]["function"]; if (strcasecmp($function, $infunction) == 0) { printf("OK: Got %s thrown from %s\n", $exceptionname, $infunction); } else { printf("ALMOST: Got %s - but was thrown in %s, not %s (%s)\n", $exceptionname, $function, $infunction, $message); } return $e->getMessage(); } printf("OK: Got %s\n", $exceptionname); } else { printf("ALMOST: Got %s (%s) - expected %s\n", get_class($e), $message, $exceptionname); } return $e->getMessage(); } function printServer(Server $server) { printf("server: %s:%d\n", $server->getHost(), $server->getPort()); } function printWriteResult(WriteResult $result, $details = true) { printServer($result->getServer()); printf("insertedCount: %d\n", $result->getInsertedCount()); printf("matchedCount: %d\n", $result->getMatchedCount()); printf("modifiedCount: %d\n", $result->getModifiedCount()); printf("upsertedCount: %d\n", $result->getUpsertedCount()); printf("deletedCount: %d\n", $result->getDeletedCount()); foreach ($result->getUpsertedIds() as $index => $id) { printf("upsertedId[%d]: ", $index); var_dump($id); } $writeConcernError = $result->getWriteConcernError(); printWriteConcernError($writeConcernError ? 
$writeConcernError : null, $details); foreach ($result->getWriteErrors() as $writeError) { printWriteError($writeError); } } function printWriteConcernError(WriteConcernError $error = null, $details) { if ($error) { /* This stuff is generated by the server, no need for us to test it */ if (!$details) { printf("writeConcernError: %s (%d)\n", $error->getMessage(), $error->getCode()); return; } var_dump($error); printf("writeConcernError.message: %s\n", $error->getMessage()); printf("writeConcernError.code: %d\n", $error->getCode()); printf("writeConcernError.info: "); var_dump($error->getInfo()); } } function printWriteError(WriteError $error) { var_dump($error); printf("writeError[%d].message: %s\n", $error->getIndex(), $error->getMessage()); printf("writeError[%d].code: %d\n", $error->getIndex(), $error->getCode()); } function getInsertCount($retval) { return $retval->getInsertedCount(); } function getModifiedCount($retval) { return $retval->getModifiedCount(); } function getDeletedCount($retval) { return $retval->getDeletedCount(); } function getUpsertedCount($retval) { return $retval->getUpsertedCount(); } function getWriteErrors($retval) { return (array)$retval->getWriteErrors(); } function def($arr) { foreach($arr as $const => $value) { define($const, getenv("PHONGO_TEST_$const") ?: $value); } } function configureFailPoint(Manager $manager, $failPoint, $mode, array $data = []) { $doc = [ 'configureFailPoint' => $failPoint, 'mode' => $mode, ]; if ($data) { $doc['data'] = $data; } $cmd = new Command($doc); $manager->executeCommand('admin', $cmd); } function configureTargetedFailPoint(Server $server, $failPoint, $mode, array $data = []) { $doc = array( 'configureFailPoint' => $failPoint, 'mode' => $mode, ); if ($data) { $doc['data'] = $data; } $cmd = new Command($doc); $server->executeCommand('admin', $cmd); } function failMaxTimeMS(Server $server) { configureTargetedFailPoint($server, 'maxTimeAlwaysTimeOut', [ 'times' => 1 ]); } function getMOPresetBase() { if (!($BASE = getenv("mongodb_orchestration_base"))) { $BASE = "/phongo/"; } return $BASE; } function toPHP($var, $typemap = array()) { return MongoDB\BSON\toPHP($var, $typemap); } function fromPHP($var) { return MongoDB\BSON\fromPHP($var); } function toJSON($var) { return MongoDB\BSON\toJSON($var); } function toCanonicalExtendedJSON($var) { return MongoDB\BSON\toCanonicalExtendedJSON($var); } function toRelaxedExtendedJSON($var) { return MongoDB\BSON\toRelaxedExtendedJSON($var); } function fromJSON($var) { return MongoDB\BSON\fromJSON($var); } /* Note: this fail point may terminate the mongod process, so you may want to * use this in conjunction with a throwaway server. */ function failGetMore(Manager $manager) { /* We need to do version detection here */ $primary = $manager->selectServer(new ReadPreference('primary')); $version = get_server_version_from_server($primary); if (version_compare($version, "3.2", "<")) { configureFailPoint($manager, 'failReceivedGetmore', 'alwaysOn'); return; } if (version_compare($version, "4.0", ">=")) { /* We use 237 here, as that's the same original code that MongoD would * throw if a cursor had already gone by the time we call getMore. This * allows us to make things consistent with the getMore OP behaviour * from previous mongod versions. An errorCode is required here for the * failPoint to work. 
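 * Tests that enable 'failCommand' this way can switch it back off with
 * configureFailPoint($manager, 'failCommand', 'off') once they are done
 * (noted here as general failpoint behaviour, not something this helper does).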
*/ configureFailPoint($manager, 'failCommand', 'alwaysOn', [ 'errorCode' => 237, 'failCommands' => ['getMore'] ]); return; } throw new Exception("Trying to configure a getMore fail point for a server version ($version) that doesn't support it"); } mongodb-1.6.1/tests/writeConcern/writeconcern-bsonserialize-001.phpt0000644000076500000240000000233213572250761025024 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::bsonSerialize() --FILE-- 1 new MongoDB\Driver\WriteConcern(-2, 1000), ]; foreach ($tests as $test) { echo toJSON(fromPHP($test)), "\n"; } ?> ===DONE=== --EXPECT-- { "w" : "majority" } { } { "w" : -1 } { "w" : 0 } { "w" : 1 } { "w" : "majority" } { "w" : "tag" } { "w" : 1 } { "w" : 1, "j" : false } { "w" : 1, "wtimeout" : 1000 } { "w" : 1, "j" : true, "wtimeout" : 1000 } { "j" : true } { "wtimeout" : 1000 } ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-bsonserialize-002.phpt0000644000076500000240000000335713572250761025035 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::bsonSerialize() returns an object --FILE-- 1 new MongoDB\Driver\WriteConcern(-2, 1000), ]; foreach ($tests as $test) { var_dump($test->bsonSerialize()); } ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["w"]=> string(8) "majority" } object(stdClass)#%d (%d) { } object(stdClass)#%d (%d) { ["w"]=> int(-1) } object(stdClass)#%d (%d) { ["w"]=> int(0) } object(stdClass)#%d (%d) { ["w"]=> int(1) } object(stdClass)#%d (%d) { ["w"]=> string(8) "majority" } object(stdClass)#%d (%d) { ["w"]=> string(3) "tag" } object(stdClass)#%d (%d) { ["w"]=> int(1) } object(stdClass)#%d (%d) { ["w"]=> int(1) ["j"]=> bool(false) } object(stdClass)#%d (%d) { ["w"]=> int(1) ["wtimeout"]=> int(1000) } object(stdClass)#%d (%d) { ["w"]=> int(1) ["j"]=> bool(true) ["wtimeout"]=> int(1000) } object(stdClass)#%d (%d) { ["j"]=> bool(true) } object(stdClass)#%d (%d) { ["wtimeout"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-constants.phpt0000644000076500000240000000027513572250761023675 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern constants --FILE-- ===DONE=== --EXPECTF-- string(8) "majority" ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-ctor-001.phpt0000644000076500000240000000311713572250761023124 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern construction --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(2) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(2) ["wtimeout"]=> int(2000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(7) "tagname" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(6) "string" ["wtimeout"]=> int(3000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(6) "string" ["j"]=> bool(true) ["wtimeout"]=> int(4000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(6) "string" ["j"]=> bool(false) ["wtimeout"]=> int(5000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(6) "string" ["wtimeout"]=> int(6000) } ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-ctor_error-001.phpt0000644000076500000240000000073713572250761024342 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern construction (invalid arguments) --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\WriteConcern::__construct() 
expects at most 3 parameters, 4 given ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-ctor_error-002.phpt0000644000076500000240000000176113572250761024341 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern construction (invalid w type) --FILE-- ===DONE=== --EXPECTF-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected w to be integer or string, %r(double|float)%r given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected w to be integer or string, bool%S given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected w to be integer or string, array given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected w to be integer or string, stdClass given OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected w to be integer or string, %r(null|NULL)%r given ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-ctor_error-003.phpt0000644000076500000240000000062613572250761024341 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern construction (invalid w range) --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected w to be >= -3, -4 given ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-ctor_error-004.phpt0000644000076500000240000000064613572250761024344 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern construction (invalid wtimeout range) --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected wtimeout to be >= 0, -1 given ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-ctor_error-005.phpt0000644000076500000240000000102513572250761024335 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern construction (invalid wtimeout range) --SKIPIF-- --FILE-- ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException Expected wtimeout to be <= 2147483647, 2147483648 given ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-debug-001.phpt0000644000076500000240000000105713572250761023244 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern debug output should include all fields for w default --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["j"]=> bool(true) ["wtimeout"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-debug-002.phpt0000644000076500000240000000121413572250761023240 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern debug output --FILE-- ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(3) "tag" ["j"]=> bool(false) ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" ["j"]=> bool(true) ["wtimeout"]=> int(500) } ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-debug-003.phpt0000644000076500000240000000367713572250761023260 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern debug output --FILE-- 1 new MongoDB\Driver\WriteConcern(-2, 1000), ]; foreach ($tests as $test) { var_dump($test); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" } object(MongoDB\Driver\WriteConcern)#%d (%d) { } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(-1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(0) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(8) "majority" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> string(3) 
"tag" } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) ["j"]=> bool(false) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(1) ["j"]=> bool(true) ["wtimeout"]=> int(1000) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["j"]=> bool(true) } object(MongoDB\Driver\WriteConcern)#%d (%d) { ["wtimeout"]=> int(1000) } ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-getjournal-001.phpt0000644000076500000240000000072013572250761024324 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::getJournal() --FILE-- getJournal()); } // Test with default value $wc = new MongoDB\Driver\WriteConcern(1, 0); var_dump($wc->getJournal()); ?> ===DONE=== --EXPECT-- bool(true) bool(false) bool(true) bool(false) NULL NULL ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-getw-001.phpt0000644000076500000240000000071113572250761023120 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::getW() --FILE-- getW()); } ?> ===DONE=== --EXPECT-- string(8) "majority" string(8) "majority" NULL int(-1) int(0) int(1) int(2) string(3) "tag" string(1) "2" ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-getwtimeout-001.phpt0000644000076500000240000000061313572250761024530 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::getWtimeout() --FILE-- getWtimeout()); } // Test with default value $wc = new MongoDB\Driver\WriteConcern(1); var_dump($wc->getWtimeout()); ?> ===DONE=== --EXPECT-- int(0) int(1) int(0) ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-isdefault-001.phpt0000644000076500000240000000466613572250761024147 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::isDefault() --FILE-- getWriteConcern(), // Cannot test "w=-3" since libmongoc URI parsing expects integers >= -1 // Cannot test "w=-2" since libmongoc URI parsing expects integers >= -1, and throws an error otherwise (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=-1'))->getWriteConcern(), (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=0'))->getWriteConcern(), (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=1'))->getWriteConcern(), (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=2'))->getWriteConcern(), (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=tag'))->getWriteConcern(), (new MongoDB\Driver\Manager('mongodb://127.0.0.1/?w=majority'))->getWriteConcern(), // Cannot test ['w' => null] since an integer or string type is expected (PHPC-887) // Cannot test ['w' => -3] or ['w' => -2] since php_phongo_apply_wc_options_to_uri() expects integers >= -1 (new MongoDB\Driver\Manager(null, ['w' => -1]))->getWriteConcern(), (new MongoDB\Driver\Manager(null, ['w' => 0]))->getWriteConcern(), (new MongoDB\Driver\Manager(null, ['w' => 1]))->getWriteConcern(), (new MongoDB\Driver\Manager(null, ['w' => 2]))->getWriteConcern(), (new MongoDB\Driver\Manager(null, ['w' => 'tag']))->getWriteConcern(), (new MongoDB\Driver\Manager(null, ['w' => 'majority']))->getWriteConcern(), (new MongoDB\Driver\Manager)->getWriteConcern(), ]; foreach ($tests as $wc) { var_dump($wc->isDefault()); } ?> ===DONE=== --EXPECT-- bool(false) bool(true) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(true) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(false) bool(true) ===DONE=== 
mongodb-1.6.1/tests/writeConcern/writeconcern-set_state-001.phpt0000644000076500000240000000275013572250761024152 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::__set_state() --FILE-- -3 ], [ 'w' => -2 ], // -2 is default [ 'w' => -1 ], [ 'w' => 0 ], [ 'w' => 1 ], [ 'w' => 'majority' ], [ 'w' => 'tag' ], [ 'w' => 1, 'j' => false ], [ 'w' => 1, 'wtimeout' => 1000 ], [ 'w' => 1, 'j' => true, 'wtimeout' => 1000 ], [ 'j' => true ], [ 'wtimeout' => 1000 ], ]; foreach ($tests as $fields) { var_export(MongoDB\Driver\WriteConcern::__set_state($fields)); echo "\n\n"; } ?> ===DONE=== --EXPECT-- MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 'majority', )) MongoDB\Driver\WriteConcern::__set_state(array( )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => -1, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 0, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 'majority', )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 'tag', )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, 'j' => false, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, 'wtimeout' => 1000, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, 'j' => true, 'wtimeout' => 1000, )) MongoDB\Driver\WriteConcern::__set_state(array( 'j' => true, )) MongoDB\Driver\WriteConcern::__set_state(array( 'wtimeout' => 1000, )) ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-set_state_error-001.phpt0000644000076500000240000000322513572250761025361 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::__set_state() requires correct data types and values --FILE-- -4]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\WriteConcern::__set_state(['w' => M_PI]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\WriteConcern::__set_state(['wtimeout' => -1]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\WriteConcern::__set_state(['wtimeout' => 'failure']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; echo throws(function() { MongoDB\Driver\WriteConcern::__set_state(['j' => 'failure']); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\WriteConcern initialization requires "w" integer field to be >= -3 OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\WriteConcern initialization requires "w" field to be integer or string OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\WriteConcern initialization requires "wtimeout" integer field to be >= 0 OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\WriteConcern initialization requires "wtimeout" field to be integer OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\WriteConcern initialization requires "j" field to be boolean ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-set_state_error-002.phpt0000644000076500000240000000114413572250761025360 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::__set_state() requires correct data types and values --SKIPIF-- --FILE-- 2147483648]); }, 'MongoDB\Driver\Exception\InvalidArgumentException'), "\n"; ?> ===DONE=== --EXPECT-- OK: Got MongoDB\Driver\Exception\InvalidArgumentException MongoDB\Driver\WriteConcern initialization requires 
"wtimeout" integer field to be <= 2147483647 ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern-var_export-001.phpt0000644000076500000240000000361213572250761024346 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern::bsonSerialize() --FILE-- 1 new MongoDB\Driver\WriteConcern(-2, 1000), ]; foreach ($tests as $test) { echo var_export($test, true), "\n"; } ?> ===DONE=== --EXPECT-- MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 'majority', )) MongoDB\Driver\WriteConcern::__set_state(array( )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => -1, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 0, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 'majority', )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 'tag', )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, 'j' => false, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, 'wtimeout' => 1000, )) MongoDB\Driver\WriteConcern::__set_state(array( 'w' => 1, 'j' => true, 'wtimeout' => 1000, )) MongoDB\Driver\WriteConcern::__set_state(array( 'j' => true, )) MongoDB\Driver\WriteConcern::__set_state(array( 'wtimeout' => 1000, )) ===DONE=== mongodb-1.6.1/tests/writeConcern/writeconcern_error-001.phpt0000644000076500000240000000044313572250761023367 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcern cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyWriteConcern may not inherit from final class (MongoDB\Driver\WriteConcern) in %s on line %d mongodb-1.6.1/tests/writeConcernError/writeconcernerror-debug-001.phpt0000644000076500000240000000157413572250761025334 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcernError debug output --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['x' => 1]); try { /* We assume that the replica set does not have 12 nodes */ $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(12)); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcernError)#%d (%d) { ["message"]=> string(29) "Not enough data-bearing nodes" ["code"]=> int(100) ["info"]=> NULL } ===DONE=== mongodb-1.6.1/tests/writeConcernError/writeconcernerror-debug-002.phpt0000644000076500000240000000172313572250761025331 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcernError debug output --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['x' => $i, 'y' => str_repeat('a', 4194304)]); } try { $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(2, 1)); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcernError)#%d (%d) { ["message"]=> string(33) "waiting for replication timed out" ["code"]=> int(64) ["info"]=> object(stdClass)#%d (%d) { ["wtimeout"]=> bool(true) } } ===DONE=== mongodb-1.6.1/tests/writeConcernError/writeconcernerror-getcode-001.phpt0000644000076500000240000000135713572250761025657 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcernError::getCode() --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['x' => 1]); try { /* We assume that the replica set does not have 12 nodes */ $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(12)); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()->getCode()); } ?> 
===DONE=== --EXPECT-- int(100) ===DONE=== mongodb-1.6.1/tests/writeConcernError/writeconcernerror-getinfo-001.phpt0000644000076500000240000000135313572250761025674 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcernError::getInfo() --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['x' => 1]); try { /* We assume that the replica set does not have 12 nodes */ $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(12)); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()->getInfo()); } ?> ===DONE=== --EXPECT-- NULL ===DONE=== mongodb-1.6.1/tests/writeConcernError/writeconcernerror-getinfo-002.phpt0000644000076500000240000000147213572250761025677 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcernError::getInfo() --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['x' => $i, 'y' => str_repeat('a', 4194304)]); } try { $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(2, 1)); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()->getInfo()); } ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["wtimeout"]=> bool(true) } ===DONE=== mongodb-1.6.1/tests/writeConcernError/writeconcernerror-getmessage-001.phpt0000644000076500000240000000142713572250761026367 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcernError::getMessage() --SKIPIF-- =', '3.1'); ?> --FILE-- insert(['x' => 1]); try { /* We assume that the replica set does not have 12 nodes */ $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(12)); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()->getMessage()); } ?> ===DONE=== --EXPECT-- string(29) "Not enough data-bearing nodes" ===DONE=== mongodb-1.6.1/tests/writeConcernError/writeconcernerror_error-001.phpt0000644000076500000240000000047413572250761025457 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteConcernError cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyWriteConcernError may not inherit from final class (MongoDB\Driver\WriteConcernError) in %s on line %d mongodb-1.6.1/tests/writeError/writeerror-debug-001.phpt0000644000076500000240000000154513572250761022452 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteError debug output --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); try { $manager->executeBulkWrite(NS, $bulk); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteErrors()[0]); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "%SE11000 duplicate key error %s: phongo.writeError_writeerror_debug_001%s dup key: { %S: 1 }" ["code"]=> int(11000) ["index"]=> int(1) ["info"]=> NULL } ===DONE=== mongodb-1.6.1/tests/writeError/writeerror-getCode-001.phpt0000644000076500000240000000121113572250761022724 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteError::getCode() --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); try { $manager->executeBulkWrite(NS, $bulk); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteErrors()[0]->getCode()); } ?> ===DONE=== --EXPECT-- int(11000) ===DONE=== mongodb-1.6.1/tests/writeError/writeerror-getIndex-001.phpt0000644000076500000240000000120713572250761023126 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteError::getIndex() --SKIPIF-- --FILE-- insert(['_id' => 1]); 
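// The two duplicate inserts that follow hit a duplicate-key error; with the
// default ordered execution the failure is reported as a single write error
// at index 1, which is what getIndex() returns below.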
$bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); try { $manager->executeBulkWrite(NS, $bulk); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteErrors()[0]->getIndex()); } ?> ===DONE=== --EXPECT-- int(1) ===DONE=== mongodb-1.6.1/tests/writeError/writeerror-getInfo-001.phpt0000644000076500000240000000132313572250761022751 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteError::getInfo() --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); try { $manager->executeBulkWrite(NS, $bulk); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { // "errInfo" is rarely populated on a WriteError (e.g. shard version error) var_dump($e->getWriteResult()->getWriteErrors()[0]->getInfo()); } ?> ===DONE=== --EXPECT-- NULL ===DONE=== mongodb-1.6.1/tests/writeError/writeerror-getMessage-001.phpt0000644000076500000240000000136413572250761023447 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteError::getMessage() --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); try { $manager->executeBulkWrite(NS, $bulk); } catch(MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteErrors()[0]->getMessage()); } ?> ===DONE=== --EXPECTF-- string(%d) "%SE11000 duplicate key error %s: phongo.writeError_writeerror_getMessage_001%s dup key: { %S: 1 }" ===DONE=== mongodb-1.6.1/tests/writeError/writeerror_error-001.phpt0000644000076500000240000000043113572250761022570 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteError cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyWriteError may not inherit from final class (MongoDB\Driver\WriteError) in %s on line %d mongodb-1.6.1/tests/writeResult/bug0671-003.phpt0000644000076500000240000000154513572250761020444 0ustar alcaeusstaff--TEST-- PHPC-671: Segfault if Manager is already freed when using WriteResult's Server --SKIPIF-- --FILE-- insert(['_id' => 1]); $writeResult = $manager->executeBulkWrite(NS, $bulk); unset($manager); $server = $writeResult->getServer(); /* WriteResult only uses the client to construct a Server. We need to interact * with the Server to test for a user-after-free. 
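 * ("user-after-free" above is meant as use-after-free: $manager has already
 * been unset, so the Server obtained from the WriteResult must not reference
 * freed client state when the ping command below is executed.)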
*/ $cursor = $server->executeCommand(DATABASE_NAME, new MongoDB\Driver\Command(['ping' => 1])); var_dump($cursor->toArray()[0]); ?> ===DONE=== --EXPECTF-- object(stdClass)#%d (%d) { ["ok"]=> float(1)%A } ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-debug-001.phpt0000644000076500000240000000261113572250761023017 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult debug output without errors --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result); ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteResult)#%d (%d) { ["nInserted"]=> int(1) ["nMatched"]=> int(1) ["nModified"]=> int(1) ["nRemoved"]=> int(1) ["nUpserted"]=> int(2) ["upsertedIds"]=> array(2) { [0]=> array(2) { ["index"]=> int(2) ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } } [1]=> array(2) { ["index"]=> int(3) ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } } } ["writeErrors"]=> array(0) { } ["writeConcernError"]=> NULL ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (%d) { } } ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-debug-002.phpt0000644000076500000240000000526113572250761023024 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult debug output with errors --SKIPIF-- --FILE-- false]); $bulk->update(['x' => 1], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 2], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $bulk->insert(['_id' => 3]); try { /* We assume that the replica set does not have 30 nodes */ $result = $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(30)); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteResult)#%d (%d) { ["nInserted"]=> int(3) ["nMatched"]=> int(0) ["nModified"]=> int(0) ["nRemoved"]=> int(0) ["nUpserted"]=> int(2) ["upsertedIds"]=> array(2) { [0]=> array(2) { ["index"]=> int(0) ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } } [1]=> array(2) { ["index"]=> int(1) ["_id"]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } } } ["writeErrors"]=> array(3) { [0]=> object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "E11000 duplicate key %S phongo.writeResult_writeresult_debug_002%s dup key: { %S: 1 }" ["code"]=> int(11000) ["index"]=> int(3) ["info"]=> NULL } [1]=> object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "E11000 duplicate key %S phongo.writeResult_writeresult_debug_002%s dup key: { %S: 2 }" ["code"]=> int(11000) ["index"]=> int(5) ["info"]=> NULL } [2]=> object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "E11000 duplicate key %S phongo.writeResult_writeresult_debug_002%s dup key: { %S: 3 }" ["code"]=> int(11000) ["index"]=> int(7) ["info"]=> NULL } } ["writeConcernError"]=> object(MongoDB\Driver\WriteConcernError)#%d (%d) { ["message"]=> string(29) "Not enough data-bearing nodes" ["code"]=> int(100) ["info"]=> NULL } ["writeConcern"]=> object(MongoDB\Driver\WriteConcern)#%d (%d) { ["w"]=> int(30) } } ===DONE=== 
mongodb-1.6.1/tests/writeResult/writeresult-getdeletedcount-001.phpt0000644000076500000240000000135513572250761025114 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getDeletedCount() with acknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result->getDeletedCount()); ?> ===DONE=== --EXPECT-- int(1) ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getdeletedcount-002.phpt0000644000076500000240000000142113572250761025107 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getDeletedCount() with unacknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0)); var_dump($result->getDeletedCount()); ?> ===DONE=== --EXPECT-- NULL ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getinsertedcount-001.phpt0000644000076500000240000000132713572250761025322 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getInsertedCount() --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result->getInsertedCount()); ?> ===DONE=== --EXPECT-- int(1) ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getinsertedcount-002.phpt0000644000076500000240000000142313572250761025320 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getInsertedCount() with unacknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0)); var_dump($result->getInsertedCount()); ?> ===DONE=== --EXPECT-- NULL ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getmatchedcount-001.phpt0000644000076500000240000000132513572250761025110 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getMatchedCount() --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result->getMatchedCount()); ?> ===DONE=== --EXPECT-- int(1) ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getmatchedcount-002.phpt0000644000076500000240000000142113572250761025106 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getMatchedCount() with unacknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = 
$manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0)); var_dump($result->getMatchedCount()); ?> ===DONE=== --EXPECT-- NULL ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getmodifiedcount-001.phpt0000644000076500000240000000135713572250761025270 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getModifiedCount() with acknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result->getModifiedCount()); ?> ===DONE=== --EXPECT-- int(1) ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getmodifiedcount-002.phpt0000644000076500000240000000142313572250761025263 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getModifiedCount() with unacknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0)); var_dump($result->getModifiedCount()); ?> ===DONE=== --EXPECT-- NULL ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getserver-001.phpt0000644000076500000240000000116013572250761023735 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getUpsertedIds() --SKIPIF-- --FILE-- selectServer(new MongoDB\Driver\ReadPreference(MongoDB\Driver\ReadPreference::RP_PRIMARY)); $bulk = new MongoDB\Driver\BulkWrite; $bulk->insert(['x' => 1]); $result = $server->executeBulkWrite(NS, $bulk); var_dump($result->getServer() == $server); ?> ===DONE=== --EXPECT-- bool(true) ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getupsertedcount-001.phpt0000644000076500000240000000135713572250761025343 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getUpsertedCount() with acknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result->getUpsertedCount()); ?> ===DONE=== --EXPECT-- int(2) ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getupsertedcount-002.phpt0000644000076500000240000000142313572250761025336 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getUpsertedCount() with unacknowledged write --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); $bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(0)); var_dump($result->getUpsertedCount()); ?> ===DONE=== --EXPECT-- NULL ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getupsertedids-001.phpt0000644000076500000240000000164713572250761024774 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getUpsertedIds() with server-generated values --SKIPIF-- --FILE-- insert(['x' => 1]); $bulk->update(['x' => 1], ['$set' => ['y' => 3]]); $bulk->update(['x' => 2], ['$set' => ['y' => 1]], ['upsert' => true]); 
$bulk->update(['x' => 3], ['$set' => ['y' => 2]], ['upsert' => true]); $bulk->delete(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result->getUpsertedIds()); ?> ===DONE=== --EXPECTF-- array(2) { [2]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } [3]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "%x" } } ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getupsertedids-002.phpt0000644000076500000240000000426213572250761024771 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getUpsertedIds() with client-generated values --SKIPIF-- --FILE-- update(['_id' => $value], ['$set' => ['x' => 1]], ['upsert' => true]); } $result = $manager->executeBulkWrite(NS, $bulk); var_dump($result->getUpsertedIds()); ?> ===DONE=== --EXPECTF-- array(13) { [0]=> NULL [1]=> bool(true) [2]=> int(1) [3]=> float(4.125) [4]=> string(3) "foo" [5]=> object(stdClass)#%d (%d) { } [6]=> object(MongoDB\BSON\Binary)#%d (%d) { ["data"]=> string(3) "foo" ["type"]=> int(0) } [7]=> object(MongoDB\BSON\Javascript)#%d (%d) { ["code"]=> string(12) "function(){}" ["scope"]=> NULL } [8]=> object(MongoDB\BSON\MaxKey)#%d (%d) { } [9]=> object(MongoDB\BSON\MinKey)#%d (%d) { } [10]=> object(MongoDB\BSON\ObjectId)#%d (%d) { ["oid"]=> string(24) "586c18d86118fd6c9012dec1" } [11]=> object(MongoDB\BSON\Timestamp)#%d (%d) { ["increment"]=> string(4) "1234" ["timestamp"]=> string(4) "5678" } [12]=> object(MongoDB\BSON\UTCDateTime)#%d (%d) { ["milliseconds"]=> string(13) "1483479256924" } } ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getwriteconcernerror-001.phpt0000644000076500000240000000153613572250761026212 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getWriteConcernError() --SKIPIF-- --FILE-- insert(['x' => 1]); try { /* We assume that the replica set does not have 12 nodes */ $result = $manager->executeBulkWrite(NS, $bulk, new MongoDB\Driver\WriteConcern(12)); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteConcernError()); } ?> ===DONE=== --EXPECTF-- object(MongoDB\Driver\WriteConcernError)#%d (%d) { ["message"]=> string(29) "Not enough data-bearing nodes" ["code"]=> int(100) ["info"]=> NULL } ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getwriteerrors-001.phpt0000644000076500000240000000202413572250761025016 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getWriteErrors() with ordered execution --SKIPIF-- --FILE-- insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $bulk->insert(['_id' => 4]); $bulk->insert(['_id' => 4]); try { $result = $manager->executeBulkWrite(NS, $bulk); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteErrors()); } ?> ===DONE=== --EXPECTF-- array(1) { [0]=> object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "%SE11000 duplicate key error %s: phongo.writeResult_writeresult_getwriteerrors_001%sdup key: { %S: 2 }" ["code"]=> int(11000) ["index"]=> int(2) ["info"]=> NULL } } ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-getwriteerrors-002.phpt0000644000076500000240000000250113572250761025017 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::getWriteErrors() with unordered execution --SKIPIF-- --FILE-- false]); $bulk->insert(['_id' => 1]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 2]); $bulk->insert(['_id' => 3]); $bulk->insert(['_id' => 4]); $bulk->insert(['_id' => 4]); try { $result = 
$manager->executeBulkWrite(NS, $bulk); } catch (MongoDB\Driver\Exception\BulkWriteException $e) { var_dump($e->getWriteResult()->getWriteErrors()); } ?> ===DONE=== --EXPECTF-- array(2) { [0]=> object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "%SE11000 duplicate key error %s: phongo.writeResult_writeresult_getwriteerrors_002%sdup key: { %S: 2 }" ["code"]=> int(11000) ["index"]=> int(2) ["info"]=> NULL } [1]=> object(MongoDB\Driver\WriteError)#%d (%d) { ["message"]=> string(%d) "%SE11000 duplicate key error %s: phongo.writeResult_writeresult_getwriteerrors_002%sdup key: { %S: 4 }" ["code"]=> int(11000) ["index"]=> int(5) ["info"]=> NULL } } ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult-isacknowledged-001.phpt0000644000076500000240000000122313572250761024712 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult::isAcknowledged() --SKIPIF-- --FILE-- insert(['x' => 1]); $result = $manager->executeBulkWrite(NS, $bulk, $wc); var_dump($result->isAcknowledged()); } ?> ===DONE=== --EXPECT-- bool(false) bool(true) ===DONE=== mongodb-1.6.1/tests/writeResult/writeresult_error-001.phpt0000644000076500000240000000043613572250761023147 0ustar alcaeusstaff--TEST-- MongoDB\Driver\WriteResult cannot be extended --FILE-- ===DONE=== --EXPECTF-- Fatal error: Class MyWriteResult may not inherit from final class (MongoDB\Driver\WriteResult) in %s on line %d mongodb-1.6.1/CREDITS0000644000076500000240000000013113572250761013467 0ustar alcaeusstaffMongoDB Driver for PHP Hannes Magnusson, Jeremy Mikola, Derick Rethans, Katherine Walker mongodb-1.6.1/LICENSE0000644000076500000240000002613713572250761013472 0ustar alcaeusstaff Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. 
For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. mongodb-1.6.1/Makefile.frag0000644000076500000240000000621613572250761015037 0ustar alcaeusstaff.PHONY: coverage test-clean package package.xml format format-changed DATE=`date +%Y-%m-%d--%H-%M-%S` MONGODB_VERSION=$(shell php -n -dextension=modules/mongodb.so -r 'echo MONGODB_VERSION;') MONGODB_MINOR=$(shell echo $(MONGODB_VERSION) | cut -d. 
-f1,2) MONGODB_STABILITY=$(shell php -n -dextension=modules/mongodb.so -r 'echo MONGODB_STABILITY;') help: @echo -e "\t$$ make vm" @echo -e "\t - Launches VMs for running multiple MongoDB variations" @echo -e "\t$$ make list-servers" @echo -e "\t - Lists running servers, and their URIs" @echo -e "\t$$ make test-bootstrap" @echo -e "\t - Starts up MongoDB through mongo-orchestration" @echo "" @echo -e "\t$$ make coveralls" @echo -e "\t - Creates code coverage report using coveralls" @echo -e "\t$$ make coverage" @echo -e "\t - Creates code coverage report using gcov" @echo "" @echo -e "\t$$ make distcheck" @echo -e "\t - Builds the archive, runs the virtual tests" @echo "" @echo -e "\t$$ make package.xml" @echo -e "\t - Creates a package.xml file with empty release notes" @echo -e "\t$$ make package" @echo -e "\t - Creates the pecl archive to use for provisioning" @echo -e "\t$$ make test-virtual" @echo -e "\t - Provisions some VMs, installs the pecl archive and executes the tests" mv-coverage: @if test -e $(top_srcdir)/coverage; then \ echo "Moving previous coverage run to coverage-$(DATE)"; \ mv coverage coverage-$(DATE); \ fi lcov-coveralls: lcov --gcov-tool $(top_srcdir)/.llvm-cov.sh --capture --directory . --output-file .coverage.lcov --no-external lcov-local: lcov --gcov-tool $(top_srcdir)/.llvm-cov.sh --capture --derive-func-data --directory . --output-file .coverage.lcov --no-external coverage: mv-coverage lcov-local genhtml .coverage.lcov --legend --title "mongodb code coverage" --output-directory coverage coveralls: mv-coverage lcov-coveralls coveralls --exclude src/libbson --exclude src/libmongoc --exclude src/contrib --exclude lib --exclude tests vm: @command -v vagrant >/dev/null 2>&1 || { echo >&2 "Vagrant needs to be installed to run vms"; exit 1; } @vagrant up ldap mo list-servers: php scripts/list-servers.php test-bootstrap: vagrant reload mo vagrant ssh mo -c 'sudo rm -f /home/vagrant/server.pid' vagrant ssh mo -c 'sudo mongo-orchestration -f mongo-orchestration-config.json -b 192.168.112.10 --enable-majority-read-concern start' php scripts/start-servers.php format: $(top_srcdir)/scripts/clang-format.sh format-changed: $(top_srcdir)/scripts/clang-format.sh changed distcheck: package test-virtual test-virtual: package sh ./scripts/run-tests-on.sh freebsd sh ./scripts/run-tests-on.sh precise32 sh ./scripts/run-tests-on.sh precise64 test-clean: find $(top_srcdir)/tests -not \( -path $(top_srcdir)/tests/utils -prune \) -type f -name "*.diff" -o -name "*.exp" -o -name "*.log" -o -name "*.mem" -o -name "*.out" -o -name "*.php" -o -name "*.sh" | xargs -r rm package: pecl package package.xml package.xml: php bin/prep-release.php $(MONGODB_VERSION) $(MONGODB_STABILITY) libmongoc-version-current: cd src/libmongoc/ && python build/calc_release_version.py > ../LIBMONGOC_VERSION_CURRENT mongodb-1.6.1/README.md0000644000076500000240000000560113572250761013735 0ustar alcaeusstaff# MongoDB PHP Driver [![Travis Build Status](https://api.travis-ci.org/mongodb/mongo-php-driver.png?branch=master)](https://travis-ci.org/mongodb/mongo-php-driver) [![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/gbd3t99ucib5n8sf?svg=true)](https://ci.appveyor.com/project/jmikola/mongo-php-driver) [![Coverage Status](https://coveralls.io/repos/github/mongodb/mongo-php-driver/badge.svg?branch=master)](https://coveralls.io/github/mongodb/mongo-php-driver?branch=master) This extension is developed atop the [libmongoc](https://github.com/mongodb/mongo-c-driver) and 
[libbson](https://github.com/mongodb/libbson) libraries. It provides a minimal API for core driver functionality: commands, queries, writes, connection management, and BSON serialization. Userland PHP libraries that depend on this extension may provide higher level APIs, such as query builders, individual command helper methods, and GridFS. Application developers should consider using this extension in conjunction with the [MongoDB PHP library](https://github.com/mongodb/mongo-php-library), which implements the same higher level APIs found in MongoDB drivers for other languages (as well as the [legacy PHP driver](https://php.net/manual/en/book.mongo.php)). ## Documentation - https://php.net/manual/en/set.mongodb.php - https://docs.mongodb.com/ecosystem/drivers/php/ ## Installation To build and install the driver: $ pecl install mongodb $ echo "extension=mongodb.so" >> `php --ini | grep "Loaded Configuration" | sed -e "s|.*:\s*||"` Additional installation instructions may be found in the [PHP.net documentation](https://php.net/manual/en/mongodb.installation.php). This extension is intended to be used alongside the [MongoDB PHP Library](https://github.com/mongodb/mongo-php-library), which is distributed as the [`mongodb/mongodb`](https://packagist.org/packages/mongodb/mongodb) package for [Composer](https://getcomposer.org). ## Reporting Issues Issues pertaining to the extension should be reported in the [PHPC](https://jira.mongodb.org/secure/CreateIssue!default.jspa?project-field=PHPC) project in MongoDB's JIRA. Library-related issues should be reported in the [PHPLIB](https://jira.mongodb.org/secure/CreateIssue!default.jspa?project-field=PHPLIB) project. For general questions and support requests, please use one of MongoDB's [Technical Support](https://docs.mongodb.com/manual/support/) channels. ### Security Vulnerabilities If you've identified a security vulnerability in a driver or any other MongoDB project, please report it according to the instructions in [Create a Vulnerability Report](https://docs.mongodb.org/manual/tutorial/create-a-vulnerability-report). ## Development Development is tracked in the [PHPC](https://jira.mongodb.org/projects/PHPC/summary) project in MongoDB's JIRA. Documentation for contributing to this project may be found in [CONTRIBUTING.md](CONTRIBUTING.md).
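## Example

A minimal usage sketch of the extension's core API, using the same classes exercised by the bundled tests (`MongoDB\Driver\Manager`, `MongoDB\Driver\BulkWrite`, `MongoDB\Driver\Query`). The connection URI and the `db.collection` namespace are placeholders; adjust them for your deployment:

    <?php

    // Connect to a deployment (placeholder URI).
    $manager = new MongoDB\Driver\Manager('mongodb://127.0.0.1:27017');

    // Queue an insert and execute it against the "db.collection" namespace.
    $bulk = new MongoDB\Driver\BulkWrite();
    $bulk->insert(['x' => 1]);
    $result = $manager->executeBulkWrite('db.collection', $bulk);
    var_dump($result->getInsertedCount());

    // Read matching documents back with a query.
    $query  = new MongoDB\Driver\Query(['x' => 1]);
    $cursor = $manager->executeQuery('db.collection', $query);

    foreach ($cursor as $document) {
        var_dump($document);
    }

Most applications should prefer the higher level APIs of the [MongoDB PHP Library](https://github.com/mongodb/mongo-php-library) over calling these classes directly.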
mongodb-1.6.1/Vagrantfile0000644000076500000240000000322713572250761014645 0ustar alcaeusstaff# -*- mode: ruby -*- # vi: set ft=ruby et sw=2 : Vagrant.configure(2) do |config| config.vm.synced_folder ".", "/phongo" config.vm.provider "vmware_workstation" do |vmware, override| vmware.vmx["memsize"] = "8192" vmware.vmx["numvcpus"] = "2" end config.vm.provider "virtualbox" do |virtualbox| virtualbox.memory = 2048 virtualbox.cpus = 2 end config.vm.define "mo", primary: true do |mo| mo.vm.network "private_network", ip: "192.168.112.10" mo.vm.box = "ubuntu/trusty64" mo.vm.provision "shell", path: "scripts/ubuntu/essentials.sh", privileged: true mo.vm.provision "file", source: "scripts/ubuntu/get-pip.py", destination: "get-pip.py" mo.vm.provision "file", source: "scripts/ubuntu/mongo-orchestration-config.json", destination: "mongo-orchestration-config.json" mo.vm.provision "shell", path: "scripts/ubuntu/mongo-orchestration.sh", privileged: true mo.vm.provision "shell", path: "scripts/ubuntu/ldap/install.sh", privileged: true end config.vm.define "ldap", autostart: false do |ldap| ldap.vm.network "private_network", ip: "192.168.112.20" ldap.vm.box = "http://puppet-vagrant-boxes.puppetlabs.com/centos-64-x64-vbox4210-nocm.box" ldap.vm.provider "vmware_workstation" do |vmware, override| override.vm.box_url = "https://dl.dropbox.com/u/5721940/vagrant-boxes/vagrant-centos-6.4-x86_64-vmware_fusion.box" override.vm.provision "shell", path: "scripts/vmware/kernel.sh", privileged: true end ldap.vm.provision "shell", path: "scripts/centos/essentials.sh", privileged: true ldap.vm.provision "shell", path: "scripts/centos/ldap/install.sh", privileged: true end end mongodb-1.6.1/config.m40000644000076500000240000005205313572250761014170 0ustar alcaeusstaffdnl config.m4 for extension mongodb PHP_ARG_ENABLE([mongodb], [whether to enable MongoDB support], [AS_HELP_STRING([--enable-mongodb], [Enable MongoDB support])]) if test "$PHP_MONGODB" != "no"; then dnl Check PHP version is compatible with this extension AC_MSG_CHECKING([PHP version]) PHP_MONGODB_PHP_VERSION=$PHP_VERSION PHP_MONGODB_PHP_VERSION_ID=$PHP_VERSION_ID if test -z "$PHP_MONGODB_PHP_VERSION"; then if test -z "$PHP_CONFIG"; then AC_MSG_ERROR([php-config not found]) fi PHP_MONGODB_PHP_VERSION=`${PHP_CONFIG} --version` PHP_MONGODB_PHP_VERSION_ID=`echo "${PHP_MONGODB_PHP_VERSION}" | $AWK 'BEGIN { FS = "."; } { printf "%d", ([$]1 * 100 + [$]2) * 100 + [$]3;}'` fi AC_MSG_RESULT($PHP_MONGODB_PHP_VERSION) if test "$PHP_MONGODB_PHP_VERSION_ID" -lt "50600"; then AC_MSG_ERROR([not supported. 
Need a PHP version >= 5.6.0 (found $PHP_MONGODB_PHP_VERSION)]) fi PHP_ARG_ENABLE([mongodb-developer-flags], [whether to enable developer build flags], [AS_HELP_STRING([--enable-mongodb-developer-flags], [MongoDB: Enable developer flags [default=no]])], [no], [no]) if test "$PHP_MONGODB_DEVELOPER_FLAGS" = "yes"; then dnl Warn about functions which might be candidates for format attributes PHP_CHECK_GCC_ARG(-Wmissing-format-attribute, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wmissing-format-attribute") dnl Avoid duplicating values for an enum PHP_CHECK_GCC_ARG(-Wduplicate-enum, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wduplicate-enum") dnl Warns on mismatches between #ifndef and #define header guards PHP_CHECK_GCC_ARG(-Wheader-guard, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wheader-guard") dnl logical not of a non-boolean expression PHP_CHECK_GCC_ARG(-Wlogical-not-parentheses, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wlogical-not-parentheses") dnl Warn about suspicious uses of logical operators in expressions PHP_CHECK_GCC_ARG(-Wlogical-op, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wlogical-op") dnl memory error detector. dnl FIXME: -fsanitize=address,undefined for clang. The PHP_CHECK_GCC_ARG macro isn't happy about that string :( PHP_CHECK_GCC_ARG(-fsanitize-address, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fsanitize-address") dnl Enable frame debugging PHP_CHECK_GCC_ARG(-fno-omit-frame-pointer, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fno-omit-frame-pointer") dnl Make sure we don't optimize calls PHP_CHECK_GCC_ARG(-fno-optimize-sibling-calls, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fno-optimize-sibling-calls") PHP_CHECK_GCC_ARG(-Wlogical-op-parentheses, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wlogical-op-parentheses") PHP_CHECK_GCC_ARG(-Wpointer-bool-conversion, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wpointer-bool-conversion") PHP_CHECK_GCC_ARG(-Wbool-conversion, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wbool-conversion") PHP_CHECK_GCC_ARG(-Wloop-analysis, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wloop-analysis") PHP_CHECK_GCC_ARG(-Wsizeof-array-argument, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wsizeof-array-argument") PHP_CHECK_GCC_ARG(-Wstring-conversion, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wstring-conversion") PHP_CHECK_GCC_ARG(-Wno-variadic-macros, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-variadic-macros") PHP_CHECK_GCC_ARG(-Wno-sign-compare, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-sign-compare") PHP_CHECK_GCC_ARG(-fstack-protector, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fstack-protector") PHP_CHECK_GCC_ARG(-fno-exceptions, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -fno-exceptions") PHP_CHECK_GCC_ARG(-Wformat-security, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wformat-security") PHP_CHECK_GCC_ARG(-Wformat-nonliteral, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wformat-nonliteral") PHP_CHECK_GCC_ARG(-Winit-self, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Winit-self") PHP_CHECK_GCC_ARG(-Wwrite-strings, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wwrite-strings") PHP_CHECK_GCC_ARG(-Wenum-compare, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wenum-compare") PHP_CHECK_GCC_ARG(-Wempty-body, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wempty-body") PHP_CHECK_GCC_ARG(-Wparentheses, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wparentheses") PHP_CHECK_GCC_ARG(-Wdeclaration-after-statement, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wdeclaration-after-statement") PHP_CHECK_GCC_ARG(-Wmaybe-uninitialized, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wmaybe-uninitialized") 
PHP_CHECK_GCC_ARG(-Wimplicit-fallthrough, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wimplicit-fallthrough") PHP_CHECK_GCC_ARG(-Werror, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Werror") PHP_CHECK_GCC_ARG(-Wextra, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wextra") PHP_CHECK_GCC_ARG(-Wno-unused-parameter, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-unused-parameter") PHP_CHECK_GCC_ARG(-Wno-unused-but-set-variable, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-unused-but-set-variable") PHP_CHECK_GCC_ARG(-Wno-missing-field-initializers, _MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS -Wno-missing-field-initializers") MAINTAINER_CFLAGS="$_MAINTAINER_CFLAGS" STD_CFLAGS="-g -O0 -Wall" fi PHP_ARG_ENABLE([mongodb-coverage], [whether to enable code coverage], [AS_HELP_STRING([--enable-mongodb-coverage], [MongoDB: Enable developer code coverage information [default=no]])], [no], [no]) if test "$PHP_MONGODB_COVERAGE" = "yes"; then if test "$ext_shared" != "yes"; then AC_MSG_ERROR(code coverage is not supported for static builds) fi COVERAGE_CFLAGS="--coverage -g" COVERAGE_LDFLAGS="--coverage" MONGODB_SHARED_LIBADD="$MONGODB_SHARED_LIBADD $COVERAGE_LDFLAGS" fi PHP_MONGODB_CFLAGS="$STD_CFLAGS $MAINTAINER_CFLAGS $COVERAGE_CFLAGS" PHP_MONGODB_SOURCES="\ php_phongo.c \ phongo_compat.c \ src/bson.c \ src/bson-encode.c \ src/BSON/Binary.c \ src/BSON/BinaryInterface.c \ src/BSON/DBPointer.c \ src/BSON/Decimal128.c \ src/BSON/Decimal128Interface.c \ src/BSON/Int64.c \ src/BSON/Javascript.c \ src/BSON/JavascriptInterface.c \ src/BSON/MaxKey.c \ src/BSON/MaxKeyInterface.c \ src/BSON/MinKey.c \ src/BSON/MinKeyInterface.c \ src/BSON/ObjectId.c \ src/BSON/ObjectIdInterface.c \ src/BSON/Persistable.c \ src/BSON/Regex.c \ src/BSON/RegexInterface.c \ src/BSON/Serializable.c \ src/BSON/Symbol.c \ src/BSON/Timestamp.c \ src/BSON/TimestampInterface.c \ src/BSON/Type.c \ src/BSON/Undefined.c \ src/BSON/Unserializable.c \ src/BSON/UTCDateTime.c \ src/BSON/UTCDateTimeInterface.c \ src/BSON/functions.c \ src/MongoDB/BulkWrite.c \ src/MongoDB/Command.c \ src/MongoDB/Cursor.c \ src/MongoDB/CursorId.c \ src/MongoDB/CursorInterface.c \ src/MongoDB/Manager.c \ src/MongoDB/Query.c \ src/MongoDB/ReadConcern.c \ src/MongoDB/ReadPreference.c \ src/MongoDB/Server.c \ src/MongoDB/Session.c \ src/MongoDB/WriteConcern.c \ src/MongoDB/WriteConcernError.c \ src/MongoDB/WriteError.c \ src/MongoDB/WriteResult.c \ src/MongoDB/Exception/AuthenticationException.c \ src/MongoDB/Exception/BulkWriteException.c \ src/MongoDB/Exception/CommandException.c \ src/MongoDB/Exception/ConnectionException.c \ src/MongoDB/Exception/ConnectionTimeoutException.c \ src/MongoDB/Exception/Exception.c \ src/MongoDB/Exception/ExecutionTimeoutException.c \ src/MongoDB/Exception/InvalidArgumentException.c \ src/MongoDB/Exception/LogicException.c \ src/MongoDB/Exception/RuntimeException.c \ src/MongoDB/Exception/ServerException.c \ src/MongoDB/Exception/SSLConnectionException.c \ src/MongoDB/Exception/UnexpectedValueException.c \ src/MongoDB/Exception/WriteException.c \ src/MongoDB/Monitoring/CommandFailedEvent.c \ src/MongoDB/Monitoring/CommandStartedEvent.c \ src/MongoDB/Monitoring/CommandSubscriber.c \ src/MongoDB/Monitoring/CommandSucceededEvent.c \ src/MongoDB/Monitoring/Subscriber.c \ src/MongoDB/Monitoring/functions.c \ " PHP_ARG_WITH([libbson], [whether to use system libbson], [AS_HELP_STRING([--with-libbson=@<:@yes/no@:>@], [MongoDB: Use system libbson [default=no]])], [no], [no]) PHP_ARG_WITH([libmongoc], [whether to use system libmongoc], 
[AS_HELP_STRING([--with-libmongoc=@<:@yes/no@:>@], [MongoDB: Use system libmongoc [default=no]])], [no], [no]) if test "$PHP_LIBBSON" != "no"; then if test "$PHP_LIBMONGOC" = "no"; then AC_MSG_ERROR(Cannot use system libbson and bundled libmongoc) fi AC_PATH_PROG(PKG_CONFIG, pkg-config, no) AC_MSG_CHECKING(for libbson) if test -x "$PKG_CONFIG" && $PKG_CONFIG --exists libbson-1.0; then if $PKG_CONFIG libbson-1.0 --atleast-version 1.15.2; then PHP_MONGODB_BSON_CFLAGS=`$PKG_CONFIG libbson-1.0 --cflags` PHP_MONGODB_BSON_LIBS=`$PKG_CONFIG libbson-1.0 --libs` PHP_MONGODB_BSON_VERSION=`$PKG_CONFIG libbson-1.0 --modversion` AC_MSG_RESULT(version $PHP_MONGODB_BSON_VERSION found) else AC_MSG_ERROR(system libbson must be upgraded to version >= 1.15.2) fi else AC_MSG_ERROR(pkgconfig and libbson must be installed) fi PHP_MONGODB_CFLAGS="$PHP_MONGODB_CFLAGS $PHP_MONGODB_BSON_CFLAGS" PHP_EVAL_LIBLINE($PHP_MONGODB_BSON_LIBS, MONGODB_SHARED_LIBADD) AC_DEFINE(HAVE_SYSTEM_LIBBSON, 1, [Use system libbson]) fi if test "$PHP_LIBMONGOC" != "no"; then if test "$PHP_LIBBSON" = "no"; then AC_MSG_ERROR(Cannot use system libmongoc and bundled libbson) fi AC_PATH_PROG(PKG_CONFIG, pkg-config, no) AC_MSG_CHECKING(for libmongoc) if test -x "$PKG_CONFIG" && $PKG_CONFIG --exists libmongoc-1.0; then if $PKG_CONFIG libmongoc-1.0 --atleast-version 1.15.2; then PHP_MONGODB_MONGOC_CFLAGS=`$PKG_CONFIG libmongoc-1.0 --cflags` PHP_MONGODB_MONGOC_LIBS=`$PKG_CONFIG libmongoc-1.0 --libs` PHP_MONGODB_MONGOC_VERSION=`$PKG_CONFIG libmongoc-1.0 --modversion` AC_MSG_RESULT(version $PHP_MONGODB_MONGOC_VERSION found) else AC_MSG_ERROR(system libmongoc must be upgraded to version >= 1.15.2) fi else AC_MSG_ERROR(pkgconfig and libmongoc must be installed) fi PHP_MONGODB_CFLAGS="$PHP_MONGODB_CFLAGS $PHP_MONGODB_MONGOC_CFLAGS" PHP_EVAL_LIBLINE($PHP_MONGODB_MONGOC_LIBS, MONGODB_SHARED_LIBADD) AC_DEFINE(HAVE_SYSTEM_LIBMONGOC, 1, [Use system libmongoc]) fi if test "$PHP_LIBBSON" = "no" -a "$PHP_LIBMONGOC" = "no"; then PHP_MONGODB_BUNDLED_CFLAGS="$STD_CFLAGS -DBSON_COMPILATION -DMONGOC_COMPILATION" dnl M4 doesn't know if we're building statically or as a shared module, so dnl attempt to include both paths while ignoring errors. If neither path dnl exists, report an error during configure (this is later than M4 parsing dnl during phpize but better than nothing). m4_pushdef([_include],[ dnl TODO: Fix this for PECL install (PHPC-1218) dnl if test ! 
\( -f "$1" -o -f "ext/mongodb/$1" \); then dnl AC_MSG_ERROR([m4 could not include $1: No such file or directory]) dnl fi m4_builtin([sinclude],[$1]) m4_builtin([sinclude],[ext/mongodb/][$1]) ]) dnl Avoid using AC_CONFIG_MACRO_DIR, which might conflict with PHP _include([scripts/autotools/m4/as_var_copy.m4]) _include([scripts/autotools/m4/ax_check_compile_flag.m4]) _include([scripts/autotools/m4/ax_prototype.m4]) _include([scripts/autotools/m4/ax_pthread.m4]) _include([scripts/autotools/m4/php_mongodb.m4]) _include([scripts/autotools/m4/pkg.m4]) _include([scripts/autotools/CheckCompiler.m4]) _include([scripts/autotools/CheckHost.m4]) _include([scripts/autotools/libbson/CheckAtomics.m4]) _include([scripts/autotools/libbson/CheckHeaders.m4]) _include([scripts/autotools/libbson/Endian.m4]) _include([scripts/autotools/libbson/FindDependencies.m4]) _include([scripts/autotools/libbson/Versions.m4]) _include([scripts/autotools/libmongoc/CheckCompression.m4]) _include([scripts/autotools/libmongoc/CheckResolv.m4]) _include([scripts/autotools/libmongoc/CheckSasl.m4]) _include([scripts/autotools/libmongoc/CheckSSL.m4]) _include([scripts/autotools/libmongoc/CheckICU.m4]) _include([scripts/autotools/libmongoc/FindDependencies.m4]) _include([scripts/autotools/libmongoc/PlatformFlags.m4]) _include([scripts/autotools/libmongoc/Versions.m4]) _include([scripts/autotools/libmongoc/WeakSymbols.m4]) m4_popdef([_include]) AC_SUBST(BSON_EXTRA_ALIGN, 0) AC_SUBST(BSON_OS, 1) AC_SUBST(MONGOC_NO_AUTOMATIC_GLOBALS, 1) AC_SUBST(MONGOC_ENABLE_RDTSCP, 0) AC_SUBST(MONGOC_ENABLE_SHM_COUNTERS, 0) AC_SUBST(MONGOC_TRACE, 1) dnl Assignments for metadata handshake. Leave CFLAGS/LDFLAGS empty as they dnl would likely cause platform info (PHP version) to be truncated. We can dnl consider restoring CFLAGS/LDFLAGS once CDRIVER-3134 is resolved. 
AC_SUBST(MONGOC_CC, [$CC]) AC_SUBST(MONGOC_USER_SET_CFLAGS, []) AC_SUBST(MONGOC_USER_SET_LDFLAGS, []) dnl Generated with: find src/libmongoc/src/common -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' ' PHP_MONGODB_COMMON_SOURCES="common-b64.c common-md5.c" dnl Generated with: find src/libmongoc/src/libbson/src/bson -name '*.c' -print0 | cut -sz -d / -f 7- | sort -z | tr '\000' ' ' PHP_MONGODB_BSON_SOURCES="bcon.c bson-atomic.c bson.c bson-clock.c bson-context.c bson-decimal128.c bson-error.c bson-iso8601.c bson-iter.c bson-json.c bson-keys.c bson-md5.c bson-memory.c bson-oid.c bson-reader.c bson-string.c bson-timegm.c bson-utf8.c bson-value.c bson-version-functions.c bson-writer.c" dnl Generated with: find src/libmongoc/src/libbson/src/jsonsl -name '*.c' -print0 | cut -sz -d / -f 7- | sort -z | tr '\000' ' ' PHP_MONGODB_JSONSL_SOURCES="jsonsl.c" dnl Generated with: find src/libmongoc/src/libmongoc/src/mongoc -name '*.c' -print0 | cut -sz -d / -f 7- | sort -z | tr '\000' ' ' PHP_MONGODB_MONGOC_SOURCES="mongoc-aggregate.c mongoc-apm.c mongoc-array.c mongoc-async.c mongoc-async-cmd.c mongoc-buffer.c mongoc-bulk-operation.c mongoc-change-stream.c mongoc-client.c mongoc-client-pool.c mongoc-client-session.c mongoc-cluster.c mongoc-cluster-cyrus.c mongoc-cluster-sasl.c mongoc-cluster-sspi.c mongoc-cmd.c mongoc-collection.c mongoc-compression.c mongoc-counters.c mongoc-crypto.c mongoc-crypto-cng.c mongoc-crypto-common-crypto.c mongoc-crypto-openssl.c mongoc-cursor-array.c mongoc-cursor.c mongoc-cursor-change-stream.c mongoc-cursor-cmd.c mongoc-cursor-cmd-deprecated.c mongoc-cursor-find.c mongoc-cursor-find-cmd.c mongoc-cursor-find-opquery.c mongoc-cursor-legacy.c mongoc-cyrus.c mongoc-database.c mongoc-error.c mongoc-find-and-modify.c mongoc-gridfs-bucket.c mongoc-gridfs-bucket-file.c mongoc-gridfs.c mongoc-gridfs-file.c mongoc-gridfs-file-list.c mongoc-gridfs-file-page.c mongoc-handshake.c mongoc-host-list.c mongoc-index.c mongoc-init.c mongoc-libressl.c mongoc-linux-distro-scanner.c mongoc-list.c mongoc-log.c mongoc-matcher.c mongoc-matcher-op.c mongoc-memcmp.c mongoc-openssl.c mongoc-opts.c mongoc-opts-helpers.c mongoc-queue.c mongoc-rand-cng.c mongoc-rand-common-crypto.c mongoc-rand-openssl.c mongoc-read-concern.c mongoc-read-prefs.c mongoc-rpc.c mongoc-sasl.c mongoc-scram.c mongoc-secure-channel.c mongoc-secure-transport.c mongoc-server-description.c mongoc-server-stream.c mongoc-set.c mongoc-socket.c mongoc-ssl.c mongoc-sspi.c mongoc-stream-buffered.c mongoc-stream.c mongoc-stream-file.c mongoc-stream-gridfs.c mongoc-stream-gridfs-download.c mongoc-stream-gridfs-upload.c mongoc-stream-socket.c mongoc-stream-tls.c mongoc-stream-tls-libressl.c mongoc-stream-tls-openssl-bio.c mongoc-stream-tls-openssl.c mongoc-stream-tls-secure-channel.c mongoc-stream-tls-secure-transport.c mongoc-topology.c mongoc-topology-description-apm.c mongoc-topology-description.c mongoc-topology-scanner.c mongoc-uri.c mongoc-util.c mongoc-version-functions.c mongoc-write-command.c mongoc-write-command-legacy.c mongoc-write-concern.c" dnl Generated with: find src/libmongoc/src/zlib-1.2.11 -maxdepth 1 -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' ' PHP_MONGODB_ZLIB_SOURCES="adler32.c compress.c crc32.c deflate.c gzclose.c gzlib.c gzread.c gzwrite.c infback.c inffast.c inflate.c inftrees.c trees.c uncompr.c zutil.c" PHP_MONGODB_ADD_SOURCES([src/libmongoc/src/common/], $PHP_MONGODB_COMMON_SOURCES, $PHP_MONGODB_BUNDLED_CFLAGS) PHP_MONGODB_ADD_SOURCES([src/libmongoc/src/libbson/src/bson/], 
$PHP_MONGODB_BSON_SOURCES, $PHP_MONGODB_BUNDLED_CFLAGS) PHP_MONGODB_ADD_SOURCES([src/libmongoc/src/libbson/src/jsonsl/], $PHP_MONGODB_JSONSL_SOURCES, $PHP_MONGODB_BUNDLED_CFLAGS) PHP_MONGODB_ADD_SOURCES([src/libmongoc/src/libmongoc/src/mongoc/], $PHP_MONGODB_MONGOC_SOURCES, $PHP_MONGODB_BUNDLED_CFLAGS) PHP_MONGODB_ADD_INCLUDE([src/libmongoc/src/common/]) PHP_MONGODB_ADD_INCLUDE([src/libmongoc/src/libbson/src/]) PHP_MONGODB_ADD_INCLUDE([src/libmongoc/src/libbson/src/jsonsl/]) PHP_MONGODB_ADD_INCLUDE([src/libmongoc/src/libmongoc/src/]) PHP_MONGODB_ADD_BUILD_DIR([src/libmongoc/src/common/]) PHP_MONGODB_ADD_BUILD_DIR([src/libmongoc/src/libbson/src/bson/]) PHP_MONGODB_ADD_BUILD_DIR([src/libmongoc/src/libbson/src/jsonsl/]) PHP_MONGODB_ADD_BUILD_DIR([src/libmongoc/src/libmongoc/src/mongoc/]) dnl TODO: Use $ext_srcdir if we can move this after PHP_NEW_EXTENSION ac_config_dir=PHP_EXT_SRCDIR(mongodb) AC_CONFIG_FILES([ ${ac_config_dir}/src/libmongoc/src/libbson/src/bson/bson-config.h ${ac_config_dir}/src/libmongoc/src/libbson/src/bson/bson-version.h ${ac_config_dir}/src/libmongoc/src/libmongoc/src/mongoc/mongoc-config.h ${ac_config_dir}/src/libmongoc/src/libmongoc/src/mongoc/mongoc-version.h ]) if test "x$bundled_zlib" = "xyes"; then PHP_MONGODB_ZLIB_CFLAGS="$PHP_MONGODB_BUNDLED_CFLAGS" AC_CHECK_HEADER([unistd.h], [PHP_MONGODB_ZLIB_CFLAGS="$PHP_MONGODB_ZLIB_CFLAGS -DHAVE_UNISTD_H=1"], []) PHP_MONGODB_ADD_SOURCES([src/libmongoc/src/zlib-1.2.11/], $PHP_MONGODB_ZLIB_SOURCES, $PHP_MONGODB_ZLIB_CFLAGS) PHP_MONGODB_ADD_INCLUDE([src/libmongoc/src/zlib-1.2.11/]) PHP_MONGODB_ADD_BUILD_DIR([src/libmongoc/src/zlib-1.2.11/]) AC_CONFIG_FILES([${ac_config_dir}/src/libmongoc/src/zlib-1.2.11/zconf.h]) fi fi PHP_NEW_EXTENSION(mongodb, $PHP_MONGODB_SOURCES, $ext_shared,, $PHP_MONGODB_CFLAGS) PHP_SUBST(MONGODB_SHARED_LIBADD) PHP_ADD_EXTENSION_DEP(mongodb, date) PHP_ADD_EXTENSION_DEP(mongodb, json) PHP_ADD_EXTENSION_DEP(mongodb, spl) PHP_ADD_EXTENSION_DEP(mongodb, standard) PHP_ADD_INCLUDE(PHP_EXT_SRCDIR(mongodb)[/src/BSON/]) PHP_ADD_INCLUDE(PHP_EXT_SRCDIR(mongodb)[/src/MongoDB/]) PHP_ADD_INCLUDE(PHP_EXT_SRCDIR(mongodb)[/src/MongoDB/Exception/]) PHP_ADD_INCLUDE(PHP_EXT_SRCDIR(mongodb)[/src/MongoDB/Monitoring/]) PHP_ADD_INCLUDE(PHP_EXT_SRCDIR(mongodb)[/src/contrib/]) PHP_ADD_BUILD_DIR(PHP_EXT_BUILDDIR(mongodb)[/src/BSON/]) PHP_ADD_BUILD_DIR(PHP_EXT_BUILDDIR(mongodb)[/src/MongoDB/]) PHP_ADD_BUILD_DIR(PHP_EXT_BUILDDIR(mongodb)[/src/MongoDB/Exception/]) PHP_ADD_BUILD_DIR(PHP_EXT_BUILDDIR(mongodb)[/src/MongoDB/Monitoring/]) PHP_ADD_BUILD_DIR(PHP_EXT_BUILDDIR(mongodb)[/src/contrib/]) dnl Necessary to ensure that static builds include "-pthread" when linking if test "$ext_shared" != "yes"; then EXTRA_LDFLAGS_PROGRAM="$EXTRA_LDFLAGS_PROGRAM $EXTRA_LDFLAGS" fi dnl This must come after PHP_NEW_EXTENSION, otherwise the srcdir won't be set PHP_ADD_MAKEFILE_FRAGMENT AC_CONFIG_COMMANDS_POST([ if test "$enable_static" = "no"; then echo " mongodb was configured with the following options: Build configuration: CFLAGS : $CFLAGS Extra CFLAGS : $STD_CFLAGS $EXTRA_CFLAGS Developers flags (slow) : $MAINTAINER_CFLAGS Code Coverage flags (extra slow) : $COVERAGE_CFLAGS System mongoc : $PHP_LIBMONGOC System libbson : $PHP_LIBBSON LDFLAGS : $LDFLAGS EXTRA_LDFLAGS : $EXTRA_LDFLAGS MONGODB_SHARED_LIBADD : $MONGODB_SHARED_LIBADD Please submit bugreports at: https://jira.mongodb.org/browse/PHPC " fi ]) fi dnl: vim: et sw=2 mongodb-1.6.1/config.w320000644000076500000240000003251113572250761014260 0ustar alcaeusstaff// vim:ft=javascript function 
mongodb_generate_header(inpath, outpath, replacements) { STDOUT.WriteLine("Generating " + outpath); var infile = FSO.OpenTextFile(inpath, 1); var outdata = infile.ReadAll(); infile.Close(); for (var key in replacements) { var replacement = replacements[key]; if (typeof replacement === 'string') { replacement = replacement.replace(/"/g, '\\"'); } outdata = outdata.replace(new RegExp('@' + key + '@', 'g'), replacement); } var outfile = FSO.CreateTextFile(outpath, true); outfile.Write(outdata); outfile.Close(); } function mongodb_parse_version_file(inpath, prefix) { var infile = FSO.OpenTextFile(inpath, 1); var version = infile.ReadLine(); infile.Close(); var xyz_pre = version.split("-"); var xyz = xyz_pre[0].split("."); var pre = xyz_pre.length > 1 ? xyz_pre[1] : ""; var replacements = {}; replacements[prefix + "VERSION"] = version; replacements[prefix + "MAJOR_VERSION"] = xyz[0]; replacements[prefix + "MINOR_VERSION"] = xyz[1]; replacements[prefix + "MICRO_VERSION"] = xyz[2]; replacements[prefix + "PRERELEASE_VERSION"] = pre; return replacements; } ARG_ENABLE("mongodb", "MongoDB support", "no"); ARG_WITH("mongodb-sasl", "MongoDB: Build against Cyrus-SASL", "yes"); if (PHP_MONGODB != "no") { /* Note: ADD_EXTENSION_DEP() cannot be used to declare that we depend on the * date and standard extensions. Assume that they're always enabled. */ ADD_EXTENSION_DEP("mongodb", "json", false); ADD_EXTENSION_DEP("mongodb", "spl", false); /* MongoDB does not actually depend on PHP's OpenSSL extension, but this is in * place to ensure that later SSL library checks succeed. This can be removed * once we support building with Secure Channel. */ ADD_EXTENSION_DEP("mongodb", "openssl", false); var PHP_MONGODB_CFLAGS="\ /D BSON_COMPILATION /D MONGOC_COMPILATION \ /I" + configure_module_dirname + " \ /I" + configure_module_dirname + "/src/BSON \ /I" + configure_module_dirname + "/src/MongoDB \ /I" + configure_module_dirname + "/src/MongoDB/Exception \ /I" + configure_module_dirname + "/src/contrib \ /I" + configure_module_dirname + "/src/libmongoc/src/common \ /I" + configure_module_dirname + "/src/libmongoc/src/libbson/src \ /I" + configure_module_dirname + "/src/libmongoc/src/libbson/src/jsonsl \ /I" + configure_module_dirname + "/src/libmongoc/src/libmongoc/src \ "; // Condense whitespace in CFLAGS PHP_MONGODB_CFLAGS = PHP_MONGODB_CFLAGS.replace(/\s+/g, ' '); // Generated with: find src/libmongoc/src/common -name '*.c' -print0 | cut -sz -d / -f 5- | sort -z | tr '\000' ' ' var PHP_MONGODB_COMMON_SOURCES="common-b64.c common-md5.c" // Generated with: find src/libmongoc/src/libbson/src/bson -name '*.c' -print0 | cut -sz -d / -f 7- | sort -z | tr '\000' ' ' var PHP_MONGODB_BSON_SOURCES="bcon.c bson-atomic.c bson.c bson-clock.c bson-context.c bson-decimal128.c bson-error.c bson-iso8601.c bson-iter.c bson-json.c bson-keys.c bson-md5.c bson-memory.c bson-oid.c bson-reader.c bson-string.c bson-timegm.c bson-utf8.c bson-value.c bson-version-functions.c bson-writer.c"; // Generated with: find src/libmongoc/src/libbson/src/jsonsl -name '*.c' -print0 | cut -sz -d / -f 7- | sort -z | tr '\000' ' ' var PHP_MONGODB_JSONSL_SOURCES="jsonsl.c"; // Generated with: find src/libmongoc/src/libmongoc/src/mongoc -name '*.c' -print0 | cut -sz -d / -f 7- | sort -z | tr '\000' ' ' var PHP_MONGODB_MONGOC_SOURCES="mongoc-aggregate.c mongoc-apm.c mongoc-array.c mongoc-async.c mongoc-async-cmd.c mongoc-buffer.c mongoc-bulk-operation.c mongoc-change-stream.c mongoc-client.c mongoc-client-pool.c mongoc-client-session.c mongoc-cluster.c 
mongoc-cluster-cyrus.c mongoc-cluster-sasl.c mongoc-cluster-sspi.c mongoc-cmd.c mongoc-collection.c mongoc-compression.c mongoc-counters.c mongoc-crypto.c mongoc-crypto-cng.c mongoc-crypto-common-crypto.c mongoc-crypto-openssl.c mongoc-cursor-array.c mongoc-cursor.c mongoc-cursor-change-stream.c mongoc-cursor-cmd.c mongoc-cursor-cmd-deprecated.c mongoc-cursor-find.c mongoc-cursor-find-cmd.c mongoc-cursor-find-opquery.c mongoc-cursor-legacy.c mongoc-cyrus.c mongoc-database.c mongoc-error.c mongoc-find-and-modify.c mongoc-gridfs-bucket.c mongoc-gridfs-bucket-file.c mongoc-gridfs.c mongoc-gridfs-file.c mongoc-gridfs-file-list.c mongoc-gridfs-file-page.c mongoc-handshake.c mongoc-host-list.c mongoc-index.c mongoc-init.c mongoc-libressl.c mongoc-linux-distro-scanner.c mongoc-list.c mongoc-log.c mongoc-matcher.c mongoc-matcher-op.c mongoc-memcmp.c mongoc-openssl.c mongoc-opts.c mongoc-opts-helpers.c mongoc-queue.c mongoc-rand-cng.c mongoc-rand-common-crypto.c mongoc-rand-openssl.c mongoc-read-concern.c mongoc-read-prefs.c mongoc-rpc.c mongoc-sasl.c mongoc-scram.c mongoc-secure-channel.c mongoc-secure-transport.c mongoc-server-description.c mongoc-server-stream.c mongoc-set.c mongoc-socket.c mongoc-ssl.c mongoc-sspi.c mongoc-stream-buffered.c mongoc-stream.c mongoc-stream-file.c mongoc-stream-gridfs.c mongoc-stream-gridfs-download.c mongoc-stream-gridfs-upload.c mongoc-stream-socket.c mongoc-stream-tls.c mongoc-stream-tls-libressl.c mongoc-stream-tls-openssl-bio.c mongoc-stream-tls-openssl.c mongoc-stream-tls-secure-channel.c mongoc-stream-tls-secure-transport.c mongoc-topology.c mongoc-topology-description-apm.c mongoc-topology-description.c mongoc-topology-scanner.c mongoc-uri.c mongoc-util.c mongoc-version-functions.c mongoc-write-command.c mongoc-write-command-legacy.c mongoc-write-concern.c"; EXTENSION("mongodb", "php_phongo.c phongo_compat.c", null, PHP_MONGODB_CFLAGS); ADD_SOURCES(configure_module_dirname + "/src", "bson.c bson-encode.c", "mongodb"); ADD_SOURCES(configure_module_dirname + "/src/BSON", "Binary.c BinaryInterface.c DBPointer.c Decimal128.c Decimal128Interface.c Int64.c Javascript.c JavascriptInterface.c MaxKey.c MaxKeyInterface.c MinKey.c MinKeyInterface.c ObjectId.c ObjectIdInterface.c Persistable.c Regex.c RegexInterface.c Serializable.c Symbol.c Timestamp.c TimestampInterface.c Type.c Undefined.c Unserializable.c UTCDateTime.c UTCDateTimeInterface.c functions.c", "mongodb"); ADD_SOURCES(configure_module_dirname + "/src/MongoDB", "BulkWrite.c Command.c Cursor.c CursorId.c CursorInterface.c Manager.c Query.c ReadConcern.c ReadPreference.c Server.c Session.c WriteConcern.c WriteConcernError.c WriteError.c WriteResult.c", "mongodb"); ADD_SOURCES(configure_module_dirname + "/src/MongoDB/Exception", "AuthenticationException.c BulkWriteException.c CommandException.c ConnectionException.c ConnectionTimeoutException.c Exception.c ExecutionTimeoutException.c InvalidArgumentException.c LogicException.c RuntimeException.c ServerException.c SSLConnectionException.c UnexpectedValueException.c WriteException.c", "mongodb"); ADD_SOURCES(configure_module_dirname + "/src/MongoDB/Monitoring", "CommandFailedEvent.c CommandStartedEvent.c CommandSubscriber.c CommandSucceededEvent.c Subscriber.c functions.c", "mongodb"); ADD_SOURCES(configure_module_dirname + "/src/libmongoc/src/common", PHP_MONGODB_COMMON_SOURCES, "mongodb"); ADD_SOURCES(configure_module_dirname + "/src/libmongoc/src/libbson/src/bson", PHP_MONGODB_BSON_SOURCES, "mongodb"); ADD_SOURCES(configure_module_dirname + 
"/src/libmongoc/src/libbson/src/jsonsl", PHP_MONGODB_JSONSL_SOURCES, "mongodb"); ADD_SOURCES(configure_module_dirname + "/src/libmongoc/src/libmongoc/src/mongoc", PHP_MONGODB_MONGOC_SOURCES, "mongodb"); var bson_opts = { BSON_BYTE_ORDER: 1234, BSON_OS: 2, BSON_HAVE_STDBOOL_H: 0, BSON_HAVE_STRINGS_H: 0, BSON_HAVE_ATOMIC_32_ADD_AND_FETCH: 0, BSON_HAVE_ATOMIC_64_ADD_AND_FETCH: 0, BSON_PTHREAD_ONCE_INIT_NEEDS_BRACES: 0, BSON_HAVE_CLOCK_GETTIME: 0, BSON_HAVE_STRNLEN: 0, BSON_HAVE_SNPRINTF: 0, BSON_HAVE_REALLOCF: 0, BSON_NEEDS_SET_OUTPUT_FORMAT: 0, BSON_HAVE_TIMESPEC: 0, BSON_EXTRA_ALIGN: 0, BSON_HAVE_SYSCALL_TID: 0, BSON_HAVE_DECIMAL128: 0, BSON_HAVE_GMTIME_R: 0, BSON_HAVE_RAND_R: 0 }; if (CHECK_FUNC_IN_HEADER("stdio.h", "_set_output_format")) { bson_opts.BSON_NEEDS_SET_OUTPUT_FORMAT = 1; } mongodb_generate_header( configure_module_dirname + "/src/libmongoc/src/libbson/src/bson/bson-config.h.in", configure_module_dirname + "/src/libmongoc/src/libbson/src/bson/bson-config.h", bson_opts ); mongodb_generate_header( configure_module_dirname + "/src/libmongoc/src/libbson/src/bson/bson-version.h.in", configure_module_dirname + "/src/libmongoc/src/libbson/src/bson/bson-version.h", mongodb_parse_version_file(configure_module_dirname + "/src/LIBMONGOC_VERSION_CURRENT", "BSON_") ); var mongoc_opts = { // TODO: Support building with Secure Channel on Windows MONGOC_ENABLE_SSL_SECURE_CHANNEL: 0, MONGOC_ENABLE_CRYPTO_CNG: 0, // Secure Transport does not apply to Windows MONGOC_ENABLE_SSL_SECURE_TRANSPORT: 0, MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO: 0, MONGOC_ENABLE_SSL_LIBRESSL: 0, MONGOC_ENABLE_SSL_OPENSSL: 0, MONGOC_ENABLE_CRYPTO_LIBCRYPTO: 0, MONGOC_ENABLE_SSL: 0, MONGOC_ENABLE_CRYPTO: 0, MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE: 0, MONGOC_ENABLE_COMPRESSION_SNAPPY: 0, MONGOC_ENABLE_COMPRESSION_ZLIB: 0, MONGOC_ENABLE_COMPRESSION_ZSTD: 0, MONGOC_ENABLE_COMPRESSION: 0, MONGOC_ENABLE_SASL: 0, MONGOC_ENABLE_SASL_CYRUS: 0, MONGOC_ENABLE_SASL_GSSAPI: 0, MONGOC_ENABLE_SASL_SSPI: 0, MONGOC_ENABLE_ICU: 0, MONGOC_ENABLE_RDTSCP: 0, MONGOC_ENABLE_SHM_COUNTERS: 0, MONGOC_HAVE_ASN1_STRING_GET0_DATA: 0, MONGOC_HAVE_SASL_CLIENT_DONE: 0, MONGOC_HAVE_SCHED_GETCPU: 0, MONGOC_HAVE_SOCKLEN: 1, MONGOC_HAVE_WEAK_SYMBOLS: 0, MONGOC_NO_AUTOMATIC_GLOBALS: 1, MONGOC_SOCKET_ARG2: "struct sockaddr", MONGOC_SOCKET_ARG3: "socklen_t", MONGOC_TRACE: 1, MONGOC_HAVE_DNSAPI: 0, MONGOC_HAVE_RES_NSEARCH: 0, MONGOC_HAVE_RES_NDESTROY: 0, MONGOC_HAVE_RES_NCLOSE: 0, MONGOC_HAVE_RES_SEARCH: 0, MONGOC_CC: "", MONGOC_USER_SET_CFLAGS: "", MONGOC_USER_SET_LDFLAGS: "" }; var mongoc_ssl_path_to_check = PHP_MONGODB; if (typeof PHP_OPENSSL === 'string') { mongoc_ssl_path_to_check += ";" + PHP_OPENSSL; } var mongoc_ssl_found = false; /* PHP 7.1.2 introduced SETUP_OPENSSL(), which supports OpenSSL 1.1.x. Earlier * versions will use the legacy check for OpenSSL 1.0.x and lower. 
*/ if (typeof SETUP_OPENSSL === 'function') { openssl_type = SETUP_OPENSSL("mongodb", mongoc_ssl_path_to_check); mongoc_ssl_found = openssl_type > 0; if (openssl_type >= 2) { mongoc_opts.MONGOC_HAVE_ASN1_STRING_GET0_DATA = 1; } } else if (CHECK_LIB("ssleay32.lib", "mongodb", mongoc_ssl_path_to_check) && CHECK_LIB("libeay32.lib", "mongodb", mongoc_ssl_path_to_check) && CHECK_LIB("crypt32.lib", "mongodb", mongoc_ssl_path_to_check) && CHECK_HEADER_ADD_INCLUDE("openssl/ssl.h", "CFLAGS_MONGODB")) { mongoc_ssl_found = true; } if (mongoc_ssl_found) { mongoc_opts.MONGOC_ENABLE_SSL_OPENSSL = 1; mongoc_opts.MONGOC_ENABLE_CRYPTO_LIBCRYPTO = 1; mongoc_opts.MONGOC_ENABLE_SSL = 1; mongoc_opts.MONGOC_ENABLE_CRYPTO = 1; } else { WARNING("mongodb libopenssl support not enabled, libs not found"); } // TODO: Support building with native GSSAPI (SSPI) on Windows if (PHP_MONGODB_SASL != "no" && CHECK_LIB("libsasl.lib", "mongodb", PHP_MONGODB) && CHECK_HEADER_ADD_INCLUDE("sasl/sasl.h", "CFLAGS_MONGODB")) { mongoc_opts.MONGOC_ENABLE_SASL = 1; mongoc_opts.MONGOC_ENABLE_SASL_CYRUS = 1; if (CHECK_FUNC_IN_HEADER("sasl/sasl.h", "sasl_client_done")) { mongoc_opts.MONGOC_HAVE_SASL_CLIENT_DONE = 1; } } else { WARNING("mongodb libsasl support not enabled, libs not found"); } if (CHECK_LIB("dnsapi.lib", "mongodb", PHP_MONGODB) && CHECK_HEADER_ADD_INCLUDE("windns.h", "CFLAGS_MONGODB")) { mongoc_opts.MONGOC_HAVE_DNSAPI = 1; } if (CHECK_LIB("icuuc.lib", "mongodb", PHP_MONGODB) && CHECK_HEADER_ADD_INCLUDE("unicode/utf.h", "CFLAGS_MONGODB")) { mongoc_opts.MONGODB_ENABLE_ICU = 1; ADD_FLAG("LIBS_MONGODB", "icudt.lib icuin.lib icuio.lib"); /* Compat for ICU before 58.1.*/ if (CHECK_LIB("icule.lib", "mongodb", PHP_MONGODB)) { ADD_FLAG("LIBS_MONGODB", "icule.lib"); } if (CHECK_LIB("iculx.lib", "mongodb", PHP_MONGODB)) { ADD_FLAG("LIBS_MONGODB", "iculx.lib"); } ADD_FLAG("CFLAGS_MONGODB", "/EHsc /D U_USING_ICU_NAMESPACE=1"); } if (typeof COMPILER_NAME === 'string') { mongoc_opts.MONGOC_CC = COMPILER_NAME; } else if (typeof VC_VERSIONS === 'array' && typeof VC_VERSIONS[VCVERS] === 'string') { mongoc_opts.MONGOC_CC = VC_VERSIONS[VCVERS]; } else if (typeof COMPILER_NAME_LONG === 'string') { mongoc_opts.MONGOC_CC = COMPILER_NAME_LONG; } /* MONGOC_USER_SET_CFLAGS and MONGOC_USER_SET_LDFLAGS can be left blank, as we * do not expect CFLAGS or LDFLAGS to be customized at build time. */ mongodb_generate_header( configure_module_dirname + "/src/libmongoc/src/libmongoc/src/mongoc/mongoc-config.h.in", configure_module_dirname + "/src/libmongoc/src/libmongoc/src/mongoc/mongoc-config.h", mongoc_opts ); mongodb_generate_header( configure_module_dirname + "/src/libmongoc/src/libmongoc/src/mongoc/mongoc-version.h.in", configure_module_dirname + "/src/libmongoc/src/libmongoc/src/mongoc/mongoc-version.h", mongodb_parse_version_file(configure_module_dirname + "/src/LIBMONGOC_VERSION_CURRENT", "MONGOC_") ); } mongodb-1.6.1/phongo_compat.c0000644000076500000240000000422013572250761015453 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
* See the License for the specific language governing permissions and * limitations under the License. */ /* Our Compatability header */ #include "phongo_compat.h" void phongo_add_exception_prop(const char* prop, int prop_len, zval* value TSRMLS_DC) { if (EG(exception)) { #if PHP_VERSION_ID >= 70000 zval ex; EXCEPTION_P(EG(exception), ex); zend_update_property(Z_OBJCE(ex), &ex, prop, prop_len, value); #else zval* ex = NULL; EXCEPTION_P(EG(exception), ex); zend_update_property(Z_OBJCE_P(ex), ex, prop, prop_len, value TSRMLS_CC); #endif } } #ifdef ZEND_HASH_GET_APPLY_COUNT /* PHP 7.2 or earlier recursion protection */ zend_bool php_phongo_zend_hash_apply_protection_begin(HashTable* ht) { if (!ht) { return 1; } if (ZEND_HASH_GET_APPLY_COUNT(ht) > 0) { return 0; } if (ZEND_HASH_APPLY_PROTECTION(ht)) { ZEND_HASH_INC_APPLY_COUNT(ht); } return 1; } zend_bool php_phongo_zend_hash_apply_protection_end(HashTable* ht) { if (!ht) { return 1; } if (ZEND_HASH_GET_APPLY_COUNT(ht) == 0) { return 0; } if (ZEND_HASH_APPLY_PROTECTION(ht)) { ZEND_HASH_DEC_APPLY_COUNT(ht); } return 1; } #else /* PHP 7.3 or later */ zend_bool php_phongo_zend_hash_apply_protection_begin(zend_array* ht) { if (GC_IS_RECURSIVE(ht)) { return 0; } if (!(GC_FLAGS(ht) & GC_IMMUTABLE)) { GC_PROTECT_RECURSION(ht); } return 1; } zend_bool php_phongo_zend_hash_apply_protection_end(zend_array* ht) { if (!GC_IS_RECURSIVE(ht)) { return 0; } if (!(GC_FLAGS(ht) & GC_IMMUTABLE)) { GC_UNPROTECT_RECURSION(ht); } return 1; } #endif /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/phongo_compat.h0000644000076500000240000002346613572250761015475 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef PHONGO_COMPAT_H #define PHONGO_COMPAT_H #include #include #if PHP_VERSION_ID >= 70000 #include #endif #ifdef PHP_WIN32 #include "config.w32.h" #else #include #endif #ifndef PHP_FE_END #define PHP_FE_END \ { \ NULL, NULL, NULL \ } #endif #ifndef HASH_KEY_NON_EXISTENT #define HASH_KEY_NON_EXISTENT HASH_KEY_NON_EXISTANT #endif #if PHP_VERSION_ID >= 70000 #define str_efree(s) efree((char*) s) #else #include #endif #if defined(__GNUC__) #define ARG_UNUSED __attribute__((unused)) #else #define ARG_UNUSED #endif #if defined(__GNUC__) #define PHONGO_GNUC_CHECK_VERSION(major, minor) \ ((__GNUC__ > (major)) || \ ((__GNUC__ == (major)) && (__GNUC_MINOR__ >= (minor)))) #else #define PHONGO_GNUC_CHECK_VERSION(major, minor) 0 #endif #if PHONGO_GNUC_CHECK_VERSION(7, 0) #define PHONGO_BREAK_INTENTIONALLY_MISSING __attribute__((fallthrough)); #else #define PHONGO_BREAK_INTENTIONALLY_MISSING #endif #if PHP_VERSION_ID >= 70000 #define phongo_char zend_string #define phongo_long zend_long #if SIZEOF_ZEND_LONG == 8 #define PHONGO_LONG_FORMAT PRId64 #elif SIZEOF_ZEND_LONG == 4 #define PHONGO_LONG_FORMAT PRId32 #else #error Unsupported architecture (integers are neither 32-bit nor 64-bit) #endif #define SIZEOF_PHONGO_LONG SIZEOF_ZEND_LONG #define phongo_create_object_retval zend_object* #define phongo_get_gc_table zval** #define PHONGO_ALLOC_OBJECT_T(_obj_t, _class_type) (_obj_t*) ecalloc(1, sizeof(_obj_t) + zend_object_properties_size(_class_type)) #define PHONGO_TSRMLS_FETCH_FROM_CTX(user_data) #define DECLARE_RETURN_VALUE_USED int return_value_used = 1; #define EXCEPTION_P(_ex, _zp) ZVAL_OBJ(&_zp, _ex) #define ADD_ASSOC_STRING(_zv, _key, _value) add_assoc_string_ex(_zv, ZEND_STRL(_key), (char*) (_value)); #define ADD_ASSOC_STRINGL(_zv, _key, _value, _len) add_assoc_stringl_ex(_zv, ZEND_STRL(_key), (char*) (_value), _len); #define ADD_ASSOC_STRING_EX(_zv, _key, _key_len, _value, _value_len) add_assoc_stringl_ex(_zv, _key, _key_len, (char*) (_value), _value_len); #define ADD_ASSOC_LONG_EX(_zv, _key, _value) add_assoc_long_ex(_zv, ZEND_STRL(_key), _value); #define ADD_ASSOC_ZVAL_EX(_zv, _key, _value) add_assoc_zval_ex(_zv, ZEND_STRL(_key), _value); #define ADD_ASSOC_ZVAL(_zv, _key, _value) add_assoc_zval(_zv, _key, _value); #define ADD_ASSOC_NULL_EX(_zv, _key) add_assoc_null_ex(_zv, ZEND_STRL(_key)); #define ADD_ASSOC_BOOL_EX(_zv, _key, _value) add_assoc_bool_ex(_zv, ZEND_STRL(_key), _value); #define ADD_NEXT_INDEX_STRINGL(_zv, _value, _len) add_next_index_stringl(_zv, _value, _len); #define phongo_free_object_arg zend_object #define phongo_zpp_char_len size_t #define ZEND_HASH_APPLY_COUNT(ht) (ht)->u.v.nApplyCount #define PHONGO_RETVAL_STRINGL(s, slen) RETVAL_STRINGL(s, slen) #define PHONGO_RETURN_STRINGL(s, slen) RETURN_STRINGL(s, slen) #define PHONGO_RETVAL_STRING(s) RETVAL_STRING(s) #define PHONGO_RETURN_STRING(s) RETURN_STRING(s) #define PHONGO_RETVAL_SMART_STR(val) PHONGO_RETVAL_STRINGL(ZSTR_VAL((val).s), ZSTR_LEN((val).s)); #define ZVAL_RETVAL_TYPE zval #define ZVAL_STATIC_INIT \ { \ { \ 0 \ } \ } #else #define phongo_char char #define phongo_long long #define PHONGO_LONG_FORMAT "ld" #define SIZEOF_PHONGO_LONG SIZEOF_LONG #define ZSTR_VAL(str) str #define phongo_create_object_retval zend_object_value #define phongo_get_gc_table zval*** #define PHONGO_ALLOC_OBJECT_T(_obj_t, _class_type) (_obj_t*) ecalloc(1, sizeof(_obj_t)) #define PHONGO_TSRMLS_FETCH_FROM_CTX(user_data) TSRMLS_FETCH_FROM_CTX(user_data) #define DECLARE_RETURN_VALUE_USED #define EXCEPTION_P(_ex, _zp) _zp = _ex #define 
ADD_ASSOC_STRING(_zv, _key, _value) add_assoc_string_ex(_zv, ZEND_STRS(_key), (char*) (_value), 1); #define ADD_ASSOC_STRINGL(_zv, _key, _value, _len) add_assoc_stringl_ex(_zv, ZEND_STRS(_key), (char*) (_value), _len, 1); #define ADD_ASSOC_STRING_EX(_zv, _key, _key_len, _value, _value_len) add_assoc_stringl_ex(_zv, _key, _key_len + 1, (char*) (_value), _value_len, 1); #define ADD_ASSOC_LONG_EX(_zv, _key, _value) add_assoc_long_ex(_zv, ZEND_STRS(_key), _value); #define ADD_ASSOC_ZVAL_EX(_zv, _key, _value) add_assoc_zval_ex(_zv, ZEND_STRS(_key), _value); #define ADD_ASSOC_ZVAL(_zv, _key, _value) add_assoc_zval(_zv, _key, _value); #define ADD_ASSOC_NULL_EX(_zv, _key) add_assoc_null_ex(_zv, ZEND_STRS(_key)); #define ADD_ASSOC_BOOL_EX(_zv, _key, _value) add_assoc_bool_ex(_zv, ZEND_STRS(_key), _value); #define ADD_NEXT_INDEX_STRINGL(_zv, _value, _len) add_next_index_stringl(_zv, _value, _len, 1); #define Z_PHPDATE_P(object) ((php_date_obj*) zend_object_store_get_object(object TSRMLS_CC)) #define Z_ISUNDEF(x) !x #define ZVAL_UNDEF(x) \ do { \ (*x) = NULL; \ } while (0) #define ZVAL_ARR(z, a) \ do { \ HashTable* __arr = (a); \ zval* __z = (z); \ Z_ARRVAL_P(__z) = __arr; \ Z_TYPE_P(__z) = IS_ARRAY; \ } while (0); #define phongo_free_object_arg void #define phongo_zpp_char_len int #define ZEND_HASH_APPLY_PROTECTION(ht) 1 #define ZEND_HASH_GET_APPLY_COUNT(ht) ((ht)->nApplyCount) #define ZEND_HASH_DEC_APPLY_COUNT(ht) ((ht)->nApplyCount -= 1) #define ZEND_HASH_INC_APPLY_COUNT(ht) ((ht)->nApplyCount += 1) #define PHONGO_RETVAL_STRINGL(s, slen) RETVAL_STRINGL(s, slen, 1) #define PHONGO_RETURN_STRINGL(s, slen) RETURN_STRINGL(s, slen, 1) #define PHONGO_RETVAL_STRING(s) RETVAL_STRING(s, 1) #define PHONGO_RETURN_STRING(s) RETURN_STRING(s, 1) #define PHONGO_RETVAL_SMART_STR(val) PHONGO_RETVAL_STRINGL((val).c, (val).len); #define ZVAL_RETVAL_TYPE zval* #define ZVAL_STATIC_INIT zval_used_for_init #endif #if SIZEOF_PHONGO_LONG == 8 #define ADD_INDEX_INT64(_zv, _index, _value) add_index_long((_zv), (_index), (_value)) #define ADD_NEXT_INDEX_INT64(_zv, _value) add_next_index_long((_zv), (_value)) #define ADD_ASSOC_INT64(_zv, _key, _value) add_assoc_long((_zv), (_key), (_value)) #elif SIZEOF_PHONGO_LONG == 4 #if PHP_VERSION_ID >= 70000 #define ADD_INDEX_INT64(_zv, _index, _value) \ if ((_value) > INT32_MAX || (_value) < INT32_MIN) { \ zval zchild; \ php_phongo_new_int64(&zchild, (_value) TSRMLS_CC); \ add_index_zval((_zv), (_index), &zchild); \ } else { \ add_index_long((_zv), (_index), (_value)); \ } #define ADD_NEXT_INDEX_INT64(_zv, _value) \ if ((_value) > INT32_MAX || (_value) < INT32_MIN) { \ zval zchild; \ php_phongo_new_int64(&zchild, (_value) TSRMLS_CC); \ add_next_index_zval((_zv), &zchild); \ } else { \ add_next_index_long((_zv), (_value)); \ } #define ADD_ASSOC_INT64(_zv, _key, _value) \ if ((_value) > INT32_MAX || (_value) < INT32_MIN) { \ zval zchild; \ php_phongo_new_int64(&zchild, (_value) TSRMLS_CC); \ add_assoc_zval((_zv), (_key), &zchild); \ } else { \ add_assoc_long((_zv), (_key), (_value)); \ } #else /* PHP_VERSION_ID < 70000 */ #define ADD_INDEX_INT64(_zv, _index, _value) \ if ((_value) > INT32_MAX || (_value) < INT32_MIN) { \ zval* zchild = NULL; \ TSRMLS_FETCH(); \ MAKE_STD_ZVAL(zchild); \ php_phongo_new_int64(zchild, (_value) TSRMLS_CC); \ add_index_zval((_zv), (_index), zchild); \ } else { \ add_index_long((_zv), (_index), (_value)); \ } #define ADD_NEXT_INDEX_INT64(_zv, _value) \ if ((_value) > INT32_MAX || (_value) < INT32_MIN) { \ zval* zchild = NULL; \ TSRMLS_FETCH(); \ 
MAKE_STD_ZVAL(zchild); \ php_phongo_new_int64(zchild, (_value) TSRMLS_CC); \ add_next_index_zval((_zv), zchild); \ } else { \ add_next_index_long((_zv), (_value)); \ } #define ADD_ASSOC_INT64(_zv, _key, _value) \ if ((_value) > INT32_MAX || (_value) < INT32_MIN) { \ zval* zchild = NULL; \ TSRMLS_FETCH(); \ MAKE_STD_ZVAL(zchild); \ php_phongo_new_int64(zchild, (_value) TSRMLS_CC); \ add_assoc_zval((_zv), (_key), zchild); \ } else { \ add_assoc_long((_zv), (_key), (_value)); \ } #endif /* PHP_VERSION_ID */ #else /* SIZEOF_PHONGO_LONG != 8 && SIZEOF_PHONGO_LONG != 4 */ #error Unsupported architecture (integers are neither 32-bit nor 64-bit) #endif /* SIZEOF_PHONGO_LONG */ void phongo_add_exception_prop(const char* prop, int prop_len, zval* value TSRMLS_DC); zend_bool php_phongo_zend_hash_apply_protection_begin(HashTable* ht); zend_bool php_phongo_zend_hash_apply_protection_end(HashTable* ht); #endif /* PHONGO_COMPAT_H */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/phongo_version.h0000644000076500000240000000151413572250761015665 0ustar alcaeusstaff/* * Copyright 2014-2018 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef PHONGO_VERSION_H #define PHONGO_VERSION_H /* clang-format off */ #define PHP_MONGODB_VERSION "1.6.1" #define PHP_MONGODB_STABILITY "stable" #define PHP_MONGODB_VERSION_DESC 1,6,1,0 /* clang-format on */ #endif /* PHONGO_VERSION_H */ mongodb-1.6.1/php_bson.h0000644000076500000240000001160013572250761014433 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef PHONGO_BSON_H #define PHONGO_BSON_H #include /* PHP Core stuff */ #include #define BSON_UNSERIALIZE_FUNC_NAME "bsonUnserialize" #define BSON_SERIALIZE_FUNC_NAME "bsonSerialize" #define PHONGO_ODM_FIELD_NAME "__pclass" typedef enum { PHONGO_BSON_NONE = 0x00, PHONGO_BSON_ADD_ID = 0x01, PHONGO_BSON_RETURN_ID = 0x02 } php_phongo_bson_flags_t; typedef enum { PHONGO_TYPEMAP_NONE, PHONGO_TYPEMAP_NATIVE_ARRAY, PHONGO_TYPEMAP_NATIVE_OBJECT, PHONGO_TYPEMAP_CLASS } php_phongo_bson_typemap_types; typedef enum { PHONGO_FIELD_PATH_ITEM_NONE, PHONGO_FIELD_PATH_ITEM_ARRAY, PHONGO_FIELD_PATH_ITEM_DOCUMENT } php_phongo_bson_field_path_item_types; typedef struct { char** elements; php_phongo_bson_field_path_item_types* element_types; size_t allocated_size; size_t size; size_t ref_count; bool owns_elements; } php_phongo_field_path; typedef struct _php_phongo_field_path_map_element { php_phongo_field_path* entry; php_phongo_bson_typemap_types node_type; zend_class_entry* node_ce; } php_phongo_field_path_map_element; typedef struct { php_phongo_bson_typemap_types document_type; zend_class_entry* document; php_phongo_bson_typemap_types array_type; zend_class_entry* array; php_phongo_bson_typemap_types root_type; zend_class_entry* root; struct { php_phongo_field_path_map_element** map; size_t allocated_size; size_t size; } field_paths; } php_phongo_bson_typemap; typedef struct { ZVAL_RETVAL_TYPE zchild; php_phongo_bson_typemap map; zend_class_entry* odm; bool is_visiting_array; php_phongo_field_path* field_path; } php_phongo_bson_state; #if PHP_VERSION_ID >= 70000 #define PHONGO_BSON_STATE_INITIALIZER \ { \ { { 0 } }, { PHONGO_TYPEMAP_NONE, NULL, PHONGO_TYPEMAP_NONE, NULL, PHONGO_TYPEMAP_NONE, NULL }, NULL, NULL \ } #else #define PHONGO_BSON_STATE_INITIALIZER \ { \ NULL, { PHONGO_TYPEMAP_NONE, NULL, PHONGO_TYPEMAP_NONE, NULL, PHONGO_TYPEMAP_NONE, NULL }, NULL, NULL \ } #endif void php_phongo_zval_to_bson(zval* data, php_phongo_bson_flags_t flags, bson_t* bson, bson_t** bson_out TSRMLS_DC); bool php_phongo_bson_to_zval_ex(const unsigned char* data, int data_len, php_phongo_bson_state* state); #if PHP_VERSION_ID >= 70000 bool php_phongo_bson_to_zval(const unsigned char* data, int data_len, zval* out); #else bool php_phongo_bson_to_zval(const unsigned char* data, int data_len, zval** out); #endif bool php_phongo_bson_typemap_to_state(zval* typemap, php_phongo_bson_typemap* map TSRMLS_DC); void php_phongo_bson_state_ctor(php_phongo_bson_state* state); void php_phongo_bson_state_dtor(php_phongo_bson_state* state); void php_phongo_bson_state_copy_ctor(php_phongo_bson_state* dst, php_phongo_bson_state* src); void php_phongo_bson_typemap_dtor(php_phongo_bson_typemap* map); php_phongo_field_path* php_phongo_field_path_alloc(bool owns_elements); void php_phongo_field_path_free(php_phongo_field_path* field_path); void php_phongo_field_path_write_item_at_current_level(php_phongo_field_path* field_path, const char* element); void php_phongo_field_path_write_type_at_current_level(php_phongo_field_path* field_path, php_phongo_bson_field_path_item_types element_type); bool php_phongo_field_path_push(php_phongo_field_path* field_path, const char* element, php_phongo_bson_field_path_item_types element_type); bool php_phongo_field_path_pop(php_phongo_field_path* field_path); char* php_phongo_field_path_as_string(php_phongo_field_path* field_path); #endif /* PHONGO_BSON_H */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ 
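/* Illustrative sketch, not part of the driver source: a hypothetical helper
 * showing how the conversion API declared above might be used to decode a raw
 * BSON reply into a zval on the PHP 7 code path, mirroring the pattern used by
 * the error/result initializers in php_phongo.c below. */
#include <php.h>

#include "bson/bson.h"
#include "php_bson.h"

static bool example_decode_reply(const bson_t* reply, zval* out)
{
	/* php_phongo_bson_to_zval() applies the default type map; it returns false
	 * on failure, in which case the partially built zval is discarded. */
	if (!php_phongo_bson_to_zval(bson_get_data(reply), reply->len, out)) {
		zval_ptr_dtor(out);
		ZVAL_UNDEF(out);
		return false;
	}

	return true;
}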
mongodb-1.6.1/php_phongo.c0000644000076500000240000032555113572250761014774 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* External libs */ #include "bson/bson.h" #include "mongoc/mongoc.h" /* PHP Core stuff */ #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_CONFIG_H #include "config.h" #endif #if PHP_VERSION_ID >= 70000 #include #else #include #endif /* getpid() */ #if HAVE_UNISTD_H #include #endif #ifdef PHP_WIN32 #include #endif /* Stream wrapper */ #include

#include
/* Debug log writing */ #include
/* For formatting timestamp in the log */
#include
/* String manipulation */
#include
/* PHP array helpers */
#include "php_array_api.h"

/* Our Compatibility header */
#include "phongo_compat.h"

/* Our stuffz */
#include "php_phongo.h"
#include "php_bson.h"
#include "src/BSON/functions.h"
#include "src/MongoDB/Monitoring/functions.h"

#undef MONGOC_LOG_DOMAIN
#define MONGOC_LOG_DOMAIN "PHONGO"

#define PHONGO_DEBUG_INI "mongodb.debug"
#define PHONGO_DEBUG_INI_DEFAULT ""

ZEND_DECLARE_MODULE_GLOBALS(mongodb)
#if PHP_VERSION_ID >= 70000
#if defined(ZTS) && defined(COMPILE_DL_MONGODB)
ZEND_TSRMLS_CACHE_DEFINE();
#endif
#endif

/* Declare zend_class_entry dependencies, which are initialized in MINIT */
zend_class_entry* php_phongo_date_immutable_ce;
zend_class_entry* php_phongo_json_serializable_ce;

php_phongo_server_description_type_map_t php_phongo_server_description_type_map[PHONGO_SERVER_DESCRIPTION_TYPES] = {
	{ PHONGO_SERVER_UNKNOWN, "Unknown" },
	{ PHONGO_SERVER_STANDALONE, "Standalone" },
	{ PHONGO_SERVER_MONGOS, "Mongos" },
	{ PHONGO_SERVER_POSSIBLE_PRIMARY, "PossiblePrimary" },
	{ PHONGO_SERVER_RS_PRIMARY, "RSPrimary" },
	{ PHONGO_SERVER_RS_SECONDARY, "RSSecondary" },
	{ PHONGO_SERVER_RS_ARBITER, "RSArbiter" },
	{ PHONGO_SERVER_RS_OTHER, "RSOther" },
	{ PHONGO_SERVER_RS_GHOST, "RSGhost" },
};

/* {{{ phongo_std_object_handlers */
zend_object_handlers phongo_std_object_handlers;

zend_object_handlers* phongo_get_std_object_handlers(void)
{
	return &phongo_std_object_handlers;
}
/* }}} */

/* Forward declarations */
static bool phongo_split_namespace(const char* namespace, char** dbname, char** cname);

/* {{{ Error reporting and logging */
zend_class_entry* phongo_exception_from_phongo_domain(php_phongo_error_domain_t domain)
{
	switch (domain) {
		case PHONGO_ERROR_INVALID_ARGUMENT:
			return php_phongo_invalidargumentexception_ce;
		case PHONGO_ERROR_LOGIC:
			return php_phongo_logicexception_ce;
		case PHONGO_ERROR_RUNTIME:
			return php_phongo_runtimeexception_ce;
		case PHONGO_ERROR_UNEXPECTED_VALUE:
			return php_phongo_unexpectedvalueexception_ce;
		case PHONGO_ERROR_MONGOC_FAILED:
			return php_phongo_runtimeexception_ce;
		case PHONGO_ERROR_CONNECTION_FAILED:
			return php_phongo_connectionexception_ce;
	}

	MONGOC_ERROR("Resolving unknown phongo error domain: %d", domain);
	return php_phongo_runtimeexception_ce;
}

zend_class_entry* phongo_exception_from_mongoc_domain(uint32_t /* mongoc_error_domain_t */ domain, uint32_t /* mongoc_error_code_t */ code)
{
	if (domain == MONGOC_ERROR_CLIENT && code == MONGOC_ERROR_CLIENT_AUTHENTICATE) {
		return php_phongo_authenticationexception_ce;
	}

	if (domain == MONGOC_ERROR_COMMAND && code == MONGOC_ERROR_COMMAND_INVALID_ARG) {
		return php_phongo_invalidargumentexception_ce;
	}

	if (domain == MONGOC_ERROR_SERVER) {
		if (code == PHONGO_SERVER_ERROR_EXCEEDED_TIME_LIMIT) {
			return php_phongo_executiontimeoutexception_ce;
		}
		return php_phongo_serverexception_ce;
	}

	if (domain == MONGOC_ERROR_SERVER_SELECTION && code == MONGOC_ERROR_SERVER_SELECTION_FAILURE) {
		return php_phongo_connectiontimeoutexception_ce;
	}

	if (domain == MONGOC_ERROR_STREAM) {
		if (code == MONGOC_ERROR_STREAM_SOCKET) {
			return php_phongo_connectiontimeoutexception_ce;
		}
		return php_phongo_connectionexception_ce;
	}

	if (domain == MONGOC_ERROR_WRITE_CONCERN) {
		return php_phongo_serverexception_ce;
	}

	if (domain == MONGOC_ERROR_PROTOCOL && code == MONGOC_ERROR_PROTOCOL_BAD_WIRE_VERSION) {
		return php_phongo_connectionexception_ce;
	}

	return php_phongo_runtimeexception_ce;
}

void phongo_throw_exception(php_phongo_error_domain_t domain TSRMLS_DC,
const char* format, ...) { va_list args; char* message; int message_len; va_start(args, format); message_len = vspprintf(&message, 0, format, args); zend_throw_exception(phongo_exception_from_phongo_domain(domain), message, 0 TSRMLS_CC); efree(message); va_end(args); } static void phongo_exception_add_error_labels(const bson_t* reply TSRMLS_DC) { bson_iter_t iter; if (!reply) { return; } if (bson_iter_init_find(&iter, reply, "errorLabels")) { bson_iter_t error_labels; #if PHP_VERSION_ID >= 70000 zval labels; array_init(&labels); #else zval* labels = NULL; ALLOC_INIT_ZVAL(labels); array_init(labels); #endif bson_iter_recurse(&iter, &error_labels); while (bson_iter_next(&error_labels)) { if (BSON_ITER_HOLDS_UTF8(&error_labels)) { const char* error_label; uint32_t error_label_len; error_label = bson_iter_utf8(&error_labels, &error_label_len); #if PHP_VERSION_ID >= 70000 ADD_NEXT_INDEX_STRINGL(&labels, error_label, error_label_len); #else ADD_NEXT_INDEX_STRINGL(labels, error_label, error_label_len); #endif } } #if PHP_VERSION_ID >= 70000 phongo_add_exception_prop(ZEND_STRL("errorLabels"), &labels); #else phongo_add_exception_prop(ZEND_STRL("errorLabels"), labels TSRMLS_CC); #endif zval_ptr_dtor(&labels); } } void phongo_throw_exception_from_bson_error_t_and_reply(bson_error_t* error, const bson_t* reply TSRMLS_DC) { /* Server errors (other than ExceededTimeLimit) and write concern errors * may use CommandException and report the result document for the * failed command. For BC, ExceededTimeLimit errors will continue to use * ExcecutionTimeoutException and omit the result document. */ if (reply && ((error->domain == MONGOC_ERROR_SERVER && error->code != PHONGO_SERVER_ERROR_EXCEEDED_TIME_LIMIT) || error->domain == MONGOC_ERROR_WRITE_CONCERN)) { #if PHP_VERSION_ID >= 70000 zval zv; #else zval* zv; #endif zend_throw_exception(php_phongo_commandexception_ce, error->message, error->code TSRMLS_CC); php_phongo_bson_to_zval(bson_get_data(reply), reply->len, &zv); #if PHP_VERSION_ID >= 70000 phongo_add_exception_prop(ZEND_STRL("resultDocument"), &zv); #else phongo_add_exception_prop(ZEND_STRL("resultDocument"), zv TSRMLS_CC); #endif zval_ptr_dtor(&zv); } else { zend_throw_exception(phongo_exception_from_mongoc_domain(error->domain, error->code), error->message, error->code TSRMLS_CC); } phongo_exception_add_error_labels(reply TSRMLS_CC); } void phongo_throw_exception_from_bson_error_t(bson_error_t* error TSRMLS_DC) { phongo_throw_exception_from_bson_error_t_and_reply(error, NULL TSRMLS_CC); } static void php_phongo_log(mongoc_log_level_t log_level, const char* log_domain, const char* message, void* user_data) { struct timeval tv; time_t t; phongo_long tu; phongo_char* dt; PHONGO_TSRMLS_FETCH_FROM_CTX(user_data); (void) user_data; gettimeofday(&tv, NULL); t = tv.tv_sec; tu = tv.tv_usec; dt = php_format_date((char*) ZEND_STRL("Y-m-d\\TH:i:s"), t, 0 TSRMLS_CC); fprintf(MONGODB_G(debug_fd), "[%s.%06" PHONGO_LONG_FORMAT "+00:00] %10s: %-8s> %s\n", ZSTR_VAL(dt), tu, log_domain, mongoc_log_level_str(log_level), message); fflush(MONGODB_G(debug_fd)); efree(dt); } /* }}} */ /* {{{ Init objects */ static void phongo_cursor_init(zval* return_value, mongoc_client_t* client, mongoc_cursor_t* cursor, zval* readPreference, zval* session TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* intern; object_init_ex(return_value, php_phongo_cursor_ce); intern = Z_CURSOR_OBJ_P(return_value); intern->cursor = cursor; intern->server_id = mongoc_cursor_get_hint(cursor); intern->client = client; intern->advanced = false; if 
(readPreference) { #if PHP_VERSION_ID >= 70000 ZVAL_ZVAL(&intern->read_preference, readPreference, 1, 0); #else Z_ADDREF_P(readPreference); intern->read_preference = readPreference; #endif } if (session) { #if PHP_VERSION_ID >= 70000 ZVAL_ZVAL(&intern->session, session, 1, 0); #else Z_ADDREF_P(session); intern->session = session; #endif } } /* }}} */ static void phongo_cursor_init_for_command(zval* return_value, mongoc_client_t* client, mongoc_cursor_t* cursor, const char* db, zval* command, zval* readPreference, zval* session TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* intern; phongo_cursor_init(return_value, client, cursor, readPreference, session TSRMLS_CC); intern = Z_CURSOR_OBJ_P(return_value); intern->database = estrdup(db); #if PHP_VERSION_ID >= 70000 ZVAL_ZVAL(&intern->command, command, 1, 0); #else Z_ADDREF_P(command); intern->command = command; #endif } /* }}} */ static void phongo_cursor_init_for_query(zval* return_value, mongoc_client_t* client, mongoc_cursor_t* cursor, const char* namespace, zval* query, zval* readPreference, zval* session TSRMLS_DC) /* {{{ */ { php_phongo_cursor_t* intern; phongo_cursor_init(return_value, client, cursor, readPreference, session TSRMLS_CC); intern = Z_CURSOR_OBJ_P(return_value); /* namespace has already been validated by phongo_execute_query() */ phongo_split_namespace(namespace, &intern->database, &intern->collection); /* cursor has already been advanced by phongo_execute_query() calling * phongo_cursor_advance_and_check_for_error() */ intern->advanced = true; #if PHP_VERSION_ID >= 70000 ZVAL_ZVAL(&intern->query, query, 1, 0); #else Z_ADDREF_P(query); intern->query = query; #endif } /* }}} */ void phongo_server_init(zval* return_value, mongoc_client_t* client, uint32_t server_id TSRMLS_DC) /* {{{ */ { php_phongo_server_t* server; object_init_ex(return_value, php_phongo_server_ce); server = Z_SERVER_OBJ_P(return_value); server->server_id = server_id; server->client = client; } /* }}} */ void phongo_session_init(zval* return_value, mongoc_client_session_t* client_session TSRMLS_DC) /* {{{ */ { php_phongo_session_t* session; object_init_ex(return_value, php_phongo_session_ce); session = Z_SESSION_OBJ_P(return_value); session->client_session = client_session; } /* }}} */ void phongo_readconcern_init(zval* return_value, const mongoc_read_concern_t* read_concern TSRMLS_DC) /* {{{ */ { php_phongo_readconcern_t* intern; object_init_ex(return_value, php_phongo_readconcern_ce); intern = Z_READCONCERN_OBJ_P(return_value); intern->read_concern = mongoc_read_concern_copy(read_concern); } /* }}} */ void phongo_readpreference_init(zval* return_value, const mongoc_read_prefs_t* read_prefs TSRMLS_DC) /* {{{ */ { php_phongo_readpreference_t* intern; object_init_ex(return_value, php_phongo_readpreference_ce); intern = Z_READPREFERENCE_OBJ_P(return_value); intern->read_preference = mongoc_read_prefs_copy(read_prefs); } /* }}} */ void phongo_writeconcern_init(zval* return_value, const mongoc_write_concern_t* write_concern TSRMLS_DC) /* {{{ */ { php_phongo_writeconcern_t* intern; object_init_ex(return_value, php_phongo_writeconcern_ce); intern = Z_WRITECONCERN_OBJ_P(return_value); intern->write_concern = mongoc_write_concern_copy(write_concern); } /* }}} */ zend_bool phongo_writeconcernerror_init(zval* return_value, bson_t* bson TSRMLS_DC) /* {{{ */ { bson_iter_t iter; php_phongo_writeconcernerror_t* intern; object_init_ex(return_value, php_phongo_writeconcernerror_ce); intern = Z_WRITECONCERNERROR_OBJ_P(return_value); if (bson_iter_init_find(&iter, bson, "code") 
&& BSON_ITER_HOLDS_INT32(&iter)) { intern->code = bson_iter_int32(&iter); } if (bson_iter_init_find(&iter, bson, "errmsg") && BSON_ITER_HOLDS_UTF8(&iter)) { uint32_t errmsg_len; const char* err_msg = bson_iter_utf8(&iter, &errmsg_len); intern->message = estrndup(err_msg, errmsg_len); } if (bson_iter_init_find(&iter, bson, "errInfo") && BSON_ITER_HOLDS_DOCUMENT(&iter)) { uint32_t len; const uint8_t* data = NULL; bson_iter_document(&iter, &len, &data); if (!php_phongo_bson_to_zval(data, len, &intern->info)) { zval_ptr_dtor(&intern->info); ZVAL_UNDEF(&intern->info); return false; } } return true; } /* }}} */ zend_bool phongo_writeerror_init(zval* return_value, bson_t* bson TSRMLS_DC) /* {{{ */ { bson_iter_t iter; php_phongo_writeerror_t* intern; object_init_ex(return_value, php_phongo_writeerror_ce); intern = Z_WRITEERROR_OBJ_P(return_value); if (bson_iter_init_find(&iter, bson, "code") && BSON_ITER_HOLDS_INT32(&iter)) { intern->code = bson_iter_int32(&iter); } if (bson_iter_init_find(&iter, bson, "errmsg") && BSON_ITER_HOLDS_UTF8(&iter)) { uint32_t errmsg_len; const char* err_msg = bson_iter_utf8(&iter, &errmsg_len); intern->message = estrndup(err_msg, errmsg_len); } if (bson_iter_init_find(&iter, bson, "errInfo") && BSON_ITER_HOLDS_DOCUMENT(&iter)) { uint32_t len; const uint8_t* data = NULL; bson_iter_document(&iter, &len, &data); if (!php_phongo_bson_to_zval(data, len, &intern->info)) { zval_ptr_dtor(&intern->info); ZVAL_UNDEF(&intern->info); return false; } } if (bson_iter_init_find(&iter, bson, "index") && BSON_ITER_HOLDS_INT32(&iter)) { intern->index = bson_iter_int32(&iter); } return true; } /* }}} */ static php_phongo_writeresult_t* phongo_writeresult_init(zval* return_value, bson_t* reply, mongoc_client_t* client, uint32_t server_id TSRMLS_DC) /* {{{ */ { php_phongo_writeresult_t* writeresult; object_init_ex(return_value, php_phongo_writeresult_ce); writeresult = Z_WRITERESULT_OBJ_P(return_value); writeresult->reply = bson_copy(reply); writeresult->server_id = server_id; writeresult->client = client; return writeresult; } /* }}} */ /* }}} */ /* {{{ CRUD */ /* Splits a namespace name into the database and collection names, allocated with estrdup. */ static bool phongo_split_namespace(const char* namespace, char** dbname, char** cname) /* {{{ */ { char* dot = strchr(namespace, '.'); if (!dot) { return false; } if (cname) { *cname = estrdup(namespace + (dot - namespace) + 1); } if (dbname) { *dbname = estrndup(namespace, dot - namespace); } return true; } /* }}} */ /* Parses the "readConcern" option for an execute method. If mongoc_opts is not * NULL, the option will be appended. On error, false is returned and an * exception is thrown. 
*/ static bool phongo_parse_read_concern(zval* options, bson_t* mongoc_opts TSRMLS_DC) /* {{{ */ { zval* option = NULL; mongoc_read_concern_t* read_concern; if (!options) { return true; } if (Z_TYPE_P(options) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options)); return false; } option = php_array_fetchc(options, "readConcern"); if (!option) { return true; } if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_readconcern_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_readconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option)); return false; } read_concern = Z_READCONCERN_OBJ_P(option)->read_concern; if (mongoc_opts && !mongoc_read_concern_append(read_concern, mongoc_opts)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"readConcern\" option"); return false; } return true; } /* }}} */ /* Parses the "readPreference" option for an execute method. If zreadPreference * is not NULL, it will be assigned to the option. On error, false is returned * and an exception is thrown. */ bool phongo_parse_read_preference(zval* options, zval** zreadPreference TSRMLS_DC) /* {{{ */ { zval* option = NULL; if (!options) { return true; } if (Z_TYPE_P(options) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options)); return false; } option = php_array_fetchc(options, "readPreference"); if (!option) { return true; } if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_readpreference_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"readPreference\" option to be %s, %s given", ZSTR_VAL(php_phongo_readpreference_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option)); return false; } if (zreadPreference) { *zreadPreference = option; } return true; } /* }}} */ /* Parses the "session" option for an execute method. The client object should * correspond to the Manager executing the operation and will be used to ensure * that the session is correctly associated with that client. If mongoc_opts is * not NULL, the option will be appended. If zsession is not NULL, it will be * assigned to the option. On error, false is returned and an exception is * thrown. 
*/ bool phongo_parse_session(zval* options, mongoc_client_t* client, bson_t* mongoc_opts, zval** zsession TSRMLS_DC) /* {{{ */ { zval* option = NULL; const mongoc_client_session_t* client_session; if (!options) { return true; } if (Z_TYPE_P(options) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options)); return false; } option = php_array_fetchc(options, "session"); if (!option) { return true; } if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_session_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"session\" option to be %s, %s given", ZSTR_VAL(php_phongo_session_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option)); return false; } client_session = Z_SESSION_OBJ_P(option)->client_session; if (client != mongoc_client_session_get_client(client_session)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot use Session started from a different Manager"); return false; } if (mongoc_opts && !mongoc_client_session_append(client_session, mongoc_opts, NULL)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"session\" option"); return false; } if (zsession) { *zsession = option; } return true; } /* }}} */ /* Parses the "writeConcern" option for an execute method. If mongoc_opts is not * NULL, the option will be appended. If zwriteConcern is not NULL, it will be * assigned to the option. On error, false is returned and an exception is * thrown. */ static bool phongo_parse_write_concern(zval* options, bson_t* mongoc_opts, zval** zwriteConcern TSRMLS_DC) /* {{{ */ { zval* option = NULL; mongoc_write_concern_t* write_concern; if (!options) { return true; } if (Z_TYPE_P(options) != IS_ARRAY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected options to be array, %s given", PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(options)); return false; } option = php_array_fetchc(options, "writeConcern"); if (!option) { return true; } if (Z_TYPE_P(option) != IS_OBJECT || !instanceof_function(Z_OBJCE_P(option), php_phongo_writeconcern_ce TSRMLS_CC)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected \"writeConcern\" option to be %s, %s given", ZSTR_VAL(php_phongo_writeconcern_ce->name), PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(option)); return false; } write_concern = Z_WRITECONCERN_OBJ_P(option)->write_concern; if (mongoc_opts && !mongoc_write_concern_append(write_concern, mongoc_opts)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"writeConcern\" option"); return false; } if (zwriteConcern) { *zwriteConcern = option; } return true; } bool phongo_execute_bulk_write(mongoc_client_t* client, const char* namespace, php_phongo_bulkwrite_t* bulk_write, zval* options, uint32_t server_id, zval* return_value, int return_value_used TSRMLS_DC) /* {{{ */ { bson_error_t error = { 0 }; int success; bson_t reply = BSON_INITIALIZER; mongoc_bulk_operation_t* bulk = bulk_write->bulk; php_phongo_writeresult_t* writeresult; zval* zwriteConcern = NULL; zval* zsession = NULL; const mongoc_write_concern_t* write_concern = NULL; if (bulk_write->executed) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "BulkWrite objects may only be executed once and this instance has already been executed"); return false; } if (!phongo_split_namespace(namespace, &bulk_write->database, &bulk_write->collection)) { 
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s: %s", "Invalid namespace provided", namespace); return false; } if (!phongo_parse_session(options, client, NULL, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ return false; } if (!phongo_parse_write_concern(options, NULL, &zwriteConcern TSRMLS_CC)) { /* Exception should already have been thrown */ return false; } /* If a write concern was not specified, libmongoc will use the client's * write concern; however, we should still fetch it for the write result. * Additionally, we need to check if an unacknowledged write concern would * conflict with an explicit session. */ write_concern = zwriteConcern ? Z_WRITECONCERN_OBJ_P(zwriteConcern)->write_concern : mongoc_client_get_write_concern(client); if (zsession && !mongoc_write_concern_is_acknowledged(write_concern)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot combine \"session\" option with an unacknowledged write concern"); return false; } mongoc_bulk_operation_set_database(bulk, bulk_write->database); mongoc_bulk_operation_set_collection(bulk, bulk_write->collection); mongoc_bulk_operation_set_client(bulk, client); mongoc_bulk_operation_set_hint(bulk, server_id); if (zsession) { mongoc_bulk_operation_set_client_session(bulk, Z_SESSION_OBJ_P(zsession)->client_session); } if (zwriteConcern) { mongoc_bulk_operation_set_write_concern(bulk, Z_WRITECONCERN_OBJ_P(zwriteConcern)->write_concern); } success = mongoc_bulk_operation_execute(bulk, &reply, &error); bulk_write->executed = true; /* Write succeeded and the user doesn't care for the results */ if (success && !return_value_used) { bson_destroy(&reply); return true; } writeresult = phongo_writeresult_init(return_value, &reply, client, mongoc_bulk_operation_get_hint(bulk) TSRMLS_CC); writeresult->write_concern = mongoc_write_concern_copy(write_concern); /* A BulkWriteException is always thrown if mongoc_bulk_operation_execute() * fails to ensure that the write result is accessible. If the error does * not originate from the server (e.g. socket error), throw the appropriate * exception first. It will be included in BulkWriteException's message and * will also be accessible via Exception::getPrevious(). */ if (!success) { if (error.domain != MONGOC_ERROR_SERVER && error.domain != MONGOC_ERROR_WRITE_CONCERN) { phongo_throw_exception_from_bson_error_t_and_reply(&error, &reply TSRMLS_CC); } /* Argument errors occur before command execution, so there is no need * to layer this InvalidArgumentException behind a BulkWriteException. * In practice, this will be a "Cannot do an empty bulk write" error. */ if (error.domain == MONGOC_ERROR_COMMAND && error.code == MONGOC_ERROR_COMMAND_INVALID_ARG) { goto cleanup; } if (EG(exception)) { char *message; (void) spprintf(&message, 0, "Bulk write failed due to previous %s: %s", PHONGO_ZVAL_EXCEPTION_NAME(EG(exception)), error.message); zend_throw_exception(php_phongo_bulkwriteexception_ce, message, 0 TSRMLS_CC); efree(message); } else { zend_throw_exception(php_phongo_bulkwriteexception_ce, error.message, error.code TSRMLS_CC); } /* Ensure error labels are added to the final BulkWriteException. If a * previous exception was also thrown, error labels will already have * been added by phongo_throw_exception_from_bson_error_t_and_reply. 
*/ phongo_exception_add_error_labels(&reply TSRMLS_CC); phongo_add_exception_prop(ZEND_STRL("writeResult"), return_value TSRMLS_CC); } cleanup: bson_destroy(&reply); return success; } /* }}} */ /* Advance the cursor and return whether there is an error. On error, false is * returned and an exception is thrown. */ bool phongo_cursor_advance_and_check_for_error(mongoc_cursor_t* cursor TSRMLS_DC) /* {{{ */ { const bson_t* doc = NULL; if (!mongoc_cursor_next(cursor, &doc)) { bson_error_t error = { 0 }; /* Check for connection related exceptions */ if (EG(exception)) { return false; } /* Could simply be no docs, which is not an error */ if (mongoc_cursor_error_document(cursor, &error, &doc)) { phongo_throw_exception_from_bson_error_t_and_reply(&error, doc TSRMLS_CC); return false; } } return true; } /* }}} */ bool phongo_execute_query(mongoc_client_t* client, const char* namespace, zval* zquery, zval* options, uint32_t server_id, zval* return_value, int return_value_used TSRMLS_DC) /* {{{ */ { const php_phongo_query_t* query; bson_t opts = BSON_INITIALIZER; mongoc_cursor_t* cursor; char* dbname; char* collname; mongoc_collection_t* collection; zval* zreadPreference = NULL; zval* zsession = NULL; if (!phongo_split_namespace(namespace, &dbname, &collname)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "%s: %s", "Invalid namespace provided", namespace); return false; } collection = mongoc_client_get_collection(client, dbname, collname); efree(dbname); efree(collname); query = Z_QUERY_OBJ_P(zquery); bson_copy_to(query->opts, &opts); if (query->read_concern) { mongoc_collection_set_read_concern(collection, query->read_concern); } if (!phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) { /* Exception should already have been thrown */ mongoc_collection_destroy(collection); bson_destroy(&opts); return false; } if (!phongo_parse_session(options, client, &opts, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ mongoc_collection_destroy(collection); bson_destroy(&opts); return false; } if (!BSON_APPEND_INT32(&opts, "serverId", server_id)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"serverId\" option"); mongoc_collection_destroy(collection); bson_destroy(&opts); return false; } cursor = mongoc_collection_find_with_opts(collection, query->filter, &opts, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC)); mongoc_collection_destroy(collection); bson_destroy(&opts); /* maxAwaitTimeMS must be set before the cursor is sent */ if (query->max_await_time_ms) { mongoc_cursor_set_max_await_time_ms(cursor, query->max_await_time_ms); } if (!phongo_cursor_advance_and_check_for_error(cursor TSRMLS_CC)) { mongoc_cursor_destroy(cursor); return false; } if (!return_value_used) { mongoc_cursor_destroy(cursor); return true; } phongo_cursor_init_for_query(return_value, client, cursor, namespace, zquery, zreadPreference, zsession TSRMLS_CC); return true; } /* }}} */ static bson_t* create_wrapped_command_envelope(const char* db, bson_t* reply) { bson_t* tmp; size_t max_ns_len = strlen(db) + 5 + 1; /* db + ".$cmd" + '\0' */ char* ns = emalloc(max_ns_len); snprintf(ns, max_ns_len, "%s.$cmd", db); tmp = BCON_NEW("cursor", "{", "id", BCON_INT64(0), "ns", BCON_UTF8(ns), "firstBatch", "[", BCON_DOCUMENT(reply), "]", "}"); efree(ns); return tmp; } static zval* phongo_create_implicit_session(mongoc_client_t* client TSRMLS_DC) /* {{{ */ { mongoc_client_session_t* cs; zval* zsession; cs = mongoc_client_start_session(client, NULL, 
NULL); if (!cs) { return NULL; } #if PHP_VERSION_ID >= 70000 zsession = ecalloc(sizeof(zval), 1); #else ALLOC_INIT_ZVAL(zsession); #endif phongo_session_init(zsession, cs TSRMLS_CC); return zsession; } /* }}} */ bool phongo_execute_command(mongoc_client_t* client, php_phongo_command_type_t type, const char* db, zval* zcommand, zval* options, uint32_t server_id, zval* return_value, int return_value_used TSRMLS_DC) /* {{{ */ { const php_phongo_command_t* command; bson_iter_t iter; bson_t reply; bson_error_t error = { 0 }; bson_t opts = BSON_INITIALIZER; mongoc_cursor_t* cmd_cursor; zval* zreadPreference = NULL; zval* zsession = NULL; bool result = false; bool free_reply = false; bool free_zsession = false; bool is_unacknowledged_write_concern = false; command = Z_COMMAND_OBJ_P(zcommand); if ((type & PHONGO_OPTION_READ_CONCERN) && !phongo_parse_read_concern(options, &opts TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } if ((type & PHONGO_OPTION_READ_PREFERENCE) && !phongo_parse_read_preference(options, &zreadPreference TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } if (!phongo_parse_session(options, client, &opts, &zsession TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } if (type & PHONGO_OPTION_WRITE_CONCERN) { zval* zwriteConcern = NULL; if (!phongo_parse_write_concern(options, &opts, &zwriteConcern TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } /* Determine if the explicit or inherited write concern is * unacknowledged so that we can ensure it does not conflict with an * explicit or implicit session. */ if (zwriteConcern) { is_unacknowledged_write_concern = !mongoc_write_concern_is_acknowledged(Z_WRITECONCERN_OBJ_P(zwriteConcern)->write_concern); } else if (type != PHONGO_COMMAND_RAW) { is_unacknowledged_write_concern = !mongoc_write_concern_is_acknowledged(mongoc_client_get_write_concern(client)); } } if (zsession && is_unacknowledged_write_concern) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot combine \"session\" option with an unacknowledged write concern"); goto cleanup; } /* If an explicit session was not provided and the effective write concern * is not unacknowledged, attempt to create an implicit client session * (ignoring any errors). */ if (!zsession && !is_unacknowledged_write_concern) { zsession = phongo_create_implicit_session(client TSRMLS_CC); if (zsession) { free_zsession = true; if (!mongoc_client_session_append(Z_SESSION_OBJ_P(zsession)->client_session, &opts, NULL)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending implicit \"sessionId\" option"); goto cleanup; } } } if (!BSON_APPEND_INT32(&opts, "serverId", server_id)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Error appending \"serverId\" option"); goto cleanup; } /* Although "opts" already always includes the serverId option, the read * preference is added to the command parts, which is relevant for mongos * command construction. 
*/ switch (type) { case PHONGO_COMMAND_RAW: result = mongoc_client_command_with_opts(client, db, command->bson, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC), &opts, &reply, &error); break; case PHONGO_COMMAND_READ: result = mongoc_client_read_command_with_opts(client, db, command->bson, phongo_read_preference_from_zval(zreadPreference TSRMLS_CC), &opts, &reply, &error); break; case PHONGO_COMMAND_WRITE: result = mongoc_client_write_command_with_opts(client, db, command->bson, &opts, &reply, &error); break; case PHONGO_COMMAND_READ_WRITE: /* We can pass NULL as readPreference, as this argument was added historically, but has no function */ result = mongoc_client_read_write_command_with_opts(client, db, command->bson, NULL, &opts, &reply, &error); break; default: /* Should never happen, but if it does: exception */ phongo_throw_exception(PHONGO_ERROR_LOGIC TSRMLS_CC, "Type '%d' should never have been passed to phongo_execute_command, please file a bug report", type); goto cleanup; } free_reply = true; if (!result) { phongo_throw_exception_from_bson_error_t_and_reply(&error, &reply TSRMLS_CC); goto cleanup; } if (!return_value_used) { goto cleanup; } /* According to mongoc_cursor_new_from_command_reply_with_opts(), the reply * bson_t is ultimately destroyed on both success and failure. */ if (bson_iter_init_find(&iter, &reply, "cursor") && BSON_ITER_HOLDS_DOCUMENT(&iter)) { bson_t initial_reply = BSON_INITIALIZER; bson_t cursor_opts = BSON_INITIALIZER; bson_error_t error = { 0 }; bson_copy_to(&reply, &initial_reply); bson_append_int32(&cursor_opts, "serverId", -1, server_id); if (command->max_await_time_ms) { bson_append_bool(&cursor_opts, "awaitData", -1, 1); bson_append_int64(&cursor_opts, "maxAwaitTimeMS", -1, command->max_await_time_ms); bson_append_bool(&cursor_opts, "tailable", -1, 1); } if (command->batch_size) { bson_append_int64(&cursor_opts, "batchSize", -1, command->batch_size); } if (zsession && !mongoc_client_session_append(Z_SESSION_OBJ_P(zsession)->client_session, &cursor_opts, &error)) { phongo_throw_exception_from_bson_error_t(&error TSRMLS_CC); bson_destroy(&initial_reply); bson_destroy(&cursor_opts); result = false; goto cleanup; } cmd_cursor = mongoc_cursor_new_from_command_reply_with_opts(client, &initial_reply, &cursor_opts); bson_destroy(&cursor_opts); } else { bson_t cursor_opts = BSON_INITIALIZER; bson_t* wrapped_reply = create_wrapped_command_envelope(db, &reply); bson_append_int32(&cursor_opts, "serverId", -1, server_id); cmd_cursor = mongoc_cursor_new_from_command_reply_with_opts(client, wrapped_reply, &cursor_opts); bson_destroy(&cursor_opts); } phongo_cursor_init_for_command(return_value, client, cmd_cursor, db, zcommand, zreadPreference, zsession TSRMLS_CC); cleanup: bson_destroy(&opts); if (free_reply) { bson_destroy(&reply); } if (free_zsession) { #if PHP_VERSION_ID >= 70000 zval_ptr_dtor(zsession); efree(zsession); #else zval_ptr_dtor(&zsession); #endif } return result; } /* }}} */ /* }}} */ /* {{{ mongoc types from from_zval */ const mongoc_write_concern_t* phongo_write_concern_from_zval(zval* zwrite_concern TSRMLS_DC) /* {{{ */ { if (zwrite_concern) { php_phongo_writeconcern_t* intern = Z_WRITECONCERN_OBJ_P(zwrite_concern); if (intern) { return intern->write_concern; } } return NULL; } /* }}} */ const mongoc_read_concern_t* phongo_read_concern_from_zval(zval* zread_concern TSRMLS_DC) /* {{{ */ { if (zread_concern) { php_phongo_readconcern_t* intern = Z_READCONCERN_OBJ_P(zread_concern); if (intern) { return intern->read_concern; } } return 
NULL; } /* }}} */ const mongoc_read_prefs_t* phongo_read_preference_from_zval(zval* zread_preference TSRMLS_DC) /* {{{ */ { if (zread_preference) { php_phongo_readpreference_t* intern = Z_READPREFERENCE_OBJ_P(zread_preference); if (intern) { return intern->read_preference; } } return NULL; } /* }}} */ /* }}} */ /* {{{ phongo zval from mongoc types */ void php_phongo_cursor_id_new_from_id(zval* object, int64_t cursorid TSRMLS_DC) /* {{{ */ { php_phongo_cursorid_t* intern; object_init_ex(object, php_phongo_cursorid_ce); intern = Z_CURSORID_OBJ_P(object); intern->id = cursorid; } /* }}} */ void php_phongo_objectid_new_from_oid(zval* object, const bson_oid_t* oid TSRMLS_DC) /* {{{ */ { php_phongo_objectid_t* intern; object_init_ex(object, php_phongo_objectid_ce); intern = Z_OBJECTID_OBJ_P(object); bson_oid_to_string(oid, intern->oid); intern->initialized = true; } /* }}} */ php_phongo_server_description_type_t php_phongo_server_description_type(mongoc_server_description_t* sd) { const char* name = mongoc_server_description_type(sd); int i; for (i = 0; i < PHONGO_SERVER_DESCRIPTION_TYPES; i++) { if (!strcmp(name, php_phongo_server_description_type_map[i].name)) { return php_phongo_server_description_type_map[i].type; } } return PHONGO_SERVER_UNKNOWN; } void php_phongo_server_to_zval(zval* retval, mongoc_server_description_t* sd) /* {{{ */ { mongoc_host_list_t* host = mongoc_server_description_host(sd); const bson_t* is_master = mongoc_server_description_ismaster(sd); bson_iter_t iter; array_init(retval); ADD_ASSOC_STRING(retval, "host", host->host); ADD_ASSOC_LONG_EX(retval, "port", host->port); ADD_ASSOC_LONG_EX(retval, "type", php_phongo_server_description_type(sd)); ADD_ASSOC_BOOL_EX(retval, "is_primary", !strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_PRIMARY].name)); ADD_ASSOC_BOOL_EX(retval, "is_secondary", !strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_SECONDARY].name)); ADD_ASSOC_BOOL_EX(retval, "is_arbiter", !strcmp(mongoc_server_description_type(sd), php_phongo_server_description_type_map[PHONGO_SERVER_RS_ARBITER].name)); ADD_ASSOC_BOOL_EX(retval, "is_hidden", bson_iter_init_find_case(&iter, is_master, "hidden") && bson_iter_as_bool(&iter)); ADD_ASSOC_BOOL_EX(retval, "is_passive", bson_iter_init_find_case(&iter, is_master, "passive") && bson_iter_as_bool(&iter)); if (bson_iter_init_find(&iter, is_master, "tags") && BSON_ITER_HOLDS_DOCUMENT(&iter)) { const uint8_t* bytes; uint32_t len; php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; /* Use native arrays for debugging output */ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; bson_iter_document(&iter, &len, &bytes); php_phongo_bson_to_zval_ex(bytes, len, &state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(retval, "tags", &state.zchild); #else ADD_ASSOC_ZVAL_EX(retval, "tags", state.zchild); #endif } { php_phongo_bson_state state = PHONGO_BSON_STATE_INITIALIZER; /* Use native arrays for debugging output */ state.map.root_type = PHONGO_TYPEMAP_NATIVE_ARRAY; state.map.document_type = PHONGO_TYPEMAP_NATIVE_ARRAY; php_phongo_bson_to_zval_ex(bson_get_data(is_master), is_master->len, &state); #if PHP_VERSION_ID >= 70000 ADD_ASSOC_ZVAL_EX(retval, "last_is_master", &state.zchild); #else ADD_ASSOC_ZVAL_EX(retval, "last_is_master", state.zchild); #endif } ADD_ASSOC_LONG_EX(retval, "round_trip_time", (phongo_long) mongoc_server_description_round_trip_time(sd)); } /* }}} */ 
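/* Illustrative sketch, not part of the driver source: php_phongo_server_to_zval()
 * above summarizes a single mongoc_server_description_t as a native PHP array.
 * The hypothetical helper below runs it over every server description known to a
 * client, roughly the per-server summary that surfaces in the driver's debug
 * output. Assumes the PHP 7 code path and libmongoc's topology accessors. */
static void example_dump_topology(mongoc_client_t* client, zval* retval)
{
	mongoc_server_description_t** sds;
	size_t                        n, i;

	sds = mongoc_client_get_server_descriptions(client, &n);
	array_init_size(retval, (uint32_t) n);

	for (i = 0; i < n; i++) {
		zval server;

		/* Fills "host", "port", "type", "is_primary", "tags", etc. */
		php_phongo_server_to_zval(&server, sds[i]);
		add_next_index_zval(retval, &server);
	}

	mongoc_server_descriptions_destroy_all(sds, n);
}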
void php_phongo_read_concern_to_zval(zval* retval, const mongoc_read_concern_t* read_concern) /* {{{ */ { const char* level = mongoc_read_concern_get_level(read_concern); array_init_size(retval, 1); if (level) { ADD_ASSOC_STRING(retval, "level", level); } } /* }}} */ /* If options is not an array, insert it as a field in a newly allocated array. * This may be used to convert legacy options (e.g. ReadPreference option for * an executeQuery method) into an options array. * * A pointer to the array zval will always be returned. If allocated is set to * true, php_phongo_prep_legacy_option_free() should be used to free the array * zval later. */ zval* php_phongo_prep_legacy_option(zval* options, const char* key, bool* allocated TSRMLS_DC) /* {{{ */ { *allocated = false; if (options && Z_TYPE_P(options) != IS_ARRAY) { #if PHP_VERSION_ID >= 70000 zval* new_options = ecalloc(sizeof(zval), 1); #else zval* new_options = NULL; ALLOC_INIT_ZVAL(new_options); #endif array_init_size(new_options, 1); add_assoc_zval(new_options, key, options); Z_ADDREF_P(options); *allocated = true; return new_options; } return options; } /* }}} */ void php_phongo_prep_legacy_option_free(zval* options TSRMLS_DC) /* {{{ */ { #if PHP_VERSION_ID >= 70000 zval_ptr_dtor(options); efree(options); #else zval_ptr_dtor(&options); #endif } /* }}} */ /* Prepare tagSets for BSON encoding by converting each array in the set to an * object. This ensures that empty arrays will serialize as empty documents. * * php_phongo_read_preference_tags_are_valid() handles actual validation of the * tag set structure. */ void php_phongo_read_preference_prep_tagsets(zval* tagSets TSRMLS_DC) /* {{{ */ { HashTable* ht_data; if (Z_TYPE_P(tagSets) != IS_ARRAY) { return; } ht_data = HASH_OF(tagSets); #if PHP_VERSION_ID >= 70000 { zval* tagSet; ZEND_HASH_FOREACH_VAL(ht_data, tagSet) { ZVAL_DEREF(tagSet); if (Z_TYPE_P(tagSet) == IS_ARRAY) { SEPARATE_ZVAL_NOREF(tagSet); convert_to_object(tagSet); } } ZEND_HASH_FOREACH_END(); } #else { HashPosition pos; zval** tagSet; for ( zend_hash_internal_pointer_reset_ex(ht_data, &pos); zend_hash_get_current_data_ex(ht_data, (void**) &tagSet, &pos) == SUCCESS; zend_hash_move_forward_ex(ht_data, &pos)) { if (Z_TYPE_PP(tagSet) == IS_ARRAY) { SEPARATE_ZVAL_IF_NOT_REF(tagSet); convert_to_object(*tagSet); } } } #endif return; } /* }}} */ /* Checks if tags is valid to set on a mongoc_read_prefs_t. It may be null or an * array of one or more documents. 
*/ bool php_phongo_read_preference_tags_are_valid(const bson_t* tags) /* {{{ */ { bson_iter_t iter; if (bson_empty0(tags)) { return true; } if (!bson_iter_init(&iter, tags)) { return false; } while (bson_iter_next(&iter)) { if (!BSON_ITER_HOLDS_DOCUMENT(&iter)) { return false; } } return true; } /* }}} */ void php_phongo_write_concern_to_zval(zval* retval, const mongoc_write_concern_t* write_concern) /* {{{ */ { const char* wtag = mongoc_write_concern_get_wtag(write_concern); const int32_t w = mongoc_write_concern_get_w(write_concern); const int64_t wtimeout = mongoc_write_concern_get_wtimeout_int64(write_concern); array_init_size(retval, 4); if (wtag) { ADD_ASSOC_STRING(retval, "w", wtag); } else if (mongoc_write_concern_get_wmajority(write_concern)) { ADD_ASSOC_STRING(retval, "w", PHONGO_WRITE_CONCERN_W_MAJORITY); } else if (w != MONGOC_WRITE_CONCERN_W_DEFAULT) { ADD_ASSOC_LONG_EX(retval, "w", w); } if (mongoc_write_concern_journal_is_set(write_concern)) { ADD_ASSOC_BOOL_EX(retval, "j", mongoc_write_concern_get_journal(write_concern)); } if (wtimeout != 0) { /* Note: PHP currently enforces that wimeoutMS is a 32-bit integer, so * casting will never truncate the value. This may change with * PHPC-1411. */ ADD_ASSOC_LONG_EX(retval, "wtimeout", (int32_t) wtimeout); } } /* }}} */ /* }}} */ static mongoc_uri_t* php_phongo_make_uri(const char* uri_string, bson_t* options TSRMLS_DC) /* {{{ */ { mongoc_uri_t* uri; bson_error_t error = { 0 }; uri = mongoc_uri_new_with_error(uri_string, &error); MONGOC_DEBUG("Connection string: '%s'", uri_string); if (!uri) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse MongoDB URI: '%s'. %s.", uri_string, error.message); return NULL; } return uri; } /* }}} */ static const char* php_phongo_bson_type_to_string(bson_type_t type) /* {{{ */ { switch (type) { case BSON_TYPE_EOD: return "EOD"; case BSON_TYPE_DOUBLE: return "double"; case BSON_TYPE_UTF8: return "string"; case BSON_TYPE_DOCUMENT: return "document"; case BSON_TYPE_ARRAY: return "array"; case BSON_TYPE_BINARY: return "Binary"; case BSON_TYPE_UNDEFINED: return "undefined"; case BSON_TYPE_OID: return "ObjectId"; case BSON_TYPE_BOOL: return "boolean"; case BSON_TYPE_DATE_TIME: return "UTCDateTime"; case BSON_TYPE_NULL: return "null"; case BSON_TYPE_REGEX: return "Regex"; case BSON_TYPE_DBPOINTER: return "DBPointer"; case BSON_TYPE_CODE: return "Javascript"; case BSON_TYPE_SYMBOL: return "symbol"; case BSON_TYPE_CODEWSCOPE: return "Javascript with scope"; case BSON_TYPE_INT32: return "32-bit integer"; case BSON_TYPE_TIMESTAMP: return "Timestamp"; case BSON_TYPE_INT64: return "64-bit integer"; case BSON_TYPE_DECIMAL128: return "Decimal128"; case BSON_TYPE_MAXKEY: return "MaxKey"; case BSON_TYPE_MINKEY: return "MinKey"; default: return "unknown"; } } /* }}} */ #define PHONGO_URI_INVALID_TYPE(iter, expected) \ phongo_throw_exception( \ PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, \ "Expected %s for \"%s\" URI option, %s given", \ (expected), \ bson_iter_key(&(iter)), \ php_phongo_bson_type_to_string(bson_iter_type(&(iter)))) static bool php_phongo_uri_finalize_auth(mongoc_uri_t* uri TSRMLS_DC) /* {{{ */ { /* authSource with GSSAPI or X509 should always be external */ if (mongoc_uri_get_auth_mechanism(uri)) { if (!strcasecmp(mongoc_uri_get_auth_mechanism(uri), "GSSAPI") || !strcasecmp(mongoc_uri_get_auth_mechanism(uri), "MONGODB-X509")) { const char *source = mongoc_uri_get_auth_source(uri); if (source) { if (strcasecmp(source, "$external")) { 
phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse URI options: GSSAPI and X509 require \"$external\" authSource."); return false; } } else { mongoc_uri_set_auth_source(uri, "$external"); } } /* MONGODB-X509 is the only mechanism that doesn't require username */ if (strcasecmp(mongoc_uri_get_auth_mechanism(uri), "MONGODB-X509")) { if (!mongoc_uri_get_username(uri) || !strcmp(mongoc_uri_get_username(uri), "")) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse URI options: '%s' authentication mechanism requires username.", mongoc_uri_get_auth_mechanism(uri)); return false; } } /* MONGODB-X509 errors if a password is supplied. */ if (!strcasecmp(mongoc_uri_get_auth_mechanism(uri), "MONGODB-X509")) { if (mongoc_uri_get_password(uri)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse URI options: X509 authentication mechanism does not accept a password."); return false; } } } return true; } /* }}} */ static bool php_phongo_uri_finalize_tls(mongoc_uri_t* uri TSRMLS_DC) /* {{{ */ { const bson_t *options; bson_iter_t iter; if (!(options = mongoc_uri_get_options(uri))) { return true; } if (bson_iter_init_find_case(&iter, options, MONGOC_URI_TLSINSECURE) && (bson_iter_init_find_case(&iter, options, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES) || bson_iter_init_find_case(&iter, options, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse URI options: %s may not be combined with %s or %s.", MONGOC_URI_TLSINSECURE, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES); return false; } return true; } /* }}} */ static bool php_phongo_apply_options_to_uri(mongoc_uri_t* uri, bson_t* options TSRMLS_DC) /* {{{ */ { bson_iter_t iter; /* Return early if there are no options to apply */ if (bson_empty0(options) || !bson_iter_init(&iter, options)) { return true; } while (bson_iter_next(&iter)) { const char* key = bson_iter_key(&iter); /* Skip read preference, read concern, and write concern options, as * those will be processed by other functions. */ if (!strcasecmp(key, MONGOC_URI_JOURNAL) || !strcasecmp(key, MONGOC_URI_MAXSTALENESSSECONDS) || !strcasecmp(key, MONGOC_URI_READCONCERNLEVEL) || !strcasecmp(key, MONGOC_URI_READPREFERENCE) || !strcasecmp(key, MONGOC_URI_READPREFERENCETAGS) || !strcasecmp(key, MONGOC_URI_SAFE) || !strcasecmp(key, MONGOC_URI_SLAVEOK) || !strcasecmp(key, MONGOC_URI_W) || !strcasecmp(key, MONGOC_URI_WTIMEOUTMS)) { continue; } if (mongoc_uri_option_is_bool(key)) { /* The option's type is not validated because bson_iter_as_bool() is * used to cast the value to a boolean. Validation may be introduced * in PHPC-990. 
*/ if (!mongoc_uri_set_option_as_bool(uri, key, bson_iter_as_bool(&iter))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } if (mongoc_uri_option_is_int32(key)) { if (!BSON_ITER_HOLDS_INT32(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "32-bit integer"); return false; } if (!mongoc_uri_set_option_as_int32(uri, key, bson_iter_int32(&iter))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } if (mongoc_uri_option_is_utf8(key)) { if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); return false; } if (!mongoc_uri_set_option_as_utf8(uri, key, bson_iter_utf8(&iter, NULL))) { /* Assignment uses mongoc_uri_set_appname() for the "appname" * option, which validates length in addition to UTF-8 encoding. * For BC, we report the invalid string to the user. */ if (!strcasecmp(key, MONGOC_URI_APPNAME)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Invalid appname value: '%s'", bson_iter_utf8(&iter, NULL)); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); } return false; } continue; } if (!strcasecmp(key, "username")) { if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); return false; } if (!mongoc_uri_set_username(uri, bson_iter_utf8(&iter, NULL))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } if (!strcasecmp(key, "password")) { if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); return false; } if (!mongoc_uri_set_password(uri, bson_iter_utf8(&iter, NULL))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } if (!strcasecmp(key, MONGOC_URI_AUTHMECHANISM)) { if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); return false; } if (!mongoc_uri_set_auth_mechanism(uri, bson_iter_utf8(&iter, NULL))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } if (!strcasecmp(key, MONGOC_URI_AUTHSOURCE)) { if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); return false; } if (!mongoc_uri_set_auth_source(uri, bson_iter_utf8(&iter, NULL))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } if (!strcasecmp(key, MONGOC_URI_AUTHMECHANISMPROPERTIES)) { bson_t properties; uint32_t len; const uint8_t* data; if (!BSON_ITER_HOLDS_DOCUMENT(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "array or object"); return false; } bson_iter_document(&iter, &len, &data); if (!bson_init_static(&properties, data, len)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Could not initialize BSON structure for auth mechanism properties"); return false; } if (!mongoc_uri_set_mechanism_properties(uri, &properties)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } if (!strcasecmp(key, MONGOC_URI_GSSAPISERVICENAME)) { bson_t unused, properties = BSON_INITIALIZER; if (mongoc_uri_get_mechanism_properties(uri, &unused)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "authMechanismProperties SERVICE_NAME already set, 
ignoring \"%s\"", key); return false; } if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); return false; } bson_append_utf8(&properties, "SERVICE_NAME", -1, bson_iter_utf8(&iter, NULL), -1); if (!mongoc_uri_set_mechanism_properties(uri, &properties)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); bson_destroy(&properties); return false; } bson_destroy(&properties); continue; } if (!strcasecmp(key, MONGOC_URI_COMPRESSORS)) { if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); return false; } if (!mongoc_uri_set_compressors(uri, bson_iter_utf8(&iter, NULL))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" URI option", key); return false; } continue; } } // Finalize auth options if (!php_phongo_uri_finalize_auth(uri TSRMLS_CC)) { /* Exception should already have been thrown */ return false; } return true; } /* }}} */ static bool php_phongo_apply_rc_options_to_uri(mongoc_uri_t* uri, bson_t* options TSRMLS_DC) /* {{{ */ { bson_iter_t iter; mongoc_read_concern_t* new_rc; const mongoc_read_concern_t* old_rc; if (!(old_rc = mongoc_uri_get_read_concern(uri))) { phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "mongoc_uri_t does not have a read concern"); return false; } /* Return early if there are no options to apply */ if (bson_empty0(options)) { return true; } if (!bson_iter_init_find_case(&iter, options, MONGOC_URI_READCONCERNLEVEL)) { return true; } new_rc = mongoc_read_concern_copy(old_rc); if (bson_iter_init_find_case(&iter, options, MONGOC_URI_READCONCERNLEVEL)) { if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); mongoc_read_concern_destroy(new_rc); return false; } mongoc_read_concern_set_level(new_rc, bson_iter_utf8(&iter, NULL)); } mongoc_uri_set_read_concern(uri, new_rc); mongoc_read_concern_destroy(new_rc); return true; } /* }}} */ static bool php_phongo_apply_rp_options_to_uri(mongoc_uri_t* uri, bson_t* options TSRMLS_DC) /* {{{ */ { bson_iter_t iter; mongoc_read_prefs_t* new_rp; const mongoc_read_prefs_t* old_rp; if (!(old_rp = mongoc_uri_get_read_prefs_t(uri))) { phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "mongoc_uri_t does not have a read preference"); return false; } /* Return early if there are no options to apply */ if (bson_empty0(options)) { return true; } if (!bson_iter_init_find_case(&iter, options, MONGOC_URI_SLAVEOK) && !bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCE) && !bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCETAGS) && !bson_iter_init_find_case(&iter, options, MONGOC_URI_MAXSTALENESSSECONDS)) { return true; } new_rp = mongoc_read_prefs_copy(old_rp); if (bson_iter_init_find_case(&iter, options, MONGOC_URI_SLAVEOK)) { if (!BSON_ITER_HOLDS_BOOL(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "boolean"); mongoc_read_prefs_destroy(new_rp); return false; } if (bson_iter_bool(&iter)) { mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_SECONDARY_PREFERRED); } } if (bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCE)) { const char* str; if (!BSON_ITER_HOLDS_UTF8(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "string"); mongoc_read_prefs_destroy(new_rp); return false; } str = bson_iter_utf8(&iter, NULL); if (0 == strcasecmp("primary", str)) { mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_PRIMARY); } else if (0 == strcasecmp("primarypreferred", str)) { mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_PRIMARY_PREFERRED); } else if (0 
== strcasecmp("secondary", str)) { mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_SECONDARY); } else if (0 == strcasecmp("secondarypreferred", str)) { mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_SECONDARY_PREFERRED); } else if (0 == strcasecmp("nearest", str)) { mongoc_read_prefs_set_mode(new_rp, MONGOC_READ_NEAREST); } else { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Unsupported %s value: '%s'", bson_iter_key(&iter), str); mongoc_read_prefs_destroy(new_rp); return false; } } if (bson_iter_init_find_case(&iter, options, MONGOC_URI_READPREFERENCETAGS)) { bson_t tags; uint32_t len; const uint8_t* data; if (!BSON_ITER_HOLDS_ARRAY(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "array"); mongoc_read_prefs_destroy(new_rp); return false; } bson_iter_array(&iter, &len, &data); if (!bson_init_static(&tags, data, len)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Could not initialize BSON structure for read preference tags"); mongoc_read_prefs_destroy(new_rp); return false; } if (!php_phongo_read_preference_tags_are_valid(&tags)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Read preference tags must be an array of zero or more documents"); mongoc_read_prefs_destroy(new_rp); return false; } mongoc_read_prefs_set_tags(new_rp, &tags); } if (mongoc_read_prefs_get_mode(new_rp) == MONGOC_READ_PRIMARY && !bson_empty(mongoc_read_prefs_get_tags(new_rp))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Primary read preference mode conflicts with tags"); mongoc_read_prefs_destroy(new_rp); return false; } /* Handle maxStalenessSeconds, and make sure it is not combined with primary * readPreference */ if (bson_iter_init_find_case(&iter, options, MONGOC_URI_MAXSTALENESSSECONDS)) { int64_t max_staleness_seconds; if (!BSON_ITER_HOLDS_INT(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "integer"); mongoc_read_prefs_destroy(new_rp); return false; } max_staleness_seconds = bson_iter_as_int64(&iter); if (max_staleness_seconds != MONGOC_NO_MAX_STALENESS) { if (max_staleness_seconds < MONGOC_SMALLEST_MAX_STALENESS_SECONDS) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be >= %d, %" PRId64 " given", MONGOC_SMALLEST_MAX_STALENESS_SECONDS, max_staleness_seconds); mongoc_read_prefs_destroy(new_rp); return false; } if (max_staleness_seconds > INT32_MAX) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected maxStalenessSeconds to be <= %d, %" PRId64 " given", INT32_MAX, max_staleness_seconds); mongoc_read_prefs_destroy(new_rp); return false; } if (mongoc_read_prefs_get_mode(new_rp) == MONGOC_READ_PRIMARY) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Primary read preference mode conflicts with maxStalenessSeconds"); mongoc_read_prefs_destroy(new_rp); return false; } } mongoc_read_prefs_set_max_staleness_seconds(new_rp, max_staleness_seconds); } /* This may be redundant in light of the last check (primary with tags), but * we'll check anyway in case additional validation is implemented. 
*/ if (!mongoc_read_prefs_is_valid(new_rp)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Read preference is not valid"); mongoc_read_prefs_destroy(new_rp); return false; } mongoc_uri_set_read_prefs_t(uri, new_rp); mongoc_read_prefs_destroy(new_rp); return true; } /* }}} */ static bool php_phongo_apply_wc_options_to_uri(mongoc_uri_t* uri, bson_t* options TSRMLS_DC) /* {{{ */ { bson_iter_t iter; mongoc_write_concern_t* new_wc; const mongoc_write_concern_t* old_wc; if (!(old_wc = mongoc_uri_get_write_concern(uri))) { phongo_throw_exception(PHONGO_ERROR_MONGOC_FAILED TSRMLS_CC, "mongoc_uri_t does not have a write concern"); return false; } /* Return early if there are no options to apply */ if (bson_empty0(options)) { return true; } if (!bson_iter_init_find_case(&iter, options, MONGOC_URI_JOURNAL) && !bson_iter_init_find_case(&iter, options, MONGOC_URI_SAFE) && !bson_iter_init_find_case(&iter, options, MONGOC_URI_W) && !bson_iter_init_find_case(&iter, options, MONGOC_URI_WTIMEOUTMS)) { return true; } new_wc = mongoc_write_concern_copy(old_wc); if (bson_iter_init_find_case(&iter, options, MONGOC_URI_SAFE)) { if (!BSON_ITER_HOLDS_BOOL(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "boolean"); mongoc_write_concern_destroy(new_wc); return false; } mongoc_write_concern_set_w(new_wc, bson_iter_bool(&iter) ? 1 : MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED); } if (bson_iter_init_find_case(&iter, options, MONGOC_URI_WTIMEOUTMS)) { int64_t wtimeout; /* Although the write concern spec defines wtimeoutMS as 64-bit, PHP has * historically required 32-bit. This may change with PHPC-1411. */ if (!BSON_ITER_HOLDS_INT32(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "32-bit integer"); mongoc_write_concern_destroy(new_wc); return false; } wtimeout = bson_iter_as_int64(&iter); if (wtimeout < 0) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Expected wtimeoutMS to be >= 0, %" PRId64 " given", wtimeout); mongoc_write_concern_destroy(new_wc); return false; } mongoc_write_concern_set_wtimeout_int64(new_wc, wtimeout); } if (bson_iter_init_find_case(&iter, options, MONGOC_URI_JOURNAL)) { if (!BSON_ITER_HOLDS_BOOL(&iter)) { PHONGO_URI_INVALID_TYPE(iter, "boolean"); mongoc_write_concern_destroy(new_wc); return false; } mongoc_write_concern_set_journal(new_wc, bson_iter_bool(&iter)); } if (bson_iter_init_find_case(&iter, options, MONGOC_URI_W)) { if (BSON_ITER_HOLDS_INT32(&iter)) { int32_t value = bson_iter_int32(&iter); switch (value) { case MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED: case MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED: mongoc_write_concern_set_w(new_wc, value); break; default: if (value > 0) { mongoc_write_concern_set_w(new_wc, value); break; } phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Unsupported w value: %d", value); mongoc_write_concern_destroy(new_wc); return false; } } else if (BSON_ITER_HOLDS_UTF8(&iter)) { const char* str = bson_iter_utf8(&iter, NULL); if (0 == strcasecmp(PHONGO_WRITE_CONCERN_W_MAJORITY, str)) { mongoc_write_concern_set_w(new_wc, MONGOC_WRITE_CONCERN_W_MAJORITY); } else { mongoc_write_concern_set_wtag(new_wc, str); } } else { PHONGO_URI_INVALID_TYPE(iter, "32-bit integer or string"); mongoc_write_concern_destroy(new_wc); return false; } } if (mongoc_write_concern_get_journal(new_wc)) { int32_t w = mongoc_write_concern_get_w(new_wc); if (w == MONGOC_WRITE_CONCERN_W_UNACKNOWLEDGED || w == MONGOC_WRITE_CONCERN_W_ERRORS_IGNORED) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Journal conflicts with w value: %d", w); 
mongoc_write_concern_destroy(new_wc); return false; } } /* This may be redundant in light of the last check (unacknowledged w with journal), but we'll check anyway in case additional validation is implemented. */ if (!mongoc_write_concern_is_valid(new_wc)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Write concern is not valid"); mongoc_write_concern_destroy(new_wc); return false; } mongoc_uri_set_write_concern(uri, new_wc); mongoc_write_concern_destroy(new_wc); return true; } /* }}} */ #ifdef MONGOC_ENABLE_SSL static void php_phongo_mongoc_ssl_opts_from_uri(mongoc_ssl_opt_t* ssl_opt, mongoc_uri_t* uri, bool* any_ssl_option_set) { bool insecure = mongoc_uri_get_option_as_bool(uri, MONGOC_URI_TLSINSECURE, false); const char* pem_file = mongoc_uri_get_option_as_utf8(uri, MONGOC_URI_TLSCERTIFICATEKEYFILE, NULL); const char* pem_pwd = mongoc_uri_get_option_as_utf8(uri, MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD, NULL); const char* ca_file = mongoc_uri_get_option_as_utf8(uri, MONGOC_URI_TLSCAFILE, NULL); ssl_opt->pem_file = pem_file ? estrdup(pem_file) : NULL; ssl_opt->pem_pwd = pem_pwd ? estrdup(pem_pwd) : NULL; ssl_opt->ca_file = ca_file ? estrdup(ca_file) : NULL; ssl_opt->weak_cert_validation = mongoc_uri_get_option_as_bool(uri, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES, insecure); ssl_opt->allow_invalid_hostname = mongoc_uri_get_option_as_bool(uri, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES, insecure); /* Boolean options default to false, so we cannot consider them for * any_ssl_option_set. This isn't actually a problem as libmongoc will * already have assigned them when creating the client, enabling SSL, and * assigning SSL options. Therefore, we only need to check for non-defaults * (i.e. non-NULL strings, true booleans). */ if (pem_file || pem_pwd || ca_file || ssl_opt->weak_cert_validation || ssl_opt->allow_invalid_hostname) { *any_ssl_option_set = true; } } static inline char* php_phongo_fetch_ssl_opt_string(zval* zoptions, const char* key) { int plen; zend_bool pfree; char* pval; char* value; pval = php_array_fetch_string(zoptions, key, &plen, &pfree); value = pfree ? 
pval : estrndup(pval, plen); return value; } static mongoc_ssl_opt_t* php_phongo_make_ssl_opt(mongoc_uri_t* uri, zval* zoptions TSRMLS_DC) { mongoc_ssl_opt_t* ssl_opt; bool any_ssl_option_set = false; if (!zoptions) { return NULL; } #if defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) || defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT) if (php_array_existsc(zoptions, "ca_dir")) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"ca_dir\" option is not supported by Secure Channel and Secure Transport"); return NULL; } if (php_array_existsc(zoptions, "capath")) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"capath\" option is not supported by Secure Channel and Secure Transport"); return NULL; } #endif #if defined(MONGOC_ENABLE_SSL_LIBRESSL) || defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT) if (php_array_existsc(zoptions, "crl_file")) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "\"crl_file\" option is not supported by LibreSSL and Secure Transport"); return NULL; } #endif ssl_opt = ecalloc(1, sizeof(mongoc_ssl_opt_t)); /* If SSL options are set in the URL, we need to read them and set them on * the options struct so we can merge potential options from passed in * driverOptions (zoptions) */ if (mongoc_uri_get_tls(uri)) { php_phongo_mongoc_ssl_opts_from_uri(ssl_opt, uri, &any_ssl_option_set); } #define PHONGO_SSL_OPTION_SWAP_STRING(o, n) \ if ((o)) { \ efree((char*) (o)); \ } \ (o) = php_phongo_fetch_ssl_opt_string(zoptions, n); /* Apply driver options that don't have a corresponding URI option. These * are set directly on the SSL options struct. */ if (php_array_existsc(zoptions, "ca_dir")) { PHONGO_SSL_OPTION_SWAP_STRING(ssl_opt->ca_dir, "ca_dir"); any_ssl_option_set = true; } else if (php_array_existsc(zoptions, "capath")) { PHONGO_SSL_OPTION_SWAP_STRING(ssl_opt->ca_dir, "capath"); any_ssl_option_set = true; } if (php_array_existsc(zoptions, "crl_file")) { PHONGO_SSL_OPTION_SWAP_STRING(ssl_opt->crl_file, "crl_file"); any_ssl_option_set = true; } #undef PHONGO_SSL_OPTION_SWAP_STRING if (!any_ssl_option_set) { efree(ssl_opt); return NULL; } return ssl_opt; } static void php_phongo_free_ssl_opt(mongoc_ssl_opt_t* ssl_opt) { if (ssl_opt->pem_file) { str_efree(ssl_opt->pem_file); } if (ssl_opt->pem_pwd) { str_efree(ssl_opt->pem_pwd); } if (ssl_opt->ca_file) { str_efree(ssl_opt->ca_file); } if (ssl_opt->ca_dir) { str_efree(ssl_opt->ca_dir); } if (ssl_opt->crl_file) { str_efree(ssl_opt->crl_file); } efree(ssl_opt); } static inline bool php_phongo_apply_driver_option_to_uri(mongoc_uri_t* uri, zval* zoptions, const char* driverOptionKey, const char* optionKey) { bool ret; char* value; value = php_phongo_fetch_ssl_opt_string(zoptions, driverOptionKey); ret = mongoc_uri_set_option_as_utf8(uri, optionKey, value); efree(value); return ret; } static bool php_phongo_apply_driver_options_to_uri(mongoc_uri_t* uri, zval* zoptions TSRMLS_DC) { if (!zoptions) { return true; } /* Map TLS driver options to the canonical tls options in the URI. 
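* As a quick reference, the mapping handled below is: allow_invalid_hostname -> MONGOC_URI_TLSALLOWINVALIDHOSTNAMES; weak_cert_validation / allow_self_signed -> MONGOC_URI_TLSALLOWINVALIDCERTIFICATES; pem_file / local_cert -> MONGOC_URI_TLSCERTIFICATEKEYFILE; pem_pwd / passphrase -> MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD; ca_file / cafile -> MONGOC_URI_TLSCAFILE.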
*/ if (php_array_existsc(zoptions, "allow_invalid_hostname")) { if (!mongoc_uri_set_option_as_bool(uri, MONGOC_URI_TLSALLOWINVALIDHOSTNAMES, php_array_fetchc_bool(zoptions, "allow_invalid_hostname"))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "allow_invalid_hostname"); return false; } } if (php_array_existsc(zoptions, "weak_cert_validation")) { if (!mongoc_uri_set_option_as_bool(uri, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES, php_array_fetchc_bool(zoptions, "weak_cert_validation"))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "weak_cert_validation"); return false; } } else if (php_array_existsc(zoptions, "allow_self_signed")) { if (!mongoc_uri_set_option_as_bool(uri, MONGOC_URI_TLSALLOWINVALIDCERTIFICATES, php_array_fetchc_bool(zoptions, "allow_self_signed"))) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "allow_self_signed"); return false; } } if (php_array_existsc(zoptions, "pem_file")) { if (!php_phongo_apply_driver_option_to_uri(uri, zoptions, "pem_file", MONGOC_URI_TLSCERTIFICATEKEYFILE)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "pem_file"); return false; } } else if (php_array_existsc(zoptions, "local_cert")) { if (!php_phongo_apply_driver_option_to_uri(uri, zoptions, "local_cert", MONGOC_URI_TLSCERTIFICATEKEYFILE)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "local_cert"); return false; } } if (php_array_existsc(zoptions, "pem_pwd")) { if (!php_phongo_apply_driver_option_to_uri(uri, zoptions, "pem_pwd", MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "pem_pwd"); return false; } } else if (php_array_existsc(zoptions, "passphrase")) { if (!php_phongo_apply_driver_option_to_uri(uri, zoptions, "passphrase", MONGOC_URI_TLSCERTIFICATEKEYFILEPASSWORD)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "passphrase"); return false; } } if (php_array_existsc(zoptions, "ca_file")) { if (!php_phongo_apply_driver_option_to_uri(uri, zoptions, "ca_file", MONGOC_URI_TLSCAFILE)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "ca_file"); return false; } } else if (php_array_existsc(zoptions, "cafile")) { if (!php_phongo_apply_driver_option_to_uri(uri, zoptions, "cafile", MONGOC_URI_TLSCAFILE)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Failed to parse \"%s\" driver option", "cafile"); return false; } } return true; } #endif /* APM callbacks */ static void php_phongo_dispatch_handlers(const char* name, zval* z_event) { #if PHP_VERSION_ID >= 70000 zval* value; ZEND_HASH_FOREACH_VAL(MONGODB_G(subscribers), value) { if (EG(exception)) { break; } /* We can't use the zend_call_method_with_1_params macro here, as it * does a sizeof() on the name argument, which does only work with * constant names, but not with parameterized ones as it does * "sizeof(char*)" in that case. 
*/ zend_call_method(value, NULL, NULL, name, strlen(name), NULL, 1, z_event, NULL TSRMLS_CC); } ZEND_HASH_FOREACH_END(); #else HashPosition pos; TSRMLS_FETCH(); zend_hash_internal_pointer_reset_ex(MONGODB_G(subscribers), &pos); for (;; zend_hash_move_forward_ex(MONGODB_G(subscribers), &pos)) { zval** value; if (zend_hash_get_current_data_ex(MONGODB_G(subscribers), (void**) &value, &pos) == FAILURE) { break; } if (EG(exception)) { break; } /* We can't use the zend_call_method_with_1_params macro here, as it * does a sizeof() on the name argument, which does only work with * constant names, but not with parameterized ones as it does * "sizeof(char*)" in that case. */ zend_call_method(value, NULL, NULL, name, strlen(name), NULL, 1, z_event, NULL TSRMLS_CC); } #endif } static void php_phongo_command_started(const mongoc_apm_command_started_t* event) { php_phongo_commandstartedevent_t* p_event; #if PHP_VERSION_ID >= 70000 zval z_event; #else zval* z_event = NULL; #endif TSRMLS_FETCH(); /* Return early if there are no APM subscribers to notify */ if (!MONGODB_G(subscribers) || zend_hash_num_elements(MONGODB_G(subscribers)) == 0) { return; } #if PHP_VERSION_ID >= 70000 object_init_ex(&z_event, php_phongo_commandstartedevent_ce); p_event = Z_COMMANDSTARTEDEVENT_OBJ_P(&z_event); #else MAKE_STD_ZVAL(z_event); object_init_ex(z_event, php_phongo_commandstartedevent_ce); p_event = Z_COMMANDSTARTEDEVENT_OBJ_P(z_event); #endif p_event->client = mongoc_apm_command_started_get_context(event); p_event->command_name = estrdup(mongoc_apm_command_started_get_command_name(event)); p_event->server_id = mongoc_apm_command_started_get_server_id(event); p_event->operation_id = mongoc_apm_command_started_get_operation_id(event); p_event->request_id = mongoc_apm_command_started_get_request_id(event); p_event->command = bson_copy(mongoc_apm_command_started_get_command(event)); p_event->database_name = estrdup(mongoc_apm_command_started_get_database_name(event)); #if PHP_VERSION_ID >= 70000 php_phongo_dispatch_handlers("commandStarted", &z_event); #else php_phongo_dispatch_handlers("commandStarted", z_event); #endif zval_ptr_dtor(&z_event); } static void php_phongo_command_succeeded(const mongoc_apm_command_succeeded_t* event) { php_phongo_commandsucceededevent_t* p_event; #if PHP_VERSION_ID >= 70000 zval z_event; #else zval* z_event = NULL; #endif TSRMLS_FETCH(); /* Return early if there are no APM subscribers to notify */ if (!MONGODB_G(subscribers) || zend_hash_num_elements(MONGODB_G(subscribers)) == 0) { return; } #if PHP_VERSION_ID >= 70000 object_init_ex(&z_event, php_phongo_commandsucceededevent_ce); p_event = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(&z_event); #else MAKE_STD_ZVAL(z_event); object_init_ex(z_event, php_phongo_commandsucceededevent_ce); p_event = Z_COMMANDSUCCEEDEDEVENT_OBJ_P(z_event); #endif p_event->client = mongoc_apm_command_succeeded_get_context(event); p_event->command_name = estrdup(mongoc_apm_command_succeeded_get_command_name(event)); p_event->server_id = mongoc_apm_command_succeeded_get_server_id(event); p_event->operation_id = mongoc_apm_command_succeeded_get_operation_id(event); p_event->request_id = mongoc_apm_command_succeeded_get_request_id(event); p_event->duration_micros = mongoc_apm_command_succeeded_get_duration(event); p_event->reply = bson_copy(mongoc_apm_command_succeeded_get_reply(event)); #if PHP_VERSION_ID >= 70000 php_phongo_dispatch_handlers("commandSucceeded", &z_event); #else php_phongo_dispatch_handlers("commandSucceeded", z_event); #endif zval_ptr_dtor(&z_event); } static void 
php_phongo_command_failed(const mongoc_apm_command_failed_t* event) { php_phongo_commandfailedevent_t* p_event; #if PHP_VERSION_ID >= 70000 zval z_event; #else zval* z_event = NULL; #endif bson_error_t tmp_error = { 0 }; zend_class_entry* default_exception_ce; TSRMLS_FETCH(); default_exception_ce = zend_exception_get_default(TSRMLS_C); /* Return early if there are no APM subscribers to notify */ if (!MONGODB_G(subscribers) || zend_hash_num_elements(MONGODB_G(subscribers)) == 0) { return; } #if PHP_VERSION_ID >= 70000 object_init_ex(&z_event, php_phongo_commandfailedevent_ce); p_event = Z_COMMANDFAILEDEVENT_OBJ_P(&z_event); #else MAKE_STD_ZVAL(z_event); object_init_ex(z_event, php_phongo_commandfailedevent_ce); p_event = Z_COMMANDFAILEDEVENT_OBJ_P(z_event); #endif p_event->client = mongoc_apm_command_failed_get_context(event); p_event->command_name = estrdup(mongoc_apm_command_failed_get_command_name(event)); p_event->server_id = mongoc_apm_command_failed_get_server_id(event); p_event->operation_id = mongoc_apm_command_failed_get_operation_id(event); p_event->request_id = mongoc_apm_command_failed_get_request_id(event); p_event->duration_micros = mongoc_apm_command_failed_get_duration(event); p_event->reply = bson_copy(mongoc_apm_command_failed_get_reply(event)); /* We need to process and convert the error right here, otherwise * debug_info will turn into a recursive loop, and with the wrong trace * locations */ mongoc_apm_command_failed_get_error(event, &tmp_error); { #if PHP_VERSION_ID < 70000 MAKE_STD_ZVAL(p_event->z_error); object_init_ex(p_event->z_error, phongo_exception_from_mongoc_domain(tmp_error.domain, tmp_error.code)); zend_update_property_string(default_exception_ce, p_event->z_error, ZEND_STRL("message"), tmp_error.message TSRMLS_CC); zend_update_property_long(default_exception_ce, p_event->z_error, ZEND_STRL("code"), tmp_error.code TSRMLS_CC); #else object_init_ex(&p_event->z_error, phongo_exception_from_mongoc_domain(tmp_error.domain, tmp_error.code)); zend_update_property_string(default_exception_ce, &p_event->z_error, ZEND_STRL("message"), tmp_error.message TSRMLS_CC); zend_update_property_long(default_exception_ce, &p_event->z_error, ZEND_STRL("code"), tmp_error.code TSRMLS_CC); #endif } #if PHP_VERSION_ID >= 70000 php_phongo_dispatch_handlers("commandFailed", &z_event); #else php_phongo_dispatch_handlers("commandFailed", z_event); #endif zval_ptr_dtor(&z_event); } /* Sets the callbacks for APM */ int php_phongo_set_monitoring_callbacks(mongoc_client_t* client) { int retval; mongoc_apm_callbacks_t* callbacks = mongoc_apm_callbacks_new(); mongoc_apm_set_command_started_cb(callbacks, php_phongo_command_started); mongoc_apm_set_command_succeeded_cb(callbacks, php_phongo_command_succeeded); mongoc_apm_set_command_failed_cb(callbacks, php_phongo_command_failed); retval = mongoc_client_set_apm_callbacks(client, callbacks, client); mongoc_apm_callbacks_destroy(callbacks); return retval; } /* Creates a hash for a client by concatenating the URI string with serialized * options arrays. On success, a persistent string is returned (i.e. pefree() * should be used to free it) and hash_len will be set to the string's length. * On error, an exception will have been thrown and NULL will be returned. 
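* The hash input also includes the current PID, so forked processes and Managers constructed with different options never share a persisted client. Conceptually, the serialized value is equivalent to array('pid' => getpid(), 'uri' => uri_string, 'options' => options, 'driverOptions' => driverOptions), with missing arguments serialized as null.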
*/ static char* php_phongo_manager_make_client_hash(const char* uri_string, zval* options, zval* driverOptions, size_t* hash_len TSRMLS_DC) { char* hash = NULL; smart_str var_buf = { 0 }; php_serialize_data_t var_hash; #if PHP_VERSION_ID >= 70000 zval args; array_init_size(&args, 4); ADD_ASSOC_LONG_EX(&args, "pid", getpid()); ADD_ASSOC_STRING(&args, "uri", uri_string); if (options) { ADD_ASSOC_ZVAL_EX(&args, "options", options); Z_ADDREF_P(options); } else { ADD_ASSOC_NULL_EX(&args, "options"); } if (driverOptions) { ADD_ASSOC_ZVAL_EX(&args, "driverOptions", driverOptions); Z_ADDREF_P(driverOptions); } else { ADD_ASSOC_NULL_EX(&args, "driverOptions"); } PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&var_buf, &args, &var_hash); PHP_VAR_SERIALIZE_DESTROY(var_hash); if (!EG(exception)) { *hash_len = ZSTR_LEN(var_buf.s); hash = pestrndup(ZSTR_VAL(var_buf.s), *hash_len, 1); } zval_ptr_dtor(&args); #else zval* args; MAKE_STD_ZVAL(args); array_init_size(args, 4); ADD_ASSOC_LONG_EX(args, "pid", getpid()); ADD_ASSOC_STRING(args, "uri", uri_string); if (options) { ADD_ASSOC_ZVAL_EX(args, "options", options); Z_ADDREF_P(options); } else { ADD_ASSOC_NULL_EX(args, "options"); } if (driverOptions) { ADD_ASSOC_ZVAL_EX(args, "driverOptions", driverOptions); Z_ADDREF_P(driverOptions); } else { ADD_ASSOC_NULL_EX(args, "driverOptions"); } PHP_VAR_SERIALIZE_INIT(var_hash); php_var_serialize(&var_buf, &args, &var_hash TSRMLS_CC); PHP_VAR_SERIALIZE_DESTROY(var_hash); if (!EG(exception)) { *hash_len = var_buf.len; hash = pestrndup(var_buf.c, *hash_len, 1); } zval_ptr_dtor(&args); #endif smart_str_free(&var_buf); return hash; } static mongoc_client_t* php_phongo_make_mongo_client(const mongoc_uri_t* uri TSRMLS_DC) /* {{{ */ { const char *mongoc_version, *bson_version; #ifdef HAVE_SYSTEM_LIBMONGOC mongoc_version = mongoc_get_version(); #else mongoc_version = "bundled"; #endif #ifdef HAVE_SYSTEM_LIBBSON bson_version = bson_get_version(); #else bson_version = "bundled"; #endif MONGOC_DEBUG( "Creating Manager, phongo-%s[%s] - mongoc-%s(%s), libbson-%s(%s), php-%s", PHP_MONGODB_VERSION, PHP_MONGODB_STABILITY, MONGOC_VERSION_S, mongoc_version, BSON_VERSION_S, bson_version, PHP_VERSION); return mongoc_client_new_from_uri(uri); } /* }}} */ static void php_phongo_persist_client(const char* hash, size_t hash_len, mongoc_client_t* client TSRMLS_DC) { php_phongo_pclient_t* pclient = (php_phongo_pclient_t*) pecalloc(1, sizeof(php_phongo_pclient_t), 1); pclient->pid = (int) getpid(); pclient->client = client; #if PHP_VERSION_ID >= 70000 zend_hash_str_update_ptr(&MONGODB_G(pclients), hash, hash_len, pclient); #else zend_hash_update(&MONGODB_G(pclients), hash, hash_len + 1, &pclient, sizeof(php_phongo_pclient_t*), NULL); #endif } static mongoc_client_t* php_phongo_find_client(const char* hash, size_t hash_len TSRMLS_DC) { #if PHP_VERSION_ID >= 70000 php_phongo_pclient_t* pclient; if ((pclient = zend_hash_str_find_ptr(&MONGODB_G(pclients), hash, hash_len)) != NULL) { return pclient->client; } #else php_phongo_pclient_t** pclient; if (zend_hash_find(&MONGODB_G(pclients), hash, hash_len + 1, (void**) &pclient) == SUCCESS) { return (*pclient)->client; } #endif return NULL; } void phongo_manager_init(php_phongo_manager_t* manager, const char* uri_string, zval* options, zval* driverOptions TSRMLS_DC) /* {{{ */ { char* hash = NULL; size_t hash_len = 0; bson_t bson_options = BSON_INITIALIZER; mongoc_uri_t* uri = NULL; #ifdef MONGOC_ENABLE_SSL mongoc_ssl_opt_t* ssl_opt = NULL; #endif if (!(hash = 
php_phongo_manager_make_client_hash(uri_string, options, driverOptions, &hash_len TSRMLS_CC))) { /* Exception should already have been thrown and there is nothing to free */ return; } if ((manager->client = php_phongo_find_client(hash, hash_len TSRMLS_CC))) { MONGOC_DEBUG("Found client for hash: %s\n", hash); goto cleanup; } if (options) { php_phongo_zval_to_bson(options, PHONGO_BSON_NONE, &bson_options, NULL TSRMLS_CC); } /* An exception may be thrown during BSON conversion */ if (EG(exception)) { goto cleanup; } if (!(uri = php_phongo_make_uri(uri_string, &bson_options TSRMLS_CC))) { /* Exception should already have been thrown */ goto cleanup; } if (!php_phongo_apply_options_to_uri(uri, &bson_options TSRMLS_CC) || !php_phongo_apply_rc_options_to_uri(uri, &bson_options TSRMLS_CC) || !php_phongo_apply_rp_options_to_uri(uri, &bson_options TSRMLS_CC) || !php_phongo_apply_wc_options_to_uri(uri, &bson_options TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } #ifdef MONGOC_ENABLE_SSL if (!php_phongo_apply_driver_options_to_uri(uri, driverOptions TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } ssl_opt = php_phongo_make_ssl_opt(uri, driverOptions TSRMLS_CC); /* An exception may be thrown during SSL option creation */ if (EG(exception)) { goto cleanup; } if (!php_phongo_uri_finalize_tls(uri TSRMLS_CC)) { /* Exception should already have been thrown */ goto cleanup; } #else if (mongoc_uri_get_tls(uri)) { phongo_throw_exception(PHONGO_ERROR_INVALID_ARGUMENT TSRMLS_CC, "Cannot create SSL client. SSL is not enabled in this build."); goto cleanup; } #endif manager->client = php_phongo_make_mongo_client(uri TSRMLS_CC); if (!manager->client) { phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Failed to create Manager from URI: '%s'", uri_string); goto cleanup; } /* Set the error API version only after confirming client creation succeeded */ mongoc_client_set_error_api(manager->client, MONGOC_ERROR_API_VERSION_2); #ifdef MONGOC_ENABLE_SSL if (ssl_opt && mongoc_uri_get_tls(uri)) { mongoc_client_set_ssl_opts(manager->client, ssl_opt); } #endif MONGOC_DEBUG("Created client hash: %s\n", hash); php_phongo_persist_client(hash, hash_len, manager->client TSRMLS_CC); cleanup: if (hash) { pefree(hash, 1); } bson_destroy(&bson_options); if (uri) { mongoc_uri_destroy(uri); } #ifdef MONGOC_ENABLE_SSL if (ssl_opt) { php_phongo_free_ssl_opt(ssl_opt); } #endif } /* }}} */ void php_phongo_new_utcdatetime_from_epoch(zval* object, int64_t msec_since_epoch TSRMLS_DC) /* {{{ */ { php_phongo_utcdatetime_t* intern; object_init_ex(object, php_phongo_utcdatetime_ce); intern = Z_UTCDATETIME_OBJ_P(object); intern->milliseconds = msec_since_epoch; intern->initialized = true; } /* }}} */ void php_phongo_new_timestamp_from_increment_and_timestamp(zval* object, uint32_t increment, uint32_t timestamp TSRMLS_DC) /* {{{ */ { php_phongo_timestamp_t* intern; object_init_ex(object, php_phongo_timestamp_ce); intern = Z_TIMESTAMP_OBJ_P(object); intern->increment = increment; intern->timestamp = timestamp; intern->initialized = true; } /* }}} */ void php_phongo_new_javascript_from_javascript(int init, zval* object, const char* code, size_t code_len TSRMLS_DC) /* {{{ */ { php_phongo_new_javascript_from_javascript_and_scope(init, object, code, code_len, NULL TSRMLS_CC); } /* }}} */ void php_phongo_new_javascript_from_javascript_and_scope(int init, zval* object, const char* code, size_t code_len, const bson_t* scope TSRMLS_DC) /* {{{ */ { php_phongo_javascript_t* intern; if (init) { object_init_ex(object, php_phongo_javascript_ce); } intern = 
Z_JAVASCRIPT_OBJ_P(object); intern->code = estrndup(code, code_len); intern->code_len = code_len; intern->scope = scope ? bson_copy(scope) : NULL; } /* }}} */ void php_phongo_new_binary_from_binary_and_type(zval* object, const char* data, size_t data_len, bson_subtype_t type TSRMLS_DC) /* {{{ */ { php_phongo_binary_t* intern; object_init_ex(object, php_phongo_binary_ce); intern = Z_BINARY_OBJ_P(object); intern->data = estrndup(data, data_len); intern->data_len = data_len; intern->type = (uint8_t) type; } /* }}} */ void php_phongo_new_decimal128(zval* object, const bson_decimal128_t* decimal TSRMLS_DC) /* {{{ */ { php_phongo_decimal128_t* intern; object_init_ex(object, php_phongo_decimal128_ce); intern = Z_DECIMAL128_OBJ_P(object); memcpy(&intern->decimal, decimal, sizeof(bson_decimal128_t)); intern->initialized = true; } /* }}} */ void php_phongo_new_int64(zval* object, int64_t integer TSRMLS_DC) /* {{{ */ { php_phongo_int64_t* intern; object_init_ex(object, php_phongo_int64_ce); intern = Z_INT64_OBJ_P(object); intern->integer = integer; intern->initialized = true; } /* }}} */ /* qsort() compare callback for alphabetizing regex flags upon initialization */ static int php_phongo_regex_compare_flags(const void* f1, const void* f2) { if (*(const char*) f1 == *(const char*) f2) { return 0; } return (*(const char*) f1 > *(const char*) f2) ? 1 : -1; } void php_phongo_new_regex_from_regex_and_options(zval* object, const char* pattern, const char* flags TSRMLS_DC) /* {{{ */ { php_phongo_regex_t* intern; object_init_ex(object, php_phongo_regex_ce); intern = Z_REGEX_OBJ_P(object); intern->pattern_len = strlen(pattern); intern->pattern = estrndup(pattern, intern->pattern_len); intern->flags_len = strlen(flags); intern->flags = estrndup(flags, intern->flags_len); /* Ensure flags are alphabetized upon initialization. This may be removed * once CDRIVER-1883 is implemented. 
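* For example, a Regex constructed with flags "xi" is stored with flags "ix", so flag comparisons are independent of the order supplied by the caller.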
*/ qsort((void*) intern->flags, intern->flags_len, 1, php_phongo_regex_compare_flags); } /* }}} */ void php_phongo_new_symbol(zval* object, const char* symbol, size_t symbol_len TSRMLS_DC) /* {{{ */ { php_phongo_symbol_t* intern; object_init_ex(object, php_phongo_symbol_ce); intern = Z_SYMBOL_OBJ_P(object); intern->symbol = estrndup(symbol, symbol_len); intern->symbol_len = symbol_len; } /* }}} */ void php_phongo_new_dbpointer(zval* object, const char* ref, size_t ref_len, const bson_oid_t* oid TSRMLS_DC) /* {{{ */ { php_phongo_dbpointer_t* intern; object_init_ex(object, php_phongo_dbpointer_ce); intern = Z_DBPOINTER_OBJ_P(object); intern->ref = estrndup(ref, ref_len); intern->ref_len = ref_len; bson_oid_to_string(oid, intern->id); } /* }}} */ /* {{{ Memory allocation wrappers */ static void* php_phongo_malloc(size_t num_bytes) /* {{{ */ { return pemalloc(num_bytes, 1); } /* }}} */ static void* php_phongo_calloc(size_t num_members, size_t num_bytes) /* {{{ */ { return pecalloc(num_members, num_bytes, 1); } /* }}} */ static void* php_phongo_realloc(void* mem, size_t num_bytes) { /* {{{ */ return perealloc(mem, num_bytes, 1); } /* }}} */ static void php_phongo_free(void* mem) /* {{{ */ { if (mem) { pefree(mem, 1); } } /* }}} */ /* }}} */ /* {{{ M[INIT|SHUTDOWN] R[INIT|SHUTDOWN] G[INIT|SHUTDOWN] MINFO INI */ ZEND_INI_MH(OnUpdateDebug) { void*** ctx = NULL; char* tmp_dir = NULL; TSRMLS_SET_CTX(ctx); /* Close any previously open log files */ if (MONGODB_G(debug_fd)) { if (MONGODB_G(debug_fd) != stderr && MONGODB_G(debug_fd) != stdout) { fclose(MONGODB_G(debug_fd)); } MONGODB_G(debug_fd) = NULL; } if (!new_value || (new_value && !ZSTR_VAL(new_value)[0]) || strcasecmp("0", ZSTR_VAL(new_value)) == 0 || strcasecmp("off", ZSTR_VAL(new_value)) == 0 || strcasecmp("no", ZSTR_VAL(new_value)) == 0 || strcasecmp("false", ZSTR_VAL(new_value)) == 0) { mongoc_log_trace_disable(); mongoc_log_set_handler(NULL, NULL); #if PHP_VERSION_ID >= 70000 return OnUpdateString(entry, new_value, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC); #else return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC); #endif } if (strcasecmp(ZSTR_VAL(new_value), "stderr") == 0) { MONGODB_G(debug_fd) = stderr; } else if (strcasecmp(ZSTR_VAL(new_value), "stdout") == 0) { MONGODB_G(debug_fd) = stdout; } else if ( strcasecmp("1", ZSTR_VAL(new_value)) == 0 || strcasecmp("on", ZSTR_VAL(new_value)) == 0 || strcasecmp("yes", ZSTR_VAL(new_value)) == 0 || strcasecmp("true", ZSTR_VAL(new_value)) == 0) { tmp_dir = NULL; } else { tmp_dir = ZSTR_VAL(new_value); } if (!MONGODB_G(debug_fd)) { time_t t; int fd = -1; char* prefix; int len; phongo_char* filename; time(&t); len = spprintf(&prefix, 0, "PHONGO-%ld", t); fd = php_open_temporary_fd(tmp_dir, prefix, &filename TSRMLS_CC); if (fd != -1) { const char* path = ZSTR_VAL(filename); MONGODB_G(debug_fd) = VCWD_FOPEN(path, "a"); } efree(filename); efree(prefix); close(fd); } mongoc_log_trace_enable(); mongoc_log_set_handler(php_phongo_log, ctx); #if PHP_VERSION_ID >= 70000 return OnUpdateString(entry, new_value, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC); #else return OnUpdateString(entry, new_value, new_value_length, mh_arg1, mh_arg2, mh_arg3, stage TSRMLS_CC); #endif } /* {{{ INI entries */ PHP_INI_BEGIN() #if PHP_VERSION_ID >= 70000 STD_PHP_INI_ENTRY(PHONGO_DEBUG_INI, PHONGO_DEBUG_INI_DEFAULT, PHP_INI_ALL, OnUpdateDebug, debug, zend_mongodb_globals, mongodb_globals) #else { 0, PHP_INI_ALL, (char*) PHONGO_DEBUG_INI, sizeof(PHONGO_DEBUG_INI), OnUpdateDebug, 
(void*) XtOffsetOf(zend_mongodb_globals, debug), (void*) &mglo, NULL, (char*) PHONGO_DEBUG_INI_DEFAULT, sizeof(PHONGO_DEBUG_INI_DEFAULT) - 1, NULL, 0, 0, 0, NULL }, #endif PHP_INI_END() /* }}} */ static inline void php_phongo_pclient_destroy(php_phongo_pclient_t* pclient) { /* Do not destroy mongoc_client_t objects created by other processes. This * ensures that we do not shutdown sockets that may still be in use by our * parent process (see: CDRIVER-2049). While this is a leak, we are already * in MSHUTDOWN at this point. */ if (pclient->pid == getpid()) { mongoc_client_destroy(pclient->client); } pefree(pclient, 1); } #if PHP_VERSION_ID >= 70000 static void php_phongo_pclient_dtor(zval* zv) { php_phongo_pclient_destroy((php_phongo_pclient_t*) Z_PTR_P(zv)); } #else static void php_phongo_pclient_dtor(void* pp) { php_phongo_pclient_destroy(*((php_phongo_pclient_t**) pp)); } #endif /* {{{ PHP_RINIT_FUNCTION */ PHP_RINIT_FUNCTION(mongodb) { /* Initialize HashTable for APM subscribers, which is initialized to NULL in * GINIT and destroyed and reset to NULL in RSHUTDOWN. */ if (MONGODB_G(subscribers) == NULL) { ALLOC_HASHTABLE(MONGODB_G(subscribers)); zend_hash_init(MONGODB_G(subscribers), 0, NULL, ZVAL_PTR_DTOR, 0); } return SUCCESS; } /* }}} */ /* {{{ PHP_GINIT_FUNCTION */ PHP_GINIT_FUNCTION(mongodb) { bson_mem_vtable_t bsonMemVTable = { php_phongo_malloc, php_phongo_calloc, php_phongo_realloc, php_phongo_free, }; #if PHP_VERSION_ID >= 70000 #if defined(COMPILE_DL_MONGODB) && defined(ZTS) ZEND_TSRMLS_CACHE_UPDATE(); #endif #endif memset(mongodb_globals, 0, sizeof(zend_mongodb_globals)); mongodb_globals->bsonMemVTable = bsonMemVTable; /* Initialize HashTable for persistent clients */ zend_hash_init_ex(&mongodb_globals->pclients, 0, NULL, php_phongo_pclient_dtor, 1, 0); } /* }}} */ static zend_class_entry* php_phongo_fetch_internal_class(const char* class_name, size_t class_name_len TSRMLS_DC) { #if PHP_VERSION_ID >= 70000 zend_class_entry* pce; if ((pce = zend_hash_str_find_ptr(CG(class_table), class_name, class_name_len))) { return pce; } #else zend_class_entry** pce; if (zend_hash_find(CG(class_table), class_name, class_name_len + 1, (void**) &pce) == SUCCESS) { return *pce; } #endif return NULL; } /* {{{ PHP_MINIT_FUNCTION */ PHP_MINIT_FUNCTION(mongodb) { char* php_version_string; (void) type; /* We don't care if we are loaded via dl() or extension= */ REGISTER_INI_ENTRIES(); /* Initialize libmongoc */ mongoc_init(); /* Set handshake options */ php_version_string = malloc(4 + sizeof(PHP_VERSION) + 1); snprintf(php_version_string, 4 + sizeof(PHP_VERSION) + 1, "PHP %s", PHP_VERSION); mongoc_handshake_data_append("ext-mongodb:PHP", PHP_MONGODB_VERSION, php_version_string); free(php_version_string); /* Initialize libbson */ bson_mem_set_vtable(&MONGODB_G(bsonMemVTable)); /* Prep default object handlers to be used when we register the classes */ memcpy(&phongo_std_object_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); phongo_std_object_handlers.clone_obj = NULL; /* phongo_std_object_handlers.get_debug_info = NULL; phongo_std_object_handlers.compare_objects = NULL; phongo_std_object_handlers.cast_object = NULL; phongo_std_object_handlers.count_elements = NULL; phongo_std_object_handlers.get_closure = NULL; */ /* Initialize zend_class_entry dependencies. * * Although DateTimeImmutable was introduced in PHP 5.5.0, * php_date_get_immutable_ce() is not available in PHP versions before * 5.5.24 and 5.6.8. 
* * Although JsonSerializable was introduced in PHP 5.4.0, * php_json_serializable_ce is not exported in PHP versions before 5.4.26 * and 5.5.10. For later PHP versions, looking up the class manually also * helps with distros that disable LTDL_LAZY for dlopen() (e.g. Fedora). */ php_phongo_date_immutable_ce = php_phongo_fetch_internal_class(ZEND_STRL("datetimeimmutable") TSRMLS_CC); php_phongo_json_serializable_ce = php_phongo_fetch_internal_class(ZEND_STRL("jsonserializable") TSRMLS_CC); if (php_phongo_json_serializable_ce == NULL) { zend_error(E_ERROR, "JsonSerializable class is not defined. Please ensure that the 'json' module is loaded before the 'mongodb' module."); return FAILURE; } /* Register base BSON classes first */ php_phongo_type_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_serializable_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_unserializable_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_binary_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_decimal128_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_javascript_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_maxkey_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_minkey_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_objectid_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_regex_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_timestamp_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_utcdatetime_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_binary_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_dbpointer_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_decimal128_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_int64_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_javascript_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_maxkey_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_minkey_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_objectid_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_persistable_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_regex_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_symbol_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_timestamp_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_undefined_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_utcdatetime_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_cursor_interface_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_bulkwrite_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_command_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_cursor_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_cursorid_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_manager_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_query_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_readconcern_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_readpreference_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_server_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_session_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_writeconcern_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_writeconcernerror_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_writeerror_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_writeresult_init_ce(INIT_FUNC_ARGS_PASSTHRU); /* Register base exception classes first */ php_phongo_exception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_runtimeexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_serverexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_connectionexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_writeexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_authenticationexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_bulkwriteexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); 
php_phongo_commandexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_connectiontimeoutexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_executiontimeoutexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_invalidargumentexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_logicexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_sslconnectionexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_unexpectedvalueexception_init_ce(INIT_FUNC_ARGS_PASSTHRU); /* Register base APM classes first */ php_phongo_subscriber_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_commandsubscriber_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_commandfailedevent_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_commandstartedevent_init_ce(INIT_FUNC_ARGS_PASSTHRU); php_phongo_commandsucceededevent_init_ce(INIT_FUNC_ARGS_PASSTHRU); REGISTER_STRING_CONSTANT("MONGODB_VERSION", (char*) PHP_MONGODB_VERSION, CONST_CS | CONST_PERSISTENT); REGISTER_STRING_CONSTANT("MONGODB_STABILITY", (char*) PHP_MONGODB_STABILITY, CONST_CS | CONST_PERSISTENT); return SUCCESS; } /* }}} */ /* {{{ PHP_MSHUTDOWN_FUNCTION */ PHP_MSHUTDOWN_FUNCTION(mongodb) { (void) type; /* We don't care if we are loaded via dl() or extension= */ /* Destroy HashTable for persistent clients. The HashTable destructor will * destroy any mongoc_client_t objects that were created by this process. */ zend_hash_destroy(&MONGODB_G(pclients)); bson_mem_restore_vtable(); /* Cleanup after libmongoc */ mongoc_cleanup(); UNREGISTER_INI_ENTRIES(); return SUCCESS; } /* }}} */ /* {{{ PHP_RSHUTDOWN_FUNCTION */ PHP_RSHUTDOWN_FUNCTION(mongodb) { /* Destroy HashTable for APM subscribers, which was initialized in RINIT */ if (MONGODB_G(subscribers)) { zend_hash_destroy(MONGODB_G(subscribers)); FREE_HASHTABLE(MONGODB_G(subscribers)); MONGODB_G(subscribers) = NULL; } return SUCCESS; } /* }}} */ /* {{{ PHP_GSHUTDOWN_FUNCTION */ PHP_GSHUTDOWN_FUNCTION(mongodb) { mongodb_globals->debug = NULL; if (mongodb_globals->debug_fd) { fclose(mongodb_globals->debug_fd); mongodb_globals->debug_fd = NULL; } } /* }}} */ /* {{{ PHP_MINFO_FUNCTION */ PHP_MINFO_FUNCTION(mongodb) { php_info_print_table_start(); php_info_print_table_header(2, "MongoDB support", "enabled"); php_info_print_table_row(2, "MongoDB extension version", PHP_MONGODB_VERSION); php_info_print_table_row(2, "MongoDB extension stability", PHP_MONGODB_STABILITY); #ifdef HAVE_SYSTEM_LIBBSON php_info_print_table_row(2, "libbson headers version", BSON_VERSION_S); php_info_print_table_row(2, "libbson library version", bson_get_version()); #else php_info_print_table_row(2, "libbson bundled version", BSON_VERSION_S); #endif #ifdef HAVE_SYSTEM_LIBMONGOC php_info_print_table_row(2, "libmongoc headers version", MONGOC_VERSION_S); php_info_print_table_row(2, "libmongoc library version", mongoc_get_version()); #else /* Bundled libraries, buildtime = runtime */ php_info_print_table_row(2, "libmongoc bundled version", MONGOC_VERSION_S); #endif #ifdef MONGOC_ENABLE_SSL php_info_print_table_row(2, "libmongoc SSL", "enabled"); #if defined(MONGOC_ENABLE_SSL_OPENSSL) php_info_print_table_row(2, "libmongoc SSL library", "OpenSSL"); #elif defined(MONGOC_ENABLE_SSL_LIBRESSL) php_info_print_table_row(2, "libmongoc SSL library", "LibreSSL"); #elif defined(MONGOC_ENABLE_SSL_SECURE_TRANSPORT) php_info_print_table_row(2, "libmongoc SSL library", "Secure Transport"); #elif defined(MONGOC_ENABLE_SSL_SECURE_CHANNEL) php_info_print_table_row(2, "libmongoc SSL library", "Secure Channel"); #else php_info_print_table_row(2, "libmongoc SSL library", 
"unknown"); #endif #else /* MONGOC_ENABLE_SSL */ php_info_print_table_row(2, "libmongoc SSL", "disabled"); #endif #ifdef MONGOC_ENABLE_CRYPTO php_info_print_table_row(2, "libmongoc crypto", "enabled"); #if defined(MONGOC_ENABLE_CRYPTO_LIBCRYPTO) php_info_print_table_row(2, "libmongoc crypto library", "libcrypto"); #elif defined(MONGOC_ENABLE_CRYPTO_COMMON_CRYPTO) php_info_print_table_row(2, "libmongoc crypto library", "Common Crypto"); #elif defined(MONGOC_ENABLE_CRYPTO_CNG) php_info_print_table_row(2, "libmongoc crypto library", "CNG"); #else php_info_print_table_row(2, "libmongoc crypto library", "unknown"); #endif #ifdef MONGOC_ENABLE_CRYPTO_SYSTEM_PROFILE php_info_print_table_row(2, "libmongoc crypto system profile", "enabled"); #else php_info_print_table_row(2, "libmongoc crypto system profile", "disabled"); #endif #else /* MONGOC_ENABLE_CRYPTO */ php_info_print_table_row(2, "libmongoc crypto", "disabled"); #endif #ifdef MONGOC_ENABLE_SASL php_info_print_table_row(2, "libmongoc SASL", "enabled"); #else php_info_print_table_row(2, "libmongoc SASL", "disabled"); #endif #ifdef MONGOC_ENABLE_ICU php_info_print_table_row(2, "libmongoc ICU", "enabled"); #else php_info_print_table_row(2, "libmongoc ICU", "disabled"); #endif #ifdef MONGOC_ENABLE_COMPRESSION php_info_print_table_row(2, "libmongoc compression", "enabled"); #ifdef MONGOC_ENABLE_COMPRESSION_SNAPPY php_info_print_table_row(2, "libmongoc compression snappy", "enabled"); #else php_info_print_table_row(2, "libmongoc compression snappy", "disabled"); #endif #ifdef MONGOC_ENABLE_COMPRESSION_ZLIB php_info_print_table_row(2, "libmongoc compression zlib", "enabled"); #else php_info_print_table_row(2, "libmongoc compression zlib", "disabled"); #endif #else /* MONGOC_ENABLE_COMPRESSION */ php_info_print_table_row(2, "libmongoc compression", "disabled"); #endif php_info_print_table_end(); DISPLAY_INI_ENTRIES(); } /* }}} */ /* }}} */ /* {{{ Shared function entries for disabling constructors and unserialize() */ PHP_FUNCTION(MongoDB_disabled___construct) /* {{{ */ { phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "Accessing private constructor"); } /* }}} */ PHP_FUNCTION(MongoDB_disabled___wakeup) /* {{{ */ { if (zend_parse_parameters_none() == FAILURE) { return; } phongo_throw_exception(PHONGO_ERROR_RUNTIME TSRMLS_CC, "%s", "MongoDB\\Driver objects cannot be serialized"); } /* }}} */ /* }}} */ /* {{{ mongodb_functions[] */ ZEND_BEGIN_ARG_INFO_EX(ai_bson_fromPHP, 0, 0, 1) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO_EX(ai_bson_toPHP, 0, 0, 1) ZEND_ARG_INFO(0, bson) ZEND_ARG_ARRAY_INFO(0, typemap, 0) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO_EX(ai_bson_toJSON, 0, 0, 1) ZEND_ARG_INFO(0, bson) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO_EX(ai_bson_fromJSON, 0, 0, 1) ZEND_ARG_INFO(0, json) ZEND_END_ARG_INFO(); ZEND_BEGIN_ARG_INFO_EX(ai_mongodb_driver_monitoring_subscriber, 0, 0, 1) ZEND_ARG_OBJ_INFO(0, subscriber, MongoDB\\Driver\\Monitoring\\Subscriber, 0) ZEND_END_ARG_INFO(); static const zend_function_entry mongodb_functions[] = { ZEND_NS_NAMED_FE("MongoDB\\BSON", fromPHP, PHP_FN(MongoDB_BSON_fromPHP), ai_bson_fromPHP) ZEND_NS_NAMED_FE("MongoDB\\BSON", toPHP, PHP_FN(MongoDB_BSON_toPHP), ai_bson_toPHP) ZEND_NS_NAMED_FE("MongoDB\\BSON", toJSON, PHP_FN(MongoDB_BSON_toJSON), ai_bson_toJSON) ZEND_NS_NAMED_FE("MongoDB\\BSON", toCanonicalExtendedJSON, PHP_FN(MongoDB_BSON_toCanonicalExtendedJSON), ai_bson_toJSON) ZEND_NS_NAMED_FE("MongoDB\\BSON", toRelaxedExtendedJSON, PHP_FN(MongoDB_BSON_toRelaxedExtendedJSON), ai_bson_toJSON) 
ZEND_NS_NAMED_FE("MongoDB\\BSON", fromJSON, PHP_FN(MongoDB_BSON_fromJSON), ai_bson_fromJSON) ZEND_NS_NAMED_FE("MongoDB\\Driver\\Monitoring", addSubscriber, PHP_FN(MongoDB_Driver_Monitoring_addSubscriber), ai_mongodb_driver_monitoring_subscriber) ZEND_NS_NAMED_FE("MongoDB\\Driver\\Monitoring", removeSubscriber, PHP_FN(MongoDB_Driver_Monitoring_removeSubscriber), ai_mongodb_driver_monitoring_subscriber) PHP_FE_END }; /* }}} */ static const zend_module_dep mongodb_deps[] = { ZEND_MOD_REQUIRED("date") ZEND_MOD_REQUIRED("json") ZEND_MOD_REQUIRED("spl") ZEND_MOD_REQUIRED("standard") ZEND_MOD_END }; /* {{{ mongodb_module_entry */ zend_module_entry mongodb_module_entry = { STANDARD_MODULE_HEADER_EX, NULL, mongodb_deps, "mongodb", mongodb_functions, PHP_MINIT(mongodb), PHP_MSHUTDOWN(mongodb), PHP_RINIT(mongodb), PHP_RSHUTDOWN(mongodb), PHP_MINFO(mongodb), PHP_MONGODB_VERSION, PHP_MODULE_GLOBALS(mongodb), PHP_GINIT(mongodb), PHP_GSHUTDOWN(mongodb), NULL, STANDARD_MODULE_PROPERTIES_EX }; /* }}} */ #ifdef COMPILE_DL_MONGODB ZEND_GET_MODULE(mongodb) #endif /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/php_phongo.h0000644000076500000240000002477013572250761015000 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef PHONGO_H #define PHONGO_H /* External libs */ #include "bson/bson.h" #include "mongoc/mongoc.h" #include "phongo_version.h" #include "phongo_compat.h" #include "php_phongo_classes.h" #define phpext_mongodb_ptr &mongodb_module_entry extern zend_module_entry mongodb_module_entry; /* Structure for persisted libmongoc clients. The PID is included to ensure that * processes do not destroy clients created by other processes (relevant for * forking). We avoid using pid_t for Windows compatibility. */ typedef struct { mongoc_client_t* client; int pid; } php_phongo_pclient_t; ZEND_BEGIN_MODULE_GLOBALS(mongodb) char* debug; FILE* debug_fd; bson_mem_vtable_t bsonMemVTable; HashTable pclients; HashTable* subscribers; ZEND_END_MODULE_GLOBALS(mongodb) #if PHP_VERSION_ID >= 70000 #define MONGODB_G(v) ZEND_MODULE_GLOBALS_ACCESSOR(mongodb, v) #if defined(ZTS) && defined(COMPILE_DL_MONGODB) ZEND_TSRMLS_CACHE_EXTERN() #endif #else /* PHP_VERSION_ID >= 70000 */ #ifdef ZTS #define MONGODB_G(v) TSRMG(mongodb_globals_id, zend_mongodb_globals*, v) #define mglo mongodb_globals_id #else /* ZTS */ #define MONGODB_G(v) (mongodb_globals.v) #define mglo mongodb_globals #endif /* ZTS */ #endif /* PHP_VERSION_ID >= 70000 */ #define PHONGO_WRITE_CONCERN_W_MAJORITY "majority" /* This enum is necessary since mongoc_server_description_type_t is private and * we need to translate strings returned by mongoc_server_description_type() to * Server integer constants. 
*/ typedef enum { PHONGO_SERVER_UNKNOWN = 0, PHONGO_SERVER_STANDALONE = 1, PHONGO_SERVER_MONGOS = 2, PHONGO_SERVER_POSSIBLE_PRIMARY = 3, PHONGO_SERVER_RS_PRIMARY = 4, PHONGO_SERVER_RS_SECONDARY = 5, PHONGO_SERVER_RS_ARBITER = 6, PHONGO_SERVER_RS_OTHER = 7, PHONGO_SERVER_RS_GHOST = 8, PHONGO_SERVER_DESCRIPTION_TYPES = 9, } php_phongo_server_description_type_t; typedef struct { php_phongo_server_description_type_t type; const char* name; } php_phongo_server_description_type_map_t; extern php_phongo_server_description_type_map_t php_phongo_server_description_type_map[]; typedef enum { PHONGO_ERROR_INVALID_ARGUMENT = 1, PHONGO_ERROR_RUNTIME = 2, PHONGO_ERROR_UNEXPECTED_VALUE = 8, PHONGO_ERROR_MONGOC_FAILED = 3, PHONGO_ERROR_CONNECTION_FAILED = 7, PHONGO_ERROR_LOGIC = 9 } php_phongo_error_domain_t; /* This constant is used for determining if a server error for an exceeded query * or command should select ExecutionTimeoutException. */ #define PHONGO_SERVER_ERROR_EXCEEDED_TIME_LIMIT 50 zend_class_entry* phongo_exception_from_mongoc_domain(uint32_t /* mongoc_error_domain_t */ domain, uint32_t /* mongoc_error_code_t */ code); zend_class_entry* phongo_exception_from_phongo_domain(php_phongo_error_domain_t domain); void phongo_throw_exception(php_phongo_error_domain_t domain TSRMLS_DC, const char* format, ...) #if PHP_VERSION_ID < 70000 #ifndef PHP_WIN32 #ifdef ZTS __attribute__((format(printf, 3, 4))) #else __attribute__((format(printf, 2, 3))) #endif /* ZTS */ #endif /* PHP_WIN32 */ #endif /* PHP_VERSION_ID < 70000 */ ; void phongo_throw_exception_from_bson_error_t(bson_error_t* error TSRMLS_DC); void phongo_throw_exception_from_bson_error_t_and_reply(bson_error_t* error, const bson_t* reply TSRMLS_DC); /* This enum is used for processing options in phongo_execute_parse_options and * selecting a libmongoc function to use in phongo_execute_command. The values * are important, as READ and WRITE are also used as a bit field to determine * whether readPreference, readConcern, and writeConcern options are parsed. 
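* For example, PHONGO_COMMAND_READ (0x03) permits the readConcern and readPreference options, PHONGO_COMMAND_READ_WRITE (0x05) permits readConcern and writeConcern, and PHONGO_COMMAND_RAW (0x07) permits all three; a test such as (type & PHONGO_OPTION_WRITE_CONCERN) decides whether the writeConcern option is parsed.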
*/ typedef enum { PHONGO_OPTION_READ_CONCERN = 0x01, PHONGO_OPTION_READ_PREFERENCE = 0x02, PHONGO_OPTION_WRITE_CONCERN = 0x04, PHONGO_COMMAND_RAW = 0x07, PHONGO_COMMAND_READ = 0x03, PHONGO_COMMAND_WRITE = 0x04, PHONGO_COMMAND_READ_WRITE = 0x05, } php_phongo_command_type_t; zend_object_handlers* phongo_get_std_object_handlers(void); void phongo_server_init(zval* return_value, mongoc_client_t* client, uint32_t server_id TSRMLS_DC); void phongo_session_init(zval* return_value, mongoc_client_session_t* client_session TSRMLS_DC); void phongo_readconcern_init(zval* return_value, const mongoc_read_concern_t* read_concern TSRMLS_DC); void phongo_readpreference_init(zval* return_value, const mongoc_read_prefs_t* read_prefs TSRMLS_DC); void phongo_writeconcern_init(zval* return_value, const mongoc_write_concern_t* write_concern TSRMLS_DC); bool phongo_execute_bulk_write(mongoc_client_t* client, const char* namespace, php_phongo_bulkwrite_t* bulk_write, zval* zwriteConcern, uint32_t server_id, zval* return_value, int return_value_used TSRMLS_DC); bool phongo_execute_command(mongoc_client_t* client, php_phongo_command_type_t type, const char* db, zval* zcommand, zval* zreadPreference, uint32_t server_id, zval* return_value, int return_value_used TSRMLS_DC); bool phongo_execute_query(mongoc_client_t* client, const char* namespace, zval* zquery, zval* zreadPreference, uint32_t server_id, zval* return_value, int return_value_used TSRMLS_DC); bool phongo_cursor_advance_and_check_for_error(mongoc_cursor_t* cursor TSRMLS_DC); const mongoc_read_concern_t* phongo_read_concern_from_zval(zval* zread_concern TSRMLS_DC); const mongoc_read_prefs_t* phongo_read_preference_from_zval(zval* zread_preference TSRMLS_DC); const mongoc_write_concern_t* phongo_write_concern_from_zval(zval* zwrite_concern TSRMLS_DC); php_phongo_server_description_type_t php_phongo_server_description_type(mongoc_server_description_t* sd); bool phongo_parse_read_preference(zval* options, zval** zreadPreference TSRMLS_DC); bool phongo_parse_session(zval* options, mongoc_client_t* client, bson_t* mongoc_opts, zval** zsession TSRMLS_DC); zval* php_phongo_prep_legacy_option(zval* options, const char* key, bool* allocated TSRMLS_DC); void php_phongo_prep_legacy_option_free(zval* options TSRMLS_DC); void php_phongo_read_preference_prep_tagsets(zval* tagSets TSRMLS_DC); bool php_phongo_read_preference_tags_are_valid(const bson_t* tags); void php_phongo_server_to_zval(zval* retval, mongoc_server_description_t* sd); void php_phongo_read_concern_to_zval(zval* retval, const mongoc_read_concern_t* read_concern); void php_phongo_write_concern_to_zval(zval* retval, const mongoc_write_concern_t* write_concern); void php_phongo_cursor_to_zval(zval* retval, const mongoc_cursor_t* cursor); void phongo_manager_init(php_phongo_manager_t* manager, const char* uri_string, zval* options, zval* driverOptions TSRMLS_DC); int php_phongo_set_monitoring_callbacks(mongoc_client_t* client); void php_phongo_objectid_new_from_oid(zval* object, const bson_oid_t* oid TSRMLS_DC); void php_phongo_cursor_id_new_from_id(zval* object, int64_t cursorid TSRMLS_DC); void php_phongo_new_utcdatetime_from_epoch(zval* object, int64_t msec_since_epoch TSRMLS_DC); void php_phongo_new_timestamp_from_increment_and_timestamp(zval* object, uint32_t increment, uint32_t timestamp TSRMLS_DC); void php_phongo_new_javascript_from_javascript(int init, zval* object, const char* code, size_t code_len TSRMLS_DC); void php_phongo_new_javascript_from_javascript_and_scope(int init, zval* object, const 
char* code, size_t code_len, const bson_t* scope TSRMLS_DC); void php_phongo_new_binary_from_binary_and_type(zval* object, const char* data, size_t data_len, bson_subtype_t type TSRMLS_DC); void php_phongo_new_decimal128(zval* object, const bson_decimal128_t* decimal TSRMLS_DC); void php_phongo_new_int64(zval* object, int64_t integer TSRMLS_DC); void php_phongo_new_regex_from_regex_and_options(zval* object, const char* pattern, const char* flags TSRMLS_DC); void php_phongo_new_symbol(zval* object, const char* symbol, size_t symbol_len TSRMLS_DC); void php_phongo_new_dbpointer(zval* object, const char* namespace, size_t namespace_len, const bson_oid_t* oid TSRMLS_DC); zend_bool phongo_writeerror_init(zval* return_value, bson_t* bson TSRMLS_DC); zend_bool phongo_writeconcernerror_init(zval* return_value, bson_t* bson TSRMLS_DC); #if PHP_VERSION_ID >= 70000 #define PHONGO_CE_FINAL(ce) \ do { \ ce->ce_flags |= ZEND_ACC_FINAL; \ } while (0); #else #define PHONGO_CE_FINAL(ce) \ do { \ ce->ce_flags |= ZEND_ACC_FINAL_CLASS; \ } while (0); #endif #define PHONGO_CE_DISABLE_SERIALIZATION(ce) \ do { \ ce->serialize = zend_class_serialize_deny; \ ce->unserialize = zend_class_unserialize_deny; \ } while (0); #define PHONGO_GET_PROPERTY_HASH_INIT_PROPS(is_debug, intern, props, size) \ do { \ if (is_debug) { \ ALLOC_HASHTABLE(props); \ zend_hash_init((props), (size), NULL, ZVAL_PTR_DTOR, 0); \ } else if ((intern)->properties) { \ (props) = (intern)->properties; \ } else { \ ALLOC_HASHTABLE(props); \ zend_hash_init((props), (size), NULL, ZVAL_PTR_DTOR, 0); \ (intern)->properties = (props); \ } \ } while (0); #define PHONGO_ZVAL_CLASS_OR_TYPE_NAME(zv) (Z_TYPE(zv) == IS_OBJECT ? ZSTR_VAL(Z_OBJCE(zv)->name) : zend_get_type_by_const(Z_TYPE(zv))) #define PHONGO_ZVAL_CLASS_OR_TYPE_NAME_P(zvp) PHONGO_ZVAL_CLASS_OR_TYPE_NAME(*(zvp)) #if PHP_VERSION_ID >= 70000 #define PHONGO_ZVAL_EXCEPTION_NAME(e) (ZSTR_VAL(e->ce->name)) #else #define PHONGO_ZVAL_EXCEPTION_NAME(e) (ZSTR_VAL(Z_OBJCE_P(e)->name)) #endif #endif /* PHONGO_H */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/php_phongo_classes.h0000644000076500000240000005436513572250761016520 0ustar alcaeusstaff/* * Copyright 2014-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ #ifndef PHONGO_CLASSES_H #define PHONGO_CLASSES_H #include "php_phongo_structs.h" /* Export zend_class_entry dependencies, which are initialized in MINIT */ extern zend_class_entry* php_phongo_date_immutable_ce; extern zend_class_entry* php_phongo_json_serializable_ce; #if PHP_VERSION_ID >= 70000 static inline php_phongo_bulkwrite_t* php_bulkwrite_fetch_object(zend_object* obj) { return (php_phongo_bulkwrite_t*) ((char*) obj - XtOffsetOf(php_phongo_bulkwrite_t, std)); } static inline php_phongo_command_t* php_command_fetch_object(zend_object* obj) { return (php_phongo_command_t*) ((char*) obj - XtOffsetOf(php_phongo_command_t, std)); } static inline php_phongo_cursor_t* php_cursor_fetch_object(zend_object* obj) { return (php_phongo_cursor_t*) ((char*) obj - XtOffsetOf(php_phongo_cursor_t, std)); } static inline php_phongo_cursorid_t* php_cursorid_fetch_object(zend_object* obj) { return (php_phongo_cursorid_t*) ((char*) obj - XtOffsetOf(php_phongo_cursorid_t, std)); } static inline php_phongo_manager_t* php_manager_fetch_object(zend_object* obj) { return (php_phongo_manager_t*) ((char*) obj - XtOffsetOf(php_phongo_manager_t, std)); } static inline php_phongo_query_t* php_query_fetch_object(zend_object* obj) { return (php_phongo_query_t*) ((char*) obj - XtOffsetOf(php_phongo_query_t, std)); } static inline php_phongo_readconcern_t* php_readconcern_fetch_object(zend_object* obj) { return (php_phongo_readconcern_t*) ((char*) obj - XtOffsetOf(php_phongo_readconcern_t, std)); } static inline php_phongo_readpreference_t* php_readpreference_fetch_object(zend_object* obj) { return (php_phongo_readpreference_t*) ((char*) obj - XtOffsetOf(php_phongo_readpreference_t, std)); } static inline php_phongo_server_t* php_server_fetch_object(zend_object* obj) { return (php_phongo_server_t*) ((char*) obj - XtOffsetOf(php_phongo_server_t, std)); } static inline php_phongo_session_t* php_session_fetch_object(zend_object* obj) { return (php_phongo_session_t*) ((char*) obj - XtOffsetOf(php_phongo_session_t, std)); } static inline php_phongo_writeconcern_t* php_writeconcern_fetch_object(zend_object* obj) { return (php_phongo_writeconcern_t*) ((char*) obj - XtOffsetOf(php_phongo_writeconcern_t, std)); } static inline php_phongo_writeconcernerror_t* php_writeconcernerror_fetch_object(zend_object* obj) { return (php_phongo_writeconcernerror_t*) ((char*) obj - XtOffsetOf(php_phongo_writeconcernerror_t, std)); } static inline php_phongo_writeerror_t* php_writeerror_fetch_object(zend_object* obj) { return (php_phongo_writeerror_t*) ((char*) obj - XtOffsetOf(php_phongo_writeerror_t, std)); } static inline php_phongo_writeresult_t* php_writeresult_fetch_object(zend_object* obj) { return (php_phongo_writeresult_t*) ((char*) obj - XtOffsetOf(php_phongo_writeresult_t, std)); } static inline php_phongo_binary_t* php_binary_fetch_object(zend_object* obj) { return (php_phongo_binary_t*) ((char*) obj - XtOffsetOf(php_phongo_binary_t, std)); } static inline php_phongo_dbpointer_t* php_dbpointer_fetch_object(zend_object* obj) { return (php_phongo_dbpointer_t*) ((char*) obj - XtOffsetOf(php_phongo_dbpointer_t, std)); } static inline php_phongo_decimal128_t* php_decimal128_fetch_object(zend_object* obj) { return (php_phongo_decimal128_t*) ((char*) obj - XtOffsetOf(php_phongo_decimal128_t, std)); } static inline php_phongo_int64_t* php_int64_fetch_object(zend_object* obj) { return (php_phongo_int64_t*) ((char*) obj - XtOffsetOf(php_phongo_int64_t, std)); } static inline php_phongo_javascript_t* 
php_javascript_fetch_object(zend_object* obj) { return (php_phongo_javascript_t*) ((char*) obj - XtOffsetOf(php_phongo_javascript_t, std)); } static inline php_phongo_maxkey_t* php_maxkey_fetch_object(zend_object* obj) { return (php_phongo_maxkey_t*) ((char*) obj - XtOffsetOf(php_phongo_maxkey_t, std)); } static inline php_phongo_minkey_t* php_minkey_fetch_object(zend_object* obj) { return (php_phongo_minkey_t*) ((char*) obj - XtOffsetOf(php_phongo_minkey_t, std)); } static inline php_phongo_objectid_t* php_objectid_fetch_object(zend_object* obj) { return (php_phongo_objectid_t*) ((char*) obj - XtOffsetOf(php_phongo_objectid_t, std)); } static inline php_phongo_regex_t* php_regex_fetch_object(zend_object* obj) { return (php_phongo_regex_t*) ((char*) obj - XtOffsetOf(php_phongo_regex_t, std)); } static inline php_phongo_symbol_t* php_symbol_fetch_object(zend_object* obj) { return (php_phongo_symbol_t*) ((char*) obj - XtOffsetOf(php_phongo_symbol_t, std)); } static inline php_phongo_timestamp_t* php_timestamp_fetch_object(zend_object* obj) { return (php_phongo_timestamp_t*) ((char*) obj - XtOffsetOf(php_phongo_timestamp_t, std)); } static inline php_phongo_undefined_t* php_undefined_fetch_object(zend_object* obj) { return (php_phongo_undefined_t*) ((char*) obj - XtOffsetOf(php_phongo_undefined_t, std)); } static inline php_phongo_utcdatetime_t* php_utcdatetime_fetch_object(zend_object* obj) { return (php_phongo_utcdatetime_t*) ((char*) obj - XtOffsetOf(php_phongo_utcdatetime_t, std)); } static inline php_phongo_commandfailedevent_t* php_commandfailedevent_fetch_object(zend_object* obj) { return (php_phongo_commandfailedevent_t*) ((char*) obj - XtOffsetOf(php_phongo_commandfailedevent_t, std)); } static inline php_phongo_commandstartedevent_t* php_commandstartedevent_fetch_object(zend_object* obj) { return (php_phongo_commandstartedevent_t*) ((char*) obj - XtOffsetOf(php_phongo_commandstartedevent_t, std)); } static inline php_phongo_commandsucceededevent_t* php_commandsucceededevent_fetch_object(zend_object* obj) { return (php_phongo_commandsucceededevent_t*) ((char*) obj - XtOffsetOf(php_phongo_commandsucceededevent_t, std)); } #define Z_COMMAND_OBJ_P(zv) (php_command_fetch_object(Z_OBJ_P(zv))) #define Z_CURSOR_OBJ_P(zv) (php_cursor_fetch_object(Z_OBJ_P(zv))) #define Z_CURSORID_OBJ_P(zv) (php_cursorid_fetch_object(Z_OBJ_P(zv))) #define Z_MANAGER_OBJ_P(zv) (php_manager_fetch_object(Z_OBJ_P(zv))) #define Z_QUERY_OBJ_P(zv) (php_query_fetch_object(Z_OBJ_P(zv))) #define Z_READCONCERN_OBJ_P(zv) (php_readconcern_fetch_object(Z_OBJ_P(zv))) #define Z_READPREFERENCE_OBJ_P(zv) (php_readpreference_fetch_object(Z_OBJ_P(zv))) #define Z_SERVER_OBJ_P(zv) (php_server_fetch_object(Z_OBJ_P(zv))) #define Z_SESSION_OBJ_P(zv) (php_session_fetch_object(Z_OBJ_P(zv))) #define Z_BULKWRITE_OBJ_P(zv) (php_bulkwrite_fetch_object(Z_OBJ_P(zv))) #define Z_WRITECONCERN_OBJ_P(zv) (php_writeconcern_fetch_object(Z_OBJ_P(zv))) #define Z_WRITECONCERNERROR_OBJ_P(zv) (php_writeconcernerror_fetch_object(Z_OBJ_P(zv))) #define Z_WRITEERROR_OBJ_P(zv) (php_writeerror_fetch_object(Z_OBJ_P(zv))) #define Z_WRITERESULT_OBJ_P(zv) (php_writeresult_fetch_object(Z_OBJ_P(zv))) #define Z_BINARY_OBJ_P(zv) (php_binary_fetch_object(Z_OBJ_P(zv))) #define Z_DBPOINTER_OBJ_P(zv) (php_dbpointer_fetch_object(Z_OBJ_P(zv))) #define Z_DECIMAL128_OBJ_P(zv) (php_decimal128_fetch_object(Z_OBJ_P(zv))) #define Z_INT64_OBJ_P(zv) (php_int64_fetch_object(Z_OBJ_P(zv))) #define Z_JAVASCRIPT_OBJ_P(zv) (php_javascript_fetch_object(Z_OBJ_P(zv))) #define 
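The fetch helpers above all follow PHP 7's embedded-zend_object pattern. The following sketch uses invented names (example_t, example_handlers, example_create_object) and assumes the handlers' offset and free_obj members are configured elsewhere, typically in MINIT; it only shows the two halves of the pattern.

/* Hypothetical sketch of the PHP 7 custom-object layout. */
typedef struct {
    int         some_state; /* extension-specific data would live here */
    zend_object std;        /* must be the last member */
} example_t;

static zend_object_handlers example_handlers; /* example_handlers.offset = XtOffsetOf(example_t, std) in MINIT */

static zend_object* example_create_object(zend_class_entry* ce)
{
    example_t* intern = ecalloc(1, sizeof(example_t) + zend_object_properties_size(ce));

    zend_object_std_init(&intern->std, ce);
    object_properties_init(&intern->std, ce);
    intern->std.handlers = &example_handlers;

    return &intern->std;
}

/* Mirrors the php_*_fetch_object() helpers: subtract the offset of "std" to
 * recover the wrapper struct from the zend_object PHP hands back. */
static inline example_t* example_fetch_object(zend_object* obj)
{
    return (example_t*) ((char*) obj - XtOffsetOf(example_t, std));
}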
Z_MAXKEY_OBJ_P(zv) (php_maxkey_fetch_object(Z_OBJ_P(zv))) #define Z_MINKEY_OBJ_P(zv) (php_minkey_fetch_object(Z_OBJ_P(zv))) #define Z_OBJECTID_OBJ_P(zv) (php_objectid_fetch_object(Z_OBJ_P(zv))) #define Z_REGEX_OBJ_P(zv) (php_regex_fetch_object(Z_OBJ_P(zv))) #define Z_SYMBOL_OBJ_P(zv) (php_symbol_fetch_object(Z_OBJ_P(zv))) #define Z_TIMESTAMP_OBJ_P(zv) (php_timestamp_fetch_object(Z_OBJ_P(zv))) #define Z_UNDEFINED_OBJ_P(zv) (php_undefined_fetch_object(Z_OBJ_P(zv))) #define Z_UTCDATETIME_OBJ_P(zv) (php_utcdatetime_fetch_object(Z_OBJ_P(zv))) #define Z_COMMANDFAILEDEVENT_OBJ_P(zv) (php_commandfailedevent_fetch_object(Z_OBJ_P(zv))) #define Z_COMMANDSTARTEDEVENT_OBJ_P(zv) (php_commandstartedevent_fetch_object(Z_OBJ_P(zv))) #define Z_COMMANDSUCCEEDEDEVENT_OBJ_P(zv) (php_commandsucceededevent_fetch_object(Z_OBJ_P(zv))) #define Z_OBJ_COMMAND(zo) (php_command_fetch_object(zo)) #define Z_OBJ_CURSOR(zo) (php_cursor_fetch_object(zo)) #define Z_OBJ_CURSORID(zo) (php_cursorid_fetch_object(zo)) #define Z_OBJ_MANAGER(zo) (php_manager_fetch_object(zo)) #define Z_OBJ_QUERY(zo) (php_query_fetch_object(zo)) #define Z_OBJ_READCONCERN(zo) (php_readconcern_fetch_object(zo)) #define Z_OBJ_READPREFERENCE(zo) (php_readpreference_fetch_object(zo)) #define Z_OBJ_SERVER(zo) (php_server_fetch_object(zo)) #define Z_OBJ_SESSION(zo) (php_session_fetch_object(zo)) #define Z_OBJ_BULKWRITE(zo) (php_bulkwrite_fetch_object(zo)) #define Z_OBJ_WRITECONCERN(zo) (php_writeconcern_fetch_object(zo)) #define Z_OBJ_WRITECONCERNERROR(zo) (php_writeconcernerror_fetch_object(zo)) #define Z_OBJ_WRITEERROR(zo) (php_writeerror_fetch_object(zo)) #define Z_OBJ_WRITERESULT(zo) (php_writeresult_fetch_object(zo)) #define Z_OBJ_BINARY(zo) (php_binary_fetch_object(zo)) #define Z_OBJ_DBPOINTER(zo) (php_dbpointer_fetch_object(zo)) #define Z_OBJ_DECIMAL128(zo) (php_decimal128_fetch_object(zo)) #define Z_OBJ_INT64(zo) (php_int64_fetch_object(zo)) #define Z_OBJ_JAVASCRIPT(zo) (php_javascript_fetch_object(zo)) #define Z_OBJ_MAXKEY(zo) (php_maxkey_fetch_object(zo)) #define Z_OBJ_MINKEY(zo) (php_minkey_fetch_object(zo)) #define Z_OBJ_OBJECTID(zo) (php_objectid_fetch_object(zo)) #define Z_OBJ_REGEX(zo) (php_regex_fetch_object(zo)) #define Z_OBJ_SYMBOL(zo) (php_symbol_fetch_object(zo)) #define Z_OBJ_TIMESTAMP(zo) (php_timestamp_fetch_object(zo)) #define Z_OBJ_UNDEFINED(zo) (php_undefined_fetch_object(zo)) #define Z_OBJ_UTCDATETIME(zo) (php_utcdatetime_fetch_object(zo)) #define Z_OBJ_COMMANDFAILEDEVENT(zo) (php_commandfailedevent_fetch_object(zo)) #define Z_OBJ_COMMANDSTARTEDEVENT(zo) (php_commandstartedevent_fetch_object(zo)) #define Z_OBJ_COMMANDSUCCEEDEDEVENT(zo) (php_commandsucceededevent_fetch_object(zo)) #else /* PHP_VERSION_ID >= 70000 */ #define Z_COMMAND_OBJ_P(zv) ((php_phongo_command_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_CURSOR_OBJ_P(zv) ((php_phongo_cursor_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_CURSORID_OBJ_P(zv) ((php_phongo_cursorid_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_MANAGER_OBJ_P(zv) ((php_phongo_manager_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_QUERY_OBJ_P(zv) ((php_phongo_query_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_READCONCERN_OBJ_P(zv) ((php_phongo_readconcern_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_READPREFERENCE_OBJ_P(zv) ((php_phongo_readpreference_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_SERVER_OBJ_P(zv) ((php_phongo_server_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_SESSION_OBJ_P(zv) 
((php_phongo_session_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_BULKWRITE_OBJ_P(zv) ((php_phongo_bulkwrite_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_WRITECONCERN_OBJ_P(zv) ((php_phongo_writeconcern_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_WRITECONCERNERROR_OBJ_P(zv) ((php_phongo_writeconcernerror_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_WRITEERROR_OBJ_P(zv) ((php_phongo_writeerror_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_WRITERESULT_OBJ_P(zv) ((php_phongo_writeresult_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_BINARY_OBJ_P(zv) ((php_phongo_binary_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_DBPOINTER_OBJ_P(zv) ((php_phongo_dbpointer_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_DECIMAL128_OBJ_P(zv) ((php_phongo_decimal128_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_INT64_OBJ_P(zv) ((php_phongo_int64_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_JAVASCRIPT_OBJ_P(zv) ((php_phongo_javascript_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_MAXKEY_OBJ_P(zv) ((php_phongo_maxkey_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_MINKEY_OBJ_P(zv) ((php_phongo_minkey_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_OBJECTID_OBJ_P(zv) ((php_phongo_objectid_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_REGEX_OBJ_P(zv) ((php_phongo_regex_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_SYMBOL_OBJ_P(zv) ((php_phongo_symbol_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_TIMESTAMP_OBJ_P(zv) ((php_phongo_timestamp_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_UNDEFINED_OBJ_P(zv) ((php_phongo_undefined_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_UTCDATETIME_OBJ_P(zv) ((php_phongo_utcdatetime_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_COMMANDFAILEDEVENT_OBJ_P(zv) ((php_phongo_commandfailedevent_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_COMMANDSTARTEDEVENT_OBJ_P(zv) ((php_phongo_commandstartedevent_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_COMMANDSUCCEEDEDEVENT_OBJ_P(zv) ((php_phongo_commandsucceededevent_t*) zend_object_store_get_object(zv TSRMLS_CC)) #define Z_OBJ_COMMAND(zo) ((php_phongo_command_t*) zo) #define Z_OBJ_CURSOR(zo) ((php_phongo_cursor_t*) zo) #define Z_OBJ_CURSORID(zo) ((php_phongo_cursorid_t*) zo) #define Z_OBJ_MANAGER(zo) ((php_phongo_manager_t*) zo) #define Z_OBJ_QUERY(zo) ((php_phongo_query_t*) zo) #define Z_OBJ_READCONCERN(zo) ((php_phongo_readconcern_t*) zo) #define Z_OBJ_READPREFERENCE(zo) ((php_phongo_readpreference_t*) zo) #define Z_OBJ_SERVER(zo) ((php_phongo_server_t*) zo) #define Z_OBJ_SESSION(zo) ((php_phongo_session_t*) zo) #define Z_OBJ_BULKWRITE(zo) ((php_phongo_bulkwrite_t*) zo) #define Z_OBJ_WRITECONCERN(zo) ((php_phongo_writeconcern_t*) zo) #define Z_OBJ_WRITECONCERNERROR(zo) ((php_phongo_writeconcernerror_t*) zo) #define Z_OBJ_WRITEERROR(zo) ((php_phongo_writeerror_t*) zo) #define Z_OBJ_WRITERESULT(zo) ((php_phongo_writeresult_t*) zo) #define Z_OBJ_BINARY(zo) ((php_phongo_binary_t*) zo) #define Z_OBJ_DBPOINTER(zo) ((php_phongo_dbpointer_t*) zo) #define Z_OBJ_DECIMAL128(zo) ((php_phongo_decimal128_t*) zo) #define Z_OBJ_INT64(zo) ((php_phongo_int64_t*) zo) #define Z_OBJ_JAVASCRIPT(zo) ((php_phongo_javascript_t*) zo) #define Z_OBJ_MAXKEY(zo) ((php_phongo_maxkey_t*) zo) #define Z_OBJ_MINKEY(zo) ((php_phongo_minkey_t*) zo) #define Z_OBJ_OBJECTID(zo) ((php_phongo_objectid_t*) zo) #define Z_OBJ_REGEX(zo) 
((php_phongo_regex_t*) zo) #define Z_OBJ_SYMBOL(zo) ((php_phongo_symbol_t*) zo) #define Z_OBJ_TIMESTAMP(zo) ((php_phongo_timestamp_t*) zo) #define Z_OBJ_UNDEFINED(zo) ((php_phongo_undefined_t*) zo) #define Z_OBJ_UTCDATETIME(zo) ((php_phongo_utcdatetime_t*) zo) #define Z_OBJ_COMMANDFAILEDEVENT(zo) ((php_phongo_commandfailedevent_t*) zo) #define Z_OBJ_COMMANDSTARTEDEVENT(zo) ((php_phongo_commandstartedevent_t*) zo) #define Z_OBJ_COMMANDSUCCEEDEDEVENT(zo) ((php_phongo_commandsucceededevent_t*) zo) #endif /* PHP_VERSION_ID >= 70000 */ typedef struct { zend_object_iterator intern; php_phongo_cursor_t* cursor; } php_phongo_cursor_iterator; extern zend_class_entry* php_phongo_command_ce; extern zend_class_entry* php_phongo_cursor_ce; extern zend_class_entry* php_phongo_cursorid_ce; extern zend_class_entry* php_phongo_manager_ce; extern zend_class_entry* php_phongo_query_ce; extern zend_class_entry* php_phongo_readconcern_ce; extern zend_class_entry* php_phongo_readpreference_ce; extern zend_class_entry* php_phongo_server_ce; extern zend_class_entry* php_phongo_session_ce; extern zend_class_entry* php_phongo_bulkwrite_ce; extern zend_class_entry* php_phongo_writeconcern_ce; extern zend_class_entry* php_phongo_writeconcernerror_ce; extern zend_class_entry* php_phongo_writeerror_ce; extern zend_class_entry* php_phongo_writeresult_ce; extern zend_class_entry* php_phongo_cursor_interface_ce; extern zend_class_entry* php_phongo_exception_ce; extern zend_class_entry* php_phongo_logicexception_ce; extern zend_class_entry* php_phongo_runtimeexception_ce; extern zend_class_entry* php_phongo_serverexception_ce; extern zend_class_entry* php_phongo_commandexception_ce; extern zend_class_entry* php_phongo_unexpectedvalueexception_ce; extern zend_class_entry* php_phongo_invalidargumentexception_ce; extern zend_class_entry* php_phongo_connectionexception_ce; extern zend_class_entry* php_phongo_authenticationexception_ce; extern zend_class_entry* php_phongo_sslconnectionexception_ce; extern zend_class_entry* php_phongo_executiontimeoutexception_ce; extern zend_class_entry* php_phongo_connectiontimeoutexception_ce; extern zend_class_entry* php_phongo_writeexception_ce; extern zend_class_entry* php_phongo_bulkwriteexception_ce; extern zend_class_entry* php_phongo_type_ce; extern zend_class_entry* php_phongo_persistable_ce; extern zend_class_entry* php_phongo_unserializable_ce; extern zend_class_entry* php_phongo_serializable_ce; extern zend_class_entry* php_phongo_binary_ce; extern zend_class_entry* php_phongo_dbpointer_ce; extern zend_class_entry* php_phongo_decimal128_ce; extern zend_class_entry* php_phongo_int64_ce; extern zend_class_entry* php_phongo_javascript_ce; extern zend_class_entry* php_phongo_maxkey_ce; extern zend_class_entry* php_phongo_minkey_ce; extern zend_class_entry* php_phongo_objectid_ce; extern zend_class_entry* php_phongo_regex_ce; extern zend_class_entry* php_phongo_symbol_ce; extern zend_class_entry* php_phongo_timestamp_ce; extern zend_class_entry* php_phongo_undefined_ce; extern zend_class_entry* php_phongo_utcdatetime_ce; extern zend_class_entry* php_phongo_binary_interface_ce; extern zend_class_entry* php_phongo_decimal128_interface_ce; extern zend_class_entry* php_phongo_javascript_interface_ce; extern zend_class_entry* php_phongo_maxkey_interface_ce; extern zend_class_entry* php_phongo_minkey_interface_ce; extern zend_class_entry* php_phongo_objectid_interface_ce; extern zend_class_entry* php_phongo_regex_interface_ce; extern zend_class_entry* php_phongo_timestamp_interface_ce; extern 
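Whichever branch of the macros above is compiled, method implementations resolve a zval to its wrapper struct the same way. A hedged usage sketch follows; the method name and body are invented for illustration.

PHP_METHOD(Server, debugHandle_sketch)
{
    php_phongo_server_t* intern = Z_SERVER_OBJ_P(getThis());

    /* intern->client and intern->server_id identify the selected server. */
    (void) intern;
}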
zend_class_entry* php_phongo_utcdatetime_interface_ce; extern zend_class_entry* php_phongo_commandfailedevent_ce; extern zend_class_entry* php_phongo_commandstartedevent_ce; extern zend_class_entry* php_phongo_commandsubscriber_ce; extern zend_class_entry* php_phongo_commandsucceededevent_ce; extern zend_class_entry* php_phongo_subscriber_ce; extern void php_phongo_binary_init_ce(INIT_FUNC_ARGS); extern void php_phongo_dbpointer_init_ce(INIT_FUNC_ARGS); extern void php_phongo_decimal128_init_ce(INIT_FUNC_ARGS); extern void php_phongo_int64_init_ce(INIT_FUNC_ARGS); extern void php_phongo_javascript_init_ce(INIT_FUNC_ARGS); extern void php_phongo_maxkey_init_ce(INIT_FUNC_ARGS); extern void php_phongo_minkey_init_ce(INIT_FUNC_ARGS); extern void php_phongo_objectid_init_ce(INIT_FUNC_ARGS); extern void php_phongo_persistable_init_ce(INIT_FUNC_ARGS); extern void php_phongo_regex_init_ce(INIT_FUNC_ARGS); extern void php_phongo_serializable_init_ce(INIT_FUNC_ARGS); extern void php_phongo_symbol_init_ce(INIT_FUNC_ARGS); extern void php_phongo_timestamp_init_ce(INIT_FUNC_ARGS); extern void php_phongo_type_init_ce(INIT_FUNC_ARGS); extern void php_phongo_undefined_init_ce(INIT_FUNC_ARGS); extern void php_phongo_unserializable_init_ce(INIT_FUNC_ARGS); extern void php_phongo_utcdatetime_init_ce(INIT_FUNC_ARGS); extern void php_phongo_binary_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_decimal128_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_javascript_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_maxkey_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_minkey_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_objectid_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_regex_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_timestamp_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_utcdatetime_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_bulkwrite_init_ce(INIT_FUNC_ARGS); extern void php_phongo_command_init_ce(INIT_FUNC_ARGS); extern void php_phongo_cursor_init_ce(INIT_FUNC_ARGS); extern void php_phongo_cursorid_init_ce(INIT_FUNC_ARGS); extern void php_phongo_manager_init_ce(INIT_FUNC_ARGS); extern void php_phongo_query_init_ce(INIT_FUNC_ARGS); extern void php_phongo_readconcern_init_ce(INIT_FUNC_ARGS); extern void php_phongo_readpreference_init_ce(INIT_FUNC_ARGS); extern void php_phongo_server_init_ce(INIT_FUNC_ARGS); extern void php_phongo_session_init_ce(INIT_FUNC_ARGS); extern void php_phongo_writeconcern_init_ce(INIT_FUNC_ARGS); extern void php_phongo_writeconcernerror_init_ce(INIT_FUNC_ARGS); extern void php_phongo_writeerror_init_ce(INIT_FUNC_ARGS); extern void php_phongo_writeresult_init_ce(INIT_FUNC_ARGS); extern void php_phongo_cursor_interface_init_ce(INIT_FUNC_ARGS); extern void php_phongo_authenticationexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_bulkwriteexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_commandexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_connectionexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_connectiontimeoutexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_exception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_executiontimeoutexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_invalidargumentexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_logicexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_runtimeexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_serverexception_init_ce(INIT_FUNC_ARGS); extern 
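The *_init_ce() functions declared in this header are typically invoked from the extension's MINIT stage. A minimal sketch, assuming a hypothetical MINIT body named mongodb_sketch; only a few of the calls are shown.

PHP_MINIT_FUNCTION(mongodb_sketch)
{
    php_phongo_exception_init_ce(INIT_FUNC_ARGS_PASSTHRU);
    php_phongo_manager_init_ce(INIT_FUNC_ARGS_PASSTHRU);
    php_phongo_server_init_ce(INIT_FUNC_ARGS_PASSTHRU);
    php_phongo_cursor_init_ce(INIT_FUNC_ARGS_PASSTHRU);

    return SUCCESS;
}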
void php_phongo_sslconnectionexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_unexpectedvalueexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_writeexception_init_ce(INIT_FUNC_ARGS); extern void php_phongo_commandfailedevent_init_ce(INIT_FUNC_ARGS); extern void php_phongo_commandstartedevent_init_ce(INIT_FUNC_ARGS); extern void php_phongo_commandsubscriber_init_ce(INIT_FUNC_ARGS); extern void php_phongo_commandsucceededevent_init_ce(INIT_FUNC_ARGS); extern void php_phongo_subscriber_init_ce(INIT_FUNC_ARGS); /* Shared function entries for disabling constructors and unserialize() */ PHP_FUNCTION(MongoDB_disabled___construct); PHP_FUNCTION(MongoDB_disabled___wakeup); #endif /* PHONGO_CLASSES_H */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */ mongodb-1.6.1/php_phongo_structs.h0000644000076500000240000001567613572250761016574 0ustar alcaeusstaff/* * Copyright 2015-2017 MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef PHONGO_STRUCTS_H #define PHONGO_STRUCTS_H #include #include "php_bson.h" #if PHP_VERSION_ID >= 70000 #define PHONGO_ZEND_OBJECT_PRE #define PHONGO_ZEND_OBJECT_POST zend_object std; #define PHONGO_STRUCT_ZVAL zval #else #define PHONGO_ZEND_OBJECT_PRE zend_object std; #define PHONGO_ZEND_OBJECT_POST #define PHONGO_STRUCT_ZVAL zval* #endif typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_bulk_operation_t* bulk; size_t num_ops; bool ordered; int bypass; char* database; char* collection; bool executed; PHONGO_ZEND_OBJECT_POST } php_phongo_bulkwrite_t; typedef struct { PHONGO_ZEND_OBJECT_PRE bson_t* bson; uint32_t max_await_time_ms; uint32_t batch_size; PHONGO_ZEND_OBJECT_POST } php_phongo_command_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_cursor_t* cursor; mongoc_client_t* client; uint32_t server_id; bool advanced; php_phongo_bson_state visitor_data; bool got_iterator; long current; char* database; char* collection; PHONGO_STRUCT_ZVAL query; PHONGO_STRUCT_ZVAL command; PHONGO_STRUCT_ZVAL read_preference; PHONGO_STRUCT_ZVAL session; PHONGO_ZEND_OBJECT_POST } php_phongo_cursor_t; typedef struct { PHONGO_ZEND_OBJECT_PRE int64_t id; PHONGO_ZEND_OBJECT_POST } php_phongo_cursorid_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_client_t* client; PHONGO_ZEND_OBJECT_POST } php_phongo_manager_t; typedef struct { PHONGO_ZEND_OBJECT_PRE bson_t* filter; bson_t* opts; mongoc_read_concern_t* read_concern; uint32_t max_await_time_ms; PHONGO_ZEND_OBJECT_POST } php_phongo_query_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_read_concern_t* read_concern; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_readconcern_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_read_prefs_t* read_preference; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_readpreference_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_client_t* client; uint32_t server_id; PHONGO_ZEND_OBJECT_POST } php_phongo_server_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_client_session_t* client_session; 
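The PHONGO_ZEND_OBJECT_PRE/POST and PHONGO_STRUCT_ZVAL macros near the top of php_phongo_structs.h hide the PHP 5/PHP 7 layout difference. As an illustration only, the manager struct expands roughly as follows; the *_sketch_t typedef names are invented.

/* PHP 7: the zend_object is embedded last and zvals are stored by value. */
typedef struct {
    mongoc_client_t* client;
    zend_object      std;
} php_phongo_manager_php7_sketch_t;

/* PHP 5: the zend_object comes first and zvals are stored as pointers. */
typedef struct {
    zend_object      std;
    mongoc_client_t* client;
} php_phongo_manager_php5_sketch_t;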
PHONGO_ZEND_OBJECT_POST } php_phongo_session_t; typedef struct { PHONGO_ZEND_OBJECT_PRE HashTable* properties; mongoc_write_concern_t* write_concern; PHONGO_ZEND_OBJECT_POST } php_phongo_writeconcern_t; typedef struct { PHONGO_ZEND_OBJECT_PRE int code; char* message; PHONGO_STRUCT_ZVAL info; PHONGO_ZEND_OBJECT_POST } php_phongo_writeconcernerror_t; typedef struct { PHONGO_ZEND_OBJECT_PRE int code; char* message; PHONGO_STRUCT_ZVAL info; uint32_t index; PHONGO_ZEND_OBJECT_POST } php_phongo_writeerror_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_write_concern_t* write_concern; bson_t* reply; mongoc_client_t* client; uint32_t server_id; PHONGO_ZEND_OBJECT_POST } php_phongo_writeresult_t; typedef struct { PHONGO_ZEND_OBJECT_PRE char* data; int data_len; uint8_t type; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_binary_t; typedef struct { PHONGO_ZEND_OBJECT_PRE char* ref; size_t ref_len; char id[25]; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_dbpointer_t; typedef struct { PHONGO_ZEND_OBJECT_PRE bool initialized; bson_decimal128_t decimal; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_decimal128_t; typedef struct { PHONGO_ZEND_OBJECT_PRE bool initialized; int64_t integer; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_int64_t; typedef struct { PHONGO_ZEND_OBJECT_PRE char* code; size_t code_len; bson_t* scope; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_javascript_t; typedef struct { PHONGO_ZEND_OBJECT_PRE PHONGO_ZEND_OBJECT_POST } php_phongo_maxkey_t; typedef struct { PHONGO_ZEND_OBJECT_PRE PHONGO_ZEND_OBJECT_POST } php_phongo_minkey_t; typedef struct { PHONGO_ZEND_OBJECT_PRE bool initialized; char oid[25]; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_objectid_t; typedef struct { PHONGO_ZEND_OBJECT_PRE char* pattern; int pattern_len; char* flags; int flags_len; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_regex_t; typedef struct { PHONGO_ZEND_OBJECT_PRE char* symbol; size_t symbol_len; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_symbol_t; typedef struct { PHONGO_ZEND_OBJECT_PRE bool initialized; uint32_t increment; uint32_t timestamp; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_timestamp_t; typedef struct { PHONGO_ZEND_OBJECT_PRE PHONGO_ZEND_OBJECT_POST } php_phongo_undefined_t; typedef struct { PHONGO_ZEND_OBJECT_PRE bool initialized; int64_t milliseconds; HashTable* properties; PHONGO_ZEND_OBJECT_POST } php_phongo_utcdatetime_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_client_t* client; char* command_name; uint32_t server_id; uint64_t operation_id; uint64_t request_id; uint64_t duration_micros; bson_t* reply; PHONGO_STRUCT_ZVAL z_error; PHONGO_ZEND_OBJECT_POST } php_phongo_commandfailedevent_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_client_t* client; char* command_name; uint32_t server_id; uint64_t operation_id; uint64_t request_id; bson_t* command; char* database_name; PHONGO_ZEND_OBJECT_POST } php_phongo_commandstartedevent_t; typedef struct { PHONGO_ZEND_OBJECT_PRE mongoc_client_t* client; char* command_name; uint32_t server_id; uint64_t operation_id; uint64_t request_id; uint64_t duration_micros; bson_t* reply; PHONGO_ZEND_OBJECT_POST } php_phongo_commandsucceededevent_t; #undef PHONGO_ZEND_OBJECT_PRE #undef PHONGO_ZEND_OBJECT_POST #undef PHONGO_STRUCT_ZVAL #endif /* PHONGO_STRUCTS */ /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * End: * vim600: noet sw=4 ts=4 fdm=marker * vim<600: noet sw=4 ts=4 */
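The command monitoring structs defined above are populated from libmongoc's APM callbacks, which the driver registers via php_phongo_set_monitoring_callbacks(). The sketch below is not the driver's callback: the function name command_started_sketch is invented, while the mongoc_apm_command_started_get_*() accessors are libmongoc API that supply the CommandStartedEvent data.

static void command_started_sketch(const mongoc_apm_command_started_t* event)
{
    /* These accessors provide the values stored in
     * php_phongo_commandstartedevent_t. */
    const char*   command_name  = mongoc_apm_command_started_get_command_name(event);
    const char*   database_name = mongoc_apm_command_started_get_database_name(event);
    int64_t       request_id    = mongoc_apm_command_started_get_request_id(event);
    int64_t       operation_id  = mongoc_apm_command_started_get_operation_id(event);
    const bson_t* command       = mongoc_apm_command_started_get_command(event);

    (void) command_name;
    (void) database_name;
    (void) request_id;
    (void) operation_id;
    (void) command;
}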