lua-cjson-2.1.0+dfsg/CMakeLists.txt

# If Lua is installed in a non-standard location, please set the LUA_DIR
# environment variable to point to prefix for the install. Eg:
#     Unix:    export LUA_DIR=/home/user/pkg
#     Windows: set LUA_DIR=c:\lua51

project(lua-cjson C)
cmake_minimum_required(VERSION 2.6)

option(USE_INTERNAL_FPCONV "Use internal strtod() / g_fmt() code for performance")
option(MULTIPLE_THREADS "Support multi-threaded apps with internal fpconv - recommended" ON)

if(NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING
        "Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel."
        FORCE)
endif()

find_package(Lua51 REQUIRED)
include_directories(${LUA_INCLUDE_DIR})

if(NOT USE_INTERNAL_FPCONV)
    # Use libc number conversion routines (strtod(), sprintf())
    set(FPCONV_SOURCES fpconv.c)
else()
    # Use internal number conversion routines
    add_definitions(-DUSE_INTERNAL_FPCONV)
    set(FPCONV_SOURCES g_fmt.c dtoa.c)

    include(TestBigEndian)
    TEST_BIG_ENDIAN(IEEE_BIG_ENDIAN)
    if(IEEE_BIG_ENDIAN)
        add_definitions(-DIEEE_BIG_ENDIAN)
    endif()

    if(MULTIPLE_THREADS)
        set(CMAKE_THREAD_PREFER_PTHREAD TRUE)
        find_package(Threads REQUIRED)
        if(NOT CMAKE_USE_PTHREADS_INIT)
            message(FATAL_ERROR "Pthreads not found - required by MULTIPLE_THREADS option")
        endif()
        add_definitions(-DMULTIPLE_THREADS)
    endif()
endif()

# Handle platforms missing isinf() macro (Eg, some Solaris systems).
include(CheckSymbolExists)
CHECK_SYMBOL_EXISTS(isinf math.h HAVE_ISINF)
if(NOT HAVE_ISINF)
    add_definitions(-DUSE_INTERNAL_ISINF)
endif()

set(_MODULE_LINK "${CMAKE_THREAD_LIBS_INIT}")

get_filename_component(_lua_lib_dir ${LUA_LIBRARY} PATH)

if(APPLE)
    set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS
        "${CMAKE_SHARED_MODULE_CREATE_C_FLAGS} -undefined dynamic_lookup")
endif()

if(WIN32)
    # Win32 modules need to be linked to the Lua library.
    set(_MODULE_LINK ${LUA_LIBRARY} ${_MODULE_LINK})
    set(_lua_module_dir "${_lua_lib_dir}")
    # Windows sprintf()/strtod() handle NaN/inf differently. Not supported.
    add_definitions(-DDISABLE_INVALID_NUMBERS)
else()
    set(_lua_module_dir "${_lua_lib_dir}/lua/5.1")
endif()

add_library(cjson MODULE lua_cjson.c strbuf.c ${FPCONV_SOURCES})
set_target_properties(cjson PROPERTIES PREFIX "")
target_link_libraries(cjson ${_MODULE_LINK})

install(TARGETS cjson DESTINATION "${_lua_module_dir}")

# vi:ai et sw=4 ts=4:
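The CHECK_SYMBOL_EXISTS test above only decides whether USE_INTERNAL_ISINF gets defined; the actual workaround lives in the C sources, which are not part of this listing. As a hedged sketch of the kind of fallback such a define typically selects, the standalone program below classifies infinities with arithmetic alone: infinity minus itself is NaN, while any finite value minus itself is zero. The macro body is illustrative, not a quotation of the project's code.

/* Illustrative fallback for platforms whose math.h lacks isinf().
 * Only applies when the build requested it via USE_INTERNAL_ISINF. */
#include <math.h>
#include <stdio.h>

#ifdef USE_INTERNAL_ISINF
#undef isinf
#define isinf(x) (!isnan(x) && isnan((x) - (x)))   /* inf - inf is NaN */
#endif

int main(void)
{
    double samples[] = { 1.0, HUGE_VAL, -HUGE_VAL, NAN };
    for (int i = 0; i < 4; i++)
        printf("%g -> isinf() = %d\n", samples[i], isinf(samples[i]) != 0);
    return 0;
}

Built normally this uses the system isinf(); built with -DUSE_INTERNAL_ISINF it exercises the fallback macro instead.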
lua-cjson-2.1.0+dfsg/LICENSE

Copyright (c) 2010-2012 Mark Pulford

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

lua-cjson-2.1.0+dfsg/Makefile

##### Available defines for CJSON_CFLAGS #####
##
## USE_INTERNAL_ISINF:      Workaround for Solaris platforms missing isinf().
## DISABLE_INVALID_NUMBERS: Permanently disable invalid JSON numbers:
##                          NaN, Infinity, hex.
##
## Optional built-in number conversion uses the following defines:
## USE_INTERNAL_FPCONV:     Use builtin strtod/dtoa for numeric conversions.
## IEEE_BIG_ENDIAN:         Required on big endian architectures.
## MULTIPLE_THREADS:        Must be set when Lua CJSON may be used in a
##                          multi-threaded application. Requires _pthreads_.

##### Build defaults #####

LUA_VERSION = 5.1
TARGET = cjson.so
PREFIX = /usr/local
#CFLAGS = -g -Wall -pedantic -fno-inline
CFLAGS = -O3 -Wall -pedantic -DNDEBUG
CJSON_CFLAGS = -fpic
CJSON_LDFLAGS = -shared
LUA_INCLUDE_DIR = $(PREFIX)/include
LUA_CMODULE_DIR = $(PREFIX)/lib/lua/$(LUA_VERSION)
LUA_MODULE_DIR = $(PREFIX)/share/lua/$(LUA_VERSION)
LUA_BIN_DIR = $(PREFIX)/bin

##### Platform overrides #####
##
## Tweak one of the platform sections below to suit your situation.
##
## See http://lua-users.org/wiki/BuildingModules for further platform
## specific details.

## Linux

## FreeBSD
#LUA_INCLUDE_DIR = $(PREFIX)/include/lua51

## MacOSX (Macports)
#PREFIX = /opt/local
#CJSON_LDFLAGS = -bundle -undefined dynamic_lookup

## Solaris
#CC = gcc
#CJSON_CFLAGS = -fpic -DUSE_INTERNAL_ISINF

## Windows (MinGW)
#TARGET = cjson.dll
#PREFIX = /home/user/opt
#CJSON_CFLAGS = -DDISABLE_INVALID_NUMBERS
#CJSON_LDFLAGS = -shared -L$(PREFIX)/lib -llua51
#LUA_BIN_SUFFIX = .lua

##### Number conversion configuration #####

## Use Libc support for number conversion (default)
FPCONV_OBJS = fpconv.o

## Use built in number conversion
#FPCONV_OBJS = g_fmt.o dtoa.o
#CJSON_CFLAGS += -DUSE_INTERNAL_FPCONV

## Compile built in number conversion for big endian architectures
#CJSON_CFLAGS += -DIEEE_BIG_ENDIAN

## Compile built in number conversion to support multi-threaded
## applications (recommended)
#CJSON_CFLAGS += -pthread -DMULTIPLE_THREADS
#CJSON_LDFLAGS += -pthread

##### End customisable sections #####

TEST_FILES = README bench.lua genutf8.pl test.lua octets-escaped.dat \
             example1.json example2.json example3.json example4.json \
             example5.json numbers.json rfc-example1.json \
             rfc-example2.json types.json
DATAPERM = 644
EXECPERM = 755

ASCIIDOC = asciidoc

BUILD_CFLAGS = -I$(LUA_INCLUDE_DIR) $(CJSON_CFLAGS)
OBJS = lua_cjson.o strbuf.o $(FPCONV_OBJS)

.PHONY: all clean install install-extra doc

.SUFFIXES: .html .txt

.c.o:
	$(CC) -c $(CFLAGS) $(CPPFLAGS) $(BUILD_CFLAGS) -o $@ $<

.txt.html:
	$(ASCIIDOC) -n -a toc $<

all: $(TARGET)

doc: manual.html performance.html

$(TARGET): $(OBJS)
	$(CC) $(LDFLAGS) $(CJSON_LDFLAGS) -o $@ $(OBJS)

install: $(TARGET)
	mkdir -p $(DESTDIR)/$(LUA_CMODULE_DIR)
	cp $(TARGET) $(DESTDIR)/$(LUA_CMODULE_DIR)
	chmod $(EXECPERM) $(DESTDIR)/$(LUA_CMODULE_DIR)/$(TARGET)

install-extra:
	mkdir -p $(DESTDIR)/$(LUA_MODULE_DIR)/cjson/tests \
		$(DESTDIR)/$(LUA_BIN_DIR)
	cp lua/cjson/util.lua $(DESTDIR)/$(LUA_MODULE_DIR)/cjson
	chmod $(DATAPERM) $(DESTDIR)/$(LUA_MODULE_DIR)/cjson/util.lua
	cp lua/lua2json.lua $(DESTDIR)/$(LUA_BIN_DIR)/lua2json$(LUA_BIN_SUFFIX)
	chmod $(EXECPERM) $(DESTDIR)/$(LUA_BIN_DIR)/lua2json$(LUA_BIN_SUFFIX)
	cp lua/json2lua.lua $(DESTDIR)/$(LUA_BIN_DIR)/json2lua$(LUA_BIN_SUFFIX)
	chmod $(EXECPERM) $(DESTDIR)/$(LUA_BIN_DIR)/json2lua$(LUA_BIN_SUFFIX)
	cd tests; cp $(TEST_FILES) $(DESTDIR)/$(LUA_MODULE_DIR)/cjson/tests
	cd tests; chmod $(DATAPERM) $(TEST_FILES); chmod $(EXECPERM) *.lua *.pl

clean:
	rm -f *.o $(TARGET)
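The MULTIPLE_THREADS define offered above (and by the CMake build) is consumed by dtoa.c further down, which then expects the embedding project to supply two lock macros, ACQUIRE_DTOA_LOCK(n) and FREE_DTOA_LOCK(n) for n = 0 or 1. Lua CJSON provides these through its own dtoa_config.h, which is not included in this excerpt; the sketch below is merely one plausible way to satisfy that contract with pthreads.

#include <pthread.h>
#include <stdio.h>

/* Two mutexes matching the n = 0 / n = 1 contract described in the
 * dtoa.c comments below: lock 0 guards Balloc()'s free lists, lock 1
 * guards the lazily built powers-of-5 chain in pow5mult(). */
static pthread_mutex_t dtoa_locks[2] = {
    PTHREAD_MUTEX_INITIALIZER,
    PTHREAD_MUTEX_INITIALIZER
};

#define ACQUIRE_DTOA_LOCK(n) pthread_mutex_lock(&dtoa_locks[n])
#define FREE_DTOA_LOCK(n)    pthread_mutex_unlock(&dtoa_locks[n])

int main(void)
{
    /* Exercise both locks once, the way the Bigint allocator and
     * pow5mult() would around their shared state. */
    ACQUIRE_DTOA_LOCK(0);
    FREE_DTOA_LOCK(0);
    ACQUIRE_DTOA_LOCK(1);
    FREE_DTOA_LOCK(1);
    puts("lock macros satisfied");
    return 0;
}

Build with -pthread for both compilation and linking, which is what the commented-out MULTIPLE_THREADS lines in the Makefile above add.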
lua-cjson-2.1.0+dfsg/NEWS

Version 2.1.0 (Mar 1 2012)
* Added cjson.safe module interface which returns nil after an error
* Improved Makefile compatibility with Solaris make

Version 2.0.0 (Jan 22 2012)
* Improved platform compatibility for strtod/sprintf locale workaround
* Added option to build with David Gay's dtoa.c for improved performance
* Added support for Lua 5.2
* Added option to encode infinity/NaN as JSON null
* Fixed encode bug with a raised default limit and deeply nested tables
* Updated Makefile for compatibility with non-GNU make implementations
* Added CMake build support
* Added HTML manual
* Increased default nesting limit to 1000
* Added support for re-entrant use of encode and decode
* Added support for installing lua2json and json2lua utilities
* Added encode_invalid_numbers() and decode_invalid_numbers()
* Added decode_max_depth()
* Removed registration of global cjson module table
* Removed refuse_invalid_numbers()

Version 1.0.4 (Nov 30 2011)
* Fixed numeric conversion under locales with a comma decimal separator

Version 1.0.3 (Sep 15 2011)
* Fixed detection of objects with numeric string keys
* Provided work around for missing isinf() on Solaris

Version 1.0.2 (May 30 2011)
* Portability improvements for Windows
  - No longer links with -lm
  - Use "socket" instead of "posix" for sub-second timing
* Removed UTF-8 test dependency on Perl Text::Iconv
* Added simple CLI commands for testing Lua <-> JSON conversions
* Added cjson.encode_number_precision()

Version 1.0.1 (May 10 2011)
* Added build support for OSX
* Removed unnecessary whitespace from JSON output
* Added cjson.encode_keep_buffer()
* Fixed memory leak on Lua stack overflow exception

Version 1.0 (May 9 2011)
* Initial release
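The 1.0.4 entry above ("Fixed numeric conversion under locales with a comma decimal separator") and the 2.0.0 locale-workaround entry come from the same portability trap: strtod() and sprintf() honour LC_NUMERIC. The short program below only demonstrates the trap itself; it is not lua-cjson code, and the de_DE.UTF-8 locale name is an assumption that may not be installed on a given system.

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse the same JSON-style number under two locales and report how
 * much of it strtod() actually consumed. */
static void parse(const char *label)
{
    const char *text = "3.14";
    char *end;
    double value = strtod(text, &end);
    printf("%-10s value=%g, consumed %d of 4 characters\n",
           label, value, (int)(end - text));
}

int main(void)
{
    parse("C");

    /* Under a comma-decimal locale, parsing stops at the '.', so the
     * value silently becomes 3 -- the bug class the changelog refers to. */
    if (setlocale(LC_NUMERIC, "de_DE.UTF-8"))
        parse("de_DE");
    else
        puts("de_DE.UTF-8 not installed; second case skipped");

    return 0;
}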
lua-cjson-2.1.0+dfsg/THANKS

The following people have helped with bug reports, testing and/or suggestions:

- Louis-Philippe Perron (@loopole)
- Ondřej Jirman
- Steve Donovan
- Zhang "agentzh" Yichun

Thanks!

lua-cjson-2.1.0+dfsg/dtoa.c

/****************************************************************
 *
 * The author of this software is David M. Gay.
 *
 * Copyright (c) 1991, 2000, 2001 by Lucent Technologies.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose without fee is hereby granted, provided that this entire notice
 * is included in all copies of any software which is or includes a copy
 * or modification of this software and in all copies of the supporting
 * documentation for such software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED
 * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY
 * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE.
 *
 ***************************************************************/

/* Please send bug reports to David M. Gay (dmg at acm dot org,
 * with " at " changed to "@" and " dot " changed to "."). */

/* On a machine with IEEE extended-precision registers, it is
 * necessary to specify double-precision (53-bit) rounding precision
 * before invoking strtod or dtoa. If the machine uses (the equivalent
 * of) Intel 80x87 arithmetic, the call
 *      _control87(PC_53, MCW_PC);
 * does this with many compilers. Whether this or another call is
 * appropriate depends on the compiler; for this to work, it may be
 * necessary to #include "float.h" or another system-dependent header
 * file.
 */

/* strtod for IEEE-, VAX-, and IBM-arithmetic machines.
 *
 * This strtod returns a nearest machine number to the input decimal
 * string (or sets errno to ERANGE). With IEEE arithmetic, ties are
 * broken by the IEEE round-even rule. Otherwise ties are broken by
 * biased rounding (add half and chop).
 *
 * Inspired loosely by William D. Clinger's paper "How to Read Floating
 * Point Numbers Accurately" [Proc. ACM SIGPLAN '90, pp. 92-101].
 *
 * Modifications:
 *
 *      1. We only require IEEE, IBM, or VAX double-precision
 *              arithmetic (not IEEE double-extended).
 *      2. We get by with floating-point arithmetic in a case that
 *              Clinger missed -- when we're computing d * 10^n
 *              for a small integer d and the integer n is not too
 *              much larger than 22 (the maximum integer k for which
 *              we can represent 10^k exactly), we may be able to
 *              compute (d*10^k) * 10^(e-k) with just one roundoff.
 *      3. Rather than a bit-at-a-time adjustment of the binary
 *              result in the hard case, we use floating-point
 *              arithmetic to determine the adjustment to within
 *              one bit; only in really hard cases do we need to
 *              compute a second residual.
 *      4. Because of 3., we don't need a large table of powers of 10
 *              for ten-to-e (just some small tables, e.g. of 10^k
 *              for 0 <= k <= 22).
 */

/*
 * #define IEEE_8087 for IEEE-arithmetic machines where the least
 *      significant byte has the lowest address.
 * #define IEEE_MC68k for IEEE-arithmetic machines where the most
 *      significant byte has the lowest address.
 * #define Long int on machines with 32-bit ints and 64-bit longs.
 * #define IBM for IBM mainframe-style floating-point arithmetic.
 * #define VAX for VAX-style floating-point arithmetic (D_floating).
 * #define No_leftright to omit left-right logic in fast floating-point
 *      computation of dtoa. This will cause dtoa modes 4 and 5 to be
 *      treated the same as modes 2 and 3 for some inputs.
 * #define Honor_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
 *      and strtod and dtoa should round accordingly. Unless Trust_FLT_ROUNDS
 *      is also #defined, fegetround() will be queried for the rounding mode.
 *      Note that both FLT_ROUNDS and fegetround() are specified by the C99
 *      standard (and are specified to be consistent, with fesetround()
 *      affecting the value of FLT_ROUNDS), but that some (Linux) systems
 *      do not work correctly in this regard, so using fegetround() is more
 *      portable than using FLT_ROUNDS directly.
 * #define Check_FLT_ROUNDS if FLT_ROUNDS can assume the values 2 or 3
 *      and Honor_FLT_ROUNDS is not #defined.
 * #define RND_PRODQUOT to use rnd_prod and rnd_quot (assembly routines
 *      that use extended-precision instructions to compute rounded
 *      products and quotients) with IBM.
 * #define ROUND_BIASED for IEEE-format with biased rounding and arithmetic
 *      that rounds toward +Infinity.
* #define ROUND_BIASED_without_Round_Up for IEEE-format with biased * rounding when the underlying floating-point arithmetic uses * unbiased rounding. This prevent using ordinary floating-point * arithmetic when the result could be computed with one rounding error. * #define Inaccurate_Divide for IEEE-format with correctly rounded * products but inaccurate quotients, e.g., for Intel i860. * #define NO_LONG_LONG on machines that do not have a "long long" * integer type (of >= 64 bits). On such machines, you can * #define Just_16 to store 16 bits per 32-bit Long when doing * high-precision integer arithmetic. Whether this speeds things * up or slows things down depends on the machine and the number * being converted. If long long is available and the name is * something other than "long long", #define Llong to be the name, * and if "unsigned Llong" does not work as an unsigned version of * Llong, #define #ULLong to be the corresponding unsigned type. * #define KR_headers for old-style C function headers. * #define Bad_float_h if your system lacks a float.h or if it does not * define some or all of DBL_DIG, DBL_MAX_10_EXP, DBL_MAX_EXP, * FLT_RADIX, FLT_ROUNDS, and DBL_MAX. * #define MALLOC your_malloc, where your_malloc(n) acts like malloc(n) * if memory is available and otherwise does something you deem * appropriate. If MALLOC is undefined, malloc will be invoked * directly -- and assumed always to succeed. Similarly, if you * want something other than the system's free() to be called to * recycle memory acquired from MALLOC, #define FREE to be the * name of the alternate routine. (FREE or free is only called in * pathological cases, e.g., in a dtoa call after a dtoa return in * mode 3 with thousands of digits requested.) * #define Omit_Private_Memory to omit logic (added Jan. 1998) for making * memory allocations from a private pool of memory when possible. * When used, the private pool is PRIVATE_MEM bytes long: 2304 bytes, * unless #defined to be a different length. This default length * suffices to get rid of MALLOC calls except for unusual cases, * such as decimal-to-binary conversion of a very long string of * digits. The longest string dtoa can return is about 751 bytes * long. For conversions by strtod of strings of 800 digits and * all dtoa conversions in single-threaded executions with 8-byte * pointers, PRIVATE_MEM >= 7400 appears to suffice; with 4-byte * pointers, PRIVATE_MEM >= 7112 appears adequate. * #define NO_INFNAN_CHECK if you do not wish to have INFNAN_CHECK * #defined automatically on IEEE systems. On such systems, * when INFNAN_CHECK is #defined, strtod checks * for Infinity and NaN (case insensitively). On some systems * (e.g., some HP systems), it may be necessary to #define NAN_WORD0 * appropriately -- to the most significant word of a quiet NaN. * (On HP Series 700/800 machines, -DNAN_WORD0=0x7ff40000 works.) * When INFNAN_CHECK is #defined and No_Hex_NaN is not #defined, * strtod also accepts (case insensitively) strings of the form * NaN(x), where x is a string of hexadecimal digits and spaces; * if there is only one string of hexadecimal digits, it is taken * for the 52 fraction bits of the resulting NaN; if there are two * or more strings of hex digits, the first is for the high 20 bits, * the second and subsequent for the low 32 bits, with intervening * white space ignored; but if this results in none of the 52 * fraction bits being on (an IEEE Infinity symbol), then NAN_WORD0 * and NAN_WORD1 are used instead. 
* #define MULTIPLE_THREADS if the system offers preemptively scheduled * multiple threads. In this case, you must provide (or suitably * #define) two locks, acquired by ACQUIRE_DTOA_LOCK(n) and freed * by FREE_DTOA_LOCK(n) for n = 0 or 1. (The second lock, accessed * in pow5mult, ensures lazy evaluation of only one copy of high * powers of 5; omitting this lock would introduce a small * probability of wasting memory, but would otherwise be harmless.) * You must also invoke freedtoa(s) to free the value s returned by * dtoa. You may do so whether or not MULTIPLE_THREADS is #defined. * #define NO_IEEE_Scale to disable new (Feb. 1997) logic in strtod that * avoids underflows on inputs whose result does not underflow. * If you #define NO_IEEE_Scale on a machine that uses IEEE-format * floating-point numbers and flushes underflows to zero rather * than implementing gradual underflow, then you must also #define * Sudden_Underflow. * #define USE_LOCALE to use the current locale's decimal_point value. * #define SET_INEXACT if IEEE arithmetic is being used and extra * computation should be done to set the inexact flag when the * result is inexact and avoid setting inexact when the result * is exact. In this case, dtoa.c must be compiled in * an environment, perhaps provided by #include "dtoa.c" in a * suitable wrapper, that defines two functions, * int get_inexact(void); * void clear_inexact(void); * such that get_inexact() returns a nonzero value if the * inexact bit is already set, and clear_inexact() sets the * inexact bit to 0. When SET_INEXACT is #defined, strtod * also does extra computations to set the underflow and overflow * flags when appropriate (i.e., when the result is tiny and * inexact or when it is a numeric value rounded to +-infinity). * #define NO_ERRNO if strtod should not assign errno = ERANGE when * the result overflows to +-Infinity or underflows to 0. * #define NO_HEX_FP to omit recognition of hexadecimal floating-point * values by strtod. * #define NO_STRTOD_BIGCOMP (on IEEE-arithmetic systems only for now) * to disable logic for "fast" testing of very long input strings * to strtod. This testing proceeds by initially truncating the * input string, then if necessary comparing the whole string with * a decimal expansion to decide close cases. This logic is only * used for input more than STRTOD_DIGLIM digits long (default 40). 
*/ #include "dtoa_config.h" #ifndef Long #define Long long #endif #ifndef ULong typedef unsigned Long ULong; #endif #ifdef DEBUG #include "stdio.h" #define Bug(x) {fprintf(stderr, "%s\n", x); exit(1);} #endif #include "stdlib.h" #include "string.h" #ifdef USE_LOCALE #include "locale.h" #endif #ifdef Honor_FLT_ROUNDS #ifndef Trust_FLT_ROUNDS #include #endif #endif #ifdef MALLOC #ifdef KR_headers extern char *MALLOC(); #else extern void *MALLOC(size_t); #endif #else #define MALLOC malloc #endif #ifndef Omit_Private_Memory #ifndef PRIVATE_MEM #define PRIVATE_MEM 2304 #endif #define PRIVATE_mem ((PRIVATE_MEM+sizeof(double)-1)/sizeof(double)) static double private_mem[PRIVATE_mem], *pmem_next = private_mem; #endif #undef IEEE_Arith #undef Avoid_Underflow #ifdef IEEE_MC68k #define IEEE_Arith #endif #ifdef IEEE_8087 #define IEEE_Arith #endif #ifdef IEEE_Arith #ifndef NO_INFNAN_CHECK #undef INFNAN_CHECK #define INFNAN_CHECK #endif #else #undef INFNAN_CHECK #define NO_STRTOD_BIGCOMP #endif #include "errno.h" #ifdef Bad_float_h #ifdef IEEE_Arith #define DBL_DIG 15 #define DBL_MAX_10_EXP 308 #define DBL_MAX_EXP 1024 #define FLT_RADIX 2 #endif /*IEEE_Arith*/ #ifdef IBM #define DBL_DIG 16 #define DBL_MAX_10_EXP 75 #define DBL_MAX_EXP 63 #define FLT_RADIX 16 #define DBL_MAX 7.2370055773322621e+75 #endif #ifdef VAX #define DBL_DIG 16 #define DBL_MAX_10_EXP 38 #define DBL_MAX_EXP 127 #define FLT_RADIX 2 #define DBL_MAX 1.7014118346046923e+38 #endif #ifndef LONG_MAX #define LONG_MAX 2147483647 #endif #else /* ifndef Bad_float_h */ #include "float.h" #endif /* Bad_float_h */ #ifndef __MATH_H__ #include "math.h" #endif #ifdef __cplusplus extern "C" { #endif #ifndef CONST #ifdef KR_headers #define CONST /* blank */ #else #define CONST const #endif #endif #if defined(IEEE_8087) + defined(IEEE_MC68k) + defined(VAX) + defined(IBM) != 1 Exactly one of IEEE_8087, IEEE_MC68k, VAX, or IBM should be defined. #endif typedef union { double d; ULong L[2]; } U; #ifdef IEEE_8087 #define word0(x) (x)->L[1] #define word1(x) (x)->L[0] #else #define word0(x) (x)->L[0] #define word1(x) (x)->L[1] #endif #define dval(x) (x)->d #ifndef STRTOD_DIGLIM #define STRTOD_DIGLIM 40 #endif #ifdef DIGLIM_DEBUG extern int strtod_diglim; #else #define strtod_diglim STRTOD_DIGLIM #endif /* The following definition of Storeinc is appropriate for MIPS processors. 
* An alternative that might be better on some machines is * #define Storeinc(a,b,c) (*a++ = b << 16 | c & 0xffff) */ #if defined(IEEE_8087) + defined(VAX) #define Storeinc(a,b,c) (((unsigned short *)a)[1] = (unsigned short)b, \ ((unsigned short *)a)[0] = (unsigned short)c, a++) #else #define Storeinc(a,b,c) (((unsigned short *)a)[0] = (unsigned short)b, \ ((unsigned short *)a)[1] = (unsigned short)c, a++) #endif /* #define P DBL_MANT_DIG */ /* Ten_pmax = floor(P*log(2)/log(5)) */ /* Bletch = (highest power of 2 < DBL_MAX_10_EXP) / 16 */ /* Quick_max = floor((P-1)*log(FLT_RADIX)/log(10) - 1) */ /* Int_max = floor(P*log(FLT_RADIX)/log(10) - 1) */ #ifdef IEEE_Arith #define Exp_shift 20 #define Exp_shift1 20 #define Exp_msk1 0x100000 #define Exp_msk11 0x100000 #define Exp_mask 0x7ff00000 #define P 53 #define Nbits 53 #define Bias 1023 #define Emax 1023 #define Emin (-1022) #define Exp_1 0x3ff00000 #define Exp_11 0x3ff00000 #define Ebits 11 #define Frac_mask 0xfffff #define Frac_mask1 0xfffff #define Ten_pmax 22 #define Bletch 0x10 #define Bndry_mask 0xfffff #define Bndry_mask1 0xfffff #define LSB 1 #define Sign_bit 0x80000000 #define Log2P 1 #define Tiny0 0 #define Tiny1 1 #define Quick_max 14 #define Int_max 14 #ifndef NO_IEEE_Scale #define Avoid_Underflow #ifdef Flush_Denorm /* debugging option */ #undef Sudden_Underflow #endif #endif #ifndef Flt_Rounds #ifdef FLT_ROUNDS #define Flt_Rounds FLT_ROUNDS #else #define Flt_Rounds 1 #endif #endif /*Flt_Rounds*/ #ifdef Honor_FLT_ROUNDS #undef Check_FLT_ROUNDS #define Check_FLT_ROUNDS #else #define Rounding Flt_Rounds #endif #else /* ifndef IEEE_Arith */ #undef Check_FLT_ROUNDS #undef Honor_FLT_ROUNDS #undef SET_INEXACT #undef Sudden_Underflow #define Sudden_Underflow #ifdef IBM #undef Flt_Rounds #define Flt_Rounds 0 #define Exp_shift 24 #define Exp_shift1 24 #define Exp_msk1 0x1000000 #define Exp_msk11 0x1000000 #define Exp_mask 0x7f000000 #define P 14 #define Nbits 56 #define Bias 65 #define Emax 248 #define Emin (-260) #define Exp_1 0x41000000 #define Exp_11 0x41000000 #define Ebits 8 /* exponent has 7 bits, but 8 is the right value in b2d */ #define Frac_mask 0xffffff #define Frac_mask1 0xffffff #define Bletch 4 #define Ten_pmax 22 #define Bndry_mask 0xefffff #define Bndry_mask1 0xffffff #define LSB 1 #define Sign_bit 0x80000000 #define Log2P 4 #define Tiny0 0x100000 #define Tiny1 0 #define Quick_max 14 #define Int_max 15 #else /* VAX */ #undef Flt_Rounds #define Flt_Rounds 1 #define Exp_shift 23 #define Exp_shift1 7 #define Exp_msk1 0x80 #define Exp_msk11 0x800000 #define Exp_mask 0x7f80 #define P 56 #define Nbits 56 #define Bias 129 #define Emax 126 #define Emin (-129) #define Exp_1 0x40800000 #define Exp_11 0x4080 #define Ebits 8 #define Frac_mask 0x7fffff #define Frac_mask1 0xffff007f #define Ten_pmax 24 #define Bletch 2 #define Bndry_mask 0xffff007f #define Bndry_mask1 0xffff007f #define LSB 0x10000 #define Sign_bit 0x8000 #define Log2P 1 #define Tiny0 0x80 #define Tiny1 0 #define Quick_max 15 #define Int_max 15 #endif /* IBM, VAX */ #endif /* IEEE_Arith */ #ifndef IEEE_Arith #define ROUND_BIASED #else #ifdef ROUND_BIASED_without_Round_Up #undef ROUND_BIASED #define ROUND_BIASED #endif #endif #ifdef RND_PRODQUOT #define rounded_product(a,b) a = rnd_prod(a, b) #define rounded_quotient(a,b) a = rnd_quot(a, b) #ifdef KR_headers extern double rnd_prod(), rnd_quot(); #else extern double rnd_prod(double, double), rnd_quot(double, double); #endif #else #define rounded_product(a,b) a *= b #define rounded_quotient(a,b) a /= b #endif #define Big0 
(Frac_mask1 | Exp_msk1*(DBL_MAX_EXP+Bias-1)) #define Big1 0xffffffff #ifndef Pack_32 #define Pack_32 #endif typedef struct BCinfo BCinfo; struct BCinfo { int dp0, dp1, dplen, dsign, e0, inexact, nd, nd0, rounding, scale, uflchk; }; #ifdef KR_headers #define FFFFFFFF ((((unsigned long)0xffff)<<16)|(unsigned long)0xffff) #else #define FFFFFFFF 0xffffffffUL #endif #ifdef NO_LONG_LONG #undef ULLong #ifdef Just_16 #undef Pack_32 /* When Pack_32 is not defined, we store 16 bits per 32-bit Long. * This makes some inner loops simpler and sometimes saves work * during multiplications, but it often seems to make things slightly * slower. Hence the default is now to store 32 bits per Long. */ #endif #else /* long long available */ #ifndef Llong #define Llong long long #endif #ifndef ULLong #define ULLong unsigned Llong #endif #endif /* NO_LONG_LONG */ #ifndef MULTIPLE_THREADS #define ACQUIRE_DTOA_LOCK(n) /*nothing*/ #define FREE_DTOA_LOCK(n) /*nothing*/ #endif #define Kmax 7 #ifdef __cplusplus extern "C" double fpconv_strtod(const char *s00, char **se); extern "C" char *dtoa(double d, int mode, int ndigits, int *decpt, int *sign, char **rve); #endif struct Bigint { struct Bigint *next; int k, maxwds, sign, wds; ULong x[1]; }; typedef struct Bigint Bigint; static Bigint *freelist[Kmax+1]; static Bigint * Balloc #ifdef KR_headers (k) int k; #else (int k) #endif { int x; Bigint *rv; #ifndef Omit_Private_Memory unsigned int len; #endif ACQUIRE_DTOA_LOCK(0); /* The k > Kmax case does not need ACQUIRE_DTOA_LOCK(0), */ /* but this case seems very unlikely. */ if (k <= Kmax && (rv = freelist[k])) freelist[k] = rv->next; else { x = 1 << k; #ifdef Omit_Private_Memory rv = (Bigint *)MALLOC(sizeof(Bigint) + (x-1)*sizeof(ULong)); #else len = (sizeof(Bigint) + (x-1)*sizeof(ULong) + sizeof(double) - 1) /sizeof(double); if (k <= Kmax && pmem_next - private_mem + len <= PRIVATE_mem) { rv = (Bigint*)pmem_next; pmem_next += len; } else rv = (Bigint*)MALLOC(len*sizeof(double)); #endif rv->k = k; rv->maxwds = x; } FREE_DTOA_LOCK(0); rv->sign = rv->wds = 0; return rv; } static void Bfree #ifdef KR_headers (v) Bigint *v; #else (Bigint *v) #endif { if (v) { if (v->k > Kmax) #ifdef FREE FREE((void*)v); #else free((void*)v); #endif else { ACQUIRE_DTOA_LOCK(0); v->next = freelist[v->k]; freelist[v->k] = v; FREE_DTOA_LOCK(0); } } } #define Bcopy(x,y) memcpy((char *)&x->sign, (char *)&y->sign, \ y->wds*sizeof(Long) + 2*sizeof(int)) static Bigint * multadd #ifdef KR_headers (b, m, a) Bigint *b; int m, a; #else (Bigint *b, int m, int a) /* multiply by m and add a */ #endif { int i, wds; #ifdef ULLong ULong *x; ULLong carry, y; #else ULong carry, *x, y; #ifdef Pack_32 ULong xi, z; #endif #endif Bigint *b1; wds = b->wds; x = b->x; i = 0; carry = a; do { #ifdef ULLong y = *x * (ULLong)m + carry; carry = y >> 32; *x++ = y & FFFFFFFF; #else #ifdef Pack_32 xi = *x; y = (xi & 0xffff) * m + carry; z = (xi >> 16) * m + (y >> 16); carry = z >> 16; *x++ = (z << 16) + (y & 0xffff); #else y = *x * m + carry; carry = y >> 16; *x++ = y & 0xffff; #endif #endif } while(++i < wds); if (carry) { if (wds >= b->maxwds) { b1 = Balloc(b->k+1); Bcopy(b1, b); Bfree(b); b = b1; } b->x[wds++] = carry; b->wds = wds; } return b; } static Bigint * s2b #ifdef KR_headers (s, nd0, nd, y9, dplen) CONST char *s; int nd0, nd, dplen; ULong y9; #else (const char *s, int nd0, int nd, ULong y9, int dplen) #endif { Bigint *b; int i, k; Long x, y; x = (nd + 8) / 9; for(k = 0, y = 1; x > y; y <<= 1, k++) ; #ifdef Pack_32 b = Balloc(k); b->x[0] = y9; b->wds = 1; #else b = 
Balloc(k+1); b->x[0] = y9 & 0xffff; b->wds = (b->x[1] = y9 >> 16) ? 2 : 1; #endif i = 9; if (9 < nd0) { s += 9; do b = multadd(b, 10, *s++ - '0'); while(++i < nd0); s += dplen; } else s += dplen + 9; for(; i < nd; i++) b = multadd(b, 10, *s++ - '0'); return b; } static int hi0bits #ifdef KR_headers (x) ULong x; #else (ULong x) #endif { int k = 0; if (!(x & 0xffff0000)) { k = 16; x <<= 16; } if (!(x & 0xff000000)) { k += 8; x <<= 8; } if (!(x & 0xf0000000)) { k += 4; x <<= 4; } if (!(x & 0xc0000000)) { k += 2; x <<= 2; } if (!(x & 0x80000000)) { k++; if (!(x & 0x40000000)) return 32; } return k; } static int lo0bits #ifdef KR_headers (y) ULong *y; #else (ULong *y) #endif { int k; ULong x = *y; if (x & 7) { if (x & 1) return 0; if (x & 2) { *y = x >> 1; return 1; } *y = x >> 2; return 2; } k = 0; if (!(x & 0xffff)) { k = 16; x >>= 16; } if (!(x & 0xff)) { k += 8; x >>= 8; } if (!(x & 0xf)) { k += 4; x >>= 4; } if (!(x & 0x3)) { k += 2; x >>= 2; } if (!(x & 1)) { k++; x >>= 1; if (!x) return 32; } *y = x; return k; } static Bigint * i2b #ifdef KR_headers (i) int i; #else (int i) #endif { Bigint *b; b = Balloc(1); b->x[0] = i; b->wds = 1; return b; } static Bigint * mult #ifdef KR_headers (a, b) Bigint *a, *b; #else (Bigint *a, Bigint *b) #endif { Bigint *c; int k, wa, wb, wc; ULong *x, *xa, *xae, *xb, *xbe, *xc, *xc0; ULong y; #ifdef ULLong ULLong carry, z; #else ULong carry, z; #ifdef Pack_32 ULong z2; #endif #endif if (a->wds < b->wds) { c = a; a = b; b = c; } k = a->k; wa = a->wds; wb = b->wds; wc = wa + wb; if (wc > a->maxwds) k++; c = Balloc(k); for(x = c->x, xa = x + wc; x < xa; x++) *x = 0; xa = a->x; xae = xa + wa; xb = b->x; xbe = xb + wb; xc0 = c->x; #ifdef ULLong for(; xb < xbe; xc0++) { if ((y = *xb++)) { x = xa; xc = xc0; carry = 0; do { z = *x++ * (ULLong)y + *xc + carry; carry = z >> 32; *xc++ = z & FFFFFFFF; } while(x < xae); *xc = carry; } } #else #ifdef Pack_32 for(; xb < xbe; xb++, xc0++) { if (y = *xb & 0xffff) { x = xa; xc = xc0; carry = 0; do { z = (*x & 0xffff) * y + (*xc & 0xffff) + carry; carry = z >> 16; z2 = (*x++ >> 16) * y + (*xc >> 16) + carry; carry = z2 >> 16; Storeinc(xc, z2, z); } while(x < xae); *xc = carry; } if (y = *xb >> 16) { x = xa; xc = xc0; carry = 0; z2 = *xc; do { z = (*x & 0xffff) * y + (*xc >> 16) + carry; carry = z >> 16; Storeinc(xc, z, z2); z2 = (*x++ >> 16) * y + (*xc & 0xffff) + carry; carry = z2 >> 16; } while(x < xae); *xc = z2; } } #else for(; xb < xbe; xc0++) { if (y = *xb++) { x = xa; xc = xc0; carry = 0; do { z = *x++ * y + *xc + carry; carry = z >> 16; *xc++ = z & 0xffff; } while(x < xae); *xc = carry; } } #endif #endif for(xc0 = c->x, xc = xc0 + wc; wc > 0 && !*--xc; --wc) ; c->wds = wc; return c; } static Bigint *p5s; static Bigint * pow5mult #ifdef KR_headers (b, k) Bigint *b; int k; #else (Bigint *b, int k) #endif { Bigint *b1, *p5, *p51; int i; static int p05[3] = { 5, 25, 125 }; if ((i = k & 3)) b = multadd(b, p05[i-1], 0); if (!(k >>= 2)) return b; if (!(p5 = p5s)) { /* first time */ #ifdef MULTIPLE_THREADS ACQUIRE_DTOA_LOCK(1); if (!(p5 = p5s)) { p5 = p5s = i2b(625); p5->next = 0; } FREE_DTOA_LOCK(1); #else p5 = p5s = i2b(625); p5->next = 0; #endif } for(;;) { if (k & 1) { b1 = mult(b, p5); Bfree(b); b = b1; } if (!(k >>= 1)) break; if (!(p51 = p5->next)) { #ifdef MULTIPLE_THREADS ACQUIRE_DTOA_LOCK(1); if (!(p51 = p5->next)) { p51 = p5->next = mult(p5,p5); p51->next = 0; } FREE_DTOA_LOCK(1); #else p51 = p5->next = mult(p5,p5); p51->next = 0; #endif } p5 = p51; } return b; } static Bigint * lshift #ifdef KR_headers (b, k) 
Bigint *b; int k; #else (Bigint *b, int k) #endif { int i, k1, n, n1; Bigint *b1; ULong *x, *x1, *xe, z; #ifdef Pack_32 n = k >> 5; #else n = k >> 4; #endif k1 = b->k; n1 = n + b->wds + 1; for(i = b->maxwds; n1 > i; i <<= 1) k1++; b1 = Balloc(k1); x1 = b1->x; for(i = 0; i < n; i++) *x1++ = 0; x = b->x; xe = x + b->wds; #ifdef Pack_32 if (k &= 0x1f) { k1 = 32 - k; z = 0; do { *x1++ = *x << k | z; z = *x++ >> k1; } while(x < xe); if ((*x1 = z)) ++n1; } #else if (k &= 0xf) { k1 = 16 - k; z = 0; do { *x1++ = *x << k & 0xffff | z; z = *x++ >> k1; } while(x < xe); if (*x1 = z) ++n1; } #endif else do *x1++ = *x++; while(x < xe); b1->wds = n1 - 1; Bfree(b); return b1; } static int cmp #ifdef KR_headers (a, b) Bigint *a, *b; #else (Bigint *a, Bigint *b) #endif { ULong *xa, *xa0, *xb, *xb0; int i, j; i = a->wds; j = b->wds; #ifdef DEBUG if (i > 1 && !a->x[i-1]) Bug("cmp called with a->x[a->wds-1] == 0"); if (j > 1 && !b->x[j-1]) Bug("cmp called with b->x[b->wds-1] == 0"); #endif if (i -= j) return i; xa0 = a->x; xa = xa0 + j; xb0 = b->x; xb = xb0 + j; for(;;) { if (*--xa != *--xb) return *xa < *xb ? -1 : 1; if (xa <= xa0) break; } return 0; } static Bigint * diff #ifdef KR_headers (a, b) Bigint *a, *b; #else (Bigint *a, Bigint *b) #endif { Bigint *c; int i, wa, wb; ULong *xa, *xae, *xb, *xbe, *xc; #ifdef ULLong ULLong borrow, y; #else ULong borrow, y; #ifdef Pack_32 ULong z; #endif #endif i = cmp(a,b); if (!i) { c = Balloc(0); c->wds = 1; c->x[0] = 0; return c; } if (i < 0) { c = a; a = b; b = c; i = 1; } else i = 0; c = Balloc(a->k); c->sign = i; wa = a->wds; xa = a->x; xae = xa + wa; wb = b->wds; xb = b->x; xbe = xb + wb; xc = c->x; borrow = 0; #ifdef ULLong do { y = (ULLong)*xa++ - *xb++ - borrow; borrow = y >> 32 & (ULong)1; *xc++ = y & FFFFFFFF; } while(xb < xbe); while(xa < xae) { y = *xa++ - borrow; borrow = y >> 32 & (ULong)1; *xc++ = y & FFFFFFFF; } #else #ifdef Pack_32 do { y = (*xa & 0xffff) - (*xb & 0xffff) - borrow; borrow = (y & 0x10000) >> 16; z = (*xa++ >> 16) - (*xb++ >> 16) - borrow; borrow = (z & 0x10000) >> 16; Storeinc(xc, z, y); } while(xb < xbe); while(xa < xae) { y = (*xa & 0xffff) - borrow; borrow = (y & 0x10000) >> 16; z = (*xa++ >> 16) - borrow; borrow = (z & 0x10000) >> 16; Storeinc(xc, z, y); } #else do { y = *xa++ - *xb++ - borrow; borrow = (y & 0x10000) >> 16; *xc++ = y & 0xffff; } while(xb < xbe); while(xa < xae) { y = *xa++ - borrow; borrow = (y & 0x10000) >> 16; *xc++ = y & 0xffff; } #endif #endif while(!*--xc) wa--; c->wds = wa; return c; } static double ulp #ifdef KR_headers (x) U *x; #else (U *x) #endif { Long L; U u; L = (word0(x) & Exp_mask) - (P-1)*Exp_msk1; #ifndef Avoid_Underflow #ifndef Sudden_Underflow if (L > 0) { #endif #endif #ifdef IBM L |= Exp_msk1 >> 4; #endif word0(&u) = L; word1(&u) = 0; #ifndef Avoid_Underflow #ifndef Sudden_Underflow } else { L = -L >> Exp_shift; if (L < Exp_shift) { word0(&u) = 0x80000 >> L; word1(&u) = 0; } else { word0(&u) = 0; L -= Exp_shift; word1(&u) = L >= 31 ? 1 : 1 << 31 - L; } } #endif #endif return dval(&u); } static double b2d #ifdef KR_headers (a, e) Bigint *a; int *e; #else (Bigint *a, int *e) #endif { ULong *xa, *xa0, w, y, z; int k; U d; #ifdef VAX ULong d0, d1; #else #define d0 word0(&d) #define d1 word1(&d) #endif xa0 = a->x; xa = xa0 + a->wds; y = *--xa; #ifdef DEBUG if (!y) Bug("zero y in b2d"); #endif k = hi0bits(y); *e = 32 - k; #ifdef Pack_32 if (k < Ebits) { d0 = Exp_1 | y >> (Ebits - k); w = xa > xa0 ? *--xa : 0; d1 = y << ((32-Ebits) + k) | w >> (Ebits - k); goto ret_d; } z = xa > xa0 ? 
*--xa : 0; if (k -= Ebits) { d0 = Exp_1 | y << k | z >> (32 - k); y = xa > xa0 ? *--xa : 0; d1 = z << k | y >> (32 - k); } else { d0 = Exp_1 | y; d1 = z; } #else if (k < Ebits + 16) { z = xa > xa0 ? *--xa : 0; d0 = Exp_1 | y << k - Ebits | z >> Ebits + 16 - k; w = xa > xa0 ? *--xa : 0; y = xa > xa0 ? *--xa : 0; d1 = z << k + 16 - Ebits | w << k - Ebits | y >> 16 + Ebits - k; goto ret_d; } z = xa > xa0 ? *--xa : 0; w = xa > xa0 ? *--xa : 0; k -= Ebits + 16; d0 = Exp_1 | y << k + 16 | z << k | w >> 16 - k; y = xa > xa0 ? *--xa : 0; d1 = w << k + 16 | y << k; #endif ret_d: #ifdef VAX word0(&d) = d0 >> 16 | d0 << 16; word1(&d) = d1 >> 16 | d1 << 16; #else #undef d0 #undef d1 #endif return dval(&d); } static Bigint * d2b #ifdef KR_headers (d, e, bits) U *d; int *e, *bits; #else (U *d, int *e, int *bits) #endif { Bigint *b; int de, k; ULong *x, y, z; #ifndef Sudden_Underflow int i; #endif #ifdef VAX ULong d0, d1; d0 = word0(d) >> 16 | word0(d) << 16; d1 = word1(d) >> 16 | word1(d) << 16; #else #define d0 word0(d) #define d1 word1(d) #endif #ifdef Pack_32 b = Balloc(1); #else b = Balloc(2); #endif x = b->x; z = d0 & Frac_mask; d0 &= 0x7fffffff; /* clear sign bit, which we ignore */ #ifdef Sudden_Underflow de = (int)(d0 >> Exp_shift); #ifndef IBM z |= Exp_msk11; #endif #else if ((de = (int)(d0 >> Exp_shift))) z |= Exp_msk1; #endif #ifdef Pack_32 if ((y = d1)) { if ((k = lo0bits(&y))) { x[0] = y | z << (32 - k); z >>= k; } else x[0] = y; #ifndef Sudden_Underflow i = #endif b->wds = (x[1] = z) ? 2 : 1; } else { k = lo0bits(&z); x[0] = z; #ifndef Sudden_Underflow i = #endif b->wds = 1; k += 32; } #else if (y = d1) { if (k = lo0bits(&y)) if (k >= 16) { x[0] = y | z << 32 - k & 0xffff; x[1] = z >> k - 16 & 0xffff; x[2] = z >> k; i = 2; } else { x[0] = y & 0xffff; x[1] = y >> 16 | z << 16 - k & 0xffff; x[2] = z >> k & 0xffff; x[3] = z >> k+16; i = 3; } else { x[0] = y & 0xffff; x[1] = y >> 16; x[2] = z & 0xffff; x[3] = z >> 16; i = 3; } } else { #ifdef DEBUG if (!z) Bug("Zero passed to d2b"); #endif k = lo0bits(&z); if (k >= 16) { x[0] = z; i = 0; } else { x[0] = z & 0xffff; x[1] = z >> 16; i = 1; } k += 32; } while(!x[i]) --i; b->wds = i + 1; #endif #ifndef Sudden_Underflow if (de) { #endif #ifdef IBM *e = (de - Bias - (P-1) << 2) + k; *bits = 4*P + 8 - k - hi0bits(word0(d) & Frac_mask); #else *e = de - Bias - (P-1) + k; *bits = P - k; #endif #ifndef Sudden_Underflow } else { *e = de - Bias - (P-1) + 1 + k; #ifdef Pack_32 *bits = 32*i - hi0bits(x[i-1]); #else *bits = (i+2)*16 - hi0bits(x[i]); #endif } #endif return b; } #undef d0 #undef d1 static double ratio #ifdef KR_headers (a, b) Bigint *a, *b; #else (Bigint *a, Bigint *b) #endif { U da, db; int k, ka, kb; dval(&da) = b2d(a, &ka); dval(&db) = b2d(b, &kb); #ifdef Pack_32 k = ka - kb + 32*(a->wds - b->wds); #else k = ka - kb + 16*(a->wds - b->wds); #endif #ifdef IBM if (k > 0) { word0(&da) += (k >> 2)*Exp_msk1; if (k &= 3) dval(&da) *= 1 << k; } else { k = -k; word0(&db) += (k >> 2)*Exp_msk1; if (k &= 3) dval(&db) *= 1 << k; } #else if (k > 0) word0(&da) += k*Exp_msk1; else { k = -k; word0(&db) += k*Exp_msk1; } #endif return dval(&da) / dval(&db); } static CONST double tens[] = { 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22 #ifdef VAX , 1e23, 1e24 #endif }; static CONST double #ifdef IEEE_Arith bigtens[] = { 1e16, 1e32, 1e64, 1e128, 1e256 }; static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128, #ifdef Avoid_Underflow 9007199254740992.*9007199254740992.e-256 /* 
= 2^106 * 1e-256 */ #else 1e-256 #endif }; /* The factor of 2^53 in tinytens[4] helps us avoid setting the underflow */ /* flag unnecessarily. It leads to a song and dance at the end of strtod. */ #define Scale_Bit 0x10 #define n_bigtens 5 #else #ifdef IBM bigtens[] = { 1e16, 1e32, 1e64 }; static CONST double tinytens[] = { 1e-16, 1e-32, 1e-64 }; #define n_bigtens 3 #else bigtens[] = { 1e16, 1e32 }; static CONST double tinytens[] = { 1e-16, 1e-32 }; #define n_bigtens 2 #endif #endif #undef Need_Hexdig #ifdef INFNAN_CHECK #ifndef No_Hex_NaN #define Need_Hexdig #endif #endif #ifndef Need_Hexdig #ifndef NO_HEX_FP #define Need_Hexdig #endif #endif #ifdef Need_Hexdig /*{*/ static unsigned char hexdig[256]; static void #ifdef KR_headers htinit(h, s, inc) unsigned char *h; unsigned char *s; int inc; #else htinit(unsigned char *h, unsigned char *s, int inc) #endif { int i, j; for(i = 0; (j = s[i]) !=0; i++) h[j] = i + inc; } static void #ifdef KR_headers hexdig_init() #else hexdig_init(void) #endif { #define USC (unsigned char *) htinit(hexdig, USC "0123456789", 0x10); htinit(hexdig, USC "abcdef", 0x10 + 10); htinit(hexdig, USC "ABCDEF", 0x10 + 10); } #endif /* } Need_Hexdig */ #ifdef INFNAN_CHECK #ifndef NAN_WORD0 #define NAN_WORD0 0x7ff80000 #endif #ifndef NAN_WORD1 #define NAN_WORD1 0 #endif static int match #ifdef KR_headers (sp, t) char **sp, *t; #else (const char **sp, const char *t) #endif { int c, d; CONST char *s = *sp; while((d = *t++)) { if ((c = *++s) >= 'A' && c <= 'Z') c += 'a' - 'A'; if (c != d) return 0; } *sp = s + 1; return 1; } #ifndef No_Hex_NaN static void hexnan #ifdef KR_headers (rvp, sp) U *rvp; CONST char **sp; #else (U *rvp, const char **sp) #endif { ULong c, x[2]; CONST char *s; int c1, havedig, udx0, xshift; if (!hexdig['0']) hexdig_init(); x[0] = x[1] = 0; havedig = xshift = 0; udx0 = 1; s = *sp; /* allow optional initial 0x or 0X */ while((c = *(CONST unsigned char*)(s+1)) && c <= ' ') ++s; if (s[1] == '0' && (s[2] == 'x' || s[2] == 'X')) s += 2; while((c = *(CONST unsigned char*)++s)) { if ((c1 = hexdig[c])) c = c1 & 0xf; else if (c <= ' ') { if (udx0 && havedig) { udx0 = 0; xshift = 1; } continue; } #ifdef GDTOA_NON_PEDANTIC_NANCHECK else if (/*(*/ c == ')' && havedig) { *sp = s + 1; break; } else return; /* invalid form: don't change *sp */ #else else { do { if (/*(*/ c == ')') { *sp = s + 1; break; } } while((c = *++s)); break; } #endif havedig = 1; if (xshift) { xshift = 0; x[0] = x[1]; x[1] = 0; } if (udx0) x[0] = (x[0] << 4) | (x[1] >> 28); x[1] = (x[1] << 4) | c; } if ((x[0] &= 0xfffff) || x[1]) { word0(rvp) = Exp_mask | x[0]; word1(rvp) = x[1]; } } #endif /*No_Hex_NaN*/ #endif /* INFNAN_CHECK */ #ifdef Pack_32 #define ULbits 32 #define kshift 5 #define kmask 31 #else #define ULbits 16 #define kshift 4 #define kmask 15 #endif #if !defined(NO_HEX_FP) || defined(Honor_FLT_ROUNDS) /*{*/ static Bigint * #ifdef KR_headers increment(b) Bigint *b; #else increment(Bigint *b) #endif { ULong *x, *xe; Bigint *b1; x = b->x; xe = x + b->wds; do { if (*x < (ULong)0xffffffffL) { ++*x; return b; } *x++ = 0; } while(x < xe); { if (b->wds >= b->maxwds) { b1 = Balloc(b->k+1); Bcopy(b1,b); Bfree(b); b = b1; } b->x[b->wds++] = 1; } return b; } #endif /*}*/ #ifndef NO_HEX_FP /*{*/ static void #ifdef KR_headers rshift(b, k) Bigint *b; int k; #else rshift(Bigint *b, int k) #endif { ULong *x, *x1, *xe, y; int n; x = x1 = b->x; n = k >> kshift; if (n < b->wds) { xe = x + b->wds; x += n; if (k &= kmask) { n = 32 - k; y = *x++ >> k; while(x < xe) { *x1++ = (y | (*x << n)) & 0xffffffff; y 
= *x++ >> k; } if ((*x1 = y) !=0) x1++; } else while(x < xe) *x1++ = *x++; } if ((b->wds = x1 - b->x) == 0) b->x[0] = 0; } static ULong #ifdef KR_headers any_on(b, k) Bigint *b; int k; #else any_on(Bigint *b, int k) #endif { int n, nwds; ULong *x, *x0, x1, x2; x = b->x; nwds = b->wds; n = k >> kshift; if (n > nwds) n = nwds; else if (n < nwds && (k &= kmask)) { x1 = x2 = x[n]; x1 >>= k; x1 <<= k; if (x1 != x2) return 1; } x0 = x; x += n; while(x > x0) if (*--x) return 1; return 0; } enum { /* rounding values: same as FLT_ROUNDS */ Round_zero = 0, Round_near = 1, Round_up = 2, Round_down = 3 }; void #ifdef KR_headers gethex(sp, rvp, rounding, sign) CONST char **sp; U *rvp; int rounding, sign; #else gethex( CONST char **sp, U *rvp, int rounding, int sign) #endif { Bigint *b; CONST unsigned char *decpt, *s0, *s, *s1; Long e, e1; ULong L, lostbits, *x; int big, denorm, esign, havedig, k, n, nbits, up, zret; #ifdef IBM int j; #endif enum { #ifdef IEEE_Arith /*{{*/ emax = 0x7fe - Bias - P + 1, emin = Emin - P + 1 #else /*}{*/ emin = Emin - P, #ifdef VAX emax = 0x7ff - Bias - P + 1 #endif #ifdef IBM emax = 0x7f - Bias - P #endif #endif /*}}*/ }; #ifdef USE_LOCALE int i; #ifdef NO_LOCALE_CACHE const unsigned char *decimalpoint = (unsigned char*) localeconv()->decimal_point; #else const unsigned char *decimalpoint; static unsigned char *decimalpoint_cache; if (!(s0 = decimalpoint_cache)) { s0 = (unsigned char*)localeconv()->decimal_point; if ((decimalpoint_cache = (unsigned char*) MALLOC(strlen((CONST char*)s0) + 1))) { strcpy((char*)decimalpoint_cache, (CONST char*)s0); s0 = decimalpoint_cache; } } decimalpoint = s0; #endif #endif if (!hexdig['0']) hexdig_init(); havedig = 0; s0 = *(CONST unsigned char **)sp + 2; while(s0[havedig] == '0') havedig++; s0 += havedig; s = s0; decpt = 0; zret = 0; e = 0; if (hexdig[*s]) havedig++; else { zret = 1; #ifdef USE_LOCALE for(i = 0; decimalpoint[i]; ++i) { if (s[i] != decimalpoint[i]) goto pcheck; } decpt = s += i; #else if (*s != '.') goto pcheck; decpt = ++s; #endif if (!hexdig[*s]) goto pcheck; while(*s == '0') s++; if (hexdig[*s]) zret = 0; havedig = 1; s0 = s; } while(hexdig[*s]) s++; #ifdef USE_LOCALE if (*s == *decimalpoint && !decpt) { for(i = 1; decimalpoint[i]; ++i) { if (s[i] != decimalpoint[i]) goto pcheck; } decpt = s += i; #else if (*s == '.' 
&& !decpt) { decpt = ++s; #endif while(hexdig[*s]) s++; }/*}*/ if (decpt) e = -(((Long)(s-decpt)) << 2); pcheck: s1 = s; big = esign = 0; switch(*s) { case 'p': case 'P': switch(*++s) { case '-': esign = 1; /* no break */ case '+': s++; } if ((n = hexdig[*s]) == 0 || n > 0x19) { s = s1; break; } e1 = n - 0x10; while((n = hexdig[*++s]) !=0 && n <= 0x19) { if (e1 & 0xf8000000) big = 1; e1 = 10*e1 + n - 0x10; } if (esign) e1 = -e1; e += e1; } *sp = (char*)s; if (!havedig) *sp = (char*)s0 - 1; if (zret) goto retz1; if (big) { if (esign) { #ifdef IEEE_Arith switch(rounding) { case Round_up: if (sign) break; goto ret_tiny; case Round_down: if (!sign) break; goto ret_tiny; } #endif goto retz; #ifdef IEEE_Arith ret_tiny: #ifndef NO_ERRNO errno = ERANGE; #endif word0(rvp) = 0; word1(rvp) = 1; return; #endif /* IEEE_Arith */ } switch(rounding) { case Round_near: goto ovfl1; case Round_up: if (!sign) goto ovfl1; goto ret_big; case Round_down: if (sign) goto ovfl1; goto ret_big; } ret_big: word0(rvp) = Big0; word1(rvp) = Big1; return; } n = s1 - s0 - 1; for(k = 0; n > (1 << (kshift-2)) - 1; n >>= 1) k++; b = Balloc(k); x = b->x; n = 0; L = 0; #ifdef USE_LOCALE for(i = 0; decimalpoint[i+1]; ++i); #endif while(s1 > s0) { #ifdef USE_LOCALE if (*--s1 == decimalpoint[i]) { s1 -= i; continue; } #else if (*--s1 == '.') continue; #endif if (n == ULbits) { *x++ = L; L = 0; n = 0; } L |= (hexdig[*s1] & 0x0f) << n; n += 4; } *x++ = L; b->wds = n = x - b->x; n = ULbits*n - hi0bits(L); nbits = Nbits; lostbits = 0; x = b->x; if (n > nbits) { n -= nbits; if (any_on(b,n)) { lostbits = 1; k = n - 1; if (x[k>>kshift] & 1 << (k & kmask)) { lostbits = 2; if (k > 0 && any_on(b,k)) lostbits = 3; } } rshift(b, n); e += n; } else if (n < nbits) { n = nbits - n; b = lshift(b, n); e -= n; x = b->x; } if (e > Emax) { ovfl: Bfree(b); ovfl1: #ifndef NO_ERRNO errno = ERANGE; #endif word0(rvp) = Exp_mask; word1(rvp) = 0; return; } denorm = 0; if (e < emin) { denorm = 1; n = emin - e; if (n >= nbits) { #ifdef IEEE_Arith /*{*/ switch (rounding) { case Round_near: if (n == nbits && (n < 2 || any_on(b,n-1))) goto ret_tiny; break; case Round_up: if (!sign) goto ret_tiny; break; case Round_down: if (sign) goto ret_tiny; } #endif /* } IEEE_Arith */ Bfree(b); retz: #ifndef NO_ERRNO errno = ERANGE; #endif retz1: rvp->d = 0.; return; } k = n - 1; if (lostbits) lostbits = 1; else if (k > 0) lostbits = any_on(b,k); if (x[k>>kshift] & 1 << (k & kmask)) lostbits |= 2; nbits -= n; rshift(b,n); e = emin; } if (lostbits) { up = 0; switch(rounding) { case Round_zero: break; case Round_near: if (lostbits & 2 && (lostbits & 1) | (x[0] & 1)) up = 1; break; case Round_up: up = 1 - sign; break; case Round_down: up = sign; } if (up) { k = b->wds; b = increment(b); x = b->x; if (denorm) { #if 0 if (nbits == Nbits - 1 && x[nbits >> kshift] & 1 << (nbits & kmask)) denorm = 0; /* not currently used */ #endif } else if (b->wds > k || ((n = nbits & kmask) !=0 && hi0bits(x[k-1]) < 32-n)) { rshift(b,1); if (++e > Emax) goto ovfl; } } } #ifdef IEEE_Arith if (denorm) word0(rvp) = b->wds > 1 ? 
b->x[1] & ~0x100000 : 0; else word0(rvp) = (b->x[1] & ~0x100000) | ((e + 0x3ff + 52) << 20); word1(rvp) = b->x[0]; #endif #ifdef IBM if ((j = e & 3)) { k = b->x[0] & ((1 << j) - 1); rshift(b,j); if (k) { switch(rounding) { case Round_up: if (!sign) increment(b); break; case Round_down: if (sign) increment(b); break; case Round_near: j = 1 << (j-1); if (k & j && ((k & (j-1)) | lostbits)) increment(b); } } } e >>= 2; word0(rvp) = b->x[1] | ((e + 65 + 13) << 24); word1(rvp) = b->x[0]; #endif #ifdef VAX /* The next two lines ignore swap of low- and high-order 2 bytes. */ /* word0(rvp) = (b->x[1] & ~0x800000) | ((e + 129 + 55) << 23); */ /* word1(rvp) = b->x[0]; */ word0(rvp) = ((b->x[1] & ~0x800000) >> 16) | ((e + 129 + 55) << 7) | (b->x[1] << 16); word1(rvp) = (b->x[0] >> 16) | (b->x[0] << 16); #endif Bfree(b); } #endif /*!NO_HEX_FP}*/ static int #ifdef KR_headers dshift(b, p2) Bigint *b; int p2; #else dshift(Bigint *b, int p2) #endif { int rv = hi0bits(b->x[b->wds-1]) - 4; if (p2 > 0) rv -= p2; return rv & kmask; } static int quorem #ifdef KR_headers (b, S) Bigint *b, *S; #else (Bigint *b, Bigint *S) #endif { int n; ULong *bx, *bxe, q, *sx, *sxe; #ifdef ULLong ULLong borrow, carry, y, ys; #else ULong borrow, carry, y, ys; #ifdef Pack_32 ULong si, z, zs; #endif #endif n = S->wds; #ifdef DEBUG /*debug*/ if (b->wds > n) /*debug*/ Bug("oversize b in quorem"); #endif if (b->wds < n) return 0; sx = S->x; sxe = sx + --n; bx = b->x; bxe = bx + n; q = *bxe / (*sxe + 1); /* ensure q <= true quotient */ #ifdef DEBUG #ifdef NO_STRTOD_BIGCOMP /*debug*/ if (q > 9) #else /* An oversized q is possible when quorem is called from bigcomp and */ /* the input is near, e.g., twice the smallest denormalized number. */ /*debug*/ if (q > 15) #endif /*debug*/ Bug("oversized quotient in quorem"); #endif if (q) { borrow = 0; carry = 0; do { #ifdef ULLong ys = *sx++ * (ULLong)q + carry; carry = ys >> 32; y = *bx - (ys & FFFFFFFF) - borrow; borrow = y >> 32 & (ULong)1; *bx++ = y & FFFFFFFF; #else #ifdef Pack_32 si = *sx++; ys = (si & 0xffff) * q + carry; zs = (si >> 16) * q + (ys >> 16); carry = zs >> 16; y = (*bx & 0xffff) - (ys & 0xffff) - borrow; borrow = (y & 0x10000) >> 16; z = (*bx >> 16) - (zs & 0xffff) - borrow; borrow = (z & 0x10000) >> 16; Storeinc(bx, z, y); #else ys = *sx++ * q + carry; carry = ys >> 16; y = *bx - (ys & 0xffff) - borrow; borrow = (y & 0x10000) >> 16; *bx++ = y & 0xffff; #endif #endif } while(sx <= sxe); if (!*bxe) { bx = b->x; while(--bxe > bx && !*bxe) --n; b->wds = n; } } if (cmp(b, S) >= 0) { q++; borrow = 0; carry = 0; bx = b->x; sx = S->x; do { #ifdef ULLong ys = *sx++ + carry; carry = ys >> 32; y = *bx - (ys & FFFFFFFF) - borrow; borrow = y >> 32 & (ULong)1; *bx++ = y & FFFFFFFF; #else #ifdef Pack_32 si = *sx++; ys = (si & 0xffff) + carry; zs = (si >> 16) + (ys >> 16); carry = zs >> 16; y = (*bx & 0xffff) - (ys & 0xffff) - borrow; borrow = (y & 0x10000) >> 16; z = (*bx >> 16) - (zs & 0xffff) - borrow; borrow = (z & 0x10000) >> 16; Storeinc(bx, z, y); #else ys = *sx++ + carry; carry = ys >> 16; y = *bx - (ys & 0xffff) - borrow; borrow = (y & 0x10000) >> 16; *bx++ = y & 0xffff; #endif #endif } while(sx <= sxe); bx = b->x; bxe = bx + n; if (!*bxe) { while(--bxe > bx && !*bxe) --n; b->wds = n; } } return q; } #if defined(Avoid_Underflow) || !defined(NO_STRTOD_BIGCOMP) /*{*/ static double sulp #ifdef KR_headers (x, bc) U *x; BCinfo *bc; #else (U *x, BCinfo *bc) #endif { U u; double rv; int i; rv = ulp(x); if (!bc->scale || (i = 2*P + 1 - ((word0(x) & Exp_mask) >> Exp_shift)) <= 0) return 
rv; /* Is there an example where i <= 0 ? */ word0(&u) = Exp_1 + (i << Exp_shift); word1(&u) = 0; return rv * u.d; } #endif /*}*/ #ifndef NO_STRTOD_BIGCOMP static void bigcomp #ifdef KR_headers (rv, s0, bc) U *rv; CONST char *s0; BCinfo *bc; #else (U *rv, const char *s0, BCinfo *bc) #endif { Bigint *b, *d; int b2, bbits, d2, dd, dig, dsign, i, j, nd, nd0, p2, p5, speccase; dsign = bc->dsign; nd = bc->nd; nd0 = bc->nd0; p5 = nd + bc->e0 - 1; speccase = 0; #ifndef Sudden_Underflow if (rv->d == 0.) { /* special case: value near underflow-to-zero */ /* threshold was rounded to zero */ b = i2b(1); p2 = Emin - P + 1; bbits = 1; #ifdef Avoid_Underflow word0(rv) = (P+2) << Exp_shift; #else word1(rv) = 1; #endif i = 0; #ifdef Honor_FLT_ROUNDS if (bc->rounding == 1) #endif { speccase = 1; --p2; dsign = 0; goto have_i; } } else #endif b = d2b(rv, &p2, &bbits); #ifdef Avoid_Underflow p2 -= bc->scale; #endif /* floor(log2(rv)) == bbits - 1 + p2 */ /* Check for denormal case. */ i = P - bbits; if (i > (j = P - Emin - 1 + p2)) { #ifdef Sudden_Underflow Bfree(b); b = i2b(1); p2 = Emin; i = P - 1; #ifdef Avoid_Underflow word0(rv) = (1 + bc->scale) << Exp_shift; #else word0(rv) = Exp_msk1; #endif word1(rv) = 0; #else i = j; #endif } #ifdef Honor_FLT_ROUNDS if (bc->rounding != 1) { if (i > 0) b = lshift(b, i); if (dsign) b = increment(b); } else #endif { b = lshift(b, ++i); b->x[0] |= 1; } #ifndef Sudden_Underflow have_i: #endif p2 -= p5 + i; d = i2b(1); /* Arrange for convenient computation of quotients: * shift left if necessary so divisor has 4 leading 0 bits. */ if (p5 > 0) d = pow5mult(d, p5); else if (p5 < 0) b = pow5mult(b, -p5); if (p2 > 0) { b2 = p2; d2 = 0; } else { b2 = 0; d2 = -p2; } i = dshift(d, d2); if ((b2 += i) > 0) b = lshift(b, b2); if ((d2 += i) > 0) d = lshift(d, d2); /* Now b/d = exactly half-way between the two floating-point values */ /* on either side of the input string. Compute first digit of b/d. */ if (!(dig = quorem(b,d))) { b = multadd(b, 10, 0); /* very unlikely */ dig = quorem(b,d); } /* Compare b/d with s0 */ for(i = 0; i < nd0; ) { if ((dd = s0[i++] - '0' - dig)) goto ret; if (!b->x[0] && b->wds == 1) { if (i < nd) dd = 1; goto ret; } b = multadd(b, 10, 0); dig = quorem(b,d); } for(j = bc->dp1; i++ < nd;) { if ((dd = s0[j++] - '0' - dig)) goto ret; if (!b->x[0] && b->wds == 1) { if (i < nd) dd = 1; goto ret; } b = multadd(b, 10, 0); dig = quorem(b,d); } if (b->x[0] || b->wds > 1) dd = -1; ret: Bfree(b); Bfree(d); #ifdef Honor_FLT_ROUNDS if (bc->rounding != 1) { if (dd < 0) { if (bc->rounding == 0) { if (!dsign) goto retlow1; } else if (dsign) goto rethi1; } else if (dd > 0) { if (bc->rounding == 0) { if (dsign) goto rethi1; goto ret1; } if (!dsign) goto rethi1; dval(rv) += 2.*sulp(rv,bc); } else { bc->inexact = 0; if (dsign) goto rethi1; } } else #endif if (speccase) { if (dd <= 0) rv->d = 0.; } else if (dd < 0) { if (!dsign) /* does not happen for round-near */ retlow1: dval(rv) -= sulp(rv,bc); } else if (dd > 0) { if (dsign) { rethi1: dval(rv) += sulp(rv,bc); } } else { /* Exact half-way case: apply round-even rule. 
*/ if ((j = ((word0(rv) & Exp_mask) >> Exp_shift) - bc->scale) <= 0) { i = 1 - j; if (i <= 31) { if (word1(rv) & (0x1 << i)) goto odd; } else if (word0(rv) & (0x1 << (i-32))) goto odd; } else if (word1(rv) & 1) { odd: if (dsign) goto rethi1; goto retlow1; } } #ifdef Honor_FLT_ROUNDS ret1: #endif return; } #endif /* NO_STRTOD_BIGCOMP */ double fpconv_strtod #ifdef KR_headers (s00, se) CONST char *s00; char **se; #else (const char *s00, char **se) #endif { int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, e, e1; int esign, i, j, k, nd, nd0, nf, nz, nz0, nz1, sign; CONST char *s, *s0, *s1; double aadj, aadj1; Long L; U aadj2, adj, rv, rv0; ULong y, z; BCinfo bc; Bigint *bb, *bb1, *bd, *bd0, *bs, *delta; #ifdef Avoid_Underflow ULong Lsb, Lsb1; #endif #ifdef SET_INEXACT int oldinexact; #endif #ifndef NO_STRTOD_BIGCOMP int req_bigcomp = 0; #endif #ifdef Honor_FLT_ROUNDS /*{*/ #ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */ bc.rounding = Flt_Rounds; #else /*}{*/ bc.rounding = 1; switch(fegetround()) { case FE_TOWARDZERO: bc.rounding = 0; break; case FE_UPWARD: bc.rounding = 2; break; case FE_DOWNWARD: bc.rounding = 3; } #endif /*}}*/ #endif /*}*/ #ifdef USE_LOCALE CONST char *s2; #endif sign = nz0 = nz1 = nz = bc.dplen = bc.uflchk = 0; dval(&rv) = 0.; for(s = s00;;s++) switch(*s) { case '-': sign = 1; /* no break */ case '+': if (*++s) goto break2; /* no break */ case 0: goto ret0; case '\t': case '\n': case '\v': case '\f': case '\r': case ' ': continue; default: goto break2; } break2: if (*s == '0') { #ifndef NO_HEX_FP /*{*/ switch(s[1]) { case 'x': case 'X': #ifdef Honor_FLT_ROUNDS gethex(&s, &rv, bc.rounding, sign); #else gethex(&s, &rv, 1, sign); #endif goto ret; } #endif /*}*/ nz0 = 1; while(*++s == '0') ; if (!*s) goto ret; } s0 = s; y = z = 0; for(nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++) if (nd < 9) y = 10*y + c - '0'; else if (nd < 16) z = 10*z + c - '0'; nd0 = nd; bc.dp0 = bc.dp1 = s - s0; for(s1 = s; s1 > s0 && *--s1 == '0'; ) ++nz1; #ifdef USE_LOCALE s1 = localeconv()->decimal_point; if (c == *s1) { c = '.'; if (*++s1) { s2 = s; for(;;) { if (*++s2 != *s1) { c = 0; break; } if (!*++s1) { s = s2; break; } } } } #endif if (c == '.') { c = *++s; bc.dp1 = s - s0; bc.dplen = bc.dp1 - bc.dp0; if (!nd) { for(; c == '0'; c = *++s) nz++; if (c > '0' && c <= '9') { bc.dp0 = s0 - s; bc.dp1 = bc.dp0 + bc.dplen; s0 = s; nf += nz; nz = 0; goto have_dig; } goto dig_done; } for(; c >= '0' && c <= '9'; c = *++s) { have_dig: nz++; if (c -= '0') { nf += nz; for(i = 1; i < nz; i++) if (nd++ < 9) y *= 10; else if (nd <= DBL_DIG + 1) z *= 10; if (nd++ < 9) y = 10*y + c; else if (nd <= DBL_DIG + 1) z = 10*z + c; nz = nz1 = 0; } } } dig_done: e = 0; if (c == 'e' || c == 'E') { if (!nd && !nz && !nz0) { goto ret0; } s00 = s; esign = 0; switch(c = *++s) { case '-': esign = 1; case '+': c = *++s; } if (c >= '0' && c <= '9') { while(c == '0') c = *++s; if (c > '0' && c <= '9') { L = c - '0'; s1 = s; while((c = *++s) >= '0' && c <= '9') L = 10*L + c - '0'; if (s - s1 > 8 || L > 19999) /* Avoid confusion from exponents * so large that e might overflow. 
*/ e = 19999; /* safe for 16 bit ints */ else e = (int)L; if (esign) e = -e; } else e = 0; } else s = s00; } if (!nd) { if (!nz && !nz0) { #ifdef INFNAN_CHECK /* Check for Nan and Infinity */ if (!bc.dplen) switch(c) { case 'i': case 'I': if (match(&s,"nf")) { --s; if (!match(&s,"inity")) ++s; word0(&rv) = 0x7ff00000; word1(&rv) = 0; goto ret; } break; case 'n': case 'N': if (match(&s, "an")) { word0(&rv) = NAN_WORD0; word1(&rv) = NAN_WORD1; #ifndef No_Hex_NaN if (*s == '(') /*)*/ hexnan(&rv, &s); #endif goto ret; } } #endif /* INFNAN_CHECK */ ret0: s = s00; sign = 0; } goto ret; } bc.e0 = e1 = e -= nf; /* Now we have nd0 digits, starting at s0, followed by a * decimal point, followed by nd-nd0 digits. The number we're * after is the integer represented by those digits times * 10**e */ if (!nd0) nd0 = nd; k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1; dval(&rv) = y; if (k > 9) { #ifdef SET_INEXACT if (k > DBL_DIG) oldinexact = get_inexact(); #endif dval(&rv) = tens[k - 9] * dval(&rv) + z; } bd0 = 0; if (nd <= DBL_DIG #ifndef RND_PRODQUOT #ifndef Honor_FLT_ROUNDS && Flt_Rounds == 1 #endif #endif ) { if (!e) goto ret; #ifndef ROUND_BIASED_without_Round_Up if (e > 0) { if (e <= Ten_pmax) { #ifdef VAX goto vax_ovfl_check; #else #ifdef Honor_FLT_ROUNDS /* round correctly FLT_ROUNDS = 2 or 3 */ if (sign) { rv.d = -rv.d; sign = 0; } #endif /* rv = */ rounded_product(dval(&rv), tens[e]); goto ret; #endif } i = DBL_DIG - nd; if (e <= Ten_pmax + i) { /* A fancier test would sometimes let us do * this for larger i values. */ #ifdef Honor_FLT_ROUNDS /* round correctly FLT_ROUNDS = 2 or 3 */ if (sign) { rv.d = -rv.d; sign = 0; } #endif e -= i; dval(&rv) *= tens[i]; #ifdef VAX /* VAX exponent range is so narrow we must * worry about overflow here... */ vax_ovfl_check: word0(&rv) -= P*Exp_msk1; /* rv = */ rounded_product(dval(&rv), tens[e]); if ((word0(&rv) & Exp_mask) > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) goto ovfl; word0(&rv) += P*Exp_msk1; #else /* rv = */ rounded_product(dval(&rv), tens[e]); #endif goto ret; } } #ifndef Inaccurate_Divide else if (e >= -Ten_pmax) { #ifdef Honor_FLT_ROUNDS /* round correctly FLT_ROUNDS = 2 or 3 */ if (sign) { rv.d = -rv.d; sign = 0; } #endif /* rv = */ rounded_quotient(dval(&rv), tens[-e]); goto ret; } #endif #endif /* ROUND_BIASED_without_Round_Up */ } e1 += nd - k; #ifdef IEEE_Arith #ifdef SET_INEXACT bc.inexact = 1; if (k <= DBL_DIG) oldinexact = get_inexact(); #endif #ifdef Avoid_Underflow bc.scale = 0; #endif #ifdef Honor_FLT_ROUNDS if (bc.rounding >= 2) { if (sign) bc.rounding = bc.rounding == 2 ? 
0 : 2; else if (bc.rounding != 2) bc.rounding = 0; } #endif #endif /*IEEE_Arith*/ /* Get starting approximation = rv * 10**e1 */ if (e1 > 0) { if ((i = e1 & 15)) dval(&rv) *= tens[i]; if (e1 &= ~15) { if (e1 > DBL_MAX_10_EXP) { ovfl: /* Can't trust HUGE_VAL */ #ifdef IEEE_Arith #ifdef Honor_FLT_ROUNDS switch(bc.rounding) { case 0: /* toward 0 */ case 3: /* toward -infinity */ word0(&rv) = Big0; word1(&rv) = Big1; break; default: word0(&rv) = Exp_mask; word1(&rv) = 0; } #else /*Honor_FLT_ROUNDS*/ word0(&rv) = Exp_mask; word1(&rv) = 0; #endif /*Honor_FLT_ROUNDS*/ #ifdef SET_INEXACT /* set overflow bit */ dval(&rv0) = 1e300; dval(&rv0) *= dval(&rv0); #endif #else /*IEEE_Arith*/ word0(&rv) = Big0; word1(&rv) = Big1; #endif /*IEEE_Arith*/ range_err: if (bd0) { Bfree(bb); Bfree(bd); Bfree(bs); Bfree(bd0); Bfree(delta); } #ifndef NO_ERRNO errno = ERANGE; #endif goto ret; } e1 >>= 4; for(j = 0; e1 > 1; j++, e1 >>= 1) if (e1 & 1) dval(&rv) *= bigtens[j]; /* The last multiplication could overflow. */ word0(&rv) -= P*Exp_msk1; dval(&rv) *= bigtens[j]; if ((z = word0(&rv) & Exp_mask) > Exp_msk1*(DBL_MAX_EXP+Bias-P)) goto ovfl; if (z > Exp_msk1*(DBL_MAX_EXP+Bias-1-P)) { /* set to largest number */ /* (Can't trust DBL_MAX) */ word0(&rv) = Big0; word1(&rv) = Big1; } else word0(&rv) += P*Exp_msk1; } } else if (e1 < 0) { e1 = -e1; if ((i = e1 & 15)) dval(&rv) /= tens[i]; if (e1 >>= 4) { if (e1 >= 1 << n_bigtens) goto undfl; #ifdef Avoid_Underflow if (e1 & Scale_Bit) bc.scale = 2*P; for(j = 0; e1 > 0; j++, e1 >>= 1) if (e1 & 1) dval(&rv) *= tinytens[j]; if (bc.scale && (j = 2*P + 1 - ((word0(&rv) & Exp_mask) >> Exp_shift)) > 0) { /* scaled rv is denormal; clear j low bits */ if (j >= 32) { if (j > 54) goto undfl; word1(&rv) = 0; if (j >= 53) word0(&rv) = (P+2)*Exp_msk1; else word0(&rv) &= 0xffffffff << (j-32); } else word1(&rv) &= 0xffffffff << j; } #else for(j = 0; e1 > 1; j++, e1 >>= 1) if (e1 & 1) dval(&rv) *= tinytens[j]; /* The last multiplication could underflow. */ dval(&rv0) = dval(&rv); dval(&rv) *= tinytens[j]; if (!dval(&rv)) { dval(&rv) = 2.*dval(&rv0); dval(&rv) *= tinytens[j]; #endif if (!dval(&rv)) { undfl: dval(&rv) = 0.; goto range_err; } #ifndef Avoid_Underflow word0(&rv) = Tiny0; word1(&rv) = Tiny1; /* The refinement below will clean * this approximation up. */ } #endif } } /* Now the hard part -- adjusting rv to the correct value.*/ /* Put digits into bd: true value = bd * 10^e */ bc.nd = nd - nz1; #ifndef NO_STRTOD_BIGCOMP bc.nd0 = nd0; /* Only needed if nd > strtod_diglim, but done here */ /* to silence an erroneous warning about bc.nd0 */ /* possibly not being initialized. */ if (nd > strtod_diglim) { /* ASSERT(strtod_diglim >= 18); 18 == one more than the */ /* minimum number of decimal digits to distinguish double values */ /* in IEEE arithmetic. 
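 * (Put differently: 17 significant decimal digits always suffice to
 * distinguish two IEEE doubles, so truncating the input to 18 digits here
 * is safe; any further digits are only consulted later, through the
 * bigcomp() comparison against the original string.)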
*/ i = j = 18; if (i > nd0) j += bc.dplen; for(;;) { if (--j < bc.dp1 && j >= bc.dp0) j = bc.dp0 - 1; if (s0[j] != '0') break; --i; } e += nd - i; nd = i; if (nd0 > nd) nd0 = nd; if (nd < 9) { /* must recompute y */ y = 0; for(i = 0; i < nd0; ++i) y = 10*y + s0[i] - '0'; for(j = bc.dp1; i < nd; ++i) y = 10*y + s0[j++] - '0'; } } #endif bd0 = s2b(s0, nd0, nd, y, bc.dplen); for(;;) { bd = Balloc(bd0->k); Bcopy(bd, bd0); bb = d2b(&rv, &bbe, &bbbits); /* rv = bb * 2^bbe */ bs = i2b(1); if (e >= 0) { bb2 = bb5 = 0; bd2 = bd5 = e; } else { bb2 = bb5 = -e; bd2 = bd5 = 0; } if (bbe >= 0) bb2 += bbe; else bd2 -= bbe; bs2 = bb2; #ifdef Honor_FLT_ROUNDS if (bc.rounding != 1) bs2++; #endif #ifdef Avoid_Underflow Lsb = LSB; Lsb1 = 0; j = bbe - bc.scale; i = j + bbbits - 1; /* logb(rv) */ j = P + 1 - bbbits; if (i < Emin) { /* denormal */ i = Emin - i; j -= i; if (i < 32) Lsb <<= i; else if (i < 52) Lsb1 = Lsb << (i-32); else Lsb1 = Exp_mask; } #else /*Avoid_Underflow*/ #ifdef Sudden_Underflow #ifdef IBM j = 1 + 4*P - 3 - bbbits + ((bbe + bbbits - 1) & 3); #else j = P + 1 - bbbits; #endif #else /*Sudden_Underflow*/ j = bbe; i = j + bbbits - 1; /* logb(rv) */ if (i < Emin) /* denormal */ j += P - Emin; else j = P + 1 - bbbits; #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow*/ bb2 += j; bd2 += j; #ifdef Avoid_Underflow bd2 += bc.scale; #endif i = bb2 < bd2 ? bb2 : bd2; if (i > bs2) i = bs2; if (i > 0) { bb2 -= i; bd2 -= i; bs2 -= i; } if (bb5 > 0) { bs = pow5mult(bs, bb5); bb1 = mult(bs, bb); Bfree(bb); bb = bb1; } if (bb2 > 0) bb = lshift(bb, bb2); if (bd5 > 0) bd = pow5mult(bd, bd5); if (bd2 > 0) bd = lshift(bd, bd2); if (bs2 > 0) bs = lshift(bs, bs2); delta = diff(bb, bd); bc.dsign = delta->sign; delta->sign = 0; i = cmp(delta, bs); #ifndef NO_STRTOD_BIGCOMP /*{*/ if (bc.nd > nd && i <= 0) { if (bc.dsign) { /* Must use bigcomp(). */ req_bigcomp = 1; break; } #ifdef Honor_FLT_ROUNDS if (bc.rounding != 1) { if (i < 0) { req_bigcomp = 1; break; } } else #endif i = -1; /* Discarded digits make delta smaller. */ } #endif /*}*/ #ifdef Honor_FLT_ROUNDS /*{*/ if (bc.rounding != 1) { if (i < 0) { /* Error is less than an ulp */ if (!delta->x[0] && delta->wds <= 1) { /* exact */ #ifdef SET_INEXACT bc.inexact = 0; #endif break; } if (bc.rounding) { if (bc.dsign) { adj.d = 1.; goto apply_adj; } } else if (!bc.dsign) { adj.d = -1.; if (!word1(&rv) && !(word0(&rv) & Frac_mask)) { y = word0(&rv) & Exp_mask; #ifdef Avoid_Underflow if (!bc.scale || y > 2*P*Exp_msk1) #else if (y) #endif { delta = lshift(delta,Log2P); if (cmp(delta, bs) <= 0) adj.d = -0.5; } } apply_adj: #ifdef Avoid_Underflow /*{*/ if (bc.scale && (y = word0(&rv) & Exp_mask) <= 2*P*Exp_msk1) word0(&adj) += (2*P+1)*Exp_msk1 - y; #else #ifdef Sudden_Underflow if ((word0(&rv) & Exp_mask) <= P*Exp_msk1) { word0(&rv) += P*Exp_msk1; dval(&rv) += adj.d*ulp(dval(&rv)); word0(&rv) -= P*Exp_msk1; } else #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow}*/ dval(&rv) += adj.d*ulp(&rv); } break; } adj.d = ratio(delta, bs); if (adj.d < 1.) adj.d = 1.; if (adj.d <= 0x7ffffffe) { /* adj = rounding ? 
ceil(adj) : floor(adj); */ y = adj.d; if (y != adj.d) { if (!((bc.rounding>>1) ^ bc.dsign)) y++; adj.d = y; } } #ifdef Avoid_Underflow /*{*/ if (bc.scale && (y = word0(&rv) & Exp_mask) <= 2*P*Exp_msk1) word0(&adj) += (2*P+1)*Exp_msk1 - y; #else #ifdef Sudden_Underflow if ((word0(&rv) & Exp_mask) <= P*Exp_msk1) { word0(&rv) += P*Exp_msk1; adj.d *= ulp(dval(&rv)); if (bc.dsign) dval(&rv) += adj.d; else dval(&rv) -= adj.d; word0(&rv) -= P*Exp_msk1; goto cont; } #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow}*/ adj.d *= ulp(&rv); if (bc.dsign) { if (word0(&rv) == Big0 && word1(&rv) == Big1) goto ovfl; dval(&rv) += adj.d; } else dval(&rv) -= adj.d; goto cont; } #endif /*}Honor_FLT_ROUNDS*/ if (i < 0) { /* Error is less than half an ulp -- check for * special case of mantissa a power of two. */ if (bc.dsign || word1(&rv) || word0(&rv) & Bndry_mask #ifdef IEEE_Arith /*{*/ #ifdef Avoid_Underflow || (word0(&rv) & Exp_mask) <= (2*P+1)*Exp_msk1 #else || (word0(&rv) & Exp_mask) <= Exp_msk1 #endif #endif /*}*/ ) { #ifdef SET_INEXACT if (!delta->x[0] && delta->wds <= 1) bc.inexact = 0; #endif break; } if (!delta->x[0] && delta->wds <= 1) { /* exact result */ #ifdef SET_INEXACT bc.inexact = 0; #endif break; } delta = lshift(delta,Log2P); if (cmp(delta, bs) > 0) goto drop_down; break; } if (i == 0) { /* exactly half-way between */ if (bc.dsign) { if ((word0(&rv) & Bndry_mask1) == Bndry_mask1 && word1(&rv) == ( #ifdef Avoid_Underflow (bc.scale && (y = word0(&rv) & Exp_mask) <= 2*P*Exp_msk1) ? (0xffffffff & (0xffffffff << (2*P+1-(y>>Exp_shift)))) : #endif 0xffffffff)) { /*boundary case -- increment exponent*/ if (word0(&rv) == Big0 && word1(&rv) == Big1) goto ovfl; word0(&rv) = (word0(&rv) & Exp_mask) + Exp_msk1 #ifdef IBM | Exp_msk1 >> 4 #endif ; word1(&rv) = 0; #ifdef Avoid_Underflow bc.dsign = 0; #endif break; } } else if (!(word0(&rv) & Bndry_mask) && !word1(&rv)) { drop_down: /* boundary case -- decrement exponent */ #ifdef Sudden_Underflow /*{{*/ L = word0(&rv) & Exp_mask; #ifdef IBM if (L < Exp_msk1) #else #ifdef Avoid_Underflow if (L <= (bc.scale ? (2*P+1)*Exp_msk1 : Exp_msk1)) #else if (L <= Exp_msk1) #endif /*Avoid_Underflow*/ #endif /*IBM*/ { if (bc.nd >nd) { bc.uflchk = 1; break; } goto undfl; } L -= Exp_msk1; #else /*Sudden_Underflow}{*/ #ifdef Avoid_Underflow if (bc.scale) { L = word0(&rv) & Exp_mask; if (L <= (2*P+1)*Exp_msk1) { if (L > (P+2)*Exp_msk1) /* round even ==> */ /* accept rv */ break; /* rv = smallest denormal */ if (bc.nd >nd) { bc.uflchk = 1; break; } goto undfl; } } #endif /*Avoid_Underflow*/ L = (word0(&rv) & Exp_mask) - Exp_msk1; #endif /*Sudden_Underflow}}*/ word0(&rv) = L | Bndry_mask1; word1(&rv) = 0xffffffff; #ifdef IBM goto cont; #else #ifndef NO_STRTOD_BIGCOMP if (bc.nd > nd) goto cont; #endif break; #endif } #ifndef ROUND_BIASED #ifdef Avoid_Underflow if (Lsb1) { if (!(word0(&rv) & Lsb1)) break; } else if (!(word1(&rv) & Lsb)) break; #else if (!(word1(&rv) & LSB)) break; #endif #endif if (bc.dsign) #ifdef Avoid_Underflow dval(&rv) += sulp(&rv, &bc); #else dval(&rv) += ulp(&rv); #endif #ifndef ROUND_BIASED else { #ifdef Avoid_Underflow dval(&rv) -= sulp(&rv, &bc); #else dval(&rv) -= ulp(&rv); #endif #ifndef Sudden_Underflow if (!dval(&rv)) { if (bc.nd >nd) { bc.uflchk = 1; break; } goto undfl; } #endif } #ifdef Avoid_Underflow bc.dsign = 1 - bc.dsign; #endif #endif break; } if ((aadj = ratio(delta, bs)) <= 2.) 
{ if (bc.dsign) aadj = aadj1 = 1.; else if (word1(&rv) || word0(&rv) & Bndry_mask) { #ifndef Sudden_Underflow if (word1(&rv) == Tiny1 && !word0(&rv)) { if (bc.nd >nd) { bc.uflchk = 1; break; } goto undfl; } #endif aadj = 1.; aadj1 = -1.; } else { /* special case -- power of FLT_RADIX to be */ /* rounded down... */ if (aadj < 2./FLT_RADIX) aadj = 1./FLT_RADIX; else aadj *= 0.5; aadj1 = -aadj; } } else { aadj *= 0.5; aadj1 = bc.dsign ? aadj : -aadj; #ifdef Check_FLT_ROUNDS switch(bc.rounding) { case 2: /* towards +infinity */ aadj1 -= 0.5; break; case 0: /* towards 0 */ case 3: /* towards -infinity */ aadj1 += 0.5; } #else if (Flt_Rounds == 0) aadj1 += 0.5; #endif /*Check_FLT_ROUNDS*/ } y = word0(&rv) & Exp_mask; /* Check for overflow */ if (y == Exp_msk1*(DBL_MAX_EXP+Bias-1)) { dval(&rv0) = dval(&rv); word0(&rv) -= P*Exp_msk1; adj.d = aadj1 * ulp(&rv); dval(&rv) += adj.d; if ((word0(&rv) & Exp_mask) >= Exp_msk1*(DBL_MAX_EXP+Bias-P)) { if (word0(&rv0) == Big0 && word1(&rv0) == Big1) goto ovfl; word0(&rv) = Big0; word1(&rv) = Big1; goto cont; } else word0(&rv) += P*Exp_msk1; } else { #ifdef Avoid_Underflow if (bc.scale && y <= 2*P*Exp_msk1) { if (aadj <= 0x7fffffff) { if ((z = aadj) <= 0) z = 1; aadj = z; aadj1 = bc.dsign ? aadj : -aadj; } dval(&aadj2) = aadj1; word0(&aadj2) += (2*P+1)*Exp_msk1 - y; aadj1 = dval(&aadj2); adj.d = aadj1 * ulp(&rv); dval(&rv) += adj.d; if (rv.d == 0.) #ifdef NO_STRTOD_BIGCOMP goto undfl; #else { if (bc.nd > nd) bc.dsign = 1; break; } #endif } else { adj.d = aadj1 * ulp(&rv); dval(&rv) += adj.d; } #else #ifdef Sudden_Underflow if ((word0(&rv) & Exp_mask) <= P*Exp_msk1) { dval(&rv0) = dval(&rv); word0(&rv) += P*Exp_msk1; adj.d = aadj1 * ulp(&rv); dval(&rv) += adj.d; #ifdef IBM if ((word0(&rv) & Exp_mask) < P*Exp_msk1) #else if ((word0(&rv) & Exp_mask) <= P*Exp_msk1) #endif { if (word0(&rv0) == Tiny0 && word1(&rv0) == Tiny1) { if (bc.nd >nd) { bc.uflchk = 1; break; } goto undfl; } word0(&rv) = Tiny0; word1(&rv) = Tiny1; goto cont; } else word0(&rv) -= P*Exp_msk1; } else { adj.d = aadj1 * ulp(&rv); dval(&rv) += adj.d; } #else /*Sudden_Underflow*/ /* Compute adj so that the IEEE rounding rules will * correctly round rv + adj in some half-way cases. * If rv * ulp(rv) is denormalized (i.e., * y <= (P-1)*Exp_msk1), we must adjust aadj to avoid * trouble from bits lost to denormalization; * example: 1.2e-307 . */ if (y <= (P-1)*Exp_msk1 && aadj > 1.) { aadj1 = (double)(int)(aadj + 0.5); if (!bc.dsign) aadj1 = -aadj1; } adj.d = aadj1 * ulp(&rv); dval(&rv) += adj.d; #endif /*Sudden_Underflow*/ #endif /*Avoid_Underflow*/ } z = word0(&rv) & Exp_mask; #ifndef SET_INEXACT if (bc.nd == nd) { #ifdef Avoid_Underflow if (!bc.scale) #endif if (y == z) { /* Can we stop now? */ L = (Long)aadj; aadj -= L; /* The tolerances below are conservative. */ if (bc.dsign || word1(&rv) || word0(&rv) & Bndry_mask) { if (aadj < .4999999 || aadj > .5000001) break; } else if (aadj < .4999999/FLT_RADIX) break; } } #endif cont: Bfree(bb); Bfree(bd); Bfree(bs); Bfree(delta); } Bfree(bb); Bfree(bd); Bfree(bs); Bfree(bd0); Bfree(delta); #ifndef NO_STRTOD_BIGCOMP if (req_bigcomp) { bd0 = 0; bc.e0 += nz1; bigcomp(&rv, s0, &bc); y = word0(&rv) & Exp_mask; if (y == Exp_mask) goto ovfl; if (y == 0 && rv.d == 0.) 
goto undfl; } #endif #ifdef SET_INEXACT if (bc.inexact) { if (!oldinexact) { word0(&rv0) = Exp_1 + (70 << Exp_shift); word1(&rv0) = 0; dval(&rv0) += 1.; } } else if (!oldinexact) clear_inexact(); #endif #ifdef Avoid_Underflow if (bc.scale) { word0(&rv0) = Exp_1 - 2*P*Exp_msk1; word1(&rv0) = 0; dval(&rv) *= dval(&rv0); #ifndef NO_ERRNO /* try to avoid the bug of testing an 8087 register value */ #ifdef IEEE_Arith if (!(word0(&rv) & Exp_mask)) #else if (word0(&rv) == 0 && word1(&rv) == 0) #endif errno = ERANGE; #endif } #endif /* Avoid_Underflow */ #ifdef SET_INEXACT if (bc.inexact && !(word0(&rv) & Exp_mask)) { /* set underflow bit */ dval(&rv0) = 1e-300; dval(&rv0) *= dval(&rv0); } #endif ret: if (se) *se = (char *)s; return sign ? -dval(&rv) : dval(&rv); } #ifndef MULTIPLE_THREADS static char *dtoa_result; #endif static char * #ifdef KR_headers rv_alloc(i) int i; #else rv_alloc(int i) #endif { int j, k, *r; j = sizeof(ULong); for(k = 0; sizeof(Bigint) - sizeof(ULong) - sizeof(int) + j <= i; j <<= 1) k++; r = (int*)Balloc(k); *r = k; return #ifndef MULTIPLE_THREADS dtoa_result = #endif (char *)(r+1); } static char * #ifdef KR_headers nrv_alloc(s, rve, n) char *s, **rve; int n; #else nrv_alloc(const char *s, char **rve, int n) #endif { char *rv, *t; t = rv = rv_alloc(n); while((*t = *s++)) t++; if (rve) *rve = t; return rv; } /* freedtoa(s) must be used to free values s returned by dtoa * when MULTIPLE_THREADS is #defined. It should be used in all cases, * but for consistency with earlier versions of dtoa, it is optional * when MULTIPLE_THREADS is not defined. */ void #ifdef KR_headers freedtoa(s) char *s; #else freedtoa(char *s) #endif { Bigint *b = (Bigint *)((int *)s - 1); b->maxwds = 1 << (b->k = *(int*)b); Bfree(b); #ifndef MULTIPLE_THREADS if (s == dtoa_result) dtoa_result = 0; #endif } /* dtoa for IEEE arithmetic (dmg): convert double to ASCII string. * * Inspired by "How to Print Floating-Point Numbers Accurately" by * Guy L. Steele, Jr. and Jon L. White [Proc. ACM SIGPLAN '90, pp. 112-126]. * * Modifications: * 1. Rather than iterating, we use a simple numeric overestimate * to determine k = floor(log10(d)). We scale relevant * quantities using O(log2(k)) rather than O(k) multiplications. * 2. For some modes > 2 (corresponding to ecvt and fcvt), we don't * try to generate digits strictly left to right. Instead, we * compute with fewer bits and propagate the carry if necessary * when rounding the final digit up. This is often faster. * 3. Under the assumption that input will be rounded nearest, * mode 0 renders 1e23 as 1e23 rather than 9.999999999999999e22. * That is, we allow equality in stopping tests when the * round-nearest rule will give the same floating-point value * as would satisfaction of the stopping test with strict * inequality. * 4. We remove common factors of powers of 2 from relevant * quantities. * 5. When converting floating-point integers less than 1e16, * we use floating-point arithmetic rather than resorting * to multiple-precision integers. * 6. When asked to produce fewer than 15 digits, we first try * to get by with floating-point arithmetic; we resort to * multiple-precision integer arithmetic only if we cannot * guarantee that the floating-point calculation has given * the correctly rounded result. For k requested digits and * "uniformly" distributed input, the probability is * something like 10^(k-15) that we must resort to the Long * calculation. 
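 *
 * Illustrative example (a hypothetical caller, not part of this source),
 * using mode 0 to request the shortest digit string that reads back to
 * the original double:
 *
 *	int decpt, sign;
 *	char *end;
 *	char *s = dtoa(0.1, 0, 0, &decpt, &sign, &end);
 *	    ... here s is "1", decpt is 0 and sign is 0, i.e. the value
 *	        is .1 * 10^0; end points just past the last digit ...
 *	freedtoa(s);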
*/ char * dtoa #ifdef KR_headers (dd, mode, ndigits, decpt, sign, rve) double dd; int mode, ndigits, *decpt, *sign; char **rve; #else (double dd, int mode, int ndigits, int *decpt, int *sign, char **rve) #endif { /* Arguments ndigits, decpt, sign are similar to those of ecvt and fcvt; trailing zeros are suppressed from the returned string. If not null, *rve is set to point to the end of the return value. If d is +-Infinity or NaN, then *decpt is set to 9999. mode: 0 ==> shortest string that yields d when read in and rounded to nearest. 1 ==> like 0, but with Steele & White stopping rule; e.g. with IEEE P754 arithmetic , mode 0 gives 1e23 whereas mode 1 gives 9.999999999999999e22. 2 ==> max(1,ndigits) significant digits. This gives a return value similar to that of ecvt, except that trailing zeros are suppressed. 3 ==> through ndigits past the decimal point. This gives a return value similar to that from fcvt, except that trailing zeros are suppressed, and ndigits can be negative. 4,5 ==> similar to 2 and 3, respectively, but (in round-nearest mode) with the tests of mode 0 to possibly return a shorter string that rounds to d. With IEEE arithmetic and compilation with -DHonor_FLT_ROUNDS, modes 4 and 5 behave the same as modes 2 and 3 when FLT_ROUNDS != 1. 6-9 ==> Debugging modes similar to mode - 4: don't try fast floating-point estimate (if applicable). Values of mode other than 0-9 are treated as mode 0. Sufficient space is allocated to the return value to hold the suppressed trailing zeros. */ int bbits, b2, b5, be, dig, i, ieps, ilim, ilim0, ilim1, j, j1, k, k0, k_check, leftright, m2, m5, s2, s5, spec_case, try_quick; Long L; #ifndef Sudden_Underflow int denorm; ULong x; #endif Bigint *b, *b1, *delta, *mlo, *mhi, *S; U d2, eps, u; double ds; char *s, *s0; #ifndef No_leftright #ifdef IEEE_Arith U eps1; #endif #endif #ifdef SET_INEXACT int inexact, oldinexact; #endif #ifdef Honor_FLT_ROUNDS /*{*/ int Rounding; #ifdef Trust_FLT_ROUNDS /*{{ only define this if FLT_ROUNDS really works! */ Rounding = Flt_Rounds; #else /*}{*/ Rounding = 1; switch(fegetround()) { case FE_TOWARDZERO: Rounding = 0; break; case FE_UPWARD: Rounding = 2; break; case FE_DOWNWARD: Rounding = 3; } #endif /*}}*/ #endif /*}*/ #ifndef MULTIPLE_THREADS if (dtoa_result) { freedtoa(dtoa_result); dtoa_result = 0; } #endif u.d = dd; if (word0(&u) & Sign_bit) { /* set sign for everything, including 0's and NaNs */ *sign = 1; word0(&u) &= ~Sign_bit; /* clear sign bit */ } else *sign = 0; #if defined(IEEE_Arith) + defined(VAX) #ifdef IEEE_Arith if ((word0(&u) & Exp_mask) == Exp_mask) #else if (word0(&u) == 0x8000) #endif { /* Infinity or NaN */ *decpt = 9999; #ifdef IEEE_Arith if (!word1(&u) && !(word0(&u) & 0xfffff)) return nrv_alloc("inf", rve, 8); #endif return nrv_alloc("nan", rve, 3); } #endif #ifdef IBM dval(&u) += 0; /* normalize */ #endif if (!dval(&u)) { *decpt = 1; return nrv_alloc("0", rve, 1); } #ifdef SET_INEXACT try_quick = oldinexact = get_inexact(); inexact = 1; #endif #ifdef Honor_FLT_ROUNDS if (Rounding >= 2) { if (*sign) Rounding = Rounding == 2 ? 
0 : 2; else if (Rounding != 2) Rounding = 0; } #endif b = d2b(&u, &be, &bbits); #ifdef Sudden_Underflow i = (int)(word0(&u) >> Exp_shift1 & (Exp_mask>>Exp_shift1)); #else if ((i = (int)(word0(&u) >> Exp_shift1 & (Exp_mask>>Exp_shift1)))) { #endif dval(&d2) = dval(&u); word0(&d2) &= Frac_mask1; word0(&d2) |= Exp_11; #ifdef IBM if (j = 11 - hi0bits(word0(&d2) & Frac_mask)) dval(&d2) /= 1 << j; #endif /* log(x) ~=~ log(1.5) + (x-1.5)/1.5 * log10(x) = log(x) / log(10) * ~=~ log(1.5)/log(10) + (x-1.5)/(1.5*log(10)) * log10(d) = (i-Bias)*log(2)/log(10) + log10(d2) * * This suggests computing an approximation k to log10(d) by * * k = (i - Bias)*0.301029995663981 * + ( (d2-1.5)*0.289529654602168 + 0.176091259055681 ); * * We want k to be too large rather than too small. * The error in the first-order Taylor series approximation * is in our favor, so we just round up the constant enough * to compensate for any error in the multiplication of * (i - Bias) by 0.301029995663981; since |i - Bias| <= 1077, * and 1077 * 0.30103 * 2^-52 ~=~ 7.2e-14, * adding 1e-13 to the constant term more than suffices. * Hence we adjust the constant term to 0.1760912590558. * (We could get a more accurate k by invoking log10, * but this is probably not worthwhile.) */ i -= Bias; #ifdef IBM i <<= 2; i += j; #endif #ifndef Sudden_Underflow denorm = 0; } else { /* d is denormalized */ i = bbits + be + (Bias + (P-1) - 1); x = i > 32 ? word0(&u) << (64 - i) | word1(&u) >> (i - 32) : word1(&u) << (32 - i); dval(&d2) = x; word0(&d2) -= 31*Exp_msk1; /* adjust exponent */ i -= (Bias + (P-1) - 1) + 1; denorm = 1; } #endif ds = (dval(&d2)-1.5)*0.289529654602168 + 0.1760912590558 + i*0.301029995663981; k = (int)ds; if (ds < 0. && ds != k) k--; /* want k = floor(ds) */ k_check = 1; if (k >= 0 && k <= Ten_pmax) { if (dval(&u) < tens[k]) k--; k_check = 0; } j = bbits - i - 1; if (j >= 0) { b2 = 0; s2 = j; } else { b2 = -j; s2 = 0; } if (k >= 0) { b5 = 0; s5 = k; s2 += k; } else { b2 -= k; b5 = -k; s5 = 0; } if (mode < 0 || mode > 9) mode = 0; #ifndef SET_INEXACT #ifdef Check_FLT_ROUNDS try_quick = Rounding == 1; #else try_quick = 1; #endif #endif /*SET_INEXACT*/ if (mode > 5) { mode -= 4; try_quick = 0; } leftright = 1; ilim = ilim1 = -1; /* Values for cases 0 and 1; done here to */ /* silence erroneous "gcc -Wall" warning. */ switch(mode) { case 0: case 1: i = 18; ndigits = 0; break; case 2: leftright = 0; /* no break */ case 4: if (ndigits <= 0) ndigits = 1; ilim = ilim1 = i = ndigits; break; case 3: leftright = 0; /* no break */ case 5: i = ndigits + k + 1; ilim = i; ilim1 = i - 1; if (i <= 0) i = 1; } s = s0 = rv_alloc(i); #ifdef Honor_FLT_ROUNDS if (mode > 1 && Rounding != 1) leftright = 0; #endif if (ilim >= 0 && ilim <= Quick_max && try_quick) { /* Try to get by with floating-point arithmetic. */ i = 0; dval(&d2) = dval(&u); k0 = k; ilim0 = ilim; ieps = 2; /* conservative */ if (k > 0) { ds = tens[k&0xf]; j = k >> 4; if (j & Bletch) { /* prevent overflows */ j &= Bletch - 1; dval(&u) /= bigtens[n_bigtens-1]; ieps++; } for(; j; j >>= 1, i++) if (j & 1) { ieps++; ds *= bigtens[i]; } dval(&u) /= ds; } else if ((j1 = -k)) { dval(&u) *= tens[j1 & 0xf]; for(j = j1 >> 4; j; j >>= 1, i++) if (j & 1) { ieps++; dval(&u) *= bigtens[i]; } } if (k_check && dval(&u) < 1. 
&& ilim > 0) { if (ilim1 <= 0) goto fast_failed; ilim = ilim1; k--; dval(&u) *= 10.; ieps++; } dval(&eps) = ieps*dval(&u) + 7.; word0(&eps) -= (P-1)*Exp_msk1; if (ilim == 0) { S = mhi = 0; dval(&u) -= 5.; if (dval(&u) > dval(&eps)) goto one_digit; if (dval(&u) < -dval(&eps)) goto no_digits; goto fast_failed; } #ifndef No_leftright if (leftright) { /* Use Steele & White method of only * generating digits needed. */ dval(&eps) = 0.5/tens[ilim-1] - dval(&eps); #ifdef IEEE_Arith if (k0 < 0 && j1 >= 307) { eps1.d = 1.01e256; /* 1.01 allows roundoff in the next few lines */ word0(&eps1) -= Exp_msk1 * (Bias+P-1); dval(&eps1) *= tens[j1 & 0xf]; for(i = 0, j = (j1-256) >> 4; j; j >>= 1, i++) if (j & 1) dval(&eps1) *= bigtens[i]; if (eps.d < eps1.d) eps.d = eps1.d; } #endif for(i = 0;;) { L = dval(&u); dval(&u) -= L; *s++ = '0' + (int)L; if (1. - dval(&u) < dval(&eps)) goto bump_up; if (dval(&u) < dval(&eps)) goto ret1; if (++i >= ilim) break; dval(&eps) *= 10.; dval(&u) *= 10.; } } else { #endif /* Generate ilim digits, then fix them up. */ dval(&eps) *= tens[ilim-1]; for(i = 1;; i++, dval(&u) *= 10.) { L = (Long)(dval(&u)); if (!(dval(&u) -= L)) ilim = i; *s++ = '0' + (int)L; if (i == ilim) { if (dval(&u) > 0.5 + dval(&eps)) goto bump_up; else if (dval(&u) < 0.5 - dval(&eps)) { while(*--s == '0'); s++; goto ret1; } break; } } #ifndef No_leftright } #endif fast_failed: s = s0; dval(&u) = dval(&d2); k = k0; ilim = ilim0; } /* Do we have a "small" integer? */ if (be >= 0 && k <= Int_max) { /* Yes. */ ds = tens[k]; if (ndigits < 0 && ilim <= 0) { S = mhi = 0; if (ilim < 0 || dval(&u) <= 5*ds) goto no_digits; goto one_digit; } for(i = 1;; i++, dval(&u) *= 10.) { L = (Long)(dval(&u) / ds); dval(&u) -= L*ds; #ifdef Check_FLT_ROUNDS /* If FLT_ROUNDS == 2, L will usually be high by 1 */ if (dval(&u) < 0) { L--; dval(&u) += ds; } #endif *s++ = '0' + (int)L; if (!dval(&u)) { #ifdef SET_INEXACT inexact = 0; #endif break; } if (i == ilim) { #ifdef Honor_FLT_ROUNDS if (mode > 1) switch(Rounding) { case 0: goto ret1; case 2: goto bump_up; } #endif dval(&u) += dval(&u); #ifdef ROUND_BIASED if (dval(&u) >= ds) #else if (dval(&u) > ds || (dval(&u) == ds && L & 1)) #endif { bump_up: while(*--s == '9') if (s == s0) { k++; *s = '0'; break; } ++*s++; } break; } } goto ret1; } m2 = b2; m5 = b5; mhi = mlo = 0; if (leftright) { i = #ifndef Sudden_Underflow denorm ? be + (Bias + (P-1) - 1 + 1) : #endif #ifdef IBM 1 + 4*P - 3 - bbits + ((bbits + be - 1) & 3); #else 1 + P - bbits; #endif b2 += i; s2 += i; mhi = i2b(1); } if (m2 > 0 && s2 > 0) { i = m2 < s2 ? m2 : s2; b2 -= i; m2 -= i; s2 -= i; } if (b5 > 0) { if (leftright) { if (m5 > 0) { mhi = pow5mult(mhi, m5); b1 = mult(mhi, b); Bfree(b); b = b1; } if ((j = b5 - m5)) b = pow5mult(b, j); } else b = pow5mult(b, b5); } S = i2b(1); if (s5 > 0) S = pow5mult(S, s5); /* Check for special case that d is a normalized power of 2. */ spec_case = 0; if ((mode < 2 || leftright) #ifdef Honor_FLT_ROUNDS && Rounding == 1 #endif ) { if (!word1(&u) && !(word0(&u) & Bndry_mask) #ifndef Sudden_Underflow && word0(&u) & (Exp_mask & ~Exp_msk1) #endif ) { /* The special case */ b2 += Log2P; s2 += Log2P; spec_case = 1; } } /* Arrange for convenient computation of quotients: * shift left if necessary so divisor has 4 leading 0 bits. * * Perhaps we should just compute leading 28 bits of S once * and for all and pass them and a shift to quorem, so it * can do shifts and ors to compute the numerator for q. 
*/ i = dshift(S, s2); b2 += i; m2 += i; s2 += i; if (b2 > 0) b = lshift(b, b2); if (s2 > 0) S = lshift(S, s2); if (k_check) { if (cmp(b,S) < 0) { k--; b = multadd(b, 10, 0); /* we botched the k estimate */ if (leftright) mhi = multadd(mhi, 10, 0); ilim = ilim1; } } if (ilim <= 0 && (mode == 3 || mode == 5)) { if (ilim < 0 || cmp(b,S = multadd(S,5,0)) <= 0) { /* no digits, fcvt style */ no_digits: k = -1 - ndigits; goto ret; } one_digit: *s++ = '1'; k++; goto ret; } if (leftright) { if (m2 > 0) mhi = lshift(mhi, m2); /* Compute mlo -- check for special case * that d is a normalized power of 2. */ mlo = mhi; if (spec_case) { mhi = Balloc(mhi->k); Bcopy(mhi, mlo); mhi = lshift(mhi, Log2P); } for(i = 1;;i++) { dig = quorem(b,S) + '0'; /* Do we yet have the shortest decimal string * that will round to d? */ j = cmp(b, mlo); delta = diff(S, mhi); j1 = delta->sign ? 1 : cmp(b, delta); Bfree(delta); #ifndef ROUND_BIASED if (j1 == 0 && mode != 1 && !(word1(&u) & 1) #ifdef Honor_FLT_ROUNDS && Rounding >= 1 #endif ) { if (dig == '9') goto round_9_up; if (j > 0) dig++; #ifdef SET_INEXACT else if (!b->x[0] && b->wds <= 1) inexact = 0; #endif *s++ = dig; goto ret; } #endif if (j < 0 || (j == 0 && mode != 1 #ifndef ROUND_BIASED && !(word1(&u) & 1) #endif )) { if (!b->x[0] && b->wds <= 1) { #ifdef SET_INEXACT inexact = 0; #endif goto accept_dig; } #ifdef Honor_FLT_ROUNDS if (mode > 1) switch(Rounding) { case 0: goto accept_dig; case 2: goto keep_dig; } #endif /*Honor_FLT_ROUNDS*/ if (j1 > 0) { b = lshift(b, 1); j1 = cmp(b, S); #ifdef ROUND_BIASED if (j1 >= 0 /*)*/ #else if ((j1 > 0 || (j1 == 0 && dig & 1)) #endif && dig++ == '9') goto round_9_up; } accept_dig: *s++ = dig; goto ret; } if (j1 > 0) { #ifdef Honor_FLT_ROUNDS if (!Rounding) goto accept_dig; #endif if (dig == '9') { /* possible if i == 1 */ round_9_up: *s++ = '9'; goto roundoff; } *s++ = dig + 1; goto ret; } #ifdef Honor_FLT_ROUNDS keep_dig: #endif *s++ = dig; if (i == ilim) break; b = multadd(b, 10, 0); if (mlo == mhi) mlo = mhi = multadd(mhi, 10, 0); else { mlo = multadd(mlo, 10, 0); mhi = multadd(mhi, 10, 0); } } } else for(i = 1;; i++) { *s++ = dig = quorem(b,S) + '0'; if (!b->x[0] && b->wds <= 1) { #ifdef SET_INEXACT inexact = 0; #endif goto ret; } if (i >= ilim) break; b = multadd(b, 10, 0); } /* Round off last digit */ #ifdef Honor_FLT_ROUNDS switch(Rounding) { case 0: goto trimzeros; case 2: goto roundoff; } #endif b = lshift(b, 1); j = cmp(b, S); #ifdef ROUND_BIASED if (j >= 0) #else if (j > 0 || (j == 0 && dig & 1)) #endif { roundoff: while(*--s == '9') if (s == s0) { k++; *s++ = '1'; goto ret; } ++*s++; } else { #ifdef Honor_FLT_ROUNDS trimzeros: #endif while(*--s == '0'); s++; } ret: Bfree(S); if (mhi) { if (mlo && mlo != mhi) Bfree(mlo); Bfree(mhi); } ret1: #ifdef SET_INEXACT if (inexact) { if (!oldinexact) { word0(&u) = Exp_1 + (70 << Exp_shift); word1(&u) = 0; dval(&u) += 1.; } } else if (!oldinexact) clear_inexact(); #endif Bfree(b); *s = 0; *decpt = k + 1; if (rve) *rve = s; return s0; } #ifdef __cplusplus } #endif lua-cjson-2.1.0+dfsg/dtoa_config.h000066400000000000000000000035431201567770400167670ustar00rootroot00000000000000#ifndef _DTOA_CONFIG_H #define _DTOA_CONFIG_H #include #include #include /* Ensure dtoa.c does not USE_LOCALE. Lua CJSON must not use locale * aware conversion routines. 
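 * (JSON always uses '.' as the decimal separator; with this internal
 * converter the C locale's decimal point is simply ignored, whereas the
 * libc strtod()/sprintf() path in fpconv.c translates it instead.)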
*/ #undef USE_LOCALE /* dtoa.c should not touch errno, Lua CJSON does not use it, and it * may not be threadsafe */ #define NO_ERRNO #define Long int32_t #define ULong uint32_t #define Llong int64_t #define ULLong uint64_t #ifdef IEEE_BIG_ENDIAN #define IEEE_MC68k #else #define IEEE_8087 #endif #define MALLOC(n) xmalloc(n) static void *xmalloc(size_t size) { void *p; p = malloc(size); if (!p) { fprintf(stderr, "Out of memory"); abort(); } return p; } #ifdef MULTIPLE_THREADS /* Enable locking to support multi-threaded applications */ #include static pthread_mutex_t private_dtoa_lock[2] = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER }; #define ACQUIRE_DTOA_LOCK(n) do { \ int r = pthread_mutex_lock(&private_dtoa_lock[n]); \ if (r) { \ fprintf(stderr, "pthread_mutex_lock failed with %d\n", r); \ abort(); \ } \ } while (0) #define FREE_DTOA_LOCK(n) do { \ int r = pthread_mutex_unlock(&private_dtoa_lock[n]); \ if (r) { \ fprintf(stderr, "pthread_mutex_unlock failed with %d\n", r);\ abort(); \ } \ } while (0) #endif /* MULTIPLE_THREADS */ #endif /* _DTOA_CONFIG_H */ /* vi:ai et sw=4 ts=4: */ lua-cjson-2.1.0+dfsg/fpconv.c000066400000000000000000000136501201567770400160010ustar00rootroot00000000000000/* fpconv - Floating point conversion routines * * Copyright (c) 2011-2012 Mark Pulford * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* JSON uses a '.' decimal separator. strtod() / sprintf() under C libraries * with locale support will break when the decimal separator is a comma. * * fpconv_* will around these issues with a translation buffer if required. */ #include #include #include #include #include "fpconv.h" /* Lua CJSON assumes the locale is the same for all threads within a * process and doesn't change after initialisation. * * This avoids the need for per thread storage or expensive checks * for call. */ static char locale_decimal_point = '.'; /* In theory multibyte decimal_points are possible, but * Lua CJSON only supports UTF-8 and known locales only have * single byte decimal points ([.,]). * * localconv() may not be thread safe (=>crash), and nl_langinfo() is * not supported on some platforms. Use sprintf() instead - if the * locale does change, at least Lua CJSON won't crash. 
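 *
 * For example (illustrative, assuming a comma-decimal locale such as
 * fr_FR): snprintf(buf, sizeof(buf), "%g", 0.5) prints "0,5", so the
 * locale's decimal point character is found at buf[1].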
*/ static void fpconv_update_locale() { char buf[8]; snprintf(buf, sizeof(buf), "%g", 0.5); /* Failing this test might imply the platform has a buggy dtoa * implementation or wide characters */ if (buf[0] != '0' || buf[2] != '5' || buf[3] != 0) { fprintf(stderr, "Error: wide characters found or printf() bug."); abort(); } locale_decimal_point = buf[1]; } /* Check for a valid number character: [-+0-9a-yA-Y.] * Eg: -0.6e+5, infinity, 0xF0.F0pF0 * * Used to find the probable end of a number. It doesn't matter if * invalid characters are counted - strtod() will find the valid * number if it exists. The risk is that slightly more memory might * be allocated before a parse error occurs. */ static inline int valid_number_character(char ch) { char lower_ch; if ('0' <= ch && ch <= '9') return 1; if (ch == '-' || ch == '+' || ch == '.') return 1; /* Hex digits, exponent (e), base (p), "infinity",.. */ lower_ch = ch | 0x20; if ('a' <= lower_ch && lower_ch <= 'y') return 1; return 0; } /* Calculate the size of the buffer required for a strtod locale * conversion. */ static int strtod_buffer_size(const char *s) { const char *p = s; while (valid_number_character(*p)) p++; return p - s; } /* Similar to strtod(), but must be passed the current locale's decimal point * character. Guaranteed to be called at the start of any valid number in a string */ double fpconv_strtod(const char *nptr, char **endptr) { char localbuf[FPCONV_G_FMT_BUFSIZE]; char *buf, *endbuf, *dp; int buflen; double value; /* System strtod() is fine when decimal point is '.' */ if (locale_decimal_point == '.') return strtod(nptr, endptr); buflen = strtod_buffer_size(nptr); if (!buflen) { /* No valid characters found, standard strtod() return */ *endptr = (char *)nptr; return 0; } /* Duplicate number into buffer */ if (buflen >= FPCONV_G_FMT_BUFSIZE) { /* Handle unusually large numbers */ buf = malloc(buflen + 1); if (!buf) { fprintf(stderr, "Out of memory"); abort(); } } else { /* This is the common case.. */ buf = localbuf; } memcpy(buf, nptr, buflen); buf[buflen] = 0; /* Update decimal point character if found */ dp = strchr(buf, '.'); if (dp) *dp = locale_decimal_point; value = strtod(buf, &endbuf); *endptr = (char *)&nptr[endbuf - buf]; if (buflen >= FPCONV_G_FMT_BUFSIZE) free(buf); return value; } /* "fmt" must point to a buffer of at least 6 characters */ static void set_number_format(char *fmt, int precision) { int d1, d2, i; assert(1 <= precision && precision <= 14); /* Create printf format (%.14g) from precision */ d1 = precision / 10; d2 = precision % 10; fmt[0] = '%'; fmt[1] = '.'; i = 2; if (d1) { fmt[i++] = '0' + d1; } fmt[i++] = '0' + d2; fmt[i++] = 'g'; fmt[i] = 0; } /* Assumes there is always at least 32 characters available in the target buffer */ int fpconv_g_fmt(char *str, double num, int precision) { char buf[FPCONV_G_FMT_BUFSIZE]; char fmt[6]; int len; char *b; set_number_format(fmt, precision); /* Pass through when decimal point character is dot. */ if (locale_decimal_point == '.') return snprintf(str, FPCONV_G_FMT_BUFSIZE, fmt, num); /* snprintf() to a buffer then translate for other decimal point characters */ len = snprintf(buf, FPCONV_G_FMT_BUFSIZE, fmt, num); /* Copy into target location. Translate decimal point if required */ b = buf; do { *str++ = (*b == locale_decimal_point ? '.' 
: *b); } while(*b++); return len; } void fpconv_init() { fpconv_update_locale(); } /* vi:ai et sw=4 ts=4: */ lua-cjson-2.1.0+dfsg/fpconv.h000066400000000000000000000010151201567770400157760ustar00rootroot00000000000000/* Lua CJSON floating point conversion routines */ /* Buffer required to store the largest string representation of a double. * * Longest double printed with %.14g is 21 characters long: * -1.7976931348623e+308 */ # define FPCONV_G_FMT_BUFSIZE 32 #ifdef USE_INTERNAL_FPCONV static inline void fpconv_init() { /* Do nothing - not required */ } #else extern inline void fpconv_init(); #endif extern int fpconv_g_fmt(char*, double, int); extern double fpconv_strtod(const char*, char**); /* vi:ai et sw=4 ts=4: */ lua-cjson-2.1.0+dfsg/g_fmt.c000066400000000000000000000046601201567770400156030ustar00rootroot00000000000000/**************************************************************** * * The author of this software is David M. Gay. * * Copyright (c) 1991, 1996 by Lucent Technologies. * * Permission to use, copy, modify, and distribute this software for any * purpose without fee is hereby granted, provided that this entire notice * is included in all copies of any software which is or includes a copy * or modification of this software and in all copies of the supporting * documentation for such software. * * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR IMPLIED * WARRANTY. IN PARTICULAR, NEITHER THE AUTHOR NOR LUCENT MAKES ANY * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE MERCHANTABILITY * OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR PURPOSE. * ***************************************************************/ /* g_fmt(buf,x) stores the closest decimal approximation to x in buf; * it suffices to declare buf * char buf[32]; */ #ifdef __cplusplus extern "C" { #endif extern char *dtoa(double, int, int, int *, int *, char **); extern int g_fmt(char *, double, int); extern void freedtoa(char*); #ifdef __cplusplus } #endif int fpconv_g_fmt(char *b, double x, int precision) { register int i, k; register char *s; int decpt, j, sign; char *b0, *s0, *se; b0 = b; #ifdef IGNORE_ZERO_SIGN if (!x) { *b++ = '0'; *b = 0; goto done; } #endif s = s0 = dtoa(x, 2, precision, &decpt, &sign, &se); if (sign) *b++ = '-'; if (decpt == 9999) /* Infinity or Nan */ { while((*b++ = *s++)); /* "b" is used to calculate the return length. Decrement to exclude the * Null terminator from the length */ b--; goto done0; } if (decpt <= -4 || decpt > precision) { *b++ = *s++; if (*s) { *b++ = '.'; while((*b = *s++)) b++; } *b++ = 'e'; /* sprintf(b, "%+.2d", decpt - 1); */ if (--decpt < 0) { *b++ = '-'; decpt = -decpt; } else *b++ = '+'; for(j = 2, k = 10; 10*k <= decpt; j++, k *= 10); for(;;) { i = decpt / k; *b++ = i + '0'; if (--j <= 0) break; decpt -= i*k; decpt *= 10; } *b = 0; } else if (decpt <= 0) { *b++ = '0'; *b++ = '.'; for(; decpt < 0; decpt++) *b++ = '0'; while((*b++ = *s++)); b--; } else { while((*b = *s++)) { b++; if (--decpt == 0 && *s) *b++ = '.'; } for(; decpt > 0; decpt--) *b++ = '0'; *b = 0; } done0: freedtoa(s0); #ifdef IGNORE_ZERO_SIGN done: #endif return b - b0; } lua-cjson-2.1.0+dfsg/lua-cjson-2.1.0-1.rockspec000066400000000000000000000030171201567770400205560ustar00rootroot00000000000000package = "lua-cjson" version = "2.1.0-1" source = { url = "http://www.kyne.com.au/~mark/software/download/lua-cjson-2.1.0.zip", } description = { summary = "A fast JSON encoding/parsing module", detailed = [[ The Lua CJSON module provides JSON support for Lua. 
It features: - Fast, standards compliant encoding/parsing routines - Full support for JSON with UTF-8, including decoding surrogate pairs - Optional run-time support for common exceptions to the JSON specification (infinity, NaN,..) - No dependencies on other libraries ]], homepage = "http://www.kyne.com.au/~mark/software/lua-cjson.php", license = "MIT" } dependencies = { "lua >= 5.1" } build = { type = "builtin", modules = { cjson = { sources = { "lua_cjson.c", "strbuf.c", "fpconv.c" }, defines = { -- LuaRocks does not support platform specific configuration for Solaris. -- Uncomment the line below on Solaris platforms if required. -- "USE_INTERNAL_ISINF" } } }, install = { lua = { ["cjson.util"] = "lua/cjson/util.lua" }, bin = { json2lua = "lua/json2lua.lua", lua2json = "lua/lua2json.lua" } }, -- Override default build options (per platform) platforms = { win32 = { modules = { cjson = { defines = { "DISABLE_INVALID_NUMBERS" } } } } }, copy_directories = { "tests" } } -- vi:ai et sw=4 ts=4: lua-cjson-2.1.0+dfsg/lua-cjson.spec000066400000000000000000000040411201567770400171030ustar00rootroot00000000000000%define luaver 5.1 %define lualibdir %{_libdir}/lua/%{luaver} %define luadatadir %{_datadir}/lua/%{luaver} Name: lua-cjson Version: 2.1.0 Release: 1%{?dist} Summary: A fast JSON encoding/parsing module for Lua Group: Development/Libraries License: MIT URL: http://www.kyne.com.au/~mark/software/lua-cjson/ Source0: http://www.kyne.com.au/~mark/software/lua-cjson/download/lua-cjson-%{version}.tar.gz BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX) BuildRequires: lua >= %{luaver}, lua-devel >= %{luaver} Requires: lua >= %{luaver} %description The Lua CJSON module provides JSON support for Lua. It features: - Fast, standards compliant encoding/parsing routines - Full support for JSON with UTF-8, including decoding surrogate pairs - Optional run-time support for common exceptions to the JSON specification (infinity, NaN,..) - No dependencies on other libraries %prep %setup -q %build make %{?_smp_mflags} CFLAGS="%{optflags}" LUA_INCLUDE_DIR="%{_includedir}" %install rm -rf "$RPM_BUILD_ROOT" make install DESTDIR="$RPM_BUILD_ROOT" LUA_CMODULE_DIR="%{lualibdir}" make install-extra DESTDIR="$RPM_BUILD_ROOT" LUA_MODULE_DIR="%{luadatadir}" \ LUA_BIN_DIR="%{_bindir}" %clean rm -rf "$RPM_BUILD_ROOT" %preun /bin/rm -f "%{luadatadir}/cjson/tests/utf8.dat" %files %defattr(-,root,root,-) %doc LICENSE NEWS performance.html performance.txt manual.html manual.txt rfc4627.txt THANKS %{lualibdir}/* %{luadatadir}/* %{_bindir}/* %changelog * Thu Mar 1 2012 Mark Pulford - 2.1.0-1 - Update for 2.1.0 * Sun Jan 22 2012 Mark Pulford - 2.0.0-1 - Update for 2.0.0 - Install lua2json / json2lua utilities * Wed Nov 27 2011 Mark Pulford - 1.0.4-1 - Update for 1.0.4 * Wed Sep 15 2011 Mark Pulford - 1.0.3-1 - Update for 1.0.3 * Sun May 29 2011 Mark Pulford - 1.0.2-1 - Update for 1.0.2 * Sun May 10 2011 Mark Pulford - 1.0.1-1 - Update for 1.0.1 * Sun May 1 2011 Mark Pulford - 1.0-1 - Initial package lua-cjson-2.1.0+dfsg/lua/000077500000000000000000000000001201567770400151165ustar00rootroot00000000000000lua-cjson-2.1.0+dfsg/lua/cjson/000077500000000000000000000000001201567770400162325ustar00rootroot00000000000000lua-cjson-2.1.0+dfsg/lua/cjson/util.lua000066400000000000000000000152651201567770400177230ustar00rootroot00000000000000local json = require "cjson" -- Various common routines used by the Lua CJSON package -- -- Mark Pulford -- Determine with a Lua table can be treated as an array. 
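-- (Illustrative examples: {1,2,3} is treated as an array with highest
-- index 3, {} as an empty table, while {x=1} and the very sparse
-- {[1]=true,[100]=true} are not treated as arrays.)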
-- Explicitly returns "not an array" for very sparse arrays. -- Returns: -- -1 Not an array -- 0 Empty table -- >0 Highest index in the array local function is_array(table) local max = 0 local count = 0 for k, v in pairs(table) do if type(k) == "number" then if k > max then max = k end count = count + 1 else return -1 end end if max > count * 2 then return -1 end return max end local serialise_value local function serialise_table(value, indent, depth) local spacing, spacing2, indent2 if indent then spacing = "\n" .. indent spacing2 = spacing .. " " indent2 = indent .. " " else spacing, spacing2, indent2 = " ", " ", false end depth = depth + 1 if depth > 50 then return "Cannot serialise any further: too many nested tables" end local max = is_array(value) local comma = false local fragment = { "{" .. spacing2 } if max > 0 then -- Serialise array for i = 1, max do if comma then table.insert(fragment, "," .. spacing2) end table.insert(fragment, serialise_value(value[i], indent2, depth)) comma = true end elseif max < 0 then -- Serialise table for k, v in pairs(value) do if comma then table.insert(fragment, "," .. spacing2) end table.insert(fragment, ("[%s] = %s"):format(serialise_value(k, indent2, depth), serialise_value(v, indent2, depth))) comma = true end end table.insert(fragment, spacing .. "}") return table.concat(fragment) end function serialise_value(value, indent, depth) if indent == nil then indent = "" end if depth == nil then depth = 0 end if value == json.null then return "json.null" elseif type(value) == "string" then return ("%q"):format(value) elseif type(value) == "nil" or type(value) == "number" or type(value) == "boolean" then return tostring(value) elseif type(value) == "table" then return serialise_table(value, indent, depth) else return "\"<" .. type(value) .. ">\"" end end local function file_load(filename) local file if filename == nil then file = io.stdin else local err file, err = io.open(filename, "rb") if file == nil then error(("Unable to read '%s': %s"):format(filename, err)) end end local data = file:read("*a") if filename ~= nil then file:close() end if data == nil then error("Failed to read " .. filename) end return data end local function file_save(filename, data) local file if filename == nil then file = io.stdout else local err file, err = io.open(filename, "wb") if file == nil then error(("Unable to write '%s': %s"):format(filename, err)) end end file:write(data) if filename ~= nil then file:close() end end local function compare_values(val1, val2) local type1 = type(val1) local type2 = type(val2) if type1 ~= type2 then return false end -- Check for NaN if type1 == "number" and val1 ~= val1 and val2 ~= val2 then return true end if type1 ~= "table" then return val1 == val2 end -- check_keys stores all the keys that must be checked in val2 local check_keys = {} for k, _ in pairs(val1) do check_keys[k] = true end for k, v in pairs(val2) do if not check_keys[k] then return false end if not compare_values(val1[k], val2[k]) then return false end check_keys[k] = nil end for k, _ in pairs(check_keys) do -- Not the same if any keys from val1 were not found in val2 return false end return true end local test_count_pass = 0 local test_count_total = 0 local function run_test_summary() return test_count_pass, test_count_total end local function run_test(testname, func, input, should_work, output) local function status_line(name, status, value) local statusmap = { [true] = ":success", [false] = ":error" } if status ~= nil then name = name .. 
statusmap[status] end print(("[%s] %s"):format(name, serialise_value(value, false))) end local result = { pcall(func, unpack(input)) } local success = table.remove(result, 1) local correct = false if success == should_work and compare_values(result, output) then correct = true test_count_pass = test_count_pass + 1 end test_count_total = test_count_total + 1 local teststatus = { [true] = "PASS", [false] = "FAIL" } print(("==> Test [%d] %s: %s"):format(test_count_total, testname, teststatus[correct])) status_line("Input", nil, input) if not correct then status_line("Expected", should_work, output) end status_line("Received", success, result) print() return correct, result end local function run_test_group(tests) local function run_helper(name, func, input) if type(name) == "string" and #name > 0 then print("==> " .. name) end -- Not a protected call, these functions should never generate errors. func(unpack(input or {})) print() end for _, v in ipairs(tests) do -- Run the helper if "should_work" is missing if v[4] == nil then run_helper(unpack(v)) else run_test(unpack(v)) end end end -- Run a Lua script in a separate environment local function run_script(script, env) local env = env or {} local func -- Use setfenv() if it exists, otherwise assume Lua 5.2 load() exists if _G.setfenv then func = loadstring(script) if func then setfenv(func, env) end else func = load(script, nil, nil, env) end if func == nil then error("Invalid syntax.") end func() return env end -- Export functions return { serialise_value = serialise_value, file_load = file_load, file_save = file_save, compare_values = compare_values, run_test_summary = run_test_summary, run_test = run_test, run_test_group = run_test_group, run_script = run_script } -- vi:ai et sw=4 ts=4: lua-cjson-2.1.0+dfsg/lua/json2lua.lua000077500000000000000000000004601201567770400173610ustar00rootroot00000000000000#!/usr/bin/env lua -- usage: json2lua.lua [json_file] -- -- Eg: -- echo '[ "testing" ]' | ./json2lua.lua -- ./json2lua.lua test.json local json = require "cjson" local util = require "cjson.util" local json_text = util.file_load(arg[1]) local t = json.decode(json_text) print(util.serialise_value(t)) lua-cjson-2.1.0+dfsg/lua/lua2json.lua000077500000000000000000000006031201567770400173600ustar00rootroot00000000000000#!/usr/bin/env lua -- usage: lua2json.lua [lua_file] -- -- Eg: -- echo '{ "testing" }' | ./lua2json.lua -- ./lua2json.lua test.lua local json = require "cjson" local util = require "cjson.util" local env = { json = { null = json.null }, null = json.null } local t = util.run_script("data = " .. util.file_load(arg[1]), env) print(json.encode(t.data)) -- vi:ai et sw=4 ts=4: lua-cjson-2.1.0+dfsg/lua_cjson.c000066400000000000000000001211621201567770400164610ustar00rootroot00000000000000/* Lua CJSON - JSON support for Lua * * Copyright (c) 2010-2012 Mark Pulford * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. 
* * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /* Caveats: * - JSON "null" values are represented as lightuserdata since Lua * tables cannot contain "nil". Compare with cjson.null. * - Invalid UTF-8 characters are not detected and will be passed * untouched. If required, UTF-8 error checking should be done * outside this library. * - Javascript comments are not part of the JSON spec, and are not * currently supported. * * Note: Decoding is slower than encoding. Lua spends significant * time (30%) managing tables when parsing JSON since it is * difficult to know object/array sizes ahead of time. */ #include #include #include #include #include #include #include "strbuf.h" #include "fpconv.h" #ifndef CJSON_MODNAME #define CJSON_MODNAME "cjson" #endif #ifndef CJSON_VERSION #define CJSON_VERSION "2.1.0" #endif /* Workaround for Solaris platforms missing isinf() */ #if !defined(isinf) && (defined(USE_INTERNAL_ISINF) || defined(MISSING_ISINF)) #define isinf(x) (!isnan(x) && isnan((x) - (x))) #endif #define DEFAULT_SPARSE_CONVERT 0 #define DEFAULT_SPARSE_RATIO 2 #define DEFAULT_SPARSE_SAFE 10 #define DEFAULT_ENCODE_MAX_DEPTH 1000 #define DEFAULT_DECODE_MAX_DEPTH 1000 #define DEFAULT_ENCODE_INVALID_NUMBERS 0 #define DEFAULT_DECODE_INVALID_NUMBERS 1 #define DEFAULT_ENCODE_KEEP_BUFFER 1 #define DEFAULT_ENCODE_NUMBER_PRECISION 14 #ifdef DISABLE_INVALID_NUMBERS #undef DEFAULT_DECODE_INVALID_NUMBERS #define DEFAULT_DECODE_INVALID_NUMBERS 0 #endif typedef enum { T_OBJ_BEGIN, T_OBJ_END, T_ARR_BEGIN, T_ARR_END, T_STRING, T_NUMBER, T_BOOLEAN, T_NULL, T_COLON, T_COMMA, T_END, T_WHITESPACE, T_ERROR, T_UNKNOWN } json_token_type_t; static const char *json_token_type_name[] = { "T_OBJ_BEGIN", "T_OBJ_END", "T_ARR_BEGIN", "T_ARR_END", "T_STRING", "T_NUMBER", "T_BOOLEAN", "T_NULL", "T_COLON", "T_COMMA", "T_END", "T_WHITESPACE", "T_ERROR", "T_UNKNOWN", NULL }; typedef struct { json_token_type_t ch2token[256]; char escape2char[256]; /* Decoding */ /* encode_buf is only allocated and used when * encode_keep_buffer is set */ strbuf_t encode_buf; int encode_sparse_convert; int encode_sparse_ratio; int encode_sparse_safe; int encode_max_depth; int encode_invalid_numbers; /* 2 => Encode as "null" */ int encode_number_precision; int encode_keep_buffer; int decode_invalid_numbers; int decode_max_depth; } json_config_t; typedef struct { const char *data; const char *ptr; strbuf_t *tmp; /* Temporary storage for strings */ json_config_t *cfg; int current_depth; } json_parse_t; typedef struct { json_token_type_t type; int index; union { const char *string; double number; int boolean; } value; int string_len; } json_token_t; static const char *char2escape[256] = { "\\u0000", "\\u0001", "\\u0002", "\\u0003", "\\u0004", "\\u0005", "\\u0006", "\\u0007", "\\b", "\\t", "\\n", "\\u000b", "\\f", "\\r", "\\u000e", "\\u000f", "\\u0010", "\\u0011", "\\u0012", "\\u0013", "\\u0014", "\\u0015", "\\u0016", "\\u0017", "\\u0018", "\\u0019", "\\u001a", "\\u001b", "\\u001c", "\\u001d", "\\u001e", "\\u001f", NULL, NULL, "\\\"", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\/", NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\\\", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, "\\u007f", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; /* ===== CONFIGURATION ===== */ static json_config_t *json_fetch_config(lua_State *l) { json_config_t *cfg; cfg = lua_touserdata(l, lua_upvalueindex(1)); if (!cfg) luaL_error(l, "BUG: Unable to fetch CJSON configuration"); return cfg; } /* Ensure the correct number of arguments have been provided. * Pad with nil to allow other functions to simply check arg[i] * to find whether an argument was provided */ static json_config_t *json_arg_init(lua_State *l, int args) { luaL_argcheck(l, lua_gettop(l) <= args, args + 1, "found too many arguments"); while (lua_gettop(l) < args) lua_pushnil(l); return json_fetch_config(l); } /* Process integer options for configuration functions */ static int json_integer_option(lua_State *l, int optindex, int *setting, int min, int max) { char errmsg[64]; int value; if (!lua_isnil(l, optindex)) { value = luaL_checkinteger(l, optindex); snprintf(errmsg, sizeof(errmsg), "expected integer between %d and %d", min, max); luaL_argcheck(l, min <= value && value <= max, 1, errmsg); *setting = value; } lua_pushinteger(l, *setting); return 1; } /* Process enumerated arguments for a configuration function */ static int json_enum_option(lua_State *l, int optindex, int *setting, const char **options, int bool_true) { static const char *bool_options[] = { "off", "on", NULL }; if (!options) { options = bool_options; bool_true = 1; } if (!lua_isnil(l, optindex)) { if (bool_true && lua_isboolean(l, optindex)) *setting = lua_toboolean(l, optindex) * bool_true; else *setting = luaL_checkoption(l, optindex, NULL, options); } if (bool_true && (*setting == 0 || *setting == bool_true)) lua_pushboolean(l, *setting); else lua_pushstring(l, options[*setting]); return 1; } /* Configures handling of extremely sparse arrays: * convert: Convert extremely sparse arrays into objects? Otherwise error. 
* ratio: 0: always allow sparse; 1: never allow sparse; >1: use ratio * safe: Always use an array when the max index <= safe */ static int json_cfg_encode_sparse_array(lua_State *l) { json_config_t *cfg = json_arg_init(l, 3); json_enum_option(l, 1, &cfg->encode_sparse_convert, NULL, 1); json_integer_option(l, 2, &cfg->encode_sparse_ratio, 0, INT_MAX); json_integer_option(l, 3, &cfg->encode_sparse_safe, 0, INT_MAX); return 3; } /* Configures the maximum number of nested arrays/objects allowed when * encoding */ static int json_cfg_encode_max_depth(lua_State *l) { json_config_t *cfg = json_arg_init(l, 1); return json_integer_option(l, 1, &cfg->encode_max_depth, 1, INT_MAX); } /* Configures the maximum number of nested arrays/objects allowed when * encoding */ static int json_cfg_decode_max_depth(lua_State *l) { json_config_t *cfg = json_arg_init(l, 1); return json_integer_option(l, 1, &cfg->decode_max_depth, 1, INT_MAX); } /* Configures number precision when converting doubles to text */ static int json_cfg_encode_number_precision(lua_State *l) { json_config_t *cfg = json_arg_init(l, 1); return json_integer_option(l, 1, &cfg->encode_number_precision, 1, 14); } /* Configures JSON encoding buffer persistence */ static int json_cfg_encode_keep_buffer(lua_State *l) { json_config_t *cfg = json_arg_init(l, 1); int old_value; old_value = cfg->encode_keep_buffer; json_enum_option(l, 1, &cfg->encode_keep_buffer, NULL, 1); /* Init / free the buffer if the setting has changed */ if (old_value ^ cfg->encode_keep_buffer) { if (cfg->encode_keep_buffer) strbuf_init(&cfg->encode_buf, 0); else strbuf_free(&cfg->encode_buf); } return 1; } #if defined(DISABLE_INVALID_NUMBERS) && !defined(USE_INTERNAL_FPCONV) void json_verify_invalid_number_setting(lua_State *l, int *setting) { if (*setting == 1) { *setting = 0; luaL_error(l, "Infinity, NaN, and/or hexadecimal numbers are not supported."); } } #else #define json_verify_invalid_number_setting(l, s) do { } while(0) #endif static int json_cfg_encode_invalid_numbers(lua_State *l) { static const char *options[] = { "off", "on", "null", NULL }; json_config_t *cfg = json_arg_init(l, 1); json_enum_option(l, 1, &cfg->encode_invalid_numbers, options, 1); json_verify_invalid_number_setting(l, &cfg->encode_invalid_numbers); return 1; } static int json_cfg_decode_invalid_numbers(lua_State *l) { json_config_t *cfg = json_arg_init(l, 1); json_enum_option(l, 1, &cfg->decode_invalid_numbers, NULL, 1); json_verify_invalid_number_setting(l, &cfg->encode_invalid_numbers); return 1; } static int json_destroy_config(lua_State *l) { json_config_t *cfg; cfg = lua_touserdata(l, 1); if (cfg) strbuf_free(&cfg->encode_buf); cfg = NULL; return 0; } static void json_create_config(lua_State *l) { json_config_t *cfg; int i; cfg = lua_newuserdata(l, sizeof(*cfg)); /* Create GC method to clean up strbuf */ lua_newtable(l); lua_pushcfunction(l, json_destroy_config); lua_setfield(l, -2, "__gc"); lua_setmetatable(l, -2); cfg->encode_sparse_convert = DEFAULT_SPARSE_CONVERT; cfg->encode_sparse_ratio = DEFAULT_SPARSE_RATIO; cfg->encode_sparse_safe = DEFAULT_SPARSE_SAFE; cfg->encode_max_depth = DEFAULT_ENCODE_MAX_DEPTH; cfg->decode_max_depth = DEFAULT_DECODE_MAX_DEPTH; cfg->encode_invalid_numbers = DEFAULT_ENCODE_INVALID_NUMBERS; cfg->decode_invalid_numbers = DEFAULT_DECODE_INVALID_NUMBERS; cfg->encode_keep_buffer = DEFAULT_ENCODE_KEEP_BUFFER; cfg->encode_number_precision = DEFAULT_ENCODE_NUMBER_PRECISION; #if DEFAULT_ENCODE_KEEP_BUFFER > 0 strbuf_init(&cfg->encode_buf, 0); #endif /* Decoding init */ 
/* Tag all characters as an error */ for (i = 0; i < 256; i++) cfg->ch2token[i] = T_ERROR; /* Set tokens that require no further processing */ cfg->ch2token['{'] = T_OBJ_BEGIN; cfg->ch2token['}'] = T_OBJ_END; cfg->ch2token['['] = T_ARR_BEGIN; cfg->ch2token[']'] = T_ARR_END; cfg->ch2token[','] = T_COMMA; cfg->ch2token[':'] = T_COLON; cfg->ch2token['\0'] = T_END; cfg->ch2token[' '] = T_WHITESPACE; cfg->ch2token['\t'] = T_WHITESPACE; cfg->ch2token['\n'] = T_WHITESPACE; cfg->ch2token['\r'] = T_WHITESPACE; /* Update characters that require further processing */ cfg->ch2token['f'] = T_UNKNOWN; /* false? */ cfg->ch2token['i'] = T_UNKNOWN; /* inf, ininity? */ cfg->ch2token['I'] = T_UNKNOWN; cfg->ch2token['n'] = T_UNKNOWN; /* null, nan? */ cfg->ch2token['N'] = T_UNKNOWN; cfg->ch2token['t'] = T_UNKNOWN; /* true? */ cfg->ch2token['"'] = T_UNKNOWN; /* string? */ cfg->ch2token['+'] = T_UNKNOWN; /* number? */ cfg->ch2token['-'] = T_UNKNOWN; for (i = 0; i < 10; i++) cfg->ch2token['0' + i] = T_UNKNOWN; /* Lookup table for parsing escape characters */ for (i = 0; i < 256; i++) cfg->escape2char[i] = 0; /* String error */ cfg->escape2char['"'] = '"'; cfg->escape2char['\\'] = '\\'; cfg->escape2char['/'] = '/'; cfg->escape2char['b'] = '\b'; cfg->escape2char['t'] = '\t'; cfg->escape2char['n'] = '\n'; cfg->escape2char['f'] = '\f'; cfg->escape2char['r'] = '\r'; cfg->escape2char['u'] = 'u'; /* Unicode parsing required */ } /* ===== ENCODING ===== */ static void json_encode_exception(lua_State *l, json_config_t *cfg, strbuf_t *json, int lindex, const char *reason) { if (!cfg->encode_keep_buffer) strbuf_free(json); luaL_error(l, "Cannot serialise %s: %s", lua_typename(l, lua_type(l, lindex)), reason); } /* json_append_string args: * - lua_State * - JSON strbuf * - String (Lua stack index) * * Returns nothing. Doesn't remove string from Lua stack */ static void json_append_string(lua_State *l, strbuf_t *json, int lindex) { const char *escstr; int i; const char *str; size_t len; str = lua_tolstring(l, lindex, &len); /* Worst case is len * 6 (all unicode escapes). * This buffer is reused constantly for small strings * If there are any excess pages, they won't be hit anyway. * This gains ~5% speedup. */ strbuf_ensure_empty_length(json, len * 6 + 2); strbuf_append_char_unsafe(json, '\"'); for (i = 0; i < len; i++) { escstr = char2escape[(unsigned char)str[i]]; if (escstr) strbuf_append_string(json, escstr); else strbuf_append_char_unsafe(json, str[i]); } strbuf_append_char_unsafe(json, '\"'); } /* Find the size of the array on the top of the Lua stack * -1 object (not a pure array) * >=0 elements in array */ static int lua_array_length(lua_State *l, json_config_t *cfg, strbuf_t *json) { double k; int max; int items; max = 0; items = 0; lua_pushnil(l); /* table, startkey */ while (lua_next(l, -2) != 0) { /* table, key, value */ if (lua_type(l, -2) == LUA_TNUMBER && (k = lua_tonumber(l, -2))) { /* Integer >= 1 ? 
*/ if (floor(k) == k && k >= 1) { if (k > max) max = k; items++; lua_pop(l, 1); continue; } } /* Must not be an array (non integer key) */ lua_pop(l, 2); return -1; } /* Encode excessively sparse arrays as objects (if enabled) */ if (cfg->encode_sparse_ratio > 0 && max > items * cfg->encode_sparse_ratio && max > cfg->encode_sparse_safe) { if (!cfg->encode_sparse_convert) json_encode_exception(l, cfg, json, -1, "excessively sparse array"); return -1; } return max; } static void json_check_encode_depth(lua_State *l, json_config_t *cfg, int current_depth, strbuf_t *json) { /* Ensure there are enough slots free to traverse a table (key, * value) and push a string for a potential error message. * * Unlike "decode", the key and value are still on the stack when * lua_checkstack() is called. Hence an extra slot for luaL_error() * below is required just in case the next check to lua_checkstack() * fails. * * While this won't cause a crash due to the EXTRA_STACK reserve * slots, it would still be an improper use of the API. */ if (current_depth <= cfg->encode_max_depth && lua_checkstack(l, 3)) return; if (!cfg->encode_keep_buffer) strbuf_free(json); luaL_error(l, "Cannot serialise, excessive nesting (%d)", current_depth); } static void json_append_data(lua_State *l, json_config_t *cfg, int current_depth, strbuf_t *json); /* json_append_array args: * - lua_State * - JSON strbuf * - Size of passwd Lua array (top of stack) */ static void json_append_array(lua_State *l, json_config_t *cfg, int current_depth, strbuf_t *json, int array_length) { int comma, i; strbuf_append_char(json, '['); comma = 0; for (i = 1; i <= array_length; i++) { if (comma) strbuf_append_char(json, ','); else comma = 1; lua_rawgeti(l, -1, i); json_append_data(l, cfg, current_depth, json); lua_pop(l, 1); } strbuf_append_char(json, ']'); } static void json_append_number(lua_State *l, json_config_t *cfg, strbuf_t *json, int lindex) { double num = lua_tonumber(l, lindex); int len; if (cfg->encode_invalid_numbers == 0) { /* Prevent encoding invalid numbers */ if (isinf(num) || isnan(num)) json_encode_exception(l, cfg, json, lindex, "must not be NaN or Inf"); } else if (cfg->encode_invalid_numbers == 1) { /* Encode invalid numbers, but handle "nan" separately * since some platforms may encode as "-nan". */ if (isnan(num)) { strbuf_append_mem(json, "nan", 3); return; } } else { /* Encode invalid numbers as "null" */ if (isinf(num) || isnan(num)) { strbuf_append_mem(json, "null", 4); return; } } strbuf_ensure_empty_length(json, FPCONV_G_FMT_BUFSIZE); len = fpconv_g_fmt(strbuf_empty_ptr(json), num, cfg->encode_number_precision); strbuf_extend_length(json, len); } static void json_append_object(lua_State *l, json_config_t *cfg, int current_depth, strbuf_t *json) { int comma, keytype; /* Object */ strbuf_append_char(json, '{'); lua_pushnil(l); /* table, startkey */ comma = 0; while (lua_next(l, -2) != 0) { if (comma) strbuf_append_char(json, ','); else comma = 1; /* table, key, value */ keytype = lua_type(l, -2); if (keytype == LUA_TNUMBER) { strbuf_append_char(json, '"'); json_append_number(l, cfg, json, -2); strbuf_append_mem(json, "\":", 2); } else if (keytype == LUA_TSTRING) { json_append_string(l, json, -2); strbuf_append_char(json, ':'); } else { json_encode_exception(l, cfg, json, -2, "table key must be a number or string"); /* never returns */ } /* table, key, value */ json_append_data(l, cfg, current_depth, json); lua_pop(l, 1); /* table, key */ } strbuf_append_char(json, '}'); } /* Serialise Lua data into JSON string. 
*/ static void json_append_data(lua_State *l, json_config_t *cfg, int current_depth, strbuf_t *json) { int len; switch (lua_type(l, -1)) { case LUA_TSTRING: json_append_string(l, json, -1); break; case LUA_TNUMBER: json_append_number(l, cfg, json, -1); break; case LUA_TBOOLEAN: if (lua_toboolean(l, -1)) strbuf_append_mem(json, "true", 4); else strbuf_append_mem(json, "false", 5); break; case LUA_TTABLE: current_depth++; json_check_encode_depth(l, cfg, current_depth, json); len = lua_array_length(l, cfg, json); if (len > 0) json_append_array(l, cfg, current_depth, json, len); else json_append_object(l, cfg, current_depth, json); break; case LUA_TNIL: strbuf_append_mem(json, "null", 4); break; case LUA_TLIGHTUSERDATA: if (lua_touserdata(l, -1) == NULL) { strbuf_append_mem(json, "null", 4); break; } default: /* Remaining types (LUA_TFUNCTION, LUA_TUSERDATA, LUA_TTHREAD, * and LUA_TLIGHTUSERDATA) cannot be serialised */ json_encode_exception(l, cfg, json, -1, "type not supported"); /* never returns */ } } static int json_encode(lua_State *l) { json_config_t *cfg = json_fetch_config(l); strbuf_t local_encode_buf; strbuf_t *encode_buf; char *json; int len; luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); if (!cfg->encode_keep_buffer) { /* Use private buffer */ encode_buf = &local_encode_buf; strbuf_init(encode_buf, 0); } else { /* Reuse existing buffer */ encode_buf = &cfg->encode_buf; strbuf_reset(encode_buf); } json_append_data(l, cfg, 0, encode_buf); json = strbuf_string(encode_buf, &len); lua_pushlstring(l, json, len); if (!cfg->encode_keep_buffer) strbuf_free(encode_buf); return 1; } /* ===== DECODING ===== */ static void json_process_value(lua_State *l, json_parse_t *json, json_token_t *token); static int hexdigit2int(char hex) { if ('0' <= hex && hex <= '9') return hex - '0'; /* Force lowercase */ hex |= 0x20; if ('a' <= hex && hex <= 'f') return 10 + hex - 'a'; return -1; } static int decode_hex4(const char *hex) { int digit[4]; int i; /* Convert ASCII hex digit to numeric digit * Note: this returns an error for invalid hex digits, including * NULL */ for (i = 0; i < 4; i++) { digit[i] = hexdigit2int(hex[i]); if (digit[i] < 0) { return -1; } } return (digit[0] << 12) + (digit[1] << 8) + (digit[2] << 4) + digit[3]; } /* Converts a Unicode codepoint to UTF-8. * Returns UTF-8 string length, and up to 4 bytes in *utf8 */ static int codepoint_to_utf8(char *utf8, int codepoint) { /* 0xxxxxxx */ if (codepoint <= 0x7F) { utf8[0] = codepoint; return 1; } /* 110xxxxx 10xxxxxx */ if (codepoint <= 0x7FF) { utf8[0] = (codepoint >> 6) | 0xC0; utf8[1] = (codepoint & 0x3F) | 0x80; return 2; } /* 1110xxxx 10xxxxxx 10xxxxxx */ if (codepoint <= 0xFFFF) { utf8[0] = (codepoint >> 12) | 0xE0; utf8[1] = ((codepoint >> 6) & 0x3F) | 0x80; utf8[2] = (codepoint & 0x3F) | 0x80; return 3; } /* 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */ if (codepoint <= 0x1FFFFF) { utf8[0] = (codepoint >> 18) | 0xF0; utf8[1] = ((codepoint >> 12) & 0x3F) | 0x80; utf8[2] = ((codepoint >> 6) & 0x3F) | 0x80; utf8[3] = (codepoint & 0x3F) | 0x80; return 4; } return 0; } /* Called when index pointing to beginning of UTF-16 code escape: \uXXXX * \u is guaranteed to exist, but the remaining hex characters may be * missing. * Translate to UTF-8 and append to temporary token string. * Must advance index to the next character to be processed. 
* Returns: 0 success * -1 error */ static int json_append_unicode_escape(json_parse_t *json) { char utf8[4]; /* Surrogate pairs require 4 UTF-8 bytes */ int codepoint; int surrogate_low; int len; int escape_len = 6; /* Fetch UTF-16 code unit */ codepoint = decode_hex4(json->ptr + 2); if (codepoint < 0) return -1; /* UTF-16 surrogate pairs take the following 2 byte form: * 11011 x yyyyyyyyyy * When x = 0: y is the high 10 bits of the codepoint * x = 1: y is the low 10 bits of the codepoint * * Check for a surrogate pair (high or low) */ if ((codepoint & 0xF800) == 0xD800) { /* Error if the 1st surrogate is not high */ if (codepoint & 0x400) return -1; /* Ensure the next code is a unicode escape */ if (*(json->ptr + escape_len) != '\\' || *(json->ptr + escape_len + 1) != 'u') { return -1; } /* Fetch the next codepoint */ surrogate_low = decode_hex4(json->ptr + 2 + escape_len); if (surrogate_low < 0) return -1; /* Error if the 2nd code is not a low surrogate */ if ((surrogate_low & 0xFC00) != 0xDC00) return -1; /* Calculate Unicode codepoint */ codepoint = (codepoint & 0x3FF) << 10; surrogate_low &= 0x3FF; codepoint = (codepoint | surrogate_low) + 0x10000; escape_len = 12; } /* Convert codepoint to UTF-8 */ len = codepoint_to_utf8(utf8, codepoint); if (!len) return -1; /* Append bytes and advance parse index */ strbuf_append_mem_unsafe(json->tmp, utf8, len); json->ptr += escape_len; return 0; } static void json_set_token_error(json_token_t *token, json_parse_t *json, const char *errtype) { token->type = T_ERROR; token->index = json->ptr - json->data; token->value.string = errtype; } static void json_next_string_token(json_parse_t *json, json_token_t *token) { char *escape2char = json->cfg->escape2char; char ch; /* Caller must ensure a string is next */ assert(*json->ptr == '"'); /* Skip " */ json->ptr++; /* json->tmp is the temporary strbuf used to accumulate the * decoded string value. * json->tmp is sized to handle JSON containing only a string value. */ strbuf_reset(json->tmp); while ((ch = *json->ptr) != '"') { if (!ch) { /* Premature end of the string */ json_set_token_error(token, json, "unexpected end of string"); return; } /* Handle escapes */ if (ch == '\\') { /* Fetch escape character */ ch = *(json->ptr + 1); /* Translate escape code and append to tmp string */ ch = escape2char[(unsigned char)ch]; if (ch == 'u') { if (json_append_unicode_escape(json) == 0) continue; json_set_token_error(token, json, "invalid unicode escape code"); return; } if (!ch) { json_set_token_error(token, json, "invalid escape code"); return; } /* Skip '\' */ json->ptr++; } /* Append normal character or translated single character * Unicode escapes are handled above */ strbuf_append_char_unsafe(json->tmp, ch); json->ptr++; } json->ptr++; /* Eat final quote (") */ strbuf_ensure_null(json->tmp); token->type = T_STRING; token->value.string = strbuf_string(json->tmp, &token->string_len); } /* JSON numbers should take the following form: * -?(0|[1-9]|[1-9][0-9]+)(.[0-9]+)?([eE][-+]?[0-9]+)? * * json_next_number_token() uses strtod() which allows other forms: * - numbers starting with '+' * - NaN, -NaN, infinity, -infinity * - hexadecimal numbers * - numbers with leading zeros * * json_is_invalid_number() detects "numbers" which may pass strtod()'s * error checking, but should not be allowed with strict JSON. * * json_is_invalid_number() may pass numbers which cause strtod() * to generate an error. 
*/ static int json_is_invalid_number(json_parse_t *json) { const char *p = json->ptr; /* Reject numbers starting with + */ if (*p == '+') return 1; /* Skip minus sign if it exists */ if (*p == '-') p++; /* Reject numbers starting with 0x, or leading zeros */ if (*p == '0') { int ch2 = *(p + 1); if ((ch2 | 0x20) == 'x' || /* Hex */ ('0' <= ch2 && ch2 <= '9')) /* Leading zero */ return 1; return 0; } else if (*p <= '9') { return 0; /* Ordinary number */ } /* Reject inf/nan */ if (!strncasecmp(p, "inf", 3)) return 1; if (!strncasecmp(p, "nan", 3)) return 1; /* Pass all other numbers which may still be invalid, but * strtod() will catch them. */ return 0; } static void json_next_number_token(json_parse_t *json, json_token_t *token) { char *endptr; token->type = T_NUMBER; token->value.number = fpconv_strtod(json->ptr, &endptr); if (json->ptr == endptr) json_set_token_error(token, json, "invalid number"); else json->ptr = endptr; /* Skip the processed number */ return; } /* Fills in the token struct. * T_STRING will return a pointer to the json_parse_t temporary string * T_ERROR will leave the json->ptr pointer at the error. */ static void json_next_token(json_parse_t *json, json_token_t *token) { const json_token_type_t *ch2token = json->cfg->ch2token; int ch; /* Eat whitespace. */ while (1) { ch = (unsigned char)*(json->ptr); token->type = ch2token[ch]; if (token->type != T_WHITESPACE) break; json->ptr++; } /* Store location of new token. Required when throwing errors * for unexpected tokens (syntax errors). */ token->index = json->ptr - json->data; /* Don't advance the pointer for an error or the end */ if (token->type == T_ERROR) { json_set_token_error(token, json, "invalid token"); return; } if (token->type == T_END) { return; } /* Found a known single character token, advance index and return */ if (token->type != T_UNKNOWN) { json->ptr++; return; } /* Process characters which triggered T_UNKNOWN * * Must use strncmp() to match the front of the JSON string. * JSON identifier must be lowercase. * When strict_numbers if disabled, either case is allowed for * Infinity/NaN (since we are no longer following the spec..) */ if (ch == '"') { json_next_string_token(json, token); return; } else if (ch == '-' || ('0' <= ch && ch <= '9')) { if (!json->cfg->decode_invalid_numbers && json_is_invalid_number(json)) { json_set_token_error(token, json, "invalid number"); return; } json_next_number_token(json, token); return; } else if (!strncmp(json->ptr, "true", 4)) { token->type = T_BOOLEAN; token->value.boolean = 1; json->ptr += 4; return; } else if (!strncmp(json->ptr, "false", 5)) { token->type = T_BOOLEAN; token->value.boolean = 0; json->ptr += 5; return; } else if (!strncmp(json->ptr, "null", 4)) { token->type = T_NULL; json->ptr += 4; return; } else if (json->cfg->decode_invalid_numbers && json_is_invalid_number(json)) { /* When decode_invalid_numbers is enabled, only attempt to process * numbers we know are invalid JSON (Inf, NaN, hex) * This is required to generate an appropriate token error, * otherwise all bad tokens will register as "invalid number" */ json_next_number_token(json, token); return; } /* Token starts with t/f/n but isn't recognised above. */ json_set_token_error(token, json, "invalid token"); } /* This function does not return. * DO NOT CALL WITH DYNAMIC MEMORY ALLOCATED. * The only supported exception is the temporary parser string * json->tmp struct. * json and token should exist on the stack somewhere. 
* luaL_error() will long_jmp and release the stack */ static void json_throw_parse_error(lua_State *l, json_parse_t *json, const char *exp, json_token_t *token) { const char *found; strbuf_free(json->tmp); if (token->type == T_ERROR) found = token->value.string; else found = json_token_type_name[token->type]; /* Note: token->index is 0 based, display starting from 1 */ luaL_error(l, "Expected %s but found %s at character %d", exp, found, token->index + 1); } static inline void json_decode_ascend(json_parse_t *json) { json->current_depth--; } static void json_decode_descend(lua_State *l, json_parse_t *json, int slots) { json->current_depth++; if (json->current_depth <= json->cfg->decode_max_depth && lua_checkstack(l, slots)) { return; } strbuf_free(json->tmp); luaL_error(l, "Found too many nested data structures (%d) at character %d", json->current_depth, json->ptr - json->data); } static void json_parse_object_context(lua_State *l, json_parse_t *json) { json_token_t token; /* 3 slots required: * .., table, key, value */ json_decode_descend(l, json, 3); lua_newtable(l); json_next_token(json, &token); /* Handle empty objects */ if (token.type == T_OBJ_END) { json_decode_ascend(json); return; } while (1) { if (token.type != T_STRING) json_throw_parse_error(l, json, "object key string", &token); /* Push key */ lua_pushlstring(l, token.value.string, token.string_len); json_next_token(json, &token); if (token.type != T_COLON) json_throw_parse_error(l, json, "colon", &token); /* Fetch value */ json_next_token(json, &token); json_process_value(l, json, &token); /* Set key = value */ lua_rawset(l, -3); json_next_token(json, &token); if (token.type == T_OBJ_END) { json_decode_ascend(json); return; } if (token.type != T_COMMA) json_throw_parse_error(l, json, "comma or object end", &token); json_next_token(json, &token); } } /* Handle the array context */ static void json_parse_array_context(lua_State *l, json_parse_t *json) { json_token_t token; int i; /* 2 slots required: * .., table, value */ json_decode_descend(l, json, 2); lua_newtable(l); json_next_token(json, &token); /* Handle empty arrays */ if (token.type == T_ARR_END) { json_decode_ascend(json); return; } for (i = 1; ; i++) { json_process_value(l, json, &token); lua_rawseti(l, -2, i); /* arr[i] = value */ json_next_token(json, &token); if (token.type == T_ARR_END) { json_decode_ascend(json); return; } if (token.type != T_COMMA) json_throw_parse_error(l, json, "comma or array end", &token); json_next_token(json, &token); } } /* Handle the "value" context */ static void json_process_value(lua_State *l, json_parse_t *json, json_token_t *token) { switch (token->type) { case T_STRING: lua_pushlstring(l, token->value.string, token->string_len); break;; case T_NUMBER: lua_pushnumber(l, token->value.number); break;; case T_BOOLEAN: lua_pushboolean(l, token->value.boolean); break;; case T_OBJ_BEGIN: json_parse_object_context(l, json); break;; case T_ARR_BEGIN: json_parse_array_context(l, json); break;; case T_NULL: /* In Lua, setting "t[k] = nil" will delete k from the table. 
* Hence a NULL pointer lightuserdata object is used instead */ lua_pushlightuserdata(l, NULL); break;; default: json_throw_parse_error(l, json, "value", token); } } static int json_decode(lua_State *l) { json_parse_t json; json_token_t token; size_t json_len; luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); json.cfg = json_fetch_config(l); json.data = luaL_checklstring(l, 1, &json_len); json.current_depth = 0; json.ptr = json.data; /* Detect Unicode other than UTF-8 (see RFC 4627, Sec 3) * * CJSON can support any simple data type, hence only the first * character is guaranteed to be ASCII (at worst: '"'). This is * still enough to detect whether the wrong encoding is in use. */ if (json_len >= 2 && (!json.data[0] || !json.data[1])) luaL_error(l, "JSON parser does not support UTF-16 or UTF-32"); /* Ensure the temporary buffer can hold the entire string. * This means we no longer need to do length checks since the decoded * string must be smaller than the entire json string */ json.tmp = strbuf_new(json_len); json_next_token(&json, &token); json_process_value(l, &json, &token); /* Ensure there is no more input left */ json_next_token(&json, &token); if (token.type != T_END) json_throw_parse_error(l, &json, "the end", &token); strbuf_free(json.tmp); return 1; } /* ===== INITIALISATION ===== */ #if !defined(LUA_VERSION_NUM) || LUA_VERSION_NUM < 502 /* Compatibility for Lua 5.1. * * luaL_setfuncs() is used to create a module table where the functions have * json_config_t as their first upvalue. Code borrowed from Lua 5.2 source. */ static void luaL_setfuncs (lua_State *l, const luaL_Reg *reg, int nup) { int i; luaL_checkstack(l, nup, "too many upvalues"); for (; reg->name != NULL; reg++) { /* fill the table with given functions */ for (i = 0; i < nup; i++) /* copy upvalues to the top */ lua_pushvalue(l, -nup); lua_pushcclosure(l, reg->func, nup); /* closure with those upvalues */ lua_setfield(l, -(nup + 2), reg->name); } lua_pop(l, nup); /* remove upvalues */ } #endif /* Call target function in protected mode with all supplied args. * Assumes target function only returns a single non-nil value. 
* Convert and return thrown errors as: nil, "error message" */ static int json_protect_conversion(lua_State *l) { int err; /* Deliberately throw an error for invalid arguments */ luaL_argcheck(l, lua_gettop(l) == 1, 1, "expected 1 argument"); /* pcall() the function stored as upvalue(1) */ lua_pushvalue(l, lua_upvalueindex(1)); lua_insert(l, 1); err = lua_pcall(l, 1, 1, 0); if (!err) return 1; if (err == LUA_ERRRUN) { lua_pushnil(l); lua_insert(l, -2); return 2; } /* Since we are not using a custom error handler, the only remaining * errors are memory related */ return luaL_error(l, "Memory allocation error in CJSON protected call"); } /* Return cjson module table */ static int lua_cjson_new(lua_State *l) { luaL_Reg reg[] = { { "encode", json_encode }, { "decode", json_decode }, { "encode_sparse_array", json_cfg_encode_sparse_array }, { "encode_max_depth", json_cfg_encode_max_depth }, { "decode_max_depth", json_cfg_decode_max_depth }, { "encode_number_precision", json_cfg_encode_number_precision }, { "encode_keep_buffer", json_cfg_encode_keep_buffer }, { "encode_invalid_numbers", json_cfg_encode_invalid_numbers }, { "decode_invalid_numbers", json_cfg_decode_invalid_numbers }, { "new", lua_cjson_new }, { NULL, NULL } }; /* Initialise number conversions */ fpconv_init(); /* cjson module table */ lua_newtable(l); /* Register functions with config data as upvalue */ json_create_config(l); luaL_setfuncs(l, reg, 1); /* Set cjson.null */ lua_pushlightuserdata(l, NULL); lua_setfield(l, -2, "null"); /* Set module name / version fields */ lua_pushliteral(l, CJSON_MODNAME); lua_setfield(l, -2, "_NAME"); lua_pushliteral(l, CJSON_VERSION); lua_setfield(l, -2, "_VERSION"); return 1; } /* Return cjson.safe module table */ static int lua_cjson_safe_new(lua_State *l) { const char *func[] = { "decode", "encode", NULL }; int i; lua_cjson_new(l); /* Fix new() method */ lua_pushcfunction(l, lua_cjson_safe_new); lua_setfield(l, -2, "new"); for (i = 0; func[i]; i++) { lua_getfield(l, -1, func[i]); lua_pushcclosure(l, json_protect_conversion, 1); lua_setfield(l, -2, func[i]); } return 1; } int luaopen_cjson(lua_State *l) { lua_cjson_new(l); #ifdef ENABLE_CJSON_GLOBAL /* Register a global "cjson" table. */ lua_pushvalue(l, -1); lua_setglobal(l, CJSON_MODNAME); #endif /* Return cjson table */ return 1; } int luaopen_cjson_safe(lua_State *l) { lua_cjson_safe_new(l); /* Return cjson.safe table */ return 1; } /* vi:ai et sw=4 ts=4: */ lua-cjson-2.1.0+dfsg/manual.html000066400000000000000000001353701201567770400165110ustar00rootroot00000000000000 Lua CJSON 2.1.0 Manual

1. Overview

The Lua CJSON module provides JSON support for Lua.

Features
  • Fast, standards compliant encoding/parsing routines

  • Full support for JSON with UTF-8, including decoding surrogate pairs

  • Optional run-time support for common exceptions to the JSON specification (infinity, NaN,..)

  • No dependencies on other libraries

Caveats
  • UTF-16 and UTF-32 are not supported

Lua CJSON is covered by the MIT license. Review the file LICENSE for details.

The latest version of this software is available from the Lua CJSON website.

Feel free to email me if you have any patches, suggestions, or comments.

2. Installation

Lua CJSON requires either Lua 5.1, Lua 5.2, or LuaJIT to build.

The build method can be selected from 4 options:

Make

Unix (including Linux, BSD, Mac OSX & Solaris), Windows

CMake

Unix, Windows

RPM

Linux

LuaRocks

Unix, Windows

2.1. Make

The included Makefile has generic settings.

First, review and update the included makefile to suit your platform (if required).

Next, build and install the module:

make install

Or install manually into your Lua module directory:

make
cp cjson.so $LUA_MODULE_DIRECTORY

2.2. CMake

CMake can generate build configuration for many different platforms (including Unix and Windows).

First, generate the makefile for your platform using CMake. If CMake is unable to find Lua, manually set the LUA_DIR environment variable to the base prefix of your Lua 5.1 installation.

While cmake is used in the example below, ccmake or cmake-gui may be used to present an interface for changing the default build options.

mkdir build
cd build
# Optional: export LUA_DIR=$LUA51_PREFIX
cmake ..

Next, build and install the module:

make install
# Or:
make
cp cjson.so $LUA_MODULE_DIRECTORY

Review the CMake documentation for further details.

2.3. RPM

Linux distributions using RPM can create a package via the included RPM spec file. Ensure the rpm-build package (or similar) has been installed.

Build and install the module via RPM:

rpmbuild -tb lua-cjson-2.1.0.tar.gz
rpm -Uvh $LUA_CJSON_RPM

2.4. LuaRocks

LuaRocks can be used to install and manage Lua modules on a wide range of platforms (including Windows).

First, extract the Lua CJSON source package.

Next, install the module:

cd lua-cjson-2.1.0
luarocks make
Note
LuaRocks does not support platform specific configuration for Solaris. On Solaris, you may need to manually uncomment USE_INTERNAL_ISINF in the rockspec before building this module.

Review the LuaRocks documentation for further details.

2.5. Build Options (#define)

Lua CJSON offers several #define build options to address portability issues, and enable non-default features. Some build methods may automatically set platform specific options if required. Other features should be enabled manually.

USE_INTERNAL_ISINF

Workaround for Solaris platforms missing isinf.

DISABLE_INVALID_NUMBERS

Recommended on platforms where strtod / sprintf are not POSIX compliant (eg, Windows MinGW). Prevents cjson.encode_invalid_numbers and cjson.decode_invalid_numbers from being enabled. However, cjson.encode_invalid_numbers may still be set to "null". When using the Lua CJSON built-in floating point conversion this option is unnecessary and is ignored.

2.5.1. Built-in floating point conversion

Lua CJSON may be built with David Gay’s floating point conversion routines. This can increase overall performance by up to 50% on some platforms when converting a large amount of numeric data. However, this option reduces portability and is disabled by default.

USE_INTERNAL_FPCONV

Enable internal number conversion routines.

IEEE_BIG_ENDIAN

Must be set on big endian architectures.

MULTIPLE_THREADS

Must be set if Lua CJSON may be used in a multi-threaded application. Requires the pthreads library.

3. API (Functions)

3.1. Synopsis

-- Module instantiation
local cjson = require "cjson"
local cjson2 = cjson.new()
local cjson_safe = require "cjson.safe"

-- Translate Lua value to/from JSON
text = cjson.encode(value)
value = cjson.decode(text)

-- Get and/or set Lua CJSON configuration
setting = cjson.decode_invalid_numbers([setting])
setting = cjson.encode_invalid_numbers([setting])
keep = cjson.encode_keep_buffer([keep])
depth = cjson.encode_max_depth([depth])
depth = cjson.decode_max_depth([depth])
convert, ratio, safe = cjson.encode_sparse_array([convert[, ratio[, safe]]])

3.2. Module Instantiation

local cjson = require "cjson"
local cjson2 = cjson.new()
local cjson_safe = require "cjson.safe"

Import Lua CJSON via the Lua require function. Lua CJSON does not register a global module table.

The cjson module will throw an error during JSON conversion if any invalid data is encountered. Refer to cjson.encode and cjson.decode for details.

The cjson.safe module behaves identically to the cjson module, except when errors are encountered during JSON conversion. On error, the cjson_safe.encode and cjson_safe.decode functions will return nil followed by the error message.
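
A minimal sketch of handling a conversion error with cjson.safe (the JSON text below is deliberately truncated; the exact error message may differ):
local cjson_safe = require "cjson.safe"
local value, err = cjson_safe.decode('[ 1, 2,')
if not value then
    print("decode failed: " .. err)   -- e.g. "Expected value but found T_END at character 8"
end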

cjson.new can be used to instantiate an independent copy of the Lua CJSON module. The new module has a separate persistent encoding buffer, and default settings.

Lua CJSON can support Lua implementations using multiple preemptive threads within a single Lua state provided the persistent encoding buffer is not shared. This can be achieved by one of the following methods:

  • Disabling the persistent encoding buffer with cjson.encode_keep_buffer

  • Ensuring each thread calls cjson.encode separately (ie, treat cjson.encode as non-reentrant).

  • Using a separate cjson module table per preemptive thread (cjson.new)

Note
Lua CJSON uses strtod and snprintf to perform numeric conversion as they are usually well supported, fast and bug free. However, these functions require a workaround for JSON encoding/parsing under locales using a comma decimal separator. Lua CJSON detects the current locale during instantiation to determine and automatically implement the workaround if required. Lua CJSON should be reinitialised via cjson.new if the locale of the current process changes. Using a different locale per thread is not supported.

3.3. decode

value = cjson.decode(json_text)

cjson.decode will deserialise any UTF-8 JSON string into a Lua value or table.

UTF-16 and UTF-32 JSON strings are not supported.

cjson.decode requires that any NULL (ASCII 0) and double quote (ASCII 34) characters are escaped within strings. All escape codes will be decoded and other bytes will be passed transparently. UTF-8 characters are not validated during decoding and should be checked elsewhere if required.

JSON null will be converted to a NULL lightuserdata value. This can be compared with cjson.null for convenience.

By default, numbers incompatible with the JSON specification (infinity, NaN, hexadecimal) can be decoded. This default can be changed with cjson.decode_invalid_numbers.

Example: Decoding
json_text = '[ true, { "foo": "bar" } ]'
value = cjson.decode(json_text)
-- Returns: { true, { foo = "bar" } }
Caution
Care must be taken after decoding JSON objects with numeric keys. Each numeric key will be stored as a Lua string. Any subsequent code assuming type number may break.
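
A brief sketch of this behaviour (note the decoded key is a Lua string, not a number):
value = cjson.decode('{ "10": "ten" }')
-- Returns: { ["10"] = "ten" }
print(value[10])     -- Prints: nil
print(value["10"])   -- Prints: ten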

3.4. decode_invalid_numbers

setting = cjson.decode_invalid_numbers([setting])
-- "setting" must be a boolean. Default: true.

Lua CJSON may generate an error when trying to decode numbers not supported by the JSON specification. Invalid numbers are defined as:

  • infinity

  • not-a-number (NaN)

  • hexadecimal

Available settings:

true

Accept and decode invalid numbers. This is the default setting.

false

Throw an error when invalid numbers are encountered.

The current setting is always returned, and is only updated when an argument is provided.
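
For example, a sketch assuming the module was built without DISABLE_INVALID_NUMBERS and uses the default libc conversion routines:
cjson.decode('[ inf, nan, 0xff ]')   -- Returns a table holding math.huge, nan and 255
cjson.decode_invalid_numbers(false)
cjson.decode('[ inf ]')              -- Raises an error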

3.5. decode_max_depth

depth = cjson.decode_max_depth([depth])
-- "depth" must be a positive integer. Default: 1000.

Lua CJSON will generate an error when parsing deeply nested JSON once the maximum array/object depth has been exceeded. This check prevents unnecessarily complicated JSON from slowing down the application, or crashing the application due to lack of process stack space.

An error may be generated before the depth limit is hit if Lua is unable to allocate more objects on the Lua stack.

By default, Lua CJSON will reject JSON with arrays and/or objects nested more than 1000 levels deep.

The current setting is always returned, and is only updated when an argument is provided.
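
For example (the depth limit below is lowered purely for illustration):
cjson.decode_max_depth(2)
cjson.decode('[ [ 1 ] ]')       -- OK: nested 2 levels deep
cjson.decode('[ [ [ 1 ] ] ]')   -- Raises a "too many nested data structures" error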

3.6. encode

json_text = cjson.encode(value)

cjson.encode will serialise a Lua value into a string containing the JSON representation.

cjson.encode supports the following types:

  • boolean

  • lightuserdata (NULL value only)

  • nil

  • number

  • string

  • table

The remaining Lua types will generate an error:

  • function

  • lightuserdata (non-NULL values)

  • thread

  • userdata

By default, numbers are encoded with 14 significant digits. Refer to cjson.encode_number_precision for details.

Lua CJSON will escape the following characters within each UTF-8 string:

  • Control characters (ASCII 0 - 31)

  • Double quote (ASCII 34)

  • Forward slash (ASCII 47)

  • Backslash (ASCII 92)

  • Delete (ASCII 127)

All other bytes are passed transparently.

Caution

Lua CJSON will successfully encode/decode binary strings, but this is technically not supported by JSON and may not be compatible with other JSON libraries. To ensure the output is valid JSON, applications should ensure all Lua strings passed to cjson.encode are UTF-8.

Base64 is commonly used to encode binary data, since even the most space-efficient UTF-8-safe encoding would only reduce the encoded size by a further ~8%. Lua Base64 routines can be found in the LuaSocket and lbase64 packages.

Lua CJSON uses a heuristic to determine whether to encode a Lua table as a JSON array or an object. A Lua table with only positive integer keys of type number will be encoded as a JSON array. All other tables will be encoded as a JSON object.
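
A brief sketch of the heuristic (object key order is not defined):
cjson.encode({ 10, 20, 30 })    -- Returns: '[10,20,30]'
cjson.encode({ x = 1 })         -- Returns: '{"x":1}'
cjson.encode({ 1, 2, x = 3 })   -- Returns a JSON object: the table is not a pure array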

Lua CJSON does not use metamethods when serialising tables.

  • rawget is used to iterate over Lua arrays

  • next is used to iterate over Lua objects

Lua arrays with missing entries (sparse arrays) may optionally be encoded in several different ways. Refer to cjson.encode_sparse_array for details.

JSON object keys are always strings. Hence cjson.encode only supports table keys which are type number or string. All other types will generate an error.

Note
Standards compliant JSON must be encapsulated in either an object ({}) or an array ([]). If strictly standards compliant JSON is desired, a table must be passed to cjson.encode.
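
For example (whether a bare value is accepted depends on the consuming parser):
cjson.encode("hello")       -- Returns: '"hello"' (a bare value, not strictly compliant)
cjson.encode({ "hello" })   -- Returns: '["hello"]' (strictly standards compliant)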

By default, encoding the following Lua values will generate errors:

  • Numbers incompatible with the JSON specification (infinity, NaN)

  • Tables nested more than 1000 levels deep

  • Excessively sparse Lua arrays

These defaults can be changed with:

  • cjson.encode_invalid_numbers

  • cjson.encode_max_depth

  • cjson.encode_sparse_array

Example: Encoding
value = { true, { foo = "bar" } }
json_text = cjson.encode(value)
-- Returns: '[true,{"foo":"bar"}]'

3.7. encode_invalid_numbers

setting = cjson.encode_invalid_numbers([setting])
-- "setting" must a boolean or "null". Default: false.

Lua CJSON may generate an error when encoding floating point numbers not supported by the JSON specification (invalid numbers):

  • infinity

  • not-a-number (NaN)

Available settings:

true

Allow invalid numbers to be encoded. This will generate non-standard JSON, but this output is supported by some libraries.

"null"

Encode invalid numbers as a JSON null value. This allows infinity and NaN to be encoded into valid JSON.

false

Throw an error when attempting to encode invalid numbers. This is the default setting.

The current setting is always returned, and is only updated when an argument is provided.
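
For example, a sketch assuming the module was built without DISABLE_INVALID_NUMBERS (the exact text produced for infinity is platform dependent):
cjson.encode({ math.huge })        -- Raises an error with the default setting
cjson.encode_invalid_numbers("null")
cjson.encode({ math.huge, 0/0 })   -- Returns: '[null,null]'
cjson.encode_invalid_numbers(true)
cjson.encode({ math.huge })        -- Returns non-standard JSON, typically '[inf]'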

3.8. encode_keep_buffer

keep = cjson.encode_keep_buffer([keep])
-- "keep" must be a boolean. Default: true.

Lua CJSON can reuse the JSON encoding buffer to improve performance.

Available settings:

true

The buffer will grow to the largest size required and is not freed until the Lua CJSON module is garbage collected. This is the default setting.

false

Free the encode buffer after each call to cjson.encode.

The current setting is always returned, and is only updated when an argument is provided.
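
For example, an application that encodes infrequently may prefer to trade a little performance for a smaller idle memory footprint (a judgement call, not a requirement):
cjson.encode_keep_buffer(false)
json_text = cjson.encode({ 1, 2, 3 })   -- The encoding buffer is freed after each call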

3.9. encode_max_depth

depth = cjson.encode_max_depth([depth])
-- "depth" must be a positive integer. Default: 1000.

Once the maximum table depth has been exceeded Lua CJSON will generate an error. This prevents a deeply nested or recursive data structure from crashing the application.

By default, Lua CJSON will generate an error when trying to encode data structures with more than 1000 nested tables.

The current setting is always returned, and is only updated when an argument is provided.

Example: Recursive Lua table
a = {}; a[1] = a
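
Attempting to encode this table fails once the nesting limit is exceeded, for example:
cjson.encode(a)   -- Raises an error similar to: Cannot serialise, excessive nesting (1001)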

3.10. encode_number_precision

precision = cjson.encode_number_precision([precision])
-- "precision" must be an integer between 1 and 14. Default: 14.

The number of significant digits returned by Lua CJSON when encoding numbers can be changed to balance accuracy against performance. For data structures containing many numbers, setting cjson.encode_number_precision to a smaller integer, for example 3, can improve encoding performance by up to 50%.

By default, Lua CJSON will output 14 significant digits when converting a number to text.

The current setting is always returned, and is only updated when an argument is provided.
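
For example, a sketch assuming the default libc number conversion routines:
cjson.encode(1/3)                  -- Returns: '0.33333333333333'
cjson.encode_number_precision(3)
cjson.encode(1/3)                  -- Returns: '0.333'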

3.11. encode_sparse_array

convert, ratio, safe = cjson.encode_sparse_array([convert[, ratio[, safe]]])
-- "convert" must be a boolean. Default: false.
-- "ratio" must be a positive integer. Default: 2.
-- "safe" must be a positive integer. Default: 10.

Lua CJSON classifies a Lua table into one of three kinds when encoding a JSON array. This is determined by the number of values missing from the Lua array as follows:

Normal

All values are available.

Sparse

At least 1 value is missing.

Excessively sparse

The number of values missing exceeds the configured ratio.

Lua CJSON encodes sparse Lua arrays as JSON arrays using JSON null for the missing entries.

An array is excessively sparse when all the following conditions are met:

  • ratio > 0

  • maximum_index > safe

  • maximum_index > item_count * ratio

Lua CJSON will never consider an array to be excessively sparse when ratio = 0. The safe limit ensures that small sparse Lua arrays (maximum index no greater than safe) are never treated as excessively sparse and are always encoded as JSON arrays.

By default, attempting to encode an excessively sparse array will generate an error. If convert is set to true, excessively sparse arrays will be converted to a JSON object.

The current settings are always returned. A particular setting is only changed when the argument is provided (non-nil).

Example: Encoding a sparse array
cjson.encode({ [3] = "data" })
-- Returns: '[null,null,"data"]'
Example: Enabling conversion to a JSON object
cjson.encode_sparse_array(true)
cjson.encode({ [1000] = "excessively sparse" })
-- Returns: '{"1000":"excessively sparse"}'

4. API (Variables)

4.1. _NAME

The name of the Lua CJSON module ("cjson").

4.2. _VERSION

The version number of the Lua CJSON module ("2.1.0").

4.3. null

Lua CJSON decodes JSON null as a Lua lightuserdata NULL pointer. cjson.null is provided for comparison.
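
For example:
value = cjson.decode('{ "foo": null }')
print(value.foo == cjson.null)   -- Prints: true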

5. References

  • RFC 4627 (http://tools.ietf.org/html/rfc4627)

  • JSON website (http://www.json.org/)

lua-cjson-2.1.0+dfsg/manual.txt000066400000000000000000000417731201567770400163670ustar00rootroot00000000000000= Lua CJSON 2.1.0 Manual = Mark Pulford :revdate: 1st March 2012 Overview -------- The Lua CJSON module provides JSON support for Lua. *Features*:: - Fast, standards compliant encoding/parsing routines - Full support for JSON with UTF-8, including decoding surrogate pairs - Optional run-time support for common exceptions to the JSON specification (infinity, NaN,..) - No dependencies on other libraries *Caveats*:: - UTF-16 and UTF-32 are not supported Lua CJSON is covered by the MIT license. Review the file +LICENSE+ for details. The latest version of this software is available from the http://www.kyne.com.au/%7Emark/software/lua-cjson.php[Lua CJSON website]. Feel free to email me if you have any patches, suggestions, or comments. Installation ------------ Lua CJSON requires either http://www.lua.org[Lua] 5.1, Lua 5.2, or http://www.luajit.org[LuaJIT] to build. The build method can be selected from 4 options: Make:: Unix (including Linux, BSD, Mac OSX & Solaris), Windows CMake:: Unix, Windows RPM:: Linux LuaRocks:: Unix, Windows Make ~~~~ The included +Makefile+ has generic settings. First, review and update the included makefile to suit your platform (if required). Next, build and install the module: [source,sh] make install Or install manually into your Lua module directory: [source,sh] make cp cjson.so $LUA_MODULE_DIRECTORY CMake ~~~~~ http://www.cmake.org[CMake] can generate build configuration for many different platforms (including Unix and Windows). First, generate the makefile for your platform using CMake. If CMake is unable to find Lua, manually set the +LUA_DIR+ environment variable to the base prefix of your Lua 5.1 installation. While +cmake+ is used in the example below, +ccmake+ or +cmake-gui+ may be used to present an interface for changing the default build options. [source,sh] mkdir build cd build # Optional: export LUA_DIR=$LUA51_PREFIX cmake .. Next, build and install the module: [source,sh] make install # Or: make cp cjson.so $LUA_MODULE_DIRECTORY Review the http://www.cmake.org/cmake/help/documentation.html[CMake documentation] for further details. RPM ~~~ Linux distributions using http://rpm.org[RPM] can create a package via the included RPM spec file. Ensure the +rpm-build+ package (or similar) has been installed. Build and install the module via RPM: [source,sh] rpmbuild -tb lua-cjson-2.1.0.tar.gz rpm -Uvh $LUA_CJSON_RPM LuaRocks ~~~~~~~~ http://luarocks.org[LuaRocks] can be used to install and manage Lua modules on a wide range of platforms (including Windows). First, extract the Lua CJSON source package. Next, install the module: [source,sh] cd lua-cjson-2.1.0 luarocks make [NOTE] LuaRocks does not support platform specific configuration for Solaris. On Solaris, you may need to manually uncomment +USE_INTERNAL_ISINF+ in the rockspec before building this module. Review the http://luarocks.org/en/Documentation[LuaRocks documentation] for further details. [[build_options]] Build Options (#define) ~~~~~~~~~~~~~~~~~~~~~~~ Lua CJSON offers several +#define+ build options to address portability issues, and enable non-default features. Some build methods may automatically set platform specific options if required. Other features should be enabled manually. USE_INTERNAL_ISINF:: Workaround for Solaris platforms missing +isinf+. DISABLE_INVALID_NUMBERS:: Recommended on platforms where +strtod+ / +sprintf+ are not POSIX compliant (eg, Windows MinGW). 
Prevents +cjson.encode_invalid_numbers+ and +cjson.decode_invalid_numbers+ from being enabled. However, +cjson.encode_invalid_numbers+ may still be set to +"null"+. When using the Lua CJSON built-in floating point conversion this option is unnecessary and is ignored. Built-in floating point conversion ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Lua CJSON may be built with David Gay's http://www.netlib.org/fp/[floating point conversion routines]. This can increase overall performance by up to 50% on some platforms when converting a large amount of numeric data. However, this option reduces portability and is disabled by default. USE_INTERNAL_FPCONV:: Enable internal number conversion routines. IEEE_BIG_ENDIAN:: Must be set on big endian architectures. MULTIPLE_THREADS:: Must be set if Lua CJSON may be used in a multi-threaded application. Requires the _pthreads_ library. API (Functions) --------------- Synopsis ~~~~~~~~ [source,lua] ------------ -- Module instantiation local cjson = require "cjson" local cjson2 = cjson.new() local cjson_safe = require "cjson.safe" -- Translate Lua value to/from JSON text = cjson.encode(value) value = cjson.decode(text) -- Get and/or set Lua CJSON configuration setting = cjson.decode_invalid_numbers([setting]) setting = cjson.encode_invalid_numbers([setting]) keep = cjson.encode_keep_buffer([keep]) depth = cjson.encode_max_depth([depth]) depth = cjson.decode_max_depth([depth]) convert, ratio, safe = cjson.encode_sparse_array([convert[, ratio[, safe]]]) ------------ Module Instantiation ~~~~~~~~~~~~~~~~~~~~ [source,lua] ------------ local cjson = require "cjson" local cjson2 = cjson.new() local cjson_safe = require "cjson.safe" ------------ Import Lua CJSON via the Lua +require+ function. Lua CJSON does not register a global module table. The +cjson+ module will throw an error during JSON conversion if any invalid data is encountered. Refer to <> and <> for details. The +cjson.safe+ module behaves identically to the +cjson+ module, except when errors are encountered during JSON conversion. On error, the +cjson_safe.encode+ and +cjson_safe.decode+ functions will return +nil+ followed by the error message. +cjson.new+ can be used to instantiate an independent copy of the Lua CJSON module. The new module has a separate persistent encoding buffer, and default settings. Lua CJSON can support Lua implementations using multiple preemptive threads within a single Lua state provided the persistent encoding buffer is not shared. This can be achieved by one of the following methods: - Disabling the persistent encoding buffer with <> - Ensuring each thread calls <> separately (ie, treat +cjson.encode+ as non-reentrant). - Using a separate +cjson+ module table per preemptive thread (+cjson.new+) [NOTE] Lua CJSON uses +strtod+ and +snprintf+ to perform numeric conversion as they are usually well supported, fast and bug free. However, these functions require a workaround for JSON encoding/parsing under locales using a comma decimal separator. Lua CJSON detects the current locale during instantiation to determine and automatically implement the workaround if required. Lua CJSON should be reinitialised via +cjson.new+ if the locale of the current process changes. Using a different locale per thread is not supported. decode ~~~~~~ [source,lua] ------------ value = cjson.decode(json_text) ------------ +cjson.decode+ will deserialise any UTF-8 JSON string into a Lua value or table. UTF-16 and UTF-32 JSON strings are not supported. 
+cjson.decode+ requires that any NULL (ASCII 0) and double quote (ASCII 34) characters are escaped within strings. All escape codes will be decoded and other bytes will be passed transparently. UTF-8 characters are not validated during decoding and should be checked elsewhere if required. JSON +null+ will be converted to a NULL +lightuserdata+ value. This can be compared with +cjson.null+ for convenience. By default, numbers incompatible with the JSON specification (infinity, NaN, hexadecimal) can be decoded. This default can be changed with <>. .Example: Decoding [source,lua] json_text = '[ true, { "foo": "bar" } ]' value = cjson.decode(json_text) -- Returns: { true, { foo = "bar" } } [CAUTION] Care must be taken after decoding JSON objects with numeric keys. Each numeric key will be stored as a Lua +string+. Any subsequent code assuming type +number+ may break. [[decode_invalid_numbers]] decode_invalid_numbers ~~~~~~~~~~~~~~~~~~~~~~ [source,lua] ------------ setting = cjson.decode_invalid_numbers([setting]) -- "setting" must be a boolean. Default: true. ------------ Lua CJSON may generate an error when trying to decode numbers not supported by the JSON specification. _Invalid numbers_ are defined as: - infinity - not-a-number (NaN) - hexadecimal Available settings: +true+:: Accept and decode _invalid numbers_. This is the default setting. +false+:: Throw an error when _invalid numbers_ are encountered. The current setting is always returned, and is only updated when an argument is provided. [[decode_max_depth]] decode_max_depth ~~~~~~~~~~~~~~~~ [source,lua] ------------ depth = cjson.decode_max_depth([depth]) -- "depth" must be a positive integer. Default: 1000. ------------ Lua CJSON will generate an error when parsing deeply nested JSON once the maximum array/object depth has been exceeded. This check prevents unnecessarily complicated JSON from slowing down the application, or crashing the application due to lack of process stack space. An error may be generated before the depth limit is hit if Lua is unable to allocate more objects on the Lua stack. By default, Lua CJSON will reject JSON with arrays and/or objects nested more than 1000 levels deep. The current setting is always returned, and is only updated when an argument is provided. [[encode]] encode ~~~~~~ [source,lua] ------------ json_text = cjson.encode(value) ------------ +cjson.encode+ will serialise a Lua value into a string containing the JSON representation. +cjson.encode+ supports the following types: - +boolean+ - +lightuserdata+ (NULL value only) - +nil+ - +number+ - +string+ - +table+ The remaining Lua types will generate an error: - +function+ - +lightuserdata+ (non-NULL values) - +thread+ - +userdata+ By default, numbers are encoded with 14 significant digits. Refer to <> for details. Lua CJSON will escape the following characters within each UTF-8 string: - Control characters (ASCII 0 - 31) - Double quote (ASCII 34) - Forward slash (ASCII 47) - Blackslash (ASCII 92) - Delete (ASCII 127) All other bytes are passed transparently. [CAUTION] ========= Lua CJSON will successfully encode/decode binary strings, but this is technically not supported by JSON and may not be compatible with other JSON libraries. To ensure the output is valid JSON, applications should ensure all Lua strings passed to +cjson.encode+ are UTF-8. Base64 is commonly used to encode binary data as the most efficient encoding under UTF-8 can only reduce the encoded size by a further ~8%. 
Lua Base64 routines can be found in the http://w3.impa.br/%7Ediego/software/luasocket/[LuaSocket] and http://www.tecgraf.puc-rio.br/%7Elhf/ftp/lua/#lbase64[lbase64] packages. ========= Lua CJSON uses a heuristic to determine whether to encode a Lua table as a JSON array or an object. A Lua table with only positive integer keys of type +number+ will be encoded as a JSON array. All other tables will be encoded as a JSON object. Lua CJSON does not use metamethods when serialising tables. - +rawget+ is used to iterate over Lua arrays - +next+ is used to iterate over Lua objects Lua arrays with missing entries (_sparse arrays_) may optionally be encoded in several different ways. Refer to <> for details. JSON object keys are always strings. Hence +cjson.encode+ only supports table keys which are type +number+ or +string+. All other types will generate an error. [NOTE] Standards compliant JSON must be encapsulated in either an object (+{}+) or an array (+[]+). If strictly standards compliant JSON is desired, a table must be passed to +cjson.encode+. By default, encoding the following Lua values will generate errors: - Numbers incompatible with the JSON specification (infinity, NaN) - Tables nested more than 1000 levels deep - Excessively sparse Lua arrays These defaults can be changed with: - <> - <> - <> .Example: Encoding [source,lua] value = { true, { foo = "bar" } } json_text = cjson.encode(value) -- Returns: '[true,{"foo":"bar"}]' [[encode_invalid_numbers]] encode_invalid_numbers ~~~~~~~~~~~~~~~~~~~~~~ [source,lua] ------------ setting = cjson.encode_invalid_numbers([setting]) -- "setting" must a boolean or "null". Default: false. ------------ Lua CJSON may generate an error when encoding floating point numbers not supported by the JSON specification (_invalid numbers_): - infinity - not-a-number (NaN) Available settings: +true+:: Allow _invalid numbers_ to be encoded. This will generate non-standard JSON, but this output is supported by some libraries. +"null"+:: Encode _invalid numbers_ as a JSON +null+ value. This allows infinity and NaN to be encoded into valid JSON. +false+:: Throw an error when attempting to encode _invalid numbers_. This is the default setting. The current setting is always returned, and is only updated when an argument is provided. [[encode_keep_buffer]] encode_keep_buffer ~~~~~~~~~~~~~~~~~~ [source,lua] ------------ keep = cjson.encode_keep_buffer([keep]) -- "keep" must be a boolean. Default: true. ------------ Lua CJSON can reuse the JSON encoding buffer to improve performance. Available settings: +true+:: The buffer will grow to the largest size required and is not freed until the Lua CJSON module is garbage collected. This is the default setting. +false+:: Free the encode buffer after each call to +cjson.encode+. The current setting is always returned, and is only updated when an argument is provided. [[encode_max_depth]] encode_max_depth ~~~~~~~~~~~~~~~~ [source,lua] ------------ depth = cjson.encode_max_depth([depth]) -- "depth" must be a positive integer. Default: 1000. ------------ Once the maximum table depth has been exceeded Lua CJSON will generate an error. This prevents a deeply nested or recursive data structure from crashing the application. By default, Lua CJSON will generate an error when trying to encode data structures with more than 1000 nested tables. The current setting is always returned, and is only updated when an argument is provided. 
.Example: Recursive Lua table
[source,lua]
a = {}; a[1] = a

[[encode_number_precision]]
encode_number_precision
~~~~~~~~~~~~~~~~~~~~~~~

[source,lua]
------------
precision = cjson.encode_number_precision([precision])
-- "precision" must be an integer between 1 and 14. Default: 14.
------------

The amount of significant digits returned by Lua CJSON when encoding
numbers can be changed to balance accuracy versus performance.

For data structures containing many numbers, setting
+cjson.encode_number_precision+ to a smaller integer, for example +3+,
can improve encoding performance by up to 50%.

By default, Lua CJSON will output 14 significant digits when converting
a number to text.

The current setting is always returned, and is only updated when an
argument is provided.

[[encode_sparse_array]]
encode_sparse_array
~~~~~~~~~~~~~~~~~~~

[source,lua]
------------
convert, ratio, safe = cjson.encode_sparse_array([convert[, ratio[, safe]]])
-- "convert" must be a boolean. Default: false.
-- "ratio" must be a positive integer. Default: 2.
-- "safe" must be a positive integer. Default: 10.
------------

Lua CJSON classifies a Lua table into one of three kinds when encoding
a JSON array. This is determined by the number of values missing from
the Lua array as follows:

Normal:: All values are available.
Sparse:: At least 1 value is missing.
Excessively sparse:: The number of values missing exceeds the
configured ratio.

Lua CJSON encodes sparse Lua arrays as JSON arrays using JSON +null+
for the missing entries.

An array is excessively sparse when all the following conditions are
met:

- +ratio+ > +0+
- _maximum_index_ > +safe+
- _maximum_index_ > _item_count_ * +ratio+

Lua CJSON will never consider an array to be _excessively sparse_ when
+ratio+ = +0+. The +safe+ limit ensures that small Lua arrays are
always encoded as sparse arrays.

By default, attempting to encode an _excessively sparse_ array will
generate an error. If +convert+ is set to +true+, _excessively sparse_
arrays will be converted to a JSON object.

The current settings are always returned. A particular setting is only
changed when the argument is provided (non-++nil++).

.Example: Encoding a sparse array
[source,lua]
cjson.encode({ [3] = "data" })
-- Returns: '[null,null,"data"]'

.Example: Enabling conversion to a JSON object
[source,lua]
cjson.encode_sparse_array(true)
cjson.encode({ [1000] = "excessively sparse" })
-- Returns: '{"1000":"excessively sparse"}'

API (Variables)
---------------

_NAME
~~~~~

The name of the Lua CJSON module (+"cjson"+).

_VERSION
~~~~~~~~

The version number of the Lua CJSON module (+"2.1.0"+).

null
~~~~

Lua CJSON decodes JSON +null+ as a Lua +lightuserdata+ NULL pointer.
+cjson.null+ is provided for comparison.

[sect1]
References
----------

- http://tools.ietf.org/html/rfc4627[RFC 4627]
- http://www.json.org/[JSON website]

// vi:ft=asciidoc tw=72:
lua-cjson-2.1.0+dfsg/performance.html000066400000000000000000000333731201567770400175310ustar00rootroot00000000000000 JSON module performance comparison under Lua

This performance comparison aims to provide a guide of relative performance between several fast and popular JSON modules.

The examples used in this comparison were mostly sourced from the JSON website and RFC 4627.

Performance will vary widely between platforms and data sets. These results should only be used as an approximation.

1. Modules

The following JSON modules for Lua were tested:

DKJSON 2.1
  • Lua implementation with no dependencies on other libraries

  • Supports LPeg to improve decode performance

Lua YAJL 2.0
  • C wrapper for the YAJL library

Lua CJSON 2.0.0
  • C implementation with no dependencies on other libraries

2. Summary

All modules were built and tested as follows:

DKJSON

Tested with/without LPeg 10.2.

Lua YAJL

Tested with YAJL 2.0.4.

Lua CJSON

Tested with Libc and internal floating point conversion routines.

The following Lua implementations were used for this comparison:

  • Lua 5.1.4 (Lua)

  • LuaJIT 2.0.0-beta9 (JIT)

These results show the number of JSON operations per second sustained by each module. All results have been normalised against the pure Lua DKJSON implementation.
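
As a rough illustration of how the normalised figures relate to raw
throughput (the numbers and variable names below are made up for the
example; the real figures come from repeated tests/bench.lua runs):

    -- Baseline: DKJSON running under plain Lua is defined as 1x.
    local dkjson_ops = 1000    -- JSON operations per second (illustrative)
    local cjson_ops  = 14000   -- ops/sec for the module being compared (illustrative)
    print(("%.0fx"):format(cjson_ops / dkjson_ops))   --> prints "14x"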

Decoding performance
             | DKJSON                  | Lua YAJL   | Lua CJSON
             | No LPeg     With LPeg   |            | Libc         Internal
             | Lua  JIT    Lua    JIT  | Lua   JIT  | Lua   JIT    Lua   JIT
example1     | 1x   2x     2.6x   3.4x | 7.1x  10x  | 14x   20x    14x   20x
example2     | 1x   2.2x   2.9x   4.4x | 6.7x  9.9x | 14x   22x    14x   22x
example3     | 1x   2.1x   3x     4.3x | 6.9x  9.3x | 14x   21x    15x   22x
example4     | 1x   2x     2.5x   3.7x | 7.3x  10x  | 12x   19x    12x   20x
example5     | 1x   2.2x   3x     4.5x | 7.8x  11x  | 16x   24x    16x   24x
numbers      | 1x   2.2x   2.3x   4x   | 4.6x  5.5x | 8.9x  10x    13x   17x
rfc-example1 | 1x   2.1x   2.8x   4.3x | 6.1x  8.1x | 13x   19x    14x   21x
rfc-example2 | 1x   2.1x   3.1x   4.2x | 7.1x  9.2x | 15x   21x    17x   24x
types        | 1x   2.2x   2.6x   4.3x | 5.3x  7.4x | 12x   20x    13x   21x
-------------|-------------------------|------------|-----------------------
= Average => | 1x   2.1x   2.7x   4.1x | 6.5x  9x   | 13x   20x    14x   21x
Encoding performance
             | DKJSON                  | Lua YAJL   | Lua CJSON
             | No LPeg     With LPeg   |            | Libc         Internal
             | Lua  JIT    Lua    JIT  | Lua   JIT  | Lua   JIT    Lua   JIT
example1     | 1x   1.8x   0.97x  1.6x | 3.1x  5.2x | 23x   29x    23x   29x
example2     | 1x   2x     0.97x  1.7x | 2.6x  4.3x | 22x   28x    22x   28x
example3     | 1x   1.9x   0.98x  1.6x | 2.8x  4.3x | 13x   15x    16x   18x
example4     | 1x   1.7x   0.96x  1.3x | 3.9x  6.1x | 15x   19x    17x   21x
example5     | 1x   2x     0.98x  1.7x | 2.7x  4.5x | 20x   23x    20x   23x
numbers      | 1x   2.3x   1x     2.2x | 1.3x  1.9x | 3.8x  4.1x   4.2x  4.6x
rfc-example1 | 1x   1.9x   0.97x  1.6x | 2.2x  3.2x | 8.5x  9.3x   11x   12x
rfc-example2 | 1x   1.9x   0.98x  1.6x | 2.6x  3.9x | 10x   11x    17x   19x
types        | 1x   2.2x   0.97x  2x   | 1.2x  1.9x | 11x   13x    12x   14x
-------------|-------------------------|------------|-----------------------
= Average => | 1x   1.9x   0.98x  1.7x | 2.5x  3.9x | 14x   17x    16x   19x
lua-cjson-2.1.0+dfsg/performance.txt000066400000000000000000000077401201567770400174070ustar00rootroot00000000000000JSON module performance comparison under Lua ============================================ Mark Pulford :revdate: January 22, 2012 This performance comparison aims to provide a guide of relative performance between several fast and popular JSON modules. The examples used in this comparison were mostly sourced from the http://json.org[JSON website] and http://tools.ietf.org/html/rfc4627[RFC 4627]. Performance will vary widely between platforms and data sets. These results should only be used as an approximation. Modules ------- The following JSON modules for Lua were tested: http://chiselapp.com/user/dhkolf/repository/dkjson/[DKJSON 2.1]:: - Lua implementation with no dependencies on other libraries - Supports LPeg to improve decode performance https://github.com/brimworks/lua-yajl[Lua YAJL 2.0]:: - C wrapper for the YAJL library http://www.kyne.com.au/%7Emark/software/lua-cjson.php[Lua CSJON 2.0.0]:: - C implementation with no dependencies on other libraries Summary ------- All modules were built and tested as follows: DKJSON:: Tested with/without LPeg 10.2. Lua YAJL:: Tested with YAJL 2.0.4. Lua CJSON:: Tested with Libc and internal floating point conversion routines. The following Lua implementations were used for this comparison: - http://www.lua.org[Lua 5.1.4] (_Lua_) - http://www.luajit.org[LuaJIT 2.0.0-beta9] (_JIT_) These results show the number of JSON operations per second sustained by each module. All results have been normalised against the pure Lua DKJSON implementation. .Decoding performance ............................................................................ | DKJSON | Lua YAJL | Lua CJSON | No LPeg With LPeg | | Libc Internal | Lua JIT Lua JIT | Lua JIT | Lua JIT Lua JIT example1 | 1x 2x 2.6x 3.4x | 7.1x 10x | 14x 20x 14x 20x example2 | 1x 2.2x 2.9x 4.4x | 6.7x 9.9x | 14x 22x 14x 22x example3 | 1x 2.1x 3x 4.3x | 6.9x 9.3x | 14x 21x 15x 22x example4 | 1x 2x 2.5x 3.7x | 7.3x 10x | 12x 19x 12x 20x example5 | 1x 2.2x 3x 4.5x | 7.8x 11x | 16x 24x 16x 24x numbers | 1x 2.2x 2.3x 4x | 4.6x 5.5x | 8.9x 10x 13x 17x rfc-example1 | 1x 2.1x 2.8x 4.3x | 6.1x 8.1x | 13x 19x 14x 21x rfc-example2 | 1x 2.1x 3.1x 4.2x | 7.1x 9.2x | 15x 21x 17x 24x types | 1x 2.2x 2.6x 4.3x | 5.3x 7.4x | 12x 20x 13x 21x -------------|-------------------------|------------|----------------------- = Average => | 1x 2.1x 2.7x 4.1x | 6.5x 9x | 13x 20x 14x 21x ............................................................................ .Encoding performance ............................................................................. | DKJSON | Lua YAJL | Lua CJSON | No LPeg With LPeg | | Libc Internal | Lua JIT Lua JIT | Lua JIT | Lua JIT Lua JIT example1 | 1x 1.8x 0.97x 1.6x | 3.1x 5.2x | 23x 29x 23x 29x example2 | 1x 2x 0.97x 1.7x | 2.6x 4.3x | 22x 28x 22x 28x example3 | 1x 1.9x 0.98x 1.6x | 2.8x 4.3x | 13x 15x 16x 18x example4 | 1x 1.7x 0.96x 1.3x | 3.9x 6.1x | 15x 19x 17x 21x example5 | 1x 2x 0.98x 1.7x | 2.7x 4.5x | 20x 23x 20x 23x numbers | 1x 2.3x 1x 2.2x | 1.3x 1.9x | 3.8x 4.1x 4.2x 4.6x rfc-example1 | 1x 1.9x 0.97x 1.6x | 2.2x 3.2x | 8.5x 9.3x 11x 12x rfc-example2 | 1x 1.9x 0.98x 1.6x | 2.6x 3.9x | 10x 11x 17x 19x types | 1x 2.2x 0.97x 2x | 1.2x 1.9x | 11x 13x 12x 14x -------------|-------------------------|------------|----------------------- = Average => | 1x 1.9x 0.98x 1.7x | 2.5x 3.9x | 14x 17x 16x 19x ............................................................................. 
// vi:ft=asciidoc tw=72: lua-cjson-2.1.0+dfsg/runtests.sh000077500000000000000000000036201201567770400165640ustar00rootroot00000000000000#!/bin/sh PLATFORM="`uname -s`" [ "$1" ] && VERSION="$1" || VERSION="2.1.0" set -e # Portable "ggrep -A" replacement. # Work around Solaris awk record limit of 2559 bytes. # contextgrep PATTERN POST_MATCH_LINES contextgrep() { cut -c -2500 | awk "/$1/ { count = ($2 + 1) } count > 0 { count--; print }" } do_tests() { echo cd tests lua -e 'print("Testing Lua CJSON version " .. require("cjson")._VERSION)' ./test.lua | contextgrep 'FAIL|Summary' 3 | grep -v PASS | cut -c -150 cd .. } echo "===== Setting LuaRocks PATH =====" eval "`luarocks path`" echo "===== Building UTF-8 test data =====" ( cd tests && ./genutf8.pl; ) echo "===== Cleaning old build data =====" make clean rm -f tests/cjson.so echo "===== Verifying cjson.so is not installed =====" cd tests if lua -e 'require "cjson"' 2>/dev/null then cat < "$LOG" RPM="`awk '/^Wrote: / && ! /debuginfo/ { print $2}' < "$LOG"`" sudo -- rpm -Uvh \"$RPM\" do_tests sudo -- rpm -e lua-cjson rm -f "$LOG" else echo "==> skipping, $TGZ not found" fi fi # vi:ai et sw=4 ts=4: lua-cjson-2.1.0+dfsg/strbuf.c000066400000000000000000000137641201567770400160210ustar00rootroot00000000000000/* strbuf - String buffer routines * * Copyright (c) 2010-2012 Mark Pulford * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include #include #include #include #include "strbuf.h" static void die(const char *fmt, ...) 
{ va_list arg; va_start(arg, fmt); vfprintf(stderr, fmt, arg); va_end(arg); fprintf(stderr, "\n"); exit(-1); } void strbuf_init(strbuf_t *s, int len) { int size; if (len <= 0) size = STRBUF_DEFAULT_SIZE; else size = len + 1; /* \0 terminator */ s->buf = NULL; s->size = size; s->length = 0; s->increment = STRBUF_DEFAULT_INCREMENT; s->dynamic = 0; s->reallocs = 0; s->debug = 0; s->buf = malloc(size); if (!s->buf) die("Out of memory"); strbuf_ensure_null(s); } strbuf_t *strbuf_new(int len) { strbuf_t *s; s = malloc(sizeof(strbuf_t)); if (!s) die("Out of memory"); strbuf_init(s, len); /* Dynamic strbuf allocation / deallocation */ s->dynamic = 1; return s; } void strbuf_set_increment(strbuf_t *s, int increment) { /* Increment > 0: Linear buffer growth rate * Increment < -1: Exponential buffer growth rate */ if (increment == 0 || increment == -1) die("BUG: Invalid string increment"); s->increment = increment; } static inline void debug_stats(strbuf_t *s) { if (s->debug) { fprintf(stderr, "strbuf(%lx) reallocs: %d, length: %d, size: %d\n", (long)s, s->reallocs, s->length, s->size); } } /* If strbuf_t has not been dynamically allocated, strbuf_free() can * be called any number of times strbuf_init() */ void strbuf_free(strbuf_t *s) { debug_stats(s); if (s->buf) { free(s->buf); s->buf = NULL; } if (s->dynamic) free(s); } char *strbuf_free_to_string(strbuf_t *s, int *len) { char *buf; debug_stats(s); strbuf_ensure_null(s); buf = s->buf; if (len) *len = s->length; if (s->dynamic) free(s); return buf; } static int calculate_new_size(strbuf_t *s, int len) { int reqsize, newsize; if (len <= 0) die("BUG: Invalid strbuf length requested"); /* Ensure there is room for optional NULL termination */ reqsize = len + 1; /* If the user has requested to shrink the buffer, do it exactly */ if (s->size > reqsize) return reqsize; newsize = s->size; if (s->increment < 0) { /* Exponential sizing */ while (newsize < reqsize) newsize *= -s->increment; } else { /* Linear sizing */ newsize = ((newsize + s->increment - 1) / s->increment) * s->increment; } return newsize; } /* Ensure strbuf can handle a string length bytes long (ignoring NULL * optional termination). */ void strbuf_resize(strbuf_t *s, int len) { int newsize; newsize = calculate_new_size(s, len); if (s->debug > 1) { fprintf(stderr, "strbuf(%lx) resize: %d => %d\n", (long)s, s->size, newsize); } s->size = newsize; s->buf = realloc(s->buf, s->size); if (!s->buf) die("Out of memory"); s->reallocs++; } void strbuf_append_string(strbuf_t *s, const char *str) { int space, i; space = strbuf_empty_length(s); for (i = 0; str[i]; i++) { if (space < 1) { strbuf_resize(s, s->length + 1); space = strbuf_empty_length(s); } s->buf[s->length] = str[i]; s->length++; space--; } } /* strbuf_append_fmt() should only be used when an upper bound * is known for the output string. */ void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...) { va_list arg; int fmt_len; strbuf_ensure_empty_length(s, len); va_start(arg, fmt); fmt_len = vsnprintf(s->buf + s->length, len, fmt, arg); va_end(arg); if (fmt_len < 0) die("BUG: Unable to convert number"); /* This should never happen.. */ s->length += fmt_len; } /* strbuf_append_fmt_retry() can be used when the there is no known * upper bound for the output string. */ void strbuf_append_fmt_retry(strbuf_t *s, const char *fmt, ...) 
{ va_list arg; int fmt_len, try; int empty_len; /* If the first attempt to append fails, resize the buffer appropriately * and try again */ for (try = 0; ; try++) { va_start(arg, fmt); /* Append the new formatted string */ /* fmt_len is the length of the string required, excluding the * trailing NULL */ empty_len = strbuf_empty_length(s); /* Add 1 since there is also space to store the terminating NULL. */ fmt_len = vsnprintf(s->buf + s->length, empty_len + 1, fmt, arg); va_end(arg); if (fmt_len <= empty_len) break; /* SUCCESS */ if (try > 0) die("BUG: length of formatted string changed"); strbuf_resize(s, s->length + fmt_len); } s->length += fmt_len; } /* vi:ai et sw=4 ts=4: */ lua-cjson-2.1.0+dfsg/strbuf.h000066400000000000000000000103751201567770400160210ustar00rootroot00000000000000/* strbuf - String buffer routines * * Copyright (c) 2010-2012 Mark Pulford * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include #include /* Size: Total bytes allocated to *buf * Length: String length, excluding optional NULL terminator. * Increment: Allocation increments when resizing the string buffer. 
* Dynamic: True if created via strbuf_new() */ typedef struct { char *buf; int size; int length; int increment; int dynamic; int reallocs; int debug; } strbuf_t; #ifndef STRBUF_DEFAULT_SIZE #define STRBUF_DEFAULT_SIZE 1023 #endif #ifndef STRBUF_DEFAULT_INCREMENT #define STRBUF_DEFAULT_INCREMENT -2 #endif /* Initialise */ extern strbuf_t *strbuf_new(int len); extern void strbuf_init(strbuf_t *s, int len); extern void strbuf_set_increment(strbuf_t *s, int increment); /* Release */ extern void strbuf_free(strbuf_t *s); extern char *strbuf_free_to_string(strbuf_t *s, int *len); /* Management */ extern void strbuf_resize(strbuf_t *s, int len); static int strbuf_empty_length(strbuf_t *s); static int strbuf_length(strbuf_t *s); static char *strbuf_string(strbuf_t *s, int *len); static void strbuf_ensure_empty_length(strbuf_t *s, int len); static char *strbuf_empty_ptr(strbuf_t *s); static void strbuf_extend_length(strbuf_t *s, int len); /* Update */ extern void strbuf_append_fmt(strbuf_t *s, int len, const char *fmt, ...); extern void strbuf_append_fmt_retry(strbuf_t *s, const char *format, ...); static void strbuf_append_mem(strbuf_t *s, const char *c, int len); extern void strbuf_append_string(strbuf_t *s, const char *str); static void strbuf_append_char(strbuf_t *s, const char c); static void strbuf_ensure_null(strbuf_t *s); /* Reset string for before use */ static inline void strbuf_reset(strbuf_t *s) { s->length = 0; } static inline int strbuf_allocated(strbuf_t *s) { return s->buf != NULL; } /* Return bytes remaining in the string buffer * Ensure there is space for a NULL terminator. */ static inline int strbuf_empty_length(strbuf_t *s) { return s->size - s->length - 1; } static inline void strbuf_ensure_empty_length(strbuf_t *s, int len) { if (len > strbuf_empty_length(s)) strbuf_resize(s, s->length + len); } static inline char *strbuf_empty_ptr(strbuf_t *s) { return s->buf + s->length; } static inline void strbuf_extend_length(strbuf_t *s, int len) { s->length += len; } static inline int strbuf_length(strbuf_t *s) { return s->length; } static inline void strbuf_append_char(strbuf_t *s, const char c) { strbuf_ensure_empty_length(s, 1); s->buf[s->length++] = c; } static inline void strbuf_append_char_unsafe(strbuf_t *s, const char c) { s->buf[s->length++] = c; } static inline void strbuf_append_mem(strbuf_t *s, const char *c, int len) { strbuf_ensure_empty_length(s, len); memcpy(s->buf + s->length, c, len); s->length += len; } static inline void strbuf_append_mem_unsafe(strbuf_t *s, const char *c, int len) { memcpy(s->buf + s->length, c, len); s->length += len; } static inline void strbuf_ensure_null(strbuf_t *s) { s->buf[s->length] = 0; } static inline char *strbuf_string(strbuf_t *s, int *len) { if (len) *len = s->length; return s->buf; } /* vi:ai et sw=4 ts=4: */ lua-cjson-2.1.0+dfsg/tests/000077500000000000000000000000001201567770400154775ustar00rootroot00000000000000lua-cjson-2.1.0+dfsg/tests/README000066400000000000000000000001711201567770400163560ustar00rootroot00000000000000These JSON examples were taken from the JSON website (http://json.org/example.html) and RFC 4627. Used with permission. lua-cjson-2.1.0+dfsg/tests/bench.lua000077500000000000000000000062571201567770400172760ustar00rootroot00000000000000#!/usr/bin/env lua -- This benchmark script measures wall clock time and should be -- run on an unloaded system. -- -- Your Mileage May Vary. 
-- -- Mark Pulford local json_module = os.getenv("JSON_MODULE") or "cjson" require "socket" local json = require(json_module) local util = require "cjson.util" local function find_func(mod, funcnames) for _, v in ipairs(funcnames) do if mod[v] then return mod[v] end end return nil end local json_encode = find_func(json, { "encode", "Encode", "to_string", "stringify", "json" }) local json_decode = find_func(json, { "decode", "Decode", "to_value", "parse" }) local function average(t) local total = 0 for _, v in ipairs(t) do total = total + v end return total / #t end function benchmark(tests, seconds, rep) local function bench(func, iter) -- Use socket.gettime() to measure microsecond resolution -- wall clock time. local t = socket.gettime() for i = 1, iter do func(i) end t = socket.gettime() - t -- Don't trust any results when the run lasted for less than a -- millisecond - return nil. if t < 0.001 then return nil end return (iter / t) end -- Roughly calculate the number of interations required -- to obtain a particular time period. local function calc_iter(func, seconds) local iter = 1 local rate -- Warm up the bench function first. func() while not rate do rate = bench(func, iter) iter = iter * 10 end return math.ceil(seconds * rate) end local test_results = {} for name, func in pairs(tests) do -- k(number), v(string) -- k(string), v(function) -- k(number), v(function) if type(func) == "string" then name = func func = _G[name] end local iter = calc_iter(func, seconds) local result = {} for i = 1, rep do result[i] = bench(func, iter) end -- Remove the slowest half (round down) of the result set table.sort(result) for i = 1, math.floor(#result / 2) do table.remove(result, 1) end test_results[name] = average(result) end return test_results end function bench_file(filename) local data_json = util.file_load(filename) local data_obj = json_decode(data_json) local function test_encode() json_encode(data_obj) end local function test_decode() json_decode(data_json) end local tests = {} if json_encode then tests.encode = test_encode end if json_decode then tests.decode = test_decode end return benchmark(tests, 0.1, 5) end -- Optionally load any custom configuration required for this module local success, data = pcall(util.file_load, ("bench-%s.lua"):format(json_module)) if success then util.run_script(data, _G) configure(json) end for i = 1, #arg do local results = bench_file(arg[i]) for k, v in pairs(results) do print(("%s\t%s\t%d"):format(arg[i], k, v)) end end -- vi:ai et sw=4 ts=4: lua-cjson-2.1.0+dfsg/tests/example1.json000066400000000000000000000015471201567770400201150ustar00rootroot00000000000000{ "glossary": { "title": "example glossary", "GlossDiv": { "title": "S", "GlossList": { "GlossEntry": { "ID": "SGML", "SortAs": "SGML", "GlossTerm": "Standard Generalized Mark up Language", "Acronym": "SGML", "Abbrev": "ISO 8879:1986", "GlossDef": { "para": "A meta-markup language, used to create markup languages such as DocBook.", "GlossSeeAlso": ["GML", "XML"] }, "GlossSee": "markup" } } } } } lua-cjson-2.1.0+dfsg/tests/example2.json000066400000000000000000000003621201567770400201100ustar00rootroot00000000000000{"menu": { "id": "file", "value": "File", "popup": { "menuitem": [ {"value": "New", "onclick": "CreateNewDoc()"}, {"value": "Open", "onclick": "OpenDoc()"}, {"value": "Close", "onclick": "CloseDoc()"} ] } }} lua-cjson-2.1.0+dfsg/tests/example3.json000066400000000000000000000011311201567770400201040ustar00rootroot00000000000000{"widget": { "debug": "on", "window": { "title": "Sample 
Konfabulator Widget", "name": "main_window", "width": 500, "height": 500 }, "image": { "src": "Images/Sun.png", "name": "sun1", "hOffset": 250, "vOffset": 250, "alignment": "center" }, "text": { "data": "Click Here", "size": 36, "style": "bold", "name": "text1", "hOffset": 250, "vOffset": 100, "alignment": "center", "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;" } }} lua-cjson-2.1.0+dfsg/tests/example4.json000066400000000000000000000066071201567770400201220ustar00rootroot00000000000000{"web-app": { "servlet": [ { "servlet-name": "cofaxCDS", "servlet-class": "org.cofax.cds.CDSServlet", "init-param": { "configGlossary:installationAt": "Philadelphia, PA", "configGlossary:adminEmail": "ksm@pobox.com", "configGlossary:poweredBy": "Cofax", "configGlossary:poweredByIcon": "/images/cofax.gif", "configGlossary:staticPath": "/content/static", "templateProcessorClass": "org.cofax.WysiwygTemplate", "templateLoaderClass": "org.cofax.FilesTemplateLoader", "templatePath": "templates", "templateOverridePath": "", "defaultListTemplate": "listTemplate.htm", "defaultFileTemplate": "articleTemplate.htm", "useJSP": false, "jspListTemplate": "listTemplate.jsp", "jspFileTemplate": "articleTemplate.jsp", "cachePackageTagsTrack": 200, "cachePackageTagsStore": 200, "cachePackageTagsRefresh": 60, "cacheTemplatesTrack": 100, "cacheTemplatesStore": 50, "cacheTemplatesRefresh": 15, "cachePagesTrack": 200, "cachePagesStore": 100, "cachePagesRefresh": 10, "cachePagesDirtyRead": 10, "searchEngineListTemplate": "forSearchEnginesList.htm", "searchEngineFileTemplate": "forSearchEngines.htm", "searchEngineRobotsDb": "WEB-INF/robots.db", "useDataStore": true, "dataStoreClass": "org.cofax.SqlDataStore", "redirectionClass": "org.cofax.SqlRedirection", "dataStoreName": "cofax", "dataStoreDriver": "com.microsoft.jdbc.sqlserver.SQLServerDriver", "dataStoreUrl": "jdbc:microsoft:sqlserver://LOCALHOST:1433;DatabaseName=goon", "dataStoreUser": "sa", "dataStorePassword": "dataStoreTestQuery", "dataStoreTestQuery": "SET NOCOUNT ON;select test='test';", "dataStoreLogFile": "/usr/local/tomcat/logs/datastore.log", "dataStoreInitConns": 10, "dataStoreMaxConns": 100, "dataStoreConnUsageLimit": 100, "dataStoreLogLevel": "debug", "maxUrlLength": 500}}, { "servlet-name": "cofaxEmail", "servlet-class": "org.cofax.cds.EmailServlet", "init-param": { "mailHost": "mail1", "mailHostOverride": "mail2"}}, { "servlet-name": "cofaxAdmin", "servlet-class": "org.cofax.cds.AdminServlet"}, { "servlet-name": "fileServlet", "servlet-class": "org.cofax.cds.FileServlet"}, { "servlet-name": "cofaxTools", "servlet-class": "org.cofax.cms.CofaxToolsServlet", "init-param": { "templatePath": "toolstemplates/", "log": 1, "logLocation": "/usr/local/tomcat/logs/CofaxTools.log", "logMaxSize": "", "dataLog": 1, "dataLogLocation": "/usr/local/tomcat/logs/dataLog.log", "dataLogMaxSize": "", "removePageCache": "/content/admin/remove?cache=pages&id=", "removeTemplateCache": "/content/admin/remove?cache=templates&id=", "fileTransferFolder": "/usr/local/tomcat/webapps/content/fileTransferFolder", "lookInContext": 1, "adminGroupID": 4, "betaServer": true}}], "servlet-mapping": { "cofaxCDS": "/", "cofaxEmail": "/cofaxutil/aemail/*", "cofaxAdmin": "/admin/*", "fileServlet": "/static/*", "cofaxTools": "/tools/*"}, "taglib": { "taglib-uri": "cofax.tld", "taglib-location": "/WEB-INF/tlds/cofax.tld"}}} lua-cjson-2.1.0+dfsg/tests/example5.json000066400000000000000000000015511201567770400201140ustar00rootroot00000000000000{"menu": { "header": "SVG Viewer", "items": [ {"id": 
"Open"}, {"id": "OpenNew", "label": "Open New"}, null, {"id": "ZoomIn", "label": "Zoom In"}, {"id": "ZoomOut", "label": "Zoom Out"}, {"id": "OriginalView", "label": "Original View"}, null, {"id": "Quality"}, {"id": "Pause"}, {"id": "Mute"}, null, {"id": "Find", "label": "Find..."}, {"id": "FindAgain", "label": "Find Again"}, {"id": "Copy"}, {"id": "CopyAgain", "label": "Copy Again"}, {"id": "CopySVG", "label": "Copy SVG"}, {"id": "ViewSVG", "label": "View SVG"}, {"id": "ViewSource", "label": "View Source"}, {"id": "SaveAs", "label": "Save As"}, null, {"id": "Help"}, {"id": "About", "label": "About Adobe CVG Viewer..."} ] }} lua-cjson-2.1.0+dfsg/tests/genutf8.pl000077500000000000000000000011711201567770400174170ustar00rootroot00000000000000#!/usr/bin/env perl # Create test comparison data using a different UTF-8 implementation. # The generated utf8.dat file must have the following MD5 sum: # cff03b039d850f370a7362f3313e5268 use strict; # 0xD800 - 0xDFFF are used to encode supplementary codepoints # 0x10000 - 0x10FFFF are supplementary codepoints my (@codepoints) = (0 .. 0xD7FF, 0xE000 .. 0x10FFFF); my $utf8 = pack("U*", @codepoints); defined($utf8) or die "Unable create UTF-8 string\n"; open(FH, ">:utf8", "utf8.dat") or die "Unable to open utf8.dat: $!\n"; print FH $utf8 or die "Unable to write utf8.dat\n"; close(FH); # vi:ai et sw=4 ts=4: lua-cjson-2.1.0+dfsg/tests/numbers.json000066400000000000000000000001761201567770400200510ustar00rootroot00000000000000[ 0.110001, 0.12345678910111, 0.412454033640, 2.6651441426902, 2.718281828459, 3.1415926535898, 2.1406926327793 ] lua-cjson-2.1.0+dfsg/tests/octets-escaped.dat000066400000000000000000000006261201567770400211000ustar00rootroot00000000000000"\u0000\u0001\u0002\u0003\u0004\u0005\u0006\u0007\b\t\n\u000b\f\r\u000e\u000f\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f !\"#$%&'()*+,-.\/0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\u007f"lua-cjson-2.1.0+dfsg/tests/rfc-example1.json000066400000000000000000000004471201567770400206630ustar00rootroot00000000000000{ "Image": { "Width": 800, "Height": 600, "Title": "View from 15th Floor", "Thumbnail": { "Url": "http://www.example.com/image/481989943", "Height": 125, "Width": "100" }, "IDs": [116, 943, 234, 38793] } } lua-cjson-2.1.0+dfsg/tests/rfc-example2.json000066400000000000000000000007211201567770400206570ustar00rootroot00000000000000[ { "precision": "zip", "Latitude": 37.7668, "Longitude": -122.3959, "Address": "", "City": "SAN FRANCISCO", "State": "CA", "Zip": "94107", "Country": "US" }, { "precision": "zip", "Latitude": 37.371991, "Longitude": -122.026020, "Address": "", "City": "SUNNYVALE", "State": "CA", "Zip": "94085", "Country": "US" } ] lua-cjson-2.1.0+dfsg/tests/test.lua000077500000000000000000000430711201567770400171710ustar00rootroot00000000000000#!/usr/bin/env lua -- Lua CJSON tests -- -- Mark Pulford -- -- Note: The output of this script is easier to read with "less -S" local json = require "cjson" local json_safe = require "cjson.safe" local util = require "cjson.util" local function gen_raw_octets() local chars = {} for i = 0, 255 do chars[i + 1] = string.char(i) end return table.concat(chars) end -- Generate every UTF-16 codepoint, including supplementary codes local function gen_utf16_escaped() -- Create raw table escapes local utf16_escaped = {} local count = 0 local function append_escape(code) local esc = ('\\u%04X'):format(code) table.insert(utf16_escaped, esc) end table.insert(utf16_escaped, 
'"') for i = 0, 0xD7FF do append_escape(i) end -- Skip 0xD800 - 0xDFFF since they are used to encode supplementary -- codepoints for i = 0xE000, 0xFFFF do append_escape(i) end -- Append surrogate pair for each supplementary codepoint for high = 0xD800, 0xDBFF do for low = 0xDC00, 0xDFFF do append_escape(high) append_escape(low) end end table.insert(utf16_escaped, '"') return table.concat(utf16_escaped) end function load_testdata() local data = {} -- Data for 8bit raw <-> escaped octets tests data.octets_raw = gen_raw_octets() data.octets_escaped = util.file_load("octets-escaped.dat") -- Data for \uXXXX -> UTF-8 test data.utf16_escaped = gen_utf16_escaped() -- Load matching data for utf16_escaped local utf8_loaded utf8_loaded, data.utf8_raw = pcall(util.file_load, "utf8.dat") if not utf8_loaded then data.utf8_raw = "Failed to load utf8.dat - please run genutf8.pl" end data.table_cycle = {} data.table_cycle[1] = data.table_cycle local big = {} for i = 1, 1100 do big = { { 10, false, true, json.null }, "string", a = big } end data.deeply_nested_data = big return data end function test_decode_cycle(filename) local obj1 = json.decode(util.file_load(filename)) local obj2 = json.decode(json.encode(obj1)) return util.compare_values(obj1, obj2) end -- Set up data used in tests local Inf = math.huge; local NaN = math.huge * 0; local testdata = load_testdata() local cjson_tests = { -- Test API variables { "Check module name, version", function () return json._NAME, json._VERSION end, { }, true, { "cjson", "2.1.0" } }, -- Test decoding simple types { "Decode string", json.decode, { '"test string"' }, true, { "test string" } }, { "Decode numbers", json.decode, { '[ 0.0, -5e3, -1, 0.3e-3, 1023.2, 0e10 ]' }, true, { { 0.0, -5000, -1, 0.0003, 1023.2, 0 } } }, { "Decode null", json.decode, { 'null' }, true, { json.null } }, { "Decode true", json.decode, { 'true' }, true, { true } }, { "Decode false", json.decode, { 'false' }, true, { false } }, { "Decode object with numeric keys", json.decode, { '{ "1": "one", "3": "three" }' }, true, { { ["1"] = "one", ["3"] = "three" } } }, { "Decode object with string keys", json.decode, { '{ "a": "a", "b": "b" }' }, true, { { a = "a", b = "b" } } }, { "Decode array", json.decode, { '[ "one", null, "three" ]' }, true, { { "one", json.null, "three" } } }, -- Test decoding errors { "Decode UTF-16BE [throw error]", json.decode, { '\0"\0"' }, false, { "JSON parser does not support UTF-16 or UTF-32" } }, { "Decode UTF-16LE [throw error]", json.decode, { '"\0"\0' }, false, { "JSON parser does not support UTF-16 or UTF-32" } }, { "Decode UTF-32BE [throw error]", json.decode, { '\0\0\0"' }, false, { "JSON parser does not support UTF-16 or UTF-32" } }, { "Decode UTF-32LE [throw error]", json.decode, { '"\0\0\0' }, false, { "JSON parser does not support UTF-16 or UTF-32" } }, { "Decode partial JSON [throw error]", json.decode, { '{ "unexpected eof": ' }, false, { "Expected value but found T_END at character 21" } }, { "Decode with extra comma [throw error]", json.decode, { '{ "extra data": true }, false' }, false, { "Expected the end but found T_COMMA at character 23" } }, { "Decode invalid escape code [throw error]", json.decode, { [[ { "bad escape \q code" } ]] }, false, { "Expected object key string but found invalid escape code at character 16" } }, { "Decode invalid unicode escape [throw error]", json.decode, { [[ { "bad unicode \u0f6 escape" } ]] }, false, { "Expected object key string but found invalid unicode escape code at character 17" } }, { "Decode invalid keyword 
[throw error]", json.decode, { ' [ "bad barewood", test ] ' }, false, { "Expected value but found invalid token at character 20" } }, { "Decode invalid number #1 [throw error]", json.decode, { '[ -+12 ]' }, false, { "Expected value but found invalid number at character 3" } }, { "Decode invalid number #2 [throw error]", json.decode, { '-v' }, false, { "Expected value but found invalid number at character 1" } }, { "Decode invalid number exponent [throw error]", json.decode, { '[ 0.4eg10 ]' }, false, { "Expected comma or array end but found invalid token at character 6" } }, -- Test decoding nested arrays / objects { "Set decode_max_depth(5)", json.decode_max_depth, { 5 }, true, { 5 } }, { "Decode array at nested limit", json.decode, { '[[[[[ "nested" ]]]]]' }, true, { {{{{{ "nested" }}}}} } }, { "Decode array over nested limit [throw error]", json.decode, { '[[[[[[ "nested" ]]]]]]' }, false, { "Found too many nested data structures (6) at character 6" } }, { "Decode object at nested limit", json.decode, { '{"a":{"b":{"c":{"d":{"e":"nested"}}}}}' }, true, { {a={b={c={d={e="nested"}}}}} } }, { "Decode object over nested limit [throw error]", json.decode, { '{"a":{"b":{"c":{"d":{"e":{"f":"nested"}}}}}}' }, false, { "Found too many nested data structures (6) at character 26" } }, { "Set decode_max_depth(1000)", json.decode_max_depth, { 1000 }, true, { 1000 } }, { "Decode deeply nested array [throw error]", json.decode, { string.rep("[", 1100) .. '1100' .. string.rep("]", 1100)}, false, { "Found too many nested data structures (1001) at character 1001" } }, -- Test encoding nested tables { "Set encode_max_depth(5)", json.encode_max_depth, { 5 }, true, { 5 } }, { "Encode nested table as array at nested limit", json.encode, { {{{{{"nested"}}}}} }, true, { '[[[[["nested"]]]]]' } }, { "Encode nested table as array after nested limit [throw error]", json.encode, { { {{{{{"nested"}}}}} } }, false, { "Cannot serialise, excessive nesting (6)" } }, { "Encode nested table as object at nested limit", json.encode, { {a={b={c={d={e="nested"}}}}} }, true, { '{"a":{"b":{"c":{"d":{"e":"nested"}}}}}' } }, { "Encode nested table as object over nested limit [throw error]", json.encode, { {a={b={c={d={e={f="nested"}}}}}} }, false, { "Cannot serialise, excessive nesting (6)" } }, { "Encode table with cycle [throw error]", json.encode, { testdata.table_cycle }, false, { "Cannot serialise, excessive nesting (6)" } }, { "Set encode_max_depth(1000)", json.encode_max_depth, { 1000 }, true, { 1000 } }, { "Encode deeply nested data [throw error]", json.encode, { testdata.deeply_nested_data }, false, { "Cannot serialise, excessive nesting (1001)" } }, -- Test encoding simple types { "Encode null", json.encode, { json.null }, true, { 'null' } }, { "Encode true", json.encode, { true }, true, { 'true' } }, { "Encode false", json.encode, { false }, true, { 'false' } }, { "Encode empty object", json.encode, { { } }, true, { '{}' } }, { "Encode integer", json.encode, { 10 }, true, { '10' } }, { "Encode string", json.encode, { "hello" }, true, { '"hello"' } }, { "Encode Lua function [throw error]", json.encode, { function () end }, false, { "Cannot serialise function: type not supported" } }, -- Test decoding invalid numbers { "Set decode_invalid_numbers(true)", json.decode_invalid_numbers, { true }, true, { true } }, { "Decode hexadecimal", json.decode, { '0x6.ffp1' }, true, { 13.9921875 } }, { "Decode numbers with leading zero", json.decode, { '[ 0123, 00.33 ]' }, true, { { 123, 0.33 } } }, { "Decode +-Inf", json.decode, { '[ 
+Inf, Inf, -Inf ]' }, true, { { Inf, Inf, -Inf } } }, { "Decode +-Infinity", json.decode, { '[ +Infinity, Infinity, -Infinity ]' }, true, { { Inf, Inf, -Inf } } }, { "Decode +-NaN", json.decode, { '[ +NaN, NaN, -NaN ]' }, true, { { NaN, NaN, NaN } } }, { "Decode Infrared (not infinity) [throw error]", json.decode, { 'Infrared' }, false, { "Expected the end but found invalid token at character 4" } }, { "Decode Noodle (not NaN) [throw error]", json.decode, { 'Noodle' }, false, { "Expected value but found invalid token at character 1" } }, { "Set decode_invalid_numbers(false)", json.decode_invalid_numbers, { false }, true, { false } }, { "Decode hexadecimal [throw error]", json.decode, { '0x6' }, false, { "Expected value but found invalid number at character 1" } }, { "Decode numbers with leading zero [throw error]", json.decode, { '[ 0123, 00.33 ]' }, false, { "Expected value but found invalid number at character 3" } }, { "Decode +-Inf [throw error]", json.decode, { '[ +Inf, Inf, -Inf ]' }, false, { "Expected value but found invalid token at character 3" } }, { "Decode +-Infinity [throw error]", json.decode, { '[ +Infinity, Infinity, -Infinity ]' }, false, { "Expected value but found invalid token at character 3" } }, { "Decode +-NaN [throw error]", json.decode, { '[ +NaN, NaN, -NaN ]' }, false, { "Expected value but found invalid token at character 3" } }, { 'Set decode_invalid_numbers("on")', json.decode_invalid_numbers, { "on" }, true, { true } }, -- Test encoding invalid numbers { "Set encode_invalid_numbers(false)", json.encode_invalid_numbers, { false }, true, { false } }, { "Encode NaN [throw error]", json.encode, { NaN }, false, { "Cannot serialise number: must not be NaN or Inf" } }, { "Encode Infinity [throw error]", json.encode, { Inf }, false, { "Cannot serialise number: must not be NaN or Inf" } }, { "Set encode_invalid_numbers(\"null\")", json.encode_invalid_numbers, { "null" }, true, { "null" } }, { "Encode NaN as null", json.encode, { NaN }, true, { "null" } }, { "Encode Infinity as null", json.encode, { Inf }, true, { "null" } }, { "Set encode_invalid_numbers(true)", json.encode_invalid_numbers, { true }, true, { true } }, { "Encode NaN", json.encode, { NaN }, true, { "nan" } }, { "Encode Infinity", json.encode, { Inf }, true, { "inf" } }, { 'Set encode_invalid_numbers("off")', json.encode_invalid_numbers, { "off" }, true, { false } }, -- Test encoding tables { "Set encode_sparse_array(true, 2, 3)", json.encode_sparse_array, { true, 2, 3 }, true, { true, 2, 3 } }, { "Encode sparse table as array #1", json.encode, { { [3] = "sparse test" } }, true, { '[null,null,"sparse test"]' } }, { "Encode sparse table as array #2", json.encode, { { [1] = "one", [4] = "sparse test" } }, true, { '["one",null,null,"sparse test"]' } }, { "Encode sparse array as object", json.encode, { { [1] = "one", [5] = "sparse test" } }, true, { '{"1":"one","5":"sparse test"}' } }, { "Encode table with numeric string key as object", json.encode, { { ["2"] = "numeric string key test" } }, true, { '{"2":"numeric string key test"}' } }, { "Set encode_sparse_array(false)", json.encode_sparse_array, { false }, true, { false, 2, 3 } }, { "Encode table with incompatible key [throw error]", json.encode, { { [false] = "wrong" } }, false, { "Cannot serialise boolean: table key must be a number or string" } }, -- Test escaping { "Encode all octets (8-bit clean)", json.encode, { testdata.octets_raw }, true, { testdata.octets_escaped } }, { "Decode all escaped octets", json.decode, { testdata.octets_escaped }, true, 
{ testdata.octets_raw } }, { "Decode single UTF-16 escape", json.decode, { [["\uF800"]] }, true, { "\239\160\128" } }, { "Decode all UTF-16 escapes (including surrogate combinations)", json.decode, { testdata.utf16_escaped }, true, { testdata.utf8_raw } }, { "Decode swapped surrogate pair [throw error]", json.decode, { [["\uDC00\uD800"]] }, false, { "Expected value but found invalid unicode escape code at character 2" } }, { "Decode duplicate high surrogate [throw error]", json.decode, { [["\uDB00\uDB00"]] }, false, { "Expected value but found invalid unicode escape code at character 2" } }, { "Decode duplicate low surrogate [throw error]", json.decode, { [["\uDB00\uDB00"]] }, false, { "Expected value but found invalid unicode escape code at character 2" } }, { "Decode missing low surrogate [throw error]", json.decode, { [["\uDB00"]] }, false, { "Expected value but found invalid unicode escape code at character 2" } }, { "Decode invalid low surrogate [throw error]", json.decode, { [["\uDB00\uD"]] }, false, { "Expected value but found invalid unicode escape code at character 2" } }, -- Test locale support -- -- The standard Lua interpreter is ANSI C online doesn't support locales -- by default. Force a known problematic locale to test strtod()/sprintf(). { "Set locale to cs_CZ (comma separator)", function () os.setlocale("cs_CZ") json.new() end }, { "Encode number under comma locale", json.encode, { 1.5 }, true, { '1.5' } }, { "Decode number in array under comma locale", json.decode, { '[ 10, "test" ]' }, true, { { 10, "test" } } }, { "Revert locale to POSIX", function () os.setlocale("C") json.new() end }, -- Test encode_keep_buffer() and enable_number_precision() { "Set encode_keep_buffer(false)", json.encode_keep_buffer, { false }, true, { false } }, { "Set encode_number_precision(3)", json.encode_number_precision, { 3 }, true, { 3 } }, { "Encode number with precision 3", json.encode, { 1/3 }, true, { "0.333" } }, { "Set encode_number_precision(14)", json.encode_number_precision, { 14 }, true, { 14 } }, { "Set encode_keep_buffer(true)", json.encode_keep_buffer, { true }, true, { true } }, -- Test config API errors -- Function is listed as '?' due to pcall { "Set encode_number_precision(0) [throw error]", json.encode_number_precision, { 0 }, false, { "bad argument #1 to '?' (expected integer between 1 and 14)" } }, { "Set encode_number_precision(\"five\") [throw error]", json.encode_number_precision, { "five" }, false, { "bad argument #1 to '?' (number expected, got string)" } }, { "Set encode_keep_buffer(nil, true) [throw error]", json.encode_keep_buffer, { nil, true }, false, { "bad argument #2 to '?' (found too many arguments)" } }, { "Set encode_max_depth(\"wrong\") [throw error]", json.encode_max_depth, { "wrong" }, false, { "bad argument #1 to '?' (number expected, got string)" } }, { "Set decode_max_depth(0) [throw error]", json.decode_max_depth, { "0" }, false, { "bad argument #1 to '?' (expected integer between 1 and 2147483647)" } }, { "Set encode_invalid_numbers(-2) [throw error]", json.encode_invalid_numbers, { -2 }, false, { "bad argument #1 to '?' (invalid option '-2')" } }, { "Set decode_invalid_numbers(true, false) [throw error]", json.decode_invalid_numbers, { true, false }, false, { "bad argument #2 to '?' (found too many arguments)" } }, { "Set encode_sparse_array(\"not quite on\") [throw error]", json.encode_sparse_array, { "not quite on" }, false, { "bad argument #1 to '?' 
(invalid option 'not quite on')" } }, { "Reset Lua CJSON configuration", function () json = json.new() end }, -- Wrap in a function to ensure the table returned by json.new() is used { "Check encode_sparse_array()", function (...) return json.encode_sparse_array(...) end, { }, true, { false, 2, 10 } }, { "Encode (safe) simple value", json_safe.encode, { true }, true, { "true" } }, { "Encode (safe) argument validation [throw error]", json_safe.encode, { "arg1", "arg2" }, false, { "bad argument #1 to '?' (expected 1 argument)" } }, { "Decode (safe) error generation", json_safe.decode, { "Oops" }, true, { nil, "Expected value but found invalid token at character 1" } }, { "Decode (safe) error generation after new()", function(...) return json_safe.new().decode(...) end, { "Oops" }, true, { nil, "Expected value but found invalid token at character 1" } }, } print(("==> Testing Lua CJSON version %s\n"):format(json._VERSION)) util.run_test_group(cjson_tests) for _, filename in ipairs(arg) do util.run_test("Decode cycle " .. filename, test_decode_cycle, { filename }, true, { true }) end local pass, total = util.run_test_summary() if pass == total then print("==> Summary: all tests succeeded") else print(("==> Summary: %d/%d tests failed"):format(total - pass, total)) os.exit(1) end -- vi:ai et sw=4 ts=4: lua-cjson-2.1.0+dfsg/tests/types.json000066400000000000000000000000401201567770400175300ustar00rootroot00000000000000{ "array": [ 10, true, null ] }