pax_global_header00006660000000000000000000000064144766032360014525gustar00rootroot0000000000000052 comment=c9acd408ba3a374ba41dd62f595f43fa2e5bfa6f openstep-plist-0.3.1/000077500000000000000000000000001447660323600145145ustar00rootroot00000000000000openstep-plist-0.3.1/.coveragerc000066400000000000000000000012731447660323600166400ustar00rootroot00000000000000[run] # measure 'branch' coverage in addition to 'statement' coverage # See: http://coverage.readthedocs.org/en/coverage-4.0.3/branch.html#branch branch = True plugins = Cython.Coverage # list of directories or packages to measure source = src/openstep_plist [report] # Regexes for lines to exclude from consideration exclude_lines = # keywords to use in inline comments to skip coverage pragma: no cover # don't complain if tests don't hit defensive assertion code raise AssertionError raise NotImplementedError # don't complain if non-runnable code isn't run if 0: if __name__ == .__main__.: # ignore source code that can’t be found ignore_errors = True openstep-plist-0.3.1/.github/000077500000000000000000000000001447660323600160545ustar00rootroot00000000000000openstep-plist-0.3.1/.github/workflows/000077500000000000000000000000001447660323600201115ustar00rootroot00000000000000openstep-plist-0.3.1/.github/workflows/wheels.yml000066400000000000000000000110541447660323600221240ustar00rootroot00000000000000name: Wheels on: push: branches: [master] tags: ["v*.*.*"] pull_request: branches: [master] jobs: build_wheels: name: Build wheel ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: [ "ubuntu-latest", "windows-latest", "macos-latest" ] env: CIBW_ARCHS: auto64 CIBW_ARCHS_MACOS: "x86_64 universal2" # Skip # # * Python 3.6 and 3.7 on all platforms, # * PyPy on Windows. CIBW_SKIP: cp36-* cp37-* pp*-win_amd64 steps: - uses: actions/checkout@v2 with: submodules: recursive fetch-depth: 0 - uses: actions/setup-python@v2 with: python-version: "3.10" - name: Install cibuildwheel run: python -m pip install cibuildwheel - name: Build wheels run: python -m cibuildwheel --output-dir dist - uses: actions/upload-artifact@v2 with: name: wheels-${{ matrix.os }} path: dist/*.whl build_aarch64_wheels: runs-on: ubuntu-latest strategy: matrix: # aarch64 uses qemu so it's slow, build each py version in parallel jobs python: [38, 39, 310, 311, 312] arch: [aarch64] env: # Skip building aarch64 wheels for musllinux until someone ask... CIBW_SKIP: "*-musllinux*" CIBW_BUILD: cp${{ matrix.python }}-* CIBW_ARCHS: ${{ matrix.arch }} steps: - uses: actions/checkout@v2 with: submodules: recursive - uses: docker/setup-qemu-action@v1.2.0 with: platforms: all - name: Install dependencies run: pip install cibuildwheel - name: Build and Test Wheels run: python -m cibuildwheel --output-dir dist - uses: actions/upload-artifact@v2 with: name: wheels-${{ matrix.python }}-linux-${{ matrix.arch }} path: dist/*.whl deploy: # only run if the commit is tagged... if: startsWith(github.ref, 'refs/tags/v') # ... and all build jobs completed successfully needs: [build_wheels, build_aarch64_wheels] runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 with: submodules: recursive - name: Set up Python uses: actions/setup-python@v2 with: python-version: "3.x" - name: Install dependencies run: | python -m pip install --upgrade pip pip install --upgrade setuptools wheel twine cython - name: Download artifacts from build jobs uses: actions/download-artifact@v2 with: path: dist - name: Extract release notes from annotated tag message id: release_notes env: # e.g. 
v0.1.0a1, v1.2.0b2 or v2.3.0rc3, but not v1.0.0 PRERELEASE_TAG_PATTERN: "v[[:digit:]]+\\.[[:digit:]]+\\.[[:digit:]]+([ab]|rc)[[:digit:]]+" run: | # GH checkout action doesn't preserve tag annotations, we must fetch them # https://github.com/actions/checkout/issues/290 git fetch --tags --force # strip leading 'refs/tags/' to get the tag name TAG_NAME="${GITHUB_REF##*/}" # Dump tag message to temporary .md file (excluding the PGP signature at the bottom) TAG_MESSAGE=$(git tag -l --format='%(contents)' $TAG_NAME | sed -n '/-----BEGIN PGP SIGNATURE-----/q;p') echo "$TAG_MESSAGE" > "${{ runner.temp }}/release_notes.md" # if the tag has a pre-release suffix mark the Github Release accordingly if egrep -q "$PRERELEASE_TAG_PATTERN" <<< "$TAG_NAME"; then echo "Tag contains a pre-release suffix" echo "IS_PRERELEASE=true" >> "$GITHUB_ENV" else echo "Tag does not contain pre-release suffix" echo "IS_PRERELEASE=false" >> "$GITHUB_ENV" fi - name: Create GitHub release id: create_release uses: actions/create-release@v1 env: # This token is provided by Actions, you do not need to create your own token GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref }} release_name: ${{ github.ref }} body_path: "${{ runner.temp }}/release_notes.md" draft: false prerelease: ${{ env.IS_PRERELEASE }} - name: Build and publish env: TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} run: | if [ "$IS_PRERELEASE" == true ]; then echo "DEBUG: This is a pre-release" else echo "DEBUG: This is a final release" fi python setup.py sdist twine upload dist/wheels-*/*.whl dist/*.tar.gz openstep-plist-0.3.1/.gitignore000066400000000000000000000010251447660323600165020ustar00rootroot00000000000000# Byte-compiled / optimized files __pycache__/ *.py[co] *.o *.so # Distribution / Packaging *.egg *.egg-info *.eggs MANIFEST build dist # Unit test / coverage files .tox/* .cache/ .pytest_cache/ .coverage .coverage.* coverage.xml htmlcov/ # cython annotated html files src/openstep_plist/*.html tests/*.html # emacs backup files *~ # OSX Finder .DS_Store # Generated c file(s) from Cython source src/openstep_plist/*.c src/openstep_plist/*.cpp tests/*.c tests/*.cpp # autogenerated version file src/openstep_plist/_version.py openstep-plist-0.3.1/.gitmodules000066400000000000000000000001501447660323600166650ustar00rootroot00000000000000[submodule "vendor/msinttypes"] path = vendor/msinttypes url = https://github.com/chemeris/msinttypes openstep-plist-0.3.1/LICENSE000066400000000000000000000020521447660323600155200ustar00rootroot00000000000000Copyright 2018 The FontTools Organization Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. openstep-plist-0.3.1/MANIFEST.in000066400000000000000000000000431447660323600162470ustar00rootroot00000000000000include vendor/msinttypes/stdint.h openstep-plist-0.3.1/README.md000066400000000000000000000012441447660323600157740ustar00rootroot00000000000000[![Github CI Status](https://github.com/fonttools/openstep-plist/workflows/Wheels/badge.svg)](https://github.com/fonttools/openstep-plist/actions?query=workflow%3A%22Wheels%22) [![codecov](https://codecov.io/gh/fonttools/openstep-plist/branch/master/graph/badge.svg)](https://codecov.io/gh/fonttools/openstep-plist) [![PyPI](https://img.shields.io/pypi/v/openstep-plist.svg)](https://pypi.org/project/openstep-plist/) # openstep-plist A parser for the "old style" OpenStep property list format (also known as ASCII plist), written in Cython. Largely based on the CoreFoundation implementation found here: https://github.com/opensource-apple/CF/blob/master/CFOldStylePList.c openstep-plist-0.3.1/config.sh000066400000000000000000000015051447660323600163160ustar00rootroot00000000000000# Define custom utilities # Test for OSX with [ -n "$IS_OSX" ] function pre_build { # Any stuff that you need to do before you start building the wheels # Runs in the root directory of this repository. : } function run_tests { # The function is called from an empty temporary directory. cd .. # Get absolute path to the pre-compiled wheel wheelhouse=$(abspath wheelhouse) wheel=$(ls ${wheelhouse}/openstep_plist*.whl | head -n 1) if [ ! -e "${wheel}" ]; then echo "error: can't find wheel in ${wheelhouse} folder" 1>&2 exit 1 fi # select tox environment based on the current python version # E.g.: '2.7' -> 'py27' TOXENV="py${MB_PYTHON_VERSION//\./}-nocov" # Install pre-compiled wheel and run tests against it tox --installpkg "${wheel}" -e "${TOXENV}" } openstep-plist-0.3.1/pyproject.toml000066400000000000000000000003231447660323600174260ustar00rootroot00000000000000[build-system] requires = [ "setuptools", "wheel", "cython >= 0.28.5", ] build-backend = "setuptools.build_meta" [tool.cibuildwheel] test-requires = "pytest" test-command = "pytest {project}/tests" openstep-plist-0.3.1/setup.cfg000066400000000000000000000000431447660323600163320ustar00rootroot00000000000000[metadata] license_files = LICENSE openstep-plist-0.3.1/setup.py000066400000000000000000000104011447660323600162220ustar00rootroot00000000000000from setuptools import setup, find_packages, Extension from setuptools.command.build_ext import build_ext as _build_ext from setuptools.command.sdist import sdist as _sdist from distutils import log import os import sys import pkg_resources from io import open import re argv = sys.argv[1:] needs_wheel = {"bdist_wheel"}.intersection(argv) wheel = ["wheel"] if needs_wheel else [] # check if minimum required Cython is available cython_version_re = re.compile(r'\s*"cython\s*>=\s*([0-9][0-9\w\.]*)\s*"') with open("pyproject.toml", "r", encoding="utf-8") as fp: for line in fp: m = cython_version_re.match(line) if m: cython_min_version = m.group(1) break else: sys.exit("error: could not parse cython version from pyproject.toml") try: required_cython = "cython >= %s" % cython_min_version pkg_resources.require(required_cython) except pkg_resources.ResolutionError: with_cython = False else: with_cython = True class 
cython_build_ext(_build_ext): """Compile *.pyx source files to *.c using cythonize if Cython is installed, else use the pre-generated *.c sources. """ def finalize_options(self): if with_cython: from Cython.Build import cythonize # optionally enable line tracing for test coverage support linetrace = os.environ.get("CYTHON_TRACE") == "1" self.distribution.ext_modules[:] = cythonize( self.distribution.ext_modules, force=linetrace or self.force, annotate=os.environ.get("CYTHON_ANNOTATE") == "1", quiet=not self.verbose, compiler_directives={ "linetrace": linetrace, "language_level": 3, "embedsignature": True, }, include_path=["src"], ) else: log.warn( "%s not installed; using pre-generated *.c sources" % required_cython ) for ext in self.distribution.ext_modules: ext.sources = [re.sub(r"\.pyx$", ".c", n) for n in ext.sources] _build_ext.finalize_options(self) class cython_sdist(_sdist): """Run 'cythonize' on *.pyx sources to ensure the *.c files included in the source distribution are up-to-date. """ def run(self): if not with_cython: from distutils.errors import DistutilsSetupError raise DistutilsSetupError( "Cython >= %s is required to make sdist" % cython_min_version ) from Cython.Build import cythonize cythonize( self.distribution.ext_modules, force=True, quiet=not self.verbose, compiler_directives={"language_level": 3, "embedsignature": True}, include_path=["src"], ) _sdist.run(self) # need to include this for Visual Studio 2008 doesn't have stdint.h include_dirs = ( [os.path.join(os.path.dirname(__file__), "vendor", "msinttypes")] if os.name == "nt" and sys.version_info < (3,) else [] ) cython_modules = ["parser", "util", "writer", "_test"] extensions = [ Extension( "openstep_plist." + mod, sources=["src/openstep_plist/%s.pyx" % mod], include_dirs=include_dirs, language="c++", extra_compile_args=["-std=c++11"] if sys.platform != "win32" else [], ) for mod in cython_modules ] with open("README.md", "r") as f: long_description = f.read() version_file = os.path.join("src", "openstep_plist", "_version.py") setup_args = dict( name="openstep_plist", use_scm_version={"write_to": version_file}, description="ASCII plist parser written in Cython", author="Cosimo Lupo", author_email="cosimo@anthrotype.com", url="https://github.com/fonttools/openstep-plist", license="MIT", long_description=long_description, long_description_content_type="text/markdown", package_dir={"": "src"}, packages=find_packages("src"), include_package_data=True, ext_modules=extensions, setup_requires=["setuptools_scm"] + wheel, python_requires=">=3.8", cmdclass={"build_ext": cython_build_ext, "sdist": cython_sdist}, zip_safe=False, ) if __name__ == "__main__": setup(**setup_args) openstep-plist-0.3.1/src/000077500000000000000000000000001447660323600153035ustar00rootroot00000000000000openstep-plist-0.3.1/src/openstep_plist/000077500000000000000000000000001447660323600203535ustar00rootroot00000000000000openstep-plist-0.3.1/src/openstep_plist/__init__.py000066400000000000000000000003661447660323600224710ustar00rootroot00000000000000from .parser import load, loads, ParseError from .writer import dump, dumps try: from ._version import version as __version__ except ImportError: __version__ = "0.0.0+unknown" __all__ = ["load", "loads", "dump", "dumps", "ParseError"] openstep-plist-0.3.1/src/openstep_plist/__main__.py000077500000000000000000000042721447660323600224550ustar00rootroot00000000000000#!/usr/bin/env python from __future__ import absolute_import, unicode_literals import argparse import openstep_plist import json import 
binascii import pydoc from functools import partial from io import StringIO, open class BytesEncoder(json.JSONEncoder): def default(self, obj): from glyphsLib.types import BinaryData if isinstance(obj, (bytes, BinaryData)): return "<%s>" % binascii.hexlify(obj).decode() return json.JSONEncoder.default(self, obj) def main(args=None): if args is None: import sys args = sys.argv[1:] parser = argparse.ArgumentParser() parser.add_argument("infile", help="input file") parser.add_argument("outfile", help="output file", default="-", nargs="?") parser.add_argument( "-g", "--glyphs", help="use glyphsLib parser/writer", action="store_true" ) parser.add_argument( "--no-pager", dest="pager", help="do not use pager", action="store_false" ) parser.add_argument( "-j", "--json", help="use json to serialize", action="store_true", default=False ) parser.add_argument("-i", "--indent", help="indentation level", type=int, default=2) args = parser.parse_args(args) if not args.glyphs: parse = partial(openstep_plist.load, use_numbers=True) else: def parse(fp, dict_type=dict): from glyphsLib.parser import Parser s = fp.read() p = Parser(current_type=dict_type) return p.parse(s) if args.json: dump = partial( json.dump, cls=BytesEncoder, sort_keys=True, indent=" " * args.indent ) else: if args.glyphs: from glyphsLib.writer import dump else: dump = partial(openstep_plist.dump, indent=args.indent) with open(args.infile, "r", encoding="utf-8") as fp: data = parse(fp) if args.outfile == "-": if args.pager: buf = StringIO() dump(data, buf) pydoc.pager(buf.getvalue()) else: dump(data, sys.stdout) else: with open(args.outfile, "w", encoding="utf-8") as fp: dump(data, fp) if __name__ == "__main__": main() openstep-plist-0.3.1/src/openstep_plist/_test.pyx000066400000000000000000000042641447660323600222410ustar00rootroot00000000000000#cython: language_level=3 #distutils: define_macros=CYTHON_TRACE_NOGIL=1 from .parser cimport ( ParseInfo, line_number_strings as _line_number_strings, advance_to_non_space as _advance_to_non_space, get_slashed_char as _get_slashed_char, parse_unquoted_plist_string as _parse_unquoted_plist_string, parse_plist_string as _parse_plist_string, ) from .util cimport ( tounicode, is_valid_unquoted_string_char as _is_valid_unquoted_string_char, ) from .writer cimport string_needs_quotes from cpython.mem cimport PyMem_Free from cpython.unicode cimport ( PyUnicode_AsUCS4Copy, PyUnicode_GET_LENGTH, ) cdef class ParseContext: cdef unicode s cdef ParseInfo pi cdef Py_UCS4 *buf cdef object dict_type def __cinit__( self, string, Py_ssize_t offset=0, dict_type=dict, bint use_numbers=False ): self.s = tounicode(string) cdef Py_ssize_t length = PyUnicode_GET_LENGTH(self.s) self.buf = PyUnicode_AsUCS4Copy(self.s) if not self.buf: raise MemoryError() self.dict_type = dict_type self.pi = ParseInfo( begin=self.buf, curr=self.buf + offset, end=self.buf + length, dict_type=dict_type, use_numbers=use_numbers, ) def __dealloc__(self): PyMem_Free(self.buf) def is_valid_unquoted_string_char(Py_UCS4 c): return _is_valid_unquoted_string_char(c) def line_number_strings(s, offset=0): cdef ParseContext ctx = ParseContext(s, offset) return _line_number_strings(&ctx.pi) def advance_to_non_space(s, offset=0): cdef ParseContext ctx = ParseContext(s, offset) eof = not _advance_to_non_space(&ctx.pi) return None if eof else s[ctx.pi.curr - ctx.pi.begin] def get_slashed_char(s, offset=0): cdef ParseContext ctx = ParseContext(s, offset) return _get_slashed_char(&ctx.pi) def parse_unquoted_plist_string(s): cdef ParseContext ctx = 
ParseContext(s) return _parse_unquoted_plist_string(&ctx.pi) def parse_plist_string(s, required=True): cdef ParseContext ctx = ParseContext(s) return _parse_plist_string(&ctx.pi, required=required) openstep-plist-0.3.1/src/openstep_plist/parser.pxd000066400000000000000000000023011447660323600223600ustar00rootroot00000000000000#cython: language_level=3 from libc.stdint cimport uint32_t from libcpp.vector cimport vector ctypedef struct ParseInfo: const Py_UCS4 *begin const Py_UCS4 *curr const Py_UCS4 *end void *dict_type bint use_numbers cdef class ParseError(Exception): pass cdef uint32_t line_number_strings(ParseInfo *pi) cdef bint advance_to_non_space(ParseInfo *pi) cdef Py_UCS4 get_slashed_char(ParseInfo *pi) cdef unicode parse_quoted_plist_string(ParseInfo *pi, Py_UCS4 quote) cdef enum UnquotedType: UNQUOTED_STRING = 0 UNQUOTED_INTEGER = 1 UNQUOTED_FLOAT = 2 cdef UnquotedType get_unquoted_string_type(const Py_UCS4 *buf, Py_ssize_t length) cdef object parse_unquoted_plist_string(ParseInfo *pi, bint ensure_string=*) cdef unicode parse_plist_string(ParseInfo *pi, bint required=*) cdef list parse_plist_array(ParseInfo *pi) cdef object parse_plist_dict_content(ParseInfo *pi) cdef object parse_plist_dict(ParseInfo *pi) cdef unsigned char from_hex_digit(unsigned char ch) cdef int get_data_bytes(ParseInfo *pi, vector[unsigned char]& result) except -1 cdef bytes parse_plist_data(ParseInfo *pi) cdef object parse_plist_object(ParseInfo *pi, bint required=*) openstep-plist-0.3.1/src/openstep_plist/parser.pyx000066400000000000000000000445661447660323600224300ustar00rootroot00000000000000#cython: language_level=3 #distutils: define_macros=CYTHON_TRACE_NOGIL=1 from cpython.bytes cimport PyBytes_FromStringAndSize from cpython.mem cimport PyMem_Free from cpython.unicode cimport ( PyUnicode_4BYTE_KIND, PyUnicode_FromKindAndData, PyUnicode_AsUCS4Copy, PyUnicode_GET_LENGTH, ) from libc.stdint cimport uint8_t, uint16_t, uint32_t from libcpp.algorithm cimport copy from libcpp.iterator cimport back_inserter from libcpp.vector cimport vector from cpython.version cimport PY_MAJOR_VERSION cimport cython from .util cimport ( tounicode, tostr, is_valid_unquoted_string_char, isdigit, isxdigit, is_high_surrogate, is_low_surrogate, unicode_scalar_from_surrogates, ) cdef uint32_t line_number_strings(ParseInfo *pi): # warning: doesn't have a good idea of Unicode line separators cdef const Py_UCS4 *p = pi.begin cdef uint32_t count = 1 while p < pi.curr: if p[0] == c'\r': count += 1 if (p + 1)[0] == c'\n': p += 1 elif p[0] == c'\n': count += 1 p += 1 return count cdef bint advance_to_non_space(ParseInfo *pi): """Returns true if the advance found something that's not whitespace before the end of the buffer, false otherwise. """ cdef Py_UCS4 ch2, ch3 while pi.curr < pi.end: ch2 = pi.curr[0] pi.curr += 1 if ch2 >= 9 and ch2 <= 0x0d: # tab, newline, vt, form feed, carriage return continue elif ch2 == c' ' or ch2 == 0x2028 or ch2 == 0x2029: continue elif ch2 == c'/': if pi.curr >= pi.end: # whoops; back up and return pi.curr -= 1 return True elif pi.curr[0] == c'/': pi.curr += 1 while pi.curr < pi.end: # go to end of // comment line ch3 = pi.curr[0] if ch3 == c'\n' or ch3 == c'\r' or ch3 == 0x2028 or ch3 == 0x2029: break pi.curr += 1 elif pi.curr[0] == c'*': # handle C-style comments /* ... 
*/ pi.curr += 1 while pi.curr < pi.end: ch2 = pi.curr[0] pi.curr += 1 if ch2 == c'*' and pi.curr < pi.end and pi.curr[0] == c'/': pi.curr += 1 # advance past the '/' break else: pi.curr -= 1 return True else: pi.curr -= 1 return True return False # Table mapping from NextStep Encoding to Unicode characters, used # for decoding octal escaped character codes within quoted plist strings. # Since the first 128 characters (0x0 - 0x7f) are identical to ASCII # and Unicode, the table only maps NextStep range from 0x80 - 0xFF. # Source: ftp://ftp.unicode.org/Public/MAPPINGS/VENDORS/NEXT/NEXTSTEP.TXT cdef unsigned short* NEXT_STEP_DECODING_TABLE = [ 0xA0, 0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC7, 0xC8, 0xC9, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD9, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xB5, 0xD7, 0xF7, 0xA9, 0xA1, 0xA2, 0xA3, 0x2044, 0xA5, 0x192, 0xA7, 0xA4, 0x2019, 0x201C, 0xAB, 0x2039, 0x203A, 0xFB01, 0xFB02, 0xAE, 0x2013, 0x2020, 0x2021, 0xB7, 0xA6, 0xB6, 0x2022, 0x201A, 0x201E, 0x201D, 0xBB, 0x2026, 0x2030, 0xAC, 0xBF, 0xB9, 0x2CB, 0xB4, 0x2C6, 0x2DC, 0xAF, 0x2D8, 0x2D9, 0xA8, 0xB2, 0x2DA, 0xB8, 0xB3, 0x2DD, 0x2DB, 0x2C7, 0x2014, 0xB1, 0xBC, 0xBD, 0xBE, 0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE7, 0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xC6, 0xED, 0xAA, 0xEE, 0xEF, 0xF0, 0xF1, 0x141, 0xD8, 0x152, 0xBA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xE6, 0xF9, 0xFA, 0xFB, 0x131, 0xFC, 0xFD, 0x142, 0xF8, 0x153, 0xDF, 0xFE, 0xFF, 0xFFFD, 0xFFFD, ] @cython.boundscheck(False) @cython.wraparound(False) cdef Py_UCS4 get_slashed_char(ParseInfo *pi): cdef Py_UCS4 result cdef uint8_t num cdef unsigned int codepoint, num_digits cdef unsigned long unum cdef unsigned long ch = pi.curr[0] pi.curr += 1 if ( ch == c'0' or ch == c'1' or ch == c'2' or ch == c'3' or ch == c'4' or ch == c'5' or ch == c'6' or ch == c'7' ): num = ch - c'0' # three digits maximum to avoid reading \000 followed by 5 as \5 ! ch = pi.curr[0] if ch >= c'0' and ch <= c'7': # we use in this test the fact that the buffer is zero-terminated pi.curr += 1 num = (num << 3) + ch - c'0' if pi.curr < pi.end: ch = pi.curr[0] if ch >= c'0' and ch <= c'7': pi.curr += 1 num = (num << 3) + ch - c'0' if num < 128: # ascii codepoint = num else: codepoint = NEXT_STEP_DECODING_TABLE[num-128] return codepoint elif ch == c'U': unum = 0 num_digits = 4 while pi.curr < pi.end and num_digits > 0: ch = pi.curr[0] if ch < 128 and isxdigit(ch): pi.curr += 1 unum = (unum << 4) + ( (ch - c'0') if ch <= c'9' else ( (ch - c'A' + 10) if ch <= c'F' else (ch - c'a' + 10) ) ) num_digits -= 1 return unum elif ch == c'a': return c'\a' elif ch == c'b': return c'\b' elif ch == c'f': return c'\f' elif ch == c'n': return c'\n' elif ch == c'r': return c'\r' elif ch == c't': return c'\t' elif ch == c'v': return c'\v' elif ch == c'"': return c'"' elif ch == c'\n': return c'\n' return ch cdef unicode parse_quoted_plist_string(ParseInfo *pi, Py_UCS4 quote): cdef vector[Py_UCS4] string cdef const Py_UCS4 *start_mark = pi.curr cdef const Py_UCS4 *mark = pi.curr cdef const Py_UCS4 *tmp cdef Py_UCS4 ch, ch2 while pi.curr < pi.end: ch = pi.curr[0] if ch == quote: break elif ch == c'\\': string.reserve(string.size() + (pi.curr - mark)) copy(mark, pi.curr, back_inserter(string)) pi.curr += 1 ch = get_slashed_char(pi) # If we are NOT on a "narrow" python 2 build, then we need to parse # two successive \UXXXX escape sequences as one surrogate pair # representing a "supplementary" Unicode scalar value. 
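            # (For example, the escaped pair \UD83D \UDCA9 combines into the
            # single scalar U+1F4A9: 0x10000 + (0xD83D - 0xD800) * 0x400
            # + (0xDCA9 - 0xDC00) == 0x1F4A9.)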
# If we are on a "narrow" build, then the two code units already # represent a single codepoint internally. if ( is_high_surrogate(ch) and pi.curr < pi.end and pi.curr[0] == c"\\" ): tmp = pi.curr pi.curr += 1 ch2 = get_slashed_char(pi) if is_low_surrogate(ch2): ch = unicode_scalar_from_surrogates(high=ch, low=ch2) else: # XXX maybe we should raise here instead of letting this # lone high surrogate (not followed by a low) pass through? pi.curr = tmp string.push_back(ch) mark = pi.curr else: pi.curr += 1 if pi.end <= pi.curr: raise ParseError( "Unterminated quoted string starting on line %d" % line_number_strings(pi) ) if mark != pi.curr: string.reserve(string.size() + (pi.curr - mark)) copy(mark, pi.curr, back_inserter(string)) # Advance past the quote character before returning pi.curr += 1 return PyUnicode_FromKindAndData( PyUnicode_4BYTE_KIND, string.const_data(), string.size() ) def string_to_number(unicode s not None, bint required=True): """Convert string s to either int or float. Raises ValueError if the string is not a number. """ cdef: Py_UCS4 c Py_UCS4* buf Py_ssize_t length = PyUnicode_GET_LENGTH(s) if length: buf = PyUnicode_AsUCS4Copy(s) if not buf: raise MemoryError() try: kind = get_unquoted_string_type(buf, length) if kind == UNQUOTED_FLOAT: return float(s) elif kind == UNQUOTED_INTEGER: return int(s) finally: PyMem_Free(buf) if required: raise ValueError(f"Could not convert string to float or int: {s!r}") else: return s cdef UnquotedType get_unquoted_string_type( const Py_UCS4 *buf, Py_ssize_t length ): """Check if Py_UCS4 array starts with a digit, or '-' followed by a digit, and if it contains a decimal point '.'. Return 0 if string cannot contain a number, 1 if it contains an integer, and 2 if it contains a float. """ # NOTE: floats in scientific notation (e.g. 1e-5) or starting with a # "." (e.g. .05) are not handled here, but are treated as strings. cdef: bint maybe_number = True bint is_float = False int i = 0 # deref here is safe since Py_UCS4* are NULL-terminated Py_UCS4 ch = buf[i] if ch == c'-': if length > 1: i += 1 ch = buf[i] if ch > c'9' or ch < c'0': maybe_number = False else: maybe_number = False elif ch > c'9' or ch < c'0': maybe_number = False if maybe_number: for i in range(i, length): ch = buf[i] if ch > c'9' or ch < c'.' 
or ch == c'/': return UNQUOTED_STRING # not a number elif ch == c'.': if not is_float: is_float = True else: # seen a second '.', it's not a float return UNQUOTED_STRING return UNQUOTED_FLOAT if is_float else UNQUOTED_INTEGER return UNQUOTED_STRING cdef object parse_unquoted_plist_string(ParseInfo *pi, bint ensure_string=False): cdef: const Py_UCS4 *mark = pi.curr Py_UCS4 ch Py_ssize_t length, i unicode s UnquotedType kind while pi.curr < pi.end: ch = pi.curr[0] if is_valid_unquoted_string_char(ch): pi.curr += 1 else: break if pi.curr != mark: length = pi.curr - mark s = PyUnicode_FromKindAndData(PyUnicode_4BYTE_KIND, mark, length) if not ensure_string and pi.use_numbers: kind = get_unquoted_string_type(mark, length) if kind == UNQUOTED_FLOAT: return float(s) elif kind == UNQUOTED_INTEGER: return int(s) return s raise ParseError("Unexpected EOF") cdef unicode parse_plist_string(ParseInfo *pi, bint required=True): cdef Py_UCS4 ch if not advance_to_non_space(pi): if required: raise ParseError("Unexpected EOF while parsing string") ch = pi.curr[0] if ch == c'\'' or ch == c'"': pi.curr += 1 return parse_quoted_plist_string(pi, ch) elif is_valid_unquoted_string_char(ch): return parse_unquoted_plist_string(pi, ensure_string=True) else: if required: raise ParseError( "Invalid string character at line %d: %r" % (line_number_strings(pi), ch) ) return None cdef list parse_plist_array(ParseInfo *pi): cdef list result = [] cdef object tmp = parse_plist_object(pi, required=False) cdef bint found_char while tmp is not None: result.append(tmp) found_char = advance_to_non_space(pi) if not found_char: raise ParseError( "Missing ',' for array at line %d" % line_number_strings(pi) ) if pi.curr[0] != c',': tmp = None else: pi.curr += 1 tmp = parse_plist_object(pi, required=False) found_char = advance_to_non_space(pi) if not found_char or pi.curr[0] != c')': raise ParseError( "Expected terminating ')' for array at line %d" % line_number_strings(pi) ) pi.curr += 1 return result cdef object parse_plist_dict_content(ParseInfo *pi): cdef object dict_type = pi.dict_type result = dict_type() cdef object value cdef bint found_char cdef object key = parse_plist_string(pi, required=False) while key is not None: found_char = advance_to_non_space(pi) if not found_char: raise ParseError( "Missing ';' on line %d" % line_number_strings(pi) ) if pi.curr[0] == c';': # This is a 'strings resource' file using the shortcut format, # although this check here really applies to all plists value = key elif pi.curr[0] == c'=': pi.curr += 1 value = parse_plist_object(pi, required=True) else: raise ParseError( "Unexpected character after key at line %d: %r" % (line_number_strings(pi), pi.curr[0]) ) result[key] = value key = None value = None found_char = advance_to_non_space(pi) if found_char and pi.curr[0] == c';': pi.curr += 1 key = parse_plist_string(pi, required=False) else: raise ParseError("Missing ';' on line %d" % line_number_strings(pi)) return result cdef object parse_plist_dict(ParseInfo *pi): result = parse_plist_dict_content(pi) if not advance_to_non_space(pi) or pi.curr[0] != c'}': raise ParseError( "Expected terminating '}' for dictionary at line %d" % line_number_strings(pi) ) pi.curr += 1 return result cdef inline unsigned char from_hex_digit(unsigned char ch): if isdigit(ch): return ch - c'0' if ch >= c'a' and ch <= c'f': return ch - c'a' + 10 elif ch >= c'A' and ch <= c'F': return ch - c'A' + 10 return 0xff # Just choose a large number for the error code cdef int get_data_bytes(ParseInfo *pi, vector[unsigned char]& 
result) except -1: cdef unsigned char first, second cdef int num_bytes_read = 0 cdef Py_UCS4 ch1, ch2 while pi.curr < pi.end: ch1 = pi.curr[0] if ch1 == c'>': return 0 first = from_hex_digit(ch1) if first != 0xff: # if the first char is a hex, then try to read a second hex pi.curr += 1 if pi.curr >= pi.end: raise ParseError( "Malformed data byte group at line %d: uneven length" % line_number_strings(pi) ) ch2 = pi.curr[0] if ch2 == c'>': raise ParseError( "Malformed data byte group at line %d: uneven length" % line_number_strings(pi) ) second = from_hex_digit(ch2) if second == 0xff: raise ParseError( "Malformed data byte group at line %d: invalid hex digit: %r" % (line_number_strings(pi), ch2) ) result.push_back((first << 4) + second) pi.curr += 1 elif ( ch1 == c' ' or ch1 == c'\n' or ch1 == c'\t' or ch1 == c'\r' or ch1 == 0x2028 or ch1 == 0x2029 ): pi.curr += 1 else: raise ParseError( "Malformed data byte group at line %d: invalid hex digit: %r" % (line_number_strings(pi), ch1) ) cdef bytes parse_plist_data(ParseInfo *pi): cdef vector[unsigned char] data get_data_bytes(pi, data) if pi.curr[0] == c">": pi.curr += 1 # move past '>' return PyBytes_FromStringAndSize(data.const_data(), data.size()) else: raise ParseError( "Expected terminating '>' for data at line %d" % line_number_strings(pi) ) cdef object parse_plist_object(ParseInfo *pi, bint required=True): cdef Py_UCS4 ch if not advance_to_non_space(pi): if required: raise ParseError("Unexpected EOF while parsing plist") ch = pi.curr[0] pi.curr += 1 if ch == c'{': return parse_plist_dict(pi) elif ch == c'(': return parse_plist_array(pi) elif ch == c'<': return parse_plist_data(pi) elif ch == c'\'' or ch == c'"': return parse_quoted_plist_string(pi, ch) elif is_valid_unquoted_string_char(ch): pi.curr -= 1 return parse_unquoted_plist_string(pi) else: pi.curr -= 1 # must back off the character we just read if required: raise ParseError( "Unexpected character at line %d: %r" % (line_number_strings(pi), ch) ) def loads(string, dict_type=dict, bint use_numbers=False): cdef unicode s = tounicode(string) cdef Py_ssize_t length = PyUnicode_GET_LENGTH(s) cdef const Py_UCS4 *begin cdef object result = None cdef Py_UCS4* buf = PyUnicode_AsUCS4Copy(s) if not buf: raise MemoryError() cdef ParseInfo pi = ParseInfo( begin=buf, curr=buf, end=buf + length, dict_type=dict_type, use_numbers=use_numbers, ) try: begin = pi.curr if not advance_to_non_space(&pi): # a file consisting of only whitespace or empty is defined as an # empty dictionary result = {} else: result = parse_plist_object(&pi, required=True) if result: if advance_to_non_space(&pi): if not isinstance(result, unicode): raise ParseError( "Junk after plist at line %d" % line_number_strings(&pi) ) else: # keep parsing for a 'strings resource' file: it looks like # a dictionary without the opening/closing curly braces pi.curr = begin result = parse_plist_dict_content(&pi) finally: PyMem_Free(buf) return result def load(fp, dict_type=dict, use_numbers=False): return loads(fp.read(), dict_type=dict_type, use_numbers=use_numbers) openstep-plist-0.3.1/src/openstep_plist/util.pxd000066400000000000000000000011421447660323600220430ustar00rootroot00000000000000#cython: language_level=3 from libc.stdint cimport uint16_t, uint32_t cdef extern from "": int isxdigit(int c) int isdigit(int c) int isprint(int c) cdef unicode tounicode(s, encoding=*, errors=*) cdef tostr(s, encoding=*, errors=*) cdef bint is_valid_unquoted_string_char(Py_UCS4 x) cdef bint is_high_surrogate(uint32_t ch) cdef bint 
is_low_surrogate(uint32_t ch) cdef uint32_t unicode_scalar_from_surrogates(uint16_t high, uint16_t low) cdef uint16_t high_surrogate_from_unicode_scalar(uint32_t scalar) cdef uint16_t low_surrogate_from_unicode_scalar(uint32_t scalar) openstep-plist-0.3.1/src/openstep_plist/util.pyx000066400000000000000000000034011447660323600220700ustar00rootroot00000000000000#cython: language_level=3 #distutils: define_macros=CYTHON_TRACE_NOGIL=1 from cpython.version cimport PY_MAJOR_VERSION from libc.stdint cimport uint16_t, uint32_t import sys cdef inline unicode tounicode(s, encoding="ascii", errors="strict"): if type(s) is unicode: return s elif PY_MAJOR_VERSION < 3 and isinstance(s, bytes): return (s).decode(encoding, errors=errors) elif isinstance(s, unicode): return unicode(s) else: raise TypeError(f"Could not convert to unicode: {s!r}") cdef inline object tostr(s, encoding="ascii", errors="strict"): if isinstance(s, bytes): return s if PY_MAJOR_VERSION < 3 else s.decode(encoding, errors=errors) elif isinstance(s, unicode): return s.encode(encoding, errors=errors) if PY_MAJOR_VERSION < 3 else s else: raise TypeError(f"Could not convert to str: {s!r}") cdef inline bint is_valid_unquoted_string_char(Py_UCS4 x): return ( (x >= c'a' and x <= c'z') or (x >= c'A' and x <= c'Z') or (x >= c'0' and x <= c'9') or x == c'_' or x == c'$' or x == c'/' or x == c':' or x == c'.' or x == c'-' ) cdef inline bint is_high_surrogate(uint32_t ch): return ch >= 0xD800 and ch <= 0xDBFF cdef inline bint is_low_surrogate(uint32_t ch): return ch >= 0xDC00 and ch <= 0xDFFF cdef inline uint32_t unicode_scalar_from_surrogates(uint16_t high, uint16_t low): return (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000 cdef inline uint16_t high_surrogate_from_unicode_scalar(uint32_t scalar): return ((scalar - 0x10000) // 0x400) + 0xD800 cdef inline uint16_t low_surrogate_from_unicode_scalar(uint32_t scalar): return (scalar - 0x10000) % 0x400 + 0xDC00 openstep-plist-0.3.1/src/openstep_plist/writer.pxd000066400000000000000000000001061447660323600224010ustar00rootroot00000000000000#cython: language_level=3 cpdef bint string_needs_quotes(unicode a) openstep-plist-0.3.1/src/openstep_plist/writer.pyx000066400000000000000000000470171447660323600224420ustar00rootroot00000000000000#cython: language_level=3 #distutils: define_macros=CYTHON_TRACE_NOGIL=1 from collections import OrderedDict from cpython.unicode cimport ( PyUnicode_AsUTF8String, PyUnicode_4BYTE_KIND, PyUnicode_FromKindAndData, PyUnicode_AsUCS4Copy, PyUnicode_GET_LENGTH, PyUnicode_DATA, PyUnicode_KIND, PyUnicode_READ, ) from cpython.bytes cimport PyBytes_GET_SIZE from cpython.object cimport Py_SIZE from cpython.mem cimport PyMem_Free from libcpp.vector cimport vector from libc.stdint cimport uint16_t cimport cython from .util cimport ( tounicode, isdigit, isprint, high_surrogate_from_unicode_scalar, low_surrogate_from_unicode_scalar, ) cdef Py_UCS4 *HEX_MAP = [ c'0', c'1', c'2', c'3', c'4', c'5', c'6', c'7', c'8', c'9', c'A', c'B', c'C', c'D', c'E', c'F', ] cdef Py_UCS4 *ARRAY_SEP_NO_INDENT = [c',', c' '] cdef Py_UCS4 *DICT_KEY_VALUE_SEP = [c' ', c'=', c' '] cdef Py_UCS4 *DICT_ITEM_SEP_NO_INDENT = [c';', c' '] # this table includes A-Z, a-z, 0-9, '.', '_' and '$' cdef bint *VALID_UNQUOTED_CHARS = [ False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, False, True, False, False, False, False, 
False, False, False, False, False, True, False, True, True, True, True, True, True, True, True, True, True, False, False, False, False, False, False, False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, False, True, False, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, True, False, False, False, False, False, ] cpdef bint string_needs_quotes(unicode a): cdef Py_ssize_t length = len(a) # empty string is always quoted if length == 0: return True cdef: Py_ssize_t i Py_UCS4 ch bint is_number = True bint seen_period = False for i in range(length): ch = a[i] # if non-ASCII or contains any invalid unquoted characters, # we must write it with quotes if ch > 0x7F or not VALID_UNQUOTED_CHARS[ch]: return True elif is_number: # check if the string could be confused with an integer or float; # if so we write it with quotes to disambiguate its type if isdigit(ch): continue elif ch == c".": if not seen_period: seen_period = True else: # if it contains two '.', it can't be a number is_number = False else: # if any characters not in ".0123456789", it's not a number is_number = False return is_number cdef inline void escape_unicode(uint16_t ch, Py_UCS4 *dest): # caller must ensure 'dest' has rooms for 6 more Py_UCS4 dest[0] = c'\\' dest[1] = c'U' dest[5] = (ch & 15) + 55 if (ch & 15) > 9 else (ch & 15) + 48 ch >>= 4 dest[4] = (ch & 15) + 55 if (ch & 15) > 9 else (ch & 15) + 48 ch >>= 4 dest[3] = (ch & 15) + 55 if (ch & 15) > 9 else (ch & 15) + 48 ch >>= 4 dest[2] = (ch & 15) + 55 if (ch & 15) > 9 else (ch & 15) + 48 @cython.final cdef class Writer: cdef vector[Py_UCS4] *dest cdef bint unicode_escape cdef int float_precision cdef unicode indent cdef int current_indent_level cdef bint single_line_tuples def __cinit__( self, bint unicode_escape=True, int float_precision=6, indent=None, bint single_line_tuples=False, ): self.dest = new vector[Py_UCS4]() self.unicode_escape = unicode_escape self.float_precision = float_precision if indent is not None: if isinstance(indent, basestring): self.indent = tounicode(indent) else: self.indent = ' ' * indent else: self.indent = None self.single_line_tuples = single_line_tuples self.current_indent_level = 0 def __dealloc__(self): del self.dest def getvalue(self): return self._getvalue() def dump(self, file): cdef unicode s = self._getvalue() # figure out whether file object expects bytes or unicodes try: file.write(b"") except TypeError: file.write("") # this better not fail... 
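            # (the b"" probe above raised TypeError, i.e. the stream is
            # text-mode and only accepts str, not bytes)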
# file already accepts unicodes; use it directly file.write(s) else: # file expects bytes; always encode as UTF-8 file.write(PyUnicode_AsUTF8String(s)) def write(self, object obj): return self.write_object(obj) cdef inline Py_ssize_t extend_buffer( self, const Py_UCS4 *s, Py_ssize_t length ) except -1: self.dest.reserve(self.dest.size() + length) self.dest.insert(self.dest.end(), s, s + length) return length cdef inline unicode _getvalue(self): return PyUnicode_FromKindAndData( PyUnicode_4BYTE_KIND, self.dest.const_data(), self.dest.size() ) cdef Py_ssize_t write_object(self, object obj) except -1: if obj is None: return self.write_string("(nil)") if isinstance(obj, unicode): return self.write_string(obj) elif isinstance(obj, bool): self.dest.push_back(c'1' if obj else c'0') return 1 elif isinstance(obj, float): return self.write_short_float_repr(obj) elif isinstance(obj, (int, long)): return self.write_unquoted_string(unicode(obj)) elif isinstance(obj, list): return self.write_array_from_list(obj) elif isinstance(obj, tuple): return self.write_array_from_tuple(obj) elif isinstance(obj, OrderedDict): return self.write_ordered_dict(obj) elif isinstance(obj, dict): return self.write_dict(obj) elif isinstance(obj, bytes): return self.write_data(obj) else: raise TypeError( f"Object of type {type(obj).__name__} is not PLIST serializable" ) cdef Py_ssize_t write_quoted_string(self, unicode string) except -1: cdef Py_ssize_t length = PyUnicode_GET_LENGTH(string) cdef Py_UCS4 *s = PyUnicode_AsUCS4Copy(string) if not s: raise MemoryError() try: return self._write_quoted_string(s, length) finally: PyMem_Free(s) cdef Py_ssize_t _write_quoted_string( self, const Py_UCS4 *s, Py_ssize_t length ) except -1: cdef: vector[Py_UCS4] *dest = self.dest bint unicode_escape = self.unicode_escape const Py_UCS4 *curr = s const Py_UCS4 *end = &s[length] Py_UCS4 *ptr unsigned long ch Py_ssize_t base_length = dest.size() Py_ssize_t new_length = 0 while curr < end: ch = curr[0] if ch == c'\t' or ch == c' ': new_length += 1 elif ( ch == c'\n' or ch == c'\\' or ch == c'"' or ch == c'\a' or ch == c'\b' or ch == c'\v' or ch == c'\f' or ch == c'\r' ): new_length += 2 else: if ch < 128: if isprint(ch): new_length += 1 else: new_length += 4 elif unicode_escape: if ch > 0xFFFF: new_length += 12 else: new_length += 6 else: new_length += 1 curr += 1 dest.resize(base_length + new_length + 2) ptr = dest.data() + base_length ptr[0] = '"' ptr += 1 curr = s while curr < end: ch = curr[0] if ch == c'\t' or ch == c' ': ptr[0] = ch ptr += 1 elif ch == c'\n': ptr[0] = c'\\'; ptr[1] = c'n'; ptr += 2 elif ch == c'\a': ptr[0] = c'\\'; ptr[1] = c'a'; ptr += 2 elif ch == c'\b': ptr[0] = c'\\'; ptr[1] = c'b'; ptr += 2 elif ch == c'\v': ptr[0] = c'\\'; ptr[1] = c'v'; ptr += 2 elif ch == c'\f': ptr[0] = c'\\'; ptr[1] = c'f'; ptr += 2 elif ch == c'\\': ptr[0] = c'\\'; ptr[1] = c'\\'; ptr += 2 elif ch == c'"': ptr[0] = c'\\'; ptr[1] = c'"'; ptr += 2 elif ch == c'\r': ptr[0] = c'\\'; ptr[1] = c'r'; ptr += 2 else: if ch < 128: if isprint(ch): ptr[0] = ch ptr += 1 else: ptr[0] = c'\\' ptr += 1 ptr[2] = (ch & 7) + c'0' ch >>= 3 ptr[1] = (ch & 7) + c'0' ch >>= 3 ptr[0] = (ch & 7) + c'0' ptr += 3 elif unicode_escape: if ch > 0xFFFF: escape_unicode(high_surrogate_from_unicode_scalar(ch), ptr) ptr += 6 escape_unicode(low_surrogate_from_unicode_scalar(ch), ptr) ptr += 6 else: escape_unicode(ch, ptr) ptr += 6 else: ptr[0] = ch ptr += 1 curr += 1 ptr[0] = c'"' return new_length + 2 cdef inline Py_ssize_t write_unquoted_string(self, unicode string) except 
-1: cdef int kind = PyUnicode_KIND(string) cdef Py_UCS4 ch cdef Py_ssize_t i, length = PyUnicode_GET_LENGTH(string) cdef void *data = PyUnicode_DATA(string) self.dest.reserve(self.dest.size() + length) for i in range(length): ch = PyUnicode_READ(kind, data, i) self.dest.push_back(ch) return length cdef Py_ssize_t write_string(self, unicode string) except -1: if string_needs_quotes(string): return self.write_quoted_string(string) else: return self.write_unquoted_string(string) cdef Py_ssize_t write_short_float_repr(self, object py_float) except -1: cdef: unicode string = f"{py_float:.{self.float_precision}f}" Py_ssize_t length = PyUnicode_GET_LENGTH(string) Py_UCS4 ch # read digits backwards, skipping all the '0's until either a # non-'0' or '.' is found while length > 0: ch = string[length-1] if ch == c'.': length -= 1 # skip the trailing dot break elif ch != c'0': break length -= 1 return self.write_unquoted_string(string[:length]) cdef Py_ssize_t write_data(self, bytes data) except -1: cdef: vector[Py_UCS4] *dest = self.dest const unsigned char *src = data Py_UCS4 *ptr Py_ssize_t length = PyBytes_GET_SIZE(data) Py_ssize_t extra_length, i, j # the number includes the opening '<' and closing '>', and the # interleaving spaces between each group of 4 bytes; each byte # is encoded with two hexadecimal digit extra_length = 2 + 2*length + ((length - 1)//4 if length > 4 else 0) j = dest.size() dest.resize(j + extra_length) ptr = dest.data() ptr[j] = c'<' j += 1 for i in range(length): ptr[j] = HEX_MAP[(src[i] >> 4) & 0x0F] j += 1 ptr[j] = HEX_MAP[src[i] & 0x0F] if (i & 3) == 3 and i < length - 1: # if we've just finished a 32-bit int, print a space j += 1 ptr[j] = c' ' j += 1 ptr[j] = c'>' return extra_length # XXX The two write_array_* methods are identical apart from the type of # the 'seq' (one is list, the other is tuple). I tried using fused type # ``'list_or_tuple' to avoid duplication but I couldn't make it work... 
@cython.boundscheck(False) @cython.wraparound(False) cdef Py_ssize_t write_array_from_list(self, list seq) except -1: cdef: Py_ssize_t length = len(seq) Py_ssize_t last Py_ssize_t count Py_ssize_t i vector[Py_UCS4] *dest = self.dest unicode indent, newline_indent = "" if length == 0: dest.push_back(c'(') dest.push_back(c')') return 2 dest.push_back(c'(') count = 1 indent = self.indent if indent is not None: self.current_indent_level += 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) last = length - 1 for i in range(length): count += self.write_object(seq[i]) if i != last: if indent is None: count += self.extend_buffer(ARRAY_SEP_NO_INDENT, 2) else: dest.push_back(c',') count += 1 + self.write_unquoted_string(newline_indent) if indent is not None: self.current_indent_level -= 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) dest.push_back(c')') count += 1 return count @cython.boundscheck(False) @cython.wraparound(False) cdef Py_ssize_t write_array_from_tuple(self, tuple seq) except -1: cdef: Py_ssize_t length = len(seq) Py_ssize_t last Py_ssize_t count Py_ssize_t i vector[Py_UCS4] *dest = self.dest unicode indent, newline_indent = "" if length == 0: dest.push_back(c'(') dest.push_back(c')') return 2 dest.push_back(c'(') count = 1 indent = self.indent if indent is not None and not self.single_line_tuples: self.current_indent_level += 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) last = length - 1 for i in range(length): count += self.write_object(seq[i]) if i != last: if indent is None: count += self.extend_buffer(ARRAY_SEP_NO_INDENT, 2) else: dest.push_back(c',') if self.single_line_tuples: dest.push_back(c' ') count += 1 count += 1 + self.write_unquoted_string(newline_indent) if indent is not None and not self.single_line_tuples: self.current_indent_level -= 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) dest.push_back(c')') count += 1 return count cdef Py_ssize_t write_dict(self, dict d) except -1: cdef: unicode indent unicode newline_indent = "" vector[Py_UCS4] *dest = self.dest Py_ssize_t last, count, i if not d: dest.push_back(c'{') dest.push_back(c'}') return 2 dest.push_back(c'{') count = 1 indent = self.indent if indent is not None: self.current_indent_level += 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) last = len(d) - 1 for i, (key, value) in enumerate(sorted(d.items())): if not isinstance(key, unicode): key = unicode(key) count += self.write_string(key) count += self.extend_buffer(DICT_KEY_VALUE_SEP, 3) count += self.write_object(value) if i != last: if indent is None: count += self.extend_buffer(DICT_ITEM_SEP_NO_INDENT, 2) else: dest.push_back(c';') count += 1 + self.write_unquoted_string(newline_indent) else: dest.push_back(c';') count += 1 if indent is not None: self.current_indent_level -= 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) dest.push_back(c'}') count += 1 return count cdef Py_ssize_t write_ordered_dict(self, object d) except -1: # This is the same as the write_dict method but doesn't sort the items. 
# Also, in `write_dict`, the type of `d` is `dict` so it uses optimized # C dict methods, whereas here is generic `object`, as OrderedDict does # not have a C API (as far as I know). cdef: unicode indent unicode newline_indent = "" vector[Py_UCS4] *dest = self.dest Py_ssize_t last, count, i if not d: dest.push_back(c'{') dest.push_back(c'}') return 2 dest.push_back(c'{') count = 1 indent = self.indent if indent is not None: self.current_indent_level += 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) last = len(d) - 1 # we don't sort OrderedDict for i, (key, value) in enumerate(d.items()): if not isinstance(key, unicode): key = unicode(key) count += self.write_string(key) count += self.extend_buffer(DICT_KEY_VALUE_SEP, 3) count += self.write_object(value) if i != last: if indent is None: count += self.extend_buffer(DICT_ITEM_SEP_NO_INDENT, 2) else: dest.push_back(c';') count += 1 + self.write_unquoted_string(newline_indent) else: dest.push_back(c';') count += 1 if indent is not None: self.current_indent_level -= 1 newline_indent = '\n' + self.current_indent_level * indent count += self.write_unquoted_string(newline_indent) dest.push_back(c'}') count += 1 return count def dumps(obj, bint unicode_escape=True, int float_precision=6, indent=None, single_line_tuples=False): w = Writer( unicode_escape=unicode_escape, float_precision=float_precision, indent=indent, single_line_tuples=single_line_tuples, ) w.write(obj) return w.getvalue() def dump(obj, fp, bint unicode_escape=True, int float_precision=6, indent=None, single_line_tuples=False): w = Writer( unicode_escape=unicode_escape, float_precision=float_precision, indent=indent, single_line_tuples=single_line_tuples, ) w.write(obj) w.dump(fp) openstep-plist-0.3.1/tests/000077500000000000000000000000001447660323600156565ustar00rootroot00000000000000openstep-plist-0.3.1/tests/__init__.py000066400000000000000000000000001447660323600177550ustar00rootroot00000000000000openstep-plist-0.3.1/tests/test_parser.py000066400000000000000000000245201447660323600205660ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import sys from io import StringIO, BytesIO from collections import OrderedDict from openstep_plist._test import ( line_number_strings, is_valid_unquoted_string_char, advance_to_non_space, get_slashed_char, parse_unquoted_plist_string, parse_plist_string, ) import openstep_plist from openstep_plist.parser import string_to_number import pytest def test_line_number_strings(): assert line_number_strings("", 0) == 1 assert line_number_strings("a\na", 1) == 1 assert line_number_strings("a\na", 2) == 2 assert line_number_strings("a\naa\n", 4) == 2 assert line_number_strings("a\naa\na", 5) == 3 assert line_number_strings("a\raa\ra", 5) == 3 assert line_number_strings("a\r\naa\ra", 6) == 3 assert line_number_strings("a\n\naa\n\na", 7) == 5 def test_is_valid_unquoted_string_char(): assert is_valid_unquoted_string_char(ord("a")) assert is_valid_unquoted_string_char(ord("b")) assert is_valid_unquoted_string_char(ord("z")) assert is_valid_unquoted_string_char(ord("A")) assert is_valid_unquoted_string_char(ord("B")) assert is_valid_unquoted_string_char(ord("Z")) assert is_valid_unquoted_string_char(ord("_")) assert is_valid_unquoted_string_char(ord("$")) assert is_valid_unquoted_string_char(ord("/")) assert is_valid_unquoted_string_char(ord(":")) assert is_valid_unquoted_string_char(ord(".")) assert 
is_valid_unquoted_string_char(ord("-")) assert not is_valid_unquoted_string_char(ord('"')) assert not is_valid_unquoted_string_char(ord(",")) assert not is_valid_unquoted_string_char(ord("{")) assert not is_valid_unquoted_string_char(ord(")")) assert not is_valid_unquoted_string_char(ord(";")) assert not is_valid_unquoted_string_char(0x00) # NULL assert not is_valid_unquoted_string_char(0x0A) # \n assert not is_valid_unquoted_string_char(0x0D) # \r @pytest.mark.parametrize( "string, offset, expected", [ ("", 0, None), (" a", 0, "a"), (" a", 1, "a"), (" a", 2, None), ("\t\ta", 1, "a"), ("\t\ta", 2, "a"), ("\t\ta", 3, None), ("abc//this is an inline comment", 3, None), ("abc //also this\n", 3, None), ("abc //this as well\n\nz", 3, "z"), ("abc/this is not a comment", 3, "/"), ("abc/", 3, "/"), # not a comment either ("abcd /* C-style comments! */z", 4, "z"), ], ) def test_advance_to_non_space(string, offset, expected): assert advance_to_non_space(string, offset) == expected @pytest.mark.parametrize( "string, expected", [ ("000", "\x00"), ("001", "\x01"), ("002", "\x02"), ("003", "\x03"), ("004", "\x04"), ("005", "\x05"), ("006", "\x06"), ("007", "\x07"), ("012", "\n"), ("111", "I"), ("111", "I"), ("200", "\xa0"), ("201", "\xc0"), ("375", "\xff"), ("376", "\ufffd"), ("376", "\ufffd"), ("U0000", "\u0000"), ("U0001", "\u0001"), ("U0411", "\u0411"), ("U00FA", "\u00fa"), ("a", "\a"), ("b", "\b"), ("f", "\f"), ("n", "\n"), ("r", "\r"), ("t", "\t"), ("v", "\v"), ('"', '"'), ("\n", "\n"), ("\\", "\\"), ("z", "z"), ], ) def test_get_slashed_char(string, expected): assert get_slashed_char(string) == expected @pytest.mark.parametrize( "string, expected", [ ("a", "a"), ("abc;", "abc"), # trailing chars left in buffer ("1", "1"), ("123456789", "123456789"), ("1.23456789", "1.23456789"), ], ) def test_parse_unquoted_plist_string(string, expected): assert parse_unquoted_plist_string(string) == expected def test_parse_unquoted_plist_string_EOF(): with pytest.raises(openstep_plist.ParseError, match="Unexpected EOF"): parse_unquoted_plist_string("") == expected @pytest.mark.parametrize( "string, expected", [ ("a", "a"), ('"a"', "a"), ("'a'", "a"), ('"a\\012b"', ("a\nb")), # surrogate pair gets decoded as a single scalar value ('"\\UD83D\\UDCA9"', "\U0001F4A9"), # '💩' # surrogate that don't go in pairs are simply passed through ('"\\UD83D"', "\ud83d"), ('"\\UD83D\\012"', "\ud83d\n"), ('"\\UDCA9"', "\udca9"), ('"\\UDCA9\\012"', "\udca9\n"), ], ) def test_parse_plist_string(string, expected): assert parse_plist_string(string) == expected def test_parse_plist_string_EOF(): with pytest.raises(openstep_plist.ParseError, match="Unexpected EOF"): parse_plist_string("") with pytest.raises(openstep_plist.ParseError, match="Unterminated quoted string"): parse_plist_string("'a") def test_parse_plist_string_invalid_char(): with pytest.raises(openstep_plist.ParseError, match="Invalid string character"): parse_plist_string("\\") assert parse_plist_string("\\", required=False) is None def test_parse_plist_array(): assert openstep_plist.loads("(1)") == ["1"] assert openstep_plist.loads("(1,)") == ["1"] assert openstep_plist.loads("(\t1 \r\n, 2.2, c,\n)") == ["1", "2.2", "c"] assert openstep_plist.loads("('1', '2')") == ["1", "2"] assert openstep_plist.loads("(\n1,\n\"'2'\"\n)") == ["1", "'2'"] @pytest.mark.parametrize("string, lineno", [("(a ", 1), ("(a,\nb,\r\nc", 3)]) def test_parse_plist_array_missing_comma(string, lineno): msg = "Missing ',' for array at line %d" % lineno with pytest.raises(openstep_plist.ParseError, 
match=msg): openstep_plist.loads(string) @pytest.mark.parametrize("string, lineno", [("(a,", 1), ("(a,\nb, }", 2)]) def test_parse_plist_array_missing_paren(string, lineno): msg = r"Expected terminating '\)' for array at line %d" % lineno with pytest.raises(openstep_plist.ParseError, match=msg): openstep_plist.loads(string) def test_parse_plist_array_empty(): assert openstep_plist.loads("()") == [] def test_parse_plist_dict_empty(): assert openstep_plist.loads("") == {} assert openstep_plist.loads("{}") == {} @pytest.mark.parametrize( "string, expected", [ ("{a=1;}", {"a": "1"}), ('{"a"="1";}', {"a": "1"}), ("{'a'='1';}", {"a": "1"}), ("{\na = 1;\n}", {"a": "1"}), ("{\na\n=\n1;\n}", {"a": "1"}), ("{a=1;b;}", {"a": "1", "b": "b"}), ], ) def test_parse_plist_dict(string, expected): assert openstep_plist.loads(string) == expected def test_parse_plist_dict_invalid(): msg = "Unexpected character after key at line 1: u?','" with pytest.raises(openstep_plist.ParseError, match=msg): openstep_plist.loads("{a,}") msg = "Missing ';' on line 1" with pytest.raises(openstep_plist.ParseError, match=msg): openstep_plist.loads("{b ") msg = "Missing ';' on line 2" with pytest.raises(openstep_plist.ParseError, match=msg): openstep_plist.loads("{b = zzz;\nc = xxx}") msg = "Expected terminating '}' for dictionary at line 3" with pytest.raises(openstep_plist.ParseError, match=msg): openstep_plist.loads("{b = zzz;\nc = xxx;\nd = jjj;") @pytest.mark.parametrize( "string, expected", [ ("", b"\xaa"), ("", b"\xb1\xb0\xaf\xba"), ("", b"\xaa\xbb"), ("", b"\xcd\xef"), ("<4142\n4344>", b"ABCD"), ], ) def test_parse_plist_data(string, expected): assert openstep_plist.loads(string) == expected def test_parse_plist_data_invalid(): with pytest.raises(openstep_plist.ParseError, match="Expected terminating '>'"): openstep_plist.loads("") def test_parse_plist_object_invalid(): with pytest.raises(openstep_plist.ParseError, match="Unexpected character"): openstep_plist.loads(";") with pytest.raises( openstep_plist.ParseError, match="Unexpected EOF while parsing plist" ): openstep_plist.loads("{a=") with pytest.raises(openstep_plist.ParseError, match="Junk after plist at line 3"): openstep_plist.loads("{a=1;\nb=2;\n}...") def test_parse_string_resources(): assert openstep_plist.loads("a=1;\n'b' = 2.4;\n'c' = \"hello world\";") == { "a": "1", "b": "2.4", "c": "hello world", } def test_load(): fp = StringIO("{a=1;}") assert openstep_plist.load(fp) == {"a": "1"} def test_load_from_bytes(): if sys.version_info.major < 3: assert openstep_plist.loads(b"{a=1;}") == {"a": "1"} else: with pytest.raises(TypeError, match="Could not convert to unicode"): openstep_plist.loads(b"{a=1;}") @pytest.mark.parametrize( "string, expected", [ ("{a = 2;}", {"a": 2}), ("{a = {b = -2;};}", {"a": {"b": -2}}), ("{a = (1.5, -23.9999);}", {"a": [1.5, -23.9999]}), ("{a = x123; b = -c; minus = -;}", {"a": "x123", "b": "-c", "minus": "-"}), ], ) def test_loads_use_numbers(string, expected): assert openstep_plist.loads(string, use_numbers=True) == expected def test_loads_dict_type(): assert openstep_plist.loads( "{z = (a, b); y = (c, d); a = 'hello world';}", dict_type=OrderedDict ) == (OrderedDict([("z", ["a", "b"]), ("y", ["c", "d"]), ("a", "hello world")])) @pytest.mark.parametrize( "string, expected", [("2", 2), ("-2", -2), ("1.5", 1.5), ("-1.5", -1.5), ("23.99999", 23.99999)], ) def test_string_to_number(string, expected): assert string_to_number(string) == expected @pytest.mark.parametrize("string", ["", "10000s", " 1.5", "-", ".5", "1e-4", "1.2.3"]) 
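# note: values like "1e-4" and ".5" stay strings by design: scientific
# notation and a leading '.' are not recognized by get_unquoted_string_type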
@pytest.mark.parametrize("required", [True, False]) def test_string_to_number_invalid(string, required): if required: with pytest.raises(ValueError): string_to_number(string) else: string_to_number(string, required=False) == string openstep-plist-0.3.1/tests/test_writer.py000066400000000000000000000212521447660323600206050ustar00rootroot00000000000000# -*- coding: utf-8 -*- from __future__ import absolute_import, unicode_literals import openstep_plist from openstep_plist.writer import Writer, string_needs_quotes from io import StringIO, BytesIO from collections import OrderedDict import string import random import pytest class TestWriter(object): def test_simple(self): w = Writer() assert w.write("abc") == 3 assert w.getvalue() == "abc" f = StringIO() w.dump(f) assert f.getvalue() == "abc" def test_None(self): w = Writer() w.write(None) assert w.getvalue() == '"(nil)"' def test_unquoted_string(self): w = Writer() assert w.write(".appVersion") == 11 assert w.getvalue() == ".appVersion" @pytest.mark.parametrize( "string, expected", [ ("", '""'), ("\t", '"\t"'), ("\n\a\b\v\f\r", '"\\n\\a\\b\\v\\f\\r"'), ("\\", '"\\\\"'), ('"', '"\\""'), ("\0\1\2\3\4\5\6", '"\\000\\001\\002\\003\\004\\005\\006"'), ("\x0E\x0F\x10\x11\x12\x13", '"\\016\\017\\020\\021\\022\\023"'), ("\x14\x15\x16\x17\x18\x19", '"\\024\\025\\026\\027\\030\\031"'), ("\x1a\x1b\x1c\x1d\x1e\x1f\x7f", '"\\032\\033\\034\\035\\036\\037\\177"'), ("\x80\x81\x9E\x9F\xA0", '"\\U0080\\U0081\\U009E\\U009F\\U00A0"'), ("\U0001F4A9", '"\\UD83D\\UDCA9"'), # '💩' # if string may be confused with a number wrap it in quotes ("1", '"1"'), ("1.1", '"1.1"'), ("-23", '"-23"'), ("-23yyy", '"-23yyy"'), ("-", '"-"'), ("-a-", '"-a-"'), ], ) def test_quoted_string(self, string, expected): w = Writer() w.write(string) assert w.getvalue() == expected def test_quoted_string_no_unicode_escape(self): w = Writer(unicode_escape=False) w.write("\u0410") == 3 assert w.getvalue() == '"\u0410"' w = Writer(unicode_escape=False) assert w.write("\U0001F4A9") == 3 assert w.getvalue() == '"\U0001F4A9"' @pytest.mark.parametrize( "integer, expected", [ (0, "0"), (1, "1"), (123, "123"), (0x7fffffffffffffff, "9223372036854775807"), (0x7fffffffffffffff + 1, "9223372036854775808"), ], ) def test_int(self, integer, expected): w = Writer() w.write(integer) assert w.getvalue() == expected @pytest.mark.parametrize( "flt, expected", [ (0.0, "0"), (1.0, "1"), (123.456, "123.456"), (0.01, "0.01"), (0.001, "0.001"), (0.0001, "0.0001"), (0.00001, "0.00001"), (0.000001, "0.000001"), (0.0000001, "0"), # default precision is 6 ], ) def test_float(self, flt, expected): w = Writer() w.write(flt) assert w.getvalue() == expected def test_float_precision(self): w = Writer(float_precision=3) w.write(0.0001) assert w.getvalue() == "0" w = Writer(float_precision=0) w.write(0.999) assert w.getvalue() == "1" @pytest.mark.parametrize( "data, expected", [ (b"\x00", "<00>"), (b"\x00\x01", "<0001>"), (b"\x00\x01\x02", "<000102>"), (b"\x00\x01\x02\x03", "<00010203>"), (b"\x00\x01\x02\x03\x04", "<00010203 04>"), (b"\x00\x01\x02\x03\x04\x05", "<00010203 0405>"), (b"\x00\x01\x02\x03\x04\x05\x06", "<00010203 040506>"), (b"\x00\x01\x02\x03\x04\x05\x06\x07", "<00010203 04050607>"), (b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "<00010203 04050607 08>"), (b"\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11", "<090A0B0C 0D0E0F10 11>"), ], ids=lambda p: p.decode() if isinstance(p, bytes) else p, ) def test_data(self, data, expected): w = Writer() assert w.write(data) == len(expected) assert w.getvalue() == expected def 

    def test_bool(self):
        w = Writer()
        assert w.write(True) == 1
        assert w.getvalue() == "1"

        w = Writer()
        assert w.write(False) == 1
        assert w.getvalue() == "0"

    @pytest.mark.parametrize(
        "array, expected_no_indent, expected_indent",
        [
            ([], "()", "()"),
            ((), "()", "()"),
            ([1], "(1)", "(\n  1\n)"),
            ([1, 2], "(1, 2)", "(\n  1,\n  2\n)"),
            ([1.2, 3.4, 5.6], "(1.2, 3.4, 5.6)", "(\n  1.2,\n  3.4,\n  5.6\n)"),
            (
                (1, "a", ("b", 2)),
                "(1, a, (b, 2))",
                "(\n  1,\n  a,\n  (\n    b,\n    2\n  )\n)",
            ),
            ([b"a", b"b"], "(<61>, <62>)", "(\n  <61>,\n  <62>\n)"),
            (
                [{"a": "b"}, {"c": "d"}],
                "({a = b;}, {c = d;})",
                "(\n  {\n    a = b;\n  },\n  {\n    c = d;\n  }\n)",
            ),
        ],
    )
    def test_array(self, array, expected_no_indent, expected_indent):
        w = Writer()
        assert w.write(array) == len(expected_no_indent)
        assert w.getvalue() == expected_no_indent

        w = Writer(indent=2)
        assert w.write(array) == len(expected_indent)
        assert w.getvalue() == expected_indent

    @pytest.mark.parametrize(
        "dictionary, expected_no_indent, expected_indent",
        [
            ({}, "{}", "{}"),
            (OrderedDict(), "{}", "{}"),
            ({"a": "b"}, "{a = b;}", "{\n a = b;\n}"),
            ({1: "c"}, '{"1" = c;}', '{\n "1" = c;\n}'),
            (
                {"hello world": 12, "abc": [34, 56.8]},
                '{abc = (34, 56.8); "hello world" = 12;}',
                '{\n abc = (\n  34,\n  56.8\n );\n "hello world" = 12;\n}',
            ),
            (
                OrderedDict([("z", 2), ("a", 1), (12, "c")]),
                '{z = 2; a = 1; "12" = c;}',
                '{\n z = 2;\n a = 1;\n "12" = c;\n}',
            ),
        ],
    )
    def test_dictionary(self, dictionary, expected_no_indent, expected_indent):
        w = Writer()
        assert w.write(dictionary) == len(expected_no_indent)
        assert w.getvalue() == expected_no_indent

        w = Writer(indent=" ")
        assert w.write(dictionary) == len(expected_indent)
        assert w.getvalue() == expected_indent

    def test_type_error(self):
        obj = object()
        w = Writer()
        with pytest.raises(TypeError, match="not PLIST serializable"):
            w.write(obj)


def test_dumps():
    assert openstep_plist.dumps(
        {"a": 1, "b": 2.9999999, "c d": [33, 44], "e": (b"fghilmno", b"pqrstuvz")}
    ) == (
        '{a = 1; b = 3; "c d" = (33, 44); '
        "e = (<66676869 6C6D6E6F>, <70717273 7475767A>);}"
    )


def test_dump():
    plist = [1, b"2", {3: (4, "5", "\U0001F4A9")}]

    fp = StringIO()
    openstep_plist.dump(plist, fp)
    assert fp.getvalue() == '(1, <32>, {"3" = (4, "5", "\\UD83D\\UDCA9");})'

    fp = BytesIO()
    openstep_plist.dump(plist, fp, unicode_escape=False)
    assert fp.getvalue() == b'(1, <32>, {"3" = (4, "5", "\xf0\x9f\x92\xa9");})'

    with pytest.raises(AttributeError):
        openstep_plist.dump(plist, object())


valid_unquoted_chars = (
    string.ascii_uppercase + string.ascii_lowercase + string.digits + "._$"
)
invalid_unquoted_chars = [
    chr(c) for c in range(128) if chr(c) not in valid_unquoted_chars
]


@pytest.mark.parametrize(
    "string, expected",
    [
        (string.ascii_uppercase, False),
        (string.ascii_lowercase, False),
        # digits are allowed unquoted if not in first position
        ("a" + string.digits, False),
        (".appVersion", False),
        ("_private", False),
        ("$PWD", False),
        ("1zzz", False),
        ("192.168.1.1", False),
        ("0", True),
        ("1", True),
        ("2", True),
        ("3", True),
        ("4", True),
        ("5", True),
        ("6", True),
        ("7", True),
        ("8", True),
        ("9", True),
        ("", True),
        ("-", True),
        ("A-Z", True),
        ("hello world", True),
        ("\\backslash", True),
        ("http://github.com", True),
        (random.choice(invalid_unquoted_chars), True),
    ],
)
def test_string_needs_quotes(string, expected):
    assert string_needs_quotes(string) is expected
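
# Sketch, not part of the original tests: a dump()/load() round trip through a
# file-like object. It uses only the module-level API already exercised by
# test_dump above and by test_load in tests/test_parser.py; the test name is
# invented.
def test_dump_load_roundtrip_example():
    fp = StringIO()
    openstep_plist.dump({"a": "1"}, fp)
    fp.seek(0)
    assert openstep_plist.load(fp) == {"a": "1"}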

def test_single_line_tuples():
    assert openstep_plist.dumps({"a": 1, "b": (2, 3), "c": "Hello"}, indent=0) == (
        """{
a = 1;
b = (
2,
3
);
c = Hello;
}"""
    )
    assert openstep_plist.dumps(
        {"a": 1, "b": (2, 3), "c": "Hello"}, indent=0, single_line_tuples=True
    ) == (
        """{
a = 1;
b = (2, 3);
c = Hello;
}"""
    )

openstep-plist-0.3.1/tox.ini
[tox]
project_name = openstep_plist
envlist = py{38,39,310,311,312}-cov,coverage
minversion = 3.4.0
isolated_build = true
skip_missing_interpreters = true

[testenv]
skip_install =
    cov: true
    nocov: false
deps =
    cython >= 0.28.5
    pytest
    pytest-randomly
    pytest-cython
    cov: coverage
changedir = {toxinidir}
setenv =
    cov: PYTHONPATH=src
    cov: CYTHON_ANNOTATE=1
    cov: CYTHON_TRACE=1
commands =
    cov: python setup.py build_ext -i
    nocov: pytest {posargs}
    cov: coverage run --parallel -m pytest {posargs}

[testenv:coverage]
skip_install = true
deps =
    cython
    coverage
    diff_cover
setenv =
    PYTHONPATH=src
passenv =
    DIFF_AGAINST
changedir = {toxinidir}
commands =
    coverage erase
    coverage combine
    coverage report
    coverage xml -o {toxworkdir}/coverage.xml
    coverage html
    diff-cover --compare-branch {env:DIFF_AGAINST:origin/master} {toxworkdir}/coverage.xml

[testenv:codecov]
skip_install = true
deps =
    {[testenv:coverage]deps}
    codecov
setenv =
    {[testenv:coverage]setenv}
passenv =
    TOXENV CI TRAVIS TRAVIS_* APPVEYOR APPVEYOR_* CODECOV_*
changedir = {toxinidir}
commands =
    coverage combine
    codecov --env TOXENV

[testenv:wheel]
description = build wheel package for upload to PyPI
skip_install = true
deps =
    setuptools >= 36.4.0
    pip >= 18.0
    wheel >= 0.31.0
changedir = {toxinidir}
commands =
    python -c 'import os, glob; whl = glob.glob(".tox/dist/*.whl"); whl and os.remove(whl[0])'
    pip wheel --pre --no-deps --no-cache-dir --wheel-dir {distdir} --find-links {distdir} --no-binary {[tox]project_name} {[tox]project_name}

[pytest]
addopts = -v -r a

openstep-plist-0.3.1/vendor/
openstep-plist-0.3.1/vendor/msinttypes/